diff --git "a/4042.jsonl" "b/4042.jsonl" new file mode 100644--- /dev/null +++ "b/4042.jsonl" @@ -0,0 +1,649 @@ +{"seq_id":"412323191","text":"from tkinter import *\r\nimport re\r\nfrom good_boy import *\r\n\r\n\r\nclass log_in:\r\n\r\n def __init__(self, master):\r\n ch = \"*\"\r\n dh = \"\"\r\n frame = Frame(master)\r\n frame.pack()\r\n\r\n label_1 = Label(frame, text=\"E-mail: \")\r\n label_2 = Label(frame, text=\"Password: \")\r\n entry_1 = Entry(frame, width = 30)\r\n entry_2 = Entry(frame, show=ch, width=30)\r\n\r\n label_1.grid(row=0, sticky=E)\r\n label_2.grid(row=1, sticky=E)\r\n\r\n entry_1.grid(row=0, column=1)\r\n entry_2.grid(row=1, column=1)\r\n\r\n def check(event):\r\n if not re.search(\"[a-zA-Z0-9_?]*@[a-zA-Z]*\\.[a-z]*\", entry_1.get()):\r\n entry_1.delete(0, END)\r\n entry_2.delete(0, END)\r\n entry_1.insert(0, \"Please enter a valid E-mail\")\r\n else:\r\n login()\r\n\r\n def login():\r\n filepath1 = \"usernames.txt\"\r\n with open(filepath1) as fp1:\r\n line = fp1.readline()\r\n while line != entry_1.get() + \"\\n\" and line != \"\":\r\n line = fp1.readline()\r\n if line != \"\":\r\n fp1.close()\r\n frame.destroy()\r\n good_boy()\r\n else:\r\n entry_1.delete(0, END)\r\n entry_2.delete(0, END)\r\n entry_1.insert(0, \"Sorry, your e-mail was not found\")\r\n\r\n def hide(event):\r\n if not var.get():\r\n entry_2.config(show=dh)\r\n else:\r\n entry_2.config(show=ch)\r\n\r\n button_1 = Button(frame, text=\"Log In\")\r\n button_1.bind(\"\", check)\r\n button_1.grid(row=2, columnspan=2)\r\n var = IntVar()\r\n checkbox_1 = Checkbutton(frame, text=\"Show the text?\", variable=var)\r\n checkbox_1.bind(\"\", hide)\r\n checkbox_1.grid(row=1, column=2, columnspan=2)\r\n\r\n\r\n","sub_path":"login.py","file_name":"login.py","file_ext":"py","file_size_in_byte":1941,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"654550","text":"\nfrom __future__ import absolute_import\nimport hug\n\nfrom config import db\nfrom api.auth.user import check_sysadmin, query_current_user\nfrom models.operationlog import operationlogs\nfrom models.auth.user import users\nfrom models import rows2data\nfrom sqlalchemy.sql import or_\nIGNORES = {'last_modifed'}\n\n\n@hug.object.urls('')\nclass OperationLogs(object):\n @hug.object.get()\n def get(self, request, response, q: str=None):\n t = operationlogs.alias('o')\n joins = {'user': {\n 'select': ['id', 'username'],\n 'table': users.alias('u')}}\n query = db.filter_join(t, joins, request, ['-created_date'])\n u = query_current_user(request)\n if u and not check_sysadmin(u):\n query = db.filter_by_user(t, query, u)\n if q:\n q = '%{}%'.format(q)\n filters = or_(t.c.name.like(q),\n t.c.detail.like(q),\n t.c.username.like(q))\n query = query.where(filters)\n query = db.filter_by_date(t.c.created_date, query, request)\n rows = db.paginate_data(query, request, response)\n\n return rows2data(rows, operationlogs, joins, IGNORES)\n","sub_path":"api/operationlog.py","file_name":"operationlog.py","file_ext":"py","file_size_in_byte":1199,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"242923650","text":"\nimport webbrowser\nimport time\nfrom utility import *\n\ndef open_url(url):\n if url is None:\n return\n\n logger.debug('ToDo: auto get baidu path')\n chromePath = r'D:\\Users\\admin\\AppData\\Local\\Baidu\\BaiduBrowser\\8.7.5000.4962\\baidubrowser.exe'\n webbrowser.register('baidu', None, webbrowser.BackgroundBrowser(chromePath)) \n baidu = 
webbrowser.get('baidu')\n baidu.open(url, new=0, autoraise=False)\n\ndef open_urls(urls):\n if urls is None or len(urls) == 0:\n return\n\n for index, url in enumerate(urls):\n logger.debug('%d/%d: %s' % (index+1, len(urls), url))\n open_url(url)\n\n time.sleep(1)\n if index % 10 == 9:\n time.sleep(5)\n close_browser()\n\ndef get_words():\n url = 'http://top.baidu.com/buzz?b=2&fr=topboards';\n html = requestString(url)\n arr = htmlElements(html, '//*[@class=\"list-title\"]')\n arr = [x.text for x in arr]\n arr = list(set(arr))\n\n return arr\n\ndef get_serach_jifen():\n words = get_words()\n if words is None:\n return\n\n if len(words) > 60:\n words = words[0: 60]\n\n urls = ['http://www.baidu.com/s?wd=%s' % (x) for x in words]\n open_urls(urls)\n\ndef get_links():\n url = 'http://www.sina.com.cn/';\n html = requestString(url)\n arr = htmlElements(html, '//a')\n if arr is not None:\n arr = [x.get('href') for x in arr]\n arr = [x.strip() for x in arr if x is not None and len(x) > 30 and x.startswith('http')]\n arr = list(set(arr))\n\n return arr\n\ndef get_links_jifen():\n links = get_links()\n if len(links) > 200:\n links = links[100: 200]\n\n open_urls(links)\n\ndef close_browser():\n cmd = 'taskkill /F /IM baidubrowser.exe'\n systemCmd(cmd)\n\n\nif __name__ == '__main__':\n # setDebug(True)\n\n shop_url = 'http://vip.liulanqi.baidu.com/statics/user_point/index.html'\n\n if isDebug():\n get_serach_jifen()\n # close_browser()\n # get_links_jifen()\n # close_browser()\n # open_url(shop_url)\n pass\n else:\n time.sleep(5)\n\n logger.info('get_serach_jifen')\n get_serach_jifen()\n close_browser()\n logger.info('get_links_jifen')\n get_links_jifen()\n close_browser()\n\n logger.info('reopen browser')\n open_url(shop_url)\n\n","sub_path":"baidu_jifen.py","file_name":"baidu_jifen.py","file_ext":"py","file_size_in_byte":2300,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"344046911","text":"import requests\nimport time\n\nstarttime = time.time()\ncount = 0\nx = True\nwhile x == True:\n\n print('tick')\n\n url = 'https://harry-search.herokuapp.com/'\n requests.get(url)\n\n time.sleep(1200.0 - ((time.time() - starttime) % 1200.0))\n\n count = count + 1\n\n if count == 51:\n x = False\n","sub_path":"dyno_up.py","file_name":"dyno_up.py","file_ext":"py","file_size_in_byte":304,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"201609423","text":"#!/usr/bin/python3\n\"\"\" \"\"\"\n\n\nclass Node:\n \"\"\"\n defines node\n \"\"\"\n\n def __init__(self, data, next_node=None):\n \"\"\"\n initializes node\n\n Attributes:\n data: data stored in node\n next_node: address of next node in list\n \"\"\"\n self.data = data\n self.next_node = next_node\n\n @property\n def data(self):\n \"\"\"\n finds data\n \"\"\"\n return self.__data\n\n @data.setter\n def data(self, value):\n \"\"\"\n validates that data is an integer\n \"\"\"\n if type(value) is not int:\n raise TypeError('data must be an integer')\n else:\n self.__data = value\n\n @property\n def next_node(self):\n \"\"\"\n finds next_node\n \"\"\"\n return self.__next_node\n\n @next_node.setter\n def next_node(self, value):\n \"\"\"\n validates next_node as either None or a node\n \"\"\"\n if type(value) is not Node and value is not None:\n raise TypeError('next_node must be a Node object')\n else:\n self.__next_node = value\n\n\nclass SinglyLinkedList:\n \"\"\"\n defines singly linked list class\n \"\"\"\n\n def __init__(self):\n \"\"\"\n 
initializes singly linked list\r\n        \"\"\"\r\n        self.__head = None\r\n\r\n    def __str__(self):\r\n        \"\"\"\r\n        returns string version of list\r\n        \"\"\"\r\n        string = \"\"\r\n        if self.__head is None:\r\n            return string\r\n        runner = self.__head\r\n        while runner is not None:\r\n            string += str(runner.data)\r\n            runner = runner.next_node\r\n            if runner is not None:\r\n                string += \"\\n\"\r\n        return string\r\n\r\n    def sorted_insert(self, value):\r\n        \"\"\"\r\n        inserts a new node into correct position in list\r\n        \"\"\"\r\n        if self.__head is None or value < self.__head.data:\r\n            self.__head = Node(value, self.__head)\r\n            return\r\n        runner = self.__head\r\n        while runner.next_node is not None and runner.next_node.data < value:\r\n            runner = runner.next_node\r\n        runner.next_node = Node(value, runner.next_node)\r\n","sub_path":"0x06-python-classes/100-singly_linked_list.py","file_name":"100-singly_linked_list.py","file_ext":"py","file_size_in_byte":2154,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"400948498","text":"'''\na + (b.c)\na . (b+c)\na . b . c\n(a+b) . c\na + b + c\n'''\na, b, c = [int(input()) for i in range(3)]\n\n# Evaluate all the formulas, and gather them inside a list\nformulas = [a + (b*c),\n            a * (b+c),\n            (a+b) * c,\n            a * b * c,\n            a + b + c]\n\n# Ascending sort, from the smallest to the biggest.\nformulas = sorted(formulas)\n\n# The last item in the list, which is the biggest value of the formulas.\nprint(formulas[-1])\n","sub_path":"expression.py","file_name":"expression.py","file_ext":"py","file_size_in_byte":449,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"240512935","text":"'''\nWrite a Program that prints the summation of a Series up to nth length\nif user provides length.\nSeries: (1) + (1+2) + (1+2+3) + ... 
+ (1+2+3+4+...+n).\nInput: Enter Length of Series: 4\nOutput: The summation of Series of length 4 = 20\n\n'''\nx=int(input(\"Enter length of series:\"))\nsum1=0\nfor i in range(1,x+1):\n\tsum=0\n\tfor j in range(1,i+1):\n\t\tsum=sum+j\n\tsum1=sum1+sum\nprint(sum1)\n","sub_path":"Python/DailyFlash/19mar2020/MySolutions/program1.py","file_name":"program1.py","file_ext":"py","file_size_in_byte":385,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"507556143","text":"from sys import stdin\ninput = stdin.readline\n\narr = [list(map(int, input().split())) for _ in range(10)]\nvisit = [[0]*10 for _ in range(10)]\nsquare = [0, 5, 5, 5, 5, 5]\n\ntotal = 0\nfor i in range(10):\n    for j in range(10):\n        if arr[i][j]: total += 1\n\ndef put(x, y, size):\n    for i in range(x, x+size):\n        for j in range(y, y+size):\n            if not arr[i][j] or visit[i][j]: return False\n    return True\n\ndef on(x, y, size):\n    for i in range(x, x+size):\n        for j in range(y, y+size):\n            visit[i][j] = 1\n\ndef off(x, y, size):\n    for i in range(x, x+size):\n        for j in range(y, y+size):\n            visit[i][j] = 0\n\nans = float('inf')\n\ndef dfs(x, y, cnt, paper):\n    if paper == total:\n        global ans\n        ans = min(ans, cnt)\n        return\n    if cnt+2 > ans: return\n    for i in range(x, 10):\n        for j in range(10):\n            if not visit[i][j] and arr[i][j]:\n                k = 0\n                while i+k < 10 and j+k < 10 and k < 5:\n                    k += 1\n                    if not square[k]: continue\n                    if put(i, j, k):\n                        square[k] -= 1\n                        on(i, j, k)\n                        dfs(i, j, cnt+1, paper+k*k)\n                        off(i, j, k)\n                        square[k] += 1\n                else: return\n    return\n\ndfs(0, 0, 0, 0)\nif ans > 25: print(-1)\nelse: print(ans)","sub_path":"BOJ/201904/17136.py","file_name":"17136.py","file_ext":"py","file_size_in_byte":1414,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"138585374","text":"# Google Coding Interview Problem: \n# Given a string, compute recursively a new string where all the 'x' chars have been removed.\n\n\n# Input: xaxb\n\n\n# Desired Output: ab\n\n\nstr_ = input('Enter the original string\\n')\nchar_ = input('Enter the character to be removed\\n')\n\nstr_out = ''\n\nfor i in str_:\n\tif i != char_:\n\t\tstr_out += i\n\nprint('Output string after removal of %s is' %(char_))\nprint(str_out)\n\n","sub_path":"RemoveAChar.py","file_name":"RemoveAChar.py","file_ext":"py","file_size_in_byte":402,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"510316943","text":"import math\nimport hdfutils\nimport os\nimport numpy as np\nimport astropy.coordinates as coord\nimport astropy.units as u\n# See galcentricutils_new.py for analogies. This is just for Galactic Coordinates, instead.\n\n# To Galactic and Back (via ICRS exclusively.)\nclass galconversion(object):\n    def __init__(self, sourcedir):\n        self.source = sourcedir\n\n    # Note: requires l,b,distance,dmu_l,dmu_b,vlos. 
Assumes conventions of original ASCII data colnames.\n    def to_ICRS(self, hdfdir, hdfname, group, set):\n        # Set up HDF and grab table, and SkyCoord objects for all targets.\n        writer = hdfutils.hdf5_writer(hdfdir, hdfname)\n        table = writer.read_table(group, set)\n        skycoords = coord.SkyCoord(l=table['l'] * u.deg, b=table['b'] * u.deg, distance=table['dist'] * u.kpc,\n                                   pm_l_cosb=table['dmu_l']*np.cos(np.deg2rad(table['b'])) * u.mas/u.yr, pm_b=table['dmu_b']*u.mas/u.yr, radial_velocity=table['vlos']*u.km/u.s,\n                                   frame=\"galactic\")\n        # Effect conversion to ICRS, work through objects, collect converted quantities.\n        icrs_skycoords = skycoords.transform_to(coord.ICRS)\n        ra_list, dec_list, pmra_list, pmdec_list, distance_list, radial_velocity_list = [],[],[],[],[],[]\n        for object in icrs_skycoords:\n            ra, dec, pmra_cosdec, pmdec = object.ra/u.deg, \\\n                                          object.dec/u.deg, \\\n                                          object.pm_ra_cosdec / (u.mas / u.yr), \\\n                                          object.pm_dec / (u.mas / u.yr)\n\n            # Discard the dimensionless unit.\n            ra, dec, pmra_cosdec, pmdec = ra.value, dec.value, \\\n                                          pmra_cosdec.value, pmdec.value\n\n            # Remove cosdec, append to list\n            pmra = pmra_cosdec / math.cos(math.radians(dec))\n            ra_list.append(ra), dec_list.append(dec), pmra_list.append(pmra), pmdec_list.append(pmdec)\n\n        # Modify and save table.\n        table['ra'] = ra_list\n        table['dec'] = dec_list\n        table['pmra'] = pmra_list\n        table['pmdec'] = pmdec_list\n        writer.write_table(group, set, table)\n\n    # Converts ICRS to Galactic instead.\n    def to_GALACTIC(self, hdfdir, hdfname, group, set):\n        # Set up HDF and grab table, and SkyCoord objects for all targets.\n        writer = hdfutils.hdf5_writer(hdfdir, hdfname)\n        table = writer.read_table(group, set)\n        skycoords = coord.SkyCoord(ra=table['ra']*u.deg,\n                                   dec=table['dec']*u.deg,\n                                   distance=table['dist']*u.kpc,\n                                   pm_ra_cosdec=table['pmra']*np.cos(np.deg2rad(table['dec']))*u.mas/u.yr,\n                                   pm_dec=table['pmdec']*u.mas/u.yr,\n                                   radial_velocity=table['vlos']*u.km/u.s,\n                                   frame=\"icrs\")\n        # Effect conversion to Galactic, work through objects, collect converted quantities.\n        gal_skycoords = skycoords.transform_to(coord.Galactic)\n        l_list,b_list,dmu_l_list,dmu_b_list = [],[],[],[]\n        for object in gal_skycoords:\n            l,b,dmub = object.l/u.deg, object.b/u.deg, object.pm_b/(u.mas/u.yr)\n            l,b,dmub = l.value,b.value,dmub.value\n            dmul = ((object.pm_l_cosb/(u.mas/u.yr)).value)/math.cos(math.radians(b))\n\n            l_list.append(l),b_list.append(b),dmu_b_list.append(dmub),dmu_l_list.append(dmul)\n\n        # Modify and save table.\n        table['l'],table['b'],table['dmu_l'],table['dmu_b'] = l_list,b_list,dmu_l_list,dmu_b_list\n        writer.write_table(group, set, table)\n\n# ICRS and Galactic Conversions. 
Not useful (any use has been wrapped in galcentricutils_new already, anyway.)","sub_path":"master-streams/master-streams_new/deprecated/DEPRECATED_galacticutils.py","file_name":"DEPRECATED_galacticutils.py","file_ext":"py","file_size_in_byte":3818,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"80816855","text":"from django.contrib.auth import get_user_model\nfrom rest_framework import serializers\n\nfrom moments.models import Moment, Upvote, Share\nfrom accounts.api.users.serializers import UserInlineSerializer\n\nUser = get_user_model()\n\n\nclass MomentSerializer(serializers.ModelSerializer):\n user = UserInlineSerializer(read_only=True)\n upvote_user_list = UserInlineSerializer(many=True, read_only=True)\n\n class Meta:\n model = Moment\n fields = [\n 'id',\n 'user',\n 'text',\n 'audio',\n 'video',\n 'status',\n 'topic',\n 'upvote',\n 'downvote',\n 'is_origin',\n 'origin_id',\n 'shared_count',\n 'comment_count',\n 'upvote_user_list',\n 'updated',\n 'timestamp',\n ]\n read_only_fields = ['upvote', 'downvote', 'is_origin', 'origin_id', 'shared_count', 'comment_count']\n\n\nclass UpvoteSerializer(serializers.ModelSerializer):\n class Meta:\n model = Upvote\n fields = [\n 'id',\n 'user',\n 'moment',\n 'timestamp'\n ]\n read_only_fields = ['user', 'moment']\n\n\nclass ShareSerializer(serializers.ModelSerializer):\n class Meta:\n model = Share\n fields = [\n 'id',\n 'user',\n 'moment',\n 'timestamp'\n ]\n read_only_fields = ['user', 'moment']\n","sub_path":"src/moments/api/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":1457,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"535058139","text":"import pygame\nfrom pygame.sprite import Sprite\nclass Bill(Sprite):\n '''一个对飞船所处位置创建子弹'''\n def __init__(self,settings,screen,ship):\n super().__init__()\n self.screen=screen\n #创建子弹,确定位置\n self.rect=pygame.Rect(0,0,settings.bill_width,settings.bill_height)\n self.rect.centerx=ship.rect.centerx\n self.rect.top=ship.rect.top\n self.y=float(self.rect.y)\n self.color=settings.bill_color\n self.bill_speed=settings.bill_speed\n def move(self):\n self.y-=self.bill_speed\n self.rect.y=self.y\n def draw(self):\n pygame.draw.rect(self.screen,self.color,self.rect)\n \n","sub_path":"外星人射击/sheji.py","file_name":"sheji.py","file_ext":"py","file_size_in_byte":689,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"65607805","text":"'''\nCreated on 2016/12/26\nCreated by freeol.cn\n一些排序算法的Python实现\n@author: 拽拽绅士\n'''\n\nimport sys\nfrom _ast import While\nfrom celery.bin.celery import result\n\n'''顺序存储的二叉树实现(非完全存储)'''\nclass node1(object):\n def __init__(self, S, L, R, V):\n self.S = S#��\n self.L = L#左子\n self.R = R#右子\n self.V = V#值\n\n\nclass tree1(object):\n def createTree(self, a):\n data = []\n for n in a:\n data.append(node1(n[0], n[1], n[2], n[3]))\n return data\n def getTree(self, a):\n return self.createTree(a)\n\n'''链式存储的二叉树(非完全存储)'''\nclass tree2(object):\n def __init__(self):\n self.L = None#Left node\n self.R = None#Right node\n self.V = None#value\n self.tmp = {}\n def createTree(self, key, tree):\n if key in self.tmp:\n tmpN = self.tmp[key]\n tree.V = tmpN[3]\n Lkey = tmpN[1]\n Rkey = tmpN[2]\n if Lkey != None:\n Ltree = tree2()\n Ltree = self.createTree(Lkey, Ltree)\n tree.L = Ltree\n if Rkey != None:\n Rtree = tree2()\n Rtree = self.createTree(Rkey, Rtree)\n tree.R = Rtree\n return tree\n def 
getTree(self, a):\n for n in a:\n self.tmp[n[0]] = n#收集各节点信息\n tree = tree2()\n return self.createTree('1', tree)\n\n'''判断二叉树存储结构'''\ndef checkTree1orTree2(tree):\n if type(tree) == list:\n return 1#顺序存储\n else:\n return 2#链式存储\n\n'''获取根节点'''\ndef root(tree):\n chk = checkTree1orTree2(tree)\n if chk == 1:#顺序存储\n childKeys = {}\n for t in tree:\n if t.L != None:\n childKeys[t.L] = None\n if t.R != None:\n childKeys[t.R] = None\n for t in tree:\n if t.S not in childKeys:\n return t\n else:#链式存储\n return tree\n\n'''获取二叉树的度'''\ndef degree(tree):\n chk = checkTree1orTree2(tree)\n if chk == 1:#顺序存储\n return len(tree)\n else:#链式存储\n cnt = 1\n if tree.L != None:\n cnt += degree(tree.L)\n if tree.R != None:\n cnt += degree(tree.R)\n return cnt\n\n'''深度'''\ndef deepDegree(tree):\n chk = checkTree1orTree2(tree)\n if chk == 1:#顺序存储\n cnt = 0\n leafs = []#叶子集\n branchs = []#枝干集\n for t in tree:\n if t.L==None and t.R == None:\n leafs.append(t)\n else:\n branchs.append(t)\n save_cnt = 0\n for leaf in leafs:#回溯法 叶->枝->根\n cnt = 1\n key = leaf.S\n tmpBranchs = branchs.copy()\n i = 0\n while i < len(tmpBranchs):\n branch = tmpBranchs[i]\n if branch.L == key or branch.R == key:\n cnt+=1\n key = branch.S\n i = 0\n else:\n i+=1\n if cnt > save_cnt:\n save_cnt = cnt\n return save_cnt\n else:#链式存储\n cnt = 1\n Lcnt = 0\n Rcnt = 0\n if tree == None:\n return 0\n if tree.L != None:\n Lcnt = deepDegree(tree.L)\n if tree.R != None:\n Rcnt = deepDegree(tree.R)\n if Lcnt > Rcnt:\n cnt += Lcnt\n else:\n cnt += Rcnt\n return cnt\n\n'''链式结构二叉树\n前序遍历:根节点->左子树->右子树'''\ndef preorder(tree, m, result):\n if m == 1:#非递归实现(栈)\n static = []#栈\n t = tree\n '''法1\n while t != None or static != []:\n while t != None:\n result.append(t)\n static.append(t)\n t=t.L\n if static != []:\n t = static.pop()\n t = t.R\n '''\n static.append(tree)\n while static:\n n = static.pop()\n result.append(n)\n if n.R:\n static.append(n.R)\n if n.L:\n static.append(n.L)\n\n else:#递归实现\n if tree == None:\n return result\n result.append(tree)\n result=preorder(tree.L, 2, result)\n result=preorder(tree.R, 2, result)\n return result\n\n'''链式结构二叉树\n中序遍历:左子树->根节点->右子树'''\ndef inorder(tree, m, result):\n if m == 1:#非递归实现(栈)\n static = []#栈\n t = tree\n '''法1\n while t != None or static != []:\n while t != None:\n static.append(t)\n t=t.L\n if static != []:\n t = static.pop()\n result.append(t)\n t = t.R\n '''\n while t != None or static != []:\n while t != None:\n static.append(t)\n t = t.L\n t = static.pop()\n result.append(t)\n t = t.R\n else:#递归实现\n if tree == None:\n return result\n result=inorder(tree.L, 2, result)\n result.append(tree)\n result=inorder(tree.R, 2, result)\n return result\n\n'''链式结构二叉树\n后序遍历:左子树->右子树->根节点'''\ndef postorder(tree, m, result):\n if m == 1:#非递归实现(栈)\n static = []#栈\n t = tree\n mk = None\n while t != None or static != []:\n while t != None:\n static.append(t)\n t = t.L\n t = static.pop()\n if t.R == None or t.R == mk:\n result.append(t)\n mk = t\n t = None\n else:\n static.append(t)\n t = t.R\n else:#递归实现\n if tree == None:\n return result\n result = postorder(tree.L, 2, result)\n result = postorder(tree.R, 2, result)\n result.append(tree)\n return result\n\n'''order value print'''\ndef resultPrintV(msg, rs):\n v=[]\n for t in rs:\n v.append(t.V)\n print(msg, v)\n\n\n'''期望高度'''\n\n\ndef main():\n ''' 1\n ∧\n 2 3\n ∧ ∧\n 4 5 9 7\n ∧ ∧\n 8 6 10 11'''\n data = [ #原始数据\n ['1', '2', '3', 1],#Self key, Left key, Right key, Value\n ['2', '4', '5', 2],\n ['3', '9', '7', 3],\n ['4', '8', '6', 4],\n ['5', '10', '11', 5],\n ['9', None, 
None, 9],\n ['7', None, None, 7],\n ['8', None, None, 8],\n ['6', None, None, 6],\n ['10', None, None, 10],\n ['11', None, None, 11],\n ]\n print('原始数据大小', sys.getsizeof(data))\n print('预计二叉树根节点值', 1)\n print('预计二叉树的度', 11)\n print('预计二叉树的深度', 4)\n print('预计前序遍历值的结果', [1, 2, 4, 8, 6, 5, 10, 11, 3, 9, 7])\n print('预计中序遍历值的结果', [8, 4, 6, 2, 10, 5, 11, 1, 9, 3, 7])\n print('预计后序遍历值的结果', [8, 6, 4, 10, 11, 5, 2, 9, 7, 3, 1])\n\n print('========>创建顺序结构二叉树')\n t1 = tree1().getTree(data)#顺序结构\n print('顺序结构二叉树大小', sys.getsizeof(t1))\n root1 = root(t1)\n print('顺序结构二叉树根节点值', root1.V)\n print('顺序结构二叉树的度', degree(t1))\n print('顺序结构二叉树的深度', deepDegree(t1))\n\n print('========>创建链式结构二叉树')\n t2 = tree2().getTree(data)#链式结构\n print('链式结构二叉树大小', sys.getsizeof(t2))\n root2 = root(t2)\n print('链式结构二叉树根节点值', root2.V)\n print('链式结构二叉树的度', degree(t2))\n print('链式结构二叉树的深度', deepDegree(t2))\n rs = [];resultPrintV('链式结构 前序遍历值的结果->非递归实现', preorder(t2, 1, rs))\n rs = [];resultPrintV('链式结构 前序遍历值的结果->递归实现', preorder(t2, 2, rs))\n rs = [];resultPrintV('链式结构 中序遍历值的结果->非递归实现', inorder(t2, 1, rs))\n rs = [];resultPrintV('链式结构 中序遍历值的结果->递归实现', inorder(t2, 2, rs))\n rs = [];resultPrintV('链式结构 后序遍历值的结果->非递归实现', postorder(t2, 1, rs))\n rs = [];resultPrintV('链式结构 后序遍历值的结果->递归实现', postorder(t2, 2, rs))\n\nif __name__ == '__main__':\n main()","sub_path":"机器学习/决策树/二叉树.py","file_name":"二叉树.py","file_ext":"py","file_size_in_byte":8563,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"576130048","text":"# coco_eval\n\nimport nlgeval\nfrom nlgeval import NLGEval\nnlgeval = NLGEval()\n\ndef coco_eval(model, dataloader):\n #model.eval()\n\n references = list()\n hypothesis = list()\n\n data_tqdm = tqdm(dataloader, total = len(dataloader))\n for step, (images, input_ids, targets, all_caps) in enumerate(data_tqdm):\n with torch.no_grad():\n batch = torch.stack(images).shape[0]\n\n with torch.cuda.amp.autocast(): \n scores = model(torch.stack(images).to(device))\n \n references.extend(all_caps)\n\n samples = scores.argmax(2) \n for i in range(batch):\n if hyper_parameters['tokenizer'] == nltk_tokenizer:\n indices = torch.where((samples[i] > 3) & (samples[i] != 5))[0]\n hypothesis.extend([' '.join([hyper_parameters['tokenizer'].decode(token) for token in samples[i][indices].long().tolist()]) + '.'])\n elif hyper_parameters['tokenizer'] == gpt2_tokenizer:\n indices = torch.where((samples[i] < 50257) & (samples[i] != 13))[0]\n hypothesis.extend([hyper_parameters['tokenizer'].decode(samples[i][indices].long().tolist()) + '.']) \n assert len(references) == len(hypothesis)\n \n return references, hypothesis\n\n\nvalid_dataloader = torch.utils.data.DataLoader(\n valid_dataset, \n batch_size = 16,\n num_workers = caption_config.num_workers,\n shuffle = False,\n sampler = SequentialSampler(valid_dataset),\n pin_memory = False,\n collate_fn = collate_fn)\n\nreferences, hypothesis = coco_eval(caption_net.__sample__, valid_dataloader)\n\nfor i in range(len(references)):\n for j in range(len(references[i])):\n if '\\n' in references[i][j]:\n references[i][j] = references[i][j].replace('\\n', '')\n\nbleu = {\n 'bleu1': round(corpus_bleu(__ref2word__(references), __hyp2word__(hypothesis), weights=(1, 0, 0, 0)), 4),\n 'bleu2': round(corpus_bleu(__ref2word__(references), __hyp2word__(hypothesis), weights=(0.5, 0.5, 0, 0)), 4),\n 'bleu3': round(corpus_bleu(__ref2word__(references), __hyp2word__(hypothesis), weights=(0.33, 0.33, 0.33, 0)), 4),\n 'bleu4': round(corpus_bleu(__ref2word__(references), 
__hyp2word__(hypothesis), weights=(0.25, 0.25, 0.25, 0.25)), 4)\n    } \n\n\nnlgeval = NLGEval()\nnlgeval.compute_metrics(references, hypothesis)\n","sub_path":"coco_eval.py","file_name":"coco_eval.py","file_ext":"py","file_size_in_byte":2276,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"551409062","text":"from flask import Flask, request\nfrom flask_restplus import Api, Resource\nfrom database import Database\nfrom swagger_models import *\n\napp = Flask(__name__)\napi = Api(app)\n\nnew_todo = api.schema_model(\"NewTodo\", new_todo)\ntodo = api.schema_model(\"Todo\", todo)\n\n\n@api.route('/list')\nclass List(Resource):\n    @api.doc(summary='Get all todos',\n             description='Returns a collection of todos',\n             responses={200: ('Collection of todos', [todo])})\n    def get(self):\n        return Database().list(), 200\n\n\n@api.route('/add')\nclass Add(Resource):\n    @api.expect(new_todo)\n    def post(self):\n        Database().add(request.get_json(force=True) or {})\n        return {'message': 'The todo has been added successfully'}, 201\n\n\n@api.route('/<int:tid>/delete')\nclass Delete(Resource):\n    def delete(self, tid):\n        Database().delete(tid)\n        return {'message': 'The todo has been nuked successfully'}, 200\n\n\n@api.route('/<int:tid>/edit')\nclass Edit(Resource):\n    @api.expect(new_todo)\n    def put(self, tid):\n        Database().edit(request.get_json(force=True) or {}, tid)\n        return {'message': 'Todo edited'}\n\n\nif __name__ == '__main__':\n    app.run(host=\"0.0.0.0\", port=8080, debug=True)\n","sub_path":"backend/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1223,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"527662834","text":"import sqlite3\n\nfrom sources.common.Decorators import log_decorator\n\n\n@log_decorator\ndef create_table_db():\n\t\"\"\" create table if not exists in 'links.db' that receives job_link and its state as integer 1 scraped, -1 for never \"\"\"\n\n\tconn = sqlite3.connect(\"links.db\")\n\tcur = conn.cursor()\n\tcur.execute(\"\"\" CREATE TABLE IF NOT EXISTS links (joblink TEXT, state INT)\"\"\")\n\tconn.commit()\n\tconn.close()\n\n\n@log_decorator\ndef delete_table_db():\n\t\"\"\" delete table 'links' \"\"\"\n\n\tconn = sqlite3.connect(\"links.db\")\n\tcur = conn.cursor()\n\tcur.execute(\"\"\" DROP TABLE IF EXISTS links \"\"\")\n\tconn.commit()\n\tconn.close()\n\n\n@log_decorator\ndef add_job_link(job_link):\n\t\"\"\" add a job_link to the database if it does not exist \"\"\"\n\tif not check_if_exist(job_link):\n\t\tconn = sqlite3.connect(\"links.db\")\n\t\tcur = conn.cursor()\n\t\tcur.execute(\"\"\" INSERT INTO links VALUES (?,?) \"\"\", (job_link, -1))\n\t\tconn.commit()\n\t\tconn.close()\n\n\n@log_decorator\ndef set_state(job_link, state):\n\t\"\"\" set a state on job_link if joblink exists in database \"\"\"\n\n\tif check_if_exist(job_link):\n\t\tconn = sqlite3.connect(\"links.db\")\n\t\tcur = conn.cursor()\n\t\tcur.execute(\"\"\" UPDATE links SET state=? 
WHERE joblink=?\"\"\", (state, job_link))\n\t\tconn.commit()\n\t\tconn.close()\n\n\n@log_decorator\ndef get_state(job_link):\n\tif check_if_exist(job_link):\n\t\tconn = sqlite3.connect(\"links.db\")\n\t\tcur = conn.cursor()\n\t\tcur.execute(\"\"\" SELECT * FROM links WHERE joblink=?\"\"\", (job_link, ))\n\t\tfetched_one = cur.fetchone()\n\t\tconn.close()\n\t\treturn fetched_one[1]\n\telse:\n\t\treturn -1\n\n\n@log_decorator\ndef check_if_exist(job_link):\n\t\"\"\" return boolean if joblink exists in links table \"\"\"\n\n\tconn = sqlite3.connect(\"links.db\")\n\tcur = conn.cursor()\n\tcur.execute(\"\"\" SELECT * FROM links WHERE joblink=? \"\"\", (job_link, ))\n\tfetched_value = cur.fetchone()\n\tconn.close()\n\tif fetched_value is None:\n\t\treturn False\n\telse:\n\t\treturn True\n\n\n@log_decorator\ndef is_scrarped_job_link(job_link):\n\t\"\"\" Check whether a joblink is in the database and return its state \"\"\"\n\tconn = sqlite3.connect(\"links.db\")\n\tcur = conn.cursor()\n\tcur.execute(\"\"\" SELECT * FROM links WHERE joblink=? \"\"\", (job_link,))\n\tone_fetched = cur.fetchone()\n\tconn.close()\n\tif one_fetched is None:\n\t\treturn 0\n\telse:\n\t\treturn one_fetched[1]\n\n\n@log_decorator\ndef count_joblinks():\n\t\"\"\" return number of joblinks in links table \"\"\"\n\tconn = sqlite3.connect(\"links.db\")\n\tcur = conn.cursor()\n\tcur.execute(\"\"\" SELECT * FROM links \"\"\")\n\tfetched_all = cur.fetchall()\n\tconn.close()\n\treturn len(fetched_all)\n\n\n@log_decorator\ndef extract_all_joblinks(state):\n\t\"\"\" extract all joblinks with state arg \"\"\"\n\tconn = sqlite3.connect(\"links.db\")\n\tcur = conn.cursor()\n\tcur.execute(\"\"\" SELECT * FROM links WHERE state=?\"\"\", (state, ))\n\tfetched_all = cur.fetchall()\n\tconn.close()\n\turls = []\n\tfor fetched in fetched_all:\n\t\turls.append(fetched[0])\n\n\treturn urls\n","sub_path":"sources/common/db_manip.py","file_name":"db_manip.py","file_ext":"py","file_size_in_byte":2808,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"135365155","text":"\"\"\"Make a function that takes an integer i as parameter,\r\nthen returns a list of numbers in the fibonacci\r\nsequence all the way to the ith element in the sequence\"\"\"\r\n\r\ndef fib_list(i):\r\n    if i == 0:\r\n        return [0]\r\n    elif i == 1:\r\n        return [0, 1]\r\n    else:\r\n        lst = fib_list(i-1)\r\n        lst.append(lst[-1] + lst[-2])\r\n        return lst\r\n\r\nprint(fib_list(int(input(\"Up to which element?:\"))))","sub_path":"lsn21_recursion/fib_recursive.py","file_name":"fib_recursive.py","file_ext":"py","file_size_in_byte":756,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"17886510","text":"import time\nimport sys\n\nfrom PIL import Image\n\nimport numpy as np\nfrom bhp_preprocess import read_annotation\n\nNUM_SAMPLES_TO_MEAN = 100000\nIMG_SIZE=(1102, 278)\n\n\n# RESULT is 215.74363\ndef get_mean(train_samples=None, num_samples=NUM_SAMPLES_TO_MEAN):\n    \"\"\"\n    Get mean of all images for mean subtraction.\n    Assume that all images are from the same distribution,\n    so we only get a small subset and calculate the mean from it.\n\n    Args:\n    --------\n    train_samples: A generator of (image_path, list of bounding boxes)\n    num_samples: A number of samples to collect for calculating the mean\n    Returns:\n    
--------\n    Mean of all images\n    \"\"\"\n    count = 0\n    samples = np.zeros(IMG_SIZE[::-1])\n    for annotator in train_samples:\n        img_path = annotator.keys()[0]\n        try:\n            im = Image.open(img_path).convert(\"L\")\n            im = im.resize(IMG_SIZE)\n            data = np.asarray(im)\n            samples += data\n            count += 1\n        except IOError:\n            count -= 1\n            continue\n\n        # stopping\n        if count >= num_samples:\n            break\n\n        if count % 5000 == 0:\n            print(\"{} samples processed!\".format(count))\n\n    mean = np.mean(samples / float(count))\n\n    return mean\n\n\nif __name__ == '__main__':\n    start = time.time()\n    annotator = read_annotation(sys.argv[1])\n    # annotator = read_annotation(\"06_01_17\") # for testing only\n    print(\"Mean of this dataset is: {}\".format(get_mean(annotator)))\n    print(\"All data processing takes {} seconds\".format(time.time() - start))\n","sub_path":"data/get_mean.py","file_name":"get_mean.py","file_ext":"py","file_size_in_byte":1466,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"400201688","text":"\"\"\"\nThis problem tests bitwise operations on binary numbers: AND (&), OR (|), XOR (^), right shift (>>), left shift (<<).\nTo count the 1-bits of a binary number we can AND it with 1 to check whether its last bit is 1,\nand if the number is positive, right-shift it and keep counting until the number becomes 0.\nBut for a negative number this fails: -1 right-shifted is still -1, so the last bit stays 1 forever.\nTo avoid that infinite loop we shift the mask left instead: test n against 1, shift 1 left one bit, and test again. This takes 32 iterations.\n\nThere is also a cleverer method:\nif the last bit is 1, e.g. 0000 1111, subtracting 1 gives 0000 1110;\nif the last bit is 0, e.g. 0000 1110, subtracting 1 gives 0000 1101.\nNotice that the bits left of the rightmost 1 are unchanged, while that bit and everything right of it are flipped,\nso ANDing the number with itself minus one keeps the left part and zeroes the right part, clearing the lowest set bit.\nCounting how many times n & (n-1) can be applied before n reaches 0 therefore gives the number of 1-bits, without looping 32 times.\n\"\"\"\n\n\nclass Solution:\n    def hammingWeight(self, n: int) -> int:\n        return self.amazing(n)\n\n    @staticmethod\n    def normal(n: int) -> int:\n        flag = 1\n        total = 0\n        for i in range(32):\n            if n & flag:\n                total += 1\n            flag <<= 1\n        return total\n\n    @staticmethod\n    def amazing(n: int) -> int:\n        total = 0\n        while n:\n            n = n & (n-1)\n            total += 1\n        return total\n\n\n\n\n","sub_path":"剑指offer/15.py","file_name":"15.py","file_ext":"py","file_size_in_byte":1491,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"572786987","text":"class Solution:\n    def _countDict(self, mydict, value, prevValue):\n        result = ''\n        \n        if (prevValue != -1) and (prevValue != value):\n            result += \"%d%d\"%(mydict[prevValue], prevValue) \n            mydict = {}\n        \n        if mydict.has_key(value):\n            mydict[value] += 1\n        else:\n            mydict[value] = 1\n        \n        return result, mydict\n    \n    def _countvalue(self, value):\n        result = ''\n        mydict = {}\n        prevValue = -1 # keep track of last value within loop \n        while value /10 != 0:\n            newresult, mydict = self._countDict(mydict, value % 10, prevValue)\n            prevValue = value % 10\n            value = value /10\n            result +=newresult\n\n        newresult, mydict = self._countDict(mydict, value, prevValue)\n        result+=newresult\n        if len(mydict) > 0:\n            result+=\"%d%d\"%(mydict[value], value)\n        return int(result)\n\n    # @return a string\n    def countAndSay(self, n): # 1, 11, 21, 1211, 111221, 312211\n        value = 1\n        result = ''\n        while (n > 0) and (value is not None):\n            # print \"Value: \", value\n            value = self._countvalue(value)\n            n -= 1\n        return str(value)\ntest = Solution()\ntest.countAndSay(30)\n","sub_path":"count_and_say.py","file_name":"count_and_say.py","file_ext":"py","file_size_in_byte":1285,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"402448518","text":"\ntry:\n\tfileRef = open(\"sample1.txt\",\"r\");\n\tstr = fileRef.read();\n\tprint(\"The contents of the file: \"+str);\n\tfileRef.close();\nexcept IOError as e:\n\tprint(\"unable to find the specified file! 
\",e);\nfinally:\n print(\"This code will be executed!\");\n#except:\n # print(\"Error in the execution!\")\n#else:\n\t#print(\"file had been read successfully!\");","sub_path":"exception_handling/readFile.py","file_name":"readFile.py","file_ext":"py","file_size_in_byte":344,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"532752473","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Nov 13 20:26:42 2019\r\n\r\n@author: Admin\r\n\"\"\"\r\n\r\n# To identify positive numbers\r\nn = 12\r\nif n > 0:\r\n print(\"the entered number {} is positive\".format(n))\r\nelif n == 0:\r\n print(\"the entered number is neither negative nor postive\".format(n))\r\nelse:\r\n print(\"the entered number {} is negative\".format(n))\r\n# To identify even and odd numbers\r\nn = int(input()) #integer\r\nif n%2 == 0:\r\n print(\"the entered number {} is even\".format(n))\r\nelse:\r\n print(\"the entered number {} is odd\".format(n))\r\n# Printing a specific * pattern\r\nn = int(input())\r\nfor i in range(n):\r\n for j in range(i+1):\r\n print(\"*\", end = \" \")\r\n print(\"\\n\")\r\n# Calculating factorial of a number using recursion\r\ndef factorial(n):\r\n if n == 0 or n == 1:\r\n return 1\r\n else:\r\n return n*factorial(n-1)\r\nif __name__ == \"__main__\":\r\n n = int(input())\r\n result = factorial(n)\r\n print(\"factorial of the entered number is {}\".format(result)) \r\n \r\n# Calculate the facctorial of a number without recursion\r\nn = int(input())\r\nresult = 1\r\nif n > 0:\r\n while n > 0:\r\n result = result * n\r\n n -= 1\r\n\r\nif n < 0:\r\n result = \"inavlid\"\r\n\r\nprint(\"Factorial of the entered number is {}\".format(result))\r\n \r\n# Finding Transpose of a matrix\r\nimport numpy as np\r\na = np.array([[1,2,3,4],[1,2,4,5],[4,5,4,7],[6,5,2,1]])\r\nb = np.zeros((4,4))\r\nprint(a)\r\nfor i in range(len(a)):\r\n for j in range(len(a[0])):\r\n b[j][i] = a[i][j]\r\n\r\nprint(\"transpose of matrix a \\n{}\".format(b))\r\n \r\n# Summation of elements of a matrix\r\nimport numpy as np\r\na = np.array([[1,2,3,4],[1,2,4,5],[4,5,4,7],[6,5,2,1]])\r\nsum_of_matrix = 0\r\nfor i in range(len(a)):\r\n for j in range(len(a[0])):\r\n sum_of_matrix += a[i][j]\r\n\r\nprint(\"the Sum of elements of the matrix a is {}\".format(sum_of_matrix))\r\n# Summation of elements of a matrix using sum function\r\nimport numpy as np\r\na = np.array([[1,2,3,4],[1,2,4,5],[4,5,4,7],[6,5,2,1]])\r\nsum_of_matrix = sum(sum(a))\r\nprint(\"the Sum of elements of the matrix a is {}\".format(sum_of_matrix))\r\n \r\n# Multiplication of a 1-D and 2-D matrices\r\nimport numpy as np\r\ng = np.array([1,2,3,4,5])\r\ng_1 = np.zeros(5)\r\nfor i in range(len(g)):\r\n g_1[i] = g[i] * g[i]\r\n\r\nprint(\"Multiplication of 1-D array = \\n\",g_1)\r\n\r\nh = np.array([[1,2,3,4,5],[2,3,4,5,6]])\r\nm = np.array([[2,3,4,5,6],[1,2,3,4,5]])\r\nresult = np.zeros((2,5))\r\nfor i in range(len(h)):\r\n for j in range(len(m[0])):\r\n for k in range(len(m)):\r\n result[i][j] += h[i][k] * m[k][j]\r\n\r\nprint(\"Multiplication of 2-D array = \\n\",result)\r\n# Upper Triangle\r\nimport numpy as np\r\nM = np.array([[1,3,4],[3,6,7],[7,1,5]])\r\nN = np.zeros((3,3))\r\nprint(\"Upper Triangle matrix = \\n\")\r\nfor i in range(len(M)):\r\n for j in range(len(M[0])):\r\n if(i > j):\r\n N[i][j] = 0\r\n else:\r\n N[i][j] = M[i][j]\r\nprint(N)\r\n# Multiplication table\r\na = []\r\nfor i in range(1,11):\r\n print(i,end = \"\\t\")\r\n a.append(i)\r\nprint(\"\\n\")\r\nlen(a)\r\nfor j in range(2,11):\r\n 
print(j,end = \"\\t\")\r\n for k in range(1,10):\r\n print(j*a[k],end = \"\\t\")\r\n print(\"\\n\")\r\n","sub_path":"2016bec061 Emmanuel Allada/Practical 1 introduction to python/prac 1.py","file_name":"prac 1.py","file_ext":"py","file_size_in_byte":3148,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"607098522","text":"\"\"\"\nLocation.py\n\n@author: James Fowkes\n\nProvides location information for points on Earth\n\"\"\"\n\n#There's only one public method, this is OK.\n#pylint: disable=too-few-public-methods\n\nimport logging\nimport ephem\n\nclass Location:\n\n \"\"\" Simple class to store lat, long and alt \"\"\"\n\n def __init__(self, lat, long, alt):\n\n self.lat = lat\n self.long = long\n self.alt = alt\n\nLOCATIONS = {\n \"Nottingham\" : Location('1.13', '52.95', 80),\n \"Bletchley\" : Location('0.767', '51.97', 80)\n}\n\ndef get_observer(key):\n\n \"\"\"\n Return the Location object for the requested location,\n or for Nottingham if that location can't be found\n \"\"\"\n\n logger = logging.getLogger(__name__)\n\n location = None\n\n try:\n logger.info(\"Found location '%s'\", key)\n location = LOCATIONS[key]\n except KeyError:\n logger.warn(\"Location '%s' not found, defaulting to Nottingham\", key)\n location = LOCATIONS[\"Nottingham\"]\n\n observer = ephem.Observer()\n observer.lat = location.lat\n observer.lon = location.long\n observer.elevation = location.alt\n\n return observer\n","sub_path":"Observer.py","file_name":"Observer.py","file_ext":"py","file_size_in_byte":1116,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"333119890","text":"import openpyxl\nfrom openpyxl.styles import PatternFill, Font, Alignment\nimport pandas as pd\nimport os.path, time\n\n\n# 使用PANDAS进行数据比对处理,设置对应的状态位\n\n# 将前期表(Previous table)和分析表(Analyze table)分别读入dataframe中,df_p / df_a,为DF保留表头\ndf_p = pd.read_excel(r\".\\DATA\\previous.xlsx\", header=0)\ndf_a = pd.read_excel(r\".\\DATA\\analyze.xlsx\", header=0)\n\n# 前期表和分析表读入后,进行数据清洗\n\n\n# 输出已经数据清洗后的DATAFRAME\n# print(\"previous.xlsx文件内容如下:\")\n# print(df_p.head(10))\n# print(\"analyze.xlsx文件内容如下:\")\n# print(df_a.head(10))\n\n\n# 获取当前时间作为分析时间\n# analyze_time = time.localtime(time.time())\n# 获取前期表和分析表的文件修改时间作为判定状态变化的时间标量\ntime_p = os.path.getmtime(r\".\\DATA\\previous.xlsx\")\ntime_a = os.path.getmtime(r\".\\DATA\\analyze.xlsx\")\nprint(\"previous.xlsx修改时间为:%s\"%(time.ctime(time_p)))\nprint(\"analyze.xlsx修改时间为:%s\"%(time.ctime(time_a)))\ndelta_time = time_a - time_p\nprint(delta_time)\n# 需求备注:“预警”:60天;“小僵尸”:180天;“大僵尸”:365天。\nif 0 < delta_time < 60*86400:\n warning = \"正常\"\nelif 60*86400 <= delta_time < 180*86400:\n warning = \"预警\"\nelif 180*86400 <= delta_time <365*86400:\n warning = \"小僵尸\"\nelif 365*86400 <= delta_time:\n warning = \"大僵尸\"\nprint(\"Warning FLAG:%s\"%(warning))\n\n\nprint(\">>> 比对前期表previous.xlsx和分析表analyze.xlsx...\")\n# 以分析表为基础创建result数据表,简写为df_r\ndf_r = df_a\n# 为结果表df_r创建分析结果列\ndf_r[\"分析结果\"] = \"\"\n\n\n# 以前期表和分析表的索引进行嵌套循环,扫描analyze表中是否有previous表的记录,若有则确定是否修改\n# 外部遍历df_p,循环扫描df_a,扫描均以索引方式进行\n# 获取项目编号、产品编码的列号\nproject_no = df_p.columns.get_loc(\"项目编码\")\nproduct_no = df_p.columns.get_loc(\"产品编码\")\nproduct_count = df_p.columns.get_loc(\"数量\")\nproject_stage = df_p.columns.get_loc(\"项目推进阶段\")\nproduct_amount = df_p.columns.get_loc(\"预计落单金额\")\nproduct_outtime = df_p.columns.get_loc(\"预计出库时间\")\nanalyze_result = df_r.columns.get_loc(\"分析结果\")\n\n# 以遍历前期表为基础,扫描分析表\nfor idx_p in range(df_p.shape[0]):\n # 设置查找标志为假,默认为未找到\n 
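# (editor's note, not in the original file) found_flag tracks whether this
# previous-table row has a match (same 项目编码/project code and 产品编码/product
# code) in the analyze table; rows still unmatched after the scan below are
# appended to the result marked "删除" (deleted), mirroring the second pass
# further down that marks analyze-only rows as "新增" (new).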
found_flag = False\n\n for idx_a in range(df_a.shape[0]):\n # 若��目编号相同,则比较产品编码\n if df_p.iloc[idx_p, project_no] == df_a.iloc[idx_a, project_no]:\n # 比较previous表和analyze表的产品编码,若相同则比较存货数量,若相同则状态为不变,否则为修改\n if df_p.iloc[idx_p,product_no] == df_a.iloc[idx_a,product_no]:\n # 若项目编号相同,且产品编码相同,则判断为找到相同记录,修改标志位为真\n found_flag = True\n # 依次比较数量、项目推进阶段、预计落单金额、预计出库时间4项内容,有任何一项变化均为有变化\n if df_p.iloc[idx_p, product_count] != df_a.iloc[idx_a, product_count]:\n df_r.iloc[idx_a, analyze_result] += \"#数量:%d-->%d\"%(df_p.iloc[idx_p, product_count],df_a.iloc[idx_a, product_count])\n elif df_p.iloc[idx_p,project_stage] != df_a.iloc[idx_a, project_stage]:\n df_r.iloc[idx_a, analyze_result] += \"#阶段:%s-->%s\"%(df_p.iloc[idx_p,project_stage],df_a.iloc[idx_a, project_stage])\n elif df_p.iloc[idx_p,product_amount] != df_a.iloc[idx_a, product_amount]:\n df_r.iloc[idx_a,analyze_result] += \"#金额:%d-->%d\"%(df_p.iloc[idx_p,product_amount],df_a.iloc[idx_a, product_amount])\n elif df_p.iloc[idx_p,product_outtime] != df_a.iloc[idx_a, product_outtime]:\n df_r.iloc[idx_a,analyze_result] += \"#出库:%s-->%s\"%(df_p.iloc[idx_p,product_outtime],df_a.iloc[idx_a, product_outtime])\n else: # 没有变化\n df_r.iloc[idx_a,analyze_result] = \"未变化\"\n\n # 扫描分析表完毕,根据查找标志位判断df_p表中的记录是否在df_a中,若不在则说明记录已删除,需要添加到结果表中并做删除标记\n if found_flag == False:\n # 复制previous表中当前记录,并添加到analyze表中\n #创建交换df\n df_swap = df_p.iloc[[idx_p]]\n df_swap[\"分析结果\"] = \"删除\"\n df_r = df_r.append(df_swap)\n\n# 以遍历分析表为基础,扫描前期表\nfor idx_a in range(df_a.shape[0]):\n # 设置查找标志为假,默认为未找到\n found_flag = False\n for idx_p in range(df_p.shape[0]):\n if df_a.iloc[idx_a, project_no] == df_p.iloc[idx_p, project_no]:\n # 比较previous表和analyze表的产品编码,若相同则比较存货数量\n if df_a.iloc[idx_a, product_no] == df_p.iloc[idx_p, product_no]:\n # 若项目编号相同,且产品编码相同,则判断为找到相同记录,修改标志位为真\n found_flag = True\n if found_flag == False:\n df_r.iloc[idx_a,analyze_result] = \"新增\"\n\n# 对df_r按项目编号进行排序\ndf_r.sort_values(by=\"项目编码\", ascending=True)\n\n# 从分析结果DataFrame中输出分析结果\nprint(\">>> 输出区域项目分析结果表result.xlsx...\")\n# 输出报备分析DF数据至分析表EXCEL文件中\nwriter = pd.ExcelWriter(r\".\\DATA\\result.xlsx\", datetime_format='YYYY-MM-DD')\ndf_r.to_excel(writer, sheet_name='Analyze', index=False)\nwriter.close()\n\n# 按照定制要求,设置输出EXCEL格式\n# 打开待设置结果表文件,获得操作SHEET\nprint(\">>> 调整区域项目报备分析结果表格式...\")\nwb = openpyxl.load_workbook(r\".\\DATA\\result.xlsx\")\nws = wb.active\n# 设置行高\nws.row_dimensions[1].height = 25\nws.row_dimensions[2].height = 20\n# 设置列宽\n# A B C D E F G H I J K L M N O P Q R S T U V W X\ncolumn_width = [15, 8, 15, 15, 25, 20, 20, 10, 8, 15, 15, 15, 12, 8, 10, 10, 8, 10, 10, 15, 15, 15, 8, 30]\nfor i in range(1, ws.max_column + 1):\n ws.column_dimensions[openpyxl.utils.get_column_letter(i)].width = column_width[i - 1]\n\n# 增加首行标题行\nws.insert_rows(1)\n# 设置首行标题为区域项目报备分析\nws['A1'] = \"区域项目报备分析\"\nws.merge_cells(\"A1:X1\")\n\n\n# 设置表头格式(字体、背景色)\ntitle_font = Font(name=u'微软雅黑', bold=True, size=18)\nheader_font = Font(name=u'宋体', bold=True, size=12)\nmark_font = Font(name=u'宋体', bold=True, size=12)\ndelete_font = Font(name=u'宋体', color='FF0000', strike=True) # 红色\nmodify_font = Font(name=u'宋体', color='336666') #\nnewadd_font = Font(name=u'宋体', color='0033FF')\n\nalign_center = Alignment(horizontal='center', wrap_text=True)\nalign_left = Alignment(horizontal='left', wrap_text=True)\nalign_right = Alignment(horizontal='right', wrap_text=True)\n\n# 设置表头标题行\nws['A1'].font = title_font\nws['A1'].fill = PatternFill(\"solid\", fgColor='DDDDDD')\nws['A1'].alignment = align_center\n\n# 设置表头Header行字体和背景色\nfor i in range(1, 25):\n ws.cell(2, 
i).font = header_font\n if 1 <= i <= 23:\n ws.cell(2, i).fill = PatternFill(\"solid\", fgColor='0099FF') # 蓝色\n if i == 24:\n ws.cell(2, i).fill = PatternFill(\"solid\", fgColor='00FF66') # 绿���\n\n# 扫描分析状态,设置对应格式\nfor idx_row in range(3, ws.max_row + 1):\n # 设置第22列,预计出库时间的日期显示格式\n ws.cell(idx_row,22).number_format = \"YYYY-MM-DD\"\n # 设置新增项的提醒色和格式\n if ws.cell(idx_row, 24).value.find(\"新增\") != -1:\n for j in range(1, ws.max_column + 1):\n ws.cell(idx_row, j).font = newadd_font\n # 设置修改项的提醒色和格式\n if ws.cell(idx_row, 24).value.find(\"#\") != -1:\n for j in range(1, ws.max_column + 1):\n ws.cell(idx_row, j).font = modify_font\n # 设置删除项的提醒色和格式\n if ws.cell(idx_row, 24).value.find(\"删除\") != -1:\n for j in range(1, ws.max_column + 1):\n ws.cell(idx_row, j).font = delete_font\n\n # 设置不同状态的提醒色\n # 叠加预警标志\n if ws.cell(idx_row, 24).value.find(\"未变化\") != -1:\n ws.cell(idx_row, 24).value += \"-%s\"%(warning)\n # 根据标志设置格式\n if ws.cell(idx_row, 24).value.find(\"预警\") != -1:\n for j in range(1, ws.max_column +1):\n ws.cell(idx_row, j).fill = PatternFill(\"solid\", fgColor='FF33FF') # 紫色\n if ws.cell(idx_row, 24).value.find(\"小僵尸\") != -1:\n for j in range(1, ws.max_column +1):\n ws.cell(idx_row, j).fill = PatternFill(\"solid\", fgColor='FFCC00') # 橙色\n if ws.cell(idx_row, 24).value.find(\"大僵尸\") != -1:\n for j in range(1, ws.max_column +1):\n ws.cell(idx_row, j).fill = PatternFill(\"solid\", fgColor='CC0000') # 暗红\n\n\n# 保存格式设置修改\nwb.save(filename=r\".\\DATA\\result.xlsx\")\n\nprint(\">>> 区域项目报备分析全部完成,可在DATA目录下【result.xlsx】查看分析结果!\")","sub_path":"area_analyze_OA.py","file_name":"area_analyze_OA.py","file_ext":"py","file_size_in_byte":9250,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"527682751","text":"import peewee\n\nfrom . 
import command, helpers, reporter\n\nfrom ban.core.models import Group, PostCode, HouseNumber, Position\n\n__namespace__ = \"import\"\n\n\n@command\n@helpers.nodiff\ndef ign_group(paths=[], **kwargs):\n \"\"\"Import IGN street and locality CSV exports.\n\n paths Paths to street and locality CSV files.\"\"\"\n for path in paths:\n rows = helpers.load_csv(path)\n rows = list(rows)\n helpers.batch(process_group, rows, total=len(rows))\n\n\n@helpers.session\ndef process_group(row):\n name = row.get('nom')\n fantoir = row.get('id_fantoir')\n municipality = 'insee:{}'.format(row.get('code_insee'))\n ign = row.get('identifiant_fpb')\n data = dict(name=name, fantoir=fantoir, municipality=municipality, ign=ign,\n kind=Group.WAY)\n laposte = row.get('id_poste') or None\n if laposte:\n data['laposte'] = laposte\n validator = Group.validator(**data)\n if validator.errors:\n reporter.error('Je suis pas content', validator.errors)\n else:\n validator.save()\n reporter.notice('Je suis content', name)\n\n\n@command\n@helpers.nodiff\ndef ign_postcode(path, **kwargs):\n \"\"\"Import from IGN postcode CSV exports.\n\n path Path to postcode CSV files.\"\"\"\n rows = helpers.load_csv(path)\n rows = list(rows)\n helpers.batch(process_postcode, rows, total=len(rows))\n\n\n@helpers.session\ndef process_postcode(row):\n name = row.get('libelle')\n municipality = 'insee:{}'.format(row.get('code_insee'))\n code = row.get('code_post')\n validator = PostCode.validator(name=name, municipality=municipality,\n code=code)\n if validator.errors:\n reporter.error('Postcode error', validator.errors)\n else:\n validator.save()\n reporter.notice('Postcode saved', code)\n\n\n@command\n@helpers.nodiff\ndef ign_housenumber(path, **kwargs):\n \"\"\"Import from IGN housenumbers CSV exports.\n\n path Path to housenumbers CSV files.\"\"\"\n rows = helpers.load_csv(path)\n rows = list(rows)\n helpers.batch(process_housenumber, rows, total=len(rows))\n\n\n@helpers.session\ndef process_housenumber(row):\n number = row.get('numero')\n ordinal = row.get('rep')\n parent = 'ign:{}'.format(row.get('identifiant_fpb'))\n postcode = 'code:{}'.format(row.get('code_post'))\n complement = row.get('designation_de_l_entree')\n ign = row.get('id')\n lat = row.get('lat')\n lon = row.get('lon')\n localisation_type = row.get('type_de_localisation')\n data = dict(number=number, ordinal=ordinal, ign=ign, parent=parent,\n postcode=postcode)\n laposte = row.get('cea')\n if laposte:\n data['laposte'] = laposte\n validator = HouseNumber.validator(**data)\n if validator.errors:\n reporter.error('HouseNumber error', validator.errors)\n else:\n try:\n housenumber = validator.save()\n except peewee.IntegrityError as e:\n reporter.error('SQL Error ', str(e))\n else:\n reporter.notice('HouseNumber created', (number, ordinal))\n if localisation_type != 'Au centre commune':\n process_position(housenumber, (lon, lat), localisation_type,\n {'complement': complement})\n else:\n reporter.notice('Skipped centre commune', str(housenumber))\n\n\ndef process_position(housenumber, center, localisation_type, attributes):\n if localisation_type == 'Interpolée':\n positioning = Position.INTERPOLATION\n elif localisation_type == 'Projetée du centre parcelle':\n positioning = Position.PROJECTION\n else:\n positioning = Position.OTHER\n validator = Position.validator(housenumber=housenumber, center=center,\n attributes=attributes, source='IGN',\n positioning=positioning,\n kind=Position.ENTRANCE)\n if validator.errors:\n reporter.error('Position error', validator.errors)\n else:\n 
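# (editor's note, not in the original file) validation succeeded, so persist
# the postcode; save() on the validator is assumed here to commit the record
# before the code is logged on the next line.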
validator.save()\n reporter.notice('Position created', center)\n","sub_path":"ban/commands/ign.py","file_name":"ign.py","file_ext":"py","file_size_in_byte":4085,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"568692171","text":"teams = {}\n\ndef input_or_cancel(prompt):\n i = input(prompt)\n if i == \"q\":\n return None\n else:\n return i\n\ndef get_positive_int():\n while True:\n i = input_or_cancel(\"Enter a positive integer or 'q' to cancel: \")\n if i is not None:\n if i.isdigit():\n return int(i)\n else:\n print(f\"Invalid input {i}\")\n continue\n return i # user canceled, so return none\n\ndef get_bool():\n while True:\n i = input_or_cancel(\"Please enter (y)es or (n)o: \")\n if i is not None:\n i = i.lower()\n if i in [\"t\", \"true\", \"1\", \"y\", \"yes\"]: # Accept more truthy values\n return True\n elif i in [\"f\", \"false\", \"0\", \"n\", \"no\"]: # Same but for falsey\n return False\n else:\n print(f\"Invalid input {i}\")\n continue\n return i # User canceled\n\ndef get_comps():\n comps = {}\n while True:\n if comps == {}: # No comps have been, so add\n print(\"Add competition?\")\n else: # Comps have been added, so add another comp\n print(\"Add another competition?\")\n if not get_bool(): #User does not want to add more competitions\n return comps\n\n name = input_or_cancel(\"Input Competition Name or 'q' to cancel:\\n\")\n if name is None:\n continue\n elif name in comps:\n print(f\"Competition {name} already in list\")\n continue\n\n location = input_or_cancel(\"Input Location or 'q' to cancel:\\n\")\n if location is None:\n continue\n comps[name] = location\n\ndef print_team(team, team_num):\n print(f\"Team Number {team_num}\")\n for a in attributes:\n value = team.get(a)\n if value is not None:\n if a == \"2019_comps\":\n print(a + \":\")\n if value == {}:\n print(f\"No stored competitions for {team_num}\")\n continue\n for comp, location in value.items():\n print(f\"{comp} in {location}\")\n continue\n print(f\"{a}: {value}\")\n else:\n print(f\"{a} is unknown for team {team_num}\")\n\nattributes = [\n \"location\", \n \"rookie_year\", \n \"competed_2019\", \n \"2019_comps\", \n \"2019_awards\",\n ]\n\nwhile True:\n #Main Menu\n print(\"\"\"\n Allowed Actions:\n (a)dd Team\n (v)iew team data\n (m)odify team data\n (r)emove team\n (s)earch for teams\n (l)ist all teams\n (e)xit\n \"\"\")\n selection = \"\"\n selection = input(\"Action: \")\n if selection == \"a\": #Add team\n print(\"Enter team Number:\")\n team_num = get_positive_int()\n if team_num in teams:\n print(f\"Team Number {team_num} already in teams\")\n continue\n elif team_num is None:\n continue #User canceled, so exit add team and return to main menu\n temp_team = {}\n temp_team[\"location\"] = input_or_cancel(\"Input team location or\" \n + \" 'q' if unknown: \") # None for unknowns \n print(\"Enter team rookie year\")\n temp_team[\"rookie_year\"] = get_positive_int()\n print(\"Enter if team competed in 2019\")\n temp_team[\"competed_2019\"] = get_bool()\n temp_team[\"2019_comps\"] = get_comps()\n temp_team[\"2019_awards\"] = input_or_cancel(\"Enter 2019 awards or\"\n + \" 'q' if unknown\\n\")\n\n print(f\"Save team {team_num}\")\n if get_bool():\n teams[team_num] = temp_team\n\n elif selection == \"v\": #View team\n print(\"Input team number: \")\n team_num = get_positive_int()\n if team_num not in teams:\n print(f\"Team Number {team_num} not stored.\")\n continue\n elif team_num == None:\n continue\n print_team(teams[team_num], team_num)\n\n elif 
selection == \"m\": #modify team\n print(\"Input team number:\")\n team_num = get_positive_int()\n if team_num is None:\n continue\n elif team_num not in teams:\n print(f\"Team number {team_num} not stored.\")\n continue\n print(\"Current Values:\")\n print_team(teams[team_num], team_num) #Show current values\n print(\"\"\"\n Attribute to change:\n (l)ocation\n (r)ookie year\n (c)ompetitions in 2019\n (i)f the team competed in 2019\n (a)wards won in 2019\n \"\"\")\n while True:\n i = input_or_cancel(\"Select attribute or 'q' to cancel: \")\n if i in [\"l\", \"r\", \"c\", \"i\", \"a\", None]:\n break\n else:\n print(\"Invalid attribute selection.\")\n if i == None:\n continue\n if i == \"l\": # Change location\n location = input_or_cancel(\"Enter updated location or 'q' to cancel: \")\n if location is not None:\n teams[team_num][\"location\"] = location\n elif i == \"r\": # Change rookie year\n rookie = input_or_cancel(\"Enter updated rookie year or 'q' to cancel: \")\n if rookie is not None:\n teams[team_num][\"rookie_year\"] = rookie\n elif i == \"c\": # Change 2019 competitions\n print(\"Please enter all 2019 competitions. Enter no competitions to cancel\")\n comps = get_comps()\n if comps != {}:\n teams[team_num][\"2019_comps\"] = comps\n elif i == \"i\": # Change competed 2019\n print(\"Enter if team competed in 2019 or 'q' to cancel\")\n comp_2019 = get_bool()\n if comp_2019 is not None:\n teams[team_num][\"competed_2019\"] = comp_2019\n elif i == \"a\":\n awards = input_or_cancel(\"Enter 2019 awards or 'q' to cancel\")\n if awards is not None:\n teams[team_num][\"2019_awards\"] = awards\n\n elif selection == \"r\": #remove team\n print(\"Input team number:\")\n team_num = get_positive_int()\n if team_num is None:\n continue\n elif team_num not in teams:\n print(f\"Team Number {team_num} not stored.\")\n continue\n\n print(f\"Delete team {team_num}?\")\n if get_bool():\n del teams[team_num]\n\n elif selection == \"s\": #search for teams\n print(\"Enter the start of or complete team number to search for:\")\n search_team = get_positive_int()\n for t in teams:\n if str(t).startswith(str(search_team)):\n print(t)\n\n elif selection == \"l\": #list teams\n if len(teams.keys()) < 1:\n print(\"No stored teams\")\n continue\n print(\"Stored Teams:\")\n temp_ls = [] #used to store numbers temporarily to print nicer\n for team in teams.keys():\n if len(temp_ls) < 4: #print 4 numbers per line\n temp_ls.append(str(team))\n else:\n print(\" \".join(temp_ls)) \n temp_ls = [str(team)]\n else:\n print(\" \".join(temp_ls))\n\n elif selection == \"e\": #exit program\n print(\"Confirm Exit:\")\n if get_bool():\n break\n\n else:\n print(f\"Invalid Action: {selection}\\n\")","sub_path":"ch_3_assign_kevin_rockwell.py","file_name":"ch_3_assign_kevin_rockwell.py","file_ext":"py","file_size_in_byte":7262,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"143268389","text":"# Given a binary tree, you need to compute the length of the diameter of the tree. The diameter of a binary tree is the length of the longest path between any two nodes in a tree. 
This path may or may not pass through the root.\n#\n# Example:\n# Given a binary tree\n# 1\n# / \\\n# 2 3\n# / \\\n# 4 5\n# Return 3, which is the length of the path [4,2,1,3] or [5,2,1,3].\n#\n# Note: The length of path between two nodes is represented by the number of edges between them.\n\n# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\n\nclass Solution:\n def diameterOfBinaryTree(self, root: TreeNode) -> int:\n self.diameter = 1\n\n # Depth of tree\n def depth(node):\n if node is None:\n return 0\n else:\n ldepth = depth(node.left)\n rdepth = depth(node.right)\n self.diameter = max(self.diameter, ldepth + rdepth + 1)\n return max(ldepth, rdepth) + 1\n\n # Function Call\n depth(root)\n\n # Return\n return self.diameter - 1\n","sub_path":"leetcode/easy/543_diameter_of_binary_tree.py","file_name":"543_diameter_of_binary_tree.py","file_ext":"py","file_size_in_byte":1219,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"444851693","text":"# -*- coding: utf-8 -*-\n\n# 给定一个平衡括号字符串 S,按下述规则计算该字符串的分数:\n#\n# () 得 1 分。\n# AB 得 A + B 分,其中 A 和 B 是平衡括号字符串。\n# (A) 得 2 * A 分,其中 A 是平衡括号字符串\n\nclass Solution(object):\n\tdef scoreOfParentheses(self, S):\n\t\t\"\"\"\n\t\t:type S: str\n\t\t:rtype: int\n\t\t\"\"\"\n\t\tstack = []\n\t\tfor i in range(len(S)):\n\t\t\tif S[i] == \"(\":\n\t\t\t\tstack.append(\"(\")\n\t\t\telse:\n\t\t\t\tcount = 0\n\t\t\t\tnode = stack.pop()\n\t\t\t\twhile node != \"(\":\n\t\t\t\t\tif count == 0:\n\t\t\t\t\t\tcount = node\n\t\t\t\t\telse:\n\t\t\t\t\t\tcount += node\n\t\t\t\t\tnode = stack.pop()\n\t\t\t\tif count == 0:\n\t\t\t\t\tstack.append(1)\n\t\t\t\telse:\n\t\t\t\t\tstack.append(count * 2)\n\t\treturn sum(stack)\n\n\ns = \"(()(()))\"\n# 步骤\n# s = \"(1(()))\"\n# s = \"(1(1))\"\n# s = \"(1 2)\"\n# s = \"(3)\"\n# s = \"6\"\na = Solution().scoreOfParentheses(s)\nprint(a)\n","sub_path":"stack/middle/q856.py","file_name":"q856.py","file_ext":"py","file_size_in_byte":832,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"252892990","text":"from random import randrange\n\ndef choice_user(data):\n range_of_index = len(data)\n\n random_index = randrange(range_of_index)\n\n return data[random_index]\n\ndef main():\n # The sequence is a string if it is taken from the console input\n sequence = input('Specify the sequence:')\n # work needs to be done to include lists and tuples.\n #sequence = [1,2,3,4,5,6,7]\n choice = choice_user(sequence)\n\n print(choice)\n\n\nif __name__ == '__main__' :\n main()\n","sub_path":"r1_12.py","file_name":"r1_12.py","file_ext":"py","file_size_in_byte":473,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"39329813","text":"import config\nimport logging\nimport csv\nfrom redis import StrictRedis\n\n# We do not want to use __name__ here because __name__ is \"__main__\"\nlog = logging.getLogger(\"presidents.importer\")\n\ntry:\n reader = csv.reader(open(\"./presidents.csv\"))\n header = reader.next()\n\n client = StrictRedis()\n\n for i, row in enumerate(reader):\n key = \"president:%s\" % (row[0], )\n doc = dict(zip(header, row))\n\n # simulate a disconnect every 3 operations\n if i % 3 == 0:\n client.disconnect()\n\n # simulate a failure\n if row[0] == \"37\":\n raise Exception(\"Crook.\")\n\n client.set(key, doc)\nexcept:\n log.exception(\"Dang 
it.\")\n","sub_path":"example-code/learn-python-logging/import_presidents4.py","file_name":"import_presidents4.py","file_ext":"py","file_size_in_byte":685,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"69234846","text":"from tensorflow.keras.layers import Conv2D, Input, Flatten, Concatenate, Reshape, Activation\nfrom tensorflow.keras.models import Model\nfrom networks.vgg16 import VGG16\nfrom networks.ssd_layers import Normalize\nfrom networks.ssd_layers import PriorBox\n\n\ndef SSD300(input_shape, num_classes=21):\n\n # 输入300,300,3\n input_tensor = Input(shape=input_shape)\n img_size = (input_shape[1], input_shape[0])\n\n # 定义SSD300网络结构,先装载VGG16\n net = VGG16(input_tensor)\n\n # 预测分支1\n # 对conv4_3进行处理 38,38,512\n net['conv4_3_norm'] = Normalize(20, name='conv4_3_norm')(net['conv4_3'])\n # 每个网格点中先验框的数量\n num_priors = 4\n # 位置预测:38,38,512->38,38,16, 4是指代(x,y,h,w)\n net['conv4_3_norm_mbox_loc'] = Conv2D(num_priors * 4, kernel_size=(3, 3), padding='same', name='conv4_3_norm_mbox_loc')(net['conv4_3_norm'])\n net['conv4_3_norm_mbox_loc_flat'] = Flatten(name='conv4_3_norm_mbox_loc_flat')(net['conv4_3_norm_mbox_loc'])\n # 分类预测:38,38,512->38,38,84\n net['conv4_3_norm_mbox_conf'] = Conv2D(num_priors * num_classes, kernel_size=(3, 3), padding='same', name='conv4_3_norm_mbox_conf')(net['conv4_3_norm'])\n net['conv4_3_norm_mbox_conf_flat'] = Flatten(name='conv4_3_norm_mbox_conf_flat')(net['conv4_3_norm_mbox_conf'])\n # 获取预测分支Anchors对应的张量\n priorbox = PriorBox(img_size, 30.0, max_size=60.0, aspect_ratios=[2],\n variances=[0.1, 0.1, 0.2, 0.2],\n name='conv4_3_norm_mbox_priorbox')\n net['conv4_3_norm_mbox_priorbox'] = priorbox(net['conv4_3_norm'])\n\n # 预测分支2\n # 对fc7层进行处理\n num_priors = 6\n # 位置预测:19,19,1024->19,19,24\n net['fc7_mbox_loc'] = Conv2D(num_priors * 4, kernel_size=(3, 3), padding='same', name='fc7_mbox_loc')(net['fc7'])\n net['fc7_mbox_loc_flat'] = Flatten(name='fc7_mbox_loc_flat')(net['fc7_mbox_loc'])\n # 分类预测:19,19,1024->19,19,6x21\n net['fc7_mbox_conf'] = Conv2D(num_priors * num_classes, kernel_size=(3, 3), padding='same', name='fc7_mbox_conf')(net['fc7'])\n net['fc7_mbox_conf_flat'] = Flatten(name='fc7_mbox_conf_flat')(net['fc7_mbox_conf'])\n # 获取预测分支Anchors对应的张量\n priorbox = PriorBox(img_size, 60.0, max_size=111.0, aspect_ratios=[2, 3],\n variances=[0.1, 0.1, 0.2, 0.2],\n name='fc7_mbox_priorbox')\n net['fc7_mbox_priorbox'] = priorbox(net['fc7'])\n\n # 预测分支3\n # 对conv8_2进行处理\n num_priors = 6\n # 位置预测:10x10x512->10,10,24\n net['conv8_2_mbox_loc'] = Conv2D(num_priors * 4, kernel_size=(3, 3), padding='same', name='conv8_2_mbox_loc')(net['conv8_2'])\n net['conv8_2_mbox_loc_flat'] = Flatten(name='conv8_2_mbox_loc_flat')(net['conv8_2_mbox_loc'])\n # 分类预测:10x10x512->10,10,6x21\n net['conv8_2_mbox_conf'] = Conv2D(num_priors * num_classes, kernel_size=(3, 3), padding='same', name='conv8_2_mbox_conf')(net['conv8_2'])\n net['conv8_2_mbox_conf_flat'] = Flatten(name='conv8_2_mbox_conf_flat')(net['conv8_2_mbox_conf'])\n # 获取预测分支Anchors对应的张量\n priorbox = PriorBox(img_size, 111.0, max_size=162.0, aspect_ratios=[2, 3],\n variances=[0.1, 0.1, 0.2, 0.2],\n name='conv8_2_mbox_priorbox')\n net['conv8_2_mbox_priorbox'] = priorbox(net['conv8_2'])\n\n # 预测分支4\n # 对conv9_2进行处理\n num_priors = 6\n # 位置预测:5x5x256->5,5,24\n net['conv9_2_mbox_loc'] = Conv2D(num_priors * 4, kernel_size=(3, 3), padding='same', name='conv9_2_mbox_loc')(net['conv9_2'])\n net['conv9_2_mbox_loc_flat'] = Flatten(name='conv9_2_mbox_loc_flat')(net['conv9_2_mbox_loc'])\n # 
分类预测:5x5x256->10,10,6x21\n net['conv9_2_mbox_conf'] = Conv2D(num_priors * num_classes, kernel_size=(3, 3), padding='same', name='conv9_2_mbox_conf')(net['conv9_2'])\n net['conv9_2_mbox_conf_flat'] = Flatten(name='conv9_2_mbox_conf_flat')(net['conv9_2_mbox_conf'])\n # 获取预测分支Anchors对应的张量\n priorbox = PriorBox(img_size, 162.0, max_size=213.0, aspect_ratios=[2, 3],\n variances=[0.1, 0.1, 0.2, 0.2],\n name='conv9_2_mbox_priorbox')\n net['conv9_2_mbox_priorbox'] = priorbox(net['conv9_2'])\n\n # 预测分支5\n # 对conv10_2进行处理\n num_priors = 4\n # 位置预测:3x3x256 -> 3x3x16\n net['conv10_2_mbox_loc'] = Conv2D(num_priors * 4, kernel_size=(3, 3), padding='same', name='conv10_2_mbox_loc')(net['conv10_2'])\n net['conv10_2_mbox_loc_flat'] = Flatten(name='conv10_2_mbox_loc_flat')(net['conv10_2_mbox_loc'])\n # 分类预测:3x3x256 -> 3,3,4x21\n net['conv10_2_mbox_conf'] = Conv2D(num_priors * num_classes, kernel_size=(3, 3), padding='same', name='conv10_2_mbox_conf')(net['conv10_2'])\n net['conv10_2_mbox_conf_flat'] = Flatten(name='conv10_2_mbox_conf_flat')(net['conv10_2_mbox_conf'])\n # 获取预测分支Anchors对应的张量\n priorbox = PriorBox(img_size, 213.0, max_size=264.0, aspect_ratios=[2],\n variances=[0.1, 0.1, 0.2, 0.2],\n name='conv10_2_mbox_priorbox')\n net['conv10_2_mbox_priorbox'] = priorbox(net['conv10_2'])\n\n # 预测分支6\n # 对conv11_2进行处理\n num_priors = 4\n # 位置预测:1x1x256 -> 1x1x16\n net['conv11_2_mbox_loc'] = Conv2D(num_priors * 4, kernel_size=(3, 3), padding='same', name='conv11_2_mbox_loc')(net['conv11_2'])\n net['conv11_2_mbox_loc_flat'] = Flatten(name='conv11_2_mbox_loc_flat')(net['conv11_2_mbox_loc'])\n # 分类预测:1,1,256 -> 1,1,4x21\n net['conv11_2_mbox_conf'] = Conv2D(num_priors * num_classes, kernel_size=(3, 3), padding='same', name='conv11_2_mbox_conf')(net['conv11_2'])\n net['conv11_2_mbox_conf_flat'] = Flatten(name='conv11_2_mbox_conf_flat')(net['conv11_2_mbox_conf'])\n # 获取预测分支Anchors对应的张量\n priorbox = PriorBox(img_size, 264.0, max_size=315.0, aspect_ratios=[2],\n variances=[0.1, 0.1, 0.2, 0.2],\n name='conv11_2_mbox_priorbox')\n net['conv11_2_mbox_priorbox'] = priorbox(net['conv11_2'])\n\n # 将6个分支的预测结果进行堆叠\n net['mbox_loc'] = Concatenate(axis=1, name='mbox_loc')([net['conv4_3_norm_mbox_loc_flat'],\n net['fc7_mbox_loc_flat'],\n net['conv8_2_mbox_loc_flat'],\n net['conv9_2_mbox_loc_flat'],\n net['conv10_2_mbox_loc_flat'],\n net['conv11_2_mbox_loc_flat']])\n\n net['mbox_conf'] = Concatenate(axis=1, name='mbox_conf')([net['conv4_3_norm_mbox_conf_flat'],\n net['fc7_mbox_conf_flat'],\n net['conv8_2_mbox_conf_flat'],\n net['conv9_2_mbox_conf_flat'],\n net['conv10_2_mbox_conf_flat'],\n net['conv11_2_mbox_conf_flat']])\n\n net['mbox_priorbox'] = Concatenate(axis=1, name='mbox_priorbox')([net['conv4_3_norm_mbox_priorbox'],\n net['fc7_mbox_priorbox'],\n net['conv8_2_mbox_priorbox'],\n net['conv9_2_mbox_priorbox'],\n net['conv10_2_mbox_priorbox'],\n net['conv11_2_mbox_priorbox']])\n\n # 38x38x16+19x19x24+10x10x24+5x5x24+3x3x16+1x1x16=34928\n # 34928 -> 8732,4\n net['mbox_loc'] = Reshape((-1, 4), name='mbox_loc_final')(net['mbox_loc'])\n # 8732,21\n net['mbox_conf'] = Reshape((-1, num_classes), name='mbox_conf_logits')(net['mbox_conf'])\n net['mbox_conf'] = Activation('softmax', name='mbox_conf_final')(net['mbox_conf'])\n\n net['predictions'] = Concatenate(axis=2, name='predictions')([net['mbox_loc'],\n net['mbox_conf'],\n net['mbox_priorbox']])\n model = Model(net['input'], net['predictions'])\n\n return model\n\n\n","sub_path":"10. 
ssd/networks/ssd.py","file_name":"ssd.py","file_ext":"py","file_size_in_byte":8629,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"510095704","text":"from django.forms import ModelForm, ClearableFileInput, Textarea, Select, SelectMultiple, CheckboxSelectMultiple\nfrom .models import Post, Category, Comment, Message, PostPhoto\nfrom django import forms\nclass PostForm(ModelForm):\n #video_file = forms.FileField(widget=forms.ClearableFileInput(attrs={'multiple': True}), required=False)\n class Meta:\n model = Post\n\n fields = ['headline', 'text', 'categories', 'images']\n widgets = {\n 'text': Textarea(attrs={\n 'class': 'form-control',\n 'placeholder': 'Введите текст...'\n }),\n 'categories': CheckboxSelectMultiple(attrs={\n 'class': 'form-check-label',\n 'size': '100',\n }),\n }\nclass PhotoForm(ModelForm):\n image = forms.FileField(widget=forms.ClearableFileInput(attrs={'multiple': True}), required=False)\n class Meta:\n model = PostPhoto\n\n fields = ['headline', 'text', 'categories', 'image']\n widgets = {\n 'text': Textarea(attrs={\n 'class': 'form-control',\n 'placeholder': 'Введите текст...'\n }),\n 'categories': CheckboxSelectMultiple(attrs={\n 'class': 'form-check-label',\n 'size': '100',\n }),\n }\nclass CategoryForm(ModelForm):\n class Meta:\n model = Category\n fields = []\nclass AddCommentForm(ModelForm):\n class Meta:\n model = Comment\n fields = ['com_text']\n widgets = {\n 'com_text': Textarea(attrs={\n 'class': 'form-control',\n 'placeholder': 'Введите текст...',\n 'rows': \"3\",\n }),}\nclass AddMessageForm(ModelForm):\n class Meta:\n model = Message\n fields = ['text']\n widgets = {\n 'text': Textarea(attrs={\n 'class': 'form-control',\n 'placeholder': 'Введите текст...',\n 'rows': \"3\",\n }),}","sub_path":"Newsdesk/posts/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":2054,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"219686751","text":"import time\n\nfrom binary_search_tree import BSTNode\n\nstart_time = time.time()\n\nf = open('names_1.txt', 'r')\nnames_1 = f.read().split(\"\\n\") # List containing 10000 names\nf.close()\n\nf = open('names_2.txt', 'r')\nnames_2 = f.read().split(\"\\n\") # List containing 10000 names\nf.close()\n\n################################################################\n# Nested for loops with runtime complexity O(n^2)\n\nduplicates = [] # Return the list of duplicates in this data structure\n\n# Replace the nested for loops below with your improvements\n# O(n^2)\n# for name_1 in names_1:\n# for name_2 in names_2:\n# if name_1 == name_2:\n# duplicates.append(name_1)\n\n# end_time = time.time()\n# print (f\"\\n{len(duplicates)} duplicates:\\n\\n{', '.join(duplicates)}\\n\\n\")\n# print (f\"runtime of nested loop (O(n^2)): {end_time - start_time} seconds\")\n\n################################################################\n# Using BST data structures to reduce time complexity \n\n\nduplicates1 = [] # Return the list of duplicates in this data structure\n\nbinary_tree = BSTNode('names')\nstart_time1 = time.time()\n\n\nfor name_1 in names_1:\n binary_tree.insert(name_1)\n\nfor name_2 in names_2:\n if binary_tree.contains(name_2):\n duplicates1.append(name_2)\n\n\nend_time1 = time.time()\nprint(f\"\\n{len(duplicates1)} duplicates:\\n\\n{', '.join(duplicates1)}\\n\\n\")\nprint (f\"runtime of for loop with Linked list: {end_time1 - start_time1} seconds\")\n\n\n\n# # ---------- Stretch Goal -----------\n# # Python 
has built-in tools that allow for a very efficient approach to this problem\n# # What's the best time you can accomplish? There are no restrictions on techniques or data\n# # structures, but you may not import any additional libraries that you did not write yourself.\n\n####################################################################################\n\n# # O(n)\n\n# duplicates2 = []\n# start_time2 = time.time()\n# for name_1 in names_1: # O(n)\n#     if name_1 in names_2: # O(1) + O(n)\n#         duplicates2.append(name_1) # O(1)\n# end_time2 = time.time()\n# print(f\"\\n{len(duplicates2)} duplicates:\\n\\n{', '.join(duplicates2)}\\n\\n\")\n# print(f\"runtime of for loop and if statement (O(n)): {end_time2 - start_time2} seconds\")\n\n###################################################################################\n\n# # O(n) with count() function\n\n# duplicates3 = []\n# start_time3 = time.time()\n# for name_1 in names_1: # O(n)\n#     if names_2.count(name_1) > 0: # O(1) + O(n)\n#         duplicates3.append(name_1) # O(1)\n# end_time3 = time.time()\n# print(f\"\\n{len(duplicates3)} duplicates:\\n\\n{', '.join(duplicates3)}\\n\\n\")\n# print(f\"runtime of for loop with count(): {end_time3 - start_time3} seconds\")\n\n################################################################################\n\n# List comprehension \n\n# start_time4 = time.time()\n# duplicates4 = [name for name in names_1 if name in names_2] # O(n)\n# end_time4 = time.time()\n# print(f\"\\n{len(duplicates4)} duplicates:\\n\\n{', '.join(duplicates4)}\\n\\n\")\n# print(f\"runtime of list comprehension: {end_time4 - start_time4} seconds\")\n\n#############################################################################\n\n# # most efficient way with Sets Intersection (&)\n\nstart_time5 = time.time()\ndef find_dups(n1, n2):\n    names_set1 = set(n1) # O(len(s))\n    names_set2 = set(n2) # O(len(s))\n\n    if (names_set1 & names_set2): # O(min(len(s1), len(s2)))\n        return (names_set1 & names_set2) # O(min(len(s1), len(s2)))\n\nduplicates5 = find_dups(names_1, names_2)\n\nend_time5 = time.time()\nprint(f\"\\n{len(duplicates5)} duplicates:\\n\\n{', '.join(duplicates5)}\\n\\n\")\nprint(f\"runtime of function with sets intersection: {end_time5 - start_time5} seconds\")\n","sub_path":"names/names.py","file_name":"names.py","file_ext":"py","file_size_in_byte":3689,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"192226123","text":"class ListNode:\n    def __init__(self, val):\n        self.val = val\n        self.next = None\n\nclass bucket:\n    def __init__(self):\n        self.head = ListNode(-1)\n\n    def find(self, x):\n        head = self.head.next\n        while head: \n            if head.val == x:\n                return True\n            head = head.next\n        return False\n    \n    def delete(self,x):\n        head = self.head\n        while head.next:\n            if head.next.val == x:\n                head.next = head.next.next\n                break\n            head = head.next\n    \n    def add(self, x):\n        head = self.head\n        while head.next:\n            if head.next.val == x:\n                break\n            head = head.next\n        head.next = ListNode(x)\n\nclass HashSet:\n    def __init__(self):\n        self.N = 2003\n        self.array = [bucket() for _ in range(self.N)] # one bucket per slot; self.N*[bucket()] would alias a single shared bucket\n    \n    def _hash(self, value):\n        return value%self.N\n\n    def add(self, value):\n        index = self._hash(value)\n        curr = self.array[index]\n        curr.add(value)\n    \n    def contains(self, value):\n        index = self._hash(value)\n        curr = self.array[index]\n        return curr.find(value)\n\n    def remove(self, value):\n        index = self._hash(value)\n        curr = self.array[index]\n        curr.delete(value)\n\n\nnew = HashSet()\n\nnew.add(1)\nnew.add(2)\nprint(new.contains(1))\nprint(new.contains(3))\nprint(new.contains(2))\nnew.remove(2)\nprint(new.contains(2))","sub_path":"solution2.py","file_name":"solution2.py","file_ext":"py","file_size_in_byte":1443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"412875831","text":"# Given a sequence with no duplicate numbers, return all of its possible permutations.\n#\n# Example:\n#\n# Input: [1,2,3]\n# Output:\n# [\n#   [1,2,3],\n#   [1,3,2],\n#   [2,1,3],\n#   [2,3,1],\n#   [3,1,2],\n#   [3,2,1]\n# ]\n\n\nclass Solution:\n    def permute(self, nums):\n        res = []\n\n        def backtrack(nums, tmp):\n            print('nums:', nums)\n            print('tmp:', tmp)\n            if not nums:\n                res.append(tmp)\n                return\n            for i in range(len(nums)):\n                backtrack(nums[: i] + nums[i + 1:], tmp + [nums[i]])\n            # print(nums)\n\n        backtrack(nums, [])\n        return res\n\n\ns = Solution()\nprint(s.permute([1, 2, 3]))\n","sub_path":"leetcode046_全排列.py","file_name":"leetcode046_全排列.py","file_ext":"py","file_size_in_byte":682,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"565444391","text":"import numpy as np\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport pandas as pd\nfrom math import sqrt\n######Cost Function#########\ndef getcost(X,Y,theta):\n    r=(1/(2*X.shape[0]))*np.sum(np.square(Y-X.dot(theta.T)))\n    return r\n\n\n##### The function to implement gradient descent algorithm #####\ndef changetheta(X,Y,theta,a,itrations):\n    for i in range(itrations):\n        for j in range(X.shape[0]):\n            theta[0,0] = theta[0,0] - (a)*(X[j,0]*(X[j,:].dot(theta.T) - Y[j]))\n            theta[0,1] = theta[0,1] - (a)*(X[j,1]*(X[j,:].dot(theta.T) - Y[j]))\n    ### just to see the change in theta each time\n    #print(theta) \n    \n    return theta\n\n\ndef accu(X,Y,theta):\n    r=sqrt((np.sum((Y-X.dot(theta.T))**2/(X.shape[0])))-(((np.sum(Y-X.dot(theta.T)))**2)/(X.shape[0])**2))\n    return r\n\n\n\n\n\n#setting up the data\nd=pd.read_csv('somedata.csv')\nx=d['Price (Older)']\ny=d['Price (New)']\n\n\n\n#Creating Matrices from data for computations\nY=np.array([y]) #row vector of new price column from data\nY=Y.T #Converting it to a column vector for the sake of maths\none=np.ones((len(x),1)) # creating a column vector of \"1\" with no of rows same as x\na=0.0000000000003 #learning rate which fits best after tweaking\nitrations=1000 # number of iterations\nX=np.array([x]) #row vector of old price column from the data\nX=X.T #Making it a column vector for the sake of maths\nX=np.concatenate((X,one), axis=1) # making a matrix with two columns by joining X and one\ntheta=np.array([[0,0]],dtype=float) #theta is in the form [theta_1, theta_0]\n\nresult=changetheta(X,Y,theta,a,itrations) #getting the values of theta for which cost is minimum\nprint(f\"Result - {result}\")\nprint(f\"Cost at Result - {getcost(X,Y,result)}\")\nprint(f\"RMSE - {accu(X,Y,result)}\")\n\n#----Plotting-------\nplt.scatter(x,y) # scattering the real data\nxx=np.arange(0,200000)\nyy=result[0,0]*xx+result[0,1]\nplt.plot(xx,yy) # plotting the prediction on the same graph\nplt.show()\n","sub_path":"LinearRegression/Stochastic-Gradient-Descent/somedata/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1994,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"185408734","text":"# Identities\n\"\"\" identity :: a -> a \"\"\"\nidentity = lambda x: x\n\n\"\"\" always :: a -> * -> a \"\"\"\nalways = lambda a: lambda *b: a\n\n\"\"\" defer :: (a -> b) -> a -> * -> b \"\"\"\ndefer = lambda fn: lambda val: 
lambda *x: fn(val)\n\n\"\"\" complement :: (a -> Boolean) -> a -> Boolean \"\"\"\ncomplement = lambda fn: lambda a: not fn(a)\n\n\"\"\" T :: a -> * -> Boolean \"\"\"\nT = always(True)\n\n\"\"\" F :: a -> * -> Boolean \"\"\"\nF = always(False)\n\n\"\"\" isNone :: a -> Boolean \"\"\"\nisNone = lambda a: a is None\n\n\"\"\" notNone :: a -> boolean \"\"\"\nnotNone = complement (isNone)\n\n\"\"\" flip :: (a -> b -> c) -> b -> a -> c \"\"\"\nflip = lambda fn: lambda b: lambda a: fn (a) (b)\n\n# higher order functions\n\"\"\" map :: (a -> b) -> [a] -> [b] \"\"\"\nmap = lambda fn: lambda arr: [fn(a) for a in arr]\n\n\"\"\" filter :: (a -> Boolean) -> [a] -> [a] \"\"\"\nfilter = lambda fn: lambda arr: [a for a in arr if fn(a)]\n\n\"\"\" reduce :: (a -> b -> a) -> a -> [b] -> a\"\"\"\nfrom functools import reduce as _reduce\nreduce = lambda fn: lambda init: lambda arr: _reduce(fn, arr, init)\n\n# Composition\n\"\"\" compose2 :: ((b -> c), (a -> b)) -> a -> c \"\"\"\ncompose2 = lambda f, g: lambda x: f(g(x))\n\ninitCompose = reduce (compose2) (identity)\n\n\"\"\" compose :: ((y -> z) ... (a -> b)) -> a -> z \"\"\"\ncompose = lambda *fns: initCompose (fns)\n\n\"\"\" pipe :: ((a -> b) ... (y -> z)) -> a -> z \"\"\"\npipe = lambda *fns: initCompose (fns[::-1])\n\n# Conditions\n\"\"\" ifElse :: ((a -> Boolean), (a -> b), (a -> c)) -> a -> b | c \"\"\"\nifElse = lambda pred, ok, notOk: lambda a: ok(a) if pred(a) else notOk(a)\n\n\"\"\" when :: ((a -> Boolean), (a -> b)) -> a -> b | a \"\"\"\nwhen = lambda pred, ok: ifElse(pred, ok, identity)\n\n\"\"\" unless :: ((a -> Boolean), (a -> b)) -> a -> a | b \"\"\"\nunless = lambda pred, notOk: ifElse(pred, identity, notOk)\n\n\"\"\" both :: (a -> Boolean) -> (a -> Boolean) -> a -> Boolean \"\"\"\nboth = lambda fn1: lambda fn2: lambda a: fn1(a) and fn2(a)\n\n\"\"\" eitherOr :: (a -> Boolean) -> (a -> Boolean) -> a -> Boolean \"\"\"\neitherOr = lambda fn1: lambda fn2: lambda a: fn1(a) or fn2(a)\n\n\"\"\" lt :: a -> b -> Boolean \"\"\"\nlt = lambda a: lambda b: b < a\n\n\"\"\" gt :: a -> b -> Boolean \"\"\"\ngt = lambda a: lambda b: b > a\n\n\"\"\" equals :: a -> b -> Boolean \"\"\"\nequals = lambda a: lambda b: a == b\n\n\"\"\" lte :: a -> b -> Boolean \"\"\"\nlte = lambda a: eitherOr (equals (a)) (lt (a))\n\n\"\"\" gte :: a -> b -> Boolean \"\"\"\ngte = lambda a: eitherOr (equals (a)) (gt (a))\n\n\"\"\" isList :: [a] -> Boolean \"\"\"\nisList = compose(equals (list), type)\n\n\"\"\" notEmptyList :: [a] -> Boolean \"\"\"\nnotEmptyList = both (isList) (compose(gt (0), len))\n\n\"\"\" emptyList :: [a] -> Boolean \"\"\"\nemptyList = both (isList) (compose(equals (0), len))\n\n# Accessors\n\"\"\" head :: [a] -> a \"\"\"\nhead = lambda arr: arr[0]\n\n\"\"\" last :: [a] -> a \"\"\"\nlast = lambda arr: arr[-1]\n\n\"\"\" nth :: a -> [b] -> b \"\"\"\nnth = lambda idx: lambda arr: arr[idx]\n\n\"\"\" tail :: [a] -> [a] \"\"\"\ntail = lambda arr: arr[1:]\n\n\"\"\" prop :: a -> {b} -> b[a] \"\"\"\nprop = lambda a: lambda b: b[a]\n\n\"\"\" path :: [a] -> {b} -> b[a[0]]...[a[-1]] \"\"\"\npath = lambda keys: lambda obj: reduce (lambda a, b: prop (b) (a)) (obj) (keys)\n\n\"\"\" attr :: a -> b -> b.a \"\"\"\nattr = lambda name: lambda cls: getattr(cls, name)\n\n\"\"\" pick :: [a] -> {b} -> {b} \"\"\"\npick = lambda keys: lambda obj: {k: v for k, v in obj.items() if k in keys}\n\n# Access and Compare\n\"\"\" propEq :: a -> b -> {c} -> Boolean \"\"\"\npropEq = lambda key: lambda val: compose(equals (val), prop (key))\n\n\"\"\" pathEq :: [a] -> b -> {c} -> Boolean \"\"\"\npathEq = 
lambda keys: lambda val: compose(equals (val), path (keys))\n\n\"\"\" cond :: [[(a -> Boolean), (a -> b)]] -> a -> b \"\"\"\ncond = lambda conds: lambda x: ifElse(\n compose(head, head)(conds),\n compose(last, head)(conds),\n cond(tail(conds))\n)(x)\n\n# Operations\n\"\"\" inc :: a -> b \"\"\"\ninc = lambda x: x + 1\n\n\"\"\" merge :: {a} -> {b} -> {ab} \"\"\"\nmerge = lambda f: lambda g: {**f, **g}\n\n\"\"\" trim :: a -> b \"\"\"\ntrim = lambda x: x.strip()\n\n\"\"\" assoc :: a -> b -> {c} -> {c} \"\"\"\nassoc = lambda key: lambda val: lambda obj: merge (obj) ({key: val})\n\n\"\"\" evolve :: a -> (b -> c) -> {d} -> {dc} \"\"\"\nevolve = lambda k: lambda f: lambda o: assoc (k) (f(prop (k) (o))) (o)\n\n\"\"\" evolvePath :: [a] -> (b -> c) -> {d} -> {dc} \"\"\"\nevolvePath = lambda keys: lambda fn: lambda obj: compose(\n lambda v: assoc (head (keys)) (v) (obj),\n ifElse(\n compose(lte (1), len),\n compose(fn, flip (prop) (obj), head),\n compose(evolvePath (tail (keys)) (fn), flip (prop) (obj), head)\n )\n)(keys)\n\n\"\"\" assocPath :: [a] -> b -> {c} -> {c} \"\"\"\nassocPath = lambda keys: lambda val: evolvePath (keys) (always (val))\n\n\"\"\" append :: a -> [a] -> [a] \"\"\"\nappend = lambda item: lambda arr: arr + [item]\n\n# Monads (Generic)\n\"\"\" result :: (a -> Boolean) -> a -> m a \"\"\"\nresult = lambda fn: lambda data: {\"ok\": fn (data), \"data\": data}\n\n\"\"\" left :: a -> m a \"\"\"\nleft = result (F)\n\n\"\"\" right :: a -> m a \"\"\"\nright = result (T)\n\n\"\"\" resultOk :: m a -> Boolean \"\"\"\nresultOk = propEq (\"ok\") (True)\n\n\"\"\" join :: m a -> a \"\"\"\njoin = prop (\"data\")\n\n\"\"\" flatMap :: [m a] -> [a] \"\"\"\nflatMap = map (join)\n\n\"\"\" mapM :: (a -> b) -> m a -> m b \"\"\"\n\"\"\" mapM :: (a -> b -> c) -> m a -> m (b -> c) \"\"\"\nmapM = lambda fn: lambda m: when(\n resultOk,\n compose (flip (assoc (\"data\")) (m), fn, join)\n)(m)\n\n\"\"\" chain :: (a -> m b) -> m a -> m b \"\"\"\nchain = lambda fn: when(resultOk, compose(join, mapM (fn)))\n\n\"\"\" ap :: m (a -> b) -> m a -> m b \"\"\"\nap = lambda mFn: lambda m: when(\n resultOk,\n compose(flip (mapM) (m), join)\n)(mFn)\n\n\"\"\" liftA2 :: (a -> b -> c) -> m a -> m b -> m c \"\"\"\nliftA2 = lambda fn: lambda ma: lambda mb: compose(\n flip (ap) (mb),\n mapM (fn)\n)(ma)\n\n# Monads (Maybe)\n\"\"\" onMaybe :: ((a -> Boolean), (a -> b), (a -> c)) -> a -> m b|c \"\"\"\nonMaybe = lambda pred, bad, good: compose(\n either (compose(left, bad), compose(right, good)),\n result (pred)\n)\n\n# Monads (Either)\n\"\"\" either :: ((a -> c), (b -> c)) -> m a|b -> c \"\"\"\neither = lambda notOk, ok: lambda m: compose(\n ifElse(always (resultOk (m)), ok, notOk), join\n)(m)\n\n# Better Result mapping / chainning with Either\n\"\"\" mapMLeft :: (a -> b) -> m a -> m b \"\"\"\nmapMLeft = lambda fn: either (compose (left, fn), right)\n\n\"\"\" chainLeft :: (a -> m b) -> m a -> m b \"\"\"\nchainLeft = lambda fn: either (fn, right)\n\n# Monads (Validaion)\n\"\"\" check :: a -> (b -> Boolean) -> c -> abc \"\"\"\ncheck = lambda p, fn, msg: {\"p\": p, \"fn\": fn, \"msg\": msg}\n\n\"\"\" safeProp :: a -> b -> Either None|b[a] \"\"\"\nsafeProp = lambda k: lambda o: right (prop (k) (o)) if k in o else left (None)\n\n\"\"\" safePath :: [a] -> {b} -> m {b} \"\"\"\nsafePath = lambda keys: lambda obj: reduce (\n lambda obj, key: chain (safeProp (key)) (obj)\n) (right(obj)) (keys)\n\n\"\"\" safeAttr :: a -> b -> m b.a \"\"\"\nsafeAttr = lambda name: lambda cls: result (lambda x: x is not None) (\n getattr(cls, name, None))\n\n\"\"\" 
validate :: a -> [b] -> m a \"\"\"\nvalidate = lambda data: pipe(\n reduce (\n lambda results, check: pipe(\n safeProp (prop (\"p\") (check)),\n chain (result (prop (\"fn\") (check))),\n either (compose(left, always (prop (\"msg\") (check))), right),\n flip (append) (results)\n )(data)\n )([]),\n filter (complement(resultOk)),\n flatMap,\n ifElse(emptyList, compose(right, always (data)), left)\n)\n","sub_path":"funpy/funpy/funpy.py","file_name":"funpy.py","file_ext":"py","file_size_in_byte":7201,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"338548281","text":"\"\"\"\n Goal: Visualize images from aircraft camera and load as a pytorch dataloader\n\n 1. load all example png examples as a pytorch dataloader\n 2. save a few images to disk for visualization\n 3. load the corresponding state information in the h5 file\n\"\"\"\n\nimport sys, os\nimport torch\nimport numpy as np\nimport pandas\nimport matplotlib.pyplot as plt\n\n# make sure this is a system variable in your bashrc\nNASA_ULI_ROOT_DIR=os.environ['NASA_ULI_ROOT_DIR']\n\nimport torchvision\nfrom torchvision import transforms\nfrom torch.utils.data import TensorDataset, DataLoader\nfrom PIL import Image\n\nDATA_DIR = NASA_ULI_ROOT_DIR + '/data/'\n\n# where intermediate results are saved\n# never save this to the main git repo\nSCRATCH_DIR = NASA_ULI_ROOT_DIR + '/scratch/'\n\nUTILS_DIR = NASA_ULI_ROOT_DIR + '/src/utils/'\nsys.path.append(UTILS_DIR)\n\nfrom textfile_utils import *\n\nif __name__ == '__main__':\n\n # how often to plot a few images for progress report\n # warning: plotting is slow\n NUM_PRINT = 2\n\n IMAGE_WIDTH = 224\n IMAGE_HEIGHT = 224\n\n # create a temp dir to visualize a few images\n visualization_dir = SCRATCH_DIR + '/viz/'\n remove_and_create_dir(visualization_dir)\n\n # where the final dataloader will be saved\n DATALOADER_DIR = remove_and_create_dir(SCRATCH_DIR + '/dataloader/')\n\n MAX_FILES = np.inf\n\n # where original XPLANE images are stored \n data_dir = DATA_DIR + '/test_dataset_smaller_ims/'\n \n # resize to 224 x 224 x 3 for EfficientNets\n # prepare image transforms\n # warning: you might need to change the normalization values given your dataset's statistics\n tfms = transforms.Compose([transforms.Resize((IMAGE_WIDTH, IMAGE_HEIGHT)),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406],\n [0.229, 0.224, 0.225]),])\n\n image_list = [x for x in os.listdir(data_dir) if x.endswith('.png')]\n\n # where the labels for each image (such as distance to centerline) are present\n label_file = data_dir + '/labels.csv'\n \n # dataframe of labels\n labels_df = pandas.read_csv(label_file, sep=',')\n \n # columns are: \n # ['image_filename', 'absolute_time_GMT_seconds', 'relative_time_seconds', 'distance_to_centerline_meters', 'distance_to_centerline_NORMALIZED', 'downtrack_position_meters', 'downtrack_position_NORMALIZED', 'heading_error_degrees', 'heading_error_NORMALIZED', 'period_of_day', 'cloud_type']\n\n # loop through images and save in a dataloader\n image_tensor_list = []\n\n # tensor of targets y: modify to whatever you want to predict from DNN\n target_tensor_list = []\n\n for i, image_name in enumerate(image_list):\n\n # open images and apply transforms\n fname = data_dir + '/' + str(image_name)\n image = Image.open(fname).convert('RGB')\n tensor_image_example = tfms(image)\n\n # add image\n image_tensor_list.append(tensor_image_example)\n\n # get the corresponding state information (labels) for each image\n specific_row = 
labels_df[labels_df['image_filename'] == image_name]\n # there are many states of interest, you can modify to access which ones you want\n dist_centerline_norm = specific_row['distance_to_centerline_NORMALIZED'].item()\n # normalized downtrack position\n downtrack_position_norm = specific_row['downtrack_position_NORMALIZED'].item()\n\n # normalized heading error\n heading_error_norm = specific_row['heading_error_NORMALIZED'].item()\n\n # add tensor\n target_tensor_list.append([dist_centerline_norm, downtrack_position_norm, heading_error_norm])\n\n # periodically save the images to disk \n if i % NUM_PRINT == 0:\n plt.imshow(image)\n # original image\n title_str = ' '.join(['Dist Centerline: ', str(round(dist_centerline_norm,3)), 'Downtrack Pos. Norm: ', str(round(downtrack_position_norm,3)), '\\n', 'Heading Error Norm: ', str(round(heading_error_norm, 3))]) \n plt.title(title_str)\n plt.savefig(visualization_dir + '/' + str(i) + '.png')\n plt.close()\n\n # resized and normalized image that can be passed to a DNN\n torchvision.utils.save_image(tensor_image_example, visualization_dir + '/resized_transform_' + str(image_name))\n \n # early terminate for debugging\n if i > MAX_FILES:\n break\n\n # first, save image tensors\n # concatenate all image tensors\n all_image_tensor = torch.stack(image_tensor_list)\n print(all_image_tensor.shape)\n\n # save tensors to disk \n image_data = DATALOADER_DIR + '/images_xplane.pt'\n # sizes are: 126 images, 3 channels, 224 x 224 each \n # torch.Size([126, 3, 224, 224])\n torch.save(all_image_tensor, image_data)\n\n ###################################\n # second, save target label tensors\n target_tensor = torch.tensor(target_tensor_list)\n print(target_tensor.shape)\n \n # size: 126 numbers by 3 targets \n # torch.Size([126])\n\n # save tensors to disk \n target_data = DATALOADER_DIR + '/targets_xplane.pt'\n torch.save(target_tensor, target_data)\n","sub_path":"src/examples/load_initial_dataset.py","file_name":"load_initial_dataset.py","file_ext":"py","file_size_in_byte":5214,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"103421557","text":"import argparse\nimport csv\nimport json\nimport sqlite3\nimport sys\nimport time\n\n# local imports\nimport api_utils\nimport cspace_utils\nimport db_stuff\nimport wikidata_utils\n\ndef set_args():\n\tparser = argparse.ArgumentParser()\n\tparser.add_argument(\n\t\t'-m','--mode',\n\t\tchoices=['cspace','csv'],\n\t\tdefault='cspace',\n\t\thelp='what mode? cspace (extract from cspace) or csv. dfault = cspace'\n\t\t)\n\n\treturn parser.parse_args()\n\ndef run_cspace_queries(database):\n\t# set up cspace details from config.json\n\ttry:\n\t\tauthority = database.config['cspace details']['authority to use']\n\t\tauthority_csid = database.config['cspace details']['authority cspace id']\n\t\ttry:\n\t\t\tdatabase.create_cspace_table(authority)\n\t\texcept:\n\t\t\tprint(\"problem w the database file??\")\n\t\t\tsys.exit()\n\texcept KeyError:\n\t\tprint(\"You need to set up the config file correctly, read the readme.\")\n\t\tsys.exit()\n\n\t# set up API handler objects\n\tcspace_api_handler = api_utils.APIHandler(\"collectionspace\")\n\tdatabase.api_handlers.append(cspace_api_handler)\n\n\n\tcspace_utils.fetch_cspace_items(\n\t\tdatabase.secrets,\n\t\tdatabase.config,\n\t\tauthority,\n\t\tauthority_csid,\n\t\tdatabase\n\t\t)\n\tdatabase.db_writer.run_me()\n\trows = database.count_me()\n\tif not rows:\n\t\tprint(\"Something is up with the database or the initial api query. 
Try again!\")\n\t\tsys.exit()\n\n\tcspace_api_handler.clean_me()\n\n\t# get some additional data points for matching/reconciliation\n\tcspace_utils.enrich_cspace_items(database.secrets,database.config,database)\n\tcspace_api_handler.clean_me()\n\n\tdatabase.db_writer.run_me()\n\ndef run_wikidata_queries(database):\n\twd_api_handler = api_utils.APIHandler(\"wikidata\")\n\tdatabase.api_handlers.append(wd_api_handler)\n\n\twikidata_utils.reconcile_items(database.config,database)\n\tdatabase.db_writer.run_me()\n\twd_api_handler.clean_me()\n\n\twikidata_utils.refine_matches(database)\n\ndef main():\n\targs = set_args()\n\tmode = args.mode\n\n\twith open('secrets.json','r') as f:\n\t\tsecrets = json.load(f)\n\twith open('config.json','r') as f:\n\t\tconfig = json.load(f)\n\tdb_path = \"items.sqlite\"\n\n\tdatabase = db_stuff.Database(db_path)\n\tdatabase.secrets = secrets\n\tdatabase.config = config\n\tdatabase.connection = sqlite3.connect(db_path)\n\tdatabase.cursor = database.connection.cursor()\n\n\tif mode == 'cspace':\n\t\trun_cspace_queries(database)\n\telif mode == 'csv':\n\t\tpass\n\n\trun_wikidata_queries(database)\n\nif __name__=='__main__':\n\tmain()\n","sub_path":"driver.py","file_name":"driver.py","file_ext":"py","file_size_in_byte":2356,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"22970840","text":"\nfrom PyQt4.QtGui import * # @UnusedWildImport\nfrom PyQt4.QtCore import * # @UnusedWildImport\n\n\nclass Ventil():\n \n\n def __init__(self,x,y,motor,naziv=''):\n self.crna = QColor(0, 0, 0)\n self.crvena = QColor(255, 0, 0)\n self.bela = QColor(255, 255, 255)\n self.ukljuceno = False;\n self.motor = motor \n self.points = [QPoint(x, y), QPoint(x+30, y), QPoint(x, y+30), QPoint(x+30, y+30)]\n self.x = x;\n self.y = y;\n self.dx = self.x-28;\n self.dy = self.y+15-25/2;\n \n self.senzor = False\n def on(self):\n self.ukljuceno = True; \n def off(self):\n self.ukljuceno = False; \n def senzorOn(self):\n if(self.senzor==True):\n return False;\n self.senzor = True;\n return True;\n def senzorOff(self):\n if(self.senzor==False):\n return False; \n self.senzor = False;\n return True;\n def click(self,x,y): \n okvir = QRect(self.x,self.y,30,30)\n okvir2 = QRect(self.x-27,self.y+15-12,30,25)\n \n if(okvir.contains(x,y)==True or okvir2.contains(x,y)==True):\n if(self.ukljuceno==True):\n self.ukljuceno = False;\n else:\n self.ukljuceno = True;\n return True;\n return False \n \n def nacrtaj(self, paint):\n\n \n pen = QPen(Qt.black, 1, Qt.SolidLine)\n paint.setPen(pen)\n paint.setRenderHint(QPainter.Antialiasing)\n needle =QPolygon(self.points) \n\n if (self.ukljuceno == True):\n paint.setBrush(Qt.green) \n paint.drawPolygon(needle)\n else: \n paint.setBrush(Qt.red) \n paint.drawPolygon(needle) \n if (self.senzor == True):\n paint.setBrush(Qt.green) \n paint.drawEllipse(self.dx, self.dy, 25, 25)\n paint.setPen(QPen(Qt.black, 2, Qt.SolidLine))\n paint.drawLine(self.dx+27, self.y+15,self.x+15,self.y+15)\n else:\n paint.setBrush(Qt.red) \n paint.drawEllipse(self.dx, self.dy, 25, 25) \n paint.setPen(QPen(Qt.black, 2, Qt.SolidLine)) \n paint.drawLine(self.dx+27, self.y+15,self.x+15,self.y+15) ","sub_path":"ui/ventil.py","file_name":"ventil.py","file_ext":"py","file_size_in_byte":2275,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"440529650","text":"from sys import prefix\nfrom app import db\nfrom flask import Blueprint, request, make_response, jsonify, abort\nfrom app.models.task import 
Task\nfrom datetime import datetime\n\ntask_bp = Blueprint(\"task\", __name__, url_prefix=\"/tasks\")\n\n# Helper Function\ndef get_task_from_id(id):\n try:\n id = int(id)\n except:\n abort(400, {\"error\": \"invalid id\"})\n return Task.query.get_or_404(id)\n\n#CREATE\n@task_bp.route(\"\", methods=[\"POST\"])\ndef create_task():\n request_body = request.get_json()\n new_task = Task(\n title=request_body[\"title\"],\n priority=request_body[\"priority\"],\n dueDate=request_body[\"dueDate\"],\n )\n\n db.session.add(new_task)\n db.session.commit()\n\n return make_response(f\"New task {new_task.title} successfully created!\", 201)\n\n#READ\n@task_bp.route(\"\", methods=[\"GET\"])\ndef read_all_tasks():\n tasks = Task.query.all()\n tasks_response = []\n for task in tasks:\n tasks_response.append(task.to_dict())\n return jsonify(tasks_response)\n\n@task_bp.route(\"\", methods=[\"GET\"])\ndef read_one_tasks(id):\n task = get_task_from_id(id)\n return jsonify(task.to_dict())\n\n#UPDATE\n@task_bp.route(\"\", methods=[\"PUT\"])\ndef update_task(id):\n task = get_task_from_id(id)\n request_body = request.get_json()\n if \"title\" in request_body:\n task.title = request_body[\"title\"]\n if \"dueDate\" in request_body:\n task.dueDate = request_body[\"dueDate\"]\n if \"priority\" in request_body:\n task.priority = request_body[\"priority\"]\n if \"status\" in request_body:\n task.status = request_body[\"status\"]\n db.session.commit()\n return jsonify([task.to_dict(), \"Update Successful\"])\n\n#DELETE\n@task_bp.route(\"\", methods=[\"DELETE\"])\ndef delete_task(id):\n task = get_task_from_id(id)\n db.session.delete(task)\n db.session.commit()\n return make_response(\"Delete successful\", 200)","sub_path":"app/routes/task_routes.py","file_name":"task_routes.py","file_ext":"py","file_size_in_byte":1890,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"199210895","text":"'''Start with the program you wrote for Exercise 6-1 (page 102). Make two new\ndictionaries representing different people, and store all three dictionaries\nin a list called people. Loop through your list of people. As you loop through\nthe list, print everything you know about each person.'''\n\nme = {\n 'first name' : 'max',\n 'last name' : 'korenev',\n 'age' : 19,\n 'city' : 'moscow',\n}\n\njane = {\n 'first name' : 'jane',\n 'last name' : 'aldridge',\n 'age' : 18,\n 'city' : 'portland',\n}\n\nshelley = {\n 'first name' : 'shelley',\n 'last name' : 'abramson',\n 'age' : 19,\n 'city' : 'reno',\n}\n\npeople = [me, jane, shelley]\n\nfor person in people:\n full_name = person['first name'] + \" \" + person['last name']\n\n print(\"\\nFull name is \" + full_name.title())\n print(str(person['age']) + \" y.o.\")\n print(\"Lives in \" + person['city'].title())","sub_path":"project1/venv/chapter 6/6.7_people.py","file_name":"6.7_people.py","file_ext":"py","file_size_in_byte":873,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"382354910","text":"#\n# Title: chanmapfile.py\n# Author: Oscar Benjamin\n# Date: 14 Aug 2011\n#\n\"\"\"This module defines the ChanMapFile subclass of EEGFile.\n\nThe ChanMapFile provides the implementation for mapping the channels of one\nEEG file into a new EEG file. 
In this way it is possible to achieve selection,\nreordering and renaming of channels.\n\"\"\"\n\n\nfrom electrolib.eegfile import EEGFile, Head\nfrom electrolib.block import BlockDescriptor\n\n\nclass ChanMapFile(EEGFile):\n    \"\"\"ChanMapFile class\n    \"\"\"\n\n    def __init__(self, infile, chanmap=None):\n        \"\"\"Store the chanmap and initialise\"\"\"\n        self._chanmap = chanmap\n        super(ChanMapFile, self).__init__(infile)\n\n    def _setup_chanmap(self, channames, chanmap):\n        \"\"\"Returns a new chanmap based on the source chan info\"\"\"\n        # If not provided fill the identity map here\n        if chanmap is None:\n            chanmap = zip(channames, channames)\n        return chanmap\n\n    def _get_head(self):\n        \"\"\"Override EEGFile._get_head to modify channels\"\"\"\n        # Read original header\n        head = super(ChanMapFile, self)._get_head()\n        chan = head.chan\n\n        # Finalise the chanmap based on the source channel list\n        channames = [c.label for c in chan]\n        self._chanmap = self._setup_chanmap(channames, self._chanmap)\n\n        # Create new channel data\n        chan_d = dict((c.label, c) for c in chan)\n        new_chan = []\n        for c_old, c_new in self._chanmap:\n            new_chan.append(chan_d[c_old]._replace(label=c_new))\n\n        # Prepare data blocks\n        descr = [(c.label, 'i2', c.nsblock) for c in new_chan]\n        self._block_descriptor = BlockDescriptor(descr)\n\n        # Return head with modified information\n        return head._replace(chan=new_chan, nchan=len(new_chan))\n\n    def _get_blocks(self, n1, n2):\n        \"\"\"Override EEGFile._get_blocks to modify channels\"\"\"\n        # Read original blocks\n        old_blocks = super(ChanMapFile, self)._get_blocks(n1, n2)\n\n        # Map into the new block\n        new_blocks = self._block_descriptor.new(len(old_blocks))\n        for c_old, c_new in self._chanmap:\n            new_blocks[c_new] = old_blocks[c_old]\n\n        return new_blocks\n\n\nclass ChanRenameFile(ChanMapFile):\n    \"\"\"ChanRenameFile\n\n    This enables specific channel names to be renamed.\n    \"\"\"\n\n    def _setup_chanmap(self, channames, chanmap):\n        \"\"\"Modify only the names of channels referred to in chanmap\"\"\"\n        # Default is identity\n        if chanmap is None:\n            chanmap = []\n        # Rename channels referred to in chanmap\n        dmap = dict(chanmap)\n        chanmap = []\n        for old_name in channames:\n            new_name = dmap.get(old_name, old_name)\n            chanmap.append((old_name, new_name))\n        return chanmap\n\nclass ChanOrderFile(ChanMapFile):\n    \"\"\"ChanOrderFile\n\n    This enables a reordering and selection of desired channels.\n    \"\"\"\n    def __init__(self, infile, chanorder=None):\n        \"\"\"Store the chanmap and initialise\"\"\"\n        if chanorder is not None:\n            chanmap = zip(chanorder, chanorder)\n        else:\n            chanmap = None\n        super(ChanOrderFile, self).__init__(infile, chanmap)\n\n\n","sub_path":"electrolib/channel/chanmapfile.py","file_name":"chanmapfile.py","file_ext":"py","file_size_in_byte":3193,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"557901169","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.8 (3413)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /Users/dpoliuha/work/opensource/swagger2locustio/swagger2locustio/parsers/swagger_v2.py\n# Compiled at: 2020-05-07 07:40:51\n# Size of source mod 2**32: 722 bytes\n\"\"\"Module: SwaggerV2 parser\"\"\"\nfrom copy import deepcopy\nfrom swagger2locustio.parsers.base_parser import SwaggerBaseParser\n\nclass SwaggerV2Parser(SwaggerBaseParser):\n    __doc__ = 'Class: SwaggerV2 parser'\n\n    @staticmethod\n    def _parse_params(params: dict) -> dict:\n        param_data = {}\n        for param in params:\n            param_name = param.get('name')\n            if param_name:\n                # a named parameter must carry both 'in' and 'required' to be usable\n                if not param.get('in') or not param.get('required'):\n                    raise ValueError('Not full info about required param')\n                else:\n                    param_data[param_name] = deepcopy(param)\n        return param_data","sub_path":"pycfiles/swagger2locustio-0.0.3-py3-none-any/swagger_v2.cpython-38.py","file_name":"swagger_v2.cpython-38.py","file_ext":"py","file_size_in_byte":961,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"359300767","text":"import io\r\nimport os\r\nimport shutil\r\nimport zipfile\r\nimport libpath\r\nfrom pid import PID\r\n\r\npath_zip = os.path.join(os.path.dirname(os.path.dirname(__file__)), \"prod\", \"zip\")\r\npath_out = os.path.join(path_zip, \"bearbeitet\")\r\npath_tmp = os.path.join(os.path.dirname(os.path.dirname(__file__)), \"tmp\")\r\n\r\n\r\nclass ZIPdecompress(object):\r\n    def __init__(self):\r\n        pid_file = PID(path_tmp, \"zipDecompress.pid\")\r\n        if pid_file.check_already_running():\r\n            exit()\r\n        else:\r\n            try:\r\n                self._files_to_decompress = os.listdir(path_zip)\r\n                self.zip = None\r\n            finally:\r\n                pid_file.unlink()\r\n\r\n    def run_zip(self):\r\n        for file in self._files_to_decompress:\r\n            if \".txt\" in file:\r\n                try:\r\n                    self._read_file(file)\r\n                    self._decompress(file)\r\n                finally:\r\n                    self._mcve_file(file)\r\n\r\n    def _read_file(self, file):\r\n        with open(os.path.join(path_zip, file), \"rb\") as f:\r\n            self.zip = f.read()\r\n\r\n    def _decompress(self, file):\r\n        out = os.path.join(path_out, file.split(\".\")[0])\r\n        os.mkdir(out)\r\n        zf = zipfile.ZipFile(io.BytesIO(self.zip))\r\n        with open(os.path.join(out, \"debug.txt\"), \"w\") as f:\r\n            for i in zf.infolist():\r\n                f.write(str(i))\r\n        zf.extractall(out)\r\n\r\n    @staticmethod\r\n    def _mcve_file(file):\r\n        shutil.move(os.path.join(path_zip, file), path_out)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n    libpath.lib()\r\n    ZIPdecompress()\r\n","sub_path":"hf.toolbox/zipDecompressAppl.py","file_name":"zipDecompressAppl.py","file_ext":"py","file_size_in_byte":1593,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"71986831","text":"\nfrom keras.models import Sequential\nfrom keras.layers import Flatten\nfrom keras.layers import Dense\nfrom keras.layers import Dropout\nfrom keras.datasets import mnist\n\n(x_train, y_train),(x_test, y_test) = mnist.load_data()\nx_train, x_test = x_train / 255.0, x_test / 255.0\n\nmodel = Sequential([\n    Flatten(),\n    Dense(512, activation='relu'),\n    Dropout(0.2),\n    Dense(10, activation='softmax')\n])\nmodel.compile(optimizer='adam',\n              loss='sparse_categorical_crossentropy',\n              metrics=['accuracy'])\n\nmodel.fit(x_train, y_train, epochs=5)\nmodel.evaluate(x_test, y_test)\n","sub_path":"turtorial_0_.py","file_name":"turtorial_0_.py","file_ext":"py","file_size_in_byte":587,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"631692614","text":"import matplotlib.pyplot as plt\nimport numpy as np\nimport sys\n\nimport glob\n\nsys.path.append(\"../..\")\n\nimport utils\n\nif __name__ == \"__main__\":\n    path = \"/Users/austin.shen/Dropbox/UWA/ICRAR_Kenji/data/additional/v1/\"\n\n    n = 9\n    files = glob.glob(path + \"*\")\n    files.sort()\n    data = []\n    for file in files:\n        dat = utils.inout.read_file(file, n, 1)\n        data.append(dat)\n\n    data_array = np.array(data)\n    data_array = data_array[0]\n    img = data_array[-1,:,:,0]\n\n    plt.imshow(img)\n    plt.tight_layout()\n    plt.axis('off')\n    plt.savefig(\"galaxy.png\", 
transparent=True)\n","sub_path":"figures/galaxy_figure.py","file_name":"galaxy_figure.py","file_ext":"py","file_size_in_byte":598,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"139903849","text":"#! env/bin/python\nfrom PIL import Image \nimport numpy as np\n\ndef create_image():\n array = np.zeros([4,8,3], dtype=np.uint8)\n array[:,4:] = [255, 255, 0]\n img = Image.fromarray(array)\n img.save('image.png')\n\ndef read_image():\n img = Image.open('image.png')\n array = np.array(img)\n print(array)\n\nimg = Image.open('tests/test_same/image 1.png')\narray1 = np.array(img)\nprint(array1[3,3])\nimg = Image.open('tests/test_same/image 1 duplicate.png')\narray2 = np.array(img)\narray2[3,3] = [255]*3 + [255] # last is opacity 0 - transparent 255 - opaque\nimg = Image.fromarray(array2)\nimg.save('tests/test_same/image 2 appears like 1 but different.png')\nprint((array1 == array2).all())\n\n","sub_path":"playground_images.py","file_name":"playground_images.py","file_ext":"py","file_size_in_byte":698,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"218699175","text":"from numpy import *\r\nimport operator\r\nimport string\r\n\r\ndef classify0(inX, dataSet, labels, k):\r\n inx=array(inX)\r\n dataSetSize = dataSet.shape[0]\r\n diffMat0 = tile(inx,(dataSetSize,1))\r\n diffMat=diffMat0-dataSet\r\n sqDiffMat = diffMat**2\r\n sqDistances = sqDiffMat.sum(axis=1)\r\n distances = sqDistances**0.5\r\n sortedDistIndicies = distances.argsort()\r\n #\r\n classCount={}\r\n for i in range(k):\r\n voteIlabel = (str)(labels[sortedDistIndicies[i]])\r\n classCount[voteIlabel] = classCount.get(voteIlabel,0) + 1\r\n #\r\n sortedClassCount = sorted(classCount.items(), key=operator.itemgetter(1), reverse=True)\r\n #\r\n return sortedClassCount[0][0]\r\n\r\ndef ReadFile2Mat0(fileName):\r\n fr=open(fileName)\r\n init=1\r\n for line in fr.readlines():\r\n str=line.strip('\\n').split(',')\r\n t=array([str[0],str[1],str[2]]).astype(float)\r\n t1=array(str[3]).astype(float)\r\n\r\n if(init==1):\r\n ReturnMat=t\r\n labels=t1\r\n init=0\r\n else:\r\n ReturnMat=row_stack([ReturnMat,t])\r\n labels=row_stack([labels,t1])\r\n return ReturnMat,labels\r\n\r\ndef ReadFile2Mat(fileName):\r\n fr=open(fileName)\r\n ReturnMat=[]\r\n labels=[]\r\n for line in fr.readlines():\r\n str=line.strip('\\n').split(',')\r\n t=array([str[0],str[1],str[2]]).astype(float)\r\n t1=array(str[3]).astype(float)\r\n\r\n ReturnMat.append(t)\r\n labels.append(t1)\r\n\r\n return ReturnMat,labels","sub_path":"AIDlls/KNN.py","file_name":"KNN.py","file_ext":"py","file_size_in_byte":1497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"81283252","text":"#a = [ [3, -0.1, -0.2, 7.85], [0.1, 7, -0.3, -19.3], [0.3, -0.2, 10, 71.4]]\n#a = [ [4, 1, 2, 16], [1, 3, 1, 10], [1, 2, 5, 12]]\n#a = [ [4, -1, 0, 2], [-1, 4, -1, 6], [0, -1, 4, 2]]\n#a = [ [10, 3, 1, 14], [5, -10, 3, -5], [1, 3, 10, 14]]\n#a = [ [1, 4, 2, 2], [1, 2, 4, 5], [4, 1, 2, 1]]\n#a = [ [1, 2, 4, 0], [4, 1, 2, 0], [1, 4, 2, 2]]\n#a = [ [1, 2, 4, 0, 1], [1, 0, 2, 4, 5], [2, 4, 0, 1, 3], [4, 1, 0, 2, 3]]\n#a = [ [1, 2, 5, 0, 1, 0], [2, 5, 0, 1, 1, 0], [1, 0, 2, 5, 1, 0], [5, 1, 0, 2, 1, 0], [1, 1, 0, 2, 5, 0]]\n#a = [ [1, 2, 6, 0, 1, 1, 0], [2, 1, 0, 1, 1, 6, 0], [2, 6, 0, 1, 1, 1, 0], [1, 0, 2, 6, 1, 1, 0], [6, 1, 0, 2, 1, 1, 0], [1, 1, 0, 2, 6, 1, 0]]\n#a = [ [6, 1, 0, 2, 1, 1, 9], [2, 6, 0, 1, 1, 1, 8], [1, 2, 6, 0, 1, 1, 7], [1, 0, 2, 6, 
1, 1, 6], [1, 1, 0, 2, 6, 1, 5], [2, 1, 0, 1, 1, 6, 4]]\n\nper = 2\nsuma = []\nr = []\nr2 = []\na = []\n\n#ec = int(input('Ingrese el numero de variables en su sistema de ecuaciones: '))\n\n\nwhile True:\n try:\n a1 = [float(i) for i in input('Inserte coeficientes seguido del valor resultante separados por espacios de la ecuacion 1: ').split()]\n break\n except ValueError:\n print('Numero no valido, intente de nuevo')\n\nwhile True:\n try:\n a2 = [float(i) for i in input('Inserte coeficientes seguido del valor resultante separados por espacios de la ecuacion 2: ').split()]\n break\n except ValueError:\n print('Numero no valido, intente de nuevo')\n\nwhile True:\n try:\n a3 = [float(i) for i in input('Inserte coeficientes seguido del valor resultante separados por espacios de la ecuacion 3: ').split()]\n break\n except ValueError:\n print('Numero no valido, intente de nuevo')\n \na.append(a1)\na.append(a2)\na.append(a3)\n \nfor n in a:\n suma.append(0)\n r.append(0)\n r2.append(0)\n\ndef sumar():\n\t#print('\\n** Sumar() **\\n')\n\ti=0\n\tfor n in a:\n\t\tsuma[i] = 0\n\t\t#print('suma : ', suma)\n\t\tfor x in range(len(n)-1):\n\t\t\tsuma[i] = suma[i] + n[x]\n\n\t\t#print(n)\n\t\t#print('suma ', i+1, ' = ', suma[i])\n\t\ti = i + 1\n\ndef acomodar():\n\tm=0\n\n\t#print('\\n** Acomodar() **\\n')\n\twhile m == 0:\n\t\t#print('\\nwhile\\n')\n\t\tfor i in range(len(a)):\n\t\t\tsumar()\n\t\t\t#print('\\n\\n')\n\t\t\t#print(a[i])\n\t\t\tfor j in range(len(a)):\n\t\t\t\n\t\t\t\t#print(a[i][j], ' > ', suma[i] - a[i][j])\n\t\t\t\tif a[i][j] > suma[i] - a[i][j]:\n\t\t\t\t\t#print(' si ')\n\t\t\t\t\t#print(a[i][j], ' es el mayor')\n\t\t\t\t\tif i!=j:\n\t\t\t\t\t\tprint(' acomodar ', i + 1, ' en ', j + 1)\n\t\t\t\t\t\taux = a[i]\n\t\t\t\t\t\ta[i] = a[j]\n\t\t\t\t\t\ta[j] = aux\n\t\t\t\t\t\tm=m+1\n\t\t\t\t\t\timprimirMatriz()\n\t\t\t\t\t\tbreak\n\t\t\t\t\telse:\n\t\t\t\t\t\tm=m-1\n\t\t\t\t\t\tbreak\n\t\t\t\t#else:\n\t\t\t\t\t#print(' no ')\n\n\t\t\ndef imprimirMatrizIndices():\n\tprint('\\n** MATRIZ **\\n')\n\to=0\n\tp=0\n\tfor o in range(len(a)):\n\t\tfor p in range(len(a)+1):\n\t\t\tprint(a[o][p])\n\t\t\tp = p + 1\n\t\tprint()\n\t\to = p + 1\n\tprint('\\n\\n')\n\ndef imprimirMatriz():\n\tprint('\\n** MATRIZ **\\n')\n\tfor n in a:\n\t\tprint(n)\n\tprint('\\n\\n')\n\nimprimirMatriz()\nacomodar()\n\nimprimirMatriz()\nerrorM = 100\nwhile errorM > per:\n r2 = list(r)\n y=1\n\n for i in range(len(a)):\n print('\\n\\nResultados\\n', r)\n # for k in range(len(a)):\n s = 0\t# s = 0\n for j in range(len(a)):\n\n if j != i:\n print('a[',i,'][',j, '] * ', 'r[',j,']')\n print(a[i][j], ' * ', r[j])\n s = s + a[i][j] * r[j]\n # print(a[j][i], ' * ', r[i])\n # s = s + a[j][i] * r[i]\n print(' j = ', j)\n for j in range(len(a)):\n if i == j:\n r[i] = (a[i][len(a)] - s)/a[i][j]\n print('(',a[i][len(a)], ' - ', s,') / ', a[i][j], ' = ', r[i])\n break\n\n errorM = 0\n for i in range(len(a)):\n error = abs(((r[i] - r2[i]) / r[i])) * 100\n print('\\n\\nError Aprox x[',y,']: (((',r[i], ' - ', r2[i], ') / ', r[i],')) * 100 = ', error,' \\n\\n')\n if errorM < error:\n errorM = error\n y=y+1\n\n#print( 'x[i] = ( ' ,n[len(n)-1], ' - ', n[len(n)-3], ' * ', x[i+1], ' - ', n[len(n)-2], ' * ', x[i+2], ' ) / ', n[i], ' )')\n#x[i] = (n[len(n)-1] - n[len(n)-3] * x[i+1] - n[len(n)-2] * x[i+2]) / n[i]\nprint('\\n\\nResultados\\n\\n')\ni=1\nfor res in r:\n\tprint('x[',i,'] =', res)\n\ti=i+1\n\n\n#print(' \\n\\n\\n\\n0 / 4 = ', 0/4)\n#\tError = (xi^j - xi^j-1) / 
xi^j","sub_path":"gaussSeidel.py","file_name":"gaussSeidel.py","file_ext":"py","file_size_in_byte":4236,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"281154829","text":"import json\nfrom pathlib import Path\n\n\ndef add_person(person, filepath: str):\n    '''\n    Add person to a json encoded file, create file if it doesn't exist\n    \n    Args:\n        person:\n            person to add to the json file\n        filepath:\n            path of the json file\n    '''\n    filepath = Path(filepath)\n    if not filepath.exists():\n        Path(filepath.parent).mkdir(parents=True, exist_ok=True)\n        with filepath.open('w') as file:\n            json.dump({}, file)\n    with filepath.open() as file:\n        data = json.load(file)\n    people = data.get('people', [])\n    people = people + [person]\n    data['people'] = people\n    with filepath.open('w') as file:\n        json.dump(data, file, indent=4)\n\n\ndef create_person(name: str, surname: str, age: int, sex: bool, major: str=None, image: str=None):\n    '''\n    Create a dictionary describing a person\n\n    Args:\n        name:\n            given name of the person\n        surname:\n            surname of the person\n        age:\n            age of the person\n        sex:\n            Sex of the person, True = female, False = male\n        major:\n            Major of the person\n        image:\n            filepath to an image of the person\n    '''\n    return {\n        'name': name,\n        'surname': surname,\n        'age': age,\n        'sex': sex,\n        'major': major,\n        'image': str(Path(image).absolute()) if image is not None else None\n    }\n","sub_path":"logic/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":1433,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"10271521","text":"def dijkstra(graph, src):\n    if graph is None:\n        return None\n    nodes = [i for i in range(len(graph))]\n    visited = []\n    if src in nodes:\n        visited.append(src)\n        nodes.remove(src)\n    else:\n        return None\n    distance = {src:0}\n    path={src:{src:[]}}\n    k = pre = src\n    while nodes:\n        mid_distance = float('inf')\n        for v in visited:\n            for d in nodes:\n                new_distance = graph[src][v] + graph[v][d]\n                if new_distance < mid_distance:\n                    mid_distance = new_distance\n                    graph[src][d] = new_distance\n                    k = d\n                    pre = v\n        distance[k] = mid_distance\n        path[src][k] = [i for i in path[src][pre]]\n        path[src][k].append(k)\n        visited.append(k)\n        nodes.remove(k)\n    return distance, path\n\nif __name__ == '__main__':\n    graph_list = [ [0, 2, 1, 4, 5, 1],\n                   [1, 0, 4, 2, 3, 4],\n                   [2, 1, 0, 1, 2, 4],\n                   [3, 5, 2, 0, 3, 3],\n                   [2, 4, 3, 4, 0, 1],\n                   [3, 4, 7, 3, 1, 0]]\n\n    distance,path= dijkstra(graph_list, 0) \n    print(distance,path)\n    ","sub_path":"classic_algorithm/dijkstra.py","file_name":"dijkstra.py","file_ext":"py","file_size_in_byte":1177,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"268719426","text":"# Convert Sorted List to Binary Search Tree\n# Given a singly linked list where elements are sorted in ascending order, convert it to a height balanced BST.\n# For this problem, a height-balanced binary tree is defined as a binary tree in which the depth of the two\n# subtrees of every node never differ by more than 1.\n\n# Example:\n# Given the sorted linked list: [-10,-3,0,5,9],\n# One possible answer is: [0,-3,9,-10,null,5], which represents the following height balanced BST:\n#\n#       0\n#      / \\\n#    -3   9\n#    /   /\n#  -10  5\n# ======================================================================================\n# Algorithm:\n# TC:\n# SC:\n# Description:\n# ========================================================================================\n\n\nclass SllNode:\n\n    def __init__(self, val=0, next=None):\n        self.val = val\n        self.next = next\n\n    def __repr__(self):\n        \"\"\"Returns a printable representation of object we call it on.\"\"\"\n        return \"{}\".format(self.val)\n\n\nclass TreeNode:\n\n    def __init__(self, val=0, left=None, right=None):\n        self.val = val\n        self.left = left\n        self.right = right\n\n    def __repr__(self):\n        \"\"\"Returns a printable representation of object we call it on.\"\"\"\n        return \"{}\".format(self.val)\n\n\n# Method-1\ndef sorted_list_to_bst(head):\n\n    # LL: [-10,-3,0,5,9]\n    def create_bst(head, tail):\n\n        if head == tail:\n            return None\n\n        slow = fast = head\n        # Find mid of list\n        while fast != tail and fast.next != tail:\n            slow = slow.next\n            fast = fast.next.next\n\n        # Create Root, left child, right child of BST\n        root = TreeNode(slow.val)\n        root.left = create_bst(head, slow)\n        root.right = create_bst(slow.next, tail)\n        return root\n\n    curr = head\n    tail = None\n    return create_bst(curr, tail)\n\n\ndef print_list(llist):\n    curr = llist\n    while curr:\n        print(curr.val)\n        curr = curr.next\n\n\nif __name__ == \"__main__\":\n    node1 = SllNode(-10)\n    node1.next = SllNode(-3)\n    node1.next.next = SllNode(0)\n    node1.next.next.next = SllNode(5)\n    node1.next.next.next.next = SllNode(9)\n\n    print(sorted_list_to_bst(node1))\n","sub_path":"code/set_2_linkedlist/109_convert_sorted_list_to_binary_search_tree.py","file_name":"109_convert_sorted_list_to_binary_search_tree.py","file_ext":"py","file_size_in_byte":2210,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"349430195","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Jul  1 21:32:18 2018\n\n@author: owen\n\"\"\"\n\n# https://leetcode.com/problems/score-after-flipping-matrix/discuss/143722/C++JavaPython-Easy-and-Concise\nclass Solution:\n    def matrixScore(self, A):\n        \"\"\"\n        :type A: List[List[int]]\n        :rtype: int\n        \"\"\"\n        # greedy, time O(mn)\n        m,n=len(A),len(A[0])\n        res=m*(1<<(n-1)) # make the left-most column all 1s ( toggle all A[i][0] to 1), since it is greater than the sum of all other columns with 1s\n        for j in range(1,n):\n            cnt=0 # count ones in j-th column\n            for i in range(m):\n                if A[i][j]==A[i][0]: # since A[i][j] has the same value as A[i][0], after toggling, it must be one\n                    cnt+=1\n            if m-cnt>cnt:\n                res+=(m-cnt)*(1<<(n-j-1)) # toggling this col, then it has m-cnt ones\n            else:\n                res+=cnt*(1<<(n-j-1)) # no toggling, cnt ones\n#            cnt= sum(A[i][j] == A[i][0] for i in range(m))\n#            res += max(cnt, m-cnt) * (1 << n-1-j)\n        return res\n    \nif __name__==\"__main__\":\n    print(Solution().matrixScore([[0,0,1,1],[1,0,1,0],[1,1,0,0]]))\n    \n    \n    ","sub_path":"861. Score After Flipping Matrix.py","file_name":"861. 
Score After Flipping Matrix.py","file_ext":"py","file_size_in_byte":1249,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"628055500","text":"\n\nimport subprocess\nfrom .cigarParser import parse_cigar\nfrom sys import exit\nfrom .alignment import flatten, reverseComplement\nfrom .ssr import _isSSR, _getSsrType, _ssrReadPos, TARGET_SEQ, mergeINS\n\n\nvcf_header = '''##fileformat=VCFv4.2\n##FILTER=\n##reference=file:///nfs/spot/home/atorres/CRISPR_mouse/reference/GRCm38_68.fa\n##contig=\n##contig=\n##contig=\n##contig=\n##contig=\n##contig=\n##contig=\n##contig=\n##contig=\n##contig=\n##contig=\n##contig=\n##contig=\n##contig=\n##contig=\n##contig=\n##contig=\n##contig=\n##contig=\n##contig=\n##contig=\n##contig=\n##INFO=\n##INFO=\n##INFO=\n##INFO=\n##ALT=\n##ALT=\n##ALT=\n##ALT=\n##ALT=\n##ALT=\n##ALT=\n##ALT=\n##INFO=\n##INFO=\n##INFO=\n##INFO=\n##INFO=\n##INFO=\n##INFO=\n##INFO=\n##INFO=\n##INFO=\n##INFO=\n##INFO=\n##INFO=\n##INFO=\n##INFO=\n##INFO=\n##INFO=\n##INFO=\n#CHROM\\tPOS\\tID\\tREF\\tALT\\tQUAL\\tFILTER\\tINFO\n'''\n\n\ndef main(dictBam, out_vcf, ref_genome, in_fa, temp, prefix):\n\n minSV = 15\n\n with open(out_vcf, mode='w') as writer:\n out = []\n writer.write(vcf_header)\n if _isSSR(in_fa):\n with open(in_fa, 'r') as fasta_ssr:\n for line in fasta_ssr:\n if not line.startswith('>'):\n seq = line\n fasta_ssr_line = line\n out.append(list(flatten(inferSSR(dictBam=dictBam, ref_genome=ref_genome, in_fa=in_fa, ssr_type=_getSsrType(fasta_ssr_line)[0], ssr_pos=_ssrReadPos(fasta=fasta_ssr_line, dictBam=dictBam)))))\n in_fa = temp + '{}.noSSR.fasta'.format(prefix)\n\n with open(in_fa, 'r') as fasta:\n for line in fasta:\n if not line.startswith('>'):\n try:\n seq\n except:\n seq = line\n fasta_line = line\n out.append(list(flatten(inferVariants(dictBam=dictBam, ref_genome=ref_genome, in_fa=in_fa, minSV=minSV, fasta=fasta_line))))\n out.append(list(flatten(inferIndels(dictBam=dictBam, ref_genome=ref_genome))))\n\n out = mergeINS(list(flatten(out)), seq, dictBam)\n\n writer.write(''.join(flatten(out)))\n\n\ndef inferVariants(dictBam, ref_genome, in_fa, minSV, fasta):\n\n output = []\n\n if len(dictBam) == 1:\n return (output)\n\n lf = [c for c in dictBam if c['rpos'] == 0][0]\n rf = [c for c in dictBam if c['rpos'] == len(dictBam)-1][0]\n homArmDown = lf if lf['gs'] < rf['gs'] else rf\n print ('Homology arm downstream')\n print (homArmDown)\n homArmUps = rf if rf['ge'] > lf['ge'] else lf\n print ('Homology arm upstream')\n print (homArmUps)\n if homArmDown['strand'] != homArmUps['strand']:\n exit('ERROR: Exit variant calling: homologous regions have different orientation.')\n\n if homArmDown['chrom'] != homArmUps['chrom']:\n exit('ERROR: Exit variant calling: homologous regions are in different chromosome.')\n\n go = homArmDown['strand']\n gc = homArmDown['chrom']\n\n i = 0\n while i < len(dictBam)-1:\n read = [c for c in dictBam if c['rpos'] == i][0]\n if len(dictBam) == 2:\n nread = [c for c in dictBam if c['rpos'] == i+1][0]\n output.append(inferSimpleSV(read=read, nread=nread, ncread=nread, go=go, gc=gc, ref_genome=ref_genome, minSV=minSV, fasta=fasta, nested=False))\n i += 1\n elif len(dictBam) > 2:\n nread = [c for c in dictBam if c['rpos'] == i+1][0]\n ncread = nread\n try:\n # ncread = [c for c in dictBam if c['gs'] >= read['ge']] if go == '+' else [c for c in dictBam if c['ge'] <= read['gs']]\n # ncread = min(ncread, key=lambda k: (k['gs'])) if go == '+' else max(ncread, key=lambda k: (k['ge']))\n ncread = [c for c in 
dictBam if c['ge'] > read['ge']]\n ncread = min(ncread, key=lambda k: (k['gs']))\n\n except:\n i += 1\n continue\n print ('The iteration number is')\n print (i)\n print ('The read is')\n print (read)\n print ('The nread is')\n print (nread)\n print ('The ncread is')\n print (ncread)\n\n if nread == ncread:\n print ('one')\n output.append(inferSimpleSV(read=read, nread=nread, ncread=ncread, go=go, gc=gc, ref_genome=ref_genome, minSV=minSV, fasta=fasta, nested=False))\n elif nread['ge'] <= homArmDown['gs'] or nread['gs'] >= homArmUps['ge'] or nread['chrom'] != gc:\n print ('two')\n output.append(inferDUP(read=read, nread=nread, ncread=ncread, go=go, gc=gc, ref_genome=ref_genome, minSV=minSV, fasta=fasta, nested=False))\n output.append(inferSimpleSV(read=read, nread=ncread, ncread=ncread, go=go, gc=gc, ref_genome=ref_genome, minSV=minSV, fasta=fasta, nested=True))\n i += 1\n # elif (go == '+' and nread['gs'] >= read['ge'] and nread['ge'] <= ncread['gs']) or (go == '-' and nread['ge'] <= read['gs'] and nread['gs'] >= ncread['ge']):\n elif (nread['gs'] >= read['ge'] and nread['ge'] <= ncread['gs']):\n print ('three')\n output.append(inferSimpleSV(read=read, nread=nread, ncread=ncread, go=go, gc=gc, ref_genome=ref_genome, minSV=minSV, fasta=fasta, nested=False))\n elif (nread['gs'] >= read['gs'] and nread['ge'] <= read['ge']) or (nread['gs'] >= ncread['gs'] and nread['ge'] <= ncread['ge']):\n print ('four')\n output.append(inferSimpleSV(read=read, nread=nread, ncread=ncread, go=go, gc=gc, ref_genome=ref_genome, minSV=minSV, fasta=fasta, nested=False))\n output.append(inferSimpleSV(read=read, nread=ncread, ncread=ncread, go=go, gc=gc, ref_genome=ref_genome, minSV=minSV, fasta=fasta, nested=True))\n i += 1\n # elif go == '+' and (nread['ge'] <= read['gs'] or nread['gs'] >= ncread['ge']):\n # output.append(inferDUP(read=read, nread=nread, go=go, gc=gc, ref_genome=ref_genome, minSV=minSV, fasta=fasta, nested=False))\n # elif go == '-' and (nread['gs'] >= read['ge'] or nread['ge'] <= ncread['gs']):\n # output.append(inferDUP(read=read, nread=nread, go=go, gc=gc, ref_genome=ref_genome, minSV=minSV, fasta=fasta, nested=False))\n elif (nread['ge'] <= read['gs'] or nread['gs'] >= ncread['ge']):\n print ('five')\n output.append(inferDUP(read=read, nread=nread, ncread=ncread, go=go, gc=gc, ref_genome=ref_genome, minSV=minSV, fasta=fasta, nested=False))\n i += 1\n print ('And the output so far is:')\n print (output)\n continue\n\n return (output)\n\n\ndef inferIndels(dictBam, ref_genome):\n indels = []\n for i in range(len(dictBam)):\n read = dictBam[i]\n cigarParsed = parse_cigar(read['cigar'])\n seq = list(read['seq'])\n read_pos = 0\n pos = int(dictBam[i]['gs'])\n del_count = 0\n for event in cigarParsed:\n if event[0] == 'M' or event[0] == '=':\n pos += event[1]\n read_pos += event[1]\n elif event[0] == 'D':\n del_count += 1\n svtype = 'DEL'\n svlen = -event[1]\n svend = (pos-1) + event[1]\n ref = get_ref(ref_genome, read['chrom'], (pos-1), (pos-1)+abs(svlen))\n alt = ref[0]\n indels.append('{}\\t{}\\t.\\t{}\\t{}\\t.\\t.\\tSVTYPE={};SVLEN={};END={}\\n'.format(read['chrom'], pos-1, ref, alt, svtype, svlen, svend))\n pos += event[1]\n elif event[0] == 'I':\n svtype = 'INS'\n svlen = event[1]\n svend = pos - 1\n ref = ''.join(seq[read_pos-1])\n alt = ''.join(seq[(read_pos-1):(read_pos + event[1])])\n indels.append('{}\\t{}\\t.\\t{}\\t{}\\t.\\t.\\tSVTYPE={};SVLEN={};END={}\\n'.format(read['chrom'], pos-1, ref, alt, svtype, svlen, svend))\n read_pos += event[1]\n elif event[0] == 'S' or event[0] == 'H':\n 
continue\n return (indels)\n\n# def inferSNP(dictBam):\n# snps = []\n# for i in range(len(dictBam)):\n\n\ndef inferSimpleSV(read, nread, ncread, go, gc, ref_genome, minSV, fasta, nested):\n sv = []\n if nread['gs'] - read['ge'] > minSV:\n svstart = read['ge'] - 1\n svend = nread['gs'] - 1\n svtype = 'DEL'\n svlen = svstart - svend\n ref = get_ref(ref_genome, read['chrom'], (svstart), int(svend))\n alt = ref[0]\n sv.append('{}\\t{}\\t.\\t{}\\t{}\\t.\\t.\\tSVTYPE={};SVLEN={};END={}\\n'.format(read['chrom'], svstart, ref, alt, svtype, svlen, svend))\n\n if (max(nread['rs'], read['rs']) - min(read['re'], nread['re'])) - [nread['gs'] - read['ge'] if nread['gs'] - read['ge'] > 0 else 0][0] > minSV:\n # svstart = read['ge'] - 1 if go == '+' else nread['ge'] - 1\n svstart = read['ge'] - 1\n svend = svstart\n svtype = 'INS'\n ref = get_ref(ref_genome, read['chrom'], (svstart), int(svend))\n alt = ref + fasta[min(read['re'], nread['re'])-1:max(nread['rs'], read['rs'])-1]\n sv.append('{}\\t{}\\t.\\t{}\\t{}\\t.\\t.\\tSVTYPE={};END={}\\n'.format(read['chrom'], svstart, ref, alt, svtype, svend))\n\n elif min(read['re'], nread['re']) < max(nread['rs'], read['rs']) and nested is False:\n # svstart = read['ge'] - 1 if go == '+' else nread['ge'] - 1\n svstart = read['ge'] - 1\n svend = svstart\n svtype = 'INS'\n ref = get_ref(ref_genome, read['chrom'], (svstart), int(svend))\n alt = ref + fasta[min(read['re'], nread['re'])-1:max(nread['rs'], read['rs'])-1]\n sv.append('{}\\t{}\\t.\\t{}\\t{}\\t.\\t.\\tSVTYPE={};END={}\\n'.format(read['chrom'], svstart, ref, alt, svtype, svend))\n\n if min(nread['re'], ncread['re']) < max(nread['rs'], ncread['rs']) and nested is False and nread != ncread:\n # svstart = read['ge'] - 1 if go == '+' else nread['ge'] - 1\n svstart = read['ge'] - 1\n svend = svstart\n svtype = 'INS'\n ref = get_ref(ref_genome, read['chrom'], (svstart), int(svend))\n alt = ref + fasta[min(nread['re'], ncread['re'])-1:max(ncread['rs'], nread['rs'])-1]\n sv.append('{}\\t{}\\t.\\t{}\\t{}\\t.\\t.\\tSVTYPE={};END={}\\n'.format(nread['chrom'], svstart, ref, alt, svtype, svend))\n\n\n # if (nread['gs'] - read['ge']) > minSV and (nread['rs'] - read['re']) < minSV:\n if min(read['ge'], nread['ge']) > max(nread['gs'], read['gs']) + minSV and (max(nread['rs'], read['rs']) - min(read['re'], nread['re'])) <= minSV:\n # svstart = nread['gs'] - 1 if go == '+' else read['gs'] - 1\n # svend = read['ge'] - 1 if go == '+' else nread['ge'] - 1\n cs = max(nread['gs'], read['gs'])\n ce = min(read['ge'], nread['ge'])\n # dupSeq = get_ref(ref_genome, int(read['chrom']), (int(nread['gs'])), int(read['ge']))\n dupSeq = get_ref(ref_genome, read['chrom'], cs - 1, ce - 1)\n svstart = max(read['ge'], nread['ge']) - 1\n svend = svstart\n # ndups = 2 # TODO: Allow for more than 1 tandem duplication\n # ndups = 0\n # nnread = read[i + 1 + ndups]\n # while read['ge'] == nnread['ge']:\n # ndups += 1\n # nnread = read[i + 1 + ndups]\n svlen = len(dupSeq)\n svtype = 'DUP:TANDEM' if nread['strand'] == go else 'INVDUP:TANDEM'\n ref = get_ref(ref_genome, read['chrom'], svstart, svend)\n alt = ref + dupSeq\n coords = '{}:{}-{}'.format(nread['chrom'], cs, ce - 1)\n sv.append('{}\\t{}\\t.\\t{}\\t{}\\t.\\t.\\tSVTYPE={};SVLEN={};END={};DUPCOORDS={}\\n'.format(read['chrom'], svstart, ref, alt, svtype, svlen, svend, coords))\n return (sv)\n\n elif min(read['ge'], nread['ge']) > max(nread['gs'], read['gs']) + minSV and (max(nread['rs'], read['rs']) - min(read['re'], nread['re'])) > minSV:\n cs = max(nread['gs'], read['gs'])\n ce = 
min(read['ge'], nread['ge'])\n svstart = max(read['ge'], nread['ge']) - 1\n svend = svstart\n dupSeq = get_ref(ref_genome, read['chrom'], cs - 1, ce - 1)\n\n svlen = len(dupSeq)\n svtype = 'DUP' if nread['strand'] == go else 'INVDUP'\n ref = get_ref(ref_genome, read['chrom'], int(svstart), int(svend))\n alt = ref + nread['seq']\n coords = '{}:{}-{}'.format(nread['chrom'], cs, ce - 1)\n sv.append('{}\\t{}\\t.\\t{}\\t{}\\t.\\t.\\tSVTYPE={};SVLEN={};END={};DUPCOORDS={}\\n'.format(read['chrom'], svstart, ref, alt, svtype, svlen, svend, coords))\n return(sv)\n\n if nread['strand'] != go:\n svstart = nread['gs']\n svend = nread['ge'] - 1\n svtype = 'INV'\n svlen = nread['ge'] - nread['gs']\n ref = get_ref(ref_genome, read['chrom'], int(svstart), int(svend))\n alt = ref[::-1]\n sv.append('{}\\t{}\\t.\\t{}\\t{}\\t.\\t.\\tSVTYPE={};SVLEN={};END={}\\n'.format(read['chrom'], svstart, ref, alt, svtype, svlen, svend))\n\n return (sv)\n\n\ndef inferDUP(read, nread, ncread, go, gc, ref_genome, minSV, fasta, nested):\n sv = []\n svstart = read['ge'] - 1\n svend = svstart\n if (max(nread['rs'], read['rs']) - min(read['re'], nread['re'])) <= minSV and (nread['gs'] >= read['gs'] and nread['ge'] <= read['ge']):\n svtype = 'DUP:TANDEM' if nread['strand'] == go else 'INVDUP:TANDEM'\n else:\n svtype = 'DUP' if nread['strand'] == go else 'INVDUP'\n ref = get_ref(ref_genome, read['chrom'], (svstart), int(svend))\n alt = ref + nread['seq']\n coords = '{}:{}-{}'.format(nread['chrom'], nread['gs'], nread['ge'] - 1)\n sv.append('{}\\t{}\\t.\\t{}\\t{}\\t.\\t.\\tSVTYPE={};END={};DUPCOORDS={}\\n'.format(read['chrom'], svstart, ref, alt, svtype, svend, coords))\n\n if min(read['re'], nread['re']) < max(nread['rs'], read['rs']) and nested is False:\n # svstart = read['ge'] - 1 if go == '+' else nread['ge'] - 1\n svstart = read['ge'] - 1\n svend = svstart\n svtype = 'INS'\n ref = get_ref(ref_genome, read['chrom'], (svstart), int(svend))\n alt = ref + fasta[min(read['re'], nread['re'])-1:max(nread['rs'], read['rs'])-1]\n sv.append('{}\\t{}\\t.\\t{}\\t{}\\t.\\t.\\tSVTYPE={};END={}\\n'.format(read['chrom'], svstart, ref, alt, svtype, svend))\n\n if min(nread['re'], ncread['re']) < max(nread['rs'], ncread['rs']) and nested is False and nread != ncread:\n # svstart = read['ge'] - 1 if go == '+' else nread['ge'] - 1\n svstart = ncread['gs'] - 1\n svend = svstart\n svtype = 'INS'\n ref = get_ref(ref_genome, nread['chrom'], (svstart), int(svend))\n alt = ref + fasta[min(nread['re'], ncread['re'])-1:max(ncread['rs'], nread['rs'])-1]\n sv.append('{}\\t{}\\t.\\t{}\\t{}\\t.\\t.\\tSVTYPE={};END={}\\n'.format(nread['chrom'], svstart, ref, alt, svtype, svend))\n\n return(sv)\n\n\ndef get_ref(fasta, chrom, start, end):\n p1 = subprocess.Popen(['samtools', 'faidx', fasta, '{}:{}-{}'.format(chrom, start, end)], stdout=subprocess.PIPE)\n refSeq = p1.communicate()[0].decode('utf-8')\n return(''.join(refSeq.split('\\n')[1:]))\n\n\ndef inferSSR(ssr_pos, dictBam, ssr_type, in_fa, ref_genome):\n ssr_ins = []\n for pos in ssr_pos:\n chrom = pos.split(':')[0]\n svstart = int(pos.split(':')[1].split('-')[0]) - 1\n svend = svstart\n svtype = 'INS:LOXP' if ssr_type == 'LOXP' or ssr_type == 'LOXPRC' else 'INS:FRT'\n ref = get_ref(ref_genome, chrom, (svstart), int(svend))\n alt = ref + TARGET_SEQ[ssr_type]\n\n ssr_ins.append('{}\\t{}\\t.\\t{}\\t{}\\t.\\t.\\tSVTYPE={};END={}\\n'.format(chrom, svstart, ref, alt, svtype, svend))\n 
return(ssr_ins)\n","sub_path":"crisprcon/lib/infervariants.py","file_name":"infervariants.py","file_ext":"py","file_size_in_byte":18516,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"10455452","text":"import pytest\nfrom mock import patch\n\nfrom pyramid.security import (\n Allow, Deny, Everyone, Authenticated, ALL_PERMISSIONS)\n\nfrom nefertari_guards.base import ACLEncoderMixin, NEF_PERMISSIONS\n\n\nclass TestACLEncoderMixin(object):\n def test_validate_action_valid(self):\n obj = ACLEncoderMixin()\n try:\n obj._validate_action(list(obj.ACTIONS.values())[0])\n except ValueError:\n raise Exception('Unexpected error')\n\n def test_validate_action_invalid(self):\n obj = ACLEncoderMixin()\n with pytest.raises(ValueError) as ex:\n obj._validate_action('foobarbaz')\n expected = 'Invalid ACL action value: foobarbaz. Valid values are:'\n assert expected in str(ex.value)\n\n def test_validate_permission_valid(self):\n obj = ACLEncoderMixin()\n try:\n obj._validate_permission(list(NEF_PERMISSIONS.values())[0])\n except ValueError:\n raise Exception('Unexpected error')\n\n def test_validate_permission_invalid(self):\n obj = ACLEncoderMixin()\n with pytest.raises(ValueError) as ex:\n obj._validate_permission('foobarbaz')\n expected = 'Invalid ACL permission value: foobarbaz. Valid values are:'\n assert expected in str(ex.value)\n\n @patch.object(ACLEncoderMixin, '_validate_action')\n @patch.object(ACLEncoderMixin, '_validate_permission')\n def test_validate_acl(self, mock_perm, mock_action):\n obj = ACLEncoderMixin()\n obj.validate_acl([{'action': 1, 'principal': 2, 'permission': 3}])\n mock_action.assert_called_once_with(1)\n mock_perm.assert_called_once_with(3)\n\n def test_stringify_action_existing(self):\n obj = ACLEncoderMixin()\n assert obj._stringify_action(Deny) == 'deny'\n assert obj._stringify_action(Allow) == 'allow'\n\n def test_stringify_action_nonexisting(self):\n obj = ACLEncoderMixin()\n assert obj._stringify_action('not allow') == 'not allow'\n\n def test_stringify_principal_special(self):\n obj = ACLEncoderMixin()\n assert obj._stringify_principal(Everyone) == 'everyone'\n assert obj._stringify_principal(Authenticated) == 'authenticated'\n\n def test_stringify_principal(self):\n obj = ACLEncoderMixin()\n assert obj._stringify_principal('g:admin') == 'g:admin'\n\n def test_stringify_permissions_regular_string(self):\n obj = ACLEncoderMixin()\n assert obj._stringify_permissions('Foo ') == ['foo']\n\n def test_stringify_permissions_special(self):\n obj = ACLEncoderMixin()\n perms = obj._stringify_permissions(['foo', ALL_PERMISSIONS])\n assert sorted(perms) == ['all', 'foo']\n\n @patch.object(ACLEncoderMixin, '_stringify_action')\n @patch.object(ACLEncoderMixin, '_stringify_principal')\n @patch.object(ACLEncoderMixin, '_stringify_permissions')\n def test_stringify_acl(self, mock_perm, mock_id, mock_action):\n obj = ACLEncoderMixin()\n mock_action.return_value = 1\n mock_id.return_value = 2\n mock_perm.return_value = [3, 4]\n result = obj.stringify_acl([('a', 'b', 'c')])\n assert result == [\n {'action': 1, 'principal': 2, 'permission': 3},\n {'action': 1, 'principal': 2, 'permission': 4},\n ]\n mock_action.assert_called_once_with('a')\n mock_id.assert_called_once_with('b')\n mock_perm.assert_called_once_with('c')\n\n def test_objectify_action(self):\n assert ACLEncoderMixin._objectify_action('allow') is Allow\n assert ACLEncoderMixin._objectify_action('deny') is Deny\n\n def test_objectify_principal(self):\n assert 
ACLEncoderMixin._objectify_principal(\n            'everyone') is Everyone\n        assert ACLEncoderMixin._objectify_principal(\n            'authenticated') is Authenticated\n        assert ACLEncoderMixin._objectify_principal('foo') == 'foo'\n\n    def test_objectify_permission(self):\n        assert ACLEncoderMixin._objectify_permission(\n            'all') == ALL_PERMISSIONS\n        assert ACLEncoderMixin._objectify_permission('foo') == 'foo'\n\n    @patch.object(ACLEncoderMixin, '_objectify_action')\n    @patch.object(ACLEncoderMixin, '_objectify_principal')\n    @patch.object(ACLEncoderMixin, '_objectify_permission')\n    def test_objectify_acl(self, mock_perm, mock_id, mock_action):\n        mock_action.return_value = 1\n        mock_id.return_value = 2\n        mock_perm.return_value = [3]\n        result = ACLEncoderMixin.objectify_acl([\n            {'action': 'a', 'principal': 'b', 'permission': 'c'}\n        ])\n        assert result == [[1, 2, [3]]]\n        mock_action.assert_called_once_with('a')\n        mock_id.assert_called_once_with('b')\n        mock_perm.assert_called_once_with('c')\n","sub_path":"tests/test_base.py","file_name":"test_base.py","file_ext":"py","file_size_in_byte":4757,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"447340110","text":"numlist = []\n\nwhile True:\n    num = input(\"Enter a number: \")\n    \n    if num == 'done':\n        break\n    \n    try:\n        fv = float(num)\n    \n    except:\n        print('Invalid input')\n        continue\n    numlist.append(fv)\n    #print(numlist) \n    \nprint(\"maximum: \",max(numlist))\nprint(\"minimum: \",min(numlist))","sub_path":"pyfor/ex5_2_correct.py","file_name":"ex5_2_correct.py","file_ext":"py","file_size_in_byte":326,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"258292061","text":"import threading\nimport socket\nimport time\n\nclass APRSISClient(threading.Thread):\n    def __init__(self, addr=\"euro.aprs2.net\", port=14580, callsign=\"N0CALL\", filter=\"u/APE6UB\"):\n        threading.Thread.__init__(self)\n        self.port = port\n        self.addr = addr\n        self.callsign = callsign\n        self.filter = filter\n        self.callbacks = []\n        self.timeout = 200\n        self.isRunning = True\n\n    @property\n    def onReceive(self):\n        return None\n\n    @onReceive.setter\n    def onReceive(self, client):\n        self.callbacks.append(client)\n\n    def connect(self):\n        connected=False\n        while not connected:\n            try:\n                self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n                self.socket.settimeout(self.timeout)\n                self.socket.connect((self.addr, self.port))\n                self.send(\"user %s-TS pass -1 vers aprs2ssdv 1.0 filter %s\" % (self.callsign, self.filter))\n                connected = True\n            except Exception as x:\n                time.sleep(5)\n\n    def run(self):\n        self.connect()\n        while self.isRunning:\n            try:\n                data = self.socket.recv(1024)\n                for cb in self.callbacks:\n                    cb(data.decode('utf-8'))\n            except socket.timeout as msg:\n                print(msg)\n                self.socket.close()\n                self.connect()\n            except Exception as x:\n                if getattr(x, 'errno', None) == 107: # Connection refused\n                    try:\n                        self.socket.connect((self.addr, self.port))\n                    except:\n                        time.sleep(1)\n                else:\n                    print((\"listener error:\", x))\n\n    # \n    def send(self, msg):\n        msg += \"\\r\\n\"\n        self.socket.send(msg.encode())\n\n    def close(self):\n        self.isRunning = False\n        self.socket.close()\n\n########################################################################################################################\n\ndef onMessage(msg):\n    # 4X6UB-11>APE6UB,WIDE1-1,WIDE2-1,qAO,4X6UB:{{KAAJt7FN/C\"Kb^{/!R=^:POi#r4J_;x-\"RsP68s%/xuXwLt{[p*b}S?bYy4Wu-u/4APE6UB,WIDE1-1,WIDE2-1,qAO,4X6UB:{{IAANt7F5FuWKA,os%rHvWZn[sY30`:J5&#E1enIE&K_^,q8b{-!Wl[${G,uR5WsaYpz;s+]xUA,FW0^tdO{(Gx-!bxwFL-/NX$wZZurY*.xuc0DAPE6UB,WIDE1-1,WIDE2-1,qAO,4X6UB:{{JAANt7F5FuWJ^levb2Y0?6`<7qSxX1S2~b{;u2Q[sY3Do:gY-P}k/#0d{87oi#V{FpqoOZY5%j)KaW_+rq~;64-c+/3FA\n    if msg == '' :\n        return\n    if msg.startswith(\"#\"):\n        print(msg)\n        return\n    header, payload = msg.split(\":\", 1)\n    tokens = header.split(',')\n    src, dest = tokens[0].split(\">\")\n    if dest == 'APE6UB':\n        print(\"assets:\", payload)\n\nif __name__ == \"__main__\":\n    client = APRSISClient(callsign=\"4X6UB\")\n    client.onReceive = onMessage\n    client.start()\n    print(\"started\")\n    while True:\n        try:\n            pass\n        except KeyboardInterrupt: # If CTRL+C is pressed, exit cleanly\n            break\n        except Exception as x:\n            client.close()\n            client = APRSISClient(callsign=\"4X6UB\")\n            client.onReceive = onMessage\n            client.start()\n    client.close()\n    print(\"done\")\n","sub_path":"aprsis.py","file_name":"aprsis.py","file_ext":"py","file_size_in_byte":3379,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"421565162","text":"from flask import render_template, url_for, redirect, Blueprint, flash\nfrom flaskblog.surveys.forms import EZbreadForm\nfrom flaskblog.surveys.utils import complete_survey\n\nsurveys = Blueprint('surveys', __name__)\n\n\n@surveys.route(\"/ezbread\", methods=['GET', 'POST'])\ndef ezbread():\n    form = EZbreadForm()\n    if form.validate_on_submit():\n        try:\n            code = form.survey_code.data\n            coupon, tellcode = complete_survey(code)\n            flash(f'Success! Coupon Code: {coupon}, TellCode: {tellcode}')\n            return redirect(url_for('main.home'))\n        except Exception as e:\n            print(e)\n            flash(f'Failure. Verify coupon code: {form.survey_code.data}')\n\n    return render_template('ezbread.html', title='EZ Bread', \n                           legend=\"It ain't easy...\", form=form)\n","sub_path":"flaskblog/surveys/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":829,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"28703292","text":"import os\nimport sys\n\nif __name__ == '__main__':\n    # set python path\n    sys.path.insert(0, os.getcwd())\n\n    # setup django : remove this when not inside a django app\n    DEPLOYMENT_LEVEL = os.environ.setdefault(\"DEPLOYMENT_LEVEL\", \"development\")\n    os.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"jtelips.settings_{dlevel}\".format(dlevel=DEPLOYMENT_LEVEL))\n    import django\n    django.setup()\n\nfrom base.utils import console_log, ArgumentParser, ReaderContext, WriterContext, LineStripReader\nfrom base.exporters import DOC_BEGIN_LINE, DOC_END_LINE\nfrom base.json import json_loads\n\n\ndef extract_title_abstract_claims(line):\n    d = json_loads(line)\n    title = d['title']\n    abstract = d['abstract']['normalized']\n    claims = [claim['plain'] for claim in d['claims']]\n    desc = d['description']['full_raw']\n    result = [title, abstract]\n    result.extend(claims)\n    result.append(desc)\n    return result\n\n\ndef main(**kwargs):\n    parser = ArgumentParser(**kwargs)\n    parser.add_argument('--input_file_path', type=str, default=None)\n    parser.add_argument('--output_file_path', type=str, default=None)\n    args = parser.parse_args()\n\n    console_log('{} start'.format(__file__))\n    with ReaderContext(path=args.input_file_path) as input_f, WriterContext(path=args.output_file_path) as output_f:\n        for line in LineStripReader(input_f):\n            sentences = extract_title_abstract_claims(line)\n            if sentences:\n                output_f.write(DOC_BEGIN_LINE)\n                for sentence in sentences:\n                    if sentence:\n                        output_f.write(sentence)\n                        output_f.write('\\n')\n                output_f.write(DOC_END_LINE)\n    console_log('{} end'.format(__file__))\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"patent/extractors.py","file_name":"extractors.py","file_ext":"py","file_size_in_byte":1794,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"42733785","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n__author__ = 'mtianyan'\n__date__ = '2018/11/10 09:22'\n\"\"\"\n  ┏┓   ┏┓+ +\n        ┏┛┻━━━┛┻┓ + +\n        ┃       ┃  \n        ┃   ━   ┃ ++ + + +\n        ████━████ ┃+\n        ┃       ┃ +\n        ┃   ┻   ┃\n        ┃       ┃ + +\n        ┗━┓   ┏━┛\n          ┃   ┃           \n          ┃   ┃ + + + +\n          ┃   ┃    Code is far away from bug with the animal protecting       \n          ┃   ┃ +     The divine beast blesses: code without bugs  \n          ┃   ┃\n          ┃   ┃  +         \n          ┃    ┗━━━┓ + +\n          ┃        ┣┓\n          ┃        ┏┛\n          ┗┓┓┏━┳┓┏┛ + + + +\n           ┃┫┫ ┃┫┫\n           ┗┻┛ ┗┻┛+ + + +\n\"\"\"\n\n\ndef is_isbn_or_key(word):\n    # isbn: isbn13 is made up of 13 digits 0-9\n    # isbn10 is made up of 10 digits 0-9 and contains some ' - '\n    isbn_or_key = 'key'\n    if len(word) == 13 and word.isdigit():\n        isbn_or_key = 'isbn'\n    short_word = word.replace(\"-\", '')\n    if \"-\" in word and len(short_word) == 10 and short_word.isdigit():\n        isbn_or_key = 'isbn'\n    return isbn_or_key\n","sub_path":"0-fisher/helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":1801,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"461050874","text":"## In deep learning we need to split the data into training, validation and test sets.\n\nimport os, random, shutil\n# from posix import times_result\ndef moveFile(img_Dir, label_Dir):\n    img_path = os.listdir(img_Dir)  # get the original image paths\n    # print(len(img_path))\n    filenumber = len(img_path)\n    rate = 0.1  # custom sampling ratio, e.g. picking 10 out of 100 images means 0.1\n    picknumber = int(filenumber * rate)  # take a number of images from the folder according to rate\n    sample = random.sample(img_path, picknumber)  # randomly select picknumber sample images\n    # print(sample)\n    for val_name in sample:\n        shutil.move(img_Dir + val_name, val_img_tarDir + val_name)\n        shutil.move(label_Dir + val_name[:-4] + '.txt', val_label_tarDir + val_name[:-4] + '.txt')\n    for train_name in img_path:\n        if train_name not in sample:\n            shutil.move(img_Dir + train_name, train_img_tarDir + train_name)\n            shutil.move(label_Dir + train_name[:-4] + '.txt', train_label_tarDir + train_name[:-4] + '.txt') \n    # print(img_path)\nif __name__ == '__main__':\n    img_Dir = \"images/train/\" # source image folder path\n    label_Dir = \"labels/train/\"\n\n    train_img_tarDir = 'datasets/object/images/train/' # destination folder paths\n    val_img_tarDir = 'datasets/object/images/val/'\n    train_label_tarDir = 'datasets/object/labels/train/'\n    val_label_tarDir = 'datasets/object/labels/val/'\n    moveFile(img_Dir, label_Dir)","sub_path":"move.py","file_name":"move.py","file_ext":"py","file_size_in_byte":1539,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"21259926","text":"def isLeapYear(year):\n    \"\"\"\n    Tests whether the input year is a leap year\n    @author: Nikolas Skoufis\n    @since: 15/8/14\n    @modified: 15/8/14\n    @parameter: year, the year to check\n    @precondition:\n    @poscondition:\n    @return:\n    @raises: ValueError if the year is not an integer or if the year is before the Gregorian calendar started in 1582\n    @complexity: O(1)\n    \"\"\"\n\n    # Check whether the year is an int\n    if not(type(year) is int):\n        raise TypeError(\"Input year was not an integer\")\n\n    # Check whether the Gregorian calendar had started\n    if year < 1592:\n        raise ValueError(\"Year is from before the Gregorian calendar had begun\")\n\n    # Make checks for whether it is a leap year using a flag\n    retVal = False\n    if year % 4 == 0 and year % 100 != 0:\n        retVal = True\n\n    if year % 400 == 0:\n        retVal = True\n\n    return retVal\n\ndef approxSin(argument):\n    \"\"\"\n    Since MIPS doesn't have access to any trig functions, we have to implement them ourselves.\n    We will implement them using the high precision algorithm given at http://lab.polygonal.de/?p=205\n    @author: Nikolas Skoufis\n    @since: 17/8/14\n    @modified: 17/8/14\n    @parameter: argument: The argument of the sin function to compute in degrees\n    @pre\n    @post\n    @return: An approximation to sin(argument)\n    @complexity: O(1)\n    \"\"\"\n\n    # Define pi approximately\n    pi = 3.14159265\n\n    # Make the argument between -180 and 180\n    if argument > 180:\n        argument -= 360\n    if argument < -180:\n        argument += 360\n\n    # Convert the argument to radians\n    argument = argument * 2 * pi / 360\n\n    # Now compute sine\n    if argument < 0:\n        sin = 1.27323954 * argument + 0.405284735 * argument * argument\n        if (sin < 0):\n            sin = .225 * (sin * -sin - sin) + sin\n        else:\n            sin = .225 * (sin * sin - sin) + sin\n\n    else:\n        sin = 1.27323954 * argument - 0.405284735 * argument * argument\n        if (sin < 0):\n            sin = .225 * (sin * -sin - sin) + sin\n        else:\n            sin = .225 * (sin * sin - sin) + sin\n\n    return sin\n\ndef sakamoto(day, month, year):\n    \"\"\"\n    Computes the day of the week for a given day, month and year\n    A C implementation can be found at http://en.wikipedia.org/wiki/Determination_of_the_day_of_the_week\n    and this implementation is based off of that\n    @author: Tomohiko Sakamoto, implemented in Python by Nikolas Skoufis\n    @since: 17/8/14\n    @modified: 17/8/14\n    @param: day: The day of the month ie. an integer from 1-31\n    @param: month: The month of the year ie. an integer from 1-12\n    @param: year: The year of the Gregorian calendar\n    @pre\n    @post\n    @return: An integer between 0 and 6 representing the day of the week with 0=Sunday, 1=Monday etc.\n    @complexity: O(1)\n    \"\"\"\n    # Setup an array which is used in the calculation\n    t = [0, 3, 2, 5, 0, 3, 5, 1, 4, 6, 2, 4]\n\n    if month < 3:\n        year -= 1\n\n    # Day of week computation. This appears to be doing something related to leap year correction\n    # followed by a lookup in the array t (which is presumably precomputed so that the algorithm works)\n    # and then finally doing a modulo with the number of days in the week\n    retVal = (year + year//4 - year//100 + year//400 + t[month - 1] + day) % 7\n    return int(retVal)\n\ndef julianDay(day, month, year):\n    \"\"\"\n    Computes the julian day\n    Method taken from http://www.ben-daglish.net/moon.shtml\n    @author: Nikolas Skoufis\n    @since: 18/8/14\n    @modified: 18/8/14\n    @param: day: day of the gregorian calendar\n    @param: month: month of the gregorian calendar\n    @param: year: year in the gregorian calendar\n    @pre\n    @post\n    @return: the julian day\n    @complexity: O(1)\n    \"\"\"\n    if year < 0:\n        year += 1\n\n    month += 1\n\n    if month <= 2:\n        year -= 1\n        month += 12\n\n    jul = (365.25 * year) // 1 + (30.6001 * month) // 1 + day + 1720995\n\n    if (day + 31 * (month + year * 12)) >= (15 + 31 * (10 + 12 * 1582)):\n        ja = (0.01 * year) // 1\n        jul = jul + 2 - ja + (0.25 * ja) // 1\n\n    return jul\n\ndef lunarPhase(day, month, year):\n    \"\"\"\n    Computes the lunar phase for a given day, month and year using a trig based approach.\n    Method taken from http://www.ben-daglish.net/moon.shtml\n    @author: Nikolas Skoufis\n    @since: 18/8/14\n    @modified: 18/8/14\n    @param: day: day of the year 1-31\n    @param: month: month of the year 1-12\n    @param: year: year\n    @pre\n    @post\n    @return: lunar phase day from 1-28\n    @complexity: O(1)\n    \"\"\"\n    n = (12.37 * (year - 1900 + ((1.0 * month - 0.5) / 12))) // 1 # // 1 gives the floor\n    degToRad = 3.14159265/180.0 # Conversion factor for degrees to radians\n    t = n / 1236.85\n    t2 = t * t\n    aS = 359.2242 + 29.105456 * n\n    aM = 306.0253 + 385.816918 * n + 0.010730 * t2\n    xtra = 0.75933 + 1.53058868 * n + ((1.178e-4) - (1.55e-7) * t) * t2\n    xtra += (0.1734 - 3.93e-4 * t) * approxSin(degToRad * aS) - 0.4068 * approxSin(degToRad * aM)\n    if (xtra > 0):\n        i = xtra // 1\n    else:\n        i = (xtra // 1) - 1\n    jl = julianDay(day, month, year)\n    jd = (2415020 + 28 * n) + 1\n    return (jl - jd + 30) % 30\n\nif __name__ == '__main__':\n    # The following code prints a calendar\n    # The user is asked for a month and a year, and optionally, a latitude and longitude\n    # The following features are included in the calendar:\n    # * Days of the week\n    # * The phase of the moon\n\n    # Setup an array with the number of days in each month\n    monthDays = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]\n\n\n    # Get input from the user\n    month = int(input(\"Please enter a month (1-12): \"))\n    year = int(input(\"Please enter a year (after 1592): \"))\n\n    # Do some input validation\n    if month <= 0 or month >= 13:\n        raise ValueError(\"Invalid month\")\n\n    if year < 1592:\n        raise ValueError(\"Years earlier than 1592 did not follow the Gregorian calendar\")\n\n    # First we will calculate the day of the week that the month starts on.\n    # We will use this later on as an offset for where we start printing the days of the 
month\n # To do this we call the Sakamoto algorithm\n firstDay = sakamoto(1, month, year)\n\n # Next we will calculate the lunar phase for the first day using a lunar phase algorithm\n firstPhase = lunarPhase(1, month, year)\n\n # Now we need to print the calendar\n # First, print the month name\n\n january = \"\"\" _\n | | __ _ _ __ _ _ __ _ _ __ _ _\n _ | |/ _` | '_ \\| | | |/ _` | '__| | | |\n| |_| | (_| | | | | |_| | (_| | | | |_| |\n \\___/ \\__,_|_| |_|\\__,_|\\__,_|_| \\__, |\n |___/ \"\"\"\n february = \"\"\" _____ _\n| ___|__| |__ _ __ _ _ __ _ _ __ _ _\n| |_ / _ \\ '_ \\| '__| | | |/ _` | '__| | | |\n| _| __/ |_) | | | |_| | (_| | | | |_| |\n|_| \\___|_.__/|_| \\__,_|\\__,_|_| \\__, |\n |___/ \"\"\"\n march = \"\"\" __ __ _\n| \\/ | __ _ _ __ ___| |__\n| |\\/| |/ _` | '__/ __| '_ \\\\\n| | | | (_| | | | (__| | | |\n|_| |_|\\__,_|_| \\___|_| |_|\"\"\"\n\n april = \"\"\" _ _ _\n / \\ _ __ _ __(_) |\n / _ \\ | '_ \\| '__| | |\n / ___ \\| |_) | | | | |\n/_/ \\_\\ .__/|_| |_|_|\n |_| \"\"\"\n may = \"\"\" __ __\n| \\/ | __ _ _ _\n| |\\/| |/ _` | | | |\n| | | | (_| | |_| |\n|_| |_|\\__,_|\\__, |\n |___/ \"\"\"\n june = \"\"\" _\n | |_ _ _ __ ___\n _ | | | | | '_ \\ / _ \\\\\n| |_| | |_| | | | | __/\n \\___/ \\__,_|_| |_|\\___|\"\"\"\n\n july = \"\"\" _ _\n | |_ _| |_ _\n _ | | | | | | | | |\n| |_| | |_| | | |_| |\n \\___/ \\__,_|_|\\__, |\n |___/ \"\"\"\n\n august = \"\"\" _ _\n / \\ _ _ __ _ _ _ ___| |_\n / _ \\| | | |/ _` | | | / __| __|\n / ___ \\ |_| | (_| | |_| \\__ \\ |_\n/_/ \\_\\__,_|\\__, |\\__,_|___/\\__|\n |___/ \"\"\"\n september = \"\"\" ____ _ _\n/ ___| ___ _ __ | |_ ___ _ __ ___ | |__ ___ _ __\n\\___ \\ / _ \\ '_ \\| __/ _ \\ '_ ` _ \\| '_ \\ / _ \\ '__|\n ___) | __/ |_) | || __/ | | | | | |_) | __/ |\n|____/ \\___| .__/ \\__\\___|_| |_| |_|_.__/ \\___|_|\n |_| \"\"\"\n october = \"\"\" ___ _ _\n / _ \\ ___| |_ ___ | |__ ___ _ __\n| | | |/ __| __/ _ \\| '_ \\ / _ \\ '__|\n| |_| | (__| || (_) | |_) | __/ |\n \\___/ \\___|\\__\\___/|_.__/ \\___|_| \"\"\"\n\n november = \"\"\" _ _ _\n| \\ | | _____ _____ _ __ ___ | |__ ___ _ __\n| \\| |/ _ \\ \\ / / _ \\ '_ ` _ \\| '_ \\ / _ \\ '__|\n| |\\ | (_) \\ V / __/ | | | | | |_) | __/ |\n|_| \\_|\\___/ \\_/ \\___|_| |_| |_|_.__/ \\___|_| \"\"\"\n\n december = \"\"\" ____ _\n| _ \\ ___ ___ ___ _ __ ___ | |__ ___ _ __\n| | | |/ _ \\/ __/ _ \\ '_ ` _ \\| '_ \\ / _ \\ '__|\n| |_| | __/ (_| __/ | | | | | |_) | __/ |\n|____/ \\___|\\___\\___|_| |_| |_|_.__/ \\___|_| \"\"\"\n\n monthText = [january,february,march,april,may,june,july,august,september,october,november,december]\n print(monthText[month-1])\n\n # Print the year\n print(\"\\n\\t\\t\\t\" + str(year))\n\n # Print the days of the week\n daysOfWeek = \"\\n| Sun | Mon | Tue | Wed | Thu | Fri | Sat |\"\n print(daysOfWeek)\n horizLine = \"------------------------------------------------------------------------------------\"\n print(horizLine)\n\n # set up a buffering line\n bufferLine = \"| | | | | | | |\"\n\n # Print the spaces where the month hasn't started yet\n blankSpace = \"| \"\n blankLine = blankSpace * firstDay\n calWeekStrings = [blankLine, \"\", \"\", \"\", \"\", \"\"]\n\n # Now start printing days\n # First set up some counters to track the current day and the day of the week\n currentDay = 1\n currentLunarDay = firstPhase\n weekDay = firstDay\n week = 0\n\n # Now start the loop\n while currentDay <= monthDays[month-1]:\n dayStr = str(currentDay)\n # If our day is one digit it should be in the middle of the three day characters\n # Otherwise, we buffer at the beginning by 
one space\n # If it's the full moon we put a F in the first space, if it's a new moon we put a N in the first space\n if currentLunarDay == 1:\n firstChar = \"N\"\n elif currentLunarDay == 14:\n firstChar = \"F\"\n else:\n firstChar = \" \"\n\n if currentDay < 10:\n dayStr = \"| \" + firstChar + dayStr + \" \"\n else:\n dayStr = \"| \" + firstChar + dayStr + \" \"\n\n # If the day of the week is saturday we need to add the final bar to our week and increment week\n if weekDay == 6:\n dayStr = dayStr + \"|\"\n\n # Add the dayStr to the correct week\n calWeekStrings[week] = calWeekStrings[week] + dayStr\n\n # Increment counter\n currentDay += 1\n\n # Increment or reset counters for week and weekDay\n if weekDay == 6:\n weekDay = 0\n week += 1\n else:\n weekDay += 1\n\n # Increment or reset counters for lunar day\n if currentLunarDay == 28:\n currentLunarDay = 1\n else:\n currentLunarDay += 1\n\n # Now we need to print the calendar\n for i in range(week+1):\n print(bufferLine)\n print(calWeekStrings[i])\n print(bufferLine)\n print(horizLine)\n\n\n","sub_path":"printCal.py","file_name":"printCal.py","file_ext":"py","file_size_in_byte":11426,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"352268154","text":"import os\n\nfrom unimport.config import is_ignore_files, is_ignore_folder\n\n\ndef get_files(direction):\n for root, dirs, files in os.walk(direction):\n if is_ignore_folder(root):\n continue\n for name in files:\n file_path = os.path.join(root, name)\n if file_path.endswith(\".py\") and not is_ignore_files(file_path):\n yield file_path\n","sub_path":"unimport/files.py","file_name":"files.py","file_ext":"py","file_size_in_byte":391,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"247184517","text":"from typing import Sequence\nimport yaml\nimport numpy as np\nimport gym, gym_fastsim\nimport pandas as pd\nfrom .Genotype import *\n\n\nclass Experience(object):\n\n def __init__(self, *args, **kwargs):\n super(Experience, self).__init__(*args)\n self.env = kwargs[\"env\"] if \"env\" in kwargs else None\n self.gym_env = None\n self.read(\"conf/conf.yaml\")\n \n def read(self,file_path : str):\n \"\"\"Construct an evaluator from a YAML file.\n Args:\n file_path (str): path to the yaml file.\n \"\"\"\n self.env = {}\n with open(file_path) as f:\n docs = yaml.load_all(f, Loader=yaml.FullLoader)\n self.env.update(list(docs)[0][\"conf\"])\n \n\n def prepareEnv(self):\n env_name = self.env['gym_name']\n self.gym_env = gym.make(self.env['gym_name'], **self.env['env_params'])\n\n def get_criterion_from_logs(self, logs : pd.DataFrame, criterion = \"dist_obj\"):\n L = [] \n for i in np.unique(logs.epoch):\n c = np.array(logs.loc[logs[\"epoch\"] == i, criterion].tail(1))[0]\n L.append(c)\n mean = np.mean(np.array(L))\n std = np.std(np.array(L))\n return mean,std\n\n def get_multi_criterions_from_logs(self, logs : pd.DataFrame, criterions = [\"dist_obj\"]):\n results = {}\n for criterion in criterions:\n L = []\n for i in np.unique(logs.epoch):\n c = np.array(logs.loc[logs[\"epoch\"] == i, criterion].tail(1))[0]\n L.append(c)\n mean = np.mean(np.array(L))\n std = np.std(np.array(L))\n results[criterion] = (mean, std)\n return results\n\n def simple_evaluation(self, genotype : Genotype, criterion = \"dist_obj\",max_steps = 1000, epochs = 10,render=True):\n logs = self.get_logs(genotype, max_steps = max_steps, epochs = epochs,render=render)\n L = [] \n for i in np.unique(logs.epoch):\n c = 
np.array(logs.loc[logs[\"epoch\"] == i, criterion].tail(1))[0]\n L.append(c)\n mean = np.mean(np.array(L))\n std = np.std(np.array(L))\n return mean,std\n\n\n def mo_evaluation(self, genotype : Genotype, criterions = [\"dist_obj\"],max_steps = 1000, epochs = 10,render=True):\n logs = self.get_logs(genotype, max_steps = max_steps, epochs = epochs,render=render)\n return self.get_multi_criterions_from_logs(logs,criterions), logs\n\n def get_logs(self,genotype: Genotype, max_steps = 1000, epochs = 10,render=True):\n if(not self.gym_env):\n self.prepareEnv()\n observation = self.gym_env.reset()\n spec = genotype.get_spec()\n observation, reward, done, info = self.gym_env.step([0]*spec[\"nb_output\"])\n action_scale_factor = self.gym_env.action_space.high\n episode_reward=0\n episode_bd=None\n self.gym_env.enable_display()\n logs = {}\n for epoch in range(epochs):\n episode_log={}\n observation = self.gym_env.reset()\n observation, reward, done, info = self.gym_env.step([0]*spec[\"nb_output\"])\n for t in range(max_steps):\n if render:\n self.gym_env.render()\n action=genotype.get_action(observation)\n action=action_scale_factor*np.array(action)\n observation, reward, done, info = self.gym_env.step(action)\n\n if (self.env[\"episode_reward_kind\"] == \"cumul\"):\n episode_reward+=reward\n\n for k in self.env[\"episode_log\"].keys():\n if (self.env[\"episode_log\"][k] == \"cumul\"):\n episode_log[k] = episode_log.get(k,[]) + [episode_log.get(k,[0])[-1] + info[k]]\n else:\n episode_log[k] = episode_log.get(k,[]) + [info[k]]\n if(done):\n break\n logs[epoch] = episode_log\n records = []\n for epoch,log in logs.items():\n arr = np.array(pd.DataFrame(log))\n for r in arr:\n records.append((epoch,*r))\n df = pd.DataFrame.from_records(records,columns=[\"epoch\"]+list(episode_log.keys()))\n return df\n\n\n\n\n\n\n \n\n","sub_path":"QD_Algorithms/Experience.py","file_name":"Experience.py","file_ext":"py","file_size_in_byte":4225,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"498781864","text":"## Imports\nimport os\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport tensorflow as tf\n\n## Settings\n\nx_0 = 0.1 # starting value\nnum = 1000 # total number of samples\nframe = 10 # number of samples in a timeframe\nsplit_ft = 0.8 # fitting/testing split\nneurons = 64 # number of LSTM neurons\nepochs = 100 # number of epochs\nbatch = 16 # training batch size\nsplit_vt = 0.1 # validation/training split\ntf.random.set_seed (123456)\n\n## End of settings\n\n## Generate chaotic data\nx_all = [x_0]\nfor i in range (num-1):\n x_0 = 4*x_0*(1 - x_0)\n x_all.append (x_0)\nx_all = np.array (x_all)\n\n#plt.plot (x)\n#plt.show ()\n\n\n## Prepare data\n# need (S,T,C,) array, where\n# S = number of samples\n# T = timesteps per sample\n# C = channels (1)\n# for x\n\n# create separate x and y sets with batched frames\nx = []\ny = []\nfor i in range (num - frame):\n f = []\n for j in range (frame):\n f.append ([x_all[i + j]])\n x.append (f)\n y.append (x_all[i + frame])\nx = np.array (x)\ny = np.array (y)\nnum -= frame\n# split x and y into fitting and testing\nfitt_num = int (num*split_ft)\nfitt_x = x[0:fitt_num]\nfitt_y = y[0:fitt_num]\ntest_num = num - fitt_num\ntest_x = x[fitt_num:num]\ntest_y = y[fitt_num:num]\n\nfitt_x += np.random.uniform (-0.10, 0.10, fitt_x.shape)\n\n## Build model\n\nlayers = tf.keras.layers\nmodel = tf.keras.Sequential ()\n\nmodel.add (layers.LSTM (neurons, stateful = False))\n#model.add 
(layers.Flatten (input_shape = fitt_x[0].shape))\n#model.add (layers.Dense (neurons, activation = 'tanh'))\n#model.add (layers.Dense (neurons, activation = 'tanh'))\n\nmodel.add (layers.Dense (1))\nmodel.compile (loss = 'mean_squared_error', optimizer = 'Adam')\n\n## Train\n\nresults = model.fit (fitt_x, fitt_y,\n epochs = epochs,\n batch_size = batch,\n validation_split = split_vt,\n shuffle = False)\n\n## Results\n\n# plot error history\nplt.plot (results.history['loss'], label = \"Loss\")\nplt.plot (results.history['val_loss'], label = \"Validation\")\nplt.legend ()\nplt.show ()\n\n# plot prediction graph\npred_y = model.predict (test_x)[:,0]\nprint (pred_y.shape)\nprint (test_y.shape)\npred_err = np.sqrt (sum ((pred_y - test_y)**2))\nprint (\"Prediction error\", pred_err)\nplt.plot (pred_y, label = \"Prediction\")\nplt.plot (test_y, label = \"Actual\")\nplt.legend ()\nplt.show ()\n","sub_path":"Code/Chaos/old_Logistics.py","file_name":"old_Logistics.py","file_ext":"py","file_size_in_byte":2412,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"1261125","text":"import os\nimport re\nimport sqlite3\nimport socket\nimport struct\nimport tempfile\n\nunperlize_re = re.compile('^/?(.*)/si$')\n\n\ndef join_sql_columns(columns_dict, set_index=None):\n def columns():\n for col_name, col_expression in columns_dict.items():\n if set_index is not None:\n col_expression = col_expression[set_index]\n\n if col_expression == col_name:\n yield col_name\n else:\n yield '{0} AS {1}'.format(col_expression or 'NULL', col_name)\n\n return ',\\n '.join(columns())\n\n\ndef make_empty_row(columns_dict):\n return dict((col, None) for col in columns_dict)\n\n\nclass cached_property(object):\n \"\"\"\n Decorator that converts a method with a single self argument into a\n property cached on the instance.\n \"\"\"\n def __init__(self, func):\n self.func = func\n\n def __get__(self, instance, unused_type=None):\n if instance is None:\n return self\n res = instance.__dict__[self.func.__name__] = self.func(instance)\n return res\n\n\nclass UdgerBase(object):\n db_filename = 'udgerdb_v3.dat'\n\n _client_columns = {\n 'client_id': (None, 'client_id'),\n 'class_id': (None, 'class_id'),\n\n 'ua_class': ('\"Crawler\"', 'client_classification'),\n 'ua_class_code': ('\"crawler\"', 'client_classification_code'),\n 'ua': ('name', 'name'),\n 'ua_engine': (None, 'engine'),\n 'ua_version': ('ver', None),\n 'ua_version_major': ('ver_major', None),\n 'crawler_last_seen': ('last_seen', None),\n 'crawler_respect_robotstxt': ('respect_robotstxt', None),\n 'crawler_category': ('crawler_classification', None),\n 'crawler_category_code': ('crawler_classification_code', None),\n 'ua_uptodate_current_version': (None, 'uptodate_current_version'),\n 'ua_family': ('family', 'name'),\n 'ua_family_code': ('family_code', 'name_code'),\n 'ua_family_homepage': ('family_homepage', 'homepage'),\n 'ua_family_icon': ('family_icon', 'icon'),\n 'ua_family_icon_big': (None, 'icon_big'),\n 'ua_family_vendor': ('vendor', 'vendor'),\n 'ua_family_vendor_code': ('vendor_code', 'vendor_code'),\n 'ua_family_vendor_homepage': ('vendor_homepage', 'vendor_homepage'),\n 'ua_family_info_url': ('\"https://udger.com/resources/ua-list/bot-detail?bot=\" || '\n 'REPLACE(family, \" \", \"%20\") || \"#id\" || udger_crawler_list.id',\n '\"https://udger.com/resources/ua-list/browser-detail?browser=\" || '\n 'REPLACE(name, \" \", \"%20\")'),\n }\n\n crawler_sql = \"\"\"\n SELECT\n %s\n FROM\n udger_crawler_list\n LEFT 
JOIN\n udger_crawler_class ON udger_crawler_class.id = udger_crawler_list.class_id\n WHERE\n ua_string = ?\n \"\"\" % join_sql_columns(_client_columns, 0)\n\n client_emptyrow = make_empty_row(_client_columns)\n client_emptyrow.update(\n ua_class=\"Unrecognized\",\n ua_class_code=\"unrecognized\",\n )\n\n client_sql = \"\"\"\n SELECT\n %s\n FROM\n udger_client_regex\n JOIN\n udger_client_list ON udger_client_list.id = udger_client_regex.client_id\n JOIN\n udger_client_class ON udger_client_class.id = udger_client_list.class_id\n WHERE\n ? REGEXP regstring\n ORDER BY\n sequence ASC\n LIMIT 1\n \"\"\" % join_sql_columns(_client_columns, 1)\n\n _os_columns = {\n 'os_family': 'family',\n 'os_family_code': 'family_code',\n 'os': 'name',\n 'os_code': 'name_code',\n 'os_homepage': 'homepage',\n 'os_icon': 'icon',\n 'os_icon_big': 'icon_big',\n 'os_family_vendor': 'vendor',\n 'os_family_vendor_code': 'vendor_code',\n 'os_family_vendor_homepage': 'vendor_homepage',\n 'os_info_url': '\"https://udger.com/resources/ua-list/os-detail?os=\" || '\n 'REPLACE(name, \" \", \"%20\")',\n }\n\n os_emptyrow = make_empty_row(_os_columns)\n\n os_sql = \"\"\"\n SELECT\n %s\n FROM\n udger_os_regex\n JOIN\n udger_os_list ON udger_os_list.id = udger_os_regex.os_id\n WHERE\n ? REGEXP regstring\n ORDER BY\n sequence ASC\n LIMIT 1\n \"\"\" % join_sql_columns(_os_columns)\n\n client_os_sql = \"\"\"\n SELECT\n %s\n FROM\n udger_client_os_relation\n JOIN\n udger_os_list ON udger_os_list.id = udger_client_os_relation.os_id\n WHERE\n client_id = ?\n \"\"\" % join_sql_columns(_os_columns)\n\n _device_columns = {\n 'device_class': 'name',\n 'device_class_code': 'name_code',\n 'device_class_icon': 'icon',\n 'device_class_icon_big': 'icon_big',\n 'device_class_info_url': '\"https://udger.com/resources/ua-list/device-detail?device=\" || '\n 'REPLACE(name, \" \", \"%20\")',\n }\n\n device_emptyrow = make_empty_row(_device_columns)\n\n device_sql = \"\"\"\n SELECT\n %s\n FROM\n udger_deviceclass_regex\n JOIN\n udger_deviceclass_list ON udger_deviceclass_list.id = udger_deviceclass_regex.deviceclass_id\n WHERE\n ? 
REGEXP regstring\n ORDER BY\n sequence ASC\n LIMIT 1\n \"\"\" % join_sql_columns(_device_columns)\n\n client_class_sql = \"\"\"\n SELECT\n %s\n FROM\n udger_deviceclass_list\n JOIN\n udger_client_class ON udger_client_class.deviceclass_id = udger_deviceclass_list.id\n WHERE\n udger_client_class.id = ?\n \"\"\" % join_sql_columns(_device_columns)\n\n _ip_columns = {\n 'ip_classification': 'ip_classification',\n 'ip_classification_code': 'ip_classification_code',\n 'ip_last_seen': 'ip_last_seen',\n 'ip_hostname': 'ip_hostname',\n 'ip_country': 'ip_country',\n 'ip_country_code': 'ip_country_code',\n 'ip_city': 'ip_city',\n 'crawler_name': 'name',\n 'crawler_ver': 'ver',\n 'crawler_ver_major': 'ver_major',\n 'crawler_family': 'family',\n 'crawler_family_code': 'family_code',\n 'crawler_family_homepage': 'family_homepage',\n 'crawler_family_vendor': 'vendor',\n 'crawler_family_vendor_code': 'vendor_code',\n 'crawler_family_vendor_homepage': 'vendor_homepage',\n 'crawler_family_icon': 'family_icon',\n 'crawler_family_info_url': '\"https://udger.com/resources/ua-list/bot-detail?bot=\" || '\n 'REPLACE(family, \" \", \"%20\") || \"#id\" || udger_crawler_list.id',\n 'crawler_last_seen': 'last_seen',\n 'crawler_category': 'crawler_classification',\n 'crawler_category_code': 'crawler_classification_code',\n 'crawler_respect_robotstxt': 'respect_robotstxt',\n }\n\n ip_sql = \"\"\"\n SELECT\n %s\n FROM\n udger_ip_list\n JOIN\n udger_ip_class ON udger_ip_class.id=udger_ip_list.class_id\n LEFT JOIN\n udger_crawler_list ON udger_crawler_list.id=udger_ip_list.crawler_id\n LEFT JOIN\n udger_crawler_class ON udger_crawler_class.id=udger_crawler_list.class_id\n WHERE\n ip = ?\n ORDER BY\n sequence\n \"\"\" % join_sql_columns(_ip_columns)\n\n _datacenter_columns = {\n 'datacenter_name': 'name',\n 'datacenter_name_code': 'name_code',\n 'datacenter_homepage': 'homepage',\n }\n\n ip_datacenter_emptyrow = make_empty_row(_ip_columns)\n ip_datacenter_emptyrow.update(\n make_empty_row(_datacenter_columns),\n ip_ver=None,\n )\n\n datacenter_sql = \"\"\"\n SELECT\n %s\n FROM\n udger_datacenter_range\n JOIN\n udger_datacenter_list ON udger_datacenter_range.datacenter_id = udger_datacenter_list.id\n WHERE\n iplong_from <= ? 
AND iplong_to >= ?\n \"\"\" % join_sql_columns(_datacenter_columns)\n\n def __init__(self, data_dir=None):\n self.data_dir = data_dir or tempfile.gettempdir()\n self.regexp_cache = {}\n\n @staticmethod\n def dict_factory(cursor, row):\n return dict(\n (col[0], row[idx])\n for idx, col in enumerate(cursor.description)\n )\n\n def regexp_func(self, expr, item):\n global unperlize_re\n\n expr_re = self.regexp_cache.get(expr)\n\n if expr_re is None:\n m = unperlize_re.match(expr)\n if m:\n expr = m.group(1) # strip / from the beginning and /si from the end\n\n expr_re = re.compile(expr, re.I | re.S)\n self.regexp_cache[expr] = expr_re\n\n self.last_regexp_match = expr_re.search(item)\n\n return bool(self.last_regexp_match)\n\n @cached_property\n def db_cursor(self):\n db_filepath = os.path.join(self.data_dir, self.db_filename)\n db = sqlite3.connect(db_filepath)\n db.create_function(\"REGEXP\", 2, self.regexp_func)\n\n cursor = db.cursor()\n cursor.row_factory = self.dict_factory\n\n return cursor\n\n def db_get_first_row(self, sql, *params):\n self.last_regexp_match = None\n\n self.db_cursor.execute(sql, params)\n\n for row in self.db_cursor:\n return row\n\n @staticmethod\n def normalize_ipaddress(ip_string):\n try:\n ip_string = socket.inet_ntop(socket.AF_INET, socket.inet_pton(socket.AF_INET, ip_string))\n ipv4_int = struct.unpack(\"!L\", socket.inet_aton(ip_string))[0]\n except socket.error:\n ip_string = socket.inet_ntop(socket.AF_INET6, socket.inet_pton(socket.AF_INET6, ip_string))\n ipv4_int = None\n\n return ip_string, ipv4_int\n","sub_path":"udger/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":10989,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"491185161","text":"# -*- coding:utf-8 -*-\n\n\"\"\"\n文件说明:\n设置Button 例子,设置一个button ,点击后显示label\n\n\"\"\"\nimport tkinter\n\ndef showlabel():\n global baseFrame\n # 在函数中定义了一个label\n # label 的父组件是baseFrame\n lb = tkinter.Label(baseFrame, text='显示Label')\n lb.pack()\n\nbaseFrame = tkinter.Tk()\nbaseFrame.wm_title('test button')\n# 生成一个按钮\n# command 参数指示:当按钮被按下的时候,执行哪个函数\n\nbtn = tkinter.Button(baseFrame, text='SHOW LABEL', command=showlabel)\nbtn.pack()\n\nbaseFrame.mainloop()","sub_path":"tuling/Tkinter/test_button.py","file_name":"test_button.py","file_ext":"py","file_size_in_byte":566,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"498371165","text":"import subprocess as sp\nimport pymysql\nimport pymysql.cursors\n\n# import selection\n# import projection\n# import aggregate\n# import search\n# import analysis\n# import insert\n# import update\n# import delete\nfrom functions import selection, projection, aggregate, search, analysis, insert, update, delete\n\nclear = lambda : sp.call('clear', shell=True)\nwait = lambda: input(\"Press ENTER key to continue\")\n\ndef dispatch(ch):\n if ch == 1:\n selection.getinfoCustomer(con, cur)\n elif ch == 2:\n selection.getinfoEmployee(con, cur)\n elif ch == 3:\n selection.Get_Salary(con, cur)\n elif ch == 4:\n projection.getCustomerperModel(con, cur)\n elif ch == 5:\n aggregate.get_total_salary(con, cur)\n elif ch == 6:\n aggregate.Total_Profit(con, cur)\n elif ch == 7:\n search.getinfo_accto_vehicleID_partialsearch(con, cur)\n elif ch == 8:\n analysis.sales_generated(con, cur)\n elif ch == 9:\n analysis.Get_Inventory(con, cur)\n elif ch == 10:\n analysis.getVehiclesSoldperModel(con, cur)\n elif ch == 11:\n insert.add_employee(con, cur)\n elif ch == 12:\n 
insert.Add_Sale(con, cur)\n elif ch == 13:\n insert.insertCustomer(con, cur)\n elif ch == 14:\n insert.insertVehicle(con, cur)\n elif ch == 15:\n update.update_contact(con, cur)\n elif ch == 16:\n update.Update_Address(con, cur)\n elif ch == 17:\n delete.removeEmployee(con, cur)\n elif ch == 18:\n delete.Return_Vehicle(con, cur)\n else:\n print(\"Invalid Choice!\")\n wait()\n \n\n\nif __name__ == '__main__':\n flag = True\n while flag:\n clear()\n\n username = \"root\"\n password = \"blahblah\"\n\n try: \n con = pymysql.connect(\n host=\"localhost\",\n user=username,\n password=password,\n port=5005,\n db=\"non_prod\",\n cursorclass=pymysql.cursors.DictCursor\n )\n\n clear()\n if con.open:\n print(\"Connected\")\n else:\n print(\"Failed to connect\")\n \n with con.cursor() as cur:\n while True:\n clear()\n\n print(\"1. Get Customer Details\")\n print(\"2. Get Employee Details\")\n print(\"3. Get Salary of Employee\")\n print(\"4. Get Customers by Vehicle Model\")\n print(\"5. Get Total Salary being paid\")\n print(\"6. Get Total Profit\")\n print(\"7. Search for a car\")\n print(\"8. Get Sales Report\")\n print(\"9. Get Inventory\")\n print(\"10. Get Models sold\")\n print(\"11. Add Employee\")\n print(\"12. Add Sale\")\n print(\"13. Add Customer\")\n print(\"14. Add Vehicle\")\n print(\"15. Update Contact\")\n print(\"16. Update Address\")\n print(\"17. Fire Employee\")\n print(\"18. Return Vehicle to factory\")\n\n\n\n\n ch = int(input(\"Enter Choice: \"))\n clear()\n if ch == -1:\n flag = False\n break\n else:\n dispatch(ch)\n \n except Exception as e:\n clear()\n print(\"Couldn't Connect\")\n print(\">>\", e)\n wait()\n\n\n","sub_path":"project-cli.py","file_name":"project-cli.py","file_ext":"py","file_size_in_byte":3562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"591928203","text":"# coding=utf-8\n\"\"\"Bulls and Cows.\n\n>>> solve = _solve1\n>>> solve('1807', '7810')\n'1A3B'\n>>> solve('1123', '0111')\n'1A1B'\n\"\"\"\n\nimport collections\nimport operator\n\n\n# 3 lines: code from: https://discuss.leetcode.com/topic/28457/3-lines-in-python\ndef _solve(secret, guess):\n bulls = sum(map(operator.eq, secret, guess))\n both = sum(min(secret.count(x), guess.count(x)) for x in '0123456789')\n return '{}A{}B'.format(bulls, both - bulls)\n\n\n# one pass: approach from: https://discuss.leetcode.com/topic/28463/one-pass-java-solution\ndef _solve1(secret, guess):\n bulls = cows = 0\n numbers = collections.Counter()\n for i in xrange(len(secret)):\n if secret[i] == guess[i]:\n bulls += 1\n else:\n if numbers[secret[i]] < 0:\n cows += 1\n if numbers[guess[i]] > 0:\n cows += 1\n numbers[secret[i]] += 1\n numbers[guess[i]] -= 1\n return '{}A{}B'.format(bulls, cows)\n","sub_path":"medium/299.py","file_name":"299.py","file_ext":"py","file_size_in_byte":971,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"481250132","text":"import numpy as np\nimport gdal\nimport matplotlib.pyplot as plt\nimport jenkspy\nfrom collections import defaultdict\nimport osr\nimport seaborn as sb\nimport pandas as pd\n\ndef clean(m):\n m[np.isinf(m)]=0\n m[np.isnan(m)]=0\n return m\n\ndef scale(l):\n X = np.array(l)\n X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))\n return X_std\n\ndef write_tif(m, path):\n spatialref = osr.SpatialReference()\n spatialref.ImportFromEPSG(28992)\n # Just point to a KNMI layer to get the reference metadata/size 
(350x300)\n tif_template = gdal.Open(\"E:/RSData/KNMI/yearly/tmax/2014_tmax.tif\")\n rows = tif_template.RasterXSize\n cols = tif_template.RasterYSize\n\n # Get the origin coordinates for the tif file\n geotransform = tif_template.GetGeoTransform()\n outDs = tif_template.GetDriver().Create(path, rows, cols, 1, gdal.GDT_Float32)\n outBand = outDs.GetRasterBand(1)\n\n # write the data\n outDs.GetRasterBand(1).WriteArray(m)\n\n # flush data to disk, set the NoData value and calculate stats\n outBand.FlushCache()\n outBand.SetNoDataValue(-1)\n\n # georeference the image and set the projection\n outDs.SetGeoTransform(geotransform)\n outDs.SetProjection(spatialref.ExportToWkt())\n outDs = None\n outBand = None\n\ndef process_lyr(path, vble):\n # Load data and locate the zero and non-zero pixels\n tif = gdal.Open(path)\n band = tif.GetRasterBand(1).ReadAsArray().astype(np.float)\n band[np.isnan(band)]=-1\n pos_nz = np.where(band>0.0)\n pos_ze = np.where(band<=0.0)\n canvas = paint_zeros(pos_ze, 0)\n\n # Add to a list the >0 pixels\n lv = []\n rows = pos_nz[0]\n cols = pos_nz[1]\n for i in range(len(rows)):\n r = rows[i]\n c = cols[i]\n lv.append(band[r,c])\n\n # Scale and re-locate non-zero pixels in place\n X_std = scale(lv)\n values = np.zeros((350, 300)) # np.zeros, not np.empty: the untouched cells must be 0, not uninitialized memory\n for i in range(pos_nz[0].shape[0]):\n r = pos_nz[0][i]\n c = pos_nz[1][i]\n values[r,c] = X_std[i]\n\n # Create exposure layer\n lyr = canvas + values\n lyr_ori = canvas + band\n\n print(\"PROCESSING: {0}\".format(vble))\n print(\"-\" * 40)\n print(\"Exposure values (before scaling)\")\n print(\"\\tMin: {0}\\tMax: {1}\".format(np.amin(band[band>=0]), np.amax(band)))\n print(\"Exposure values (after scaling)\")\n print(\"\\tMin: {0}\\tMax: {1}\".format(np.amin(lyr), np.amax(lyr)))\n print(\"Checking for extreme values\")\n print(\"\\tNaNs: {0}, \\tInf: {1}, \\tNegInf: {2}\".format(np.isnan(lyr).any(), np.isinf(lyr).any(), np.isneginf(lyr).any()))\n print()\n\n return [lyr, lyr_ori]\n\n\ndef process_layers(path_ris, path_exp, path_haz_irn):\n risk, risk_ori = process_lyr(path_ris, \"RISK\")\n exposure, exposure_ori = process_lyr(path_exp, \"EXPOSURE\")\n hazard, hazard_ori = process_lyr(path_haz_irn, \"HAZARD\")\n return [risk, exposure, hazard, risk_ori, exposure_ori, hazard_ori]\n\n\ndef goodness_of_variance_fit(array, classes):\n # classes = jenks(array, classes)\n the_breaks = np.round(jenkspy.jenks_breaks(array.tolist(), nb_class=classes), decimals=2).tolist()\n print(\"These are the breaks for the gvf fit: \", the_breaks)\n classifiedd = np.array([classify(np.round(i, decimals=2), the_breaks) for i in array])\n maxz = np.amax(classifiedd)\n zone_indices = [[idx for idx, val in enumerate(classifiedd) if zone + 1 == val] for zone in range(maxz)]\n sdam = np.sum((array - array.mean()) ** 2)\n array_sort = [np.array([array[index] for index in zone]) for zone in zone_indices]\n sdcm = np.sum([np.sum((cla - cla.mean()) ** 2) for cla in array_sort])\n gvf = (sdam - sdcm) / sdam\n return gvf\n\ndef classify(value, breaks):\n # print(\"Value and breaks: \", value, breaks)\n for i in range(1, len(breaks)):\n if value < breaks[i]:\n return i\n return len(breaks) - 1\n\ndef natural_breaks(m):\n gvf = 0.0\n nclasses = 2\n print(\"Calculating breaks\", m.shape)\n while gvf < 0.85 and nclasses < 10:\n gvf = goodness_of_variance_fit(m, nclasses)\n print(\"\\tGVF for {0} classes: {1}\".format(nclasses, round(gvf,2)))\n nclasses += 1\n\n print(\"Running jenks with {0} classes\".format(nclasses-1))\n # breaks = jenks(m, nclasses-1)\n breaks = np.round(jenkspy.jenks_breaks(m.tolist(), nb_class=nclasses-1), decimals=2)\n print(\"\\t Breaks in: \", breaks)\n classified = np.array([classify(i, breaks) for i in m])\n return classified\n
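\n# --- Editor's note: the helper below is an illustrative sketch added during editing; it is not part of the original script. On a toy array it shows what goodness_of_variance_fit() computes: GVF = (SDAM - SDCM) / SDAM, the share of total squared deviation explained by the Jenks classes, which natural_breaks() drives towards >= 0.85 by adding classes.\ndef _gvf_demo():\n toy = np.array([1.0, 1.1, 1.2, 5.0, 5.1, 9.8, 10.0]) # three visually obvious clusters\n for k in (2, 3):\n print(\"GVF with {0} classes: {1:.3f}\".format(k, goodness_of_variance_fit(toy, k)))\n print(\"classes:\", natural_breaks(toy))\n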
\ndef paint_zeros(pos_zer, v):\n canvas = np.zeros((350, 300)) # np.zeros, not np.empty: unassigned positions must be 0\n rows = pos_zer[0]\n cols = pos_zer[1]\n\n for i in range(len(rows)):\n r = rows[i]\n c = cols[i]\n canvas[r, c] = v\n\n return canvas\n\n\ndef find_pos_nonzero_values(exposure):\n dic = defaultdict(list)\n for i in range(exposure.shape[0]):\n for j in range(exposure.shape[1]):\n if exposure[i, j] > 0.0:\n key = (i,j)\n dic[key] = [i, j, exposure[i, j]]\n return dic\n\ndef matcher(expnz, classified):\n l = []\n for i in range(len(expnz)): # was len(lv): lv is a module-level global, not an argument of this function\n item = expnz[i] + [classified[i]]\n l.append(item)\n return l\n\ndef place(l):\n r = np.zeros((350, 300)) # np.zeros, not np.empty: unwritten cells must be 0\n c = np.zeros((350, 300))\n\n for item in l:\n rowpos = item[0]\n colpos = item[1]\n valraw = item[2]\n valcls = item[3]\n r[rowpos, colpos] = valraw\n c[rowpos, colpos] = valcls\n return [r, c]\n\ndef do_boxplots(risk, hazard, exposure):\n sb.set_style(\"darkgrid\", {\"axes.facecolor\": \".9\"})\n df = pd.DataFrame()\n df['Risk'] = risk.ravel()\n df['Hazard'] = hazard.ravel()\n df['ExpRaw'] = exposure.ravel()\n df['Exposure'] = exposure.ravel()\n\n df2 = df.copy()\n\n # .loc avoids pandas chained assignment, which may silently write to a copy\n df.loc[df2[\"Exposure\"]==1, 'Exposure'] = \"Low\"\n df.loc[df2[\"Exposure\"]==2, 'Exposure'] = \"Medium\"\n df.loc[df2[\"Exposure\"]==3, 'Exposure'] = \"High\"\n\n exp_nz = df[\"ExpRaw\"].isin([1, 2, 3, 27])\n print(df[\"ExpRaw\"].unique())\n\n pal = {\"Low\":\"#0084CC\", \"Medium\":\"#F0DF6A\", \"High\":\"#D93223\"}\n\n plt.subplot(1, 2, 1)\n ax1 = sb.boxplot(\"Exposure\", \"Risk\", data=df[exp_nz], palette=pal)\n ax1.axes.set_title(\"Title\", fontsize=36)\n ax1.set_xlabel(\"Exposure\", fontsize=28)\n ax1.set_ylabel(\"Risk\", fontsize=28)\n ax1.tick_params(labelsize=24)\n plt.title(\"(a) Exposure (Classified) vs. Risk\", size=36)\n\n plt.subplot(1, 2, 2)\n ax2 = sb.boxplot(\"Exposure\", \"Hazard\", data=df[exp_nz], palette=pal)\n ax2.axes.set_title(\"Title\", fontsize=36)\n ax2.set_xlabel(\"Exposure\", fontsize=28)\n ax2.set_ylabel(\"Hazard\", fontsize=28)\n ax2.tick_params(labelsize=24)\n plt.title(\"(b) Exposure (Classified) vs. Hazard\", size=36)\n\n print(df[\"Hazard\"].unique())\n print(df[\"Risk\"].unique())\n\ndef save_fig_maximized(path_fig, name_fig):\n manager = plt.get_current_fig_manager()\n manager.full_screen_toggle()\n manager.window.showMaximized()\n fig = plt.gcf()\n print(manager)\n plt.show()\n print(\"Saving in... \", path_fig.format(name_fig))\n fig.savefig(path_fig.format(name_fig), format=\"pdf\", dpi=300)\n\n\n################\n# Main program #\n################\n\n# In this version I am getting rid of the MinMaxScaler, because it has some\n# side effects with the existence of -99 values. 
I will also get rid of the -99\n# to focus on making the cross plots without them.\n\n# I am also using the hazard as the maximum for all years in the time-series\n\npath_ris_tif = r\"D:\\PycharmProjects\\IGM_PhD_Materials\\data\\P03\\in\\Risk_Tekenradar_2006-2016_1km_RD_New.tif\"\npath_haz_tif = r\"D:\\PycharmProjects\\IGM_PhD_Materials\\data\\P03\\in\\NL_Hazard_Mean_2006-2016_Max.tif\"\npath_exp_tif = r\"D:\\PycharmProjects\\IGM_PhD_Materials\\data\\P03\\in\\Exposure_RD_New.tif\"\npath_exp_out_tif = r\"D:\\PycharmProjects\\IGM_PhD_Materials\\data\\P03\\out\\Exposure_RD_New_classified.tif\"\n\nrisk, exposure, hazard, risk_ori, exposure_ori, hazard_ori = process_layers(path_ris_tif, path_exp_tif, path_haz_tif)\n\npos_zer = np.where(exposure==0.0)\n\ncanvas = paint_zeros(pos_zer, 27)\n\ndicpos = find_pos_nonzero_values(exposure)\n\nexposure_nonzero = []\nfor key in sorted(dicpos.keys()):\n exposure_nonzero.append(dicpos[key])\n\nlv = [item[2] for item in exposure_nonzero]\n\nclassified = natural_breaks(np.array(lv))\n\nmatched = matcher(exposure_nonzero, classified)\n\nrawexp, clsexp = place(matched)\n\ntif = canvas + clsexp\n\n# This is to avoid an outlier causing visual cluttering\nhazard_ori[hazard_ori<2] = 34\ndo_boxplots(risk_ori, hazard_ori, clsexp)\n\nplt.clf()\nplt.imshow(clsexp, interpolation=\"None\")\nplt.colorbar()\nprint(\"Uniques: \", np.unique(clsexp, return_counts=True))\nplt.show()\n\n# write_tif(tif, path_exp_out_tif)\n\n","sub_path":"analysis/00_calculate_exposure_classified.py","file_name":"00_calculate_exposure_classified.py","file_ext":"py","file_size_in_byte":8591,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"285782697","text":"from string import ascii_letters\n\nascii_letters += \" \"\n\ndef longestWord(sen):\n # build a filtered copy: calling list.remove() while iterating the same list skips characters\n sen = [l for l in sen if l in ascii_letters]\n\n words = \"\"\n words = words.join(sen).split(\" \")\n\n longest_word = words[0]\n for i in range(0, len(words)):\n if len(words[i]) > len(longest_word):\n longest_word = words[i]\n\n return longest_word\n","sub_path":"coderbyte/longestword.py","file_name":"longestword.py","file_ext":"py","file_size_in_byte":403,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"90248028","text":"# -*- coding: UTF-8 -*- \nimport http.client\nimport urllib\nimport json\nfrom base import log\nfrom base import base\nfrom base import config\nfrom base import cache\n\nlog = log.Log(__name__)\n\nclass IndustryInfo:\n parentCode = '' # PARENTCODE parent category code\tvarchar\t\n classCode = ''\t # SORTCODE category code\tvarchar\t\n className = ''\t # SORTNAME category name\tvarchar\t\n classNameEn = '' # F001V\tcategory name (English)\tvarchar\t\n # F002D\tend date\tDATE\t\n industryCode = '' # F003V\tindustry type code\tvarchar\t\n industryType = '' # F004V\tindustry type\tvarchar\n\n def __init__(self, industry):\n self.parse(industry)\n\n def parse(self, industry):\n self.parentCode = industry['PARENTCODE']\n self.classCode = industry['SORTCODE']\n self.className = industry['SORTNAME']\n self.classNameEn = industry['F001V']\n self.industryCode = industry['F003V']\n self.industryType = industry['F004V']\n\ndef callService(industryType, industryCode):\n url = '/api/stock/p_public0002'\n params = {\n 'indtype' : industryType,\n 'indcode' : industryCode\n }\n respContent = base.cacheService(config.cache_industry, url, params)\n if respContent == '':\n return ''\n respContent = json.loads(respContent)\n records = respContent['records']\n recordDict = {}\n for item in records:\n obj = IndustryInfo(item)\n recordDict[obj.classCode] = obj\n return recordDict\n
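\n# --- Editor's note: illustrative sketch added during editing, not part of the original module; it assumes the backing service configured above is reachable. It shows how the mapping returned by callService()/industryClass() is meant to be consumed.\ndef _industry_demo():\n classes = industryClass('008003') # Shenyin & Wanguo standard, per the docstring below\n for code, info in classes.items():\n print(code, info.className, info.industryType)\n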
\ndef industryClass(industryType):\n ''' Get all industry class info by industryType\n \n Args:\n industryType: industry classification standard:\n 008001\tCSRC (China securities regulator) industry classification \n 008002\tJuchao (CNINFO) industry classification \n 008003\tShenyin & Wanguo industry classification \n 008004\tNew Fortune industry classification \n 008005\tSASAC industry classification \n 008006\tJuchao detailed sector classification \n 008007\tTianxiang industry classification \n 008008\tGlobal Industry Classification Standard (GICS) \n\n Returns:\n IndustryInfo object dict, eg: {classCode : objIndustryInfo, ... }\n '''\n return callService(industryType, '')\n\ndef swIndustryClass():\n '''Get Shen Yin Wang Guo industry class info.\n \n Returns:\n IndustryInfo object dict, eg: {classCode : objIndustryInfo, ... }\n '''\n return callService('008003', '')\n\n\n\n","sub_path":"cninfo/common/industryClass.py","file_name":"industryClass.py","file_ext":"py","file_size_in_byte":2390,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"311488180","text":"# -*- coding: utf-8 -*-\n\nfrom __future__ import division, print_function\n\n__all__ = [\"load_light_curves_for_kic\", \"load_light_curves\"]\n\nimport os\nimport fitsio\nimport logging\nimport requests\nimport numpy as np\nfrom scipy.ndimage.measurements import label as contig_label\n\nfrom .catalogs import KOICatalog\nfrom .settings import TEXP, PEERLESS_DATA_DIR\n\n\ndef load_light_curves_for_kic(kicid, clobber=False, remove_kois=True, **kwargs):\n # Make sure that the data directory exists.\n bp = os.path.join(PEERLESS_DATA_DIR, \"data\")\n try:\n os.makedirs(bp)\n except os.error:\n pass\n\n # Get the list of data URLs.\n urls = _get_mast_light_curve_urls(kicid)\n\n # Loop over the URLs and download the files if needed.\n fns = []\n for url in urls:\n fn = os.path.join(bp, url.split(\"/\")[-1])\n fns.append(fn)\n if os.path.exists(fn) and not clobber:\n continue\n\n # Download the file.\n r = requests.get(url)\n if r.status_code != requests.codes.ok:\n r.raise_for_status()\n with open(fn, \"wb\") as f:\n f.write(r.content)\n\n # Load the light curves.\n if remove_kois:\n kwargs[\"remove_kois\"] = kicid\n return load_light_curves(fns, **kwargs)\n\n\ndef load_light_curves(fns, pdc=True, min_break=10, delete=False,\n remove_kois=None, downsample=1):\n # Find any KOIs.\n if remove_kois is not None:\n df = KOICatalog().df\n kois = df[df.kepid == remove_kois]\n if len(kois):\n logging.info(\"Removing {0} known KOIs\".format(len(kois)))\n\n # Load the light curves.\n lcs = []\n for fn in fns:\n # Load the data.\n data, hdr = fitsio.read(fn, header=True)\n texp = hdr[\"INT_TIME\"] * hdr[\"NUM_FRM\"] / (24. * 60. 
* 60.)\n x = data[\"TIME\"]\n q = data[\"SAP_QUALITY\"]\n if pdc:\n y = data[\"PDCSAP_FLUX\"]\n yerr = data[\"PDCSAP_FLUX_ERR\"]\n else:\n y = data[\"SAP_FLUX\"]\n yerr = data[\"SAP_FLUX_ERR\"]\n\n # Compute the median error bar.\n yerr = np.median(yerr[np.isfinite(yerr)])\n\n # Resample the time series.\n if downsample > 1:\n # Reshape the arrays to downsample.\n downsample = int(downsample)\n l = len(x) // downsample * downsample\n inds = np.arange(l).reshape((-1, downsample))\n x, y, q = x[inds], y[inds], q[inds]\n\n # Ignore missing points.\n m = np.isfinite(y) & np.isfinite(x) & (q == 0)\n x[~m] = 0.0\n y[~m] = 0.0\n x = np.sum(x, axis=1)\n y = np.sum(y, axis=1)\n q = np.min(q, axis=1)\n\n # Take the mean.\n norm = np.sum(m, axis=1)\n m = norm > 0.0\n x[m] /= norm[m]\n x[~m] = np.nan\n y[m] /= norm[m]\n y[~m] = np.nan\n\n # Update the exposure time.\n texp = downsample * texp\n\n # Load the meta data.\n hdr = fitsio.read_header(fn, 0)\n meta = dict(\n channel=hdr[\"CHANNEL\"],\n skygroup=hdr[\"SKYGROUP\"],\n module=hdr[\"MODULE\"],\n output=hdr[\"OUTPUT\"],\n quarter=hdr[\"QUARTER\"],\n season=hdr[\"SEASON\"],\n )\n\n # Remove any KOI points.\n if remove_kois is not None:\n for _, koi in kois.iterrows():\n period = float(koi.koi_period)\n t0 = float(koi.koi_time0bk) % period\n tau = float(koi.koi_duration) / 24.\n m = np.abs((x-t0+0.5*period) % period-0.5*period) < tau\n y[m] = np.nan\n\n # Remove bad quality points.\n y[q != 0] = np.nan\n\n # Find and flag long sections of missing NaNs.\n lbls, count = contig_label(~np.isfinite(y))\n for i in range(count):\n m = lbls == i+1\n # Label sections of missing fluxes longer than min_break points\n # by setting the times equal to NaN.\n if m.sum() > min_break:\n x[m] = np.nan\n\n # Split into months.\n m = np.isfinite(x)\n gi = np.arange(len(x))[m]\n bi = np.arange(len(x))[~m]\n if len(bi):\n bi = bi[(bi > gi[0]) & (bi < gi[-1])]\n d = np.diff(bi)\n chunks = [slice(gi[0], bi[0])]\n for a, b in zip(bi[:-1][d > 1], bi[1:][d > 1]):\n chunks.append(slice(a+1, b-1))\n chunks.append(slice(bi[-1]+1, gi[-1]))\n else:\n chunks = [slice(gi[0], gi[-1])]\n\n # Interpolate missing data.\n for c in chunks:\n x0, y0 = x[c], y[c]\n m = np.isfinite(y0)\n if not np.any(m):\n continue\n # y0[~m] = np.interp(x0[~m], x0[m], y0[m])\n # y0[~m] += yerr * np.random.randn((~m).sum())\n lcs.append(LightCurve(x0, y0, yerr, meta, texp=texp))\n\n if delete:\n os.remove(fn)\n return lcs\n\n\nclass LightCurve(object):\n\n def __init__(self, time, flux, yerr, meta, texp=TEXP):\n self.time = np.ascontiguousarray(time, dtype=float)\n mu = np.median(flux)\n self.flux = np.ascontiguousarray(flux / mu, dtype=float)\n self.yerr = float(yerr) / mu\n self.meta = meta\n self.footprint = self.time.max() - self.time.min()\n self.texp = texp\n\n def __len__(self):\n return len(self.time)\n\n\ndef _get_mast_light_curve_urls(kic, short_cadence=False, **params):\n # Build the URL and request parameters.\n url = \"http://archive.stsci.edu/kepler/data_search/search.php\"\n params[\"action\"] = params.get(\"action\", \"Search\")\n params[\"outputformat\"] = \"JSON\"\n params[\"coordformat\"] = \"dec\"\n params[\"verb\"] = 3\n params[\"ktc_kepler_id\"] = kic\n params[\"ordercolumn1\"] = \"sci_data_quarter\"\n if not short_cadence:\n params[\"ktc_target_type\"] = \"LC\"\n\n # Get the list of files.\n r = requests.get(url, params=params)\n if r.status_code != requests.codes.ok:\n r.raise_for_status()\n\n # Format the data URLs.\n kic = \"{0:09d}\".format(kic)\n base_url = 
(\"http://archive.stsci.edu/pub/kepler/lightcurves/{0}/{1}/\"\n .format(kic[:4], kic))\n for row in r.json():\n ds = row[\"Dataset Name\"].lower()\n tt = row[\"Target Type\"].lower()\n yield base_url + \"{0}_{1}lc.fits\".format(ds, tt[0])\n","sub_path":"peerless/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":6373,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"591158383","text":"import os\nimport base64\nimport hashlib\n\nimport requests\n\n\nAUTH_URL = 'https://api.backblaze.com/b2api/v1/b2_authorize_account'\n\n\nclass BackBlazeB2(object):\n\n authorization_token = None\n\n def __init__(self, app_key=None, account_id=None, bucket_name=None,\n max_retries=3):\n self.bucket_id = None\n self.account_id = account_id\n self.app_key = app_key\n self.bucket_name = bucket_name\n self.max_retries = max_retries\n\n def _ensure_authorization(self):\n if self.authorization_token:\n return\n\n self.authorize()\n self.get_bucket_id_by_name()\n\n def authorize(self):\n headers = {'Authorization': 'Basic: %s' % (\n base64.b64encode(('%s:%s' % (self.account_id, self.app_key)\n ).encode('utf-8'))).decode('utf-8')}\n response = requests.get(AUTH_URL, headers=headers)\n\n if response.status_code != 200:\n response.raise_for_status()\n\n data = response.json()\n\n self.base_url = data['apiUrl']\n self.download_url = data['downloadUrl']\n self.authorization_token = data['authorizationToken']\n\n return data\n\n def get_upload_url(self):\n self._ensure_authorization()\n\n url = self._build_url('/b2api/v1/b2_get_upload_url')\n headers = {'Authorization': self.authorization_token}\n params = {'bucketId': self.bucket_id}\n\n response = requests.get(url, headers=headers, params=params)\n\n if response.status_code == 401:\n self.authorize()\n headers = {'Authorization': self.authorization_token}\n response = requests.get(url, headers=headers, params=params)\n elif response.status_code != 200:\n response.raise_for_status()\n\n return response.json()\n\n def _build_url(self, endpoint):\n return self.base_url + endpoint\n\n def upload_file(self, name, content):\n self._ensure_authorization()\n\n upload_url_response = self.get_upload_url()\n\n url = upload_url_response['uploadUrl']\n sha1_of_file_data = hashlib.sha1(content.read()).hexdigest()\n content.seek(0)\n\n headers = {\n 'Authorization': upload_url_response['authorizationToken'],\n 'X-Bz-File-Name': name,\n 'Content-Type': \"b2/x-auto\",\n 'X-Bz-Content-Sha1': sha1_of_file_data,\n 'X-Bz-Info-src_last_modified_millis': '',\n }\n\n attempts = 0\n while attempts <= self.max_retries:\n attempts += 1\n try:\n response = requests.post(\n url, headers=headers, data=content.read())\n except ConnectionError:\n continue\n\n if response.status_code == 200:\n break\n\n if response.status_code != 200:\n response.raise_for_status()\n\n return response.json()\n\n def get_file_info(self, name):\n self._ensure_authorization()\n headers = {'Authorization': self.authorization_token}\n return requests.get(self.get_file_url(name), headers=headers)\n\n def download_file(self, name):\n return self.get_file_info(name).content\n\n def get_file_url(self, name):\n self._ensure_authorization()\n return os.path.join(self.download_url, 'file', self.bucket_name, name)\n\n def get_bucket_id_by_name(self):\n \"\"\"\n BackBlaze B2 should make an endpoint to retrieve buckets by its name.\n \"\"\"\n self._ensure_authorization()\n headers = {'Authorization': self.authorization_token}\n params = {'accountId': 
self.account_id}\n response = requests.get(self._build_url(\"/b2api/v1/b2_list_buckets\"),\n headers=headers, params=params)\n if response.status_code != 200:\n response.raise_for_status()\n\n for bucket in response.json()['buckets']:\n if bucket['bucketName'] == self.bucket_name:\n self.bucket_id = bucket['bucketId']\n","sub_path":"b2_storage/backblaze_b2.py","file_name":"backblaze_b2.py","file_ext":"py","file_size_in_byte":4019,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"108760811","text":"import os\n\nimport pytest\nfrom starlette.testclient import TestClient\nfrom tortoise.contrib.fastapi import register_tortoise\n\nfrom app.config import Settings, get_settings\nfrom app.main import create_application\n\n\ndef get_settings_override():\n return Settings(testing=1, database_url=os.environ.get(\"DATABASE_TEST_URL\"))\n\n\n@pytest.fixture(scope=\"module\")\ndef test_app():\n # set up\n app = create_application() # new\n app.dependency_overrides[get_settings] = get_settings_override\n with TestClient(app) as test_client: # updated\n\n # testing\n yield test_client\n\n # tear down\n\n\n@pytest.fixture(scope=\"module\")\ndef test_app_with_db():\n # set up\n app = create_application()\n app.dependency_overrides[get_settings] = get_settings_override\n register_tortoise(\n app,\n db_url=os.environ.get(\"DATABASE_TEST_URL\"),\n modules={\"models\": [\"app.models.tortoise\"]},\n generate_schemas=True,\n add_exception_handlers=True,\n )\n with TestClient(app) as test_client:\n # testing\n yield test_client\n\n # tear down\n\n\n# normal run\n# $ docker-compose exec web python -m pytest\n\n# disable warnings\n# $ docker-compose exec web python -m pytest -p no:warnings\n\n# run only the last failed tests\n# $ docker-compose exec web python -m pytest --lf\n\n# run only the tests with names that match the string expression\n# $ docker-compose exec web python -m pytest -k \"summary and not test_read_summary\"\n\n# stop the test session after the first failure\n# $ docker-compose exec web python -m pytest -x\n\n# enter PDB after first failure then end the test session\n# $ docker-compose exec web python -m pytest -x --pdb\n\n# stop the test run after two failures\n# $ docker-compose exec web python -m pytest --maxfail=2\n\n# show local variables in tracebacks\n# $ docker-compose exec web python -m pytest -l\n\n# list the 2 slowest tests\n# $ docker-compose exec web python -m pytest --durations=2\n","sub_path":"project/tests/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":1945,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"532664109","text":"#Imports\nfrom Morse_Code_File import *\nfrom Tap_Code_File import *\n\n#Variables\nuserMessage = \"\"\nwelcomeMessageString = \"Welcome to the Code Sender Application!\"\nsendMessageString = \"Would you like to send a message? (y/n): \"\ninvalidInputString = \"Invalid input.\\n\"\n\n\n\n#Functions\n\n\n#Choice Method\ndef TapOrMorse(sentMsg):\n while True:\n msgMedium = raw_input(\"Would you like to send your message with Tap or Morse code? 
(t/m): \").lower()\n if msgMedium.lower() == \"t\":\n print(\"[Message is sent using Tap Code with LED]\\n\")\n SendMessageViaTapCode(sentMsg)\n break\n elif msgMedium.lower() == \"m\":\n print(\"[Message is sent using Morse Code with LED]\\n\")\n SendMessageViaMorseCode(sentMsg)\n break\n else:\n print (\"\")\n return\n\n#User Input Method\ndef ReadMessageFromUserInput():\n return raw_input(\"Enter your message: \")\n\n#Read Message From File Method\ndef ReadMessageFromFile():\n fileMessage = \"\"\n try:\n inputFile = raw_input(\"Enter the file path and name: \")\n with open(inputFile, 'r') as f:\n lines = f.readlines()\n for x in range(0, len(lines)):\n fileMessage += lines[x]\n return fileMessage\n except:\n print(\"Problem Loading File\\n\")\n return","sub_path":"Assignments/FinalProject/Helper_File.py","file_name":"Helper_File.py","file_ext":"py","file_size_in_byte":1314,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"127822158","text":"\"\"\"\n2966\n\"\"\"\n\n\nimport sys\n\n\ndef input(): return sys.stdin.readline().rstrip()\n\n\nN, Ans = int(input()), list(input())\n\nAdrian = ['A', 'B', 'C']\nBruno = ['B', 'A', 'B', 'C']\nGoran = ['C', 'C', 'A', 'A', 'B', 'B']\nscore = [0, 0, 0]\n\nfor Ans_i, Ans_e in enumerate(Ans):\n if Adrian[Ans_i % len(Adrian)] == Ans_e:\n score[0] += 1\n if Bruno[Ans_i % len(Bruno)] == Ans_e:\n score[1] += 1\n if Goran[Ans_i % len(Goran)] == Ans_e:\n score[2] += 1\n\nwinnerScore = max(score)\nprint(winnerScore)\n\nif score[0] == winnerScore:\n print(\"Adrian\")\nif score[1] == winnerScore:\n print(\"Bruno\")\nif score[2] == winnerScore:\n print(\"Goran\")\n\n# winnerNames = []\n# for scoreIdx, scoreE in enumerate(score):\n# if scoreE == winnerScore:\n# name = ''\n# if scoreIdx == 0:\n# name = \"Adrian\"\n# elif scoreIdx == 1:\n# name = \"Bruno\"\n# else:\n# name = \"Goran\"\n# winnerNames.append(name)\n\n\n# for winnerName in winnerNames:\n# print(winnerName)\n\n\"\"\"\n리팩토링\n\n1. 리스트의 인덱스를 고를때 \n[] 으로 고르는데 -> 인덱\n\n2. 입력줄때\n\n\nN, Ans = int(input()), list(input()) 이렇게 튜플 형태로 해도 된다. 더 읽기 좋잖아\n\n3. 
When printing the winners.\n\nWhy bother collecting the winners separately and then printing them?\nJust check each score against the max on the spot and print it.\n\n\"\"\"\n","sub_path":"BOJ_Bronze/2966.py","file_name":"2966.py","file_ext":"py","file_size_in_byte":1395,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"36340152","text":"#!/usr/bin/env python3\n\"\"\"Create Workout Of (the) Day (WOD)\"\"\"\n\nimport argparse\nimport csv\nimport io\nimport os\nimport random\nfrom itertools import starmap\nfrom tabulate import tabulate\n\n\n# --------------------------------------------------\ndef get_args():\n \"\"\"Get command-line arguments\"\"\"\n\n parser = argparse.ArgumentParser(\n description='Create Workout Of (the) Day (WOD)',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n\n parser.add_argument('-f',\n '--file',\n help='CSV input file of exercises',\n metavar='str',\n type=argparse.FileType('r'),\n default='exercises.csv')\n\n parser.add_argument('-s',\n '--seed',\n help='Random seed',\n metavar='int',\n type=int,\n default=None)\n\n parser.add_argument('-n',\n '--num_exercises',\n help='Number of exercises',\n metavar='int',\n type=int,\n default=4)\n\n parser.add_argument('-e',\n '--easy',\n help='Make it easy',\n action='store_true')\n\n return parser.parse_args()\n\n\n# --------------------------------------------------\ndef main():\n \"\"\"Make a jazz noise here\"\"\"\n\n args = get_args()\n random.seed(args.seed)\n exercises = [row for row in read_csv(args.file) if row] # drop rows that parse_row rejected (returned as None)\n\n # was starmap(mk_exercise, ...): mk_exercise is undefined, and 'easy' was never passed through\n wod = starmap(lambda name, low, high: make_exercise(name, low, high, args.easy),\n random.sample(exercises, k=args.num_exercises))\n print(tabulate(wod, headers=('Exercise', 'Reps')))\n\n\n# --------------------------------------------------\ndef parse_row(row):\n \"\"\"Parse a row\"\"\"\n\n name = row.get('exercise')\n reps = row.get('reps')\n\n if name and '-' in reps:\n low, high = row['reps'].split('-')\n\n if low.isdigit() and high.isdigit():\n return (name, int(low), int(high))\n\n return None\n\n\n# --------------------------------------------------\ndef test_parse_row():\n \"\"\"Test parse_row\"\"\"\n\n assert parse_row({}) is None\n assert parse_row({'exercise': 'foo', 'reps': '10'}) is None\n assert parse_row({'exercise': 'foo', 'reps': '10.5-11'}) is None\n assert parse_row({'exercise': 'bar', 'reps': '10-20'}) == ('bar', 10, 20)\n\n\n# --------------------------------------------------\ndef read_csv(fh):\n \"\"\"Read the CSV input\"\"\"\n\n return list(map(parse_row, csv.DictReader(fh, delimiter=',')))\n\n\n# --------------------------------------------------\ndef make_exercise(name, low, high, easy):\n \"\"\"Make an exercise\"\"\"\n\n if easy:\n low, high = int(low / 2), int(high / 2)\n\n return (name, random.randint(low, high))\n\n\n# --------------------------------------------------\ndef test_read_csv():\n \"\"\"Test read_csv\"\"\"\n\n text1 = io.StringIO('exercise,reps\\nfoo,10-20\\nbar,30-40')\n assert read_csv(text1) == [('foo', 10, 20), ('bar', 30, 40)]\n\n text2 = io.StringIO('exercise,reps\\nfoo,10.5-20\\nbar,30')\n assert read_csv(text2) == [None, None] # was read_csv(text): a NameError; rows parse_row rejects come back as None\n\n\n# --------------------------------------------------\nif __name__ == '__main__':\n main()\n","sub_path":"20_wod/wodX.py","file_name":"wodX.py","file_ext":"py","file_size_in_byte":3218,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"75180453","text":"# coding=utf-8\nimport gspread\nfrom oauth2client.service_account import ServiceAccountCredentials\nfrom datetime import datetime\nfrom pychroner import PluginMeta, PluginType, 
importModule\n\n@PluginMeta(PluginType.Schedule, multipleMinute=2)\ndef do(pluginApi):\n oauth = importModule(\"JPMCPvPy.oauth\").OAuth(pluginApi.config.secret.JPMCPvPClientID, pluginApi.config.secret.JPMCPvPClientSecret)\n token = oauth.getToken()\n\n api = importModule(\"JPMCPvPy.api\").API(token)\n\n current = 0\n for x in api.get(\"/servers\"):\n current += x.get(\"current\", 0)\n if current == 0:\n return True\n\n gs = gspread.authorize(\n ServiceAccountCredentials.from_json_keyfile_dict(\n pluginApi.config.secret.GoogleServiceAccount,\n scopes=[\"https://spreadsheets.google.com/feeds\"]\n )\n )\n gs.open_by_key(pluginApi.config.secret.GoogleSpreadsheetJPMCPvPOnlineUsers).sheet1.append_row([\n datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\"),\n current\n ])\n","sub_path":"plugins/JPMCPvPNews/PlotOnlineUsersCount.py","file_name":"PlotOnlineUsersCount.py","file_ext":"py","file_size_in_byte":1026,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"8257684","text":"\"\"\"WebHooks for HomeControl\"\"\"\n\nimport json\nfrom aiohttp import web\n\nSPEC = {\n \"meta\": {\n \"name\": \"WebHooks\",\n \"description\": \"Provides WebHook endpoints\"\n }\n}\n\n\nclass Module:\n \"\"\"The WebHook module\"\"\"\n async def init(self):\n \"\"\"Initialise the module\"\"\"\n\n @self.core.event_engine.register(\"http_add_api_routes\")\n async def add_route(event, router):\n \"\"\"Add an API route\"\"\"\n\n @router.get(\"/webhook/{target}\")\n @router.post(\"/webhook/{target}\")\n async def webhook_route(request):\n self.core.event_engine.broadcast(\n \"webhook_event\",\n target=request.match_info[\"target\"], params={})\n return web.Response(\n body=json.dumps(\n {\"msg\": \"Webhook triggered\"},\n indent=4, sort_keys=True),\n content_type=\"application/json\")\n\n @self.core.event_engine.register(\"gather_automation_providers\")\n async def on_gather_automation_providers(event, engine, callback):\n \"\"\"Register as an automation provider\"\"\"\n callback(trigger={\"webhook\": self.provider_factory})\n\n # pylint: disable=no-self-use\n def provider_factory(self, rule, engine):\n \"\"\"Return a WebhookTriggerProvider for automation\"\"\"\n return WebhookTriggerProvider(rule, engine)\n\n\nclass WebhookTriggerProvider:\n \"\"\"The trigger provider for automation\"\"\"\n def __init__(self, rule, engine):\n self.rule = rule\n self.engine = engine\n self.core = engine.core\n\n self.data = rule.data[\"trigger\"]\n self.event = self.data[\"target\"]\n\n self.core.event_engine.register(\"webhook_event\")(self.on_webhook)\n\n async def on_webhook(self, event, target, params):\n \"\"\"Handle WebHook event\"\"\"\n if target == self.event:\n await self.rule.on_trigger(params)\n\n async def stop(self) -> None:\n \"\"\"Stops the WebhookTriggerProvider\"\"\"\n self.core.event_engine.remove_handler(\"webhook_event\", self.on_webhook)\n","sub_path":"homecontrol/modules/webhooks.py","file_name":"webhooks.py","file_ext":"py","file_size_in_byte":2109,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"423632800","text":"import logging\n\nimport numpy as np\nimport torch.utils.data as data\nfrom PIL import Image\nfrom torchvision.datasets import CIFAR10\n\nlogging.basicConfig()\nlogger = logging.getLogger()\nlogger.setLevel(logging.INFO)\n\nIMG_EXTENSIONS = ('.jpg', '.jpeg', '.png', '.ppm', '.bmp', '.pgm', '.tif', '.tiff', '.webp')\n\n\ndef accimage_loader(path):\n import accimage\n try:\n return accimage.Image(path)\n 
except IOError:\n # Potentially a decoding problem, fall back to PIL.Image\n return pil_loader(path)\n\n\ndef pil_loader(path):\n # open path as file to avoid ResourceWarning (https://github.com/python-pillow/Pillow/issues/835)\n with open(path, 'rb') as f:\n img = Image.open(f)\n return img.convert('RGB')\n\n\ndef default_loader(path):\n from torchvision import get_image_backend\n if get_image_backend() == 'accimage':\n return accimage_loader(path)\n else:\n return pil_loader(path)\n\n\nclass CIFAR10_truncated(data.Dataset):\n\n def __init__(self, root, dataidxs=None, train=True, transform=None, target_transform=None, download=False, noise_rate=0):\n\n self.root = root\n self.dataidxs = dataidxs\n self.train = train\n self.transform = transform\n self.target_transform = target_transform\n self.download = download\n self.noise_rate = noise_rate\n self.data, self.target = self.__build_truncated_dataset__()\n\n\n def corrupt_label(self, y_train, noise_rate):\n \"\"\"Corrupts training labels.\n\n Args:\n y_train: training labels\n noise_rate: input noise ratio\n\n Returns:\n corrupted_y_train: corrupted training labels\n noise_idx: corrupted index\n \"\"\"\n\n if(noise_rate == 0.0):\n return y_train, []\n y_set = list(set(y_train))\n\n # Sets noise_idx\n temp_idx = np.random.permutation(len(y_train))\n noise_idx = temp_idx[:int(len(y_train) * noise_rate)]\n\n # Corrupts label\n corrupted_y_train = y_train[:]\n\n for itt in noise_idx:\n temp_y_set = y_set[:]\n del temp_y_set[y_train[itt]]\n rand_idx = np.random.randint(len(y_set) - 1)\n corrupted_y_train[itt] = temp_y_set[rand_idx]\n\n return corrupted_y_train, noise_idx\n\n def __build_truncated_dataset__(self):\n print(\"download = \" + str(self.download))\n cifar_dataobj = CIFAR10(self.root, self.train, self.transform, self.target_transform, self.download)\n\n if self.train:\n # print(\"train member of the class: {}\".format(self.train))\n # data = cifar_dataobj.train_data\n data = cifar_dataobj.data\n target = np.array(cifar_dataobj.targets)\n else:\n data = cifar_dataobj.data\n target = np.array(cifar_dataobj.targets)\n\n if self.dataidxs is not None:\n data = data[self.dataidxs]\n target = target[self.dataidxs]\n if(self.noise_rate != 0):\n target, noise_idx = self.corrupt_label(target, self.noise_rate)\n target = np.delete(target, noise_idx, axis=0)\n data = np.delete(data, noise_idx, axis=0)\n\n return data, target\n\n def truncate_channel(self, index):\n for i in range(index.shape[0]):\n gs_index = index[i]\n self.data[gs_index, :, :, 1] = 0.0\n self.data[gs_index, :, :, 2] = 0.0\n\n def __getitem__(self, index):\n \"\"\"\n Args:\n index (int): Index\n\n Returns:\n tuple: (image, target) where target is index of the target class.\n \"\"\"\n img, target = self.data[index], self.target[index]\n\n if self.transform is not None:\n img = self.transform(img)\n\n if self.target_transform is not None:\n target = self.target_transform(target)\n\n return img, target\n\n def __len__(self):\n return len(self.data)\n","sub_path":"data_preprocessing/cifar10/datasets.py","file_name":"datasets.py","file_ext":"py","file_size_in_byte":3925,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"215510802","text":"from functions import mean, variance, desviation\nimport random\n\n\ndef main():\n my_list = [random.randint(1, 10000) for _ in range(1, 10000)]\n print(f\"The mean is: {mean(my_list)}\")\n print(f\"Variance is: {variance(my_list)}\")\n print(f\"Desviation is: {desviation(my_list)}\")\n\n\nif 
__name__ == \"__main__\":\n main()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"108441616","text":"import numpy as np\nfrom fractions import Fraction #Para aceptar fracciones en la entrada\nfrom sympy import Matrix, linsolve, symbols\nfrom scipy import linalg\nfrom itertools import product\n#Método de Mejoramiento de políticas con descuento\ndef Enumeracion_Politicas(m,k, s, matriztrans, politica, costos):\n # Empezamos el algoritmo \n \n permutaciones=[]\n mat=[]\n for i in range(k):\n mat.append(i)\n mat=np.array(mat)\n \n \n for c in product(mat, repeat=m):\n permutaciones.append(c) \n \n poliExha=[]\n for i in range(len(permutaciones)):\n cont=0\n for j in range(m):\n if(permutaciones[i][j] in politica[j]):\n cont=cont+1\n if(cont==m):\n print(\"Politica R\"+str(i+1)+\" \"+str(permutaciones[i]))\n poliExha.append(permutaciones[i])\n cont=0\n \n #print(\"El numero de politicas posibles es =\", pow(k,m))\n # print(\"Politica %d\" %(i+1)+ \"=\"+ str(permutaciones(i)))\n resultado=[]\n\n \n for i in range(len(poliExha)):\n polArb=poliExha[i] #Se toma la primera decisión que hay en cada estado\n print(\"\\nLa política R%d\"%(i+1)+\"=\"+str(polArb))\n matriz=[] #Definimos la matriz de transicion respecto a la politica \n aux2=[]\n costoabr=[]\n aux3=[]\n\n b=0 #Contador\n for i in range(m):\n for j in range(len(politica[i])): #Medimos el lengeth de la política \n for x in range(k):\n if(polArb[i]==x+1 & x+1==politica[i][j] ): #La política arbitraria y las decisiones son iguales\n aux3=costos[b] #El costo[i][j]\n costoabr.append(aux3)\n b=b+1 #Aumentamos para saber la posición exacta del costo\n costoabr=np.array(costoabr)\n print(\"\\nLa costos de la política arbitraria son: \\n\", costoabr ) \n \n for i in range(m):\n for j in range(k):\n if(polArb[i]==j+1): #Si la política j es = al dato [i] de la política arbitraria\n aux2=matriztrans[j][i] #De la fila i respecto a k=j\n matriz.append(aux2)\n matriz=np.array(matriz) #Convertimos a numpy array\n print(\"\\nLa matriz respecto a la política arbitraria es: \\n\", matriz)\n\n \n matriz2=[] #guardamos la matriz transpuesta\n aux4=[]\n aux5=[]\n resul=[]\n matriz3=[]\n aux6=[]\n aux7=[]\n matrizR=[]\n MatrizEstacionaria=[]\n MatEc=[]\n \n #matriz para la restriccion pi=p0+P1+..+Pn\n for i in range(m):\n for j in range(m):\n pi=np.ones((1,m))\n \n for i in range(m):\n for j in range(m):\n res=np.zeros((m-1,1))\n\n #print(\"matriz resultados\")\n resul=np.append(res,[1])\n resul=np.array(resul)\n\n for c in range(len(matriz[1,: ])):\n aux4=matriz[:,c]\n matriz2.append(aux4)\n matriz2=np.array(matriz2)\n # print(matriz2)\n \n \n for i in range(m):\n for j in range(m):\n if (matriz2[i][j]!=0): #Siempre que no tenga 0's \n matriz2[i][j]=-1*matriz2[i][j] #Multiplicamos por -1, como si lo pasarmos al otro lado\n if(i==j):\n matriz2[i][j]=1+matriz2[i][j]\n aux5=np.delete(matriz2,m-1, axis=0)\n \n \n matrizR=np.concatenate([aux5, pi], axis=0) #axis=0 es filas, axis=1 columnas\n matrizR=np.array(matrizR)\n\n\n print(\"\\nEl sistema a Resolver es: \" +str(matrizR)+\" = \"+str(costoabr))\n \n \n if np.linalg.det(matrizR) == 0: \n x= None\n print(\"No se puede resolver\")\n else:\n x=linalg.solve(matrizR,resul)\n x=np.array(x)\n MatrizEstacionaria=np.array(x)\n print(\"La solucion del sistema es:\", MatrizEstacionaria)\n aux6=MatrizEstacionaria*costoabr #calculamos el vector solucion4\n print(\"La solucion a 
 \n if np.linalg.det(matrizR) == 0: \n x= None\n print(\"The system cannot be solved\")\n else:\n x=linalg.solve(matrizR,resul)\n x=np.array(x)\n MatrizEstacionaria=np.array(x)\n print(\"The solution of the system is:\", MatrizEstacionaria)\n aux6=MatrizEstacionaria*costoabr # compute the solution vector\n print(\"The solution to our system taking the costs into account\",aux6)\n aux7=np.sum(np.dot(MatrizEstacionaria,costoabr))\n print(\"E(c)=\",aux7)\n resultado.append(aux7) \n\n resultado=np.array(resultado)\n print(\"These are the E(C) values of our policies\\n\",resultado)\n \n\n if(s=='min' or s=='MIN'):\n sol=np.min(resultado) \n print(\"The optimal minimum solution is:\",sol)\n if(s=='max' or s=='MAX'):\n sol2=np.max(resultado) \n print(\"The optimal maximum solution is:\", sol2) \n \n \n return \n \n\n\n","sub_path":"EnumeracionExahustiva.py","file_name":"EnumeracionExahustiva.py","file_ext":"py","file_size_in_byte":4724,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"119983726","text":"import gym\nimport torch\nimport unittest\n\nimport gymag\nimport gymag.spaces as spaces\n\n\nclass Cloak(gym.ActionWrapper, gym.ObservationWrapper, gym.RewardWrapper):\n \"\"\" Hide all `torch.Tensor` inputs/outputs and spaces by converting them to a standard `numpy.array`. \"\"\"\n\n def __init__(self, autograd_env):\n super().__init__(autograd_env)\n if isinstance(autograd_env.unwrapped, gymag.Env):\n assert isinstance(autograd_env.action_space, gymag.Space)\n assert isinstance(autograd_env.observation_space, gymag.Space)\n self.action_space = autograd_env.action_space.to_gym_space()\n self.observation_space = autograd_env.observation_space.to_gym_space()\n self.cloak = True\n else:\n self.cloak = None\n\n def action(self, action):\n torch_action = self.env.action_space.from_gym(action)\n return torch_action\n\n def observation(self, torch_observation):\n observation = self.env.observation_space.to_gym(torch_observation)\n return observation\n\n def reward(self, torch_reward):\n reward = float(torch_reward)\n return reward\n\n def step(self, action):\n with torch.no_grad():\n if self.cloak:\n action = self.action(action)\n observation, reward, done, info = self.env.step(action)\n if self.cloak:\n observation, reward = self.observation(observation), self.reward(reward)\n return observation, reward, done, info\n\n def reset(self, **kwargs):\n with torch.no_grad():\n observation = self.env.reset(**kwargs)\n if self.cloak:\n observation = self.observation(observation)\n return observation\n\n def __str__(self):\n # editor's note: the original format strings lost their angle-bracketed text during extraction\n # (both branches read ''.format(...)); reconstructed here as informative reprs\n if self.cloak:\n return '<Cloak {}>'.format(self.env)\n else:\n return '<Cloak (inactive) {}>'.format(self.env)\n\n\n# region Tests\n\n\nclass Basics(unittest.TestCase):\n\n def test_discrete(self):\n baseline_env = gym.make('CartPole-v0')\n baseline_env = Cloak(baseline_env)\n autograd_env = gym.make('CartPoleAG-v0')\n autograd_env.action_space = spaces.Discrete.from_gym_space(baseline_env.action_space)\n autograd_env = Cloak(autograd_env)\n baseline_env.seed(1)\n gym.spaces.seed(1)\n\n self.assertEqual(autograd_env.action_space, 
baseline_env.action_space)\n self.assertEqual(autograd_env.observation_space, baseline_env.observation_space)\n\n state, done = baseline_env.reset(), False\n while not done:\n action = baseline_env.action_space.sample()\n self.assertTrue(autograd_env.action_space.contains(action))\n\n state, reward, done, info = baseline_env.step(action)\n self.assertTrue(autograd_env.observation_space.contains(state))\n\n\n# endregion\n","sub_path":"gymag/wrappers/cloak.py","file_name":"cloak.py","file_ext":"py","file_size_in_byte":3702,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"555193451","text":"import os\nimport requests\nimport requests.exceptions\nimport sys\nimport hashlib\nfrom random import choice\nfrom string import ascii_uppercase\nfrom datetime import datetime\nimport threading\nimport time\n\nfrom flask import Flask\nfrom flask import request\nfrom flask import abort\nfrom flask import jsonify\nimport json\n\napp = Flask(__name__)\napp.config.update(\n\tPROPAGATE_EXCEPTIONS = True\n)\nlock = threading.Lock()\nKvStore = {}\nMAINPORT=8080 #default\nglobal VIEW\nglobal TRIPLETS\n\nVIEW = []\nK=0 # number of replicas\nIP=0\nIPPORT=0 # IP:PORT of this instance\nGlobalView={} #a list indexed by HashIDs, contains a tuple of K(number of replicas) IPs & the range associated with the HashID\nnewNode=0\nsysSize=100\n\nthisHashID=-1 # uninitialized\nposition=-1 # position in vector clock\n\nif \"VIEW\" in os.environ:\n\tVIEW=os.environ[\"VIEW\"]\n\tVIEWsplit = VIEW.split(',')\n\tVIEW = VIEWsplit\nelse:\n\tnewNode=1\nif \"MAINPORT\" in os.environ:\n\tMAINPORT=int(os.environ[\"MAINPORT\"])\nif \"IPPORT\" in os.environ:\n\tIPPORT=str(os.environ[\"IPPORT\"])\nif \"K\" in os.environ:\n\tK=str(os.environ[\"K\"])\n\n#random string generator, from stack overflow\ndef randomword(length):\n return ''.join(choice(ascii_uppercase) for i in range(length))\n\ndef consistentHash(value):\n\tShaHash=0\n\tfilteredVal = 0\n\tvalue = value.encode('utf-8')\n\tShaHash = hashlib.sha224(value).hexdigest()\n\tfor x in ShaHash:\n\t\tif x != 'a' and x !='b' and x != 'c' and x != 'd' and x != 'e' and x != 'f':\n\t\t\tfilteredVal += int(x)\n\treturn filteredVal\n\n#modulo is set above\ndef GetHashID(IP, sysSize):\n\tdummyIP = IP\n\tcollision=0\n\tcounter = 0\n\twhile 1:\n\t\tcollision=0\n\t\thashedVal = consistentHash(dummyIP)\n\t\t#check for collisions\n\t\tfor x in range(len(GlobalView)):\n\t\t\tif x == hashedVal%sysSize and GlobalView[x] != None:\n\t\t\t\tcollision = 1\n\t\t\t\tdummyIP=dummyIP+randomword(5)\n\n\t\tif collision != 1:\n\t\t\tbreak\n\thashID = hashedVal%sysSize\n\treturn hashID\n\n#use the old and new ranges to move the keys around\n#def RedistributeKeysAdd(newTriplets, newIP):\n\n#get the key range of IP and inserted into the triplets under key \"IP\"; get hashID first before calling this\n#counter-clockwise\ndef GetRange(GlobalView, targetHashID, sysSize):\n\tHashIDs=[]\n\tRange = []\n\tindex=0\n\tfor x in GlobalView:\n\t\tHashIDs.append(int(x)) # append the hashIDs to a list\n\tHashIDs.sort()\n\tindex = HashIDs.index(int(targetHashID)) # the hash ID of 'IP'\n\n\t#if there is only 1 hash ID, the node is in charge of all key ranges\n\tif len(HashIDs) == 1:\n\t\tfor x in range(sysSize):\n\t\t\tRange.append(x)\n\n\tif len(HashIDs) > 1:\n\t\tmaxIndex = len(HashIDs)-1\n\t\tif index == 0:\n\t\t\t\tfor x in range(HashIDs[0]):\n\t\t\t\t\tRange.append(x)\n\t\t\t\tRange.append(HashIDs[0])\n\t\t\t\tfor x in 
range(HashIDs[maxIndex]+1,sysSize):\n\t\t\t\t\tRange.append(x)\n\t\telse:\n\t\t\tfor x in range(HashIDs[index-1]+1, HashIDs[index]+1):\n\t\t\t\tRange.append(x)\n\treturn Range\n\n#we are initializing a view if this is true\n#View Initialize\nfillingPartiton=0\nif len(VIEW) != 0:\n\tfor x in VIEW:\n\n\t\tif len(GlobalView) == 0:\n\t\t\thashID=GetHashID(x, sysSize)\n\t\t\tif IPPORT == x:\n\t\t\t\tthisHashID = hashID\n\t\t\tGlobalView[str(hashID)] = {}\n\t\t\tGlobalView[str(hashID)]['0'] = []\n\t\t\tGlobalView[str(hashID)]['0'].append(x)\n\t\t\tGlobalView[str(hashID)]['1'] = []\n\t\t\t#GlobalView[hashID]['1'] = GetRange(IPPORT, sysSize) do this when global view is finished being created\n\t\telse:\n\t\t\tfor y in GlobalView:\n\t\t\t\tif len(GlobalView[y]['0']) < int(K):\n\t\t\t\t\tprint(\"true\")\n\t\t\t\t\tfillingPartiton=1\n\t\t\t\t\tif IPPORT== x:\n\t\t\t\t\t\tthisHashID=y\n\t\t\t\t\tGlobalView[y]['0'].append(x)\n\t\t\tif fillingPartiton == 0:\n\t\t\t\thashID=GetHashID(x, sysSize)\n\t\t\t\tif IPPORT == x:\n\t\t\t\t\tthisHashID = hashID\n\t\t\t\tGlobalView[str(hashID)] = {}\n\t\t\t\tGlobalView[str(hashID)]['0'] = []\n\t\t\t\tGlobalView[str(hashID)]['0'].append(x)\n\t\t\t\tGlobalView[str(hashID)]['1'] = []\n\t\t\tfillingPartiton = 0\n\tfor x in GlobalView:\n\t\tGlobalView[x]['1'] = GetRange(GlobalView, x, sysSize)\n
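\n# --- Editor's note: illustrative sketch added during editing; not part of the original service. It exercises GetHashID()/GetRange() above on a throwaway view (hypothetical IPs) to show how each partition owns the slot range between its predecessor's hash ID and its own, counter-clockwise around the ring. Note GetHashID() checks collisions against the global GlobalView.\ndef _ring_demo():\n\tdemo_view = {}\n\tfor ip in [\"10.0.0.1:8080\", \"10.0.0.2:8080\", \"10.0.0.3:8080\"]:\n\t\tdemo_view[str(GetHashID(ip, sysSize))] = {'0': [ip], '1': []}\n\tfor hid in demo_view:\n\t\tdemo_view[hid]['1'] = GetRange(demo_view, hid, sysSize)\n\t\tprint(hid, \"owns\", len(demo_view[hid]['1']), \"slots\")\n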
\n#check if this node is prev, else send a message to start a redistribute function on the correct node, Redis(newIP,upperbound)\ndef RedistributeAdd(newIP, hashID, GlobalView):\n\tprint(\"in redistribute ADD\")\n\tprint(\"new hash ID is:\"+ hashID)\n\tprint(\"GlobalView is:\"+ str(GlobalView))\n\tHashIDs = []\n\tprevNode=0\n\tnewIPID=0\n\tnextNode=0\n\tnewHashID = str(hashID)\n\tfor x in GlobalView:\n\t\tHashIDs.append(int(x))\n\tHashIDs.sort()\n\n\tif newHashID == str(HashIDs[len(HashIDs)-1]):\n\t\tfor x in GlobalView:\n\t\t\tif str(HashIDs[0]) == x:\n\t\t\t\tfor n in GlobalView[x]['0']:\n\t\t\t\t\tif IPPORT == n: #if this is the correct redis node\n\t\t\t\t\t\tnextNode = IPPORT\n\t\t\t\t\t\toldKeys=[]\n\t\t\t\t\t\tfor y in KvStore:\n\t\t\t\t\t\t\tif consistentHash(y)%sysSize in GlobalView[newHashID]['1']:\n\t\t\t\t\t\t\t\trequests.put(\"http://\"+newIP+\"/kvs/\"+y, data = {\"val\":KvStore[y]})\n\t\t\t\t\t\t\t\toldKeys.append(y)\n\t\t\t\t\t\tfor k in oldKeys:\n\t\t\t\t\t\t\tKvStore.pop(k, None)\n\t\t\t\t\t\treturn 1\n\t\t\t\t\t\t\t#for the keys in my kvs, hash%100 them and see if they are > HashIDs[0]. O(n^2),\n\n\t\t\t\tnextNode = GlobalView[x]['0'][0]\n\t\t\t\tr=requests.put(\"http://\"+nextNode+\"/shiftKeysAdd\", data={\"newHashID\":newHashID})\n\t\t\t\tr.raise_for_status() # was missing the call parentheses, so it never raised\n\t\t\t\tif r.status_code == 200:\n\t\t\t\t\treturn 1\n\n\t#other cases, next node is x +1\n\telse:\n\t\tfor x in range(len(HashIDs)):\n\t\t\tif newHashID == str(HashIDs[x]):\n\t\t\t\tprint(\"damn right\")\n\t\t\t\tnewIPID = x\n\t\t\t\tnextNode = x+1\n\t\t\t\tfor y in GlobalView:\n\t\t\t\t\tif str(HashIDs[nextNode]) == y:\n\t\t\t\t\t\tfor n in GlobalView[str(HashIDs[nextNode])]['0']:\n\t\t\t\t\t\t\tif IPPORT == n:\n\t\t\t\t\t\t\t\toldKeys=[]\n\t\t\t\t\t\t\t\tfor z in KvStore:\n\t\t\t\t\t\t\t\t\tif consistentHash(z)%sysSize in GlobalView[newHashID]['1']:\n\t\t\t\t\t\t\t\t\t\trequests.put(\"http://\"+newIP+\"/kvs/\"+z, data = {\"val\":KvStore[z]})\n\t\t\t\t\t\t\t\t\t\toldKeys.append(z)\n\t\t\t\t\t\t\t\tfor k in oldKeys:\n\t\t\t\t\t\t\t\t\tKvStore.pop(k, None)\n\t\t\t\t\t\t\t\treturn 1\n\t\t\t\t\t\tnextNode = GlobalView[y]['0'][0]\n\t\t\t\t\t\tr=requests.put(\"http://\"+nextNode+\"/shiftKeysAdd\", data={\"newHashID\":newHashID})\n\t\t\t\t\t\tr.raise_for_status()\n\t\t\t\t\t\tif r.status_code == 200:\n\t\t\t\t\t\t\treturn 1\n\n\n#if newView == None, then this instance is the coordinator/primary broadcaster\n#if all response status codes are 200 then we are good\n#UPDATE: If some partition is not full yet, simply append to the partition, else create new partition\ndef UpdateViewAdd(newIP, newView, newGlobalView):\n\tglobal VIEW, GlobalView\n\thashID=0\n\tfillingPartiton=0\n\tif newView != None:\n\t\tVIEW = json.loads(newView)\n\t\tGlobalView = json.loads(newGlobalView)\n\t\treturn\n\n\t\t#the Triplets that are returned should already be redistributed\n\n\t#this node is in charge of partitioning,redistributing and broadcasting\n\tif newView == None:\n\t\tVIEW.append(newIP)\n\t\taccepted = 1\n\t\tfor y in GlobalView:\n\t\t\tprint(\"THIS IS Y:\"+ str(y))\n\t\t\tif len(GlobalView[y]['0']) < int(K):\n\t\t\t\tfillingPartiton=1\n\t\t\t\thashID=y\n\t\t\t\tGlobalView[y]['0'].append(newIP)\n\t\tif fillingPartiton == 0:\n\t\t\thashID=GetHashID(newIP, sysSize)\n\t\t\tGlobalView[str(hashID)] = {}\n\t\t\tGlobalView[str(hashID)]['0'] = []\n\t\t\tGlobalView[str(hashID)]['0'].append(newIP)\n\t\t\tGlobalView[str(hashID)]['1'] = []\n\t\t\tfor x in GlobalView:\n\t\t\t\tGlobalView[x]['1'] = GetRange(GlobalView, x, sysSize)\n\t\tfor x in VIEW:\n\t\t\t#if we hit this instance\n\t\t\tif x == IPPORT:\n\t\t\t\tcontinue\n\t\t\t#LOOP THROUGH then call Redistribute\n\t\t\t#also add a data field for the current TRIPLETS state + VIEW when sending back to the new node\n\t\t\ttry:\n\t\t\t\tr=requests.put(\"http://\"+x+\"/kvs/update_view?type=add\", data= {\"IP\":newIP,\"newView\":json.dumps(VIEW),\"newGlobalView\":json.dumps(GlobalView)}, timeout=5)\n\t\t\t\tr.raise_for_status()\n\t\t\t\tif r.status_code != 200:\n\t\t\t\t\taccepted = 0\n\t\t\t\t\treturn 0\n\t\t\texcept requests.exceptions.RequestException as e:\n\t\t\t\tprint(e)\n\t\t\t\tabort(404)\n\t\tif fillingPartiton == 0:\n\t\t\tRedistributeAdd(newIP,str(hashID), GlobalView)\n\t\tif accepted==1:\n\t\t\treturn hashID\ndef RedistributeSub(oldIP, nextNode):\n\t#print(\"redisSub\")\n\t#print(\"oldIP:\"+ oldIP)\n\t#print(\"nextNode:\"+ nextNode)\n\tif IPPORT == oldIP:\n\t\tfor x in KvStore:\n\t\t\trequests.put(\"http://\"+nextNode+\"/kvs/\"+x, data = {\"val\": KvStore[x]})\n\t\treturn 1\n\telse:\n\t\t#print(\"redis, else\")\n\t\ttry:\n\t\t\tr=requests.put(\"http://\"+oldIP+\"/shiftKeysSub\", data= 
{\"nextIP\":nextNode})\n\t\t\tr.raise_for_status\n\t\t\tif r.status_code==200:\n\t\t\t\treturn 1\n\t\texcept requests.exceptions.RequestException as e:\n\t\t\tprint(e)\n\t\t\treturn 0\n\n\ndef UpdateViewSub(oldIP, newView, newGlobalView):\n\tglobal VIEW, GlobalView\n\tshorteningPartition=0\n\tTargetHashID=0\n\tif newView != None:\n\t\t#print(newView)\n\t\tVIEW = json.loads(newView)\n\t\t#print(newTriplets)\n\t\tGlobalView = json.loads(newGlobalView)\n\t\treturn \"success\"\n\n\tfor x in GlobalView:\n\t\tfor y in GlobalView[x]['0']:\n\t\t\tif y == oldIP:\n\t\t\t\tTargetHashID=x\n\n\tif newView == None:\n\t\taccepted = 1\n\t\tnextNode=0\n\t\tHashIDs=[]\n\t\tfor x in GlobalView:\n\t\t\tHashIDs.append(int(x))\n\t\tHashIDs.sort()\n\t\tif TargetHashID == str(HashIDs[len(HashIDs)-1]):\n\t\t\tnextNode = str(HashIDs[0])\n\t\telse:\n\t\t\tfor x in range(len(HashIDs)):\n\t\t\t\tif TargetHashID == str(HashIDs[x]):\n\t\t\t\t\tnextNode = str(HashIDs[x+1])\n\t\tif len(GlobalView[TargetHashID]['0']) == 1:\n\t\t\tshorteningPartition = 0 #only one node left in that partition\n\t\telse:\n\t\t\tshorteningPartition = 1\n\t\tif shorteningPartition == 0:\n\t\t\tfor c in GlobalView:\n\t\t\t\tif c == nextNode:\n\t\t\t\t\tGlobalView[nextNode]['1'] = GlobalView[nextNode]['1'] + GlobalView[TargetHashID]['1']\n\t\t\tGlobalView.pop(TargetHashID)\n\n\t\tif shorteningPartition == 1:\n\t\t\tgivingNode=-1\n\t\t\tswapIP=-1\n\t\t\tpartitionDestruction=0\n\t\t\tswapCounter=0\n\t\t\tprevNode=-1\n\t\t\tGlobalView[TargetHashID]['0'].remove(oldIP)\n\t\t\tfor x in GlobalView:\n\t\t\t\tif x == TargetHashID:\n\t\t\t\t\tprint(x)\n\t\t\t\t\tcontinue\n\t\t\t\tprint(x)\n\t\t\t\tif len(GlobalView[x]['0'])>1:\n\t\t\t\t\tpartitionDestruction=0\n\t\t\t\t\tgivingNode=x\n\t\t\t\t\tswapIP=GlobalView[x]['0'][0]\n\t\t\t\t\tGlobalView[x]['0'].remove(swapIP)\n\t\t\t\t\tswapCounter+=1\n\t\t\t\telse:\n\t\t\t\t\tpartitionDestruction=1\n\t\t\t\tif partitionDestruction==0:\n\t\t\t\t\tprint(\"partitionDestruction==0\")\n\t\t\t\t\tif swapCounter == 1:\n\t\t\t\t\t\tGlobalView[TargetHashID]['0'].append(swapIP)\n\t\t\t\t\telse:\n\t\t\t\t\t\tGlobalView[prevNode]['0'].append(swapIP)\n\t\t\t\tif partitionDestruction ==1:\n\t\t\t\t\t#need to find next node of x .. 
give keys\n\t\t\t\t\t#then append ip to prevNode, then break\n\t\t\t\t\tprint(\"x is:\"+x)\n\t\t\t\t\tnextNode2=0\n\t\t\t\t\tif swapCounter == 0:\n\t\t\t\t\t\tGlobalView[TargetHashID]['0'].append(GlobalView[x]['0'][0])\n\t\t\t\t\tif swapCounter > 0:\n\t\t\t\t\t\tGlobalView[prevNode]['0'].append(GlobalView[x]['0'][0])\n\t\t\t\t\tif x == str(HashIDs[len(HashIDs)-1]):\n\t\t\t\t\t\tprint(\"here?1\")\n\t\t\t\t\t\tnextNode2 = HashIDs[0]\n\t\t\t\t\telse:\n\t\t\t\t\t\tfor y in range(len(HashIDs)):\n\t\t\t\t\t\t\tprint(\"here?2\")\n\t\t\t\t\t\t\tprint(HashIDs[y])\n\t\t\t\t\t\t\tif x == str(HashIDs[y]):\n\t\t\t\t\t\t\t\tnextNode2 = HashIDs[y+1]\n\t\t\t\t\tGlobalView[str(nextNode2)]['1'] = GlobalView[str(nextNode2)]['1'] + GlobalView[str(x)]['1']\n\t\t\t\t\tRedistributeSub(GlobalView[x]['0'][0], GlobalView[str(nextNode2)]['0'][0])\n\t\t\t\t\tGlobalView.pop(x)\n\t\t\t\t\tbreak\n\t\t\t\tprevNode=x\n\t\tVIEW.remove(oldIP)\n\n\t\tfor x in VIEW:\n\t\t\t#if we hit this instance\n\t\t\tif x == IPPORT:\n\t\t\t\tcontinue\n\t\t\t#LOOP THROUGH then call Redistribute\n\t\t\t#also add a data field for the current TRIPLETS state + VIEW when sending back to the new node\n\t\t\ttry:\n\t\t\t\tr=requests.put(\"http://\"+x+\"/kvs/update_view?type=remove\", data= {\"IP\":oldIP,\"newView\":json.dumps(VIEW),\"newGlobalView\":json.dumps(GlobalView)}, timeout=5)\n\t\t\t\tr.raise_for_status\n\t\t\t\tif r.status_code != 200:\n\t\t\t\t\taccepted = 0\n\t\t\t\t\treturn 0\n\t\t\texcept requests.exceptions.RequestException as e:\n\t\t\t\tprint(e)\n\t\t\t\tabort(404)\n\t\tif shorteningPartition == 0:\n\t\t\t#next node should be GlobalView[nextNode]['0'][0]\n\t\t\tRedistributeSub(oldIP, nextNode)\n\t\tif accepted==1:\n\t\t\treturn 1\n\ndef ForwardRequest(IP, key, method, data):\n\n\tmain = \"http://\"+str(IP)+\"/kvs/\"+str(key)\n\tprint(\"the forward IP is:\"+str(IP))\n\tprint(\"the key is:\"+ key)\n\tif method == 'GET':\n\t\ttry:\n\t\t\tr = requests.get(main, timeout=5) # if cannot connect in 5 seconds, jump to exception.\n\t\t\tr.raise_for_status()\n\t\t\treturn r.content\n\t\texcept requests.exceptions.RequestException as e:\n\t\t\tprint(e)\n\t\t\tresponse = jsonify(msg=\"value not found\", value=1)\n\t\t\tresponse.status_code = 404\n\t\t\treturn response\n\n\tif method == 'PUT':\n\t\ttry:\n\t\t\tif \"val\" not in request.form:\n\t\t\t\tabort(400)\n\t\t\tr = requests.put(main, data = {\"val\":data}, timeout=5)\n\t\t\tr.raise_for_status()\n\t\t\treturn r.content\n\t\texcept requests.exceptions.RequestException as e:\n\t\t\tprint(e)\n\t\t\treturn r.content\n\n\tif method == 'DELETE':\n\t\ttry:\n\t\t\tr = requests.delete(main, timeout=5)\n\t\t\tr.raise_for_status()\n\t\t\treturn r.content\n\t\texcept requests.exceptions.RequestException as e:\n\t\t\tprint(e)\n\t\t\treturn r.content\n\n\n#TEST function: return the details of a node, see the distribution of keys\n@app.route('/test/view_initialize')\ndef view_initialize():\n\tresponse = jsonify(msg=\"this IP is\" + IPPORT, view= str(VIEW) , GlobalView= str(GlobalView), K=K, myHashID=thisHashID)\n\treturn response\n\n\n\n# first check if key is in range; if not forward to the correct IP\n@app.route('/kvs/', methods=['GET','PUT','DELETE'])\ndef kvs(key=0):\n\tglobal thisHashID, GlobalView, position\n\tCorrectNode=0\n\tCorrectIP=0\n\tif sys.getsizeof(key) > 250:\n\t\tprint(\"too big of a key\")\n\t\tabort(400)\n\t\t#if we have a view update call the above function based on type request arg\n\tif request.method == 'PUT' and key=='update_view':\n\t\tif request.args.get(\"type\") == 
'add':\n\t\t\tif \"ip_port\" in request.form:\n\t\t\t\taccepted = UpdateViewAdd(request.form.get(\"ip_port\"), None, None)\n\t\t\t\tif int(accepted) >= 0:\n\t\t\t\t\tresponse = jsonify(msg=\"success\",number_of_partitions=len(GlobalView), partition_id=accepted)\n\t\t\t\t\treturn response\n\t\t\t\t\treturn \"not all nodes responded\"\n\t\t\tif \"newView\" in request.form:\n\t\t\t\tUpdateViewAdd(None, request.form.get(\"newView\"), request.form.get(\"newGlobalView\"))\n\t\t\t\tresponse = jsonify(msg=\"update success from\" + IPPORT)\n\t\t\t\tresponse.status_code=200\n\t\t\t\treturn response\n\t\tif request.args.get(\"type\") == 'remove':\n\t\t\tif \"ip_port\" in request.form:\n\t\t\t\taccepted = UpdateViewSub(request.form.get(\"ip_port\"), None, None)\n\t\t\t\tif accepted == 1:\n\t\t\t\t\tresponse = jsonify(msg=\"success\", number_of_partitions=len(GlobalView))\n\t\t\t\t\treturn response\n\t\t\t\treturn \"not all nodes responded\"\n\n\t\t\tif \"newView\" in request.form:\n\t\t\t\tUpdateViewSub(None, request.form.get(\"newView\"), request.form.get(\"newGlobalView\"))\n\t\t\t\tresponse = jsonify(msg=\"update success from\" + IPPORT)\n\t\t\t\tresponse.status_code=200\n\t\t\t\treturn response\n\n\n\tif thisHashID == -1:\n\t\tprint(\"Global View is:\"+ str(GlobalView))\n\t\tfor x in GlobalView:\n\t\t\tfor y in GlobalView[x]['0']:\n\t\t\t\tif y == IPPORT:\n\t\t\t\t\tthisHashID = x\n\tprint('THE KEY IS:'+ key)\n\tif request.method == 'GET' and key == 'get_partition_id':\n\t\tresponse = jsonify(msg='success', partition_id=thisHashID)\n\t\treturn response\n\tif request.method == 'GET' and key == 'get_all_partition_ids':\n\t\tListofIDs = []\n\t\tfor x in GlobalView:\n\t\t\tListofIDs.append(x)\n\t\tresponse = jsonify(msg='success', partition_id_list = ListofIDs)\n\t\treturn response\n\tif request.method == 'GET' and key == 'get_partition_members':\n\t\tmembers =[]\n\t\tID = request.form.get('partition_id')\n\t\tID = str(ID)\n\t\tfor x in GlobalView:\n\t\t\tif ID == x:\n\t\t\t\tfor y in GlobalView[x]['0']:\n\t\t\t\t\tmembers.append(y)\n\t\tresponse = jsonify(msg = 'success', partition_members=members)\n\t\treturn response\n\n\t#if we have a key, is this the correct node?\n\thashedKey = consistentHash(key)%sysSize\n\tfor x in GlobalView:\n\t\tfor y in GlobalView[x]['0']:\n\t\t\tif y == IPPORT:\n\t\t\t\tthisHashID = x\n\tif hashedKey in GlobalView[str(thisHashID)]['1']:\n\t\tCorrectNode=1\n\telse:\n\t\tfor x in GlobalView:\n\t\t\tif hashedKey in GlobalView[x]['1']:\n\t\t\t\tCorrectIP = GlobalView[x]['0'][0]\n\t\tif request.method != 'PUT':\n\t\t\tresponse = ForwardRequest(CorrectIP, key, request.method, 0)\n\t\t\treturn response\n\t\tif request.method == 'PUT':\n\t\t\tresponse = ForwardRequest(CorrectIP, key, request.method, request.form.get(\"val\"))\n\t\t\treturn response\n\n\t#if the key is on this node\n\tif CorrectNode == 1:\n\t\treplaced=0\n\t\tvalue=0\n\t\tfound=0\n\t\t#PUT request\n\t\tif request.method == 'PUT':\n\t\t\tif \"val\" not in request.form:\n\t\t\t\tabort(400)\n\t\t\tvalue = request.form.get(\"val\")\n\t\t\t#check size\n\t\t\tif sys.getsizeof(value) > 1500000:\n\t\t\t\tabort(406) # too big\n\t\t\tif key in KvStore:\n\t\t\t\treplaced=1\n\t\t\telse:\n\t\t\t\treplaced=0\n\t\t\tif replaced == 0:\n\t\t\t\tvClock = []\n\t\t\t\tfor x in range(int(K)):\n\t\t\t\t\tvClock.append(0)\n\t\t\t\tfor y in range(len(GlobalView[str(thisHashID)]['0'])):\n\t\t\t\t\tif GlobalView[str(thisHashID)]['0'][y] == IPPORT:\n\t\t\t\t\t\tposition = y\n\t\t\t\tvClock[position] += 1\n\t\t\t\tif 'causal_payload' in 
request.form:\n\t\t\t\t\tif(len(request.form.get(\"causal_payload\")) > 2):\n\t\t\t\t\t\tvClock = json.loads(request.form.get(\"causal_payload\"))\n\t\t\t\tKvStore[key] = [value,vClock,str(datetime.now())]\n\t\t\tif replaced == 1:\n\t\t\t\tKvStore[key][0] = value\n\t\t\t\tKvStore[key][1][position] += 1\n\t\t\t\tif 'causal_payload' in request.form:\n\t\t\t\t\tif(len(request.form.get(\"causal_payload\")) > 2):\n\t\t\t\t\t\tKvStore[key][1] = json.loads(request.form.get(\"causal_payload\"))\n\t\t\t\tKvStore[key][2] = str(datetime.now())\n\t\t\treturn jsonify(replaced=replaced, msg='success', owner=IPPORT,partition_id=thisHashID, causal_payload=KvStore[key][1], timestamp=KvStore[key][2])\n\n\t\t#GET request\n\t\tif len(request.args) == 0 and request.method == 'GET':\n\t\t\tif key in KvStore:\n\t\t\t\treturn jsonify(msg=\"success\", value=KvStore[key][0], owner=IPPORT,partition_id=thisHashID, causal_payload=KvStore[key][1], timestamp =json.dumps(KvStore[key][2]))\n\t\t\telse:\n\t\t\t\temptyPayload=[]\n\t\t\t\tfor x in range(int(K)):\n\t\t\t\t\temptyPayload.append(0)\n\t\t\t\tresponse = jsonify(msg=\"error\", error=\"key does not exist\",causal_payload=emptyPayload, owner=IPPORT)\n\t\t\t\tresponse.status_code = 404\n\t\t\t\treturn response\n\n\t\t#DELETE request\n\t\tif len(request.args) == 0 and request.method == 'DELETE':\n\t\t\tif key in KvStore:\n\t\t\t\tKvStore.pop(key, None)\n\t\t\t\treturn jsonify(msg=\"success\", owner=IPPORT)\n\t\t\telse:\n\t\t\t\tresponse = jsonify(msg=\"error\", error=\"key does not exist\")\n\t\t\t\tresponse.status_code = 404\n\t\t\t\treturn response\n\t\tabort(400) # if we get here for some reason\n\n@app.route('/shiftKeysAdd', methods=['PUT'])\ndef shiftKeysAdd():\n\tnewHashID = request.form.get(\"newHashID\")\n\toldKeys=[]\n\tfor z in KvStore:\n\t\tif consistentHash(z)%sysSize in GlobalView[newHashID]['1']:\n\t\t\trequests.put(\"http://\"+GlobalView[newHashID]['0'][0]+\"/kvs/\"+z, data = {\"val\":KvStore[z]})\n\t\t\toldKeys.append(z)\n\tfor k in oldKeys:\n\t\tKvStore.pop(k, None)\n\tresponse = jsonify(msg= \"success\")\n\tresponse.status_code=200\n\treturn response\n\n@app.route('/shiftKeysSub', methods=['PUT'])\ndef shiftKeysSub():\n\tnextIP = request.form.get(\"nextIP\")\n\tfor z in KvStore:\n\t\trequests.put(\"http://\"+nextIP+\"/kvs/\"+z, data = {\"val\":KvStore[z]})\n\tresponse = jsonify(msg= \"success\")\n\tresponse.status_code=200\n\treturn response\n\ndef Gossip():\n\tglobal thisHashID, GlobalView\n\n\twhile(1):\n\t\tif thisHashID == -1:\n\t\t\t#print(\"Global View is:\"+ str(GlobalView))\n\t\t\tfor x in GlobalView:\n\t\t\t\tfor y in GlobalView[x]['0']:\n\t\t\t\t\tif y == IPPORT:\n\t\t\t\t\t\tthisHashID = x\n\t\ttime.sleep(3) # gossip every 3 seconds\n\t\t#print(\"Gossip from:\"+ IPPORT)\n\t\tif thisHashID == -1:\n\t\t\tcontinue\n\t\tif str(thisHashID) not in GlobalView:\n\t\t\tcontinue\n\t\tfor x in GlobalView[str(thisHashID)]['0']:\n\t\t\tif x == IPPORT:\n\t\t\t\tcontinue\n\t\t\tfor k in KvStore:\n\t\t\t\tif consistentHash(k)%sysSize not in GlobalView[str(thisHashID)]['1']:\n\t\t\t\t\tcontinue\n\t\t\t\t#print(k)\n\t\t\t\t#print('x is:'+ x)\n\t\t\t\treq = requests.get(\"http://\"+x+\"/kvs/\"+k)\n\t\t\t\tdata= json.loads(req.text)\n\t\t\t\t#print(\"the data is:\" + str(json.loads(req.text)))\n\t\t\t\tmyVectorClock = KvStore[k][1]\n\t\t\t\totherVectorClock=[]\n\t\t\t\totherVectorClock = data['causal_payload']\n\n\t\t\t\t#print(\"The error is:\"+ data['msg'])\n\t\t\t\t#print(\"The payload is:\"+ 
str(otherVectorClock[0])+\",\"+str(otherVectorClock[1]))\n\t\t\t\tgreaterCount=0\n\t\t\t\tlessThanCount=0\n\t\t\t\tequalCount=0\n\t\t\t\tfor a in range(len(myVectorClock)):\n\t\t\t\t\tif myVectorClock[a] > otherVectorClock[a]:\n\t\t\t\t\t\tgreaterCount+=1\n\t\t\t\t\telif myVectorClock[a] < otherVectorClock[a]:\n\t\t\t\t\t\tlessThanCount+=1\n\t\t\t\t\telse:\n\t\t\t\t\t\tequalCount += 1\n\t\t\t\t\t#concurrent\n\t\t\t\t\t#eventually stale nodes will get here\n\t\t\t\tif greaterCount!=0 and lessThanCount!=0:\n\t\t\t\t\tif KvStore[k][2] <= data['timestamp']:\n\t\t\t\t\t\tKvStore[k][0] = data['value']\n\t\t\t\t\t\tKvStore[k][1] = otherVectorClock\n\t\t\t\t\t\tKvStore[k][2] = data['timestamp']\n\t\t\t\t\telse:\n\t\t\t\t\t\trequests.put(\"http://\"+x+\"/kvs/\"+k, data={'val':KvStore[k][0],'causal_payload':json.dumps(KvStore[k][1])})\n\t\t\t\tif greaterCount == 0 and lessThanCount>0:\n\t\t\t\t\tKvStore[k][0] = data['value']\n\t\t\t\t\tKvStore[k][1] = otherVectorClock\n\t\t\t\t\tKvStore[k][2] = data['timestamp']\n\t\t\t\tif lessThanCount == 0 and greaterCount>0:\n\t\t\t\t\trequests.put(\"http://\"+x+\"/kvs/\"+k, data={'val':KvStore[k][0],'causal_payload':json.dumps(KvStore[k][1])})\n\n\n\nGossipThread = threading.Thread(target=Gossip)\nGossipThread.start()\nif __name__ == '__main__':\n\t# Bind to PORT if defined, otherwise default to 5000.\n\t#port = int(os.environ.get('PORT', 5000))\n\tapp.run(host='0.0.0.0', port=MAINPORT, threaded=True)\n","sub_path":"hw4/HW4server.py","file_name":"HW4server.py","file_ext":"py","file_size_in_byte":20475,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"474147589","text":"DO_NOT_SPAM = True\n\n\ndef lprint(*args, **kwargs):\n\tif not DO_NOT_SPAM:\n\t\tprint(*args, **kwargs)\n\nlprint('Importing time')\nimport time\n\nstime = time.time()\n\nif DO_NOT_SPAM:\n\timport sys\n\tsys.stderr = open('/dev/null','w')\n\nlprint('Importing skimage')\nfrom skimage import io, transform\nlprint('Importing numpy')\nimport numpy as np\nlprint('Importing model')\nfrom INeuronet import GetModel\nlprint('Importing dataset')\nfrom keras.datasets import mnist\nlprint('Importing datagen')\nfrom keras.preprocessing.image import ImageDataGenerator as datagenc\nlprint('Completed in')\nlprint(time.time()-stime, 'sec')\n\nlprint('Loading data')\n(X_train, y_train), (X_test, y_test) = mnist.load_data()\n\nlprint('Loading model')\nmodel = GetModel.getmodel()\n\nlprint('Loading weights')\nmodel.load_weights('Weights.hd5')\n\nlprint('Setting up datagen')\ndatagen = datagenc(\n    rotation_range = 20,\n    zoom_range = [0.9, 1.2],\n    width_shift_range=0.2,\n    height_shift_range=0.2, \n)\n\"\"\"\nflow = datagen.flow(X_train.reshape(-1, 28, 28, 1), y_train, batch_size = 1)\nfor i in flow:\n\tio.imshow(i[0].reshape(28, 28)/255)\t\n\tprint(i[1][0], model.predict_classes(i[0], verbose=0)[0])\n\tio.show()\n\t\"\"\"\n\n\ncounter = 0\nprint('Predicting')\npredicted = model.predict(X_test.reshape(10000, 28, 28, 1))\n\nprint('Shape is', predicted.shape)\nfor ip in range(10000):\n\tresultsl = predicted[ip]\n\t\n\tcmax = 0\n\tfor i, v in enumerate(resultsl):\n\t\tif v > resultsl[cmax]:\n\t\t\tcmax = i\n\tif y_test[ip] == cmax:\n\t\tcounter+=1\t\t\n\nprint(counter)\nprint(counter / 10000, 'part of all')\n\nbarcount = int(counter/250)\n\nprint('['+'='*barcount+' '*(40 - barcount)+']')\n\nio.use_plugin('matplotlib')\n\ntime.sleep(1)\n\n\n\n\n","sub_path":"Workspace.py","file_name":"Workspace.py","file_ext":"py","file_size_in_byte":1636,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"104300613","text":"'''\nCreated on 9/1/20\n\n@author: dulanj\n'''\nimport tensorflow as tf\n\nfrom src.model import Model\n\n\nclass TransferLearnModel(Model):\n    def __init__(self):\n        pass\n\n    @staticmethod\n    def get_model(verbose):\n        model = tf.keras.applications.ResNet50(include_top=False, weights='imagenet', input_shape=(120, 120, 3))\n        input_layer = model.inputs\n        x = model.layers[-1].output\n        x = tf.keras.layers.GlobalAveragePooling2D()(x)\n        x = tf.keras.layers.Dense(32, activation='softmax')(x)\n        x = tf.keras.layers.Dropout(0.3)(x)\n        x = tf.keras.layers.Dense(12, activation='softmax')(x)\n        model = tf.keras.Model(input_layer, x)\n        if verbose == 1:\n            print(model.summary())\n\n        # compile model\n        model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n\n        return model\n\n\nif __name__ == \"__main__\":\n    obj = TransferLearnModel()\n    obj.get_model(verbose=1)\n","sub_path":"src/transfer_learn_model.py","file_name":"transfer_learn_model.py","file_ext":"py","file_size_in_byte":954,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"326422872","text":"\n# coding: utf-8\n\n# Compatibility layer between Python 2 and Python 3 (must come before any other import)\nfrom __future__ import print_function\n\n# In[1]:\n\n\nimport os\nimport csv\nimport gc\nimport pickle as pickle\nimport time\nfrom imblearn.over_sampling import SMOTE, ADASYN\nfrom imblearn.combine import SMOTEENN, SMOTETomek\nimport collections\nfrom sklearn import svm\n\n\n# In[2]:\n\n\nfrom matplotlib import pyplot as plt\nimport numpy as np\nfrom numpy import genfromtxt\nimport pandas as pd\nimport seaborn as sns\nfrom scipy import stats\n\n\n# In[3]:\n\n\nfrom sklearn import metrics\nfrom sklearn.metrics import classification_report\nfrom sklearn import preprocessing\nfrom sklearn.utils import shuffle\n\n\n# In[4]:\n\n\nimport keras\nfrom keras.models import Sequential, load_model, model_from_json, model_from_yaml\nfrom keras.layers import Dense, Dropout, Flatten, Reshape, GlobalAveragePooling1D\nfrom keras.layers import Conv2D, MaxPooling2D, Conv1D, MaxPooling1D\nfrom keras.utils import np_utils\n\n\n# In[5]:\n\n\ndef show_basic_dataframe_info(dataframe, preview_rows=20):\n\n\n    # Shape and how many rows and columns\n    print(\"Number of columns in the dataframe: %i\" % (dataframe.shape[1]))\n    print(\"Number of rows in the dataframe: %i\\n\" % (dataframe.shape[0]))\n    print(\"First 20 rows of the dataframe:\\n\")\n    # Show first 20 rows\n    print(dataframe.head(preview_rows))\n\n\n# In[6]:\n\n\nprint(\"\\n--- Load, inspect and transform data ---\\n\")\n\n# Load data set containing all the data from csv\ndf_train = pd.read_csv('Data/ECG_data_train_full_noise_removed.txt')\ndf_test = pd.read_csv('Data/ECG_data_test_full_noise_removed.txt')\n\nLABELS=['Normal', 'Patient']\n\nle = preprocessing.LabelEncoder()\ndf_train['2'] = le.fit_transform(df_train['1'].values.ravel())\ndf_test['2'] = le.fit_transform(df_test['1'].values.ravel())\n#print('..........df_train...........')\n#show_basic_dataframe_info(df_train, 20)\n#print('..........df_test...........')\n#show_basic_dataframe_info(df_test, 20)\n\n\n# In[7]:\n\n\n##.....Number of samples in each class......##\np = 0\nn = 0\nfor i in df_train['2']:\n    if(i == 0):\n        n+=1\n    else:\n        p+=1\n
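# Class balance check; df_train['2'].value_counts() would report the same counts in a single call.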
\nprint('Normal = ', n)\nprint('Patients = ', p)\n\n\n# In[8]:\n\n\nfor _ in range(5):\n    df_train = shuffle(df_train)\n\n#dataset = np.array(df_train)\n#x_train = dataset[:,3:29003]\n#y_train = dataset[:,2]\n\n# %%\n\n\n# In[9]:\n\n\n# Perform the oversampling method over the descriptor data\ndef perform_oversampling(oversamp_features_pickle_name, oversamp_label_name, oversamp_method, tr_features, tr_labels):\n    start = time.time()\n    cpu_threads = 7\n    \n    print(oversamp_features_pickle_name)\n\n    if True:\n        print(\"Oversampling method:\\t\" + oversamp_method + \" ...\")\n        # 1 SMOTE\n        if oversamp_method == 'SMOTE': \n            #kind={'borderline1', 'borderline2', 'svm'}\n            svm_model = svm.SVC(C=0.001, kernel='rbf', degree=3, gamma='auto', decision_function_shape='ovo')\n            oversamp = SMOTE(ratio='auto', random_state=None, k_neighbors=5, m_neighbors=10, out_step=0.5, kind='svm', svm_estimator=svm_model, n_jobs=1)\n\n            # TRY SMOTE WITH ANOTHER KIND\n\n        elif oversamp_method == 'SMOTE_regular_min':\n            oversamp = SMOTE(ratio='minority', random_state=None, k_neighbors=5, m_neighbors=10, out_step=0.5, kind='regular', svm_estimator=None, n_jobs=1)\n\n        elif oversamp_method == 'SMOTE_regular':\n            oversamp = SMOTE(ratio='auto', random_state=None, k_neighbors=5, m_neighbors=10, out_step=0.5, kind='regular', svm_estimator=None, n_jobs=1)\n        \n        elif oversamp_method == 'SMOTE_border':\n            oversamp = SMOTE(ratio='auto', random_state=None, k_neighbors=5, m_neighbors=10, out_step=0.5, kind='borderline1', svm_estimator=None, n_jobs=1)\n        \n        # 2 SMOTEENN\n        elif oversamp_method == 'SMOTEENN': \n            oversamp = SMOTEENN()\n\n        # 3 SMOTE TOMEK\n        # NOTE: http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.65.3904&rep=rep1&type=pdf\n        elif oversamp_method == 'SMOTETomek':\n            oversamp = SMOTETomek()\n\n        # 4 ADASYN\n        elif oversamp_method == 'ADASYN':\n            oversamp = ADASYN(ratio='auto', random_state=None, n_neighbors=5, n_jobs=cpu_threads)#, k=None\n        \n        tr_features_balanced, tr_labels_balanced = oversamp.fit_sample(tr_features, tr_labels)\n        # TODO Write data oversampled!\n        print(\"Writing oversampled data at: \" + oversamp_features_pickle_name + \" ...\")\n        np.savetxt(oversamp_label_name, tr_labels_balanced.astype(int), '%.0f') \n        f = open(oversamp_features_pickle_name, 'wb')\n        pickle.dump(tr_features_balanced, f, 2)\n        f.close()\n\n        end = time.time()\n\n        count = collections.Counter(tr_labels_balanced)\n        print(\"Oversampling balance\")\n        print(count)\n        print(\"Time required: \" + str(format(end - start, '.2f')) + \" sec\" )\n        \n        metadata = {'Oversampling method':oversamp_method, 'count':count, 'Time_required':str(format(end - start, '.2f')) + \" sec\"}\n        \n        file = open('oversamp/' + oversamp_method +'_metadata' + '.pickle', 'wb')\n        pickle.dump(metadata, file, 2)\n        file.close()\n\n    return tr_features_balanced, tr_labels_balanced\n\n\n# In[10]:\n\n\ndef show_confusion_matrix(validations, predictions):\n    matrix = metrics.confusion_matrix(validations, predictions)\n    plt.figure(figsize=(6, 4))\n    sns.heatmap(matrix,\n                cmap=\"coolwarm\",\n                linecolor='white',\n                linewidths=1,\n                xticklabels=LABELS,\n                yticklabels=LABELS,\n                annot=True,\n                fmt=\"d\")\n    plt.title(\"Confusion Matrix\")\n    plt.ylabel(\"True Label\")\n    plt.xlabel(\"Predicted Label\")\n    plt.show()\n\n\n# In[ ]:\n\n\ndb_path = 'oversamp/'\noversamp_features_name = 'oversamp_ECG_data'\noversamp_metd = ['SMOTEENN','SMOTE','SMOTETomek','SMOTE_border','SMOTE_regular','SMOTE_regular_min','ADASYN']\n\nfor oversamp_method in oversamp_metd:\n\n    dataset = np.array(df_train)\n    x_train = dataset[:,3:29003]\n
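    # Columns 3:29003 hold the raw ECG samples; column 2 is the label encoded earlier with LabelEncoder.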
y_train = dataset[:,2]\n\n ##%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n \n oversamp_features_pickle_name = db_path + oversamp_features_name + '_' + oversamp_method + '.pickle'\n oversamp_label_name = 'oversamp/' + oversamp_features_name + '_' + oversamp_method +'_label'+ '.csv'\n\n ##%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n \n if os.path.isfile(oversamp_features_pickle_name and oversamp_label_name):\n print('Loading oversampled data...')\n \n f = open(oversamp_features_pickle_name, 'rb')\n # disable garbage collector \n gc.disable()# this improve the required loading time!\n x_train = pickle.load(f)\n gc.enable()\n f.close()\n \n y_train = genfromtxt(oversamp_label_name)\n \n file = open(db_path + oversamp_method +'_metadata' + '.pickle', 'rb')\n # disable garbage collector \n gc.disable()# this improve the required loading time!\n metadata = pickle.load(file)\n gc.enable()\n file.close()\n \n print('Oversampling method :', metadata['Oversampling method'], '\\n','Oversampling balance\\n', metadata['count'], '\\n','Time required: ',metadata['Time_required'] )\n \n else:\n print('\\nPerforming oversamp......')\n x_train = x_train.astype(\"float32\")\n y_train = y_train.astype(\"float32\")\n x_train, y_train = perform_oversampling(oversamp_features_pickle_name, oversamp_label_name, oversamp_method, x_train, y_train)\n\n ##%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\n print(\"\\n--- Reshape data to be accepted by Keras ---\\n\")\n\n # Inspect x data\n print('x_train shape: ', x_train.shape)\n\n print(x_train.shape[0], 'training samples')\n\n # Inspect y data\n print('y_train shape: ', y_train.shape)\n\n\n # Set input & output dimensions\n num_time_periods, num_sensors = x_train.shape[0], x_train.shape[1]\n num_classes = le.classes_.size \n print(list(le.classes_))\n print('Number of classes : ',num_classes)\n\n # Convert type for Keras otherwise Keras cannot process the data\n x_train = x_train.astype(\"float32\")\n y_train = y_train.astype(\"float32\")\n\n ##%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\n # One-hot encoding of y_train labels (only execute once!)\n y_train = np_utils.to_categorical(y_train, num_classes)\n print('New y_train shape: ', y_train.shape)\n\n\n ##%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\n print(x_train.shape[1])\n \n ##%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n \n \n print(\"\\n--- Create neural network model ---\\n\")\n\n # 1D CNN neural network\n model_m = Sequential()\n model_m.add(Reshape((x_train.shape[1], 1), input_shape=(x_train.shape[1],)))\n model_m.add(Conv1D(150, 5, activation='relu', input_shape=(x_train.shape[1], 1)))\n model_m.add(Conv1D(150, 5, activation='relu'))\n model_m.add(MaxPooling1D(2))\n model_m.add(Dropout(0.25))\n model_m.add(Conv1D(160, 3, activation='relu'))\n model_m.add(Conv1D(160, 3, activation='relu'))\n model_m.add(MaxPooling1D(2))\n model_m.add(Dropout(0.25))\n model_m.add(Conv1D(160, 3, activation='relu'))\n model_m.add(Conv1D(160, 3, activation='relu'))\n model_m.add(GlobalAveragePooling1D())\n model_m.add(Dropout(0.25))\n model_m.add(Dense(500, activation='relu'))\n model_m.add(Dense(800, activation='relu'))\n model_m.add(Dense(num_classes, activation='softmax'))\n print(model_m.summary())\n\n ##%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\n\n model_name = 'Trained_oversamp_model/'+ 'conv_1D_' + oversamp_method + 
'.h5'\n    json_file_name = 'Trained_oversamp_model/'+ 'conv_1D_' + oversamp_method + '.json'\n    weight_file_name = 'Trained_oversamp_model/'+ 'conv_1D_' + oversamp_method + '_weight' + '.h5'\n    history_name = 'Trained_oversamp_model/'+ 'conv_1D_' + oversamp_method + '_history' + '.pickle'\n\n    if os.path.isfile(json_file_name) and os.path.isfile(weight_file_name):\n    \n        print(\"Loading model from disk.....\")\n        json_file = open(json_file_name, 'r')\n        loaded_model_json = json_file.read()\n        json_file.close()\n        model_m = model_from_json(loaded_model_json)\n        \n        model_m.load_weights(weight_file_name)\n        \n        file = open(history_name, 'rb')\n        # disable garbage collector \n        gc.disable()# this improves the required loading time!\n        history = pickle.load(file)\n        gc.enable()\n        file.close()\n        \n        print('Loss: ',history.history['loss'])\n        print('Acc: ',history.history['acc'], )\n        \n        print('Val_loss: ',history.history['val_loss'])\n        print('Val_acc: ',history.history['val_acc'])\n        \n        print(\"\\n--- Compile model ---\\n\")\n\n        model_m.compile(loss='categorical_crossentropy',\n                    optimizer='adam', metrics=['accuracy'])\n        \n    else:\n        \n        print(\"\\n--- Compile model ---\\n\")\n\n        model_m.compile(loss='categorical_crossentropy',\n                        optimizer='adam', metrics=['accuracy'])\n        \n        # The EarlyStopping callback monitors training accuracy:\n        # if it fails to improve for one epoch (patience=1),\n        # training stops early\n        callbacks_list = [\n            keras.callbacks.ModelCheckpoint(\n                filepath='temp/best_model.{epoch:02d}-{val_loss:.2f}.h5',\n                monitor='val_loss', save_best_only=True),\n            keras.callbacks.EarlyStopping(monitor='acc', patience=1)\n        ]\n\n        # Hyper-parameters\n        BATCH_SIZE = 65\n        EPOCHS = 50\n\n        # Enable validation to use ModelCheckpoint and EarlyStopping callbacks.\n        history = model_m.fit(x_train,\n                            y_train,\n                            batch_size=BATCH_SIZE,\n                            epochs=EPOCHS,\n                            callbacks=callbacks_list,\n                            validation_split=0.2,\n                            verbose=1)\n\n        # %%\n\n        # Saving model on disk\n        print(\"Saving model to disk.....\")\n        model_m.save(model_name)  # creates a HDF5 file\n        model_json = model_m.to_json()\n        with open(json_file_name, \"w\") as json_file:\n            json_file.write(model_json)\n        model_m.save_weights(weight_file_name)\n\n        file = open(history_name, 'wb')\n        pickle.dump(history, file, 2)\n        file.close()\n\n\n    ##%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n    \n    # summarize history for accuracy and loss\n    print(\"\\n--- Learning curve of model training ---\\n\")\n    plt.figure(figsize=(6, 4))\n    plt.plot(history.history['acc'], \"g--\", label=\"Accuracy of training data\")\n    plt.plot(history.history['val_acc'], \"g\", label=\"Accuracy of validation data\")\n    plt.plot(history.history['loss'], \"r--\", label=\"Loss of training data\")\n    plt.plot(history.history['val_loss'], \"r\", label=\"Loss of validation data\")\n    plt.title('Model Accuracy and Loss')\n    plt.ylabel('Accuracy and Loss')\n    plt.xlabel('Training Epoch')\n    plt.ylim(0)\n    plt.legend()\n    plt.show()\n\n\n    \n    print(\"\\n--- Check against test data ---\\n\")\n\n    # Extract features and labels for the test data set\n    dataset = np.array(df_test)\n    x_test = dataset[:,3:29003]\n    y_test = dataset[:,2]\n    \n    ##%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\n    # Set input_shape / reshape for Keras\n    y_test = np_utils.to_categorical(y_test, num_classes)\n    x_test = x_test.astype(\"float32\")\n    y_test = y_test.astype(\"float32\")\n\n\n    score = model_m.evaluate(x_test, y_test, verbose=1)\n\n    print(\"\\nAccuracy on test data: %0.2f\" % score[1])\n    print(\"\\nLoss on test data: %0.2f\" % score[0])\n\n    # 
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%55\n \n print(\"\\n--- Confusion matrix for test data ---\\n\")\n\n y_pred_test = model_m.predict(x_test)\n # Take the class with the highest probability from the test predictions\n max_y_pred_test = np.argmax(y_pred_test, axis=1)\n max_y_test = np.argmax(y_test, axis=1)\n\n show_confusion_matrix(max_y_test, max_y_pred_test)\n\n # %%%%%%%\n\n print(\"\\n--- Classification report for test data ---\\n\")\n\n print(classification_report(max_y_test, max_y_pred_test))\n\n","sub_path":"ECG_classification/ECG_classification_without_voting/raw data_1D_CNN.py","file_name":"raw data_1D_CNN.py","file_ext":"py","file_size_in_byte":14342,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"95827463","text":"import time\nimport random\n\ndef main():\n nullSpace = 4\n huttSpace = 3\n houseSpace = 0\n mansionSpace = 0\n foodSpace = 1\n woodSpace = 1\n stoneSpace = 1\n metalSpace = 0\n swordSpace = 0\n\n swordPlace = 0\n \n nullWorker = 0\n foodWorker = 1\n woodWorker = 1\n stoneWorker = 1\n metalWorker = 0\n swordWorker = 0\n\n swordFighter = 0\n\n food = 0\n wood = 0\n stone = 0\n ore = 0\n metal = 0\n sword = 0\n \n def printLine():\n i = 0\n while i < 50:\n print(\"-\", end = \"\")\n i += 1\n print(\"\")\n\n def displayBuilding():\n print(\"BUILDINGS\")\n print(\"Cleared Land: \" + str(nullSpace))\n print(\"Hutts: \" + str(huttSpace))\n print(\"Houses: \" + str(houseSpace))\n print(\"Mansions: \" +str(mansionSpace))\n print(\"Farms: \" + str(foodSpace))\n print(\"Lumbermills: \" + str(woodSpace))\n print(\"Mines: \" + str(stoneSpace))\n print(\"Blacksmiths: \" + str(metalSpace))\n print(\"Swordsmiths: \" + str(swordSpace))\n print(\"Swordman Garrison: \" + str(swordPlace))\n\n def displayWorker():\n print(\"WORKERS\")\n print(\"Untrained Labourers: \" + str(nullWorker))\n print(\"Trained Ploughmen: \" + str(foodWorker))\n print(\"Trained Axemen: \" + str(woodWorker))\n print(\"Trained Quarrymen: \" + str(stoneWorker))\n print(\"Trained Forgers: \" + str(metalWorker))\n print(\"Trained Bladesmiths: \" + str(swordWorker))\n print(\"Trained Scrimers: \" + str(swordFighter))\n\n def displayResource():\n print(\"RESOURCES\")\n print(\"Food Stored: \" + str(food))\n print(\"Wood Stored: \" + str(wood))\n print(\"Stone Stored: \" + str(stone))\n print(\"Ore Stored: \" + str(ore))\n print(\"Metal Stored: \" + str(metal))\n print(\"Swords Stored: \" + str(sword))\n\n def displayRequirement():\n print(\"BUILDING REQUIREMENTS\")\n print(\"Cleared Land --> Farm\")\n print(\"Cleared Land --> Lumbermill\")\n print(\"Cleared Land --> Mine\")\n print(\"Cleared Land --> Hutt\")\n print(\"Hutt --> House\")\n print(\"House --> Mansion\")\n print(\"Cleared Land --> Blacksmith\")\n print(\"Blacksmith --> Swordsmith\")\n print(\"Cleared Land --> Swordman Garrison\")\n printLine()\n print(\"WORKER REQUIREMENTS\")\n print(\"Labourer --> Ploughman\")\n print(\"Labourer --> Axeman\")\n print(\"Labourer --> Quarryman\")\n print(\"Labourer --> Forger\")\n print(\"Forger --> Bladesmith\")\n print(\"Labourer + Sword --> Scrimer\")\n\n def resourceRate(spaces, workers):\n if spaces * 10 < workers:\n return spaces\n else:\n return workers\n\n def working(wording):\n i = 0\n print(wording, end = \"\")\n time.sleep(0.25)\n while i < 24:\n print(\".\", end = \"\")\n time.sleep(0.25)\n i+=1\n print(\"\")\n \n displayRequirement()\n printLine()\n\n selection = input(\"What do you want to do? 
\")\n printLine()\n while \"quit\" not in selection.lower():\n foodRate = resourceRate(foodSpace, foodWorker)\n woodRate = resourceRate(woodSpace, woodWorker)\n stoneRate = resourceRate(stoneSpace, stoneWorker)\n metalRate = resourceRate(metalSpace, metalWorker)\n if \"building\" in selection.lower():\n displayBuilding()\n elif \"worker\" in selection.lower():\n displayWorker()\n elif \"resource\" in selection.lower():\n displayResource()\n elif \"requirement\" in selection.lower():\n displayRequirement()\n elif \"work\" in selection.lower():\n if \"farm\" in selection.lower():\n if foodWorker > 0:\n if foodSpace > 0:\n amount = input(\"How many times do you want to work the Farms? \")\n for i in range(0, int(amount)):\n working(\"Harvesting Food\")\n food += foodRate\n print(\"You harvested \" + str(foodRate) + \" food\")\n else:\n print(\"No Farms\")\n else:\n print(\"No Ploughmen\")\n elif \"lumbermill\" in selection.lower():\n if woodWorker > 0:\n if woodSpace > 0:\n amount = input(\"How many times do you want to work the Lumbermills? \")\n for i in range(0, int(amount)):\n working(\"Chopping Wood\")\n wood += woodRate\n print(\"You harvested \" + str(woodRate) + \" wood\")\n else:\n print(\"No Lumbermills\")\n else:\n print(\"No Axemen\")\n elif \"mine\" in selection.lower():\n if stoneWorker > 0:\n if stoneSpace > 0:\n amount = input(\"How many times do you want to work the Mines? \")\n for i in range(0, int(amount)):\n working(\"Mining Stone\")\n stone += stoneRate\n print(\"You harvested \" + str(stoneRate) + \" stone\")\n oreRate = random.randint(0, 100)\n if oreRate <= 25:\n ore += stoneRate\n print(\"You harvested \" + str(stoneRate) + \" ore\")\n else:\n print(\"No Mines\")\n else:\n print(\"No Quarrymen\")\n elif \"blacksmith\" in selection.lower():\n if metalWorker > 0:\n if metalSpace > 0:\n amount = input(\"How many times do you want to work the Blacksmiths? \")\n for i in range(0, int(amount)):\n if ore > 0:\n working(\"Smelting Ore\")\n if ore >= metalRate:\n metal += metalRate\n ore -= metalRate\n print(\"You have made \" + str(metalRate) + \" metal\")\n else:\n metal += ore\n ore -= ore\n print(\"You have made \" + str(ore) + \" metal\")\n else:\n print(\"No Ore\")\n else:\n print(\"No Blacksmiths\")\n else:\n print(\"No Forgers\")\n elif \"swordsmith\" in selection.lower():\n if swordWorker > 0:\n if metalSpace > 0:\n amount = input(\"How many times do you want to work the Swordsmiths? \")\n for i in range(0, int(amount)):\n if metal > 0:\n if wood > 0:\n working(\"Making a Sword\")\n sword += 1\n metal -= 1\n wood -= 1\n print(\"You have made a sword\")\n else:\n print(\"No Wood\")\n else:\n print(\"No Metal\")\n else:\n print(\"No Swordsmiths\")\n else:\n print(\"No Bladesmiths\")\n else:\n print(\"Work where???\")\n elif \"build\" in selection.lower():\n if \"hutt\" in selection.lower():\n amount = input(\"How many Hutts do you want to build? \")\n for i in range(0, int(amount)):\n if nullSpace > 0:\n working(\"Building Hutt\")\n nullSpace -= 1\n huttSpace += 1\n nullWorker += 1\n print(\"Built a Hutt and increased total Population by 1\")\n else:\n print(\"No more Cleared Lands\")\n break\n elif \"farm\" in selection.lower():\n amount = input(\"How many Farms do you want to build? 
\")\n for i in range(0, int(amount)):\n if nullSpace > 0:\n working(\"Building Farm\")\n nullSpace -= 1\n foodSpace += 1\n print(\"Built a Farm\")\n else:\n print(\"No more Cleared Lands\")\n break\n elif \"lumbermill\" in selection.lower():\n amount = input(\"How many Lumbermills do you want to build? \")\n for i in range(0, int(amount)):\n if nullSpace > 0:\n working(\"Building Lumbermill\")\n nullSpace -= 1\n woodSpace += 1\n print(\"Built a Lumbermill\")\n else:\n print(\"No more Cleared Lands\")\n break\n elif \"mine\" in selection.lower():\n amount = input(\"How many Mines do you want to build? \")\n for i in range(0, int(amount)):\n if nullSpace > 0:\n working(\"Building Mine\")\n nullSpace -= 1\n stoneSpace += 1\n print(\"Built a Mine\")\n else:\n print(\"No more Cleared Land\")\n break\n elif \"blacksmith\" in selection.lower():\n amount = input(\"How many Blacksmiths do you want to build? \")\n for i in range(0, int(amount)):\n if nullSpace > 0:\n working(\"Building Blacksmith\")\n nullSpace -= 1\n metalSpace += 1\n print(\"Built a Blacksmith\")\n else:\n print(\"No more Cleared Land\")\n break\n elif \"sword\" in selection.lower() and \"garrison\" in selection.lower():\n amount = input(\"How many Swordmen Garrisons do you want to build? \")\n for i in range(0, int(amount)):\n if nullSpace > 0:\n working(\"Building Swordmen Garrison\")\n nullSpace -= 1\n swordPlace += 1\n print(\"Built a Swordmen Garrison\")\n else:\n print(\"No more Cleared Land\")\n break\n else:\n print(\"Build what??????\")\n elif \"upgrade\" in selection.lower():\n if \"blacksmith\" in selection.lower():\n amount = input(\"How many Blacksmiths do you want to upgrade? \")\n for i in range(0, int(amount)):\n if metalSpace > 0:\n working(\"Upgrading Blacksmith\")\n metalSpace -= 1\n swordSpace += 1\n print(\"Built a Swordsmith\")\n else:\n print(\"No more Blacksmiths\")\n break\n elif \"hutt\" in selection.lower():\n amount = input(\"How many Hutts do you want to upgrade? \")\n for i in range(0, int(amount)):\n if huttSpace > 0:\n working(\"Upgrading Hutt\")\n huttSpace -= 1\n houseSpace += 1\n nullWorker += 1\n print(\"Built a House and increased total Population by 1\")\n else:\n print(\"No more Hutts\")\n break\n elif \"house\" in selection.lower():\n amount = input(\"How many Houses do you want to upgrade? \")\n for i in range(0, int(amount)):\n if houseSpace > 0:\n working(\"Upgrading House\")\n houseSpace -= 1\n mansionSpace += 1\n nullWorker += 2\n print(\"Built a Mansion and increased total Population by 2\")\n else:\n print(\"No more Houses\")\n break\n else:\n print(\"Upgrade what???\")\n elif \"train\" in selection.lower():\n if \"plough\" in selection.lower():\n amount = input(\"How many Ploughmen do you want to train? \")\n for i in range(0, int(amount)):\n if nullWorker > 0:\n working(\"Training a Ploughman\")\n nullWorker -= 1\n foodWorker += 1\n print(\"Trained a Ploughman\")\n else:\n print(\"No more Untrained Labourers\")\n break\n elif \"axe\" in selection.lower():\n amount = input(\"How many Axemen do you want to train? \")\n for i in range(0, int(amount)):\n if nullWorker > 0:\n working(\"Training a Axeman\")\n nullWorker -= 1\n woodWorker += 1\n print(\"Trained a Axeman\")\n else:\n print(\"No more Untrained Labourers\")\n break\n elif \"quarry\" in selection.lower():\n amount = input(\"How many Quarrymen do you want to train? 
\")\n for i in range(0, int(amount)):\n if nullWorker > 0:\n working(\"Training a Quarryman\")\n nullWorker -= 1\n stoneWorker += 1\n print(\"Trained a Quarryman\")\n else:\n print(\"No more Untrained Labourers\")\n break\n elif \"forger\" in selection.lower():\n amount = input(\"How many Forgers do you want to train? \")\n for i in range(0, int(amount)):\n if nullWorker > 0:\n working(\"Training a Forger\")\n nullWorker -= 1\n metalWorker += 1\n print(\"Trained a Forger\")\n else:\n print(\"No more Untrained Labourers\")\n break\n elif \"bladesmith\" in selection.lower():\n amount = input(\"How many Bladesmith do you want to train? \")\n for i in range(0, int(amount)):\n if metalWorker > 0:\n working(\"Training a Bladesmith\")\n metalWorker -= 1\n swordWorker += 1\n print(\"Trained a Bladesmith\")\n else:\n print(\"No more Forgers\")\n break\n elif \"scrimer\" in selection.lower():\n amount = input(\"How many Scrimers do you want to train? \")\n for i in range(0, int(amount)):\n if nullWorker > 0:\n if sword > 0:\n working(\"Training a Scrimer\")\n nullWorker -= 1\n swordFighter += 1\n sword -= 1\n print(\"Trained a Scrimer\")\n else:\n print(\"No more Swords\")\n break\n else:\n print(\"No more Untrained Labourers\")\n break\n else:\n print(\"Train whom?????\")\n elif \"clear\" in selection.lower():\n amount = input(\"How many Lands do you want to clear? \")\n for i in range(0, int(amount)):\n working(\"Clearing\")\n nullSpace += 1\n print(\"Cleared more land\")\n printLine()\n selection = input(\"What do you want to do? \")\n printLine()\n if \"quit\" in selection.lower():\n quit()\nmain()\n","sub_path":"Civilian Housing.py","file_name":"Civilian Housing.py","file_ext":"py","file_size_in_byte":16513,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"279966850","text":"import argparse\nfrom spherenet import OmniMNIST, OmniFashionMNIST\nfrom spherenet import SphereConv2D, SphereMaxPool2D\nimport torch\nfrom torch import nn\nimport torch.nn.functional as F\nimport numpy as np\nfrom torchvision import datasets, transforms\n\n\nclass SphereNet(nn.Module):\n def __init__(self):\n super(SphereNet, self).__init__()\n self.conv1 = SphereConv2D(1, 32, stride=1)\n self.pool1 = SphereMaxPool2D(stride=2)\n self.conv2 = SphereConv2D(32, 64, stride=1)\n self.pool2 = SphereMaxPool2D(stride=2)\n\n self.fc = nn.Linear(14400, 10)\n\n def forward(self, x):\n x = F.relu(self.pool1(self.conv1(x)))\n x = F.relu(self.pool2(self.conv2(x)))\n x = x.view(-1, 14400) # flatten, [B, C, H, W) -> (B, C*H*W)\n x = self.fc(x)\n return x\n\n\nclass Net(nn.Module):\n def __init__(self):\n super(Net, self).__init__()\n self.conv1 = nn.Conv2d(1, 32, kernel_size=3)\n self.conv2 = nn.Conv2d(32, 64, kernel_size=3)\n self.fc = nn.Linear(64 * 13 * 13, 10)\n\n def forward(self, x):\n x = F.relu(F.max_pool2d(self.conv1(x), 2))\n x = F.relu(F.max_pool2d(self.conv2(x), 2))\n x = x.view(-1, 64 * 13 * 13) # flatten, [B, C, H, W) -> (B, C*H*W)\n x = self.fc(x)\n return x\n\n\nclass NetMNIST(nn.Module):\n def __init__(self):\n super(NetMNIST, self).__init__()\n self.conv1 = nn.Conv2d(1, 32, kernel_size=3)\n self.conv2 = nn.Conv2d(32, 64, kernel_size=3)\n self.fc = nn.Linear(64 * 5 * 5, 10)\n\n def forward(self, x):\n x = F.relu(F.max_pool2d(self.conv1(x), 2))\n x = F.relu(F.max_pool2d(self.conv2(x), 2))\n x = x.view(-1, 64 * 5 * 5) # flatten, [B, C, H, W) -> (B, C*H*W)\n x = self.fc(x)\n return x\n\n\ndef test(args, model, device, test_loader):\n model.eval()\n test_loss = 0\n 
correct = 0\n with torch.no_grad():\n for data, target in test_loader:\n data, target = data.to(device), target.to(device)\n if data.dim() == 3:\n data = data.unsqueeze(1) # (B, H, W) -> (B, C, H, W)\n output = model(data)\n test_loss += F.cross_entropy(output, target).item() # sum up batch loss\n pred = output.max(1, keepdim=True)[1] # get the index of the max log-probability\n correct += pred.eq(target.view_as(pred)).sum().item()\n\n test_loss /= len(test_loader.dataset)\n print('\\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\\n'.format(\n test_loss, correct, len(test_loader.dataset),\n 100. * correct / len(test_loader.dataset)))\n\n\ndef main():\n # Training settings\n parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument('--data', type=str, default='MNIST',\n help='dataset for training, options={\"FashionMNIST\", \"MNIST\"}')\n parser.add_argument('--batch-size', type=int, default=128, metavar='N',\n help='input batch size for training')\n parser.add_argument('--test-batch-size', type=int, default=128, metavar='N',\n help='input batch size for testing')\n parser.add_argument('--epochs', type=int, default=100, metavar='N',\n help='number of epochs to train')\n parser.add_argument('--optimizer', type=str, default='adam',\n help='optimizer, options={\"adam, sgd\"}')\n parser.add_argument('--lr', type=float, default=1e-4, metavar='LR',\n help='learning rate')\n parser.add_argument('--momentum', type=float, default=0.9, metavar='M',\n help='SGD momentum')\n parser.add_argument('--no-cuda', action='store_true', default=False,\n help='disables CUDA training')\n parser.add_argument('--seed', type=int, default=1, metavar='S',\n help='random seed')\n parser.add_argument('--log-interval', type=int, default=1, metavar='N',\n help='how many batches to wait before logging training status')\n parser.add_argument('--save-interval', type=int, default=1, metavar='N',\n help='how many epochs to wait before saving model weights')\n args = parser.parse_args()\n use_cuda = not args.no_cuda and torch.cuda.is_available()\n\n torch.manual_seed(args.seed)\n\n device = torch.device('cuda' if use_cuda else 'cpu')\n\n kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}\n\n np.random.seed(args.seed)\n if args.data == 'FashionMNIST':\n test_dataset = OmniFashionMNIST(fov=120, flip=True, h_rotate=True, v_rotate=True, img_std=255, train=False,\n fix_aug=True)\n elif args.data == 'MNIST':\n test_dataset = OmniMNIST(fov=120, flip=True, h_rotate=True, v_rotate=True, train=False, fix_aug=True)\n\n test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=args.test_batch_size, shuffle=False, **kwargs)\n\n load_transform = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.1307,), (0.3081,))\n ])\n perspective_test_dataset = datasets.MNIST('datas/MNIST', train=False, download=True, transform=load_transform)\n perspective_test_loader = torch.utils.data.DataLoader(perspective_test_dataset, batch_size=args.test_batch_size,\n shuffle=False, **kwargs)\n\n\n perspective_model = NetMNIST()\n perspective_model_state_dict = torch.load(\n '/home/iago/workspace/SphereNet-pytorch/datas/models/model_perspective.pkl')\n perspective_model.load_state_dict(perspective_model_state_dict)\n perspective_model = perspective_model.to(device).eval()\n\n sphere_model = SphereNet()\n sphere_state_dict = torch.load('/home/iago/workspace/SphereNet-pytorch/datas/models/sphere_model.pkl')\n 
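    # Load the pretrained SphereNet weights from disk before evaluation.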
sphere_model.load_state_dict(sphere_state_dict)\n sphere_model = sphere_model.to(device).eval()\n\n model = Net()\n model_state_dict = torch.load('/home/iago/workspace/SphereNet-pytorch/datas/models/model.pkl')\n model.load_state_dict(model_state_dict)\n model = model.to(device).eval()\n\n # SphereCNN\n print('{} Sphere CNN {}'.format('=' * 10, '=' * 10))\n test(args, sphere_model, device, test_loader)\n\n # Conventional CNN\n print('{} Conventional CNN {}'.format('=' * 10, '=' * 10))\n test(args, model, device, test_loader)\n\n # Perspective CNN\n print('{} Perspective CNN {}'.format('=' * 10, '=' * 10))\n test(args, perspective_model, device, perspective_test_loader)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"inference.py","file_name":"inference.py","file_ext":"py","file_size_in_byte":6624,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"445608288","text":"\"\"\"NOTE: this works, but is wrong.\n\nDecodes a sentence using the Viterbi Algorithm\n\nInput: a sentence (str)\nInitialization: set pi(0, , ) = 1\n\n\"\"\"\n\nfrom nltk.tokenize import TreebankWordTokenizer\n\nfrom preprocessing import import_wsj\nfrom parameters_class import Parameters\n\nclass Decoder:\n\n\tdef __init__(self, corpus_name):\n\t\t\"\"\"Initialize parameters using named corpus\"\"\"\n\t\tself.corpus = import_wsj(corpus_name)\n\t\tself.params = Parameters(self.corpus)\n\n\tdef decode(self, sentence):\n\t\t\"\"\"Decode a sentence\n\n\t\treturns a tuple with lists of tokens, tags, and probabilities\n\t\tprobabilities are not cumulative\n\t\t\"\"\"\n\t\ttokens = self.prep_sentence(sentence)\n\t\ttags = ['', '']\n\t\tprobs = []\n\n\t\tfor i in range(len(tokens)):\n\t\t\tnew_tag_prob = self.next_tag(i, tokens, tags, probs)\n\t\t\ttags.append(new_tag_prob[0])\n\t\t\tprobs.append(new_tag_prob[1])\n\n\t\tprobs.append(self.params.q('', tags[-1], tags[-2])) # Adds final tag and its prob\n\t\ttags.append('')\n\n\t\treturn (tokens, tags, probs)\n\n\n\tdef next_tag(self, i, tokens, tags, prev_probs):\n\t\t\"\"\"Returns the tag for a token and the latest probability\n\n\t\tInput: index, tokens, tags, probabilities\n\t\t\"\"\"\n\t\tprobs = []\n\n\t\tfor tag in self.params.tags:\n\t\t\tprobs.append(self.get_prob(tags[i], tags[i+1], tag, tokens[i])) # (u, v, s, x)\n\n\t\tif all(prob == 0.0 for prob in probs): # True if token not in training data\n\t\t\tprobs = []\n\n\t\t\tfor tag in self.params.tags:\n\t\t\t\tprobs.append(self.get_prob(tags[i], tags[i+1], tag, ''))\n\n\t\tmax_prob = max(probs)\n\t\tmax_prob_index = probs.index(max_prob)\n\n\t\treturn (self.params.tags[max_prob_index], max_prob)\n\n\tdef get_prob(self, u, v, s, x):\n\n\t\tq = self.params.q(s, u, v)\n\n\t\te = self.params.e(x, s)\n\n\t\treturn q * e\n\n\tdef prep_sentence(self, sentence):\n\t\t\"\"\"Tokenizes a sentence string\"\"\"\n\t\tsentence_list = TreebankWordTokenizer().tokenize(sentence)\n\t\treturn sentence_list\n\n\n\n","sub_path":"src_deprecated/decoder_class_bad.py","file_name":"decoder_class_bad.py","file_ext":"py","file_size_in_byte":1880,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"387997176","text":"(2**2) == 4\nTrue\n\n\ndef greeting(name):\n if name == \"Taylor\":\n return \"Welcome back Taylor!\"\n else:\n return \"Hello there, \" + name\n\n\nprint(greeting(\"Taylor\"))\nprint(greeting(\"John\"))\n\nif number > 11: \n print(0)\nelif number != 10:\n print(1)\nelif number >= 20 or number < 12:\n print(2)\nelse:\n 
print(3)\n\n\nprint(\"A dog\" + \"A mouse\")\nprint(9999+8888 + 100*100)\n\n\ndef calculate_storage(filesize):\n block_size = 4096\n # Use floor division to calculate how many blocks are fully occupied\n full_blocks = (filesize//4096)\n # Use the modulo operator to check whether there's any remainder\n partial_block_remainder = (filesize%4096)\n # Depending on whether there's a remainder or not, return\n # the total number of bytes required to allocate enough blocks\n # to store your data.\n if partial_block_remainder > 0:\n return 4096*(full_blocks+1)\n return 4096*(full_blocks)\n\nprint(calculate_storage(1)) # Should be 4096\nprint(calculate_storage(4096)) # Should be 4096\nprint(calculate_storage(4097)) # Should be 8192\nprint(calculate_storage(6000)) # Should be 8192","sub_path":"week2/conditionalsQuiz.py","file_name":"conditionalsQuiz.py","file_ext":"py","file_size_in_byte":1108,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"373231223","text":"import os\nimport sys\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport scipy.signal.windows as sp\n\ndef bracewell_buneman(xarray, length, log2length):\n '''\n bracewell-buneman bit reversal function\n inputs: xarray is array; length is array length; log2length=log2(length).\n output: bit reversed array xarray.\n '''\n muplus = int((log2length + 1) / 2)\n mvar = 1\n reverse = np.zeros(length, dtype=int)\n upper_range = muplus + 1\n for _ in np.arange(1, upper_range):\n for kvar in np.arange(0, mvar):\n tvar = 2 * reverse[kvar]\n reverse[kvar] = tvar\n reverse[kvar + mvar] = tvar + 1\n mvar = mvar + mvar\n if (log2length & 0x01):\n mvar = mvar / 2\n for qvar in np.arange(1, mvar):\n nprime = qvar - mvar\n rprimeprime = reverse[int(qvar)] * mvar\n for pvar in np.arange(0, reverse[int(qvar)]):\n nprime = nprime + mvar\n rprime = rprimeprime + reverse[pvar]\n temp = xarray[int(nprime)]\n xarray[int(nprime)] = xarray[int(rprime)]\n xarray[int(rprime)] = temp\n return xarray\n\n\ndef dif_fft0(xarray, twiddle, log2length):\n '''\n radix-2 dif fft\n '''\n xarray = xarray.astype(np.complex_)\n b_p = 1\n nvar_p = xarray.size\n twiddle_step_size = 1\n for _ in range(0, log2length): # pass loop\n nvar_pp = int(nvar_p / 2)\n base_e = 0\n for _ in range(0, b_p): # block loop\n base_o = int(base_e + nvar_pp)\n for nvar in range(0, nvar_pp): # butterfly loop\n\n evar = xarray[int(base_e + nvar)] + xarray[int(base_o + nvar)]\n if nvar == 0:\n ovar = xarray[int(base_e + nvar)] - xarray[int(base_o + nvar)]\n else:\n twiddle_factor = nvar * twiddle_step_size\n ovar = (xarray[int(base_e + nvar)] \\\n - xarray[int(base_o + nvar)]) * twiddle[twiddle_factor]\n xarray[int(base_e + nvar)] = evar\n xarray[int(base_o + nvar)] = ovar\n base_e = base_e + nvar_p\n b_p = b_p * 2\n nvar_p = int(nvar_p / 2)\n twiddle_step_size = 2 * twiddle_step_size\n xarray = bracewell_buneman(xarray, xarray.size, log2length)\n return xarray\n\n\ndef test(time, yarray, samplefreq):\n '''\n Set up plot, call FFT function, plot result.\n Called from testbench function.\n Inputs time:time vector, yarray: array, samplefreq: sampling rate.\n Outputs: none.\n '''\n plt.subplot(2, 1, 1)\n plt.title('Test of DIF FFT with 10 Hz Sine Input')\n plt.plot(time, yarray, 'k-')\n plt.xlabel('time')\n plt.ylabel('amplitude')\n plt.subplot(2, 1, 2)\n ylength = len(yarray) # length of the signal\n kvar = np.arange(ylength)\n tvar = ylength / samplefreq\n frq = kvar / tvar # two-sided frequency range\n freq = frq[list(range(int(ylength/2)))] 
# one-sided frequency range\n    #e^-2j * np.pi * np.arange(0, 0.5, 1. / ylength, dtype=np.complex_)\n    twiddle = np.exp(-2j * np.pi * np.arange(0, 0.5, 1. / ylength, dtype=np.complex_))\n    y2array = abs(dif_fft0(yarray, twiddle, \\\n        int(np.log2(ylength))) / ylength) # fft normalized magnitude\n    y3array = y2array[list(range(ylength // 2))]\n    markerline, stemlines, baseline = plt.stem(freq, y3array, '--')\n    plt.xlabel('freq (Hz)')\n    plt.ylabel('|Y(freq)|')\n    plt.setp(markerline, 'markerfacecolor', 'b')\n    plt.setp(baseline, 'color', 'b', 'linewidth', 2)\n    plt.show()\n    return None\n\n\ndef testbench(signal, sampling_frequency, window):\n    '''\n    Call test function.\n    :param signal:\n    :return:\n    '''\n    samplinginterval = 1.0 / sampling_frequency\n    time = np.arange(0, 1, samplinginterval) # ts\n    yarray = np.array(signal)\n    yarray = yarray * window\n    test(time, yarray, sampling_frequency)\n\n\ndef read_from_file():\n    signal = []\n    path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'input.txt')\n    with open(path, 'r') as f:\n        for line in f:\n            signal.append(float(line.rstrip()))\n    sampling_frequency = signal[-1]\n    signal.pop()\n    if not check_for_power_2(len(signal)):\n        signal = fill_signal_with_zeros(signal)\n        sampling_frequency = len(signal)\n    else:\n        pass\n\n    return signal, sampling_frequency\n\n\ndef check_for_power_2(length):\n    '''\n    Check whether a number is a power of two:\n    True if it is,\n    False if it is not.\n    '''\n    return length != 0 and length & (length - 1) == 0\n\n\ndef fill_signal_with_zeros(signal):\n    '''Pad the array with zeros up to the nearest power of two'''\n    while not check_for_power_2(len(signal)):\n        signal.append(0)\n    return signal\n\n\ndef main():\n    signal, sampling_frequency = read_from_file()\n    number_of_window = sys.argv[1]\n    if number_of_window == '1':\n        window = np.hanning(sampling_frequency)\n    elif number_of_window == '2':\n        window = np.blackman(sampling_frequency)\n    elif number_of_window == '3':\n        window = np.bartlett(sampling_frequency)\n    else:\n        # Rectangular window; numpy does not provide one\n        window = sp.boxcar(sampling_frequency)\n    testbench(signal, sampling_frequency, window)\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5382,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"96284741","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\nfrom .models import Client\nfrom rest_framework import viewsets\nfrom .serializers import ClientSerializer\n#\nfrom rest_framework import status\nfrom rest_framework.decorators import api_view\nfrom rest_framework.response import Response\nfrom susu.models import Client\n\n\n@api_view(['GET', 'POST'])\ndef client_list(request, format=None):\n    \"\"\"\n    List all clients, or create a new client.\n    \"\"\"\n    if request.method == 'GET':\n        clients = Client.objects.all()\n        serializer = ClientSerializer(clients, many=True)\n        return Response(serializer.data)\n\n    elif request.method == 'POST':\n        serializer = ClientSerializer(data=request.data)\n        if serializer.is_valid():\n            serializer.save()\n            return Response(serializer.data, status=status.HTTP_201_CREATED)\n        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n@api_view(['GET', 'PUT', 'DELETE'])\ndef client_detail(request, pk, format=None):\n    \"\"\"\n    Retrieve, update or delete a client instance.\n    \"\"\"\n    try:\n        client = Client.objects.get(pk=pk)\n    except Client.DoesNotExist:\n        return Response(status=status.HTTP_404_NOT_FOUND)\n\n    if request.method == 'GET':\n        serializer = ClientSerializer(client)\n        return Response(serializer.data)\n\n    elif request.method == 'PUT':\n        serializer = ClientSerializer(client, data=request.data)\n        if serializer.is_valid():\n            serializer.save()\n            return Response(serializer.data)\n        return Response(status=status.HTTP_400_BAD_REQUEST)\n\n    elif request.method == 'DELETE':\n        client.delete()\n        return Response(status=status.HTTP_204_NO_CONTENT)\n\n# class ClientAllViewSet(viewsets.ModelViewSet):\n#     \"\"\"\n#     API endpoint that allows Clients to be viewed or edited.\n#     \"\"\"\n#     queryset = Client.objects.all().order_by('lastName')\n#     serializer_class = ClientSerializer\n\n","sub_path":"susu/viewsFunctionBased.py","file_name":"viewsFunctionBased.py","file_ext":"py","file_size_in_byte":1984,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"291413397","text":"# -*- coding: utf-8 -*-\n\nfrom flask import Flask\nfrom flask.ext.restful import Api\nfrom flask_restful_swagger import swagger\n\nfrom tests.apps import config\nfrom tests.apps.shared_code import Todo, MarshalWithExample\n\n__author__ = 'sobolevn'\n\n\napi_meta = {\n    'apiVersion': '0.1',\n    'resourcePath': '/',\n    'produces': [\n        'application/json',\n        'text/html',\n    ],\n    'api_spec_url': '/api/spec',\n    'description': 'A Basic API',\n}\n\napp = Flask(__name__, static_folder='../static')\napp.config.from_object(config)\n\napi = swagger.docs(Api(app), **api_meta)\napi.add_resource(Todo, '/todo/')\napi.add_resource(MarshalWithExample, '/marshal')\n\nif __name__ == '__main__':\n    app.run()\n","sub_path":"tests/apps/basic_app.py","file_name":"basic_app.py","file_ext":"py","file_size_in_byte":712,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"271973773","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# class to collect and manage the metadata (airmass, seeing, ccd temp, time, \n# zero point, etc.) of a given dataset into a dataframe\n#\n# Author: M. Giomi (matteo.giomi@desy.de)\n\nimport os, tqdm, logging\nfrom astropy.io import fits\nimport pandas as pd\n\nfrom dataslicer.dataset_base import dataset_base\nfrom dataslicer.df_utils import downcast_df, check_col\n\n\ndef load_IRSA_meta(df, IRSA_meta_cols = ['airmass'], expid_col = 'EXPID', rcid = None, logger = None):\n    \"\"\"\n    retrieve metadata for each file from the IRSA archive. It uses the EXPID\n    header keyword to search for the right metadata and then merges the dfs.\n    \n    Requires IRSA account and ztfquery.\n    \n    Parameters:\n    -----------\n    \n    df: `pandas.DataFrame` or `dataslicer.srcdf`\n        dataframe-like object to which you want to add IRSA metadata.\n    \n    IRSA_meta_cols: `list`\n        names of the IRSA metadata you want to add to the fits file metadata.\n        If None, all the IRSA columns will be added.\n    \n    expid_col: `str`\n        name of metadata column containing the expid.\n\n    rcid: int\n        if given, only metadata for this readout-channel will be retrieved\n    \n    logger: `logging.logger`\n        logger instance. 
If none, default will be used.\n \"\"\"\n \n if logger is None:\n logging.basicConfig(level = logging.INFO)\n logger = logging.getLogger(__name__)\n \n # check which are the expid we have in this object\n check_col(expid_col, df)\n expids = pd.unique(df[expid_col])\n logger.info(\"found {} unique exposures ({}) in metadata.\".format(len(expids), expid_col))\n expids_str = [\"%d\"%expid for expid in expids]\n \n # query IRSA\n from ztfquery import query\n zquery = query.ZTFQuery()\n\n if rcid is None:\n query_str = \"expid+IN+({})\".format(\",\".join(expids_str))\n logger.info(\"querying IRSA using: {}\".format(query_str))\n else:\n query_str = \"expid+IN+({}) and rcid+=+{}\".format(\",\".join(expids_str),rcid)\n logger.info(\"querying IRSA (for readout channel {}) using: {}\".format(rcid, query_str))\n \n zquery.load_metadata(kind=\"sci\", sql_query=\"%s\"%query_str)\n logger.info(\"retrieved {} metadata rows\".format(len(zquery.metatable)))\n\n print(zquery.metatable)\n\n # select which IRSA columns to add\n if not IRSA_meta_cols is None:\n logger.info(\"selecting IRSA meta columns: {}\".format(\", \".join(IRSA_meta_cols)))\n IRSA_meta_cols.append('expid') # you need this to join the dfs\n metatable = zquery.metatable[IRSA_meta_cols]\n else:\n logger.info(\"using all IRSA meta columns.\")\n metatable = zquery.metatable\n logger.info(\"adding the following columns to metadata dataframe: {}\".format(\", \".join(metatable.columns.values)))\n \n # join the dataframe\n metatable = metatable.rename(columns={'expid': expid_col})\n clean_metatable = metatable[list(metatable.columns[~metatable.columns.duplicated()])]\n clean_metatable = clean_metatable.drop_duplicates()\n df = df.merge(clean_metatable, on = expid_col)\n logger.info(\"joined IRSA meta to dataframe. The following columns are now available: {}\".format(\", \".join(df.columns.values)))\n return df\n \n\nclass metadata(dataset_base):\n \"\"\"\n class to collect and manage the metadata of a given dataset.\n \"\"\"\n \n def to_csv(self, **args):\n \"\"\"\n \"\"\"\n self._to_csv(tag = 'metadata', **args)\n\n\n def read_csv(self, **args):\n \"\"\"\n \"\"\"\n self._read_csv(tag = 'metadata', **args)\n\n\n def load_header_meta(self, header_keys = None, downcast = True, **getheader_args):\n \"\"\"\n go and read the header of the fits files and create a \n dataframe with the desired header keywords. The dataframe\n will be stored in this object's metadata attribute. It will have \n one row for each file, one column for each header key plus the\n absolute path of the file.\n \n Parameters:\n -----------\n \n header_keys: `list`, or None\n list of header key names that will form the columns of the \n dataframe. 
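# --- Self-contained toy version of the IRSA join performed in load_IRSA_meta
# above: rename the key column, drop duplicate metadata rows, then merge.
# All values are dummies standing in for the real ztfquery metatable.
import pandas as pd

df = pd.DataFrame({"EXPID": [1, 1, 2], "flux": [0.1, 0.2, 0.3]})
meta = pd.DataFrame({"expid": [1, 2, 2], "airmass": [1.1, 1.3, 1.3]})
meta = meta.rename(columns={"expid": "EXPID"}).drop_duplicates()
print(df.merge(meta, on="EXPID"))   # one airmass attached to every row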
If None, a default list of keywords will be used.\n primitive wildchar support is possible (e.g: APCOR* will take\n all the keywords with APCOR in the name).\n \n downcast: `bool`\n if True, uses pd.to_numeric to downcast ints and floats columns\n in order to reduce RAM usage.\n \n getheader_args: `kwargs`\n arguments to be passed to astropy.io.fits.getheader.\n You should have at least one specifying the extension!\n \"\"\"\n # init the logger\n self._set_logger(__name__)\n self.logger.info(\"Reading headers for metadata..\")\n \n # default keywords\n if header_keys is None:\n header_keys = [\n 'NMATCHES', 'MAGZP', 'MAGZPUNC', 'MAGZPRMS', 'CLRCOEFF', 'CLRCOUNC',\n 'ZPCLRCOV', 'PCOLOR', 'SATURATE', 'ZPMED', 'ZPAVG', 'ZPRMSALL',\n 'CLRMED', 'CLRAVG', 'CLRRMS', 'FIXAPERS', 'APCOR*', 'APCORUN*',\n 'FIELDID', 'CCDID', 'QID', 'FILTERID', 'RCID', 'OBSMJD', 'EXPID', 'PROGRMID'\n ]\n magic_keys = [k.replace('*', '') for k in header_keys]\n \n # loop on files and fill in the dataframe with header keywords\n rows = []\n for fitsfile in tqdm.tqdm(self.files):\n try:\n head, row = fits.getheader(fitsfile, **getheader_args), {}\n for key, val in head.items():\n if key in header_keys or any([mk in key for mk in magic_keys]):\n self.logger.debug(\"found key %s in desired header keys.\"%key)\n row[key] = val\n row['PATH'] = fitsfile\n rows.append(row)\n except OSError:\n self.logger.warning(\"skipping corrupted file %s\"%fitsfile)\n self.df = pd.DataFrame.from_records(rows)\n\n # check that you have all the keys you asked for\n df_cols = self.df.columns.values.tolist()\n for mk in magic_keys:\n if not any([mk in key for key in df_cols]):\n self.logger.warning(\"couldn't find requested key: %s in file headers.\"%mk)\n \n # add obsid column as unique identifier of the data product\n self.df['OBSID'] = (\n self.df['EXPID'].astype(str) + \n self.df['RCID'].astype(str) ).astype(int)\n \n if downcast:\n self.df = downcast_df(self.df)\n \n# self.df.set_index('OBSID', inplace = True, drop = False)\n self.logger.info(\"loaded meta data from fits headers for %d files into metadata dataframe.\"%len(self.df))\n\n \n\n\n# def get_paths(self, **querydf_args):\n# \"\"\"\n# query the dataframe and reuturn a list of file path matching the query\n# \n# Parameters:\n# -----------\n# \n# dfquery_args: \n# pandas.DataFrame.query arguments.\n# \n# Returns:\n# --------\n# list of paths\n# \"\"\"\n# return self.query_df ['PATH'].values\n\n","sub_path":"dataslicer/metadata.py","file_name":"metadata.py","file_ext":"py","file_size_in_byte":7821,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"152882750","text":"#!/usr/bin/env python3\nimport select\nimport socket\nimport sys\nimport queue\n\n\nserver = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nserver.setblocking(0)\n\nserver_address = (\"localhost\", 10000)\nprint(\"starting up on {} port {}\".format(*server_address))\nserver.bind(server_address)\n\nserver.listen(5)\n\ninputs = [server]\noutputs = []\nmessage_queues = {}\n\nwhile inputs:\n print(\"waiting for the next event\")\n readable, writeable, exceptional = select.select(inputs, outputs, inputs)\n\n for s in readable:\n if s is server:\n connection, client_address = s.accept()\n print(\" connection from\", client_address)\n connection.setblocking(0)\n inputs.append(connection)\n message_queues[connection] = queue.Queue()\n if s not in outputs:\n outputs.append(s)\n else:\n data = s.recv(1024)\n if s in outputs:\n outputs.remove(s)\n inputs.remove(s)\n 
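# --- Standalone sketch of the "magic key" wildcard matching used by
# load_header_meta above: a key like 'APCOR*' matches any header card whose
# name contains 'APCOR'. A plain dict stands in for an astropy FITS header.
header_keys = ['MAGZP', 'APCOR*']
magic_keys = [k.replace('*', '') for k in header_keys]
head = {'MAGZP': 26.3, 'APCOR1': 0.1, 'APCOR2': 0.2, 'SEEING': 1.4}
row = {key: val for key, val in head.items()
       if key in header_keys or any(mk in key for mk in magic_keys)}
print(row)   # {'MAGZP': 26.3, 'APCOR1': 0.1, 'APCOR2': 0.2}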
s.close()\n del message_queues[s]","sub_path":"network_communication/_select/select_echo_server.py","file_name":"select_echo_server.py","file_ext":"py","file_size_in_byte":1033,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"37547849","text":"#PlatformBotProj\nimport numpy as np\nimport gym\nimport retro\nfrom PIL import Image\nfrom io import BytesIO\nimport os\nimport datetime\nimport threading\nimport time\nimport random\n\nfrom .replay import Replay, ReplayMemory\n\n\nclass TrainerThread(threading.Thread):\n def __init__(self, bot=None, **kwargs):\n super(TrainerThread, self).__init__(**kwargs)\n self.stop_flag = False\n self.bot = bot\n self.mode = \"\"\n self.replay_memory = None\n\n def run(self):\n sleep = False\n while not self.stop_flag:\n if sleep:\n time.sleep(1)\n sleep = True\n\n if self.mode == \"replay\":\n if self.replay_memory is None:\n continue\n\n batch = self.replay_memory.sample(64)\n result = self.bot.train_value_network_batch_step(batch)\n value_loss = result[\"loss\"]\n result = self.bot.train_policy_network_batch_step(batch)\n policy_loss = result[\"loss\"]\n if random.randint(0, 100) == 0:\n self.bot.normalize_weight()\n self.bot.save_model()\n print(\"Value loss:\", value_loss)\n print(\"Policy loss:\", policy_loss)\n else:\n continue\n\n sleep = False\n\n def train_with_replay(self, replay_memory):\n self.replay_memory = replay_memory\n self.mode = \"replay\"\n\n def pause_training(self):\n self.mode = \"\"\n self.bot.save_model()\n\n def stop(self):\n self.stop_flag = True\n\n\nclass Trainer:\n def __init__(self, bot=None):\n self.bot = bot\n self.thread = TrainerThread(bot=bot)\n self.thread.start()\n\n def train_with_replays(self, replays):\n replay_memory = ReplayMemory.from_replays(replays)\n self.thread.train_with_replay(replay_memory)\n\n def pause(self):\n self.thread.pause_training()\n\n def __del__(self):\n if self.thread.isAlive():\n self.thread.stop()\n self.thread.join()\n","sub_path":"CNN PJ3 Code/platformerbot-master/PlatformerBot/trainer.py","file_name":"trainer.py","file_ext":"py","file_size_in_byte":2069,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"114942668","text":"from PyQt4.QtCore import *\nfrom PyQt4.QtGui import *\nfrom PyQt4.phonon import Phonon\n\nimport Track\n\nclass Player (QObject):\n\tdef __init__ (self, app):\n\t\tQObject.__init__ (self)\n\t\tself.app = app\n\t\t\n\t\tself.out = Phonon.AudioOutput (Phonon.MusicCategory, self.app)\n\t\tself.media = Phonon.MediaObject (self.app)\n\t\tself.media.prefinishMark = 1000\n\t\tPhonon.createPath (self.media, self.out)\n\t\n\tdef play (self, source):\n\t\tself.media.stop ()\n\t\tself.media.setCurrentSource (source)\n\t\tself.media.play ()\n\t\n\tdef enqueue (self, source):\n\t\tself.media.enqueue (source)\n","sub_path":"src/Player.py","file_name":"Player.py","file_ext":"py","file_size_in_byte":555,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"535744912","text":"from django.shortcuts import render\nfrom django.views.generic.base import View\n\nfrom apps.product.models import Product\n# Create your views here.\n\n\nclass ProductView(View):\n\n\t@staticmethod\n\tdef get(request):\n\t\tusername = request.session.get('user', '')\n\t\tproduct_list = Product.objects.all()\n\t\tcontext = {\n\t\t\t'user': username,\n\t\t\t'product_list': product_list,\n\t\t}\n\t\treturn render(request, 'product/product.html', 
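# --- Minimal sketch of the stop-flag pattern used by TrainerThread above:
# a worker loop that idles until a mode is set and stops cooperatively.
# Reduced to its skeleton; the real class dispatches replay-memory batches.
import threading
import time

class Worker(threading.Thread):
    def __init__(self):
        super(Worker, self).__init__()
        self.stop_flag = False
        self.mode = ""

    def run(self):
        while not self.stop_flag:
            if self.mode != "train":
                time.sleep(0.05)   # idle until a mode is selected
                continue
            pass                   # one unit of training work would go here

w = Worker()
w.start()
w.mode = "train"
time.sleep(0.1)
w.stop_flag = True
w.join()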
context=context)\n","sub_path":"apps/product/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":430,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"284605510","text":"\"\"\"\nUnit tests of functions within :mod:`weaver.processes.wps_package`.\n\n.. seealso::\n - :mod:`tests.functional.wps_package`.\n\"\"\"\nimport contextlib\nimport os\nimport shutil\nimport sys\nimport tempfile\nfrom collections import OrderedDict\nfrom copy import deepcopy\n\nimport mock\nimport pytest\nfrom pywps.app import WPSRequest\n\nfrom weaver.datatype import Process\nfrom weaver.exceptions import PackageExecutionError\nfrom weaver.processes.wps_package import WpsPackage, _check_package_file, _get_package_ordered_io # noqa: W0212\n\n# pylint: disable=R1729 # ignore non-generator representation employed for displaying test log results\n\n\ndef test_get_package_ordered_io_with_builtin_dict_and_hints():\n \"\"\"\n Validate that I/O are all still there in the results with their respective contents.\n\n Literal types should be modified to a dictionary with ``type`` key.\n All dictionary contents should then remain as is, except with added ``id``.\n\n .. note::\n Ordering is not mandatory, so we don't validate this.\n Also actually hard to test since employed python version running the test changes the behaviour.\n \"\"\"\n test_inputs = {\n \"id-literal-type\": \"float\",\n \"id-dict-details\": {\n \"type\": \"string\"\n },\n \"id-array-type\": {\n \"type\": {\n \"type\": \"array\",\n \"items\": \"float\"\n }\n },\n \"id-literal-array\": \"string[]\"\n }\n test_wps_hints = [\n {\"id\": \"id-literal-type\"},\n {\"id\": \"id-array-type\"},\n {\"id\": \"id-dict-with-more-stuff\"},\n {\"id\": \"id-dict-details\"},\n ]\n expected_result = [\n {\"id\": \"id-literal-type\", \"type\": \"float\"},\n {\"id\": \"id-dict-details\", \"type\": \"string\"},\n {\"id\": \"id-array-type\", \"type\": {\"type\": \"array\", \"items\": \"float\"}},\n {\"id\": \"id-literal-array\", \"type\": \"string[]\"}\n ]\n result = _get_package_ordered_io(test_inputs, test_wps_hints)\n assert isinstance(result, list) and len(result) == len(expected_result)\n # *maybe* not same order, so validate values accordingly\n for expect in expected_result:\n validated = False\n for res in result:\n if res[\"id\"] == expect[\"id\"]:\n assert res == expect\n validated = True\n if not validated:\n raise AssertionError(\"expected '{}' was not validated against any result value\".format(expect[\"id\"]))\n\n\ndef test_get_package_ordered_io_with_ordered_dict():\n test_inputs = OrderedDict([\n (\"id-literal-type\", \"float\"),\n (\"id-dict-details\", {\"type\": \"string\"}),\n (\"id-array-type\", {\n \"type\": {\n \"type\": \"array\",\n \"items\": \"float\"\n }\n }),\n (\"id-literal-array\", \"string[]\"),\n ])\n expected_result = [\n {\"id\": \"id-literal-type\", \"type\": \"float\"},\n {\"id\": \"id-dict-details\", \"type\": \"string\"},\n {\"id\": \"id-array-type\", \"type\": {\"type\": \"array\", \"items\": \"float\"}},\n {\"id\": \"id-literal-array\", \"type\": \"string[]\"}\n ]\n result = _get_package_ordered_io(test_inputs)\n assert isinstance(result, list) and len(result) == len(expected_result)\n assert result == expected_result\n\n\ndef test_get_package_ordered_io_with_list():\n \"\"\"\n Everything should remain the same as list variant is only allowed to have I/O objects.\n\n (i.e.: not allowed to have both objects and literal string-type simultaneously as for dictionary variant).\n 
\"\"\"\n expected_result = [\n {\"id\": \"id-literal-type\", \"type\": \"float\"},\n {\"id\": \"id-dict-details\", \"type\": \"string\"},\n {\"id\": \"id-array-type\", \"type\": {\"type\": \"array\", \"items\": \"float\"}},\n {\"id\": \"id-literal-array\", \"type\": \"string[]\"}\n ]\n result = _get_package_ordered_io(deepcopy(expected_result))\n assert isinstance(result, list) and len(result) == len(expected_result)\n assert result == expected_result\n\n\nclass MockResponseOk(object):\n status_code = 200\n\n\ndef test_check_package_file_with_url():\n package_url = \"https://example.com/package.cwl\"\n with mock.patch(\"requests.Session.request\", return_value=MockResponseOk()) as mock_request:\n res_path, is_url = _check_package_file(package_url)\n assert mock_request.call_count == 1\n assert mock_request.call_args[0][:2] == (\"head\", package_url) # ignore extra args\n assert res_path == package_url\n assert is_url is True\n\n\ndef test_check_package_file_with_file_scheme():\n with mock.patch(\"requests.Session.request\", return_value=MockResponseOk()) as mock_request:\n with tempfile.NamedTemporaryFile(mode=\"r\", suffix=\"test-package.cwl\") as tmp_file:\n package_file = \"file://{}\".format(tmp_file.name)\n res_path, is_url = _check_package_file(package_file)\n mock_request.assert_not_called()\n assert res_path == tmp_file.name\n assert is_url is False\n\n\ndef test_check_package_file_with_posix_path():\n with tempfile.NamedTemporaryFile(mode=\"r\", suffix=\"test-package.cwl\") as tmp_file:\n res_path, is_url = _check_package_file(tmp_file.name)\n assert res_path == tmp_file.name\n assert is_url is False\n\n\n@pytest.mark.skipif(not sys.platform.startswith(\"win\"), reason=\"Test for Windows only.\")\ndef test_check_package_file_with_windows_path():\n test_file = \"C:/Windows/Temp/package.cwl\" # fake existing, just test format handled correctly\n with mock.patch(\"os.path.isfile\", return_value=True) as mock_isfile:\n res_path, is_url = _check_package_file(test_file)\n mock_isfile.assert_called_with(test_file)\n assert res_path == test_file\n assert is_url is False\n\n\ndef test_get_package_ordered_io_when_direct_type_string():\n inputs_as_strings = {\n \"input-1\": \"File[]\",\n \"input-2\": \"float\"\n }\n result = _get_package_ordered_io(inputs_as_strings)\n assert isinstance(result, list)\n assert len(result) == len(inputs_as_strings)\n assert all([isinstance(res_i, dict) for res_i in result])\n assert all([i in [res_i[\"id\"] for res_i in result] for i in inputs_as_strings])\n assert all([\"type\" in res_i and res_i[\"type\"] == inputs_as_strings[res_i[\"id\"]] for res_i in result])\n\n\nclass MockWpsPackage(WpsPackage):\n \"\"\"\n Mock of WPS package definition that ignores real status location updates and returns the mock for test validation.\n \"\"\"\n mock_status_location = None\n\n @property\n def status_location(self):\n return self.mock_status_location\n\n @status_location.setter\n def status_location(self, value):\n pass\n\n\nclass MockWpsRequest(WPSRequest):\n def __init__(self, process_id=None):\n if not process_id:\n raise ValueError(\"must provide mock process identifier\")\n super(MockWpsRequest, self).__init__()\n self.identifier = process_id\n self.json = {\n \"identifier\": process_id,\n \"operation\": \"execute\",\n \"version\": \"1.0.0\",\n \"language\": \"null\",\n \"identifiers\": \"null\",\n \"store_execute\": \"true\",\n \"status\": \"true\",\n \"lineage\": \"true\",\n \"raw\": \"false\",\n \"inputs\": {\n \"message\": [\n {\n \"identifier\": \"message\",\n 
\"title\": \"A dummy message\",\n \"type\": \"literal\",\n \"data_type\": \"string\",\n \"data\": \"Dummy message\",\n \"allowed_values\": [],\n }\n ]\n },\n \"outputs\": {}\n }\n\n\nclass MockProcess(Process):\n def __init__(self, shell_command=None):\n if not shell_command:\n raise ValueError(\"must provide mock process shell command\")\n # fix for Windows, need to tell explicitly the path to shell command\n # since cwltool sets subprocess.Popen with shell=False\n if sys.platform == \"win32\":\n shell_command = [shutil.which(\"cmd.exe\"), \"/c\", shell_command]\n body = {\n \"title\": \"mock-process\",\n \"id\": \"mock-process\",\n \"package\": {\n \"cwlVersion\": \"v1.0\",\n \"class\": \"CommandLineTool\",\n \"baseCommand\": shell_command,\n \"inputs\": {\n \"message\": {\n \"type\": \"string\",\n \"inputBinding\": {\n \"position\": 1\n }\n }\n },\n \"outputs\": {}\n }\n }\n super(MockProcess, self).__init__(body)\n\n\ndef test_stdout_stderr_logging_for_commandline_tool_success():\n \"\"\"\n Execute a process and assert that stdout is correctly logged to log file upon successful process execution.\n \"\"\"\n with contextlib.ExitStack() as stack:\n xml_file = stack.enter_context(tempfile.NamedTemporaryFile(suffix=\".xml\")) # noqa\n workdir = stack.enter_context(tempfile.TemporaryDirectory())\n process = MockProcess(shell_command=\"echo\")\n wps_package_instance = MockWpsPackage(identifier=process[\"id\"], title=process[\"title\"],\n payload=process, package=process[\"package\"])\n wps_package_instance.mock_status_location = xml_file.name\n wps_package_instance.set_workdir(workdir)\n\n # ExecuteResponse mock\n wps_request = MockWpsRequest(process_id=process.id)\n wps_response = type(\"\", (object,), {\"_update_status\": lambda *_, **__: 1})()\n wps_package_instance._handler(wps_request, wps_response)\n\n # log assertions\n expect_log = os.path.splitext(wps_package_instance.mock_status_location)[0] + \".log\"\n with open(expect_log, \"r\") as file:\n log_data = file.read()\n # FIXME: add more specific asserts... validate CWL command called and sub-operations logged\n assert \"Dummy message\" in log_data\n\n\ndef test_stdout_stderr_logging_for_commandline_tool_failure():\n \"\"\"\n Execute a process and assert that stderr is correctly logged to log file upon failing process execution.\n \"\"\"\n with contextlib.ExitStack() as stack:\n xml_file = stack.enter_context(tempfile.NamedTemporaryFile(suffix=\".xml\")) # noqa\n workdir = stack.enter_context(tempfile.TemporaryDirectory())\n process = MockProcess(shell_command=\"not_existing_command\")\n wps_package_instance = MockWpsPackage(identifier=process[\"id\"], title=process[\"title\"],\n payload=process, package=process[\"package\"])\n wps_package_instance.mock_status_location = xml_file.name\n wps_package_instance.set_workdir(workdir)\n\n # ExecuteResponse mock\n wps_request = MockWpsRequest(process_id=process[\"id\"])\n wps_response = type(\"\", (object,), {\"_update_status\": lambda *_, **__: 1})()\n # FIXME: add more specific asserts... 
validate CWL command called but as some execution error entry logged\n try:\n wps_package_instance._handler(wps_request, wps_response)\n except PackageExecutionError as exception:\n assert \"Completed permanentFail\" in exception.args[0]\n else:\n pytest.fail(\"\\\"wps_package._handler()\\\" was expected to throw \\\"PackageExecutionError\\\" exception\")\n","sub_path":"tests/processes/test_wps_package.py","file_name":"test_wps_package.py","file_ext":"py","file_size_in_byte":11312,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"28110297","text":"def find_a_thief():\r\n true = 1\r\n for i in range(4):\r\n #只有一个人说的是真话。因此如果小偷是i的话,要num == 1 才能抓住小偷\r\n num = (i != 1) + (i == 4) + (i == 2) + (i != 4)\r\n if true == num:\r\n print(chr(96 + i),\"是小偷\")\r\n \r\nif __name__ == \"__main__\":\r\n find_a_thief()\r\n","sub_path":"168206239/小偷.py","file_name":"小偷.py","file_ext":"py","file_size_in_byte":356,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"166462553","text":"# Text based long term memory.\n\nimport os\nimport json\nimport re\n\nmemories = {}\n\ndef mem_encode(keyword, inputstr):\n memories[keyword.lower()] = inputstr\n\ndef mem_retrieve(keyword):\n if keyword.lower() in memories:\n return memories[keyword.lower()]\n \n return ''\n\ndef mem_delete(keyword):\n if keyword.lower() in memories:\n del memories[keyword.lower()]\n\n# Compiles the context for memories in a string\ndef mem_compile(inputstr):\n compiled = ''\n splitstr = inputstr.split(' ')\n for i in range(len(splitstr)):\n splitstr[i] = ''.join(filter(str.isalnum, splitstr[i])).lower()\n retrievedstr = mem_retrieve(splitstr[i])\n if retrievedstr != '':\n compiled = compiled + retrievedstr + '\\n'\n \n return compiled\n\n\ndef mem_save(filepath):\n fp = open(filepath, \"w\")\n json.dump(memories, fp)\n\ndef mem_load(filepath):\n global memories\n try:\n fp = open(filepath, \"r\")\n if fp:\n memories = json.load(fp)\n \n return True\n except OSError as e:\n print('Could not find ' + filepath)\n return False\n\ndef mem_dict():\n return memories\n","sub_path":"memory.py","file_name":"memory.py","file_ext":"py","file_size_in_byte":1329,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"421075936","text":"import requests\r\nimport re\r\n\r\n\r\ndef _get(text):\r\n _list = re.findall('[^\"。!?…]{5,}[。!?…]', text, re.L)\r\n end_list = []\r\n for num, i in enumerate(_list, start=1):\r\n if num/2 == int(num/2):\r\n end_list.append(i)\r\n return end_list\r\n\r\ndef translate(text):\r\n r = requests.post('https://translate.google.cn/translate_a/single?client=at&sl=en&tl=zh-CN&hl=zh-CN&dt=at&ie=UTF-8&oe=UTF-8&q='\\\r\n + text)\r\n result = _get(r.text)\r\n return result\r\n\r\n#https://translate.google.cn/translate_a/single?client=at&sl=en&tl=zh-CN&hl=zh-CN&dt=at&ie=UTF-8&oe=UTF-8&q=sure\r\ntext = '''Right now your dad and I have been married for about two years, living on Ellis Avenue; when we move out you'll still be too young to remember the house\r\n'''\r\nprint(translate(text))\r\n","sub_path":"Others/Handling/Core/Translate.py","file_name":"Translate.py","file_ext":"py","file_size_in_byte":826,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"185649838","text":"# -*- coding:utf-8 -*-\r\nfrom flask import Flask\r\nfrom flask import request\r\nfrom flask import abort\r\nimport hashlib\r\nimport xmltodict\r\nimport 
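# --- Hedged, corrected sketch of find_a_thief above. The original loops over
# range(4), i.e. suspects 0..3, so the "i == 4" statement can never be true
# and chr(96 + 0) is not a letter; iterating 1..4 maps suspects to a..d.
# Exactly one of the four statements is true, which identifies the thief.
for i in range(1, 5):
    true_statements = (i != 1) + (i == 4) + (i == 2) + (i != 4)
    if true_statements == 1:
        print(chr(96 + i), "is the thief")   # prints: a is the thief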
time\r\nimport re\r\nimport requests\r\n\r\napp = Flask(__name__)\r\n\r\n@app.route(\"/\")\r\ndef index():\r\n return \"Hello World!\"\r\n\r\n@app.route(\"/wechat\", methods=[\"GET\",\"POST\"])\r\ndef weixin():\r\n if request.method == \"GET\": # 判断请求方式是GET请求\r\n my_signature = request.args.get('signature') # 获取携带的signature参数\r\n my_timestamp = request.args.get('timestamp') # 获取携带的timestamp参数\r\n my_nonce = request.args.get('nonce') # 获取携带的nonce参数\r\n my_echostr = request.args.get('echostr') # 获取携带的echostr参数\r\n\r\n if not all([my_signature, my_timestamp, my_nonce, my_echostr]):\r\n \tabort(400)\r\n\r\n token = \"test123\" #token验证\r\n\r\n # 进行字典排序\r\n data = [token,my_timestamp ,my_nonce ]\r\n data.sort()\r\n\r\n # 拼接成字符串\r\n temp = ''.join(data)\r\n\r\n # 进行sha1加密\r\n mysignature = hashlib.sha1()\r\n mysignature.update(temp.encode('utf-8'))\r\n res = mysignature.hexdigest()\r\n # 加密后的字符串可与signature对比,标识该请求来源于微信\r\n if my_signature == res:\r\n return my_echostr\r\n else:\r\n \tabort(403)\r\n elif request.method == \"POST\": # 判断请求方式是POST请求\r\n \txml_str = request.data\r\n \tif not xml_str:\r\n \t\tabort(400)\r\n\r\n \t# 对xml字符串进行解析\r\n \txml_dict = xmltodict.parse(xml_str)\r\n \txml_dict = xml_dict.get(\"xml\")\r\n\r\n \t# 提取消息类型\r\n \tmsg_type = xml_dict.get(\"MsgType\")\r\n\r\n \tif msg_type == \"text\":\r\n \t\t# 表示发送的是文本消息\r\n \t\t# 构造返回值,经由微信服务器恢复给用户的消息内容\r\n content = xml_dict.get(\"Content\")\r\n if \"天气\" in content:\r\n url = \"https://www.tianqiapi.com/api/?version=v6&city=\"\r\n b = content.strip(\"天气\")\r\n r='[’!\"#$%&\\'()*+,-./:;<=>?@[\\\\]^_`{|}~]+' # 正则删除标点符号\r\n c=re.sub(r,'',b)\r\n post = c.strip(\" \")\r\n appid = \"&appid=1001&appsecret=5566\" # 现在要appid或appsecret 否则报错\r\n url_now = url + post + appid\r\n rs_we = requests.get(url_now).json()\r\n weather_city = rs_we[\"city\"]\r\n if weather_city == post:\r\n weather_uptime = rs_we[\"date\"] + \" \" + rs_we[\"week\"] + \" \" + rs_we[\"update_time\"] # 更新时间\r\n weather_wea = rs_we[\"wea\"] # 天气情况\r\n weather_tem = rs_we[\"tem\"] + \"℃\" # 当前温度\r\n weather_temnow = rs_we[\"tem2\"] + \"/\" + rs_we[\"tem1\"] + \"℃\" # 早晚温差\r\n weather_win = rs_we[\"win\"] # 风向\r\n weather_win_speed = rs_we[\"win_speed\"] # 风速等级\r\n weather_win_meter = rs_we[\"win_meter\"] # 风速\r\n weather_humidity = rs_we[\"humidity\"] # 湿度\r\n weather_visibility = rs_we[\"visibility\"] # 能见度\r\n weather_pressure = rs_we[\"pressure\"] + \"hPa\" # 气压\r\n weather_air = rs_we[\"air\"] # 空气质量\r\n weather_air_pm25 = rs_we[\"air_pm25\"] # PM2.5\r\n weather_air_level = rs_we[\"air_level\"] # 空气质量等级\r\n weather_info = weather_city + \"-今日天气预报(实时):\" + \"\\r\\n当前温度:\" + weather_tem + \"\\r\\n早晚温差:\" + weather_temnow + \"\\r\\n天气情况:\" + weather_wea + \"\\r\\n湿度:\" + weather_humidity + \"\\r\\n空气质量:\" + weather_air + \"\\r\\nPM2.5:\" + weather_air_pm25 + \"\\r\\n空气质量等级:\" + weather_air_level + \"\\r\\n气压:\" + weather_pressure + \"\\r\\n风向:\" + weather_win + \"\\r\\n风速:\" + weather_win_meter + \"\\r\\n风速等级:\" + weather_win_speed + \"\\r\\n能见度:\" + weather_visibility + \"\\r\\n更新时间:\" + weather_uptime\r\n resp_dict = {\r\n \"xml\":{\r\n \"ToUserName\": xml_dict.get(\"FromUserName\"),\r\n \"FromUserName\": xml_dict.get(\"ToUserName\"),\r\n \"CreateTime\": int(time.time()),\r\n \"MsgType\": \"text\",\r\n \"Content\": weather_info\r\n }\r\n }\r\n else:\r\n resp_dict = {\r\n \"xml\":{\r\n \"ToUserName\": xml_dict.get(\"FromUserName\"),\r\n \"FromUserName\": xml_dict.get(\"ToUserName\"),\r\n \"CreateTime\": int(time.time()),\r\n \"MsgType\": \"text\",\r\n \"Content\": 
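# --- Standalone sketch of the WeChat signature check implemented above:
# sort the three strings, concatenate, sha1, compare. Token and values are
# dummy examples; the algorithm is the documented WeChat verification.
import hashlib

def check_signature(token, timestamp, nonce, signature):
    data = sorted([token, timestamp, nonce])
    digest = hashlib.sha1("".join(data).encode("utf-8")).hexdigest()
    return digest == signature

tok, ts, nc = "test123", "1600000000", "42"
expected = hashlib.sha1("".join(sorted([tok, ts, nc])).encode()).hexdigest()
assert check_signature(tok, ts, nc, expected)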
\"当前地区无法查询天气\"\r\n }\r\n }\r\n elif \"快递查询\" in content:\r\n url = \"https://m.kuaidi100.com/apicenter/kdquerytools.do?method=autoComNum&text=\"\r\n b = content.strip(\"快递查询\")\r\n r='[’!\"#$%&\\'()*+,-./:;<=>?@[\\\\]^_`{|}~]+' # 正则删除标点符号\r\n c=re.sub(r,'',b)\r\n post = c.strip(\" \")\r\n if post == \"\":\r\n resp_dict = {\r\n \"xml\":{\r\n \"ToUserName\": xml_dict.get(\"FromUserName\"),\r\n \"FromUserName\": xml_dict.get(\"ToUserName\"),\r\n \"CreateTime\": int(time.time()),\r\n \"MsgType\": \"text\",\r\n \"Content\": \"注意快递查询格式(快递查询 单号)\"\r\n }\r\n }\r\n\r\n else:\r\n url_now = url + post\r\n rs_we = requests.get(url_now).json()\r\n kd_message = rs_we[\"auto\"]\r\n if len(kd_message):\r\n url2 = \"https://m.kuaidi100.com/result.jsp?nu=\"\r\n url_auto = url2 + post\r\n resp_dict = {\r\n \"xml\":{\r\n \"ToUserName\": xml_dict.get(\"FromUserName\"),\r\n \"FromUserName\": xml_dict.get(\"ToUserName\"),\r\n \"CreateTime\": int(time.time()),\r\n \"MsgType\": \"text\",\r\n \"Content\": \"快递单号\" + post + \"结果: \\n点击查看物流\"\r\n }\r\n }\r\n else:\r\n resp_dict = {\r\n \"xml\":{\r\n \"ToUserName\": xml_dict.get(\"FromUserName\"),\r\n \"FromUserName\": xml_dict.get(\"ToUserName\"),\r\n \"CreateTime\": int(time.time()),\r\n \"MsgType\": \"text\",\r\n \"Content\": \"无法识别当前单号\"\r\n }\r\n }\r\n\r\n elif content == \"查询\" or content == \"功能\" or content == \"菜单\" or content == \"帮助\":\r\n resp_dict = {\r\n \"xml\":{\r\n \"ToUserName\": xml_dict.get(\"FromUserName\"),\r\n \"FromUserName\": xml_dict.get(\"ToUserName\"),\r\n \"CreateTime\": int(time.time()),\r\n \"MsgType\": \"text\",\r\n \"Content\": \"舟山物联查询小助手当前功能有:\\n1.天气查询(天气 地名)\\n2.快递查询(快递查询 单号)\"\r\n }\r\n }\r\n\r\n \t# 将字典转换成为xml字符串\r\n \tresp_xml_str = xmltodict.unparse(resp_dict)\r\n \t# 返回消息数据给微信服务器\r\n \treturn resp_xml_str\r\n\r\nif __name__ == \"__main__\":\r\n app.run(host='0.0.0.0', port=80, debug=True)","sub_path":"5.返回有哪些功能.py","file_name":"5.返回有哪些功能.py","file_ext":"py","file_size_in_byte":8073,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"134991958","text":"from django.shortcuts import render\nfrom django.http import HttpResponse, HttpResponseRedirect\nimport numpy as np\nfrom django.views import View\nimport json, sys\nfrom calculation.formulas import ExcelFormulas\nfrom calculation.forms import MortageForm\n# Create your views here.\n\ndef calculate(request):\n try:\n mortage = MortageForm()\n # return HttpResponse('Test')\n return render(request,'calculate.html', {'form': mortage})\n except Exception as e:\n print (e, \"LLLLLLLLLLLLLL\")\n\n\ndef ajax_request_data(request):\n try:\n if request.method == 'POST':\n if request.POST['initial'] == '0':\n loan_amnt = float(request.POST['loan_amnt'])\n interst_rate = float(request.POST['intrest_rate'])\n loan_months = float(request.POST['loan_months'])\n emi_amount = np.pmt((interst_rate/100)/12, loan_months, loan_amnt)\n print (emi_amount,\"emi ammount\")\n intrest_total = abs((abs(emi_amount)*loan_months) - loan_amnt)\n response = {'status': 200, 'count': round(abs(emi_amount), 2),'intrest': round(intrest_total,2)}\n excel_formula = ExcelFormulas()\n print(excel_formula.CumulativeInterestPaid(.12/12,300,10000000,1,84,0) ,\"LLLLLLLLLLLLLLL\")\n print (excel_formula.CumPrinc(.12/12,300,10000000,1,84,0), 'Cummalative price')\n return HttpResponse(json.dumps(response))\n elif request.POST['initial'] == '1':\n emi = round(float(request.POST['emi']),0)\n loan_amnt = float(request.POST['loan_amnt'])\n intrest_rate = 
float(request.POST['intrest_rate'])\n loan_months = int(request.POST['loan_months'])\n installment_paid = int(request.POST['installment_paid'])\n cal_type = int(request.POST['cal_type'])\n number_of_months = int(request.POST['number_of_months'])\n new_roi = float(request.POST['new_roi'])\n\n\n new_anno = return_ammotization(loan_amnt,intrest_rate, new_roi, loan_months, installment_paid, cal_type,number_of_months)\n return HttpResponse(json.dumps(new_anno))\n # ammotization = []\n # update_loan = loan_amnt\n # for cnt in range(1,loan_months+1):\n # principle = round(emi - ((intrest_rate/100)/12 * update_loan) ,0)\n # intrest = round(emi - principle, 0)\n # # update_loan = update_loan - emi\n # if round(update_loan,0) < emi:\n # ammotization.append([cnt,round(update_loan,0), emi, intrest, principle, 0])\n # else:\n # ammotization.append([cnt,round(update_loan,0), emi, intrest, principle, update_loan-principle])\n # update_loan = update_loan - principle\n #\n # response = {'status':200, 'data': ammotization}\n # return HttpResponse(json.dumps(response))\n\n except Exception as e:\n print (e, \"Error at ajax_request_data\")\n\n\ndef return_ammotization(loan_amount,old_roi, new_roi, old_months, paid_months, case_opt,cust_month):\n try:\n new_rate = (old_roi/100)/12\n new_emi_rate = (new_roi/100)/12\n emi_amount = np.pmt(new_rate, old_months, loan_amount)\n old_intrest_total = abs((abs(emi_amount)*old_months) - loan_amount)\n remaining_principle = get_annotized_ammount(new_rate,loan_amount, old_months, paid_months)\n print (remaining_principle, \"Remaining principle\")\n if case_opt == 1:\n emi = round(abs(np.pmt(new_emi_rate, old_months-paid_months, remaining_principle)),0)\n new_intrest_total = abs((abs(emi)*(old_months-paid_months)) - remaining_principle)\n intrest_saved = old_intrest_total - new_intrest_total\n print (emi ,\"EMI in case 1\",new_intrest_total, \"::::::\", intrest_saved)\n return_list = get_annotized_list(new_emi_rate, remaining_principle, emi, old_months-paid_months)\n return_list['intrest_saved'] = round(intrest_saved,2)\n return return_list\n elif case_opt == 2:\n emi = round(abs(np.pmt(new_emi_rate, cust_month, remaining_principle)),0)\n new_intrest_total = abs((abs(emi)*cust_month) - remaining_principle)\n intrest_saved = old_intrest_total - new_intrest_total\n print (emi, \"EMI in case 2\", new_intrest_total, \"::::::\", intrest_saved)\n return_list = get_annotized_list(new_emi_rate, remaining_principle, emi, cust_month)\n return_list['intrest_saved'] = round(intrest_saved,2)\n return return_list\n except Exception as emp:\n print ('Error occured at the return_ammotization_function', emp)\n print (\"line number of error {}\".format(sys.exc_info()[-1].tb_lineno))\n\n\ndef get_annotized_ammount(rate, amount, months, months_paid):\n print (rate, amount, months, months_paid, \":::::::::::::::::::OOOOOOOOOOOO\")\n monthly_emi = abs(round(np.pmt(rate,months, amount),0))\n print(monthly_emi, \"EMI\")\n update_amount = amount\n for i in range(1,months_paid+1):\n principle = round(monthly_emi-(rate*update_amount),0)\n print (principle, \"::::\")\n update_amount -= principle\n print (update_amount, \":P:P:P:P:P:P:P:P:P\")\n\n print (update_amount, \"<<<<<<<< Update Amount\")\n return update_amount\n\n\ndef get_annotized_list(intrest_rate, loan_amnt, emi,loan_months):\n ammotization = []\n update_loan = loan_amnt\n for cnt in range(1,loan_months+1):\n principle = abs(emi - round(intrest_rate * update_loan ,0))\n intrest = round(emi - principle, 0)\n # update_loan = update_loan - 
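# --- Worked example of the EMI computation used throughout the views above:
# np.pmt(rate, nper, pv) returns the (negative) fixed payment for a loan.
# Note: np.pmt was removed in NumPy 1.20; on modern NumPy use
# numpy_financial.pmt, which has the same signature. Figures are illustrative.
import numpy as np

loan, annual_rate, months = 1000000, 0.12, 120
emi = abs(np.pmt(annual_rate / 12.0, months, loan))
total_interest = emi * months - loan
print(round(emi, 2), round(total_interest, 2))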
emi\n if round(update_loan,0) < emi:\n ammotization.append([cnt,round(update_loan,0), emi, intrest, principle, 0])\n else:\n ammotization.append([cnt,round(update_loan,0), emi, intrest, principle, update_loan-principle])\n update_loan = update_loan - principle\n\n response = {'status':200, 'data': ammotization}\n return response\n# class Classbased(View):\n# \"\"\"docstring for Classbased.\"\"\"\n# def __init__(self):\n# pass\n#\n# def get(self, request, *args, **kwargs):\n# pass\n#\n# def post(self, request,*arg, **kwargs):\n# pass\n","sub_path":"calculation/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6390,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"391205675","text":"##############################################################################\n# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.\n# Produced at the Lawrence Livermore National Laboratory.\n#\n# This file is part of Spack.\n# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.\n# LLNL-CODE-647188\n#\n# For details, see https://github.com/spack/spack\n# Please also see the NOTICE and LICENSE files for our notice and the LGPL.\n#\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License (as\n# published by the Free Software Foundation) version 2.1, February 1999.\n#\n# This program is distributed in the hope that it will be useful, but\n# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and\n# conditions of the GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public\n# License along with this program; if not, write to the Free Software\n# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA\n##############################################################################\nfrom spack import *\n\n\nclass Erfa(AutotoolsPackage):\n \"\"\"Essential Routines for Fundamental Astronomy.\"\"\"\n\n homepage = \"https://github.com/liberfa/erfa\"\n url = \"https://github.com/liberfa/erfa/releases/download/v1.3.0/erfa-1.3.0.tar.gz\"\n\n version('1.4.0', '6f67ea6e39c70337c5de980eb7409800')\n version('1.3.0', '62347926625ecefbe5911446baed6676')\n version('1.2.0', '63e8e694d53add33c16f3f494d2b65f4')\n version('1.1.1', 'f227ada197eda3e622f4ef7cf7cdbd5a')\n version('1.1.0', '80eefd129e32c8290627a5c925c1534a')\n version('1.0.1', '35d8cf096313ed4500349aab04e8ae07')\n version('1.0.0', '7fcc2f647a77b8c0c883ab244b389756')\n version('0.0.1', '3736c0ff155fec6baa3637f135737344')\n\n variant('shared', default=True, description='Build shared libraries')\n variant('static', default=True, description='Build static libraries')\n variant('pic', default=True, description='Build PIC libraries')\n\n def configure_args(self):\n spec = self.spec\n args = []\n\n if '+shared' in spec:\n args.append('--enable-shared')\n else:\n args.append('--disable-shared')\n\n if '+static' in spec:\n args.append('--enable-static')\n else:\n args.append('--disable-static')\n\n if '+pic' in spec:\n args.append('--with-pic')\n else:\n args.append('--without-pic')\n\n return args\n","sub_path":"spack/py3-v4.1.1-repo/packages/erfa/package.py","file_name":"package.py","file_ext":"py","file_size_in_byte":2639,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} 
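# --- Generic sketch of the variant-to-configure-flag mapping used in the
# Spack erfa package above, as a pure function so it runs without Spack.
# A plain set of "+variant" strings stands in for a real Spack spec.
def configure_args(spec):
    args = []
    for feature in ("shared", "static"):
        flag = "enable" if "+" + feature in spec else "disable"
        args.append("--{}-{}".format(flag, feature))
    args.append("--with-pic" if "+pic" in spec else "--without-pic")
    return args

print(configure_args({"+shared", "+pic"}))
# ['--enable-shared', '--disable-static', '--with-pic']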
+{"seq_id":"601394299","text":"from collections import OrderedDict\n\nfrom . import base\nfrom .. import cache\nfrom .. import schematics\nfrom .. import views\n\n\nclass RecordMeta(base.TypeMeta):\n def __new__(mcs, name, bases, namespace):\n # attribute accumulators\n fields = OrderedDict()\n field_functions = OrderedDict()\n validate_functions = OrderedDict()\n\n # gather values from base classes\n for b in reversed(bases):\n if hasattr(b, '_fields'):\n fields.update(b._fields)\n\n if hasattr(b, '_field_functions'):\n field_functions.update(b._field_functions)\n\n if hasattr(b, '_validate_functions'):\n validate_functions.update(b._validate_functions)\n\n # gather typerighter attributes\n for k, v in namespace.items():\n # collect type instances in fields dict\n if isinstance(v, base.Type):\n fields[k] = v\n\n # collect functions that generate fields\n elif k.startswith('field_') and callable(v):\n field_name = k[len('field_'):]\n field_functions[field_name] = v\n\n # collect validation functions\n elif k.startswith('validate_') and callable(v):\n validate_functions[k] = v\n\n # attach collected values\n namespace['_fields'] = fields\n namespace['_field_functions'] = field_functions\n namespace['_validate_functions'] = validate_functions\n\n # create the new type\n type_class = type.__new__(mcs, name, bases, namespace)\n\n # create schematic for type\n schematic = schematics.Schematic(type_class)\n setattr(type_class, '_schematic', schematic)\n\n # put type in cache\n cache.TypeCache().add(type_class)\n\n return type_class\n\n\nclass Record(base.Type, metaclass=RecordMeta):\n NATIVE = dict\n\n def __init__(\n self, strict=False, field_filters=None, export_nones=False, **kw\n ):\n super().__init__(**kw)\n self.strict = strict\n self.field_filters = field_filters\n self.export_nones = export_nones\n\n def __iter__(self):\n for field_name, type_instance in self._fields.items():\n yield field_name, type_instance\n\n def _filter(self, value, fields=None):\n for field_name, field_type in self:\n # Field name is in fields list and has a value\n if fields and field_name in fields and field_name in value:\n yield field_name, field_type\n # Field name has value\n elif field_name in value:\n yield field_name, field_type\n # Field can provide a value\n elif field_type.default:\n yield field_name, field_type\n\n def _convert(self, value, converter, fields=None):\n for fn, ti in self._filter(value, fields=fields):\n if fields and fn in fields and fn in value:\n v = converter(value[fn], ti)\n yield fn, v\n elif fn in value:\n v = converter(value[fn], ti)\n yield fn, v\n elif fn not in value and ti.default is not base.Unset:\n yield fn, ti.default\n\n @base.skip_falsy\n def to_primitive(self, value, **convert_args):\n converter = lambda field_value, ti: ti.to_primitive(field_value)\n return {\n k: v for k, v in self._convert(value, converter, **convert_args)\n }\n\n @base.skip_falsy\n def to_native(self, value, **convert_args):\n converter = lambda field_value, ti: ti.to_native(field_value)\n return {\n k: v for k, v in self._convert(value, converter, **convert_args)\n }\n\n def to_view(self, data=None):\n return views.make_view(self, data=data)\n\n @base.skip_falsy\n def validate_fields(self, value):\n for fn, ti in self:\n if fn in value:\n ti.validate(value[fn])\n else:\n ti.validate(base.Unset)\n","sub_path":"typerighter/types/records.py","file_name":"records.py","file_ext":"py","file_size_in_byte":3998,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} 
+{"seq_id":"572430724","text":"#!/usr/bin/env python\n'''\nThis module contains routines related to observation processing\n\nJavier.Delgado@noaa.gov\n'''\n\nimport sys\nimport os\nimport shutil\nimport time\nimport fnmatch\nimport logging as log\n\ndef get_matching_ob_files(data_path, filename_pattern, analysis_date, logger=None):\n ''' \n Find files in matching the for the . The following\n special characters will be translated:\n @Y - 4-digit year of analysis date\n @y - 2-digit year (as of DAFFY 1.0.3.3)\n @m - 2-digit month of analysis date\n @H - 2-digit hour of anlysis date\n @M - 2-digit minute of analysis date\n * - arbitrary-length string of numbers or letters (NOTE : Only one of these can exist)\n\n should be a time tupple\n\n if logger is not passed in, create a new one\n '''\n\n if logger is None: \n import logging as logger\n logger.basicConfig(level=log.DEBUG)\n \n filename = filename_pattern\n filename = filename.replace('@Y', str(analysis_date.tm_year) )\n filename = filename.replace('@y', str(analysis_date.tm_year)[2:4] )\n filename = filename.replace('@m', time.strftime( '%m' , analysis_date) )\n filename = filename.replace('@d', time.strftime( '%d' , analysis_date) )\n filename = filename.replace('@H', time.strftime( '%H' , analysis_date) )\n filename = filename.replace('@M', time.strftime( '%M' , analysis_date) )\n \n # process wildcard (*). Note that a max of one * is allowed, since that should be sufficient\n # and makes the code much simpler (otherwise we'd need an extra for loop) \n if filename.count('*') > 1:\n logger.error('File pattern may only contain one asterisk!')\n sys.exit(13)\n asteriskIdx = filename.find('*')\n\n if asteriskIdx > -1 :\n #wildcards to process, so we may have multiple files\n prefix = filename[ : asteriskIdx ]\n suffix = filename[ asteriskIdx + 1 : ]\n matching_files = []\n for fil in os.listdir(data_path):\n if fnmatch.fnmatch(fil, '%s*%s' %(prefix,suffix) ) : \n matching_files.append(fil)\n if len(matching_files) == 0:\n # if logger has the warn_once() method, use it. otherwise just use warn()\n msg = 'No file matching the pattern \"%s*%s\" was found in [%s]' \\\n %(prefix, suffix, data_path)\n warn_once = getattr(logger, 'warn_once', None)\n if callable(warn_once):\n logger.warn_once(msg)\n else:\n logger.warn(msg)\n return matching_files\n\n else:\n return [filename]\n\n\n\n","sub_path":"lib/obs_processing.py","file_name":"obs_processing.py","file_ext":"py","file_size_in_byte":2571,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"206428846","text":"\"\"\"\r\nTrovare i punti di sella di una matrice\r\nPunti di sella sono contemporaneamente il minimo della riga i ed il massimo della colonna j\r\nOttenere coppie di indici della forma i, j che individuano la posizione dei punti di sella. 
Se non esistono, lista vuota\r\nM[i][j]\r\n\"\"\"\r\n\r\ndef minRiga(m,i):\r\n numMin = m[i][0]\r\n for j in range(len(m[i])):\r\n if m[i][j] < numMin:\r\n numMin = m[i][j]\r\n return numMin\r\n\r\ndef maxCol(m,j):\r\n numMax = m[0][j]\r\n for i in range(len(m)):\r\n if m[i][j] > numMax:\r\n numMax = m[i][j]\r\n return numMax\r\n\r\ndef puntiSella(m):\r\n l = []\r\n for i in range(len(m)):\r\n for j in range(len(m[0])):\r\n if m[i][j] == minRiga(m,i) and m[i][j] == maxCol(m,j):\r\n l.append((i,j))\r\n return l\r\n\r\nm = [[4,5,5,9],\r\n [6,7,6,7],\r\n [6,8,6,9],\r\n [5,2,3,3]]\r\n\r\nprint(puntiSella(m))\r\n\r\n\r\n","sub_path":"esercizioMatrici12012017.py","file_name":"esercizioMatrici12012017.py","file_ext":"py","file_size_in_byte":891,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"556388686","text":"#Character creation:\r\n\r\nimport random\r\nimport time\r\n\r\nclass CharacterCreation(object):\r\n strength = 0; \r\n intelligence = 0; \r\n constitution = 0; \r\n dexterity = 0;\r\n hitpoints = 50; \r\n prot = ''\r\n\r\n def charCreate(self):\r\n protagonist_name = input('What is your name?')\r\n print('My name is %s' % protagonist_name)\r\n\r\n time.sleep(1)\r\n print('Nice to meet you %s!' % protagonist_name)\r\n time.sleep(1)\r\n \r\n flag = 'no';\r\n while (flag == 'no'): \r\n protagonist_class = input('What class are you? You can choose warrior, priest, mage or thief!')\r\n self.prot = protagonist_class.lower() \r\n while(self.prot !='warrior' and self.prot != 'priest' and self.prot != 'thief' and self.prot != 'mage'):\r\n print('You did not pick a class, please try again') \r\n protagonist_class = input('What class are you? You can choose warrior, priest, mage or thief!') \r\n self.prot = protagonist_class.lower() \r\n \r\n time.sleep(1)\r\n \r\n if protagonist_class == 'warrior':\r\n print(\r\n '''\r\n Warriors are fearsome melee fighters who generally have great strenght and constitution,\r\n but are slower in body and mind than other classes.\r\n '''\r\n )\r\n time.sleep(1)\r\n elif protagonist_class == 'priest':\r\n print(\r\n ''''\r\n Unlike the pampered papal clergyman we come to expect in modern times,\r\n the singular devotion of priest makes them excellent combat-healers and\r\n with their heavy blunt religious paraphernalia they tend to be\r\n quite adept in headbashing their enemies into oblivion.\r\n '''\r\n )\r\n elif protagonist_class == 'thief':\r\n print(\r\n '''\r\n Congratulations! From all the classes you could choose...\r\n thieves are backstabbing, sneaky, unreliable scum who,\r\n although they would avoid a fight whenever possible and rather poison you or kill you from afar,\r\n rely on their dexterity to keep themself save in hand to hand combat\r\n '''\r\n )\r\n else:\r\n print(\r\n '''\r\n Due to the unreliable nature of most magic, \r\n surviving mages are without exception able to use protective magic to \r\n prevent self-combustion in case a spell goes wrong. Highly intelligent out of necessity, \r\n mages wield magic in a ruthless yet unpredictable way. \r\n '''\r\n )\r\n time.sleep(1)\r\n flag = input('Are you sure you want to be a %s? Yes or No' % protagonist_class).lower()\r\n \r\n print('Good choice! I\\'ve always wanted to be a %s.' 
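# --- Compact alternative to puntiSella above: precompute the row minima and
# column maxima once instead of rescanning inside the double loop; same
# result on the sample matrix, in O(rows*cols) instead of O((rows*cols)^2).
def saddle_points(m):
    row_min = [min(row) for row in m]
    col_max = [max(col) for col in zip(*m)]
    return [(i, j) for i, row in enumerate(m)
            for j, v in enumerate(row)
            if v == row_min[i] and v == col_max[j]]

m = [[4, 5, 5, 9], [6, 7, 6, 7], [6, 8, 6, 9], [5, 2, 3, 3]]
print(saddle_points(m))   # [(1, 0), (1, 2), (2, 0), (2, 2)]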
% protagonist_class)\r\n\r\n def class_strenght(self):\r\n \r\n prot = self.prot\r\n time.sleep(1)\r\n if prot == 'warrior': \r\n strength = random.randint(10, 18)\r\n print('Your strenght is', strength)\r\n elif prot == 'priest':\r\n strength = random.randint(8, 16)\r\n print('Your strenght is', strength)\r\n elif prot == 'thief': \r\n strength = random.randint(6, 14)\r\n print('Your strenght is', strength)\r\n else:\r\n strength = random.randint(4, 12)\r\n print('Your strenght is', strength)\r\n self.strenght = strength\r\n \r\n def class_constitution(self):\r\n \r\n prot = self.prot\r\n time.sleep(1)\r\n if prot == 'warrior': \r\n constitution = random.randint(10, 18)\r\n print('Your constitution is', constitution)\r\n elif prot == 'priest':\r\n constitution = random.randint(10, 18)\r\n print('Your constitution is', constitution)\r\n elif prot == 'thief': \r\n constitution = random.randint(8, 14)\r\n print('Your constitution is', constitution)\r\n else:\r\n constitution = random.randint(6, 14)\r\n print('Your constitution is', constitution)\r\n self.constitution = constitution\r\n \r\n def class_dexterity(self):\r\n \r\n prot = self.prot\r\n time.sleep(1)\r\n if prot == 'warrior': \r\n dexterity = random.randint(6, 14)\r\n print('Your dexterity is', dexterity)\r\n elif prot == 'priest':\r\n dexterity = random.randint(6, 12)\r\n print('Your dexterity is', dexterity)\r\n elif prot == 'thief': \r\n dexterity = random.randint(10, 18)\r\n print('Your dexterity is', dexterity)\r\n else:\r\n dexterity= random.randint(4, 12)\r\n print('Your dexterity is', dexterity)\r\n self.dexterity = dexterity\r\n \r\n def class_intelligence(self):\r\n \r\n prot = self.prot\r\n time.sleep(1)\r\n if prot == 'warrior': \r\n intelligence = random.randint(4, 12)\r\n print('Your intelligence is', intelligence)\r\n elif prot == 'priest':\r\n intelligence = random.randint(6, 14)\r\n print('Your intelligence is', intelligence)\r\n elif prot == 'thief': \r\n intelligence = random.randint(8, 16)\r\n print('Your intelligence is', intelligence)\r\n else:\r\n intelligence = random.randint(12, 18)\r\n print('Your intelligence is', intelligence)\r\n self.intelligence = intelligence \r\n\r\n def starting_hitpoints(self):\r\n \r\n constitution = self.constitution\r\n if constitution >= 16:\r\n self.hitpoints += (constitution - 10) * 6\r\n elif constitution >= 13:\r\n self.hitpoints += (constitution - 10) * 5\r\n elif constitution >= 10:\r\n self.hitpoints += (constitution - 10) * 4\r\n else:\r\n self.hitpoints += (constitution - 10) * 2\r\n time.sleep(1)\r\n print('You have %s hitpoints' % self.hitpoints)\r\n \r\n #def stats(self, a, b, c, d, e,):\r\n # stats = [4]\r\n #stats = ['strenght' : a, b,c,d,e];\r\n #print(stats)\r\n def controller(self):\r\n self.charCreate()\r\n self.class_strenght()\r\n self.class_constitution() \r\n self.class_dexterity()\r\n self.class_intelligence()\r\n self.starting_hitpoints()\r\n #self.stats(self.strenght, self.constitution, self.dexterity, self.intelligence, self.hitpoints) \r\n \r\n","sub_path":"EastOfLoathing/src/characterCreation.py","file_name":"characterCreation.py","file_ext":"py","file_size_in_byte":6669,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"219398360","text":"from django.conf.urls import url\n\nfrom . 
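# --- Table-driven sketch of the per-class stat rolls above: one dict of
# (low, high) ranges replaces the four near-identical if/elif ladders in
# class_strenght/class_constitution/etc. Ranges copied from the code above.
import random

STRENGTH_RANGE = {"warrior": (10, 18), "priest": (8, 16),
                  "thief": (6, 14), "mage": (4, 12)}

def roll_strength(klass):
    low, high = STRENGTH_RANGE[klass]
    return random.randint(low, high)

print(roll_strength("warrior"))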
import views\n\nurlpatterns = [\n url(r'^$', views.CartDetail, name='CartDetail'),\n\n url(r'^remove_big_store/(?P\\d+)/$', views.CartBigStoreRemove, name='CartBigStoreRemove'),\n url(r'^add_big_store/(?P\\d+)/$', views.CartBigStoreAdd, name='CartBigStoreAdd'),\n url(r'^add_big_store2/(?P\\d+)/$', views.CartBigStoreAdd2, name='CartBigStoreAdd2'),\n url(r'^add_big_store3/(?P\\d+)/$', views.CartBigStoreAdd3, name='CartBigStoreAdd3'),\n url(r'^remove_big_store/(?P\\d+)/$', views.CartBigStoreRemove, name='CartBigStoreRemove'),\n url(r'^cart-main', views.cart_main, name='cart_main'),\n url(r'^cart-detail', views.cart_detail, name='cart_detail'),\n url(r'^cart-message', views.cart_message, name='cart_message'),\n url(r'^wishlist-message', views.wishlist_message, name='wishlist_message'),\n url(r'^compare-message', views.compare_message, name='compare_message'),\n url(r'^form', views.FeedbackView, name='FeedbackView'),\n]\n","sub_path":"cart/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1055,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"429802548","text":"# -*- coding: utf-8 -*-\nimport json\nimport time\nimport dbselectors\nimport web\n\npaliases = {\n 'player': 'display/player',\n 'players': 'display/players',\n 'p': 'display/player',\n\n 'game': 'display/game',\n 'games': 'display/games',\n 'g': 'display/game',\n\n 'server': 'display/server',\n 'servers': 'display/servers',\n\n 'map': 'display/map',\n 'maps': 'display/maps',\n\n 'mode': 'display/mode',\n 'modes': 'display/modes',\n\n 'weapon': 'display/weapon',\n 'weapons': 'display/weapons',\n\n 'ranks': 'display/ranks',\n}\n\n\ndef chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i + n]\n\n\nclass Qopt:\n\n def __init__(self, q):\n self.q = q\n\n def __call__(self, opt, d=None, c=str):\n try:\n return c(self.q[opt][0])\n except:\n return d\n\n def __getitem__(self, opt):\n return self.q[opt] if opt in self.q else []\n\n def __contains__(self, key):\n return key in self.q\n\n\ndef counterwait(c):\n while not c.lastcache:\n time.sleep(1)\n\n\ndef make(server, db, q, path):\n\n fullpath = str((path, q))\n\n if fullpath in server.retcache:\n return server.retcache[fullpath][1]\n\n def sendout(t):\n if fullpath not in server.retcache:\n server.retcache[fullpath] = (time.time(), t)\n return t\n\n qopt = Qopt(q)\n paths = path.split('/')[1:]\n sel = dbselectors.BaseSelector()\n sel.pathid = None\n sel.server = server\n sel.db = db\n sel.qopt = qopt\n sel.webpath = path\n if paths[0] in paliases:\n paths = paliases[paths[0]].split('/') + paths[1:]\n if paths[0] == 'get':\n ret = {\"error\": \"Invalid Query Type\"}\n if not server.dbexists:\n return 'application/json', '200 OK', json.dumps(\n {\"error\": \"Empty Database\"}).encode()\n if len(paths) >= 2:\n name = paths[1]\n pathid = paths[2] if len(paths) >= 3 else None\n sel.pathid = pathid\n if name in dbselectors.selectors:\n dbselectors.selectors[name].copyfrom(sel)\n ret = dbselectors.selectors[name].getdict()\n if ret is None:\n ret = {\"error\": \"Invalid Query\"}\n return sendout(('application/json', '200 OK', json.dumps(\n ret).encode()))\n elif paths[0] == 'display':\n if server.dbexists:\n if len(paths) >= 2:\n name = paths[1]\n pathid = paths[2] if len(paths) >= 3 else None\n sel.pathid = pathid\n if name in web.displays.displays:\n ret = web.displays.displays[name](sel)\n return sendout(('text/html', '200 OK', ret.encode()))\n elif paths[0] == 'images':\n try:\n return sendout(('image/png', '200 
OK', open(\n \"web/images/%s\" % '/'.join(paths[1:]).replace(\n '..', ''), 'rb').read()))\n except IndexError:\n pass\n except FileNotFoundError:\n pass\n elif paths[0] == 'styles':\n try:\n return sendout(('text/css', '200 OK', open(\n \"web/styles/%s\" % '/'.join(paths[1:]).replace(\n '..', '')).read().encode()))\n except IndexError:\n pass\n except FileNotFoundError:\n pass\n elif not paths[0]:\n return sendout(('text/html', '200 OK', web.main.page(sel).encode()))\n elif paths[0] == 'apidocs':\n return sendout(('redirect',\n 'https://github.com/shacknetisp/statsdb-interface#api-points'))\n elif paths[0] == 'source':\n return sendout(('redirect',\n 'https://github.com/shacknetisp/statsdb-interface'))\n elif paths[0] == 'robots.txt':\n return sendout(('text/plain', '200 OK',\n open('web/robots.txt').read().encode()))\n return sendout((\n 'text/html', '404 Not Found', web.err404.page(sel).encode()))\n","sub_path":"api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":3924,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"402232158","text":"'''\n - -coding= utf-8- -\n @authour=Wangyafei\n @time=2019/3/14 15:47\n @file=do_report.py\n'''\n#执行用例,生成测试报告\nimport unittest\nfrom project1.test_data.test_cases import TestCases\nfrom project1.common import HTMLTestRunnerNew\n\n#测试集\nsuit=unittest.TestSuite\n\n#添加用例\nloader=unittest.TestLoader\n\nsuit.addTest(loader.loadTestsFromTestCase(testCaseClass=TestCases))\n\n#执行用例,生成测试报告\nwith open('Test_report.html','wb+') as file:\n\n runner=HTMLTestRunnerNew.HTMLTestRunner(stream='Test_report.html',\n verbosity=2,\n title='python14 测试报告',\n description='python14 测试报告',\n tester='Yafei')\n runner.run(suit)\n","sub_path":"project1/test_data/do_report.py","file_name":"do_report.py","file_ext":"py","file_size_in_byte":825,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"417385221","text":"# coding=utf-8\n\nimport gtk\n\n__author__ = 'Michał Ciołczyk'\n\n\nclass AlgorithmResultsGUI(gtk.Window):\n def __init__(self, triangles, time, *args, **kwargs):\n super(AlgorithmResultsGUI, self).__init__(*args, **kwargs)\n self.set_size_request(400, 300)\n self.set_title(\"Algorithm results\")\n table = gtk.Table(3, 1)\n finished_label = gtk.Label('The algorithm has finished. 
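# --- Corrected sketch of the runner setup in do_report.py above: TestSuite
# and TestLoader must be *instantiated* (the original assigns the classes
# themselves), and the runner's `stream` should be the opened file object,
# not the filename string. The stdlib TextTestRunner is used here so the
# sketch runs without the HTMLTestRunnerNew dependency.
import unittest

class Demo(unittest.TestCase):
    def test_ok(self):
        self.assertTrue(True)

suite = unittest.TestSuite()
loader = unittest.TestLoader()
suite.addTest(loader.loadTestsFromTestCase(Demo))
with open("Test_report.txt", "w") as stream:
    unittest.TextTestRunner(stream=stream, verbosity=2).run(suite)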
Below are results:')\n table.attach(finished_label, 0, 1, 0, 1)\n triangles_text = self._triangles_text(triangles)\n text_buffer = gtk.TextBuffer()\n text_buffer.set_text('Ran for %s s (including visualization)\\n\\n'\n 'Split for %d triangles:\\n\\n%s' % (str(time), len(triangles), triangles_text))\n sw = gtk.ScrolledWindow()\n sw.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)\n textarea = gtk.TextView()\n textarea.set_editable(False)\n textarea.set_buffer(text_buffer)\n textarea.set_size_request(400, 200)\n sw.add(textarea)\n table.attach(sw, 0, 1, 1, 2)\n ok_button = gtk.Button(\"OK\")\n ok_button.connect('clicked', self._ok_clicked)\n table.attach(ok_button, 0, 1, 2, 3)\n self.add(table)\n self.show_all()\n\n @staticmethod\n def _triangles_text(triangles):\n to_return = ''\n for triangle in triangles:\n to_return += \"((%f, %f), (%f, %f), (%f, %f))\\n\" % (\n triangle.points[0].x,\n triangle.points[0].y,\n triangle.points[1].x,\n triangle.points[1].y,\n triangle.points[2].x,\n triangle.points[2].y\n )\n return to_return\n\n def _ok_clicked(self, widget, data=None):\n self.destroy()\n","sub_path":"lab4/algorithm_results.py","file_name":"algorithm_results.py","file_ext":"py","file_size_in_byte":1770,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"15719006","text":"\"\"\"\nNetwork creation wrapper classes\n================================\n\nAs the parameter values read from configuration file (currently)\nis type independent we need a way to map to types accepted by the graph\ngeneration functions.\nThe current best solution is to create wrappers that maps to the right type.\n\nAn alternative approach would be to require explicit type casting in the\nconfiguration file.\n\nA wrapper is simply an instance of the NetworkGenerator class providing a\nfunction for creating the network and a dictionary of (name, type) pairs.\n\nThis module could be made obsolete if using a configuration system with explicit \ntypes. 
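# --- Networkx-free sketch of the NetworkGenerator pattern described above:
# cast string-valued kwargs to their declared types before calling through
# to the wrapped creation function. ba_stub is a hypothetical stand-in for
# nx.barabasi_albert_graph so the sketch runs on its own.
class Generator(object):
    def __init__(self, fn, type_map):
        self.fn, self.type_map = fn, type_map

    def create(self, **kwargs):
        for key, cast in self.type_map.items():
            kwargs[key] = cast(kwargs[key])
        return self.fn(**kwargs)

def ba_stub(n, m):
    return "graph(n={}, m={})".format(n, m)

make_ba = Generator(ba_stub, {"n": int, "m": int}).create
print(make_ba(n="100", m="3"))   # graph(n=100, m=3)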
\n\nExamples\n--------\n\nCreating a wrapper object is done by creating a specific instance of the \n`NetworkGenerator` class and providing a reference to its `create` method.\nSo, for instance to create a wrapper for the networkx function \n`barabasi_albert_graph`, that accepts the two parameters `n` and `m`, both of\ntype `int` one would do::\n \n BA_networkx = NetworkGenerator(networkx.barabasi_albert_graph, {\"n\":int, \"m\":int}).create\n\nThe name `BA_networkx` could then be used in configuration.\n\n\"\"\"\n\n__author__ = \"Lukas Ahrenberg \"\n\n__license__ = \"Modified BSD License\"\n\n__all__ = [\"NetworkGenerator\"]\n\nimport networkx as nx\n\nfrom networkxtra.generators import *\n\nimport networkxtra.utils as nwxutils\n\n\nclass NetworkGenerator(object):\n \"\"\"\n Network generation wrapper object.\n \n \"\"\"\n \n def __init__(self, creationFunction, typeMap):\n \"\"\"\n\n Parameters\n ----------\n creationFunction : function\n Function used to create a networkx graph.\n\n typeMap : dict\n Dictionary of the structure (, ) so that\n the the parameter is a parameter to creationFunction and \n is a type object.\n\n \"\"\"\n self.creationFunction = creationFunction\n self.typeMap = typeMap\n\n def create(self, **kwargs):\n \"\"\"\n This method will call `function` given when creating the \n `NetworkGenerator` after type casting the parameters as specified \n in `typeMap`.\n\n Parameters\n ----------\n \n kwargs : special\n The general argument dictionary for the network creation function.\n Note that the argument list must precisely match the one for\n creation function.\n \n Returns\n -------\n\n result : special\n The result from calling `function`.\n\n \n See Also\n --------\n\n NetworkGenerator.__init__ : constructor\n\n \"\"\"\n for k,v in self.typeMap.items():\n kwargs[k] = v(kwargs[k])\n return self.creationFunction(**kwargs)\n \n\n# Specific instances created for convenience\n#--------------------------------------------\n\n# Wrapper function for the networkx implementation of the BA algorithm.\nbarabasi_albert_graph_networkx = NetworkGenerator(nx.barabasi_albert_graph, {\"n\":int, \"m\":int}).create\n# NOTE: old version. Kept for backward comp. will be removed in future. Use above instead.\nBA_networkx = NetworkGenerator(nx.barabasi_albert_graph, {\"n\":int, \"m\":int}).create\n\n\n# Wrapper function for the implementation of the Albert and Barabasi alg. in\n# Phys. Rev. Letters.\nalbert_barabasi_prv_quick = NetworkGenerator(albert_barabasi_physrevlett_quick,\n {\"N\":int, \"m\":int, \"p\":float, \"q\":float})\\\n .create \n# NOTE: old version. Kept for backward comp. will be removed in future. Use above instead.\nAB_phys_rev_letters_quick = NetworkGenerator(albert_barabasi_physrevlett_quick,\n {\"N\":int, \"m\":int, \"p\":float, \"q\":float})\\\n .create\n\ngrid_2d_graph_networkx = NetworkGenerator(nx.grid_2d_graph,\n {'m':int, 'n':int}).create\n\nfast_gnp_random_graph_networkx = NetworkGenerator(nx.fast_gnp_random_graph,\n {'n':int, 'p':float, 'directed':bool}).create\n\nload_network = NetworkGenerator(nwxutils.loadNetwork, {'file':str}).create\n\nconnected_watts_strogatz_graph_networkx = NetworkGenerator(nx.connected_watts_strogatz_graph,\n {'n':int, 'k':int, 'p':float}).create\n# NOTE: old version. Kept for backward comp. will be removed in future. 
Use above instead.\nconnected_watts_strogatz_graph = NetworkGenerator(nx.connected_watts_strogatz_graph,\n {'n':int, 'k':int, 'p':float}).create\n\npowerlaw_cluster_graph_networkx = NetworkGenerator(nx.powerlaw_cluster_graph,\n {'n':int, 'm':int, 'p':float}).create\n# NOTE: old version. Kept for backward comp. will be removed in future. Use above instead.\nHolme_and_Kim_powerlaw = NetworkGenerator(nx.powerlaw_cluster_graph,\n {'n':int, 'm':int, 'p':float}).create\n\npowerlaw_degree_sequence = NetworkGenerator(powerlaw_degree_sequence,\n {\"n\":int, \"a\":float}).create\n\ntoivonen = NetworkGenerator(toivonen_standard, \n {\"N_0\":int, \"N\":int, \"k\":int}).create\n","sub_path":"nepidemix/utilities/networkgeneratorwrappers.py","file_name":"networkgeneratorwrappers.py","file_ext":"py","file_size_in_byte":5369,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"37172315","text":"from threading import RLock\n\nclass BankAccount(object):\n\n myRLock = RLock()\n \n def __init__(self):\n self.balance = None\n\n def get_balance(self):\n with self.myRLock:\n if self.balance is None:\n raise ValueError(\"Account is deactived\")\n return self.balance\n\n def open(self):\n with self.myRLock:\n self.balance = 0\n\n def deposit(self, amount):\n with self.myRLock:\n if self.balance is None:\n raise ValueError(\"Account is deactived\")\n\n if amount < 0:\n raise ValueError(\"Cannot make negative deposits\")\n self.balance = self.balance + amount\n \n\n def withdraw(self, amount):\n with self.myRLock:\n if self.balance is None:\n raise ValueError(\"Account is deactived\")\n if amount < 0:\n raise ValueError(\"Cannot make negative withdrawls\")\n elif amount > self.balance:\n raise ValueError(\"Cannot make withdraw exeeding balance\")\n self.balance = self.balance - amount\n \n\n def close(self):\n with self.myRLock:\n self.balance = None\n","sub_path":"python/bank-account/bank_account.py","file_name":"bank_account.py","file_ext":"py","file_size_in_byte":1244,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"386215364","text":"from alligator.blogs.models import Blog, Post, Group\nfrom django.shortcuts import render_to_response, get_object_or_404\nfrom annoying.decorators import render_to\n\n@render_to('blogs/flow.htm')\ndef flow(request):\n latest_posts = Post.objects.filter(active=1).order_by('-pub_date')[:10]\n return {\n 'latest_posts' : latest_posts\n }\n\n\n@render_to('blogs/detail.html')\ndef detail(request, blog_id):\n b = get_object_or_404(Blog, pk=blog_id)\n return {\n 'blog': b\n }\n\n\n@render_to('blogs/planet.htm')\ndef planet(request, planet_slug):\n \"\"\"\n Display list of latest feeds from planet.\n \"\"\"\n planet = get_object_or_404(Group, slug=planet_slug)\n return {\n 'planet' : planet,\n 'latest_posts' : planet.get_latest_posts()\n }\n\ndef posts(request, blog_id):\n return HttpResponse(\"You are looking for posts of blog #%s\" % blog_id)\n","sub_path":"blogs/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":876,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"76589853","text":"# Copyright 1999-2018 Alibaba Group Holding Ltd.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to 
in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport logging\nimport time\nimport uuid\nfrom functools import partial\n\nfrom .spill import read_spill_file, write_spill_file, spill_exists\nfrom .utils import WorkerActor, concat_operand_keys\nfrom .. import promise\nfrom ..config import options\nfrom ..errors import *\nfrom ..utils import deserialize_graph, log_unhandled, calc_data_size\nfrom ..compat import six, OrderedDict3\nfrom ..tensor.execution.core import Executor\n\nlogger = logging.getLogger(__name__)\n\n_calc_result_cache = OrderedDict3()\n\n\nclass InProcessCacheActor(WorkerActor):\n \"\"\"\n Actor managing calculation result in rss memory\n \"\"\"\n def __init__(self):\n super(InProcessCacheActor, self).__init__()\n self._chunk_holder_ref = None\n self._mem_quota_ref = None\n\n self._spill_dump_pool = None\n\n def post_create(self):\n from .chunkholder import ChunkHolderActor\n from .quota import MemQuotaActor\n\n super(InProcessCacheActor, self).post_create()\n self._chunk_holder_ref = self.promise_ref(ChunkHolderActor.default_name())\n self._mem_quota_ref = self.promise_ref(MemQuotaActor.default_name())\n if options.worker.spill_directory:\n self._spill_dump_pool = self.ctx.threadpool(len(options.worker.spill_directory))\n\n @promise.reject_on_exception\n @log_unhandled\n def dump_cache(self, keys, callback):\n \"\"\"\n Dump data in rss memory into shared cache\n \"\"\"\n @log_unhandled\n def _try_put_chunk(session_id, chunk_key, data_size, data_shape):\n logger.debug('Try putting %s into shared cache.', chunk_key)\n try:\n if chunk_key not in _calc_result_cache:\n if not self._chunk_store.contains(session_id, chunk_key):\n raise KeyError('Data key %s not found in inproc cache', chunk_key)\n return\n\n ref = None\n try:\n ref = self._chunk_store.put(session_id, chunk_key, _calc_result_cache[chunk_key][1])\n del _calc_result_cache[chunk_key]\n self._mem_quota_ref.release_quota(chunk_key, _tell=True)\n\n self._chunk_holder_ref.register_chunk(session_id, chunk_key)\n data_size = self._chunk_store.get_actual_size(session_id, chunk_key)\n self.get_meta_ref(session_id, chunk_key).set_chunk_meta(\n session_id, chunk_key, size=data_size, shape=data_shape, workers=(self.address,))\n finally:\n del ref\n\n except StoreFull:\n # if we cannot put data into shared cache, we store it into spill directly\n self._chunk_holder_ref.spill_size(data_size, _tell=True)\n _put_spill_directly(session_id, chunk_key, data_size, data_shape)\n\n @log_unhandled\n def _put_spill_directly(session_id, chunk_key, data_size, data_shape, *_):\n if self._spill_dump_pool is None:\n raise SpillNotConfigured\n\n logger.debug('Writing data %s directly into spill.', chunk_key)\n self._spill_dump_pool.submit(write_spill_file, chunk_key, _calc_result_cache[chunk_key][1]).result()\n\n del _calc_result_cache[chunk_key]\n self._mem_quota_ref.release_quota(chunk_key, _tell=True)\n\n self.get_meta_ref(session_id, chunk_key).set_chunk_meta(\n session_id, chunk_key, size=data_size, shape=data_shape, workers=(self.address,))\n\n promises = []\n for k in keys:\n session_id, value = _calc_result_cache[k]\n data_size = calc_data_size(value)\n # for some special operands(argmax, argmean, mean, ..), intermediate chunk data has multiple parts, choose\n # first part's shape as chunk's shape.\n data_shape = 
value[0].shape if isinstance(value, tuple) else value.shape\n del value\n promises.append(\n promise.Promise(done=True).then(partial(_try_put_chunk, session_id, k, data_size, data_shape))\n )\n promise.all_(promises).then(lambda *_: self.tell_promise(callback)) \\\n .catch(lambda *exc: self.tell_promise(callback, *exc, **dict(_accept=False)))\n\n @log_unhandled\n def remove_cache(self, keys):\n \"\"\"\n Remove data from cache\n \"\"\"\n for k in keys:\n del _calc_result_cache[k]\n\n\nclass CpuCalcActor(WorkerActor):\n def __init__(self):\n super(CpuCalcActor, self).__init__()\n self._mem_quota_ref = None\n self._inproc_cache_ref = None\n self._dispatch_ref = None\n self._status_ref = None\n\n self._execution_pool = None\n self._spill_load_pool = None\n\n def post_create(self):\n from .quota import MemQuotaActor\n from .dispatcher import DispatchActor\n from .status import StatusActor\n from .daemon import WorkerDaemonActor\n\n super(CpuCalcActor, self).post_create()\n if isinstance(self.uid, six.string_types) and ':' in self.uid:\n uid_parts = self.uid.split(':')\n inproc_uid = 'w:' + uid_parts[1] + ':inproc-cache-' + str(uuid.uuid4())\n else:\n inproc_uid = None\n\n raw_ref = self.ctx.create_actor(InProcessCacheActor, uid=inproc_uid)\n self._inproc_cache_ref = self.promise_ref(raw_ref)\n daemon_ref = self.ctx.actor_ref(WorkerDaemonActor.default_name())\n if self.ctx.has_actor(daemon_ref):\n daemon_ref.register_child_actor(raw_ref, _tell=True)\n\n self._mem_quota_ref = self.promise_ref(MemQuotaActor.default_name())\n self._dispatch_ref = self.promise_ref(DispatchActor.default_name())\n self._dispatch_ref.register_free_slot(self.uid, 'cpu')\n\n self._status_ref = self.ctx.actor_ref(StatusActor.default_name())\n if not self.ctx.has_actor(self._status_ref):\n self._status_ref = None\n\n self._execution_pool = self.ctx.threadpool(1)\n if options.worker.spill_directory:\n self._spill_load_pool = self.ctx.threadpool(len(options.worker.spill_directory))\n\n @staticmethod\n def _build_load_key(graph_key, chunk_key):\n return '%s_load_memory_%s' % (graph_key, chunk_key)\n\n @promise.reject_on_exception\n @log_unhandled\n def calc(self, session_id, ser_graph, targets, callback):\n \"\"\"\n Do actual calculation. 
This method should be called when all data\n is available (i.e., either in shared cache or in memory)\n :param session_id: session id\n :param ser_graph: serialized executable graph\n :param targets: keys of target chunks\n :param callback: promise callback, returns the uid of InProcessCacheActor\n \"\"\"\n from ..tensor.expressions.datasource import TensorFetchChunk\n graph = deserialize_graph(ser_graph)\n op_key, op_name = concat_operand_keys(graph, '_')\n\n try:\n context_dict = dict()\n comp_nodes = []\n absent_keys = []\n spill_load_futures = dict()\n for chunk in graph.iter_nodes():\n try:\n # try load chunk from shared cache\n if isinstance(chunk.op, TensorFetchChunk):\n context_dict[chunk.key] = self._chunk_store.get(session_id, chunk.key)\n self._mem_quota_ref.release_quota(self._build_load_key(op_key, chunk.key))\n else:\n comp_nodes.append(chunk.op.key)\n except KeyError:\n # chunk not in shared cache, we load it from spill directly\n if self._spill_load_pool is not None and spill_exists(chunk.key):\n logger.debug('Load chunk %s directly from spill', chunk.key)\n self._mem_quota_ref.process_quota(self._build_load_key(op_key, chunk.key))\n spill_load_futures[chunk.key] = self._spill_load_pool.submit(read_spill_file, chunk.key)\n else:\n absent_keys.append(chunk.key)\n if absent_keys:\n raise ObjectNotInPlasma(absent_keys)\n\n # collect results from greenlets\n if spill_load_futures:\n for k, future in spill_load_futures.items():\n context_dict[k] = future.result()\n self._mem_quota_ref.hold_quota(self._build_load_key(op_key, k))\n spill_load_futures.clear()\n\n logger.debug('Start calculating operand %r.', comp_nodes)\n\n start_time = time.time()\n\n # mark targets as processing\n target_keys = [k for k in targets if not self._chunk_store.contains(session_id, k)]\n [self._mem_quota_ref.process_quota(k) for k in target_keys]\n\n # start actual execution\n executor = Executor(storage=context_dict)\n results = self._execution_pool.submit(executor.execute_graph, graph, targets).result()\n\n for k in list(context_dict.keys()):\n del context_dict[k]\n self._mem_quota_ref.release_quota(self._build_load_key(op_key, k))\n\n end_time = time.time()\n\n [self._mem_quota_ref.hold_quota(k) for k in target_keys]\n\n # adjust sizes in allocation\n save_sizes = dict()\n for k, v in zip(targets, results):\n if not self._chunk_store.contains(session_id, k):\n _calc_result_cache[k] = (session_id, v)\n save_sizes[k] = calc_data_size(v)\n self._mem_quota_ref.apply_allocation(k, save_sizes[k])\n\n if self._status_ref:\n self._status_ref.update_mean_stats(\n 'calc_speed.' 
+ op_name, sum(save_sizes.values()) * 1.0 / (end_time - start_time),\n _tell=True, _wait=False)\n\n logger.debug('Finish calculating operand %r.', comp_nodes)\n self.tell_promise(callback, self._inproc_cache_ref.uid, save_sizes)\n self._dispatch_ref.register_free_slot(self.uid, 'cpu', _tell=True)\n except:\n self._dispatch_ref.register_free_slot(self.uid, 'cpu', _tell=True)\n raise\n","sub_path":"mars/worker/calc.py","file_name":"calc.py","file_ext":"py","file_size_in_byte":10837,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"4248125","text":"import pickle\nimport numpy\n### This is a very rudimentary script to kind of help with debugging this horrible test case\norig = numpy.load(open('results_2B52-results_5645.pdb.cPickle.orig'))\nnew = numpy.load(open('results_2B52-results_5645.pdb.cPickle'))\n\n\nfor i,j in zip(orig,new):\n if i != j:\n diff= numpy.array(i[2])-numpy.array(j[2])\n print(diff)\n","sub_path":"POVME/packages/binana/tests/algebra_peel_dock/howDifferent.py","file_name":"howDifferent.py","file_ext":"py","file_size_in_byte":367,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"93884239","text":"from UQpy.SampleMethods.RSS.rss import RSS\nfrom UQpy.SampleMethods.STS import RectangularSTS\nimport numpy as np\nimport scipy.stats as stats\nimport copy\n\n\nclass RectangularRSS(RSS):\n \"\"\"\n Executes Refined Stratified Sampling using Rectangular Stratification.\n\n ``RectangularRSS`` is a child class of ``RSS``. ``RectangularRSS`` takes in all parameters defined in the parent\n ``RSS`` class with differences note below. Only those inputs and attributes that differ from the parent class\n are listed below. See documentation for ``RSS`` for additional details.\n\n **Inputs:**\n\n * **sample_object** (``RectangularSTS`` object):\n The `sample_object` for ``RectangularRSS`` must be an object of the ``RectangularSTS`` class.\n\n **Methods:**\n \"\"\"\n def __init__(self, sample_object=None, runmodel_object=None, krig_object=None, local=False, max_train_size=None,\n step_size=0.005, qoi_name=None, n_add=1, nsamples=None, random_state=None, verbose=False):\n\n if not isinstance(sample_object, RectangularSTS):\n raise NotImplementedError(\"UQpy Error: sample_object must be an object of the RectangularSTS class.\")\n\n self.strata_object = copy.deepcopy(sample_object.strata_object)\n\n super().__init__(sample_object=sample_object, runmodel_object=runmodel_object, krig_object=krig_object,\n local=local, max_train_size=max_train_size, step_size=step_size, qoi_name=qoi_name,\n n_add=n_add, nsamples=nsamples, random_state=random_state, verbose=verbose)\n\n def run_rss(self):\n \"\"\"\n Overwrites the ``run_rss`` method in the parent class to perform refined stratified sampling with rectangular\n strata. It is an instance method that does not take any additional input arguments. 
See\n the ``RSS`` class for additional details.\n \"\"\"\n if self.runmodel_object is not None:\n self._gerss()\n else:\n self._rss()\n\n self.weights = self.strata_object.volume\n\n def _gerss(self):\n \"\"\"\n This method generates samples using Gradient Enhanced Refined Stratified Sampling.\n \"\"\"\n if self.verbose:\n print('UQpy: Performing GE-RSS with rectangular stratification...')\n\n # Initialize the vector of gradients at each training point\n dy_dx = np.zeros((self.nsamples, np.size(self.training_points[1])))\n\n # Primary loop for adding samples and performing refinement.\n for i in range(self.samples.shape[0], self.nsamples, self.n_add):\n p = min(self.n_add, self.nsamples - i) # Number of points to add in this iteration\n\n # If the quantity of interest is a dictionary, convert it to a list\n qoi = [None] * len(self.runmodel_object.qoi_list)\n if type(self.runmodel_object.qoi_list[0]) is dict:\n for j in range(len(self.runmodel_object.qoi_list)):\n qoi[j] = self.runmodel_object.qoi_list[j][self.qoi_name]\n else:\n qoi = self.runmodel_object.qoi_list\n\n # ################################\n # --------------------------------\n # 1. Determine the strata to break\n # --------------------------------\n\n # Compute the gradients at the existing sample points\n if self.max_train_size is None or len(\n self.training_points) <= self.max_train_size or i == self.samples.shape[0]:\n # Use the entire sample set to train the surrogate model (more expensive option)\n dy_dx[:i] = self.estimate_gradient(np.atleast_2d(self.training_points),\n np.atleast_2d(np.array(qoi)),\n self.strata_object.seeds +\n 0.5 * self.strata_object.widths)\n else:\n # Use only max_train_size points to train the surrogate model (more economical option)\n # Find the nearest neighbors to the most recently added point\n from sklearn.neighbors import NearestNeighbors\n knn = NearestNeighbors(n_neighbors=self.max_train_size)\n knn.fit(np.atleast_2d(self.training_points))\n neighbors = knn.kneighbors(np.atleast_2d(self.training_points[-1]), return_distance=False)\n\n # Recompute the gradient only at the nearest neighbor points.\n dy_dx[neighbors] = self.estimate_gradient(np.squeeze(self.training_points[neighbors]),\n np.array(qoi)[neighbors][0],\n np.squeeze(\n self.strata_object.seeds[neighbors] +\n 0.5 * self.strata_object.widths[\n neighbors]))\n\n # Define the gradient vector for application of the Delta Method\n dy_dx1 = dy_dx[:i]\n\n # Estimate the variance within each stratum by assuming a uniform distribution over the stratum.\n # All input variables are independent\n var = (1 / 12) * self.strata_object.widths ** 2\n\n # Estimate the variance over the stratum by Delta Method\n s = np.zeros([i])\n for j in range(i):\n s[j] = np.sum(dy_dx1[j, :] * var[j, :] * dy_dx1[j, :]) * self.strata_object.volume[j] ** 2\n\n # 'p' is number of samples to be added in the current iteration\n bin2break = self.identify_bins(strata_metric=s, p_=p)\n\n # #############################################\n # ---------------------------------------------\n # 2. Update each strata and generate new sample\n # ---------------------------------------------\n new_points = np.zeros([p, self.dimension])\n # Update the strata_object for all new points\n for j in range(p):\n new_points[j, :] = self._update_stratum_and_generate_sample(bin2break[j])\n\n # ###########################\n # ---------------------------\n # 3. 
Update sample attributes\n # ---------------------------\n self.update_samples(new_point=new_points)\n\n # ###############################\n # -------------------------------\n # 4. Execute model at new samples\n # -------------------------------\n self.runmodel_object.run(samples=np.atleast_2d(self.samples[-self.n_add:]), append_samples=True)\n\n if self.verbose:\n print(\"Iteration:\", i)\n\n def _rss(self):\n \"\"\"\n This method generates samples using Refined Stratified Sampling.\n \"\"\"\n\n if self.verbose:\n print('UQpy: Performing RSS with rectangular stratification...')\n\n # Primary loop for adding samples and performing refinement.\n for i in range(self.samples.shape[0], self.nsamples, self.n_add):\n p = min(self.n_add, self.nsamples - i) # Number of points to add in this iteration\n # ################################\n # --------------------------------\n # 1. Determine the strata to break\n # --------------------------------\n # Estimate the weight corresponding to each stratum\n s = np.zeros(i)\n for j in range(i):\n s[j] = self.strata_object.volume[j] ** 2\n\n # 'p' is number of samples to be added in the current iteration\n bin2break = self.identify_bins(strata_metric=s, p_=p)\n\n # #############################################\n # ---------------------------------------------\n # 2. Update each strata and generate new sample\n # ---------------------------------------------\n new_points = np.zeros([p, self.dimension])\n # Update the strata_object for all new points, 'p' is number of samples to be added in the current iteration\n for j in range(p):\n new_points[j, :] = self._update_stratum_and_generate_sample(bin2break[j])\n\n # ###########################\n # ---------------------------\n # 3. Update sample attributes\n # ---------------------------\n self.update_samples(new_point=new_points)\n\n if self.verbose:\n print(\"Iteration:\", i)\n\n def _update_stratum_and_generate_sample(self, bin_):\n # Cut the stratum in the direction of maximum length\n cut_dir_temp = self.strata_object.widths[bin_, :]\n dir2break = np.random.choice(np.argwhere(cut_dir_temp == np.amax(cut_dir_temp))[0])\n\n # Divide the stratum bin2break in the direction dir2break\n self.strata_object.widths[bin_, dir2break] = self.strata_object.widths[bin_, dir2break] / 2\n self.strata_object.widths = np.vstack([self.strata_object.widths, self.strata_object.widths[bin_, :]])\n self.strata_object.seeds = np.vstack([self.strata_object.seeds, self.strata_object.seeds[bin_, :]])\n # print(self.samplesU01[bin_, dir2break], self.strata_object.seeds[bin_, dir2break] + \\\n # self.strata_object.widths[bin_, dir2break])\n if self.samplesU01[bin_, dir2break] < self.strata_object.seeds[bin_, dir2break] + \\\n self.strata_object.widths[bin_, dir2break]:\n self.strata_object.seeds[-1, dir2break] = self.strata_object.seeds[bin_, dir2break] + \\\n self.strata_object.widths[bin_, dir2break]\n # print(\"retain\")\n else:\n self.strata_object.seeds[bin_, dir2break] = self.strata_object.seeds[bin_, dir2break] + \\\n self.strata_object.widths[bin_, dir2break]\n\n self.strata_object.volume[bin_] = self.strata_object.volume[bin_] / 2\n self.strata_object.volume = np.append(self.strata_object.volume, self.strata_object.volume[bin_])\n\n # Add a uniform random sample inside the new stratum\n new = stats.uniform.rvs(loc=self.strata_object.seeds[-1, :], scale=self.strata_object.widths[-1, :],\n random_state=self.random_state)\n\n return 
new","sub_path":"src/UQpy/SampleMethods/RSS/rectangular.py","file_name":"rectangular.py","file_ext":"py","file_size_in_byte":10448,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"336991475","text":"import numpy as np\nfrom tqdm import trange\n\n# 激活函数采用Sigmoid\ndef sigmoid(inX):\n return 1.0/(1+np.exp(-inX));\n\n# Sigmoid的导数\ndef sigmoid_derivative(inx):\n return sigmoid(inx) * (1 - sigmoid(inx));\n\nclass NeuralNetwork:\t# 神经网络\n def __init__(self, layers):\t# layers为神经元个数列表\n '''\n :param layers: \n layers为表示神经网络神经元的矩阵\n 矩阵第一个元素表示输入层节点数目\n 最后一个元素表示输出层节点数目\n 其余为隐含层节点数目\n \n 设以手写数字数据集mnist,layers=[784,1568,1568,10]为例子\n 784是输入层节点数量,等同数据大小,手写数字的数据维度为28*28=784\n 10是输出层节点数量,等同分类数,手写数字0~9,为10个类别\n 两个1568是隐含层节点数量,BP神经网络中,隐含层节点数目是没有固定的,可以随意设置\n 在确定隐层节点数时必须满足:隐层节点数必须小于N-1,训练样本数必须多于网络模型的连接权数,一般为2~10倍\n '''\n self.layers = layers;\n self.activation = sigmoid; # 激活函数\n self.activation_deriv = sigmoid_derivative; # 激活函数导数\n self.weights = []; # 权重列表\n self.bias = []; # 偏置列表\n self.fittimes = 0; #记录训练次数\n for i in range(1, len(layers)):\t# 正态分布初始化\n #初始化权重和偏置,初始权重必须不能全为0\n self.weights.append(np.random.randn(layers[i-1], layers[i]))\n self.bias.append(np.random.randn(layers[i]))\n\n #self是python类函数的参数,保存类的内存地址,可以用来保存成员变量和函数\n #调用函数时,不需要填写self参数\n # self的名称可以任意修改,但一般都是有“self”,这是一种习惯\n #self类似c++的指针,返回self可以获取权重偏置等数据\n def getNeuralNetwork(self):\n return self;\n\n #训练\n def fit(self, x, y, epochs=1, learning_rate=0.2):\t# 反向传播算法\n x = np.atleast_2d(x); #维度改变 atleast_xd 支持将输入数据直接视为 x维。\n n = len(y);\t# 样本数\n y = np.array(y);\n\n for p in range(epochs): #样本过少时根据epochs减半学习率,epochs默认为1,即循环1次\n for k in trange(n):\n self.fittimes+=1;\n if (k+1) % n == 0:\n learning_rate *= 0.5;\t# 每训练完一代,样本减半学习率\n\n a = [ x[k%n] ];\t# 保存各层激活值的列表\n\n # 正向传播开始 #np.dot(a,b)是矩阵乘法运算的函数 a*b则是矩阵的点乘运算\n for lay in range(len(self.weights)):\n #numpy dot()函数是两个数组的点乘运算\n a.append(self.activation(np.dot(a[lay], self.weights[lay]) + self.bias[lay]));\n\n # 反向传播开始\n label = np.zeros(a[-1].shape);\n label[ y[k%n] ] = 1; # 根据类号生成标签\n\n #损失函数\n error = label - a[-1];\t# 误差值\n loss = [ error*self.activation_deriv(a[-1]) ];\t# 保存各层误差值的列表\n\n layer_num = len(a) - 2;\t# 导数第二层开始\n for j in range(layer_num, 0, -1):\n loss.append(loss[-1].dot(self.weights[j].T) * self.activation_deriv(a[j]));\t# 误差的反向传播\n loss.reverse();#数组倒序\n\n for i in range(len(self.weights)):\t# 正向更新权值\n layer = np.atleast_2d(a[i]);\n delta = np.atleast_2d(loss[i]);\n self.weights[i] += learning_rate * layer.T.dot(delta);\n self.bias[i] += learning_rate * loss[i];\n\n #预测\n def predict(self, x):\n a = np.array(x, dtype=np.float);\n for lay in range(len(self.weights)):\t# 正向传播\n a = self.activation(np.dot(a, self.weights[lay]) + self.bias[lay]);\n a = list(100 * a/sum(a));\t# 改为百分比显示\n i = a.index(max(a));\t# 预测值\n per = [];\t# 各类的置信程度\n for num in a:\n per.append(str(np.round(num, 2))+'%');\n return i, per;\n\n","sub_path":"NeuralNetwork/BPNN.py","file_name":"BPNN.py","file_ext":"py","file_size_in_byte":4409,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"400093860","text":"from flask import Flask, render_template,request,redirect,url_for\r\nimport nltk\r\nfrom nltk.tokenize import word_tokenize, sent_tokenize\r\nfrom nltk.corpus import stopwords\r\nimport numpy as np\r\nimport pandas as pd\r\nimport re\r\nfrom csv import reader\r\nfrom fuzzywuzzy import fuzz, process\r\n\r\napp = Flask(__name__)\r\n\r\n@app.route('/')\r\ndef homePage():\r\n return 
render_template(\"partSearch.html\")\r\n\r\n@app.route('/getSearchResult',methods = ['POST', 'GET'])\r\ndef getSearchResult():\r\n if request.method == 'POST':\r\n Search = request.form['Search']\r\n my_dict = CalculatedSearchData(Search);\r\n \r\n print (\"Search Result\", my_dict)\r\n return render_template(\"partSearch.html\",my_dict=my_dict)\r\n #return redirect(url_for('homePage',my_dict=my_dict))\r\n\r\n\r\nstopwords = set(stopwords.words(\"english\"))\r\n\r\n #Master Data\r\ndictionary = pd.read_csv('E:\\JS\\Py\\Text_similarity\\Fuzzy.csv', header=None, squeeze=True).to_dict()\r\n \r\n #Second Master Data\r\nwith open('E:\\JS\\Py\\Text_similarity\\Second_dict.csv', 'r') as read_obj:\r\n csv_reader = reader(read_obj)\r\n sec_dict = list(map(tuple, csv_reader))\r\n\r\nmaster_doc = []\r\nmatch_doc = []\r\n#using Lemmatization\r\nlemma = nltk.wordnet.WordNetLemmatizer()\r\n\r\n\r\ntest_doc = []\r\ntest_line = []\r\n # create a clean document after removing the stop-words and generate lemmas\r\nclean_doc = []\r\n\r\nfinal_doc=[]\r\n\r\ndef stringsearch(string, sub_str, sub_str1, line):\r\n if(string.find(sub_str)!=-1 and string.find(sub_str1)!=-1):\r\n final_doc.append(sub_str+\" \"+sub_str1)\r\n return (1)\r\n \r\ndef secondsearch(string, sub_str, line):\r\n r = fuzz.token_set_ratio(string, sub_str)\r\n #r = fuzz.token_sort_ratio(string, sub_str)\r\n if(r > 90):\r\n final_doc.append(sub_str) \r\n return (1)\r\n\r\ndef listtostring(clean_line):\r\n return(\" \".join(clean_line))\r\n\r\ndef replacer(string1):\r\n return(string1.replace('\\bscr\\b','screw').replace('\\bspr\\b','spring').replace('\\bbd\\b','band').replace('spr bd','spring band').replace('spr.','spring').replace('separator','clamp').replace('assembly','assy')\r\n .replace('\\bw/h\\b','wiring harness').replace('brg','bearing').replace('bkt','bracket').replace('\\bwh\\b','wiring harness').replace('\\bbso\\b','body side outer').replace('pigtail','pigtail wiring harness')\r\n .replace('w/h','wiring harness').replace('\\breinf\\b','reinforcement').replace('wh','wiring harness').replace('mtg.','mounting').replace('ccb','cross car beam').replace('\\bais\\b','air intake system')\r\n .replace('\\bmtg\\b','mounting').replace('\\bbrkt\\b','bracket').replace('\\brr\\b','rear').replace('frt','front').replace('\\bdr\\b','door').replace('engine cooling system','radiator').replace('\\bra\\b','rear axle')\r\n .replace('reinfrocement','reinforcement').replace('mountingbracket','mounting bracket').replace('nylon pipe','nylon bunch').replace('pipe bunch','nylon bunch').replace('vent tube','vent hose').replace('\\bvent\\b','ventilation')\r\n .replace('suction tube','suction hose').replace('\\bcyl\\b','cylinder').replace('clutch line','clutch hose').replace('suction pipe','suction hose').replace('suction line','suction hose').replace('fuel return pipe','fuel return hose')\r\n .replace('clutch pipe','clutch hose').replace('coolant pipe','coolant hose').replace('exh','exhaust').replace('batt','battery').replace('loadbody','load body').replace('\\barb\\b','anti roll bar').replace('window regulator','window winding')\r\n .replace('pressure line','pressure hose').replace('return line','return hose').replace('gear box','gearbox').replace('shell assy','load body').replace('\\bfl\\b','flanged').replace('flat bed','load body').replace('high deck','load body')\r\n .replace('lining','liner').replace('\\bgb\\b','gearbox').replace('\\bstg\\b','steering').replace('-conn','connector').replace('\\bconn\\b','connector').replace('washer tank','washer bottle'))\r\n 
\r\n\r\ntest_line = []\r\ndef CalculatedSearchData(SearchData):\r\n global final_doc\r\n clean_doc =[] \r\n test_line =[]\r\n file_docs = []\r\n file_docs.append(SearchData)\r\n my_dict = {}\r\n for line in file_docs:\r\n clean_line = [word for word in line.split() if word not in stopwords] \r\n for word in line.split():\r\n test_line.append(lemma.lemmatize(word))\r\n test_doc.append(listtostring(test_line))\r\n #clean_doc.append(listtostring(clean_line))\r\n clean_doc = test_doc\r\n \r\n fuzzydict = []\r\n for x in dictionary:\r\n fuzzydict.append(dictionary[x].lower())\r\n \r\n init_size = len(clean_doc)\r\n print (\"SSIZe \", init_size); \r\n ##first loop of search and filtering\r\n for line in clean_doc: \r\n line1 = line.lower()\r\n string = line1.partition(\",\")[0].partition(\";\")[0].partition(\"(\")[0].partition(\"[\")[0].partition('for')[0].partition('with')[0].partition('w/o')[0].partition('offer')[0].partition('\\bw\\b')[0]\r\n for y in sec_dict:\r\n #print (\"clean_doc \", replacer(string.lower()),y[0].lower(),y[1].lower(), line);\r\n #print (\"Result \", stringsearch(replacer(string.lower()),y[0].lower(),y[1].lower(), line));\r\n if (stringsearch(replacer(string.lower()),y[0].lower(),y[1].lower(), line) == 1): \r\n #print(line, \" Stage 1 is ::-->\")\r\n \r\n clean_doc.remove(line)\r\n break\r\n if(len(final_doc)!=0):\r\n #print(final_doc)\r\n my_dict = {'PART NAME': line, 'Mach Data': final_doc}\r\n \r\n else:\r\n final_doc = process.extract(replacer(string).lower(), fuzzydict, scorer=fuzz.token_sort_ratio, limit = 5)\r\n my_dict = {'PART NAME': line, 'Mach Data': final_doc}\r\n \r\n final_doc = [] \r\n\r\n if not bool(my_dict):\r\n my_dict = {'PART NAME': line, 'Mach Data': 'Match Not Found '} \r\n return my_dict\r\n \r\n\r\n\r\nif __name__ == '__main__':\r\n app.run(debug = True) ","sub_path":"app_v2.py","file_name":"app_v2.py","file_ext":"py","file_size_in_byte":5987,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"543927323","text":"#!/usr/bin/env python\n\nimport sys\nfrom os.path import abspath, dirname, join\nfrom setuptools import setup, find_packages\n\n# Find the directory where the source distribution was unpacked.\nsource_directory = dirname(abspath(__file__))\n\n# Add the source distribution directory to the module path.\nsys.path.append(source_directory)\n\n# Find the version number embedded in coloredlogs/__init__.py.\nfrom coloredlogs import __version__\n\n# Fill in the long description (for the benefit of PyPi)\n# with the contents of README.rst (rendered by GitHub).\nreadme_file = join(source_directory, 'README.rst')\nreadme_text = open(readme_file, 'r').read()\n\nsetup(name='coloredlogs',\n version=__version__,\n description='Colored stream handler for the logging module',\n long_description=readme_text,\n url='https://github.com/xolox/python-coloredlogs',\n author='Peter Odding',\n author_email='peter@peterodding.com',\n packages=find_packages(),\n entry_points={'console_scripts': ['ansi2html = coloredlogs.converter:main']})\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1040,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"323508431","text":"import time\nimport pickle\nimport config\nfrom adal import AuthenticationContext\nimport requests\n\n\ndef device_flow_session(ctx, client_id, renew_session=None, refresh_token=''):\n \"\"\"Obtain an access token from Azure AD (via device flow) and create\n a Requests 
session instance ready to make authenticated calls to\n Microsoft Graph.\n\n client_id = Application ID for registered \"Azure AD only\" V1-endpoint app\n\n Returns Requests session object if user signed in successfully. The session\n includes the access token in an Authorization header.\n\n User identity must be an organizational account (ADAL does not support MSAs).\n \"\"\"\n if renew_session is None:\n device_code = ctx.acquire_user_code(config.RESOURCE, client_id)\n\n # display user instructions\n print(device_code['message'])\n\n token_response = ctx.acquire_token_with_device_code(config.RESOURCE,\n device_code,\n client_id)\n else:\n token_response = ctx.acquire_token_with_refresh_token(refresh_token,\n client_id,\n config.RESOURCE)\n print(token_response['expiresOn'])\n if not token_response.get('accessToken', None):\n return None\n\n session = requests.Session()\n session.headers.update({'Authorization': f'Bearer {token_response[\"accessToken\"]}',\n 'SdkVersion': 'sample-python-adal',\n 'x-client-SKU': 'sample-python-adal'})\n return session, token_response\n\n\nFILENAME = 'session.pkl'\ndef save_session(session):\n with open(FILENAME, 'wb') as file:\n pickle.dump(session, file)\n print(time.asctime( time.localtime(time.time()) ), 'session saved.')\n\n\nif __name__ == '__main__':\n ctx = AuthenticationContext(config.AUTHORITY_URL, api_version=None)\n GRAPH_SESSION = None\n response = {'refreshToken': ''}\n while True:\n GRAPH_SESSION, response = device_flow_session(ctx, config.CLIENT_ID, GRAPH_SESSION, response['refreshToken'])\n save_session(GRAPH_SESSION)\n time.sleep(3000)","sub_path":"renew_token.py","file_name":"renew_token.py","file_ext":"py","file_size_in_byte":2242,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"220750827","text":"\"\"\"\nNetCDF helper functions\n\"\"\"\n\nimport os\nimport struct\nimport netCDF4 as nc\n\n\ndef ncdf_info(filename):\n \"\"\"\n Reads a NetCDF file and displays information about its content.\n 20130508-- Tiago.\n \"\"\"\n def _attr_string(s):\n if type(s) in [type(''), type('')]:\n return '\"%s\"' % s\n else:\n return str(s)\n\n def _group_print(g, ind=0):\n gatname = ['global', 'group'][ind != 0]\n gsp = ' ' * ind\n if len(g.dimensions) > 0:\n print(('%sdimensions:' % gsp))\n for a, v in list(g.dimensions.items()):\n print(('\\t%s = %i' % (a, len(v))))\n if len(g.variables) > 0:\n print(('%svariables:' % gsp))\n for k, v in list(g.variables.items()):\n dd = str('(' + ', '.join(v.dimensions) + ')')\n print(('%s\\t%s %s%s' % (gsp, str(v.dtype).ljust(10), k, dd)))\n for a, av in list(v.__dict__.items()):\n print(('%s\\t%s%s = %s' % (gsp, ' ' * 15, a,\n _attr_string(av))))\n if len(g.__dict__) > 0:\n print(('%s%s attributes:' % (gsp, gatname)))\n for a, av in list(g.__dict__.items()):\n print(('%s\\t%s = %s' % (gsp, a, _attr_string(av))))\n\n try:\n f = nc.Dataset(filename, 'r')\n except IOError as e:\n print(e)\n return\n print(filename)\n _group_print(f, ind=0)\n if len(f.groups) > 0:\n for k, g in list(f.groups.items()):\n print(('group: %s {' % k))\n _group_print(g, ind=1)\n if len(g.groups) > 0:\n for ki, gi in list(g.groups.items()):\n print((' group: %s {' % ki))\n _group_print(gi, ind=2)\n if len(gi.groups) > 0:\n for kii, gii in list(gi.groups.items()):\n print((' group: %s {' % kii))\n _group_print(gii, ind=3)\n print((' } // group %s' % kii))\n print((' } // group %s' % ki))\n print(('} // group %s' % k))\n f.close()\n return\n\n\ndef copy_ncdf(filein, transp=[], remove=[], 
memGb=16, step=None, tlimit=None,\n fileout=None):\n ''' Copies variables from a netcdf file and writes them in a new file.\n\n IN:\n\n filein: input netCDF file\n transp: list of strings with variable names. These variables will be transposed.\n remove: list of strings, variables in this list will not be copied.\n memGb: integer, size of RAM before which transpose is made in memory\n '''\n\n if fileout is None:\n fileout = os.path.splitext(\n filein)[0] + '_copy' + os.path.splitext(filein)[1]\n if os.path.isfile(fileout):\n raise IOError((\"%s already exists\"\n \", remove or rename for copy to proceeed.\" % fileout))\n finp = nc.Dataset(filein, 'r')\n fout = nc.Dataset(fileout, 'w')\n # copy dimensions\n for d in list(finp.dimensions.keys()):\n if tlimit and d == 'nt':\n fout.createDimension(d, tlimit)\n else:\n fout.createDimension(d, len(finp.dimensions[d]))\n # copy attributes\n for d in list(finp.__dict__.keys()):\n if tlimit and d == 'ntnum':\n setattr(fout, d, finp.__dict__[d][:tlimit])\n else:\n setattr(fout, d, finp.__dict__[d])\n # create variables\n for vname, v in list(finp.variables.items()):\n if vname in transp:\n print(('Transposing ' + vname))\n myvar = fout.createVariable(vname, v.dtype, v.dimensions[::-1])\n # size of variable in Gb\n var_size = np.prod(v.shape) * struct.calcsize(v.dtype.str) / 2.**30\n if var_size < memGb:\n buf = v[:]\n myvar[:] = buf.T\n else:\n # transpose: fit as much as possible in memory to minimize I/O\n if step is None:\n step = int(memGb / (np.prod(v.shape[1:]) *\n struct.calcsize(v.dtype.str) / 2.**30))\n for i in range(0, v.shape[0], step):\n print(i, step, v.shape[0])\n buf = v[i:i + step]\n myvar[..., i:i + buf.shape[0]] = buf.T\n elif vname in remove:\n print(('Skipping ' + vname))\n continue\n else:\n print(('Copying ' + vname))\n if tlimit:\n myvar = fout.createVariable(vname, v.dtype, v.dimensions)\n myvar[:] = v[:tlimit]\n else:\n myvar = fout.createVariable(vname, v.dtype, v.dimensions)\n myvar[:] = v[:]\n # copy variable attributes\n for d in list(v.__dict__.keys()):\n setattr(myvar, d, v.__dict__[d])\n # close files\n finp.close()\n fout.close()\n return\n\n\ndef copy_var(filein, fileout, vars=[], step=15):\n ''' Copies variables from one netcdf file to another.\n IN:\n\n filein: input netCDF file, where the variable(s) will be read\n fileout: output netCDF file (must exist), where to copy variable(s)\n vars: list of strings with variable names to be copied.\n '''\n finp = nc.Dataset(filein, 'r')\n fout = nc.Dataset(fileout, 'a')\n for v in vars:\n print(v)\n fv = finp.variables[v]\n dd = fv.dimensions\n for d in dd:\n if d not in fout.dimensions:\n print('(WWW) copy_var: dimension ' +\n '%s not found in %s, creating it.' % (d, fileout))\n fout.createDimension(d, len(finp.dimensions[d]))\n if len(finp.dimensions[d]) != len(fout.dimensions[d]):\n raise ValueError('copy_var: dimension has size' +\n '%i in %s and %i in %s. 
Aborting'% (len(finp.dimensions[d]),\n filein, len(fout.dimensions[d], fileout)))\n if v not in fout.variables:\n myvar = fout.createVariable(v, fv.dtype, fv.dimensions)\n else:\n myvar = fout.variables[v]\n # copy variable\n for i in range(0, fv.shape[0], step):\n print(i, i + step)\n buf = finp.variables[v][i:i + step]\n myvar[i:i + buf.shape[0]] = buf\n # copy variable attributes\n for d in list(fv.__dict__.keys()):\n buf = v[i:i + step]\n setattr(myvar, d, fv.__dict__[d])\n # close files\n finp.close()\n fout.close()\n return\n\n\ndef getvar(infile, var, group=False, memmap=False):\n ''' Reads a variable from a NetCDF file.\n\n IN:\n file (string): NetCDF filename\n var (string) : variable name\n memmap (bool): [optional] if True, will return the variable object\n (not in memory), instead of reading it into memory.\n OUT:\n result (array) : array with the requested variable\n\n --Tiago, 20090629\n '''\n f = nc.Dataset(infile, mode='r')\n ds = f\n if group:\n ds = f.groups[group]\n if var not in ds.variables:\n raise KeyError('getvar: variable %s not in %s' % (var, infile))\n if not memmap:\n result = np.array(ds.variables[var][:])\n f.close()\n else:\n result = ds.variables[var]\n return result\n\n\ndef updatevar(infile, var, data, group=False):\n ''' Updates a variable in a NetCDF file.\n\n IN:\n file (string): NetCDF filename\n var (string) : variable name\n data : array with data to overwrite. Must be same dimensions as in the file.\n\n --Tiago, 20111216\n '''\n f = nc.Dataset(file, mode='a')\n ds = f\n if group:\n ds = f.groups[group]\n if var not in ds.variables:\n raise KeyError('getvar: variable %s not in %s' % (var, file))\n ds.variables[var][:] = data[:]\n f.close()\n return\n\n\ndef merge_snaps(origf, newf, order=False, unique=True):\n ''' Merges two line profile files (lte.x/multi3d ncdf format) into the\n first line profile file.\n\n IN:\n origf, newf: ncdf filenames.\n order : if True, sorts all the variables (and ntnum) according to ntnum.\n unique: if True, eliminates duplicate snapshots in case both files have\n some common snapshots.\n\n OUT:\n None. 
(Results saved in orig)\n\n --Tiago, 20080128\n '''\n new = nc.Dataset(newf, mode='r')\n nt_orig = np.array(getattr(orig, 'ntnum')).astype('i')\n nt_new = np.array(getattr(new, 'ntnum')).astype('i')\n # for netcdf4 having an extra dim\n if not nt_new.shape:\n nt_new = np.array([nt_new])\n if not nt_orig.shape:\n nt_orig = np.array([nt_orig])\n # select nt's not already in the file\n if unique:\n idx2 = []\n for i in range(len(nt_new)):\n if nt_new[i] not in nt_orig:\n idx2.append(i)\n if idx2 == []:\n print('*** All snapshots common, not merging.')\n return\n idx2 = np.array(idx2).astype('i')\n else:\n idx2 = np.arange(len(nt_new))\n ntnum = np.concatenate((nt_orig, nt_new[idx2]))\n # sort by nt number?\n if order:\n idx = np.argsort(ntnum)\n else:\n idx = np.arange(len(ntnum))\n print('--- Merging...')\n # ending snapshot number for orig file\n ent = orig.variables['prof_int'].shape[0]\n # merge variables in first (unlimited) dimension\n for v in orig.variables:\n nvs = new.variables[v].shape\n if ncdf4:\n nvar = new.variables[v]\n else:\n nvar = np.array(new.variables[v][:])\n # must put if for ncdf3 to put new array in memory and fancy index it\n print(v) # , ovs,nvs\n # fix case onf only one nt\n if len(nt_new) == 1:\n orig.variables[v][ent] = nvar[0]\n else:\n orig.variables[v][ent:] = nvar[idx2]\n if order:\n orig.variables[v][:] = orig.variables[v][idx]\n # update ntnum\n setattr(orig, 'ntnum', ntnum[idx])\n orig.close()\n new.close()\n print('--- Successfully merged %s into %s.' % (newf, origf))\n return\n","sub_path":"helita/io/ncdf.py","file_name":"ncdf.py","file_ext":"py","file_size_in_byte":10001,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"172565470","text":"from pyspark.ml import Pipeline\nfrom pyspark.ml.evaluation import RegressionEvaluator\nfrom pyspark.ml.regression import RandomForestRegressor\nfrom pyspark.sql import SparkSession\n\nfrom pyspark import SparkContext, SparkConf\nfrom pyspark.mllib.util import MLUtils\n\nif __name__ == \"__main__\":\n spark = SparkSession \\\n .builder \\\n .appName(\"RandomForestRegressorExample\") \\\n .getOrCreate()\n\n spark.sparkContext.setLogLevel('ERROR')\n\n #Reads the data in the LIBSVM format\n #The format of the libsvm was followed\n data = spark.read.format(\"libsvm\").load(\"modsmall.csv\") #Loads the data\n\n #Set up the configurations\n conf = SparkConf().setAppName('Prediction of Time')\n sc = SparkContext(conf=conf)\n\n data = MLUtils.loadLibSVMFile(sc, 'modsmall.csv')\n\n # Split the data into training and test sets (20% held out for testing)\n (trainingData, testData) = data.randomSplit([0.8, 0.2])\n\n rf = RandomForestRegressor(featuresCol=\"indexedFeatures\")\n\n pipeline = Pipeline(stages=[rf])\n\n model = pipeline.fit(trainingData)\n\n\n prediction = model.transform(testData)\n\n prediction.select(\"prediction\", \"label\", \"features\").show(2)\n\n evaluator = RegressionEvaluator(\n labelCol=\"label\", predictionCol=\"prediction\", metricName=\"rmse\")\n rmse = evaluator.evaluate(prediction)\n print(\"Root Mean Squared Error (RMSE) on test data = %g\" % rmse)\n\n rfModel = model.stages[1]\n print(rfModel) # summary only\n\n\n\n\n","sub_path":"Final Code Pranav/PySpark Files/TimePrediction.py","file_name":"TimePrediction.py","file_ext":"py","file_size_in_byte":1474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"605099818","text":"#https://openhome.cc/Gossip/Python/StringFormat.html\nsumA = 0\ni = 
1\nwhile True:\n sumA += i\n i += 1\n if sumA > 10:\n break\nprint('i={i},sum={sumA}' .format(i=1, sumA=30+6))\nprint('{name} is {age} years old!'.format(name='Justin', age=35))\n","sub_path":"Python課程/1.2/字串格式化.py","file_name":"字串格式化.py","file_ext":"py","file_size_in_byte":253,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"165454840","text":"from src.application.security import models\nfrom django import forms\nfrom django.core import validators\nfrom django.utils.translation import ugettext_lazy as _\n\n___FIELD___IS_ACTIVE___ = forms.BooleanField(\n label=_('APPLICATION___ADMINISTRATION___CONTENT___ADMINISTRATION_SECURITY___PERMISSION___IS_ACTIVE'),\n required=False,\n widget=forms.CheckboxInput(\n attrs={\n 'id': 'is_active',\n 'aria-describedby': 'is_active_icon',\n 'icon': 'glyphicon glyphicon-check',\n },\n ),\n)\n___FIELD___CREATED___ = forms.DateField(\n label=_('APPLICATION___ADMINISTRATION___CONTENT___ADMINISTRATION_SECURITY___PERMISSION___CREATED'),\n required=False,\n widget=forms.DateInput(\n attrs={\n 'id': 'created',\n 'aria-describedby': 'created_icon',\n 'icon': 'glyphicon glyphicon-time',\n },\n ),\n)\n___FIELD___MODIFIED___ = forms.DateField(\n label=_('APPLICATION___ADMINISTRATION___CONTENT___ADMINISTRATION_SECURITY___PERMISSION___MODIFIED'),\n required=False,\n widget=forms.DateInput(\n attrs={\n 'id': 'modified',\n 'aria-describedby': 'modified_icon',\n 'icon': 'glyphicon glyphicon-time',\n },\n ),\n)\n___FIELD___NAME___ = forms.CharField(\n label=_('APPLICATION___ADMINISTRATION___CONTENT___ADMINISTRATION_SECURITY___PERMISSION___NAME'),\n required=True,\n min_length=1,\n max_length=100,\n validators=[\n validators.RegexValidator('^[\\w .\\-_]+$', message=_('APPLICATION___ADMINISTRATION___CONTENT___ADMINISTRATION_SECURITY___PERMISSION___VALIDATION Only letters, numbers and special characters dot, -, _ and space.')),\n ],\n widget=forms.TextInput(\n attrs={\n 'id': 'name',\n 'class': 'form-control',\n 'aria-describedby': 'name_icon',\n 'icon': 'glyphicon glyphicon-globe',\n },\n ),\n)\n___FIELD___IDENTIFIER___ = forms.CharField(\n label=_('APPLICATION___ADMINISTRATION___CONTENT___ADMINISTRATION_SECURITY___PERMISSION___IDENTIFIER'),\n required=True,\n min_length=1,\n max_length=100,\n validators=[\n validators.RegexValidator('^[\\w.]+$', message=_('APPLICATION___ADMINISTRATION___CONTENT___ADMINISTRATION_SECURITY___PERMISSION___VALIDATION Only letters, numbers and the special character dot.')),\n ],\n widget=forms.TextInput(\n attrs={\n 'id': 'identifier',\n 'class': 'form-control',\n 'aria-describedby': 'identifier_icon',\n 'icon': 'glyphicon glyphicon-user',\n },\n ),\n)\n\n\ndef ___field___attribute___placeholder___locale___reload__(field, locale):\n field.widget.attrs['placeholder'] = '- %s -' % (_(locale),)\n\n\ndef ___field___attribute___help_text___locale___reload__(field, locale):\n field.help_text = '\\\"%s\\\"' % (_(locale),)\n\n\nclass PermissionDetail(forms.ModelForm):\n is_active = ___FIELD___IS_ACTIVE___\n created = ___FIELD___CREATED___\n modified = ___FIELD___MODIFIED___\n name = ___FIELD___NAME___\n identifier = ___FIELD___IDENTIFIER___\n\n class Meta:\n model = models.Permission\n fields = '__all__'\n\n def __init__(self, *args, **kwargs):\n self.request = kwargs.pop('request')\n super().__init__(*args, **kwargs)\n\n\nclass PermissionUpdate(forms.ModelForm):\n is_active = ___FIELD___IS_ACTIVE___\n name = ___FIELD___NAME___\n identifier = ___FIELD___IDENTIFIER___\n\n class Meta:\n model = 
models.Permission\n fields = ['is_active', 'name', 'identifier', ]\n\n def __init__(self, *args, **kwargs):\n self.request = kwargs.pop('request')\n self.instance_current = kwargs.pop('instance_current')\n super().__init__(*args, **kwargs)\n #\n # is_active\n ___field___attribute___help_text___locale___reload__(field=self.fields['is_active'], locale='APPLICATION___ADMINISTRATION___CONTENT___ADMINISTRATION_SECURITY___PERMISSION___IS_ACTIVE___HELP_TEXT')\n # name\n ___field___attribute___placeholder___locale___reload__(field=self.fields['name'], locale='APPLICATION___ADMINISTRATION___CONTENT___ADMINISTRATION_SECURITY___PERMISSION___NAME')\n self.fields['name'].widget.attrs['autofocus'] = True\n # identifier\n ___field___attribute___placeholder___locale___reload__(field=self.fields['identifier'], locale='APPLICATION___ADMINISTRATION___CONTENT___ADMINISTRATION_SECURITY___PERMISSION___IDENTIFIER')\n self.fields['identifier'].widget.attrs['readonly'] = 'readonly'\n\n def clean_name(self):\n name = self.cleaned_data.get('name')\n name = ' '.join(name.split())\n try:\n instance = models.Permission.objects.get(name=name)\n except models.Permission.DoesNotExist:\n return name\n if instance.name == self.instance_current.name:\n return name\n raise forms.ValidationError(_('APPLICATION___ADMINISTRATION___CONTENT___ADMINISTRATION_SECURITY___PERMISSION___VALIDATION This name has already been chosen.'))\n\n def clean_identifier(self):\n identifier = self.instance_current.identifier\n return identifier\n\n def save(self, commit=True):\n instance = super(PermissionUpdate, self).save(commit=False)\n #\n if commit:\n # save to data base\n instance.save()\n return instance\n","sub_path":"service_django/application/src/application/administration/modules/administration_security/permission/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":5346,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"170069238","text":"import matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\n\nd = pd.read_csv('pandas_read.csv')\nscore_india = d['score_india']\nlegend = ['India', 'Pakistan']\nscore_pk = d['score_pk']\nplt.hist([score_india, score_pk], color=['orange', 'green'])\nplt.xlabel(\"Runs/Delivery\")\nplt.ylabel(\"Frequency\")\nplt.legend(legend)\nplt.xticks(range(0, 7))\nplt.yticks(range(1, 20))\nplt.title('Champions Trophy 2017 Final\\n Runs scored in 3 overs')\nplt.show()","sub_path":"scripts/basic/pandas_read_csv.py","file_name":"pandas_read_csv.py","file_ext":"py","file_size_in_byte":451,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"635363480","text":"from ..views import app\nfrom .base import NoteAppTestCase\nimport random\nimport flask\nimport json\n\n\ndef get_mock_note_data(title=None, expiration=None, content=None):\n if title is None:\n title = NoteAppTestCase.get_random_string()\n if expiration is None:\n expiration = random.randint(0, 5)\n if content is None:\n content = NoteAppTestCase.get_random_string()\n\n return (title, expiration, content)\n\n\nclass ViewsUnittest(NoteAppTestCase):\n\n def setUp(self):\n self.app = app.test_client()\n\n def test_home_page_renders_correctly(self):\n rs = self.app.get('/')\n \n # Check if status is 200 OK and page title is correct\n self.assertEqual(rs.status, '200 OK')\n self.assertIn(b'PasteNote', rs.data)\n\n # Home page contains the form used for adding notes\n self.assertIn(b'
', rs.data)\n\n    def test_invalid_hash_gives_404(self):\n        for l in range(1, 9):\n            hash_ = self.get_random_string(length=l)\n            rs = self.app.get('/notes/' + hash_)\n\n            # Check if 404 page renders\n            self.assertIn(b'404', rs.data)\n            self.assertIn(b'Page Not Found
', rs.data)\n\n def test_add_note_and_check_correctness(self):\n title, exp, content = get_mock_note_data()\n \n rs = self.app.post('/notes', data=dict(title=title, \n expiration=exp,\n content=content))\n json_res = json.loads(rs.data.decode())['note']\n recv_title = json_res['title']\n recv_content = json_res['content']\n\n # Compare sent data with received json response\n self.assertEqual(title, recv_title)\n self.assertEqual(content, recv_content)\n\n def test_add_note_with_invalid_expiration_field(self):\n # Every value not in 0..5 range is invalid\n expiration = random.randint(6, 1000)\n title, exp, content = get_mock_note_data(expiration=expiration)\n\n rs = self.app.post('/notes', data=dict(title=title,\n expiration=exp,\n content=content))\n json_res = json.loads(rs.data.decode())\n recv_error = json_res['error']\n self.assertEqual(recv_error, 'EXPIRATION_FIELD_INVALID')\n\n def test_add_note_with_empty_expiration_field(self):\n title, exp, content = get_mock_note_data()\n\n rs = self.app.post('/notes', data=dict(title=title,\n content=content))\n json_res = json.loads(rs.data.decode())\n recv_error = json_res['error']\n self.assertEqual(recv_error, 'EXPIRATION_FIELD_EMPTY')\n \n def test_add_note_with_empty_content_field(self):\n title, exp, content = get_mock_note_data()\n\n rs = self.app.post('/notes', data=dict(title=title,\n expiration=exp))\n\n json_res = json.loads(rs.data.decode())\n recv_error = json_res['error']\n self.assertEqual(recv_error, 'CONTENT_FIELD_EMPTY')\n\n def test_add_note_and_check_rendered_template(self):\n title, exp, content = get_mock_note_data()\n\n # Create a note with random data\n rs = self.app.post('/notes', data=dict(title=title,\n expiration=exp,\n content=content))\n json_res = json.loads(rs.data.decode())['note']\n\n # Read in data from response\n hash_ = json_res['hash']\n title = json_res['title']\n content = json_res['content']\n\n # Check previously created note \n rs = self.app.get('/notes/' + hash_)\n\n # Make sure everything was saved properly\n title_html = ' {} '.format(title)\n self.assertIn(title_html.encode(), rs.data)\n content_html = '
{}
'.format(content)\n self.assertIn(content_html.encode(), rs.data)\n\n # Check whether displaying a note in a raw style works\n raw_url = '/notes/' + hash_ + '?action=raw'\n rs = self.app.get(raw_url)\n # Template for raw display has styles embedded in HTML\n self.assertIn(b'word-wrap: break-word', rs.data)\n\n \n\n\n \n","sub_path":"app/unittests/test_views.py","file_name":"test_views.py","file_ext":"py","file_size_in_byte":4411,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"219405082","text":"#!/usr/bin/python\n# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4\n# Author: \"Chris Ward \n\nfrom distutils.core import setup\n\n__version__ = '0.1.4'\n\n\nwith open('readme.rst') as _file:\n readme = _file.read()\n\ngithub = 'https://github.com/drpoovilleorg/jsonconf'\ndownload_url = '%s/releases/tag/v%s.tar.gz' % (github, __version__)\n\nsetup(\n name='jsonconf',\n version=__version__,\n packages=['jsonconf'],\n url='https://github.com/drpoovilleorg/jsonconf',\n license='GPLv3',\n author='Chris Ward',\n author_email='cward@redhat.com',\n download_url=download_url,\n description='Python/JSON Configuration Object',\n long_description=readme,\n data_files=[('jsonconf', ['readme.rst'])],\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',\n 'Natural Language :: English',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 2 :: Only',\n 'Topic :: Utilities',\n ],\n keywords=['json', 'configure', 'config'],\n provides=['jsonconf'],\n requires=[],\n install_requires=[],\n scripts=[]\n)\n","sub_path":"pypi_install_script/jsonconf-0.1.4.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1227,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"423615349","text":"#!/usr/bin/env python\n\n\"\"\"This script takes the calibration results from ROS as input (via a user\nprovided file) and prints the converted and correctly formatted stereo camera\nparameters to the terminal, ready for use in ROCK.\n\nTo use this script, save the stereo calibration output (terminal) to a file and\nrun this script. 
You will then be asked for that file and will be presented with\nthe results which can be copied directly to the stereo yml\n(default: ~/dev/bundles/hdpr/config/orogen/stereo::Task.yml).\n\nThe transformation from the camera rotation matrix to Euler angles is done using\nChristoph Gohlkes transformations.py.\"\"\"\n\n__author__ = \"Levin Gerdes\"\n\n#import yaml\nimport re\nimport numpy as np\n#import shutil # to back up stereo yml\nimport os.path\nfrom decimal import *\nfrom Tkinter import Tk\nfrom tkFileDialog import askopenfilename\nfrom transformations import euler_from_matrix\n\n# precision for Decimal\ngetcontext().prec = 16\n\ncams = ['exoter_bb2','hdpr_bb2','hdpr_bb3_left_right','panCam']\nstereoFilePath = '/home/hdpr/dev/bundles/hdpr/config/orogen/stereo::Task.yml'\n\nprint(\"\\nCALIBRATION CONVERSION.\\nThis script does not write anything to disk.\\n\")\n\n# Ask which camera parameters should be written.\n# As long as we are not writing to the yml, this is only necessary in order to\n# read and print the frame height, width, and some panCam specific settings.\nquestion = \"Which camera is being re-calibrated?\\n\"\ni = 1\nfor cam in cams:\n question += str(i) + \") \" + cam + \"\\n\"\n i += 1\nquestion += \"0) Abort\\n\"\n\nuserIn = int(raw_input(question))\nif (userIn == 0):\n quit()\n\n# RegEx for most numbers being used in the yml and calibration output\nreNumber = '-?\\d+\\.\\d+'\n\n# Ask for calibration results\nprint(\"Please choose the file with the ROS calibration results\\n\")\nTk().withdraw()\ncalibResultsFile = askopenfilename(title=\"Locate calibration results\",\n initialdir=\"~/Desktop\")\n\n# Read calibration file\nf = open(calibResultsFile, 'r')\ncalibFile = f.read()\nf.close()\n\n# TRANSLATION VALUES\n# (self.T)\n# look for \"self.T ', [-0.8342,1.12325,7.234234,...\"\nreTrans = '(?<=self.T\\ \\',\\ \\[)(' + reNumber + '(,\\ )?){3}'\nselfT = re.search(reTrans, calibFile)\nselfT = selfT.group(0)\n#print(\"Translation Values: \"+selfT)\n# ... are given in meters, but ROCK needs them in millimeters\nselfT = selfT.split(',')\ntx = float(selfT[0])*1000\nty = float(selfT[1])*1000\ntz = float(selfT[2])*1000\n#print(tx,ty,tz)\n\n# ROTATION VALUES\n# look for \"self.R ', [-0.8342,1.12325,7.234234,...\"\nreRot = '(?<=self.R\\ \\',\\ \\[)(' + reNumber + '(,\\ )?){9}'\nselfR = re.search(reRot, calibFile)\nselfR = selfR.group(0)\n# ... 
are given in matrix form, but ROCK needs them in Euler angles.\n# turn into matrix representation\nrotMatrix = eval('[' + selfR + ']')\nrotMatrix = np.array(rotMatrix).reshape(3,3)\n#print(repr(rotMatrix))\n# Call provided Python function to transform them.\nrotEuler = euler_from_matrix(rotMatrix, 'sxyz')\n#print(repr(rotEuler))\n\n# CAMERA MATRICES\n# left\nreCamMat = '(?<=left\\]\\n\\ncamera\\ matrix\\n)(' + reNumber + '\\ ' + reNumber + '\\ ' + reNumber + '\\n){2}'\ncamMat = re.search(reCamMat, calibFile).group(0).split()\n\nfxLeft = camMat[0]\nfyLeft = camMat[4]\ncxLeft = camMat[2]\ncyLeft = camMat[5]\n\n# right\nreCamMat = '(?<=right]\\n\\ncamera\\ matrix\\n)(' + reNumber + '\\ ' + reNumber + '\\ ' + reNumber + '\\n){2}'\ncamMat = re.search(reCamMat, calibFile).group(0).split()\n\nfxRight = camMat[0]\nfyRight = camMat[4]\ncxRight = camMat[2]\ncyRight = camMat[5]\n\n#print(fx,fy,cx,cy)\n\n# DISTORTION VALUES\nreDist = '('+ reNumber + '\\ ){4}'\n# left\nreDistAll = '(?<=left\\]\\n)(.*\\n){7}' + reDist + reNumber + '(?=\\n\\nrectification)'\ndistLeft = re.search(reDistAll, calibFile).group(0)\ndistLeft = re.search(reDist, distLeft).group(0).split()\n# right\nreDistAll = '(?<=right\\]\\n)(.*\\n){7}' + reDist + reNumber + '(?=\\n\\nrectification)'\ndistRight = re.search(reDistAll, calibFile).group(0)\ndistRight = re.search(reDist, distRight).group(0).split()\n\n# Ask user to provide path to stereo yml if default is non-existant\nif not (os.path.isfile(stereoFilePath) and os.access(stereoFilePath, os.R_OK)):\n print(\"Could not read default stereo yml file: \\\"\" + stereoFilePath + \"\\\"\\nUse dialog to locate the correct yml.\\n\")\n Tk().withdraw()\n stereoFilePath = askopenfilename(title=\"Locate stereo yml\",\n initialdir=\"~/dev/bundles\")\n\nf = open(stereoFilePath, 'r')\nstereoYML = f.read()\nf.close()\n\n# only grab till the next camera (or until the end of the file)\nreCamSettings = '(?<=---\\ name\\:' + cams[userIn-1] + '\\n)(.*\\n)*?' 
+ '(?=(---\\ name)|\\Z)'\n# all settings of the selected camera (+ name of next cam in ascii art) \ncamSettings = re.search(reCamSettings, stereoYML).group(0)\n\nwidth = re.search('(?<=\\ width\\:\\ )\\d+',camSettings).group(0)\nheight = re.search('(?<=\\ height\\:\\ )\\d+',camSettings).group(0)\n\n# Create backup of existing stereo yml\n#shutil.copy2(stereoFilePath, stereoFilePath + '.BAK')\n\n# PRINT RESULTS\nres = \"\\nstereoCameraCalibration:\"\nres += \"\\n camLeft:\"\nres += \"\\n fx: \" + fxLeft\nres += \"\\n fy: \" + fyLeft\nres += \"\\n cx: \" + cxLeft\nres += \"\\n cy: \" + cyLeft\nres += \"\\n d0: \" + distLeft[0]\nres += \"\\n d1: \" + distLeft[1]\nres += \"\\n d2: \" + distLeft[2]\nres += \"\\n d3: \" + distLeft[3]\nres += \"\\n width: \" + width\nres += \"\\n height: \" + height\nif (cams[userIn-1] == \"panCam\"):\n res += \"\\n ex: 0\"\n res += \"\\n ey: 0\"\nres += \"\\n camRight:\"\nres += \"\\n fx: \" + fxRight\nres += \"\\n fy: \" + fyRight\nres += \"\\n cx: \" + cxRight\nres += \"\\n cy: \" + cyRight\nres += \"\\n d0: \" + distRight[0]\nres += \"\\n d1: \" + distRight[1]\nres += \"\\n d2: \" + distRight[2]\nres += \"\\n d3: \" + distRight[3]\nres += \"\\n width: \" + width\nres += \"\\n height: \" + height\nif (cams[userIn-1] == \"panCam\"):\n res += \"\\n ex: 0.0\"\n res += \"\\n ey: 0.0\"\nres += \"\\n#Distance between left and right camera\"\nres += \"\\n extrinsic:\"\nres += \"\\n tx: \" + str(1*Decimal(tx))\nres += \"\\n ty: \" + str(1*Decimal(ty))\nres += \"\\n tz: \" + str(1*Decimal(tz))\nres += \"\\n rx: \" + str(1*Decimal(rotEuler[0]))\nres += \"\\n ry: \" + str(1*Decimal(rotEuler[1]))\nres += \"\\n rz: \" + str(1*Decimal(rotEuler[2]))\n\nprint(res)\n\n#TODO? Write directly to yml\n","sub_path":"src/calibrationConverter.py","file_name":"calibrationConverter.py","file_ext":"py","file_size_in_byte":6244,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"377682792","text":"arrBerat = []\r\nbMin = float()\r\nbMax = float()\r\n\r\ndef hitungMinMax(arrBerat):\r\n print('Berat balita minimum : ', min(arrBerat),'Kg')\r\n print('Berat balita maksimum : ', max(arrBerat),'Kg')\r\n # Definisikan Proses Mencari Berat Maximum Dan Minimum\r\n\r\n\r\ndef rerata(arrBerat):\r\n total = sum(arrBerat)\r\n return total/len(arrBerat)\r\n\r\n # Definisikan Proses Mencari Rerata Dari Total Berat\r\n\r\n # Return Hasil Rerata\r\n\r\n\r\nprint('Masukkan Banyak Data Berat Balita :', end=\" \")\r\nn = int(input())\r\n\r\nfor i in range(n):\r\n print(f'Masukkan Berat Balita Ke-{i+1} :', end=' ')\r\n k=float(input())\r\n arrBerat.append(k)\r\n # Inisialisasi Input Data Berat\r\n\r\n # Masukkan Data Berat Ke Array (arrBerat)\r\n\r\nhitungMinMax(arrBerat)\r\nprint('Rerata berat balita : ', rerata(arrBerat),'Kg')\r\n# Panggil procedur hitungMinMax(arrBerat)\r\n\r\n\r\n# Print Data Minimum, Maximum, dan Rerata Berat\r\n","sub_path":"soal1.py","file_name":"soal1.py","file_ext":"py","file_size_in_byte":896,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"295021789","text":"#!/usr/bin/env python3\n\n'''Contains classes used for responses.'''\n\nimport enum\nimport re\n\nfrom aiospamc.common import RequestResponseBase\nfrom aiospamc.exceptions import (BadResponse,\n ExUsage, ExDataErr, ExNoInput, ExNoUser,\n ExNoHost, ExUnavailable, ExSoftware, ExOSErr,\n ExOSFile, ExCantCreat, ExIOErr, ExTempFail,\n ExProtocol, ExNoPerm, ExConfig, ExTimeout)\n\n\nclass Status(enum.IntEnum):\n 
'''Enumeration of status codes that the SPAMD will accompany with a\n response.\n\n Reference: https://svn.apache.org/repos/asf/spamassassin/trunk/spamd/spamd.raw\n Look for the %resphash variable.\n '''\n\n #pylint: disable=C0326\n def __new__(cls, value, exception=None, description=''):\n #pylint: disable=protected-access\n obj = int.__new__(cls, value)\n obj._value_ = value\n obj.exception = exception\n obj.description = description\n\n return obj\n\n EX_OK = 0, None, 'No problems'\n EX_USAGE = 64, ExUsage, 'Command line usage error'\n EX_DATAERR = 65, ExDataErr, 'Data format error'\n EX_NOINPUT = 66, ExNoInput, 'Cannot open input'\n EX_NOUSER = 67, ExNoUser, 'Addressee unknown'\n EX_NOHOST = 68, ExNoHost, 'Host name unknown'\n EX_UNAVAILABLE = 69, ExUnavailable, 'Service unavailable'\n EX_SOFTWARE = 70, ExSoftware, 'Internal software error'\n EX_OSERR = 71, ExOSErr, 'System error (e.g., can\\'t fork)'\n EX_OSFILE = 72, ExOSFile, 'Critical OS file missing'\n EX_CANTCREAT = 73, ExCantCreat, 'Can\\'t create (user) output file'\n EX_IOERR = 74, ExIOErr, 'Input/output error'\n EX_TEMPFAIL = 75, ExTempFail, 'Temp failure; user is invited to retry'\n EX_PROTOCOL = 76, ExProtocol, 'Remote error in protocol'\n EX_NOPERM = 77, ExNoPerm, 'Permission denied'\n EX_CONFIG = 78, ExConfig, 'Configuration error'\n EX_TIMEOUT = 79, ExTimeout, 'Read timeout'\n\nclass Response(RequestResponseBase):\n '''Class to encapsulate response.\n\n Attributes\n ----------\n protocol_version : str\n Protocol version given by the response.\n status_code : aiospamc.responess.Status\n Status code give by the response.\n message : str\n Message accompanying the status code.\n body : str\n Contents of the response body.\n '''\n\n #pylint: disable=too-few-public-methods\n _response_pattern = re.compile(r'^\\s*'\n r'(?PSPAMD)/(?P\\d+\\.\\d+)'\n r'\\s+'\n r'(?P\\d+)'\n r'\\s+'\n r'(?P.*)')\n '''Regular expression pattern to match the response. Protocol will match\n the phrase 'SPAMD', version will match with the style '1.0', status will\n match an integer. The message will match all characters up until the next\n newline.\n '''\n _response_string = 'SPAMD/{version} {status} {message}\\r\\n{headers}\\r\\n{body}'\n '''String used for composing a response.'''\n\n @classmethod\n def parse(cls, response):\n response, *body = response.split(b'\\r\\n\\r\\n', 1)\n response, *headers = response.split(b'\\r\\n')\n\n response = response.decode()\n\n # Process response\n match = cls._response_pattern.match(response)\n if match:\n response_match = match.groupdict()\n else:\n # Not a SPAMD response\n raise BadResponse\n\n protocol_version = response_match['version'].strip()\n status_code = Status(int(response_match['status']))\n message = response_match['message'].strip()\n\n if status_code.exception:\n raise status_code.exception(message)\n\n parsed_headers = cls._parse_headers(headers)\n parsed_body = cls._parse_body(body[0] if body else None, parsed_headers)\n\n obj = cls(protocol_version,\n status_code,\n message,\n parsed_body,\n *parsed_headers)\n\n return obj\n\n def __init__(self, protocol_version, status_code, message, body=None, *headers):\n '''Response constructor.\n\n Parameters\n ----------\n protocol_version : str\n Version reported by the SPAMD service response.\n status_code : aiospamc.responses.Status\n Success or error code.\n message : str\n Message associated with status code.\n body : :obj:`str`, optional\n String representation of the body. 
An instance of the\n aiospamc.headers.ContentLength will be automatically added.\n *headers : :obj:`aiospamc.headers.Header`, optional\n Collection of headers to be added. If it contains an instance of\n aiospamc.headers.Compress then the body is automatically\n compressed.\n '''\n\n self.protocol_version = protocol_version\n self.status_code = status_code\n self.message = message\n super().__init__(body, *headers)\n\n def __bytes__(self):\n if self._compressed_body:\n body = self._compressed_body\n elif self.body:\n body = self.body.encode()\n else:\n body = b''\n\n return (b'SPAMD/%(version)b '\n b'%(status)d '\n b'%(message)b\\r\\n'\n b'%(headers)b\\r\\n'\n b'%(body)b') % {b'version': b'1.5',\n b'status': self.status_code.value,\n b'message': self.message.encode(),\n b'headers': b''.join(map(bytes, self._headers.values())),\n b'body': body}\n\n def __repr__(self):\n resp_format = ('{}(protocol_version=\\'{}\\', '\n 'status_code={}, '\n 'message=\\'{}\\', '\n 'headers={}, '\n 'body={})')\n\n return resp_format.format(self.__class__.__name__,\n self.protocol_version,\n str(self.status_code),\n self.message,\n tuple(self._headers.values()),\n repr(self.body) if self.body else 'None')\n","sub_path":"aiospamc/responses.py","file_name":"responses.py","file_ext":"py","file_size_in_byte":6550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"357061126","text":"from sanic import Sanic\nfrom sanic.response import text,json\nfrom DB.MongoHelp import MongoHelper as SqlHelper\n\nclass apiNews:\n def __init__(self):\n self.sqlhelper = SqlHelper()\n self.sqlhelper.init_db()\n\n def queryNews(self,category,pz,page):\n newsJson = self.sqlhelper.select(pz,{'category':category},page)\n return newsJson\n print (newsJson)\n\napiNews=apiNews()\napp = Sanic(__name__)\n@app.route(\"/news\",methods=['GET'])\nasync def get_handler(request):\n parameter = request.args\n return json(apiNews.queryNews(parameter['category'][0],parameter['pageSize'][0],parameter['page'][0]))\napp.run(host=\"0.0.0.0\", port=8000, debug=True)","sub_path":"newsApi/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":676,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"635981191","text":"\"\"\"Simple ReasonerStdAPI server.\"\"\"\nimport json\nimport logging\nfrom typing import Dict\n\nfrom fastapi import Depends, FastAPI, HTTPException\nfrom fastapi.responses import JSONResponse\nimport httpx\nfrom starlette.middleware.cors import CORSMiddleware\n\nfrom reasoner_pydantic import Request, Message\nfrom strider.setup_query import execute_query, generate_plan\nfrom strider.query_planner import NoAnswersError\nfrom strider.scoring import score_graph\nfrom strider.results import get_db, Database\nfrom strider.util import setup_logging\n\nLOGGER = logging.getLogger(__name__)\n\nAPP = FastAPI(\n title='Strider/ARAGORN/Ranking Agent',\n description='Translator Autonomous Relay Agent',\n version='1.0.0',\n terms_of_service='N/A',\n)\nAPP.add_middleware(\n CORSMiddleware,\n allow_origins=['*'],\n allow_credentials=True,\n allow_methods=[\"*\"],\n allow_headers=[\"*\"],\n)\n\nsetup_logging()\n\n\n@APP.post('/query', response_model=Message, tags=['query'])\nasync def sync_query(\n query: Request,\n support: bool = True,\n) -> Message:\n \"\"\"Handle synchronous query.\"\"\"\n return await sync_answer(\n query.dict(),\n support=support,\n )\n\n\nasync def sync_answer(query: Dict, **kwargs):\n \"\"\"Answer biomedical question, 
synchronously.\"\"\"\n try:\n query_id = await execute_query(\n query['message']['query_graph'],\n **kwargs,\n wait=True,\n )\n except NoAnswersError as err:\n LOGGER.warning(str(err))\n return query['message']\n async with Database('results.db') as database:\n return await _get_results(\n query_id=query_id,\n database=database,\n )\n\n\n@APP.post('/aquery', response_model=str, tags=['query'])\nasync def async_query(\n query: Request,\n support: bool = True,\n) -> str:\n \"\"\"Handle asynchronous query.\"\"\"\n query_id = await execute_query(\n query.message.query_graph.dict(),\n support=support,\n wait=False,\n )\n return query_id\n\n\n@APP.post('/ars')\nasync def handle_ars(\n data: Dict,\n):\n \"\"\"Handle ARS message.\"\"\"\n if data.get('model', None) != 'tr_ars.message':\n raise HTTPException(\n status_code=400,\n detail='Not a valid Translator message',\n )\n data = data['fields']\n if data.get('ref', None) is not None:\n raise HTTPException(\n status_code=400,\n detail='Not head message',\n )\n if data.get('data', None) is not None:\n data = json.loads(data['data'])\n elif data.get('url', None) is not None:\n data = httpx.get(data['url'], timeout=60).json()\n else:\n raise HTTPException(\n status_code=400,\n detail='Not a valid tr_ars.message',\n )\n\n content = await sync_answer(data)\n headers = {'tr_ars.message.status': 'A'}\n return JSONResponse(content=content, headers=headers)\n\n\n@APP.get('/results', response_model=Message)\nasync def get_results( # pylint: disable=too-many-arguments\n query_id: str,\n since: float = None,\n limit: int = None,\n offset: int = 0,\n database=Depends(get_db('results.db')),\n) -> Message:\n \"\"\"Get results for a query.\"\"\"\n return await _get_results(query_id, since, limit, offset, database)\n\n\nasync def _get_results(\n query_id: str,\n since: float = None,\n limit: int = None,\n offset: int = 0,\n database=None,\n):\n \"\"\"Get results.\"\"\"\n # get column names from results db\n columns = await database.get_columns(query_id)\n\n kgraph = {\n 'nodes': dict(),\n 'edges': dict(),\n }\n results = []\n for row in await extract_results(query_id, since, limit, offset, database):\n result, _kgraph = parse_bindings(dict(zip(columns, row)))\n results.append(result)\n kgraph['nodes'].update(_kgraph['nodes'])\n kgraph['edges'].update(_kgraph['edges'])\n # convert kgraph nodes and edges to list format\n kgraph = {\n 'nodes': list(kgraph['nodes'].values()),\n 'edges': list(kgraph['edges'].values()),\n }\n return {\n 'knowledge_graph': kgraph,\n 'results': results\n }\n\n\ndef parse_bindings(bindings):\n \"\"\"Parse bindings into message format.\"\"\"\n kgraph = {\n 'nodes': dict(),\n 'edges': dict(),\n }\n result = {\n 'node_bindings': [],\n 'edge_bindings': [],\n }\n for key, element in bindings.items():\n if key.startswith('_'):\n result[key[1:]] = element\n continue\n kid = element.pop('kid')\n qid = element.pop('qid')\n element.pop('kid_qid', None)\n if key.startswith('e_'):\n result['edge_bindings'].append({\n 'qg_id': qid,\n 'kg_id': kid,\n })\n kgraph['edges'][kid] = {\n 'id': kid,\n **element,\n }\n else:\n result['node_bindings'].append({\n 'qg_id': qid,\n 'kg_id': kid,\n })\n kgraph['nodes'][kid] = {\n 'id': kid,\n **element,\n }\n return result, kgraph\n\n\nasync def extract_results(query_id, since, limit, offset, database):\n \"\"\"Extract results from database.\"\"\"\n statement = f'SELECT * FROM \"{query_id}\"'\n if since is not None:\n statement += f' WHERE _timestamp >= {since}'\n statement += ' ORDER BY _timestamp ASC'\n if limit is 
not None:\n statement += f' LIMIT {limit}'\n if offset:\n statement += f' OFFSET {offset}'\n rows = await database.execute(statement)\n return [\n tuple(\n json.loads(value) if isinstance(value, str) else value\n for value in row\n )\n for row in rows\n ]\n\n\n@APP.post('/plan', response_model=Dict, tags=['query'])\nasync def generate_traversal_plan(\n query: Request,\n) -> Dict:\n \"\"\"Generate a plan for traversing knowledge providers.\"\"\"\n query_graph = query.message.query_graph.dict()\n return await generate_plan(query_graph)\n\n\n@APP.post('/score', response_model=Message, tags=['query'])\nasync def score_results(\n query: Request,\n) -> Message:\n \"\"\"Score results.\"\"\"\n message = query.message.dict()\n identifiers = {\n knode['id']: knode.get('equivalent_identifiers', [])\n for knode in message['knowledge_graph']['nodes']\n }\n for result in message['results']:\n graph = {\n 'nodes': {\n nb['qg_id']: {\n 'qid': nb['qg_id'],\n 'kid': nb['kg_id'],\n 'equivalent_identifiers': identifiers[nb['kg_id']]\n }\n for nb in result['node_bindings']\n },\n 'edges': {\n eb['qg_id']: {\n 'qid': eb['qg_id'],\n 'kid': eb['kg_id'],\n }\n for eb in result['edge_bindings']\n }\n }\n result['score'] = await score_graph(\n graph,\n message['query_graph'],\n )\n return message\n","sub_path":"strider/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":7077,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"411613758","text":"from tkinter import *\nfrom tkinter import messagebox\nimport math\nimport random\nimport copy\n\nclass ColumnFullException(Exception):\n pass\n\nclass state:\n def __init__(self, board=[]):\n self.board = copy.deepcopy(board)\n def start(self):\n self.board = [[0,0,0,0] for _ in range(4)]\n def terminal_test(self):\n b = self.board\n for t in [1,2]:\n for r in range(4):\n if (b[r][0]==t and b[r][1]==t and b[r][2]==t):\n return True, int(2*(1.5-t))\n elif (b[r][3]==t and b[r][1]==t and b[r][2]==t):\n return True, int(2*(1.5-t))\n for c in range(4):\n if (b[0][c]==t and b[1][c]==t and b[2][c]==t):\n return True, int(2*(1.5-t))\n elif (b[1][c]==t and b[2][c]==t and b[3][c]==t):\n return True, int(2*(1.5-t))\n if b[0][1]==t and b[1][2]==t and b[2][3]==t:\n return True, int(2*(1.5-t))\n elif b[0][0]==t and b[1][1]==t and b[2][2]==t:\n return True, int(2*(1.5-t))\n elif b[1][1]==t and b[2][2]==t and b[3][3]==t:\n return True, int(2*(1.5-t))\n elif b[1][0]==t and b[2][1]==t and b[3][2]==t:\n return True, int(2*(1.5-t))\n if b[2][0]==t and b[1][1]==t and b[0][2]==t:\n return True, int(2*(1.5-t))\n elif b[3][0]==t and b[2][1]==t and b[1][2]==t:\n return True, int(2*(1.5-t))\n elif b[2][1]==t and b[1][2]==t and b[0][3]==t:\n return True, int(2*(1.5-t))\n elif b[3][1]==t and b[2][2]==t and b[1][3]==t:\n return True, int(2*(1.5-t))\n flag = 0\n for c in range(4):\n if b[3][c] == 0:\n flag = 1 \n if flag==0:\n return True, 0\n return False, None\n\n def show(self, frame, images):\n for i in range(4):\n for j in range(4):\n Button(frame, command = lambda col=j: human(col), image=images[(self.board[i][j]-1) % 3]).grid(row=i, column=j) \n \n def actions(self):\n return filter(lambda z: (self.board[3][z]==0), [0,1,2,3])\n\ndef next_state(st, action, turn):\n new = state(st.board)\n if turn == False and new.board[3][action] != 0:\n raise ColumnFullException\n else:\n tag = 1 if turn else 2\n flag = 1\n index = -1\n while flag != 0:\n index += 1\n flag = new.board[index][action]\n new.board[index][action] = tag\n return 
new\n\ndef minimax(st, turn):\n global bottom_frame, imgs\n if st.terminal_test()[0]:\n return st.terminal_test()[1], None\n best = -math.inf if turn else math.inf\n for a in st.actions():\n t = next_state(st, a, turn)\n ut, ac = minimax(t, not turn)\n if (turn) and (ut > best):\n best, act = ut, a\n elif (not turn) and (ut < best):\n best, act = ut, a\n return best, act\n\ndef minimax_move():\n global st, bottom_frame, imgs, l, chance\n msgs = ['It was a draw', 'You lost miserably', 'you won']\n if chance == False:\n messagebox.showinfo(\"Out of Chance\", \"Please make a human move.\")\n return\n chance = False if chance == None else not chance\n ut, act = minimax(st, True)\n st = next_state(st, act, True)\n st.show(bottom_frame, imgs)\n if st.terminal_test()[0]:\n messagebox.showinfo('Game Over', msgs[st.terminal_test()[1]])\n sys.exit()\n\ndef human(column):\n msgs = ['It was a draw', 'You lost miserably', 'you won']\n global chance\n if chance == True:\n messagebox.showinfo(\"Out of Chance\", \"Please allow machine to move.\")\n return\n try:\n global st, bottom_frame, imgs, l\n st = next_state(st, column, False)\n st.show(bottom_frame, imgs)\n except ColumnFullException as e:\n messagebox.showinfo(\"Column Full\", \"Please try another column.\")\n return\n if chance == None:\n chance = True\n else:\n chance = not chance\n if st.terminal_test()[0]:\n messagebox.showinfo('Game Over', msgs[st.terminal_test()[1]])\n sys.exit()\n\nif __name__ == \"__main__\":\n chance = None\n st = state()\n st.start()\n w = Tk()\n w.title(\"Pipaf3\")\n imgs = [PhotoImage(file = f) for f in ['green_blob.png', 'blue_blob.png', 'white_blob.png', 'base_line.png']]\n top_frame = Frame(w)\n top_frame.pack(side=TOP)\n bottom_frame = Frame(w)\n bottom_frame.pack(side=TOP)\n b0 = Button(top_frame,text=\"Minimax Move\", command = lambda: minimax_move(), bg=\"orange\", fg=\"red\")\n b0.pack(side=TOP)\n bbase = Label(top_frame, image=imgs[3])\n bbase.pack(side=TOP)\n st.show(bottom_frame, imgs)\n w.mainloop()","sub_path":"pipaf3.py","file_name":"pipaf3.py","file_ext":"py","file_size_in_byte":4777,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"226169720","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\n@author: Ana Silva\r\n\"\"\"\r\nimport numpy as np\r\n\r\ndef _pop_evaluate(pop, nper, chydth, nvolinic, mafl, mmutate, mload, valdef):\r\n khidr = (chydth.vturbmax*(10/3))/3600\r\n vherd = np.zeros([nper, pop])\r\n vdisp = np.zeros([nper, pop])\r\n vturb = np.zeros([nper, pop])\r\n vsobr = np.zeros([nper, pop])\r\n vdesc = np.zeros([nper, pop])\r\n psobr = np.zeros([nper, pop])\r\n phidr = np.zeros([nper, pop])\r\n pterm = np.zeros([nper, pop])\r\n mcmg = np.zeros([1, pop])\r\n mpen = np.zeros([nper, pop])\r\n mval = np.zeros([nper, pop])\r\n mcost = np.zeros([nper+1, pop])\r\n mpop = np.zeros([nper+2, pop])\r\n for i in range(nper):\r\n for j in range(pop):\r\n mpop[i, j] = mmutate[i, j]\r\n mpop[nper, j] = mmutate[nper, j]\r\n if(i == 0):\r\n vherd[i, j] = nvolinic\r\n else:\r\n vherd[i, j] = vsobr[i-1, j]\r\n vdisp[i, j] = vherd[i, j]+mafl[i, 0]\r\n if(vdisp[i, j] > chydth.vdispmax):\r\n vdisp[i, j] = chydth.vdispmax\r\n vdesc[i, j] = vdisp[i, j]-chydth.vdispmax\r\n vturb[i, j] = mmutate[i, j]*vdisp[i, j]\r\n vsobr[i, j] = vdisp[i, j]-vturb[i, j]\r\n psobr[i, j] = (10/3)*(vsobr[i, j]/3600)\r\n phidr[i, j] = (10/3)*(vturb[i, j]/3600)\r\n pterm[i, j] = mload[i, 0]- phidr[i, j]\r\n for i in range(nper):\r\n for j in range(pop):\r\n if(phidr[i, j] > khidr):\r\n 
mpen[i, j] = (phidr[i, j]-khidr)+mpen[i, j]\r\n if(phidr[i, j] < 0):\r\n mpen[i, j] = (phidr[i, j])+mpen[i, j]\r\n if(pterm[i, j] > chydth.ptermmax):\r\n mpen[i, j] = (pterm[i, j]-chydth.ptermmax)+mpen[i, j]\r\n if(pterm[i, j] < 0):\r\n mpen[i, j] = pterm[i, j]+mpen[i, j]\r\n if(psobr[nper-1, j] > 0 and np.all(pterm[:, j] <= chydth.ptermmax) and np.all(phidr[:, j] <= khidr)):\r\n mcmg[0, j] = valdef\r\n mval[nper-1, j] = psobr[nper-1, j]\r\n mcost[i, j] = 2000+100*pterm[i, j] + 1.5*np.power(pterm[i, j], 2) + chydth.penaliz*np.power(mpen[i, j], 2) - mcmg[0, j]*mval[i, j]\r\n mcost[nper, j] = mcost[i, j]+mcost[nper, j]\r\n mpop[nper+1, j] = mcost[nper, j]\r\n class _PopVol:\r\n def __init__(self, vherd, vdisp, vturb, vsobr, vdesc):\r\n self.vherd = vherd\r\n self.vdisp = vdisp\r\n self.vturb = vturb\r\n self.vsobr = vsobr\r\n self.vdesc = vdesc\r\n class _PopPot:\r\n def __init__(self, psobr, phidr, pterm):\r\n self.psobr = psobr\r\n self.phidr = phidr\r\n self.pterm = pterm\r\n class _Ccost:\r\n def __init__(self, mcmg, mpen, mval, mcost):\r\n self.mcmg = mcmg\r\n self.mpen = mpen\r\n self.mval = mval\r\n self.mcost = mcost\r\n p_vol = _PopVol(vherd, vdisp, vturb, vsobr, vdesc)\r\n p_pot = _PopPot(psobr, phidr, pterm)\r\n p_cost = _Ccost(mcmg, mpen, mval, mcost)\r\n return p_vol, p_pot, p_cost, mpop\r\n","sub_path":"HydrothermalCoordination_Metaheuristics/EvolutionaryParticleSwarmOptimization/Functions/evoluate.py","file_name":"evoluate.py","file_ext":"py","file_size_in_byte":3079,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"533910454","text":"from __future__ import absolute_import\n\nfrom django.conf import settings\n\nfrom sentry.api.serializers import Serializer, register\nfrom sentry.models import User, UserAvatar, UserOption\nfrom sentry.utils.avatar import get_gravatar_url\n\n\n@register(User)\nclass UserSerializer(Serializer):\n def get_attrs(self, item_list, user):\n avatars = {\n a.user_id: a\n for a in UserAvatar.objects.filter(\n user__in=item_list\n )\n }\n return {u: {'avatar': avatars.get(u.id)} for u in item_list if u}\n\n def serialize(self, obj, attrs, user):\n d = {\n 'id': str(obj.id),\n 'name': obj.get_display_name(),\n 'username': obj.username,\n 'email': obj.email,\n 'avatarUrl': get_gravatar_url(obj.email, size=32),\n 'isActive': obj.is_active,\n 'dateJoined': obj.date_joined,\n }\n if obj == user:\n options = {\n o.key: o.value\n for o in UserOption.objects.filter(\n user=user,\n project__isnull=True,\n )\n }\n stacktrace_order = int(options.get('stacktrace_order', -1) or -1)\n if stacktrace_order == -1:\n stacktrace_order = 'default'\n elif stacktrace_order == 2:\n stacktrace_order = 'newestFirst'\n elif stacktrace_order == 1:\n stacktrace_order = 'newestLast'\n\n d['options'] = {\n 'language': options.get('language') or 'en',\n 'stacktraceOrder': stacktrace_order,\n 'timezone': options.get('timezone') or settings.SENTRY_DEFAULT_TIME_ZONE,\n 'clock24Hours': options.get('clock_24_hours') or False,\n }\n\n if attrs.get('avatar'):\n avatar = {\n 'avatarType': attrs['avatar'].get_avatar_type_display(),\n 'avatarUuid': attrs['avatar'].ident if attrs['avatar'].file else None\n }\n else:\n avatar = {'avatarType': 'letter_avatar', 'avatarUuid': None}\n d['avatar'] = avatar\n\n return d\n","sub_path":"src/sentry/api/serializers/models/user.py","file_name":"user.py","file_ext":"py","file_size_in_byte":2170,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} 
+{"seq_id":"56574495","text":"import scrapy\nimport json\nimport re\n\nfrom locations.items import GeojsonPointItem\nfrom locations.hours import OpeningHours\n\n\nDAY_MAPPING = {'MONDAY': 'Mo',\n 'TUESDAY': 'Tu',\n 'WEDNESDAY': 'We',\n 'THURSDAY': 'Th',\n 'FRIDAY': 'Fr',\n 'SATURDAY': 'Sa',\n 'SUNDAY': 'Su'}\n\nclass MuellerSpider(scrapy.Spider):\n name = \"mueller\"\n allowed_domains = [\"www.mueller.de\"]\n start_urls = (\n 'https://www.mueller.de/meine-filiale/',\n )\n download_delay = 0.2\n\n def parse_hours(self, store_hours):\n opening_hours = OpeningHours()\n if store_hours is None:\n return\n\n for store_day in store_hours:\n day = DAY_MAPPING[store_day.get(\"dayOfWeek\")]\n open_time = store_day.get(\"fromTime\")\n close_time = store_day.get(\"toTime\")\n if open_time is None and close_time is None:\n continue\n opening_hours.add_range(day=day,\n open_time=open_time,\n close_time=close_time,\n time_format='%H:%M'\n )\n\n return opening_hours.as_opening_hours()\n\n def parse_details(self, response):\n stores = json.loads(response.body_as_unicode())\n properties = {\n 'lat': stores['latitude'],\n 'lon': stores['longitude'],\n 'name': stores['companyName'],\n 'street': stores['street'],\n 'city': stores['city'],\n 'postcode': stores['zip'],\n 'country': stores['country'],\n 'phone': stores['ccstoreDtoDetails']['phone'],\n 'ref': stores['storeNumber'],\n }\n hours = self.parse_hours(stores['ccstoreDtoDetails']['openingHourWeek'])\n\n if hours:\n properties[\"opening_hours\"] = hours\n\n yield GeojsonPointItem(**properties)\n\n def parse(self, response):\n store_numbers = re.findall(r'{storeNumber: \\'(\\d+)\\'', response.text)\n if store_numbers:\n for n in store_numbers:\n yield scrapy.Request(\n url=\"https://www.mueller.de/api/ccstore/byStoreNumber/{}/\".format(n),\n callback=self.parse_details\n )\n\n","sub_path":"locations/spiders/mueller.py","file_name":"mueller.py","file_ext":"py","file_size_in_byte":2331,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"637497216","text":"# Game options/settings\n\nTITLE = \"Jumpy!\"\nWIDTH = 480\nHEIGHT = 600\nFPS = 60\nSPRITESHEET = \"spritesheet_jumper.png\"\nFONT_NAME = \"arial\"\n\n# Player properties\n\nPLAYER_ACC = 0.5\nPLAYER_FRICTION = -0.12\nPLAYER_JUMP_VEL = 20\n\n# Player layers\n\nPLAYER_LAYER = 2\nPLATFORM_LAYER = 1\nPOW_LAYER = 1\nMOB_LAYER = 2\nCLOUD_LAYER = 0\n\n# Game properties\n\nBOOST_POWER = 60\nPOW_SPAWN_PCT = 20\nMOB_FREQ = 5000\n\n# Platforms\n\nPLATFORM_LIST = [(0, HEIGHT-60), \n (WIDTH/2 - 50, HEIGHT * 3 /4),\n (125, HEIGHT-350),\n (350, 200),\n (175, 100)]\n\n# define colors\n\nWHITE = (255, 255, 255)\nBLACK = (0, 0, 0)\nRED = (255, 0, 0)\nGREEN = (0, 255, 0)\nBLUE = (0, 0, 255)\nYELLOW = (255, 255, 0)\nLIGHTBLUE = (0, 155, 155)\nBGCOLOR = LIGHTBLUE","sub_path":"lesson_4/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":764,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"354431644","text":"import socket\nimport uuid\nimport re\nfrom datetime import datetime\nfrom datetime import timedelta\nimport json\nimport pandas as pd\nimport os\n\nFILE = \"file.json\"\n\ndef parser_file(FILE):\n if os.stat(FILE).st_size != 0:\n data = json.load(open(FILE))\n Q = data\n else:\n Q={}\n return Q\n\ndef _add(_queue_,_length_,_data_,conn,Q):\n id = uuid.uuid4()\n id = str(uuid.uuid4())\n conn.send(id.encode('utf-8'))\n if _queue_ not in Q:\n Q[_queue_] = []\n 
Q[_queue_].append({\"id\":id,\"length\":_length_,\"data\":_data_,\"time\":\"\",\"do\":False})\n else:\n Q[_queue_].append({\"id\": id, \"length\": _length_, \"data\": _data_, \"time\": \"\",\"do\":False})\n return Q\n\ndef _get(_queue_,conn,Q):\n counter_do = 0\n if (len(Q[_queue_]) == 0):\n conn.send(b\"NONE\")\n else:\n for task in Q[_queue_]:\n if task[\"do\"] == False:\n task[\"do\"] = True\n task[\"time\"] = str(datetime.today())\n conn.send(\n task[\"id\"].encode('utf-8') + b\" \" + task[\"length\"].encode('utf-8') + b\" \" + task[\"data\"].encode(\n 'utf-8'))\n break\n else:\n if ((pd.to_datetime(task[\"time\"]).to_pydatetime() - datetime.today()) >= timedelta(minutes=5)):\n task[\"time\"] = str(datetime.today())\n conn.send(\n task[\"id\"].encode('utf-8') + b\" \" + task[\"length\"].encode('utf-8') + b\" \" + task[\"data\"].encode(\n 'utf-8'))\n break\n else:\n counter_do=counter_do+1\n if counter_do==len(Q[_queue_]):\n conn.send(b\"NONE\")\n break\n return Q\n\ndef _ack(_queue_,_id_,conn,Q):\n\n if Q[_queue_]:\n for i in range(len(Q[_queue_])):\n if (Q[_queue_][i][\"id\"] == _id_) and (Q[_queue_][i][\"do\"] == True):\n if ((pd.to_datetime(Q[_queue_][i][\"time\"]).to_pydatetime() - datetime.today()) >= timedelta(minutes=5)):\n Q[_queue_][i][\"time\"] = \"\"\n Q[_queue_][i][\"do\"] = False\n break\n else:\n Q[_queue_].pop(i)\n conn.send(b\"OK\")\n break\n return Q\n\ndef _in(_queue_,_id_,conn,Q):\n counter_present=0\n if Q[_queue_]:\n for task in Q[_queue_]:\n if task['id'] == _id_:\n conn.send(b\"YES\")\n break\n else:\n counter_present = counter_present+1\n if (counter_present==len(Q[_queue_])):\n conn.send(b\"NO\")\n else:\n conn.send(b\"NO\")\n return Q\n\ndef run(conn,Q):\n data = conn.recv(1000000)\n data=data.decode(\"utf-8\")\n data_str = re.split('\\s', data)\n\n with open('file.json', 'w') as outfile:\n if data_str[0] == \"ADD\":\n queue = _add(data_str[1], data_str[2], data_str[3], conn,Q)\n json.dump(queue, outfile)\n if data_str[0] == \"GET\":\n queue = _get(data_str[1], conn,Q)\n json.dump(queue, outfile)\n if data_str[0] == \"ACK\":\n queue = _ack(data_str[1], data_str[2], conn,Q)\n json.dump(queue, outfile)\n if data_str[0] == \"IN\":\n queue=_in(data_str[1], data_str[2], conn,Q)\n return queue\n\nif __name__ == '__main__':\n queue=parser_file(FILE)\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.bind(('', 5555))\n sock.listen(1)\n while True:\n try:\n conn, addr = sock.accept()\n queue=run(conn,queue)\n conn.close()\n except KeyboardInterrupt:\n break\n\n\n","sub_path":"task_queue/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":3714,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"631982644","text":"import re\nfrom covid_bronx.quality import fasta_files, sam_files\n\n# sample_ids = {\n# \"reinfection01\": \"sample_barcode01\",\n# \"reinfection02\": \"sample_barcode02\",\n# \"reinfection03\": \"sample_barcode03\",\n# \"reinfection04\": \"sample_barcode04\",\n# \"reinfection05\": \"sample_barcode05\",\n# \"reinfection06\": \"sample_barcode06\",\n# \"reinfection07\": \"sample_barcode07\",\n# \"reinfection08\": \"sample_barcode08\",\n# \"reinfection09\": \"sample_barcode09\",\n# }\n\n\n\n# fasta_dict = {k: f\"data/final/reinfection/output/{v}.consensus.fasta\" for k,v\n# in sample_ids.items()}\n\nfasta_dict = {k: v for k,v in fasta_files.items()}\nsample_ids = fasta_files\nlines = []\nfor sample_id, filename in fasta_dict.items():\n b = sample_ids[sample_id]\n with 
open(filename, 'r') as f:\n line = f.read()\n zo = line.split(\"\\n\")\n zo[0] = f\"> {sample_id}\"\n\n # lines.append(f.read().replace(b, sample_id))\n lines.append(\"\\n\".join(zo))\n\nwith open(\"data/processed/reinfection/sequences.fasta\", 'w') as f:\n f.write(\"\\n\".join(lines))\n\nfor sample_id, line in zip(fasta_dict.keys(), lines):\n with open(f\"data/processed/sequences/{sample_id}.fasta\", \"w\") as f:\n f.write(line)","sub_path":"scripts/sequencing/combine_fasta.py","file_name":"combine_fasta.py","file_ext":"py","file_size_in_byte":1207,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"312301839","text":"from icepyx import is2class as ipd\nimport pytest\nimport warnings\n\ndef test_CMRparams():\n reg_a = ipd.Icesat2Data('ATL06',[-64, 66, -55, 72],['2019-02-22','2019-02-28'])\n reg_a.build_CMR_params()\n obs_keys = reg_a.CMRparams.keys()\n exp_keys_all = ['short_name','version','temporal']\n exp_keys_any = ['bounding_box','polygon']\n \n assert all(keys in obs_keys for keys in exp_keys_all)\n assert any(key in obs_keys for key in exp_keys_any)\n \ndef test_reqconfig_params():\n reg_a = ipd.Icesat2Data('ATL06',[-64, 66, -55, 72],['2019-02-22','2019-02-28'])\n \n #test for search params\n reg_a.build_reqconfig_params('search')\n obs_keys = reg_a.reqparams.keys()\n exp_keys_all = ['page_size','page_num'] \n assert all(keys in obs_keys for keys in exp_keys_all)\n\n #test for download params\n reg_a.reqparams=None\n reg_a.build_reqconfig_params('download')\n reg_a.reqparams.update({'token':'','email':''})\n obs_keys = reg_a.reqparams.keys()\n exp_keys_all = ['page_size','page_num','request_mode','token','email','include_meta']\n assert all(keys in obs_keys for keys in exp_keys_all)\n \ndef test_properties():\n reg_a = ipd.Icesat2Data('ATL06',[-64, 66, -55, 72],['2019-02-22','2019-02-28'],\\\n start_time='03:30:00', end_time='21:30:00', version='2')\n obs_list = [reg_a.dataset, reg_a.dates, reg_a.start_time, reg_a.end_time, reg_a.dataset_version, reg_a.spatial_extent]\n exp_list = ['ATL06',['2019-02-22', '2019-02-28'], '03:30:00', '21:30:00', '002', ['bounding box', [-64, 66, -55, 72]]]\n \n for obs, expected in zip(obs_list,exp_list):\n assert obs == expected\n\n#BestPractices: should do additional properties tests for each potential property type (e.g. 
spatial extent can have type bounding_box or polygon) \n\n\n\n \n\n#check that search results are correct (spatially, temporally, match actually available data)","sub_path":"icepyx/tests/is2class_query.py","file_name":"is2class_query.py","file_ext":"py","file_size_in_byte":1923,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"512136495","text":"# -*- coding: utf-8 -*-\n# @Time : 2020/1/24 11:00\n# @Author : ooooo\n\n\"\"\"\n生成列表\n- 用range创建数字列表\n- 生成表达式\n- 生成器\n\"\"\"\n\n\ndef fib(n):\n a, b = 0, 1\n for _ in range(n):\n a, b = b, a + b\n yield a\n\n\ndef main():\n list1 = list(range(1, 11))\n print(list1)\n # 生成表达式\n list2 = [x * x for x in range(1, 11)]\n print(list2)\n list3 = [m + n for m in 'ABCDE' for n in '123']\n print(list3)\n print(len(list3))\n # 生成器 ()\n gen = (m + n for m in 'ABC' for n in '123')\n print(gen)\n for item in gen:\n print(item, end=' ')\n print()\n # yield 也是生成器\n gen = fib(20)\n print(gen)\n for item in gen:\n print(item, end=' ')\n print()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"day07/list3.py","file_name":"list3.py","file_ext":"py","file_size_in_byte":787,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"201997685","text":"# Standard libraries\nimport os\n\n# External libraries\nimport dash_html_components as html\nfrom dash.dependencies import Input, Output, State\n\n# Internal libraries\nfrom dssdashboard.constants import HEADER_TITLE, HEADER_DETAIL, TABLIST\nfrom dssdashboard.dash_content import create_banner, create_tabs, create_paragraph\nfrom dssdashboard.profile_tab import CreateProfileTab\nfrom dssdashboard.pv_tab import CreatePVtab\nfrom dssdashboard.pv_connection_tab import PVconnectionTab\n\n\nclass CreateApp:\n\n def __init__(self,app, settings, logger, \n profile=None, pv_object=None, advanced_pv_object=None, coord_object=None):\n\n self.app = app\n self.settings = settings\n self.logger = logger\n self.profile_object = profile\n self.pv_object = pv_object\n self.coord_object = coord_object\n self.advanced_pv_object = advanced_pv_object\n\n self.active_project = os.path.join(self.settings['project_path'],self.settings['active_project'])\n \n if self.profile_object != None:\n self.profiletab = CreateProfileTab(self.app,self.profile_object,\n self.settings,self.logger)\n\n if self.pv_object !=None and self.coord_object != None:\n self.pvtab = CreatePVtab(self.app,self.pv_object,self.coord_object,\n self.settings,self.logger,'classical')\n \n if self.advanced_pv_object !=None and self.coord_object != None:\n self.advanced_pvtab = CreatePVtab(self.app,self.pv_object,self.coord_object,\n self.settings,self.logger,'advanced')\n\n if 'PVConnection' in os.listdir(self.active_project) and self.coord_object != None:\n self.pvcon_tab = PVconnectionTab(self.app,self.coord_object, \n self.settings,self.logger)\n \n # Create a layout \n self.app.layout = html.Div( children=[\n self.top_banner(),\n self.tabs(),\n self.content()\n ])\n\n def content(self):\n return html.Div(id='tab-content')\n\n def update_on_tab(self):\n \n # update tab content\n @self.app.callback(Output('tab-content','children'),[Input(\"dashtab\",\"value\")])\n def update_render(tab): \n self.logger.info(f'{tab} clicked')\n if tab == 'Classical PV':\n if self.pv_object != None and self.coord_object != None:\n return self.pvtab.layout()\n\n if tab == 'Advanced PV':\n if self.advanced_pv_object != None and self.coord_object != None:\n return 
self.advanced_pvtab.layout()\n \n if tab == 'PV Connection Request':\n if 'PVConnection' in os.listdir(self.active_project) and self.coord_object != None:\n return self.pvcon_tab.layout() \n\n if tab == 'Initial Assessment':\n if self.profile_object != None:\n return self.profiletab.layout()\n \n def call_backs(self):\n\n self.update_on_tab()\n if self.profile_object != None:\n self.profiletab.call_backs()\n if self.pv_object != None and self.coord_object != None:\n self.pvtab.call_backs()\n if self.advanced_pv_object != None and self.coord_object != None:\n self.advanced_pvtab.call_backs()\n if 'PVConnection' in os.listdir(self.active_project) and self.coord_object != None:\n self.pvcon_tab.call_backs()\n \n def top_banner(self):\n\n self.logger.info('Creating top banner ........')\n return create_banner(self.app,HEADER_TITLE,HEADER_DETAIL,'logo.png')\n\n def tabs(self):\n\n self.logger.info('Creating tabs ........')\n return create_tabs(TABLIST, 'dashtab')\n\n def layout(self):\n\n return self.app.layout","sub_path":"EMeRGE/dssdashboard/dashboard_create.py","file_name":"dashboard_create.py","file_ext":"py","file_size_in_byte":3868,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"474734891","text":"import __main__\nfrom flask import Flask,request,jsonify\nfrom flask_cors import CORS,cross_origin\nfrom bert_bindings.transformer_model_vectorizer import FlairTransformerEmbedding\nfrom w2v_tf_idf_bindings.w2v_tfi_df_vectorizer import Word2Vec_TF_IDF_Vectorizer\nfrom w2v_bindings.w2v_vectorizer import Word2VecVectorizer\nfrom recommendation_system import recommend_books\nfrom load_generated_embeddings import bert_data, w2v_data, w2v_tf_idf_data, tf_idf_data\nimport json\nfrom preprocess import make_lower_case, remove_html, remove_punctuation, remove_stop_words, _removeNonAscii\napp = Flask(__name__)\n__main__.FlairTransformerEmbedding = FlairTransformerEmbedding\n__main__.Word2Vec_TF_IDF_Vectorizer = Word2Vec_TF_IDF_Vectorizer\n__main__.Word2VecVectorizer = Word2VecVectorizer\napp.config.from_object(__name__)\ncors = CORS(app)\ndef get_data_dict(embedding_type):\n if embedding_type == 'bert':\n return bert_data\n elif embedding_type == 'w2v':\n return w2v_data\n elif embedding_type == 'w2v_tf_idf':\n return w2v_tf_idf_data\n else:\n return tf_idf_data\n\ndef preprocess_desc(desc, is_bert=True):\n desc = _removeNonAscii(desc)\n desc = remove_html(desc)\n if is_bert and len(desc.split())>512:\n desc = remove_stop_words(desc)\n elif not is_bert:\n desc = make_lower_case(desc)\n desc = remove_punctuation(desc)\n return desc\n\n@app.route('/recommend_books', methods=['POST'])\n@cross_origin()\ndef recommend_books_api():\n try:\n json_data = request.json\n title = json_data.get('title', None)\n desc = json_data.get('description', None)\n filter_by_rating = json_data.get('filter_by_rating', True)\n embedding_type = json_data.get('embedding_type', 'bert')\n num_books = json_data.get('num_books', 5)\n if not desc and not title:\n return 'Please provide either title or description', 422\n if desc:\n desc = preprocess_desc(desc, is_bert=embedding_type=='bert')\n # Reading the dat\n data_dict = get_data_dict(embedding_type)\n output = recommend_books(data_dict['embedding_transformer'], data_dict['cosine_similarity_mat'], title=title, desc=desc, filter_by_rating=filter_by_rating, vector_col=embedding_type+'_vectors', num_books=num_books)\n print(json.dumps(output, indent = 4))\n return jsonify(output)\n except Exception as e:\n print(e)\n return e, 
500\n\nif __name__==\"__main__\":\n app.run(host='0.0.0.0', port=8000)","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2498,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"33349298","text":"# Project Euler\n# Alex Johnson\n# Problem 42: Coded triangle numbers\n\n# step 1: load words\nwith open(\"files/p42-words.txt\", \"r\") as infile:\n words = infile.readline().strip().replace(\"\\\"\", \"\").split(\",\")\n\n# step 2: calculate scores\nfor i in range(len(words)):\n word = words[i]\n total = 0\n for char in word:\n total += ord(char) - ord('A') + 1\n words[i] = total\n\n# step 3: calculate triangle numbers as necessary\nnums = [1]\nwhile nums[-1] < max(words):\n n = len(nums) + 1\n nums.append((n * (n + 1)) // 2)\n\n# step 4: count\ncount = 0\nfor word in words:\n if word in nums:\n count += 1\n\nprint(count)\n","sub_path":"p026-050/p42.py","file_name":"p42.py","file_ext":"py","file_size_in_byte":630,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"490315419","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Apr 19 13:14:24 2019\n\n@author: GEORGEDICKINSON\n\"\"\"\n\nimport pandas as pd\nimport glob\n\npath = r\"C:\\Users\\georgedickinson\\Desktop\\forConcatonation\\*.csv\"\nsavePath = r\"C:\\Users\\georgedickinson\\Desktop\\forConcatonation\"\n\nfileList = glob.glob(path)\n\nfor i in range(len(fileList)):\n filePath = fileList[i]\n locs = pd.read_csv(filePath)\n \n locs['x [nm]'] = locs['x [nm]'] + (i * 1000)\n \n saveName = savePath + '\\\\' + 'average_' + str(i) + '.csv'\n \n locs.to_csv(saveName, index=False)","sub_path":"BSU/concatonateAveragedImages.py","file_name":"concatonateAveragedImages.py","file_ext":"py","file_size_in_byte":542,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"52527643","text":"\r\ndef fact(n):\r\n f=1\r\n for i in range(1,1+n):\r\n f=f*i\r\n return f\r\n\r\ndef armstrong(n):\r\n sum=0\r\n a=0\r\n temp=n\r\n while temp>0:\r\n a = temp % 10\r\n sum=sum+a*a*a\r\n temp=temp//10\r\n if n== sum:\r\n print(n,\"armstrong\")\r\n else:\r\n print(n,\"not an Armstrong\")\r\n \r\n\r\n\r\ndef palindrome(x):\r\n rev=0\r\n temp=x\r\n while x>0:\r\n r=x%10\r\n x=int(x/10)\r\n rev=rev*10+r\r\n if rev==temp:\r\n print(temp,\"is palindrome\")\r\n else:\r\n print(temp,\"is not palindrome\")\r\n\r\ndef pow(n,m):\r\n return n**m\r\n\r\n","sub_path":"func.py","file_name":"func.py","file_ext":"py","file_size_in_byte":592,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"493049702","text":"from django.contrib import admin\nfrom .models import ClunyaStart\n\n# app: clunya\n\n# Регистрация модели для добавления ее в админку сайта.\n@admin.register(ClunyaStart)\nclass AdminClunyaStart(admin.ModelAdmin):\n # перечисление полей модели для показа в админке\n list_display = [\n 'per_project_name',\n 'per_description_project_name',\n 'per_contacts'\n ]\n","sub_path":"bin/ystm_0/clunya/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"139144982","text":"import base64\nimport json\nimport os\nimport unittest\n\nfrom cryptography.hazmat.backends.openssl.backend import backend\nfrom cryptography.hazmat.primitives import serialization, hashes\nfrom cryptography.hazmat.primitives.asymmetric import padding\nfrom 
cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes\nfrom cryptography.hazmat.primitives.serialization import Encoding, PublicFormat\nfrom sdc.crypto.key_store import KeyStore\nimport jwt\nimport yaml\n\nimport settings\nfrom server import app\nfrom server import KEY_PURPOSE_SUBMISSION\n\n\ndef get_key(key_name):\n key = open(key_name, 'r')\n contents = key.read()\n return contents\n\n\n# sdx keys\nPRIVATE_KEY = get_key(\"./jwt-test-keys/sdc-sdx-submission-encryption-private-v1.pem\")\nTEST_EQ_PRIVATE_KEY = get_key(\"./jwt-test-keys/eq/sdc-eq-submission-signing-private-v1.pem\")\n\n\nclass Encrypter:\n def __init__(self, private_kid, public_kid):\n self.private_kid = private_kid\n self.public_kid = public_kid\n\n private_key_bytes = self._to_bytes(TEST_EQ_PRIVATE_KEY)\n\n self.private_key = serialization.load_pem_private_key(private_key_bytes,\n password=None,\n backend=backend)\n private_decryption_key = serialization.load_pem_private_key(\n PRIVATE_KEY.encode(),\n password=None,\n backend=backend\n )\n\n public_key_bytes = private_decryption_key.public_key().public_bytes(\n encoding=Encoding.PEM,\n format=PublicFormat.SubjectPublicKeyInfo\n )\n\n self.public_key = serialization.load_pem_public_key(public_key_bytes, backend=backend)\n\n # first generate a random key\n self.cek = os.urandom(32) # 256 bit random CEK\n\n # now generate a random IV\n self.iv = os.urandom(12) # 96 bit random IV\n\n @classmethod\n def _to_bytes(self, bytes_or_str):\n if isinstance(bytes_or_str, str):\n value = bytes_or_str.encode()\n else:\n value = bytes_or_str\n return value\n\n def _jwe_protected_header(self):\n header = '{\"alg\":\"RSA-OAEP\",\"enc\":\"A256GCM\", \"kid\":\"' + self.private_kid + '\"}'\n return self._base_64_encode(header.encode())\n\n def _encrypted_key(self, cek):\n ciphertext = self.public_key.encrypt(cek, padding.OAEP(\n mgf=padding.MGF1(algorithm=hashes.SHA1()), algorithm=hashes.SHA1(), label=None))\n return self._base_64_encode(ciphertext)\n\n def _encode_iv(self, iv):\n return self._base_64_encode(iv)\n\n @classmethod\n def _base_64_encode(self, text):\n # strip the trailing = as they are padding to make the result a multiple of 4\n # the RFC does the same, as do other base64 libraries so this is a safe operation\n return base64.urlsafe_b64encode(text).decode().strip(\"=\").encode()\n\n def _encode_and_signed(self, payload):\n return jwt.encode(payload, self.private_key, algorithm=\"RS256\", headers={'kid': self.public_kid, 'typ': 'jwt'})\n\n def encrypt(self, json):\n payload = self._encode_and_signed(json)\n jwe_protected_header = self._jwe_protected_header()\n encrypted_key = self._encrypted_key(self.cek)\n\n cipher = Cipher(algorithms.AES(self.cek), modes.GCM(self.iv), backend=backend)\n encryptor = cipher.encryptor()\n\n encryptor.authenticate_additional_data(jwe_protected_header)\n\n ciphertext = encryptor.update(payload) + encryptor.finalize()\n\n tag = encryptor.tag\n\n encoded_ciphertext = self._base_64_encode(ciphertext)\n encoded_tag = self._base_64_encode(tag)\n\n # assemble result\n jwe = jwe_protected_header + b\".\" + encrypted_key + b\".\" + \\\n self._encode_iv(self.iv) + b\".\" + encoded_ciphertext + b\".\" + encoded_tag\n\n return jwe\n\n\nclass TestDecryptService(unittest.TestCase):\n\n decrypt_endpoint = \"/decrypt\"\n\n def setUp(self):\n # creates a test client\n self.app = app.test_client()\n\n # propagate the exceptions to the test client\n self.app.testing = True\n with open(settings.SDX_KEYS_FILE) as file:\n secrets_from_file = 
yaml.safe_load(file)\n\n secret_store = KeyStore(secrets_from_file)\n\n jwt_key = secret_store.get_key_for_purpose_and_type(KEY_PURPOSE_SUBMISSION, \"private\")\n\n jwe_key = secret_store.get_key_for_purpose_and_type(KEY_PURPOSE_SUBMISSION, \"public\")\n\n self.encrypter = Encrypter(jwt_key.kid, jwe_key.kid)\n\n def encrypt_and_send_json(self, json_string):\n\n data = json.loads(json_string)\n\n encoded_data = self.encrypter.encrypt(data)\n\n # Ask posie to decode message\n r = self.app.post(self.decrypt_endpoint, data=encoded_data)\n\n return r\n\n def test_decrypt_fail_sends_400(self):\n\n # Ask posie to decode message\n r = self.app.post(self.decrypt_endpoint, data='rubbish')\n\n self.assertEqual(r.status_code, 400)\n\n def test_no_content_sends_400(self):\n\n # Ask posie to decode message\n r = self.app.post(self.decrypt_endpoint, data='')\n\n self.assertEqual(r.status_code, 400)\n\n def test_decrypts_message(self):\n # Encrypt a message with the key\n message = '''{\"some\": \"well\", \"formed\": \"json\"}'''\n\n # Ask posie to decode message\n r = self.encrypt_and_send_json(message)\n\n # Compare to bytestring version of decrypted data\n self.assertEqual(json.loads(r.data.decode('UTF8')), json.loads(message))\n\n def test_decrypts_large_message_no_tx_id(self):\n # Encrypt a message with the key\n message = '''{\n \"type\": \"uk.gov.ons.edc.eq:surveyresponse\",\n \"version\": \"0.0.1\",\n \"origin\": \"uk.gov.ons.edc.eq\",\n \"survey_id\": \"21\",\n \"collection\": {\n \"exercise_sid\": \"hfjdskf\",\n \"instrument_id\": \"0203\",\n \"period\": \"2016-02-01\"\n },\n \"submitted_at\": \"2016-03-12T10:39:40Z\",\n \"metadata\": {\n \"user_id\": \"789473423\",\n \"ru_ref\": \"12345678901A\"\n },\n \"data\": {\n \"11\": \"01042016\",\n \"12\": \"31102016\",\n \"20\": \"1800000\",\n \"51\": \"84\",\n \"52\": \"10\",\n \"53\": \"73\",\n \"54\": \"24\",\n \"50\": \"205\",\n \"22\": \"705000\",\n \"23\": \"900\",\n \"24\": \"74\",\n \"25\": \"50\",\n \"26\": \"100\",\n \"21\": \"60000\",\n \"27\": \"7400\",\n \"146\": \"some comment\"\n }\n }'''\n\n # Encrypt and ask posie to decode message\n r = self.encrypt_and_send_json(message)\n\n self.assertEqual(json.loads(r.data.decode('UTF8')), json.loads(message))\n\n def test_decrypts_large_message_with_tx_id(self):\n # Encrypt a message with the key\n message = '''{\n \"type\": \"uk.gov.ons.edc.eq:surveyresponse\",\n \"version\": \"0.0.1\",\n \"origin\": \"uk.gov.ons.edc.eq\",\n \"survey_id\": \"21\",\n \"tx_id\": \"27923934-62de-475c-bc01-433c09fd38b8\",\n \"collection\": {\n \"exercise_sid\": \"hfjdskf\",\n \"instrument_id\": \"0203\",\n \"period\": \"2016-02-01\"\n },\n \"submitted_at\": \"2016-03-12T10:39:40Z\",\n \"metadata\": {\n \"user_id\": \"789473423\",\n \"ru_ref\": \"12345678901A\"\n },\n \"data\": {\n \"11\": \"01042016\",\n \"12\": \"31102016\",\n \"20\": \"1800000\",\n \"51\": \"84\",\n \"52\": \"10\",\n \"53\": \"73\",\n \"54\": \"24\",\n \"50\": \"205\",\n \"22\": \"705000\",\n \"23\": \"900\",\n \"24\": \"74\",\n \"25\": \"50\",\n \"26\": \"100\",\n \"21\": \"60000\",\n \"27\": \"7400\",\n \"146\": \"some comment\"\n }\n }'''\n\n # Encrypt and ask posie to decode message\n r = self.encrypt_and_send_json(message)\n\n self.assertEqual(json.loads(r.data.decode('UTF8')), json.loads(message))\n","sub_path":"tests/test_service.py","file_name":"test_service.py","file_ext":"py","file_size_in_byte":8218,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"416558188","text":"from bisect 
import bisect_left\n \nclass Solution:\n def subarraySum(self, nums: List[int], k: int) -> int: \n psum, psum_indices = [0]*len(nums), {}\n for i, n in enumerate(nums):\n psum[i] = n + (psum[i-1] if i-1 >= 0 else 0)\n psum_indices[psum[i]] = psum_indices.get(psum[i], []) + [i]\n \n count = 0\n for end in range(len(nums)):\n if psum[end] == k:\n count = count + 1\n rem_sum = psum[end] - k\n rem_sum_prefixes = psum_indices.get(rem_sum, [])\n count = count + bisect_left(rem_sum_prefixes, end)\n return count\n","sub_path":"Practice-2020/InterviewLeetcode/Python/subarray_sum_equals_k.py","file_name":"subarray_sum_equals_k.py","file_ext":"py","file_size_in_byte":642,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"123197499","text":"from django.apps import apps\nimport simple_audit\nfrom simple_audit.models import Audit\nfrom simple_audit.models import AuditChange\nfrom simple_audit.models import AuditRequest\nfrom rest_framework.response import Response\nfrom rest_framework import viewsets, views, filters, serializers\nfrom simulador.pagination import BasePagination\nfrom simulador.resources.account import AccountShortDetailSerializer\nfrom simulador.resources.city import City\nfrom django.contrib.contenttypes.models import ContentType\n\n\nclass ContentTypeSerializer(serializers.ModelSerializer):\n class Meta:\n model = ContentType\n fields = ('id', 'app_label', 'model')\n\n\nclass AuditRequestSerializer(serializers.ModelSerializer):\n user = AccountShortDetailSerializer(read_only=True)\n\n class Meta:\n model = AuditRequest\n fields = (\n 'id', 'request_id', 'ip', 'path', 'date', 'user')\n order_by = (\n ('date',)\n )\n\n\nclass AuditSerializer(serializers.ModelSerializer):\n audit_request = AuditRequestSerializer(read_only=True)\n content_type = ContentTypeSerializer(read_only=True)\n\n class Meta:\n model = Audit\n fields = (\n 'id', 'date', 'operation', 'content_type', 'object_id', 'audit_request', 'description', 'obj_description')\n\n\nclass AuditChangeSerializer(serializers.ModelSerializer):\n audit = AuditSerializer(read_only=True)\n\n class Meta:\n model = AuditChange\n fields = (\n 'id', 'audit', 'field', 'old_value', 'new_value',)\n\n\nclass LogsViewSet(viewsets.ModelViewSet):\n queryset = Audit.objects.order_by('-date')\n serializer_class = AuditSerializer\n pagination_class = BasePagination\n filter_backends = (filters.DjangoFilterBackend, filters.SearchFilter,)\n search_fields = (\n '$audit_request__user__first_name', '$audit_request__user__last_name', '$audit_request__user__ci',\n '$audit_request__user__user_type__short', '$date', '$description')\n http_method_names = ('get',)\n\n def list(self, request, *args, **kwargs):\n data_request = super(LogsViewSet, self).list(self, request)\n data = data_request.data\n for result in data['results']:\n description = result['description']\n operation = result['operation']\n if operation == '1' or operation == 1:\n description = description.replace(\"field \", \"- \")\n description = description.replace(\"' to '\", \"' a '\")\n description = description.replace(\" to '\", \" a \")\n description = description.replace(\" to \", \" a \")\n description = description.replace(\"' was changed from '\",\n \" de '\")\n description = description.replace(\" was changed from \",\n \" de \")\n description = description.replace(\"\\n\", \"
\\n\")\n description = description.replace(\"'\\n\", \"'

\\n\")\n description = \"Con ID: %s
%s\" % (result['object_id'], description)\n # description = \"
%s
\" % description\n result['operation'] = \"Modificar\"\n elif operation == '0' or operation == 0:\n description = description.replace(\"Added \", \"Registro ingresado: \")\n description = \"%s
Con ID: %s\" % (description, result['object_id'])\n result['operation'] = \"Ingresar\"\n elif operation == '2' or operation == 2:\n description = description.replace(\"Deleted \", \"Registro eliminado: \")\n description = \"%s
Con ID: %s\" % (description, result['object_id'])\n result['operation'] = \"Eliminar\"\n result['description'] = description\n\n if ('audit_request' in result) and (result['audit_request'] is not None):\n user = result['audit_request']['user']\n result['ci'] = \"%s %s\" % (user['ci'], user['city']['short'])\n user_type = \"%s\" % user['user_type']['short']\n result['user'] = \"%s. %s %s , %s %s\" % (\n user['military_grade']['short'], user['first_name'], user['last_name'], result['ci'], user_type)\n\n result['email'] = user['email']\n result['image'] = user['image']\n result['ip_address'] = result['audit_request']['ip']\n result['path'] = result['audit_request']['path']\n else:\n result['ci'] = \"-- desconocido --\"\n result['user'] = \"-- desconocido --\"\n result['email'] = \"-- desconocido --\"\n result['image'] = \"\"\n result['ip_address'] = \"-- desconocido --\"\n result['path'] = \"-- desconocido --\"\n result['table'] = \"
%s
\" % result['content_type']['model']\n del (result['content_type'])\n if 'ci' in result:\n del (result['ci'])\n if 'audit_request' in result:\n del (result['audit_request'])\n if 'obj_description' in result:\n del (result['obj_description'])\n return Response(data)\n\n\nclass LogsView(views.APIView):\n def get(self, request):\n simple_audit.register()\n return Response({\"d\": 22})\n\n # reversion.get_f\n","sub_path":"simulador/resources/logs.py","file_name":"logs.py","file_ext":"py","file_size_in_byte":5788,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"504055781","text":"from __future__ import absolute_import\n\nimport logging\nimport posixpath\n\nfrom symsynd import find_best_instruction, parse_addr, ImageLookup\n\nfrom sentry import options\nfrom django.db import transaction, IntegrityError\nfrom sentry.models import VersionDSymFile, DSymPlatform, DSymApp\nfrom sentry.plugins import Plugin2\nfrom sentry.lang.native.symbolizer import Symbolizer, SymbolicationFailed\nfrom sentry.lang.native.utils import \\\n get_sdk_from_event, cpu_name_from_data, \\\n rebase_addr, version_build_from_data\nfrom sentry.lang.native.systemsymbols import lookup_system_symbols\nfrom sentry.utils import metrics\nfrom sentry.stacktraces import StacktraceProcessor\nfrom sentry.reprocessing import report_processing_issue\n\nlogger = logging.getLogger(__name__)\n\nFRAME_CACHE_VERSION = 5\n\n\nclass NativeStacktraceProcessor(StacktraceProcessor):\n def __init__(self, *args, **kwargs):\n StacktraceProcessor.__init__(self, *args, **kwargs)\n debug_meta = self.data.get('debug_meta')\n self.cpu_name = cpu_name_from_data(self.data)\n self.sym = None\n self.dsyms_referenced = set()\n if debug_meta:\n self.available = True\n self.debug_meta = debug_meta\n self.sdk_info = get_sdk_from_event(self.data)\n self.image_lookup = ImageLookup(\n [img for img in self.debug_meta['images'] if img['type'] == 'apple']\n )\n else:\n self.available = False\n\n def close(self):\n StacktraceProcessor.close(self)\n if self.dsyms_referenced:\n metrics.incr(\n 'dsyms.processed', amount=len(self.dsyms_referenced), instance=self.project.id\n )\n if self.sym is not None:\n self.sym.close()\n self.sym = None\n\n def find_best_instruction(self, processable_frame):\n \"\"\"Given a frame, stacktrace info and frame index this returns the\n interpolated instruction address we then use for symbolication later.\n \"\"\"\n if self.cpu_name is None:\n return parse_addr(processable_frame['instruction_addr'])\n meta = None\n\n # We only need to provide meta information for frame zero\n if processable_frame.idx == 0:\n # The signal is useful information for symsynd in some situations\n # to disambiugate the first frame. 
If we can get this information\n # from the mechanism we want to pass it onwards.\n signal = None\n exc = self.data.get('sentry.interfaces.Exception')\n if exc is not None:\n mechanism = exc['values'][0].get('mechanism')\n if mechanism and 'posix_signal' in mechanism and \\\n 'signal' in mechanism['posix_signal']:\n signal = mechanism['posix_signal']['signal']\n meta = {\n 'frame_number': 0,\n 'registers': processable_frame.stacktrace_info.stacktrace.get('registers'),\n 'signal': signal,\n }\n\n return find_best_instruction(\n processable_frame['instruction_addr'], self.cpu_name, meta=meta\n )\n\n def handles_frame(self, frame, stacktrace_info):\n platform = frame.get('platform') or self.data.get('platform')\n return (platform == 'cocoa' and self.available and 'instruction_addr' in frame)\n\n def preprocess_frame(self, processable_frame):\n instr_addr = self.find_best_instruction(processable_frame)\n img = self.image_lookup.find_image(instr_addr)\n\n processable_frame.data = {\n 'instruction_addr': instr_addr,\n 'image': img,\n 'image_uuid': img['uuid'] if img is not None else None,\n 'symbolserver_match': None,\n }\n\n if img is not None:\n processable_frame.set_cache_key_from_values(\n (\n FRAME_CACHE_VERSION,\n # Because the images can move around, we want to rebase\n # the address for the cache key to be within the image\n # the same way as we do it in the symbolizer.\n rebase_addr(instr_addr, img),\n img['uuid'].lower(),\n img['cpu_type'],\n img['cpu_subtype'],\n img['image_size'],\n )\n )\n\n def preprocess_step(self, processing_task):\n if not self.available:\n return False\n\n referenced_images = set(\n pf.data['image_uuid'] for pf in processing_task.iter_processable_frames(self)\n if pf.cache_value is None and pf.data['image_uuid'] is not None\n )\n\n def on_referenced(dsym_file):\n app_info = version_build_from_data(self.data)\n if app_info is not None:\n dsym_app = DSymApp.objects.create_or_update_app(\n sync_id=None,\n app_id=app_info.id,\n project=self.project,\n data={'name': app_info.name},\n platform=DSymPlatform.APPLE,\n )\n try:\n with transaction.atomic():\n version_dsym_file, created = VersionDSymFile.objects.get_or_create(\n dsym_file=dsym_file,\n version=app_info.version,\n build=app_info.build,\n defaults=dict(dsym_app=dsym_app),\n )\n except IntegrityError:\n # XXX: this can currently happen because we only\n # support one app per dsym file. 
Since this can\n # happen in some cases anyways we ignore it.\n pass\n\n self.sym = Symbolizer(\n self.project,\n self.image_lookup,\n cpu_name=self.cpu_name,\n referenced_images=referenced_images,\n on_dsym_file_referenced=on_referenced\n )\n\n if options.get('symbolserver.enabled'):\n self.fetch_system_symbols(processing_task)\n\n def fetch_system_symbols(self, processing_task):\n to_lookup = []\n pf_list = []\n for pf in processing_task.iter_processable_frames(self):\n img = pf.data['image']\n if pf.cache_value is not None or img is None or \\\n self.sym.is_image_from_app_bundle(img):\n continue\n to_lookup.append(\n {\n 'object_uuid': img['uuid'],\n 'object_name': img['name'],\n 'addr': '0x%x' % rebase_addr(pf.data['instruction_addr'], img)\n }\n )\n pf_list.append(pf)\n\n if not to_lookup:\n return\n\n rv = lookup_system_symbols(to_lookup, self.sdk_info, self.sym.cpu_name)\n if rv is not None:\n for symrv, pf in zip(rv, pf_list):\n if symrv is None:\n continue\n pf.data['symbolserver_match'] = symrv\n\n def process_frame(self, processable_frame, processing_task):\n frame = processable_frame.frame\n errors = []\n\n new_frames = []\n raw_frame = dict(frame)\n if processable_frame.cache_value is None:\n # Construct a raw frame that is used by the symbolizer\n # backend. We only assemble the bare minimum we need here.\n instruction_addr = processable_frame.data['instruction_addr']\n in_app = self.sym.is_in_app(instruction_addr, sdk_info=self.sdk_info)\n if in_app and raw_frame.get('function') is not None:\n in_app = not self.sym.is_internal_function(raw_frame['function'])\n if raw_frame.get('in_app') is None:\n raw_frame['in_app'] = in_app\n img_uuid = processable_frame.data['image_uuid']\n if img_uuid is not None:\n self.dsyms_referenced.add(img_uuid)\n try:\n symbolicated_frames = self.sym.symbolize_frame(\n instruction_addr,\n self.sdk_info,\n symbolserver_match=processable_frame.data['symbolserver_match']\n )\n if not symbolicated_frames:\n return None, [raw_frame], []\n except SymbolicationFailed as e:\n # User fixable but fatal errors are reported as processing\n # issues\n if e.is_user_fixable and e.is_fatal:\n report_processing_issue(\n self.data,\n scope='native',\n object='dsym:%s' % e.image_uuid,\n type=e.type,\n data={\n 'image_path': e.image_path,\n 'image_uuid': e.image_uuid,\n 'image_arch': e.image_arch,\n 'message': e.message,\n }\n )\n\n # This in many ways currently does not really do anything.\n # The reason is that once a processing issue is reported\n # the event will only be stored as a raw event and no\n # group will be generated. As a result it also means that\n # we will not have any user facing event or error showing\n # up at all. 
We want to keep this here though in case we\n # do not want to report some processing issues (eg:\n # optional dsyms)\n errors = []\n if e.is_user_fixable or e.is_sdk_failure:\n errors.append(\n {\n 'type': e.type,\n 'image_uuid': e.image_uuid,\n 'image_path': e.image_path,\n 'image_arch': e.image_arch,\n 'message': e.message,\n }\n )\n else:\n logger.debug('Failed to symbolicate with native backend', exc_info=True)\n return [raw_frame], [raw_frame], errors\n\n processable_frame.set_cache_value([in_app, symbolicated_frames])\n else:\n in_app, symbolicated_frames = processable_frame.cache_value\n raw_frame['in_app'] = in_app\n\n for sfrm in symbolicated_frames:\n new_frame = dict(frame)\n new_frame['function'] = sfrm['function']\n if sfrm.get('symbol'):\n new_frame['symbol'] = sfrm['symbol']\n new_frame['abs_path'] = sfrm['abs_path']\n new_frame['filename'] = sfrm.get('filename') or \\\n (sfrm['abs_path'] and posixpath.basename(sfrm['abs_path'])) or None\n if sfrm.get('lineno'):\n new_frame['lineno'] = sfrm['lineno']\n if sfrm.get('colno'):\n new_frame['colno'] = sfrm['colno']\n if sfrm.get('package'):\n new_frame['package'] = sfrm['package']\n if new_frame.get('in_app') is None:\n new_frame['in_app'\n ] = (in_app and not self.sym.is_internal_function(new_frame['function']))\n new_frames.append(new_frame)\n\n return new_frames, [raw_frame], []\n\n\nclass NativePlugin(Plugin2):\n can_disable = False\n\n def get_stacktrace_processors(self, data, stacktrace_infos, platforms, **kwargs):\n if 'cocoa' in platforms:\n return [NativeStacktraceProcessor]\n","sub_path":"src/sentry/lang/native/plugin.py","file_name":"plugin.py","file_ext":"py","file_size_in_byte":11579,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"6379446","text":"import sys\nimport os\n\nSELF_DIR = os.path.dirname(__file__)\nCMD_DIR = os.path.abspath(os.path.join(SELF_DIR, \"..\", \"..\", \"..\", \"cmds\"))\n\n\ndef main():\n cmds = os.listdir(CMD_DIR)\n cmds.sort()\n print(\"\\n\".join(cmds))\n return\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"pysrc/cmds/common/zcmd.py","file_name":"zcmd.py","file_ext":"py","file_size_in_byte":274,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"271079857","text":"import mysql.connector as db\nimport numpy as np\nfrom openpyxl import Workbook\nimport sys\nimport re\n\n\n# Базовый запрос к БД. 
This function opens a connection to the DB\n# and sends it the query that is passed in as a string.\n# Returns the query result as a tuple of rows\n\ndef zapros_base(query_str):\n    db_user = 'soctraf'\n    db_pass = 'traffic'\n    db_IP = '10.245.41.196'\n    db_db = 'traf'\n    db_connection = db.connect(host=db_IP, user=db_user, password=db_pass, database=db_db)\n    cursor = db_connection.cursor()\n    cursor.execute(query_str)\n    results = cursor.fetchall()\n    db_connection.close()\n    return results\n\ndef zapros_int(interface, provider, device, period):\n    days = []\n    maksimus = []\n    totals = []\n    z_year, z_month, z_start_day, z_stop_day = period\n    zapros_str = \"\"\"SELECT _av.peak,\n    _av.day \n    FROM avtable_day _av \n    WHERE \n    _av.year = {year}\n    AND _av.month = {month} \n    AND _av.day >= {start_day}\n    AND _av.day <= {stop_day} \n    AND _av.traf = 'bps_in' \n    AND _av.provider LIKE '{prov}' \n    AND _av.device = '{dev}'\n    AND _av.nameds LIKE '{iface}'\n    \n    ORDER BY\n    _av.day \n    \"\"\".format(year=z_year, month=z_month, start_day=z_start_day, stop_day=z_stop_day,\n               prov=provider, dev=device, iface=interface)\n\n    x_list = zapros_base(zapros_str)\n    if len(x_list) != 0:\n        for x in x_list:\n            maksimus.append(x[0])\n            days.append(x[1])\n        totals.append(days)\n        totals.append(maksimus)\n    else: # The query returned no data.\n        totals.append('ERROR')\n\n    return totals\n\ndef create_periods(year, month_start, month_stop, day_start, day_stop):\n    period = []\n    periods = []\n    for i in range(month_stop - month_start + 1):\n        period.append(year)\n        period.append(month_start + i)\n        if (i == 0):\n            period.append(day_start)\n        else:\n            period.append(1)\n        if (i == month_stop - month_start):\n            period.append(day_stop)\n        else:\n            if month_start+i in [1, 3, 5, 7, 8, 10, 12]:\n                period.append(31)\n            if month_start+i in [4, 6, 9, 11]:\n                period.append(30)\n            if month_start+i in [2]:\n                period.append(28)\n        periods.append(period)\n        period = []\n    return periods\n\ndef create_zeros(period):\n    days = []\n    maksimus = []\n    totals = []\n    _, _, day_start, day_stop = period\n    for i in range(day_stop-day_start+1):\n        days.append(day_start+i)\n        maksimus.append(0)\n    totals.append(days)\n    totals.append(maksimus)\n    return totals\n\ndef sum_zeros_and_query(zeros, query):\n    \n    first_array = np.array(zeros).astype(float)\n    second_array = np.array(query).astype(float)\n\n#    print(second_array)\n    steps = len(second_array[0]) # number of substitutions\n    for step in range (steps):\n        #print ('Step number - {x}'.format(x =step))\n        #print (second_array[:,step]) \n        \n        x = second_array[0][step]\n        \n        t = np.where(first_array[0] == x)\n        f_pos = t[0][0]\n        \n        # print(second_array[:,step])\n        first_array[:,f_pos] = second_array[:,step]\n    z = []\n    z.append(first_array[0].astype(int).tolist())\n    z.append(first_array[1].astype(float).tolist())\n    \n    return z\n\ndef create_if_string(interface, provider, device, period):\n    year = period[0]\n    m_start = period[1]\n    m_stop = period[2]\n    d_start = period[3]\n    d_stop = period[4]\n    periods = create_periods(year, m_start, m_stop, d_start, d_stop)\n\n    day_if_periods = []\n    max_if_periods = []\n    # sum_if_periods = []\n\n    for p in periods:\n        z =[]\n        q =[]\n        z = create_zeros(p)\n        q = zapros_int(interface, provider, device, p)\n        if (q ==['ERROR']):\n            # No data for this interface... fall back to all zeros\n            q = z\n        temp_list = sum_zeros_and_query(z, q)\n        day_if_periods = day_if_periods + temp_list[0]\n        max_if_periods = max_if_periods + temp_list[1]\n    # sum_if_periods.append(day_if_periods)\n    # sum_if_periods.append(max_if_periods)\n\n    return day_if_periods, max_if_periods\n\n# Returns the list of maximum values per interface.\n# Input: Output:\ndef create_direction_string(interfaces, provider, device, period):\n    direction_list = []\n    days = []\n    for interface in interfaces:\n        \n        days, maxif = create_if_string(interface, provider, device, period)\n        direction_list.append(maxif)\n        \n    return days, direction_list\n\ndef avg_sum_days(kv_matr): \n    t1 = []\n    x_array = np.array(kv_matr)\n    z = x_array.sum(axis=0)\n    \n    columns = (x_array != 0).sum(0)\n    result = []\n    result = z/columns\n\n    t1 = result.tolist()\n    # t1.append(x)\n    return t1\n\nperiods = [2019,2,3,26,5]\nyear = periods[0]\nm_start = periods[1]\nm_stop = periods[2]\nd_start = periods[3]\nd_stop = periods[4]\nperiod_list = create_periods(year, m_start, m_stop, d_start, d_stop)\nifaces = ['%xe-1/0/1%', '%xe-1/0/21%']\nprov = 'MEGAFON'\ndev = 'asta-gate-1'\n\ndays, data = create_direction_string(ifaces, prov, dev, periods)\navg = avg_sum_days(data)\nprint()\nprint(period_list)\nprint(days)\nprint(data)\nprint(avg)\n\nprint()","sub_path":"numpy_test.py","file_name":"numpy_test.py","file_ext":"py","file_size_in_byte":5740,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"469186344","text":"\"\"\" Analyzes the word frequencies in a book downloaded from\nProject Gutenberg \"\"\"\n\nimport string\n\n\ndef get_word_list(file_name):\n    \"\"\" Reads the specified project Gutenberg book. Header comments,\n    punctuation, and whitespace are stripped away. 
The function\n returns a list of the words used in the book as a list.\n All words are converted to lower case.\n \"\"\"\n raw_text = open(file_name, 'r')\n lines = raw_text.readlines()\n curr_line = 0\n while lines[curr_line].find('START OF THIS PROJECT GUTENBERG EBOOK') == -1:\n curr_line += 1\n all_text = lines[curr_line+1:]\n stripped_words = [\"\".join(c for c in word if c not in string.punctuation) for word in\n [val for sublist in [line.split() for line in all_text] for val in sublist]]\n return stripped_words\n\n\ndef get_top_n_words(word_list, n):\n \"\"\" Takes a list of words as input and returns a list of the n most frequently\n occurring words ordered from most to least frequently occurring.\n\n word_list: a list of words (assumed to all be in lower case with no\n punctuation\n n: the number of words to return\n returns: a list of n most frequently occurring words ordered from most\n frequently to least frequentlyoccurring\n \"\"\"\n word_dict = {i: word_list.count(i) for i in set(word_list)}\n sorted_words = list(sorted(word_dict, key=word_dict.get, reverse=True))\n [print(\"Word: {}\\t Frequency: {}\".format(sorted_words[idx], word_dict[sorted_words[idx]])) for idx in range(n)]\n\n\nif __name__ == \"__main__\":\n words = get_word_list(\"pg84.txt\")\n get_top_n_words(words, 100)\n","sub_path":"frequency.py","file_name":"frequency.py","file_ext":"py","file_size_in_byte":1612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"211232255","text":"# -*- Mode: Python; indent-tabs-mode: t; python-indent: 4; tab-width: 4 -*-\nfrom gi.repository import Gio\nfrom cavalcade.visualpage import VisualPage\nfrom cavalcade.cavapage import CavaPage\nfrom cavalcade.playerpage import PlayerPage\nfrom cavalcade.colordata import ColorsWindow\nfrom cavalcade.common import GuiBase\n\n\nclass SettingsWindow(GuiBase):\n\t\"\"\"Settings window\"\"\"\n\tdef __init__(self, mainapp):\n\t\telements = (\n\t\t\t\"window\", \"headerbar\", \"winstate-menubutton\", \"stackswitcher\", \"app-menu\", \"stack\", \"winstate-menu\",\n\t\t\t\"app-menubutton\",\n\t\t)\n\t\tsuper().__init__(\"settings.ui\", \"appmenu.ui\", \"winstate.ui\", elements=elements)\n\n\t\tself.actions = {}\n\t\tself._mainapp = mainapp\n\t\tself.gui[\"window\"].set_keep_above(True)\n\t\tself.gui[\"window\"].set_application(mainapp)\n\t\tself.actions[\"settings\"] = Gio.SimpleActionGroup()\n\n\t\t# add visual page\n\t\tself.visualpage = VisualPage(self._mainapp, self)\n\t\tself.gui[\"stack\"].add_titled(self.visualpage.gui[\"mainbox\"], \"visset\", \"Visual\")\n\n\t\t# add cava page\n\t\tself.cavapage = CavaPage(self._mainapp)\n\t\tself.gui[\"stack\"].add_titled(self.cavapage.gui[\"mainbox\"], \"cavaset\", \"CAVA\")\n\n\t\t# add colors dialog\n\t\tself.colors = ColorsWindow(self._mainapp)\n\t\tself.colors.gui[\"window\"].set_transient_for(self.gui[\"window\"])\n\n\t\t# setup menu buttons\n\t\tself.gui[\"winstate-menubutton\"].set_menu_model(self.gui[\"winstate-menu\"])\n\t\tself.gui[\"app-menubutton\"].set_menu_model(self.gui[\"app-menu\"])\n\n\t\t# actions\n\t\thide_action = Gio.SimpleAction.new(\"hide\", None)\n\t\thide_action.connect(\"activate\", self.hide)\n\t\tself.actions[\"settings\"].add_action(hide_action)\n\n\t\tshow_action = Gio.SimpleAction.new(\"show\", None)\n\t\tshow_action.connect(\"activate\", self.show)\n\t\tself.actions[\"settings\"].add_action(show_action)\n\n\t\tcolors_action = Gio.SimpleAction.new(\"colors\", None)\n\t\tcolors_action.connect(\"activate\", 
self.colors.show)\n\t\tself.actions[\"settings\"].add_action(colors_action)\n\n\t\t# signals\n\t\tself.gui[\"window\"].connect(\"delete-event\", self.hide)\n\n\tdef add_player_page(self):\n\t\t\"\"\"Optional player page\"\"\"\n\t\t# noinspection PyAttributeOutsideInit\n\t\tself.playerpage = PlayerPage(self._mainapp)\n\t\tself.gui[\"stack\"].add_titled(self.playerpage.gui[\"mainbox\"], \"playset\", \"Player\")\n\n\t# noinspection PyUnusedLocal\n\tdef show(self, *args):\n\t\t\"\"\"Show settings window\"\"\"\n\t\tself.gui[\"window\"].show_all()\n\n\t# noinspection PyUnusedLocal\n\tdef hide(self, *args):\n\t\t\"\"\"Hide settings window\"\"\"\n\t\tself.gui[\"window\"].hide()\n\t\treturn True\n","sub_path":"cavalcade/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":2422,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"459736517","text":"import numpy as np\n\nfrom sawyer.mujoco.tasks.base import ComposableTask\n\n\nclass TransitionTask(ComposableTask):\n    \"\"\"\n    Base class for transition tasks; concrete subclasses define the\n    actual success and failure conditions.\n    \"\"\"\n    def __init__(self):\n        pass\n\n    def compute_reward(self, obs, info):\n        return 0\n\n    def is_success(self, obs, info=None, init=None):\n        raise NotImplementedError\n\n    def is_terminate(self, obs, init):\n        return 
self.is_success(obs, init=init)\n\n def is_fail(self, obs):\n raise NotImplementedError\n\n def reset(self):\n pass\n\n @property\n def completion_bonus(self):\n return self._completion_bonus\n\n\nclass TransitionPickTask(TransitionTask):\n \"\"\"\n Task to pick up an object with the robot gripper.\n\n Success condition:\n - Object is grasped and has been lifted above the table\n \"\"\"\n def __init__(self,\n success_thresh=0.05,\n object_lift_target=0.3,\n completion_bonus=0):\n self._success_thresh = success_thresh\n self._obj_lift_target = object_lift_target\n self._completion_bonus = completion_bonus\n self._t = 0\n\n def is_success(self, obs, info=None, init=None):\n return True\n if init:\n self.reset()\n goal = obs[11:14] + np.array([0, 0, 0.04])\n box_pos = obs[4:7]\n d = np.linalg.norm(box_pos - goal, axis=-1)\n print(\"****[pick/is success] box_pos:{}, goal:{}, d:{}\".format(box_pos, goal, d))\n return d < self._success_thresh\n\n def is_fail(self, obs):\n self._t += 1\n if self._t >= 1 and not self.is_success(obs):\n return True\n return False\n\n def reset(self):\n self._t = 0\n\n\nclass TransitionPlaceTask(TransitionTask):\n \"\"\"\n Task to place object at a desired location.\n \"\"\"\n def __init__(self,\n success_thresh=0.015,\n completion_bonus=0):\n self._success_thresh = success_thresh\n self._completion_bonus = completion_bonus\n self._prev_box_pos = None\n\n def is_success(self, obs, info=None, init=None):\n if init:\n self.reset()\n box_pos = obs[4:7]\n goal = obs[11:14]\n\n max_xy_diff = 0.03\n abs_diff = abs(box_pos - goal)\n\n print(\"****[place/is success] abs_diff:{}\".format(abs_diff))\n return ( abs_diff[0] < max_xy_diff and\n abs_diff[1] < max_xy_diff and\n box_pos[2] < 0.21 )\n\n def is_fail(self, obs):\n box_pos = obs[4:7]\n goal = obs[11:14]\n max_xy_diff = 0.03\n abs_diff = abs(box_pos - goal)\n\n if self._prev_box_pos is None:\n self._prev_box_pos = box_pos\n else:\n max_z_diff = 0.009\n z_diff = self._prev_box_pos[2] - box_pos[2]\n print(\"****[place/is_fail] z_diff:{}, box_pos_z:{}\".format(z_diff, box_pos[2]))\n print(self._prev_box_pos[2], box_pos[2])\n if abs_diff[0] > max_xy_diff or abs_diff[1] > max_xy_diff or z_diff < max_z_diff:\n return True\n else:\n self._prev_box_pos = box_pos\n return False\n\n def reset(self):\n self._prev_box_pos = None\n\n\nclass TransitionPickAndPlaceTask(TransitionTask):\n \"\"\"\n Task to pick up an object and place the object at a desired location.\n\n Success condition:\n - Object is grasped and has been lifted above the table\n \"\"\"\n def __init__(self,\n success_thresh=0.01,\n completion_bonus=0):\n self._success_thresh = success_thresh\n self._completion_bonus = completion_bonus\n self._prev_box_pos = None\n self._picked = False\n self._placing = False\n\n def is_success(self, obs, info=None, init=None):\n if init:\n self.reset()\n box_pos = obs[4:7]\n goal = obs[11:14]\n\n max_xy_diff = 0.02\n abs_diff = abs(box_pos - goal)\n\n print(\"****[pick&place/is success] abs_diff:{}, box_z:{}\".format(abs_diff, box_pos[2]))\n return ( abs_diff[0] < max_xy_diff and\n abs_diff[1] < max_xy_diff and\n box_pos[2] < 0.22 )\n\n def is_fail(self, obs):\n box_pos = obs[4:7]\n goal = obs[11:14]\n abs_diff = abs(box_pos - goal)\n max_xy_diff = 0.03\n\n if self._picked:\n self._placing = True\n print(\"placing True\")\n else:\n print(\"placing False\")\n\n if self._picked and not self._placing:\n print(\"return True\")\n return True\n\n self._picked = True\n\n if self._placing:\n if self._prev_box_pos is None:\n self._prev_box_pos 
= box_pos\n else:\n max_z_diff = 0.009\n z_diff = self._prev_box_pos[2] - box_pos[2]\n print(\"****[pick&place/is_fail] z_diff:{}, box_pos_z:{}\".format(z_diff, box_pos[2]))\n print(self._prev_box_pos[2], box_pos[2])\n if box_pos[2] < 0.24 and (abs_diff[0] > max_xy_diff or abs_diff[1] > max_xy_diff or z_diff < max_z_diff):\n print(\"return True\")\n return True\n else:\n self._prev_box_pos = box_pos\n return False\n\n def get_next_primitive(self, obs, prev_primitive):\n if prev_primitive == -1:\n return 'pick'\n return 'place'\n\n def reset(self):\n self._picked = False\n self._placing = False\n self._prev_box_pos = None\n\n","sub_path":"sawyer/mujoco/tasks/transition_pick_and_place_task.py","file_name":"transition_pick_and_place_task.py","file_ext":"py","file_size_in_byte":5574,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"386507902","text":"from django.conf import settings\r\nfrom sermepa.models import SermepaIdTPV\r\n\r\nimport json, hashlib, base64, hmac\r\nfrom Crypto.Cipher import DES3\r\n\r\n\r\ndef get_decoded_parameters(ds_merchantparameters):\r\n return eval(base64.standard_b64decode(ds_merchantparameters))\r\n\r\n\r\ndef get_encoded_parameters(parameters):\r\n return base64.b64encode(parameters.encode())\r\n\r\n\r\nclass RedsysCrypto:\r\n def __init__(self, amount, order_uuid):\r\n self.amount = amount\r\n self.merchant_order = SermepaIdTPV.objects.new_idtpv()\r\n self.order_uuid = order_uuid\r\n\r\n def generate_unique_key(self, order):\r\n \"\"\"\r\n Generate a specific key per operation.\r\n :param order: The Ds_Merchant_Order number\r\n :return: a specific key for the operation\r\n \"\"\"\r\n # create cipher with decrypted secret key\r\n decoded_secret_key = base64.standard_b64decode(\r\n settings.SERMEPA_SECRET_KEY)\r\n cipher = DES3.new(decoded_secret_key, DES3.MODE_CBC,\r\n IV=b'\\0\\0\\0\\0\\0\\0\\0\\0')\r\n # adjust Ds_Merchant_Order size to multiple of 8\r\n order = order.ljust(16, '\\0')\r\n return cipher.encrypt(order.encode(\"utf-8\"))\r\n\r\n def generate_form(self):\r\n \"\"\"\r\n Generates Ds_Signature and Ds_MerchantParameters\r\n :param transaction: the Transaction object\r\n :return: { Ds_SignatureVersion, Ds_Signature, Ds_MerchantParameters)\r\n \"\"\"\r\n Ds_SignatureVersion = 'HMAC_SHA256_V1'\r\n parameters = json.dumps({\r\n 'DS_MERCHANT_ORDER': self.merchant_order,\r\n 'DS_MERCHANT_TRANSACTIONTYPE': 0,\r\n 'DS_MERCHANT_CURRENCY': settings.SERMEPA_CURRENCY,\r\n 'DS_MERCHANT_URLOK': settings.PAYMENT_RETURN_URL.format(\r\n self.order_uuid),\r\n 'DS_MERCHANT_URLKO': settings.PAYMENT_CANCEL_RETURN,\r\n 'DS_MERCHANT_MERCHANTCODE': settings.SERMEPA_MERCHANT_CODE,\r\n 'DS_MERCHANT_MERCHANTURL': settings.SERMEPA_IPN,\r\n 'DS_MERCHANT_MERCHANTDATA': str(self.order_uuid),\r\n 'DS_MERCHANT_TERMINAL': settings.SERMEPA_TERMINAL,\r\n 'DS_MERCHANT_AMOUNT': self.amount\r\n })\r\n Ds_MerchantParameters = base64.b64encode(parameters.encode())\r\n\r\n unique_key = self.generate_unique_key(self.merchant_order)\r\n\r\n # plain_text = Ds_MerchantParameters + unique_key\r\n # sha256 = hashlib.sha256(plain_text).digest()\r\n # Ds_Signature = base64.b64encode(sha256)\r\n Ds_Signature = base64.b64encode(hmac.new(\r\n unique_key, Ds_MerchantParameters, hashlib.sha256).digest())\r\n return {\r\n 'Ds_SignatureVersion': Ds_SignatureVersion,\r\n 'Ds_MerchantParameters': Ds_MerchantParameters,\r\n 'Ds_Signature': Ds_Signature\r\n }\r\n\r\n def generate_notification_signature(self, ds_merchantparameters):\r\n parameters = 
get_decoded_parameters(ds_merchantparameters)\r\n ds_order = parameters['Ds_Order']\r\n unique_key = self.generate_unique_key(ds_order)\r\n return base64.urlsafe_b64encode(hmac.new(\r\n unique_key, ds_merchantparameters, hashlib.sha256).digest())\r\n\r\n def is_notification_valid(self, ds_signature, ds_merchantparameters):\r\n return self.generate_notification_signature(\r\n ds_merchantparameters) == ds_signature\r\n","sub_path":"sermepa/RedsysCrypto.py","file_name":"RedsysCrypto.py","file_ext":"py","file_size_in_byte":3360,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"378669476","text":"import re\nimport os\nimport sys\n\nif len(sys.argv) == 3:\n log1 = sys.argv[1]\n log2 = sys.argv[2]\nelse:\n log1 = \"1.log\"\n log2 = \"2.log\"\n\nxy_pat = re.compile(r'x = (.*), y = (.*)')\n\n\ndef readLog(logFile):\n xys = []\n with open(logFile) as f:\n for line in f:\n xy = xy_pat.search(line)\n if xy:\n xys.append([float(xy.group(1)), float(xy.group(2))])\n return xys\n\n\nxys1 = readLog(log1)\nxys2 = readLog(log2)\n\nm = min([len(xys1),len(xys2)])\noutfile = 'diff-'+log1+'-'+log2\nwith open(outfile, 'w') as f:\n for i in range(m):\n dx = xys1[i][0] - xys2[i][0]\n dy = xys1[i][1] - xys2[i][1]\n f.write('dx = %.3f\\t dy = %.3f\\n' % (dx, dy))\n\nprint(\"---completed!---\")\nprint(\"Difference between %s and %s has been writen into %s!\" %\n (log1, log2, outfile))\n","sub_path":"diff_hlm/diff_hlm.py","file_name":"diff_hlm.py","file_ext":"py","file_size_in_byte":827,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"246477766","text":"from unittest.mock import patch\n\nfrom app.lib.slack import Slack\nfrom app.tests.base_testcase import BaseTestCase\n\n\nclass SlackTestCase(BaseTestCase):\n\n def setUp(self):\n dummy_url = 'DUMMY_URL'\n self.slack = Slack(dummy_url)\n\n @patch('app.lib.slack.slack.slackweb.Slack.notify')\n def test_notify(self, m_notify):\n payload = {'test': 'TEST'}\n self.slack.notify(payload)\n\n m_notify.assert_called_with(test='TEST')\n\n @patch('app.lib.slack.slack.Slack.notify')\n def test_notify_fetchrss(self, m_notify):\n message = 'TEST MESSAGE'\n self.slack.notify_fetchrss(message)\n\n m_notify.assert_called_with({\n 'username': 'Jaaxman',\n 'channel': '#dev_notifications',\n 'attachments': [{\n 'title': 'job (fetchrss)',\n 'text': 'TEST MESSAGE',\n 'color': 'good',\n 'mrkdwn_in': ['text', 'pretext'],\n }],\n })\n","sub_path":"backend/app/tests/test_lib/test_slack/test_slack.py","file_name":"test_slack.py","file_ext":"py","file_size_in_byte":970,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"342583641","text":"import collections\nimport json\nimport logging\nimport pickle as pk\nimport numpy as np\nimport multiprocessing\n\nlogging.basicConfig(\n format='%(asctime)s : %(levelname)s : %(message)s',\n level=logging.INFO)\n\n# { 위치, 날짜, 조직, 시간, 사람, 그 외 }\nNE_DICT = {\n 'LC': [1, 0, 0, 0, 0, 0],\n 'DT': [0, 1, 0, 0, 0, 0],\n 'OG': [0, 0, 1, 0, 0, 0],\n 'TI': [0, 0, 0, 1, 0, 0],\n 'PS': [0, 0, 0, 0, 1, 0],\n 'O': [0, 0, 0, 0, 0, 1]\n}\n\n\nclass ExoCorpusParser(object):\n def __init__(self, path):\n '''\n 개행 기준으로 나눈 문장의 token\n 형태소\n 형태소 태그\n 개체명 라벨\n '''\n\n self.x_ne = list()\n self.x_pos = list()\n self.x_mor = list()\n self.x_split = list()\n\n # raw corpus 저장\n with open(path, 'r', encoding='UTF8') as file:\n while True:\n line = file.readline()\n line = line.strip()\n if not line: break\n 
print(line)","sub_path":"exo_corpus_parser.py","file_name":"exo_corpus_parser.py","file_ext":"py","file_size_in_byte":1022,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"584857624","text":"# A min heap is a Complete binary Tree in which the value in each internal node is maller than or equal to the values\n# in the children of that node.\n# Mapping the Elements of a Heap into an array is trivial : If a node is stored at 'k' index, then its left child is\n# stored at index '2k + 1' and its right child at index '2k + 2'.\n\n\nclass MinHeap:\n\n size = 0\n maxsize = 0\n heap = [0]\n front = 1\n\n def __init__(self, maxsize):\n self.maxsize = maxsize\n self.size = 0\n MinHeap.heap[0] = -1\n\n @staticmethod\n def parent(pos):\n return pos // 2\n\n @staticmethod\n def leftChild(pos):\n return 2 * pos\n\n @staticmethod\n def rightChild(pos):\n return (2 * pos) + 1\n\n def isLeaf(self, pos):\n if (pos >= (self.size / 2)) and (pos <= self.size):\n return True\n else:\n return False\n\n def swap(self, p1, p2):\n MinHeap.heap[p1], MinHeap.heap[p2] = MinHeap.heap[p2], MinHeap.heap[p1]\n\n def minHeapify(self, pos):\n if not self.isLeaf(pos):\n if (MinHeap.heap[pos] > MinHeap.heap[self.leftChild(pos)]) or (MinHeap.heap[pos] > MinHeap.heap[self.rightChild(pos)]):\n if MinHeap.heap[self.leftChild(pos)] < MinHeap.heap[self.rightChild(pos)]:\n self.swap(pos, self.leftChild(pos))\n self.minHeapify(self.leftChild(pos))\n else:\n self.swap(pos, self.rightChild(pos))\n self.minHeapify(self.rightChild(pos))\n\n def insert(self, element):\n if self.size >= self.maxsize:\n return\n MinHeap.heap.append(element)\n self.size += 1\n current = self.size\n\n while MinHeap.heap[current] < MinHeap.heap[self.parent(current)]:\n self.swap(current, self.parent(current))\n\n def remove(self):\n popped = MinHeap.heap[self.front]\n self.size -= 1\n MinHeap.heap[self.front] = MinHeap.heap[self.size]\n self.minHeapify(self.front)\n return popped\n\n def print(self):\n for i in range(1, int(self.size/2)+1):\n print(\"PARENT : \", MinHeap.heap[i], \" LEFT CHILD : \", MinHeap.heap[2 * i], \" RIGHT CHILD : \", MinHeap.heap[2 * i + 1])\n\n def minHeap(self):\n for i in range(self.size//2, 1, -1):\n self.minHeapify(i)\n\n\nmHeap = MinHeap(15)\nmHeap.insert(5)\nmHeap.insert(3)\nmHeap.insert(17)\nmHeap.insert(10)\nmHeap.insert(84)\nmHeap.insert(19)\nmHeap.insert(6)\nmHeap.insert(22)\nmHeap.insert(9)\nmHeap.minHeap()\n\nmHeap.print()\nprint(mHeap.heap)\nprint(\"Min Value is : \", mHeap.remove())\nprint(mHeap.heap)\n","sub_path":"Coding/Competitive_Coding/Min_Heap.py","file_name":"Min_Heap.py","file_ext":"py","file_size_in_byte":2615,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"510271090","text":"import os\nimport sys\nlib_path = os.path.abspath(os.path.join('main'))\nsys.path.append(lib_path)\nfrom cartesianCoordinate import CartesianCoordinate\nfrom line import Line\nimport pytest\n\n\ndef testCalculateXCoorDiff():\n a = CartesianCoordinate(3, 4)\n b = CartesianCoordinate(2, 5)\n line = Line()\n assert line.calculateXCoorDiff(a, b) == 1\n\n\ndef testCalculateYCoorDiff():\n a = CartesianCoordinate(3, 4)\n b = CartesianCoordinate(2, 5)\n line = Line()\n assert line.calculateYCoorDiff(a, b) == -1\n\n\ndef testSlope():\n a = CartesianCoordinate(3, 4)\n b = CartesianCoordinate(2, 5)\n line = Line()\n assert line.slope(a, b) == -1\n\n\ndef testSlopeI():\n a = CartesianCoordinate(3, 0)\n b = CartesianCoordinate(0, 2)\n line = 
Line()\n    assert line.slope(a, b) == (-2 / 3)\n\n\ndef testSlopeZeroDivision():\n    with pytest.raises(ZeroDivisionError):\n        a = CartesianCoordinate(3, 6)\n        b = CartesianCoordinate(3, 7)\n        line = Line()\n        line.slope(a, b)\n\n\ndef testYIntercept():\n    a = CartesianCoordinate(3, 4)\n    b = CartesianCoordinate(2, 5)\n    line = Line()\n    assert line.yIntercept(a, b) == 7\n\n\ndef testYInterceptI():\n    a = CartesianCoordinate(3, 0)\n    b = CartesianCoordinate(0, 2)\n    line = Line()\n    assert line.yIntercept(a, b) == 2\n\n\ndef testCalculateConstants():\n    a = CartesianCoordinate(2, 4)\n    b = CartesianCoordinate(0, 2)\n    line = Line()\n    line.calculateConstants(a, b)\n    assert line.listABC[0] == 1\n    assert line.listABC[1] == -1\n    assert line.listABC[2] == -2\n\n\ndef testCalculateConstantsI():\n    a = CartesianCoordinate(3, 0)\n    b = CartesianCoordinate(0, 2)\n    line = Line()\n    line.calculateConstants(a, b)\n    assert line.listABC[0] == 2\n    assert line.listABC[1] == 3\n    assert line.listABC[2] == 6\n","sub_path":"tests/lineTest.py","file_name":"lineTest.py","file_ext":"py","file_size_in_byte":1780,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"211232255","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nu\"\"\"The Salzburg Python User Group’s website.\n\nThis module contains the entire Salzburg Python User Group’s website as a WSGI\napplication built on Flask. It works on both Python 2 and Python 3. Running\nit as a standalone script launches the Flask development web server via the\nFlask-Script extension.\n\nUsage:\n * salzpug.py runserver [-h ] [-p ] [-d] [-r]\n * salzpug.py shell\n * salzpug.py --help\n\nOptions:\n -h the hostname to listen on (default: 127.0.0.1)\n -p the port of the web server (default: 5000)\n -d use the Werkzeug debugger\n -r reload the web server if the script changes\n\nThis module depends on the following packages:\n\n* Flask\n* Flask-FlatPages\n* Flask-Script (if run as a standalone script)\n* Pygments\n\nRun ``pip install -r requirements.txt`` to install all dependencies.\n\"\"\"\n\nimport collections\nimport datetime\nimport math\n\nfrom flask import (Flask, render_template, render_template_string, request,\n                   url_for, abort)\nfrom flask_flatpages import FlatPages, pygmented_markdown, pygments_style_defs\nfrom markupsafe import Markup\n\n\n# Configuration\n# =============\n\nclass Config(object):\n    u\"\"\"The default configuration for the Flask application.\n\n    Only uppercase variables in this class will be stored in the configuration\n    object.\n    \"\"\"\n\n    @staticmethod\n    def prerender_jinja(text):\n        u\"\"\"Render Markdown with Jinja directives to HTML.\n\n        Use this function to configure ``FLATPAGES_HTML_RENDERER``, to support\n        evaluating Jinja directives embedded within the Markdown document.\n        \"\"\"\n        prerendered_body = render_template_string(Markup(text))\n        return pygmented_markdown(prerendered_body)\n\n    # Flask-FlatPages configuration\n    FLATPAGES_AUTO_RELOAD = False\n    FLATPAGES_EXTENSION = '.md'\n    FLATPAGES_HTML_RENDERER = prerender_jinja\n    PYGMENTS_STYLE = 'tango'\n\n    # Custom configuration\n    ARTICLES_PER_PAGE = 3\n\n\n# Application setup\n# =================\n\napplication = app = Flask(__name__)\napp.config.from_object(Config)\npages = FlatPages(app)\n\n\n# Template globals & filters\n# ==========================\n\n@app.template_global()\ndef image(src, alt, title='', class_name='', id=''):\n    u\"\"\"Generate a HTML5 ``figure`` element with an image.\n\n    :param src: the image’s URL. 
If it does not start with ``http://`` or\n ``https://``, it is assumed to be a path within the\n ``static/images`` directory.\n :param alt: the text alternative for the image, used as ``alt`` attribute\n on the ``img`` tag.\n :param title: the image’s caption. If given, a ``figcaption`` element\n will be added to the ``figure``.\n :param class_name: the CSS class name(s) to be added to the ``figure``\n element, as a single string. If empty, no class names\n will be set.\n :param id: the value of the ``id`` attribute for the ``figure`` element.\n If empty, no ID will be set.\n \"\"\"\n return render_template('figure.xhtml', src=src, alt=alt, title=title,\n class_name=class_name, id=id)\n\n\n@app.template_global()\ndef pagination_url(page):\n u\"\"\"Return the current view’s URL, using a different ``page`` parameter.\n\n All parameters and arguments of the current URL other than ``page`` will\n remain the same. This is useful for paginated views that split their\n contents over several pages.\n\n :param page: the new value for the ``page`` parameter of the current\n view’s URL.\n \"\"\"\n args = request.view_args.copy()\n args['page'] = page\n return url_for(request.endpoint, **args)\n\n\n@app.template_filter('date')\ndef format_date(date, format_string):\n u\"\"\"Convert a :py:class:`date` or :py:class:`datetime` to a string.\n\n :param date: the date to be formatted. If ``None``, it will use the\n current UTC date and time.\n :param format_string: a string with format codes to control the string\n representation of the date. Consult the\n ``strftime()`` documentation for details.\n \"\"\"\n if date is None:\n date = datetime.datetime.utcnow()\n return date.strftime(format_string)\n\n\n# View functions\n# ==============\n\n@app.route('/', defaults={'page': 1})\n@app.route('/page//')\ndef index(page):\n u\"\"\"Render a page with all articles in reversed chronological order.\n\n Configure ``ARTICLES_PER_PAGE`` to control how many articles shall be\n displayed on one page.\n \"\"\"\n articles = blog_articles()\n pagination = Pagination(articles, page, app.config['ARTICLES_PER_PAGE'])\n if not pagination.items and page != 1:\n abort(404)\n return render_template('index.xhtml', pagination=pagination)\n\n\n@app.route('//')\ndef show_page(path):\n u\"\"\"Render a static page from the FlatPages collection.\n\n :param path: the path to the requested page, relative to\n ``FLATPAGES_ROOT``.\n :raises: :class:`~werkzeug.exceptions.NotFound` if there is no page at the\n given path.\n \"\"\"\n return render_template('page.xhtml', page=pages.get_or_404(path))\n\n\n@app.route('/tags/')\ndef tag_list():\n u\"\"\"Render a page listing all available article tags.\"\"\"\n tags = collections.Counter([tag for article in blog_articles()\n for tag in article.meta.get('tags', [])])\n return render_template('tag_list.xhtml', tags=tags)\n\n\n@app.route('/tag//')\ndef show_tag(tag):\n u\"\"\"Render a page with the titles of all articles with a given tag.\"\"\"\n articles = (a for a in blog_articles() if tag in a.meta.get('tags', []))\n return render_template('show_tag.xhtml', tag=tag, articles=articles)\n\n\n@app.route('/archives/')\ndef archives():\n u\"\"\"Render a page with the titles of all articles, grouped by year.\"\"\"\n return render_template('archives.xhtml', articles=blog_articles())\n\n\n@app.route('/pygments.css')\ndef pygments_css():\n u\"\"\"Return the stylesheet for the selected Pygments style.\"\"\"\n style = app.config['PYGMENTS_STYLE']\n return pygments_style_defs(style), 200, {'Content-Type': 
'text/css'}\n\n\n# Error handling\n# ==============\n\n@app.errorhandler(404)\ndef not_found(error):\n u\"\"\"Render the “Error 404: Not Found” page.\"\"\"\n return show_page('error-404'), 404\n\n\n# Auxiliary functions\n# ===================\n\ndef blog_articles():\n u\"\"\"Return all blog articles in reversed chronological order.\n\n An “article” is supposed to be a page from the FlatPages collection that\n has the “published” attribute in its metadata.\n \"\"\"\n return reversed(sorted((p for p in pages if 'published' in p.meta),\n key=lambda p: p.meta['published']))\n\n\n# Pagination\n# ==========\n\nclass Pagination(object):\n u\"\"\"A simple class to paginate an iterable.\n\n This class is inspired by Armin Ronacher’s snippet at\n http://flask.pocoo.org/snippets/44/.\n \"\"\"\n\n def __init__(self, iterable, page, per_page):\n u\"\"\"Create a new ``Pagination`` object.\n\n :param iterable: the iterable to paginate.\n :param page: the current page number.\n :param per_page: the number of items of the iterable to put on each\n page.\n \"\"\"\n self.all_items = list(iterable)\n self.page = page\n self.per_page = per_page\n\n @property\n def items(self):\n u\"\"\"All items of the iterable that are on the current page.\n\n >>> pagination = Pagination('ABCDEFGHIJKLMNOPQRS', page=5, per_page=2)\n >>> pagination.items\n ['I', 'J']\n \"\"\"\n lower_index = (self.page-1) * self.per_page\n upper_index = self.page * self.per_page\n return self.all_items[lower_index:upper_index]\n\n @property\n def pages(self):\n u\"\"\"The total number of pages.\n\n >>> pagination = Pagination('ABCDEFGHIJKLMNOPQRS', page=5, per_page=2)\n >>> pagination.pages\n 10\n \"\"\"\n return int(math.ceil(len(self.all_items) / float(self.per_page)))\n\n @property\n def has_prev(self):\n u\"\"\"``True`` if the current page is not the first page.\n\n >>> pagination = Pagination('ABCDEFGHIJKLMNOPQRS', page=5, per_page=2)\n >>> pagination.has_prev\n True\n \"\"\"\n return self.page > 1\n\n @property\n def has_next(self):\n u\"\"\"``True`` if the current page is not the last page.\n\n >>> pagination = Pagination('ABCDEFGHIJKLMNOPQRS', page=5, per_page=2)\n >>> pagination.has_next\n True\n \"\"\"\n return self.page < self.pages\n\n def iter_pages(self, left_edge=2, left_current=2,\n right_current=5, right_edge=2):\n u\"\"\"Return an iterator yielding page numbers to build a pagination.\n\n This function returns numbers similar to the builtin ``range()``,\n starting at ``1``, but omitting superfluous page numbers inbetween.\n “Holes” within the sequence are denoted by a single ``None`` value.\n\n >>> pagination = Pagination('ABCDEFGHIJKLMNOPQRS', page=5, per_page=2)\n >>> list(pagination.iter_pages(left_current=1, right_current=1))\n [1, 2, None, 4, 5, 6, None, 9, 10]\n\n :param left_edge: the number of page numbers to yield at the beginning\n of the pagination.\n :param left_current: the number of page numbers to yield before the\n current page.\n :param right_current: the number of page numbers to yield after the\n current page.\n :param right_edge: the number of page numbers to yield at the end of\n the pagination.\n \"\"\"\n last = 0\n for i in range(1, self.pages+1):\n if (i <= left_edge or i > self.pages-right_edge or\n self.page-left_current-1 < i <= self.page+right_current):\n if last + 1 != i:\n yield None\n yield i\n last = i\n\n\n# Development server\n# ==================\n\nif __name__ == '__main__':\n from flask.ext.script import Manager\n app.config.update({'FLATPAGES_AUTO_RELOAD': True})\n manager = Manager(app)\n 
manager.run()\n","sub_path":"salzpug.py","file_name":"salzpug.py","file_ext":"py","file_size_in_byte":10326,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"325188055","text":"# Copyright 2009 10gen, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for the son module.\"\"\"\n\nimport unittest\nimport datetime\nimport re\nimport sys\nsys.path[0:0] = [\"\"]\n\nfrom pymongo.objectid import ObjectId\nfrom pymongo.dbref import DBRef\nfrom pymongo.son import SON\n\n\nclass TestSON(unittest.TestCase):\n\n def setUp(self):\n pass\n\n def test_ordered_dict(self):\n a = SON()\n a[\"hello\"] = \"world\"\n a[\"mike\"] = \"awesome\"\n a[\"hello_\"] = \"mike\"\n self.assertEqual(a.items(), [(\"hello\", \"world\"),\n (\"mike\", \"awesome\"),\n (\"hello_\", \"mike\")])\n\n b = SON({\"hello\": \"world\"})\n self.assertEqual(b[\"hello\"], \"world\")\n self.assertRaises(KeyError, lambda: b[\"goodbye\"])\n\n def test_to_dict(self):\n a = SON()\n b = SON([(\"blah\", SON())])\n c = SON([(\"blah\", [SON()])])\n d = SON([(\"blah\", {\"foo\": SON()})])\n self.assertEqual({}, a.to_dict())\n self.assertEqual({\"blah\": {}}, b.to_dict())\n self.assertEqual({\"blah\": [{}]}, c.to_dict())\n self.assertEqual({\"blah\": {\"foo\": {}}}, d.to_dict())\n self.assertEqual(dict, a.to_dict().__class__)\n self.assertEqual(dict, b.to_dict()[\"blah\"].__class__)\n self.assertEqual(dict, c.to_dict()[\"blah\"][0].__class__)\n self.assertEqual(dict, d.to_dict()[\"blah\"][\"foo\"].__class__)\n\n def test_from_xml(self):\n smorgasbord = \"\"\"\n\n \n \n 285a664923b5fcd8ec000000\n 42\n foo\n true\n 3.14159265358979\n \n x\n y\n z\n \n yup\n \n \n 123144452057\n \n namespace\n ca5c67496c01d896f7010000\n \n this is code\n \n \n\n\"\"\"\n self.assertEqual(SON.from_xml(smorgasbord),\n SON([(u\"_id\", ObjectId(\"\\x28\\x5A\\x66\\x49\\x23\\xB5\\xFC\"\n \"\\xD8\\xEC\\x00\\x00\\x00\")),\n (u\"the_answer\", 42),\n (u\"b\", u\"foo\"),\n (u\"c\", True),\n (u\"pi\", 3.14159265358979),\n (u\"an_array\", [u\"x\", u\"y\", u\"z\",\n SON([(u\"subobject\", u\"yup\")])]),\n (u\"now\", datetime.datetime(1973, 11, 26, 6,\n 47, 32, 57000)),\n (u\"dbref\",\n DBRef(\"namespace\",\n ObjectId(\"\\xCA\\x5C\\x67\\x49\\x6C\\x01\"\n \"\\xD8\\x96\\xF7\\x01\\x00\\x00\"))),\n (u\"$where\", \"this is code\"),\n (u\"mynull\", None),\n ]))\n\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"test/test_son.py","file_name":"test_son.py","file_ext":"py","file_size_in_byte":3793,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"413290719","text":"from typing import Dict\n\nimport dask.dataframe as dd\n\nfrom dask_sql.physical.rex import RexConverter\nfrom dask_sql.physical.rel.base import BaseRelPlugin\n\n\nclass LogicalFilterPlugin(BaseRelPlugin):\n \"\"\"\n LogicalFilter is used on WHERE clauses.\n We just evaluate the filter (which is of type RexNode) and apply it\n \"\"\"\n\n class_name = 
\"org.apache.calcite.rel.logical.LogicalFilter\"\n\n def convert(\n self, rel: \"org.apache.calcite.rel.RelNode\", tables: Dict[str, dd.DataFrame]\n ) -> dd.DataFrame:\n (df,) = self.assert_inputs(rel, 1, tables)\n self.check_columns_from_row_type(df, rel.getExpectedInputRowType(0))\n\n condition = rel.getCondition()\n df_condition = RexConverter.convert(condition, df)\n df = df[df_condition]\n\n df = self.fix_column_to_row_type(df, rel.getRowType())\n return df\n","sub_path":"dask_sql/physical/rel/logical/filter.py","file_name":"filter.py","file_ext":"py","file_size_in_byte":860,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"115855585","text":"import pygame \nfrom objects import board, ball\nimport constants as const \n\npygame.init()\nscreen = pygame.display.set_mode(const.size)\nclock = pygame.time.Clock()\nboard = board.Board('assets/board.png')\nball = ball.Ball('assets/ball.png', board)\n \nwhile True:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n exit()\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_SPACE:\n const.is_ball_go = True\n screen.fill(const.BLACK)\n screen.blit(board.image, board.rect)\n screen.blit(ball.image, ball.rect)\n board.update()\n ball.update()\n pygame.display.update()\n clock.tick(const.FPS)","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":679,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"56675863","text":"# -*- coding: utf-8 -*-\nimport logging\nimport bs4\n\nimport lxml.html\nimport re\nfrom lxml import etree\nimport nltk\nfrom scrapers.utils import time_to_datetime, get_hash, get_article, get_sha_hash, get_rss\n\n\nlogger = logging.getLogger(\"scraper.24ur\")\n\nclass TwentyFourHrsScraper(object):\n\n TFH_RSS_URL = \"http://www.24ur.com/rss/\"\n\n def parse_source(self, existing_ids=None):\n news = []\n feed_content = get_rss(self.TFH_RSS_URL)\n for feed_entry in feed_content.entries:\n link = feed_entry[\"link\"]\n\n if existing_ids and (get_hash(link) in existing_ids or get_sha_hash(link) in existing_ids):\n logger.debug(\"Skipping %s\", link)\n continue\n\n published_date = time_to_datetime(feed_entry[\"published_parsed\"])\n title = feed_entry[\"title\"]\n\n news.append((link, {\"published\": published_date, \"title\": title}))\n\n return news\n\n def parse_article(self, article_url):\n link, data = article_url\n\n article = self.get_article(link)\n if article is None: return\n\n published_date = data[\"published\"]\n article[\"title\"] = data[\"title\"]\n article[\"published\"] = published_date\n article[\"source\"] = \"24ur\"\n article[\"source_url\"] = link\n article[\"language\"] = \"si\"\n # Generate ID from link\n article[\"id\"] = get_sha_hash(link)\n return article\n\n def get_article(self, link):\n logger.debug(\"Grabbing article %s\", link)\n\n article_html = get_article(link.replace(\"24ur.com\", \"www.24ur.com\"))\n result = {}\n result[\"raw_html\"] = article_html\n tree = etree.fromstring(article_html, etree.HTMLParser())\n summary = tree.xpath('//div[@class=\"summary\"]/p/text()')\n result[\"subtitles\"] = unicode(summary)\n\n author_texts = tree.xpath(\"//div[@class='containerLeftSide']/text()\")\n author_text = u\" \".join(text.strip() for text in author_texts)\n if u\"|\" in author_text:\n author = author_text[author_text.rfind('|'):]\n else:\n author = None\n\n result[\"author\"] = author\n\n # Elaborate way of getting rid of all script tags and other 
garbage in this HTML. Looking for\n # a better way.\n content = tree.xpath(\"//div[@id='content']\")\n if len(content) == 0:\n return None\n\n\n\n text = re.sub(\"\\s\\s+\", \" \", bs4.BeautifulSoup(lxml.html.tostring(content[0], encoding=\"utf-8\").decode(\"utf-8\")).get_text())\n result[\"text\"] = text\n if u\"Preverite vpisani naslov ali uporabite možnost iskanja po naših straneh.\" in result[\"text\"]:\n return None\n return result\n","sub_path":"news_buddy/scrapers/tfhrs_scraper.py","file_name":"tfhrs_scraper.py","file_ext":"py","file_size_in_byte":2691,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"20037668","text":"import porespy as ps\nimport numpy as np\nimport pytest\n\n\nclass NetExtractTest():\n def setup_class(self):\n self.im = ps.generators.blobs(shape=[300, 300])\n self.snow = ps.filters.snow_partitioning(self.im, return_all=True)\n self.im3d = ps.generators.blobs(shape=[50, 50, 50])\n self.snow3d = ps.filters.snow_partitioning(self.im3d, return_all=True)\n\n def test_regions_to_network(self):\n im = self.snow.regions*self.im\n net = ps.network_extraction.regions_to_network(im)\n found_nans = False\n for key in net.keys():\n if np.any(np.isnan(net[key])):\n found_nans = True\n assert found_nans is False\n\n def test_snow_2D(self):\n a = np.unique(self.snow.peaks*self.im)\n b = np.unique(self.snow.regions*self.im)\n assert len(a) == len(b)\n\n def test_snow_3d(self):\n a = np.unique(self.snow3d.peaks*self.im3d)\n b = np.unique(self.snow3d.regions*self.im3d)\n assert len(a) == len(b)\n\n def test_extract_pore_network_3d(self):\n im = self.snow3d.regions*self.im3d\n net = ps.network_extraction.regions_to_network(im)\n found_nans = False\n for key in net.keys():\n if np.any(np.isnan(net[key])):\n found_nans = True\n assert found_nans is False\n\n def test_snow(self):\n net = ps.network_extraction.snow(self.im3d)\n found_nans = False\n for key in net.keys():\n if np.any(np.isnan(net[key])):\n found_nans = True\n assert found_nans is False\n\n# def test_snow_dual_2d(self):\n# net = ps.network_extraction.snow_dual(self.im)\n# found_nans = False\n# for key in net.keys():\n# if np.any(np.isnan(net[key])):\n# found_nans = True\n# assert found_nans is False\n\n def test_snow_dual_3d(self):\n net = ps.network_extraction.snow_dual(self.im3d)\n found_nans = False\n for key in net.keys():\n if np.any(np.isnan(net[key])):\n found_nans = True\n assert found_nans is False\n\n def test_add_bounadary_regions_2D(self):\n im = self.im\n regions = ps.filters.snow_partitioning(im)\n f = ['left', 'right']\n bd = ps.network_extraction.add_boundary_regions(regions, faces=f)\n assert bd.shape[0] > regions.shape[0]\n f = ['bottom', 'top']\n bd = ps.network_extraction.add_boundary_regions(regions, faces=f)\n assert bd.shape[1] > regions.shape[1]\n f = ['front', 'back']\n bd = ps.network_extraction.add_boundary_regions(regions, faces=f)\n assert bd.shape[1] > regions.shape[1]\n f = ['bottom', 'top', 'left', 'right', 'front', 'back']\n bd = ps.network_extraction.add_boundary_regions(regions, faces=f)\n assert bd.shape[0] > regions.shape[0]\n assert bd.shape[1] > regions.shape[1]\n\n def test_add_bounadary_regions_3D(self):\n im = self.im3d\n regions = ps.filters.snow_partitioning(im)\n f = ['left', 'right']\n bd = ps.network_extraction.add_boundary_regions(regions, faces=f)\n assert bd.shape[0] > regions.shape[0]\n f = ['front', 'back']\n bd = ps.network_extraction.add_boundary_regions(regions, faces=f)\n assert bd.shape[1] > regions.shape[1]\n f 
= ['bottom', 'top']\n bd = ps.network_extraction.add_boundary_regions(regions, faces=f)\n assert bd.shape[2] > regions.shape[2]\n f = ['bottom', 'top', 'left', 'right', 'front', 'back']\n bd = ps.network_extraction.add_boundary_regions(regions, faces=f)\n assert bd.shape[0] > regions.shape[0]\n assert bd.shape[1] > regions.shape[1]\n assert bd.shape[2] > regions.shape[2]\n\n def test_map_to_regions(self):\n im = self.im\n regions = ps.filters.snow_partitioning(im)\n values = np.random.rand(regions.max() + 1)\n mapped = ps.network_extraction.map_to_regions(regions, values)\n assert mapped.max() < 1\n # Some failures\n values = np.random.rand(regions.max())\n with pytest.raises(Exception):\n mapped = ps.network_extraction.map_to_regions(regions, values)\n values = np.random.rand(regions.max()+2)\n with pytest.raises(Exception):\n mapped = ps.network_extraction.map_to_regions(regions, values)\n\n def test_planar_2d_image(self):\n np.random.seed(1)\n im1 = ps.generators.blobs([100, 100, 1])\n np.random.seed(1)\n im2 = ps.generators.blobs([100, 1, 100])\n np.random.seed(1)\n im3 = ps.generators.blobs([1, 100, 100])\n np.random.seed(1)\n snow_out1 = ps.filters.snow_partitioning(im1, return_all=True)\n pore_map1 = snow_out1.im * snow_out1.regions\n net1 = ps.network_extraction.regions_to_network(im=pore_map1,\n dt=snow_out1.dt,\n voxel_size=1)\n np.random.seed(1)\n snow_out2 = ps.filters.snow_partitioning(im2, return_all=True)\n pore_map2 = snow_out2.im * snow_out2.regions\n net2 = ps.network_extraction.regions_to_network(im=pore_map2,\n dt=snow_out2.dt,\n voxel_size=1)\n np.random.seed(1)\n snow_out3 = ps.filters.snow_partitioning(im3, return_all=True)\n pore_map3 = snow_out3.im * snow_out3.regions\n net3 = ps.network_extraction.regions_to_network(im=pore_map3,\n dt=snow_out3.dt,\n voxel_size=1)\n assert np.allclose(net1['pore.coords'][:, 0],\n net2['pore.coords'][:, 0])\n assert np.allclose(net1['pore.coords'][:, 1],\n net2['pore.coords'][:, 2])\n assert np.allclose(net1['pore.coords'][:, 0],\n net3['pore.coords'][:, 1])\n\n\nif __name__ == '__main__':\n t = NetExtractTest()\n self = t\n t.setup_class()\n for item in t.__dir__():\n if item.startswith('test'):\n print('running test: '+item)\n t.__getattribute__(item)()\n","sub_path":"test/unit/test_network_extraction.py","file_name":"test_network_extraction.py","file_ext":"py","file_size_in_byte":6260,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"26804271","text":"import numpy as np\n\n\nclass GradientsScreener(object):\n\n model = None\n monitor_every_steps = 5\n current_step = None\n writer = None\n\n\n gradients = {}\n weights = {}\n\n\n def __init__(self, model, writer, monitor_every_steps = 5):\n super().__init__()\n self.model = model\n self. 
monitor_every_steps = monitor_every_steps\n self.writer = writer\n\n\n def monitor(self, step):\n start = True\n for n, p in self.model.named_parameters():\n grad = p.grad.clone().detach().numpy()\n weight = p.clone().detach().numpy()\n if step == 0:\n self.gradients[n] = grad\n self.weights[n] = weight\n\n else:\n if (step > self.monitor_every_steps):\n ponder = self.monitor_every_steps-1\n else:\n ponder = step\n #gradients\n valgrad = self.gradients[n]\n newvalgrad = ((valgrad * ponder) + grad)/(ponder+1)\n self.gradients[n] = newvalgrad\n # weights\n valweight = self.weights[n]\n newvalweight = ((valweight * ponder) + weight) / (ponder + 1)\n self.weights[n] = newvalweight\n\n #writting to tensorboard\n for k in self.gradients.keys():\n self.writer.add_scalars('Gradients ' + k, {'Max':np.max(self.gradients[k]), 'Min':np.min(self.gradients[k])}, step)\n\n\n\n #printing some stats\n # print(\"Gradients\")\n # for d in self.gradients.keys():\n # print(d + ':max='+str(np.max(self.values[d]))+':min='+str(np.min(self.values[d])))\n # print(\"Weights\")\n # for d in self.weights.keys():\n # print(d + ':max=' + str(np.max(self.values[d])) + ':min=' + str(np.min(self.values[d])))\n\n\n\n\n\n\n","sub_path":"NetScreener.py","file_name":"NetScreener.py","file_ext":"py","file_size_in_byte":1839,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"8591276","text":"# This module tests if you can get friends of a person on Facebook\r\n\r\n\r\nimport facebook\r\n# Now I will import *.py file with my access token.\r\nimport personal_access_stuff\r\n\r\n\r\ntoken = personal_access_stuff.access_token\r\n\r\ngraph = facebook.GraphAPI(access_token=token, version = 2.7)\r\n\r\nuser = graph.request('/DonaldTrump')\r\n\r\nfriends = graph.request('/' + user['id'] + '/hours')\r\nprint(friends)","sub_path":"Coursework_part_2_tests/friends_test.py","file_name":"friends_test.py","file_ext":"py","file_size_in_byte":394,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"641259327","text":"import os\nimport os.path as osp\n\nimport chainer\nfrom imgaug import augmenters as iaa\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport random\nimport rospkg\nimport scipy.misc\nfrom sklearn.model_selection import train_test_split\nimport yaml\n\nfilepath = osp.dirname(osp.realpath(__file__))\n\n\nclass StowingDataset(chainer.dataset.DatasetMixin):\n mean_bgr = np.array([104.00698793, 116.66876762, 122.67891434])\n class_yamlpath = osp.join(filepath, 'data/label_names.yaml')\n\n def __init__(\n self,\n data_type='all',\n random_state=1234,\n resize_rate=0.5,\n test_size=0.1,\n cross_validation=False,\n loop=None,\n img_aug=False,\n with_damage=True,\n classification=False):\n assert data_type in ('train', 'val', 'all')\n\n # scrape dataset\n data_ids = self._get_data_ids()\n\n # split train/val data\n if data_type != 'all':\n if cross_validation:\n random.seed(random_state)\n random.shuffle(data_ids)\n test_num = int(len(data_ids) * test_size)\n start = loop * test_num\n end = (loop + 1) * test_num\n if len(data_ids) - end > test_num:\n ids_train = data_ids[:start] + data_ids[end:]\n ids_val = data_ids[start:end]\n else:\n ids_train = data_ids[:start]\n ids_val = data_ids[start:]\n else:\n ids_train, ids_val = train_test_split(\n data_ids, test_size=test_size, random_state=random_state)\n if data_type == 'train':\n self.data_ids = ids_train\n elif data_type == 'val':\n self.data_ids = ids_val\n else:\n 
self.data_ids = data_ids\n self.resize_rate = resize_rate\n if img_aug:\n self.aug = iaa.Sequential([\n iaa.Sometimes(\n 0.3,\n iaa.InColorspace(\n 'HSV',\n children=iaa.WithChannels([1, 2],\n iaa.Multiply([0.5, 2])))),\n iaa.Fliplr(0.5)])\n else:\n self.aug = False\n\n if with_damage:\n self.failure_label = np.array([\n 'singlearm_drop',\n 'singlearm_protrude',\n 'singlearm_damage',\n 'dualarm_drop',\n 'dualarm_protrude',\n 'dualarm_damage',\n ])\n else:\n self.failure_label = np.array([\n 'singlearm_drop',\n 'singlearm_protrude',\n 'dualarm_drop',\n 'dualarm_protrude',\n ])\n\n if classification:\n with open(self.class_yamlpath, 'r') as f:\n self.class_label = yaml.load(f)\n else:\n self.class_label = None\n\n def __len__(self):\n return len(self.data_ids)\n\n def img_to_datum(self, img):\n datum = img.astype(np.float32)\n datum = datum[:, :, ::-1] # RGB -> BGR\n datum -= self.mean_bgr\n datum = datum.transpose((2, 0, 1))\n return datum\n\n def datum_to_img(self, blob):\n bgr = blob.transpose((1, 2, 0))\n bgr += self.mean_bgr\n rgb = bgr[:, :, ::-1] # BGR -> RGB\n rgb = rgb.astype(np.uint8)\n return rgb\n\n def get_t(self, data_dir):\n label_file = osp.join(data_dir, 'label.txt')\n failure_labels = open(label_file, 'r').read().strip().split('\\n')\n t = np.zeros(len(self.failure_label), dtype=np.int32)\n tmp_cond = None\n for label_name in failure_labels:\n cond, state = label_name.split('_')\n if tmp_cond is not None:\n assert cond == tmp_cond\n if state != 'success':\n if state not in ['drop', 'protrude', 'damage']:\n print('Unknown label {0} in {1}'\n .format(label_name, label_file))\n t[np.where(self.failure_label == label_name)] = 1.0\n tmp_cond = cond\n half_length = len(self.failure_label) / 2\n if cond == 'singlearm':\n t[half_length:] = -1\n else:\n t[:half_length] = -1\n # return t, cond\n return t\n\n def get_average_t(self):\n singlearm_t = []\n dualarm_t = []\n for data_id in self.data_ids:\n data_dir = osp.join(self.dataset_dir, data_id)\n t = self.get_t(data_dir).astype(np.float32)\n half_length = len(self.failure_label) / 2\n if t[0] == -1:\n dualarm_t.append(t[half_length:])\n else:\n singlearm_t.append(t[:half_length])\n singlearm_average_t = sum(singlearm_t) / len(singlearm_t)\n dualarm_average_t = sum(dualarm_t) / len(dualarm_t)\n return singlearm_average_t, dualarm_average_t\n\n def get_baseline_acc(self):\n acc = []\n singlearm_average_t, dualarm_average_t = self.get_average_t()\n for data_id in self.data_ids:\n data_dir = osp.join(self.dataset_dir, data_id)\n t = self.get_t(data_dir)\n half_length = len(self.failure_label) / 2\n if t[0] == -1:\n t = t[half_length:]\n average_t = dualarm_average_t\n else:\n t = t[:half_length]\n average_t = singlearm_average_t\n if all((t > 0.5) == (average_t > 0.5)):\n acc.append(1)\n else:\n acc.append(0)\n return sum(acc) / float(len(acc))\n\n def get_example(self, i, masked=True):\n data_id = self.data_ids[i]\n\n data_dir = osp.join(self.dataset_dir, data_id)\n img_file = osp.join(data_dir, 'image_rect_color.png')\n mask_file = osp.join(data_dir, 'clipped_mask_rgb.png')\n img = scipy.misc.imread(img_file, mode='RGB')\n mask = scipy.misc.imread(mask_file, mode='L')\n\n # apply mask\n # img[mask < 128] = (0, 0, 0)\n if self.aug:\n img = self.aug.augment_image(img)\n if masked:\n img[mask < 128] = self.mean_bgr[::-1]\n if self.resize_rate < 1:\n img = scipy.misc.imresize(img, self.resize_rate)\n datum = self.img_to_datum(img)\n t = self.get_t(data_dir)\n if self.class_label is None:\n return datum, t\n else:\n with 
open(osp.join(data_dir, 'target.txt'), 'r') as f:\n                target = f.read().split('\\n')[0]\n            label_id = np.int32(self.class_label.index(target))\n            return datum, t, label_id\n\n    def visualize_example(self, i, masked=True):\n        if self.class_label is None:\n            datum, t = self.get_example(i, masked)\n        else:\n            datum, t, _ = self.get_example(i, masked)\n        img = self.datum_to_img(datum)\n        plt.imshow(img)\n        plt.axis('off')\n        plt.title(str(t.tolist()))\n        plt.show()\n\n    def _get_data_ids(self):\n        data_ids = []\n        for data_id in os.listdir(self.dataset_dir):\n            data_id = osp.join(self.dataset_dir, data_id)\n            if not osp.isdir(data_id):\n                continue\n            data_ids.append(data_id)\n        return data_ids\n\n\nclass SinglearmFailureDataset(StowingDataset):\n    def _get_data_ids(self):\n        data_ids = []\n        for data_id in self.data_ids:\n            data_dir = osp.join(self.dataset_dir, data_id)\n            t = self.get_t(data_dir).astype(np.float32)\n            half_length = len(self.failure_label) / 2\n            singlearm_t = t[:half_length]\n            if any(x == 1 for x in singlearm_t):\n                data_ids.append(data_id)\n        return data_ids\n\n\nclass DualarmDatasetV1(StowingDataset):\n    rospack = rospkg.RosPack()\n    dataset_dir = osp.join(\n        rospack.get_path('selective_dualarm_stowing'),\n        'dataset/v1')\n\n\nclass DualarmDatasetV2(StowingDataset):\n    rospack = rospkg.RosPack()\n    dataset_dir = osp.join(\n        rospack.get_path('selective_dualarm_stowing'),\n        'dataset/v2')\n\n\nclass DualarmDatasetV3(StowingDataset):\n    rospack = rospkg.RosPack()\n    dataset_dir = osp.join(\n        rospack.get_path('selective_dualarm_stowing'),\n        'dataset/v3')\n\n\nclass DualarmDatasetV4(StowingDataset):\n    rospack = rospkg.RosPack()\n    dataset_dir = osp.join(\n        rospack.get_path('selective_dualarm_stowing'),\n        'dataset/v4')\n\n\nclass DualarmDatasetV5(StowingDataset):\n    rospack = rospkg.RosPack()\n    dataset_dir = osp.join(\n        rospack.get_path('selective_dualarm_stowing'),\n        'dataset/v5')\n    class_yamlpath = osp.join(filepath, 'data/label_names_v5.yaml')\n\n\nclass SinglearmFailureDatasetV4(SinglearmFailureDataset):\n    rospack = rospkg.RosPack()\n    dataset_dir = osp.join(\n        rospack.get_path('selective_dualarm_stowing'),\n        'dataset/v4')\n","sub_path":"demos/selective_dualarm_stowing/python/selective_dualarm_stowing/datasets/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":8956,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"440971497","text":"#Stworzyc krotka z 10 liczb i znalezc min/max\n\nkrotka=(1,33,2,5,9,50,7,5,94,60)\n\nmin=krotka[0]\nmax=krotka[0]\n\nfor i in krotka:\n\tif (i<min):\n\t\tmin=i\n\tif (i>max):\n\t\tmax=i\n\nprint('Najmniejsza:', min)\nprint('Najwieksza:', max)","sub_path":"13.08 - zajecia 2/Zadania dla początkujących/7tuple.py","file_name":"7tuple.py","file_ext":"py","file_size_in_byte":222,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"409147903","text":"# test the spatial transform network\nimport os, pdb, torch, numpy as np\nimport torch.nn.functional as F\nfrom PIL import Image\nimport init_path\nimport matplotlib\n# Force matplotlib to not use any Xwindows backend.\n# matplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nfrom utils import crop2affine\nfrom datasets import pil_loader\nfrom models import np2variable, variable2np\n\ndef resize_crop_from_PIL(image, crop_box, resize):\n    image = image.copy()\n    image = image.crop((crop_box[0], crop_box[1], crop_box[2], crop_box[3]))\n    if resize is not None:\n        image = image.resize((resize[0], resize[1]), Image.BILINEAR)\n    return image\n\ndef 
resize_crop_from_STN(image, crop_box, resize):\n spatial_size = np.array(image.size)\n image = np.array(image.copy()).transpose(2, 0, 1)\n image_var = np2variable(image, False).unsqueeze(0)\n theta = crop2affine(spatial_size, crop_box)\n #print ('affine parameter : {}'.format(theta))\n theta = np2variable(theta, False)\n theta = theta.unsqueeze(0)\n if resize is None:\n grid_size = torch.Size([1, 3, crop_box[3]-crop_box[1], crop_box[2]-crop_box[0]])\n else:\n grid_size = torch.Size([1, 3, int(resize[1]), int(resize[0])])\n grid = F.affine_grid(theta, grid_size)\n stn_image = F.grid_sample(image_var, grid)\n stn_image = stn_image.data.numpy().squeeze()\n stn_image = stn_image.transpose(1, 2, 0)\n stn_image = Image.fromarray(np.uint8(stn_image))\n return stn_image\n\ndef cal_dis(pil_image, stn_image):\n pil_image = np.array(pil_image)\n stn_image = np.array(stn_image)\n error = stn_image - pil_image\n pdb.set_trace()\n\nimage = pil_loader('cat.jpg')\nprint ('load cat with image size = {}'.format(image.size))\nplt.subplot(3, 1, 1)\nplt.imshow(np.array(image))\nprint ('imshow cat with image size = {}'.format(image.size))\n\ncrop_box = np.array([image.size[0]//3, image.size[1]//3, image.size[0]*3//4, image.size[1]*2//3])\n#resize = np.array([image.size[0]//2, image.size[1]//2])\n#resize = np.array([crop_box[2]-crop_box[0], crop_box[3]-crop_box[1]])\nresize = None\npil_image = resize_crop_from_PIL(image, crop_box, resize)\nimport pdb\npdb.set_trace()\nprint ('produce pil cat with image size = {}'.format(pil_image.size))\nplt.subplot(3, 1, 2)\nplt.imshow(np.array(pil_image))\n\n\nstn_image = resize_crop_from_STN(image, crop_box, resize)\nprint ('produce stn cat with image size = {}'.format(stn_image.size))\nplt.subplot(3, 1, 3)\nplt.imshow(np.array(stn_image))\n\ncal_dis(pil_image, stn_image)\nplt.show()\n\n","sub_path":"deprecated/test_scripts/test_stn.py","file_name":"test_stn.py","file_ext":"py","file_size_in_byte":2441,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"310804289","text":"class Robot:\n def __init__(self, position, direction, battery_live, counter):\n\n #Current position of the robot\n self.position = position\n\n #Current direction, where 1 - north, 2 - east, 3 - south, 4 - west\n self.direction = direction\n\n #Time in seconds for which the robot can operate\n self.battery_live = battery_live\n\n #Counts the times when the robot's position is the same as the given xy coords\n self.counter = counter\n\n def move(self, direction):\n\n if direction is 1:\n # y += distance\n self.position[1] = self.position[1] + 1\n elif direction is 2:\n # x += distance\n self.position[0] = self.position[0] + 1\n elif direction is 3:\n # y -= distance\n self.position[1] = self.position[1] - 1\n elif direction is 4:\n # x-= distance\n self.position[0] = self.position[0] - 1\n else:\n print(\"Debug message, sth is wrong with the direction!\")\n\n self.battery_live = self.battery_live - 1 # Moving drains the battery by 1 sec\n\n #If there's enough juice left, rotate the robot\n #if self.battery_live > 0:\n #self.rotate(direction)s\n\n def rotate(self, direction):\n self.direction = (self.direction %\n 4) + 1 #rotates the robot by 90 degrees\n\n #Rotating drains the battery by 1 sec\n self.battery_live -= 1\n\n def check_position(self, set_point):\n\n if self.position == set_point:\n print('Fear not, for I have come!!!')\n self.counter += 1\n\n\n# Stdin test\nn = 4\nt = 28\ninstructions = [2, 3, 1, 2]\nset_point = [3, 2]\n\n#Create an instance 
of our robot\nrobot = Robot([0, 0], 1, t, 0)\n\n#Basic logic\nwhile True:\n    for i in range(1, n + 1):  #1, 2, 3, ... , n\n        print(\" Current position: \", robot.position)\n        instruction_index = ((i - 1) % n) + 1\n        #print(instruction_index)\n        for b in range(instructions[instruction_index -\n                                    1]):  #look at the instructions array\n            if robot.battery_live >= 1:\n                robot.move(\n                    robot.direction\n                )  # move by the amount of tiles defined in the instructions set ( b+1 tiles )\n                #print(b+1)\n                robot.check_position(\n                    set_point\n                )  # Checks whether the robot arrived at the specified point\n        if robot.battery_live >= 1:\n            robot.rotate(robot.direction)\n\n        if robot.battery_live == 0:\n            print('I run out of Juice\\nI have arrived for {} times'.format(\n                robot.counter))\n            break\n\n        #robot.move((instructions[instruction_index-1]), (robot.direction))\n        #print(instructions[instruction_index-1], robot.direction)\n\n#Test output\n#print(robot.position)\n#print(robot.direction)\n#print(robot.battery_live)\n#print(robot.counter)\n","sub_path":"ROB.py","file_name":"ROB.py","file_ext":"py","file_size_in_byte":2970,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"336954252","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n\n\"\"\"\n.. module:: stream\n    :platform: Unix\n    :synopsis: the top-level submodule of T_System's remote_ui that contains the functions for managing of t_system's video and audio stream.\n\n.. moduleauthor:: Cem Baybars GÜÇLÜ \n\"\"\"\n\nimport threading\n\nfrom t_system import seer\n\n\nclass StreamManager:\n    \"\"\"Class to define a manager for asynchronous work of t_system's online video stream.\n\n    This class provides necessary initiations and a function named\n    :func:`t_system.remote_ui.modules.stream.StreamManager.get_stream`\n    for the provide getting stream by calling iteratively the seer's current_frame member.\n    \"\"\"\n\n    def __init__(self):\n        \"\"\"Initialization method of :class:`t_system.remote_ui.modules.stream.StreamManager` class.\n        \"\"\"\n\n        self.stop_thread = False\n        self.preview_thread = threading.Thread(target=seer.stream, args=(lambda: self.stop_thread, \"bgr\", \"preview\"))\n\n    def start_stream(self, admin_id, stream_type):\n        \"\"\"The high-level method to return existing position with given id.\n\n        Args:\n            admin_id (str):                 Root privileges flag.\n            stream_type (str):              Stream's purpose. Preview, track-learn mode etc.\n        \"\"\"\n        try:\n            if stream_type == \"preview\":\n                self.preview_thread.start()\n                return self.get_stream(), \"multipart/x-mixed-replace; boundary=frame\"\n\n        except Exception as e:\n            print(e)\n\n        return False, False\n\n    def stop_stream(self, admin_id, stream_type):\n        \"\"\"The high-level method to remove existing position with given id.\n\n        Args:\n            admin_id (str):                 Root privileges flag.\n            stream_type (str):              Stream's purpose. Preview, track-learn mode etc.\n        \"\"\"\n        result = True\n        try:\n            if stream_type == \"preview\" and self.preview_thread.is_alive():\n                self.stop_thread = True\n                self.preview_thread.join()\n\n        except Exception as e:\n            print(e)\n            result = False\n\n        return result\n\n    @staticmethod\n    def get_stream():\n        \"\"\"The low-level method to get camera stream frame by frame from seer.current_frame.\n        \"\"\"\n        while True:\n            frame = seer.get_current_frame()\n            if frame is not None:\n                yield (b'--frame\\r\\n'\n                       b'Content-Type: image/jpeg\\r\\n\\r\\n' + frame.tobytes() + b'\\r\\n')\n\n","sub_path":"t_system/remote_ui/modules/stream.py","file_name":"stream.py","file_ext":"py","file_size_in_byte":2558,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"381523005","text":"# ---------------------------------------------------------------------------\n# Remote console support for MTDA\n# ---------------------------------------------------------------------------\n#\n# This software is a part of MTDA.\n# Copyright (c) Mentor, a Siemens business, 2017-2020\n#\n# ---------------------------------------------------------------------------\n# SPDX-License-Identifier: MIT\n# ---------------------------------------------------------------------------\n\n# Local imports\nfrom mtda.console.output import ConsoleOutput\n\n# System imports\nimport sys\nimport zmq\n\n\nclass RemoteConsoleOutput(ConsoleOutput):\n\n    def __init__(self, host, port):\n        ConsoleOutput.__init__(self)\n        self.host = host\n        self.port = port\n\n    def reader(self):\n        context = zmq.Context()\n        socket = context.socket(zmq.SUB)\n        socket.connect(\"tcp://%s:%s\" % (self.host, self.port))\n        socket.setsockopt(zmq.SUBSCRIBE, b'')\n        while True:\n            data = socket.recv()\n            sys.stdout.buffer.write(data)\n            sys.stdout.buffer.flush()\n","sub_path":"mtda/console/remote_output.py","file_name":"remote_output.py","file_ext":"py","file_size_in_byte":1079,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"207498600","text":"from __future__ import absolute_import\nfrom solvebio.resource import Vault\n\nfrom .helper import SolveBioTestCase\n\n\nclass VaultTests(SolveBioTestCase):\n\n    def test_vaults(self):\n        vaults = Vault.all()\n        vault = vaults.data[0]\n        self.assertTrue('id' in vault,\n                        'Should be able to get id in vault')\n\n        vault2 = Vault.retrieve(vault.id)\n        self.assertEqual(vault, vault2,\n                         \"Retrieving vault id {0} found by all()\"\n                         .format(vault.id))\n\n        check_fields = [\n            'account_id', 'created_at', 'description', 'has_children',\n            'has_folder_children', 'id', 'is_deleted', 'is_public',\n            'last_synced', 'name', 'permissions', 'provider',\n            'require_unique_paths', 'updated_at', 'url', 'user_id',\n            'vault_properties', 'vault_type'\n        ]\n\n        for f in check_fields:\n            self.assertTrue(f in vault, '{0} field is present'.format(f))\n","sub_path":"solvebio/test/test_vault.py","file_name":"test_vault.py","file_ext":"py","file_size_in_byte":988,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"396139587","text":"import os\n\nimport numpy as np\nimport pandas as pd\nimport tensorflow as tf\nimport yaml\nfrom tensorflow.python.client import device_lib\nimport traceback\nimport time\nfrom tqdm import tqdm\n\nimport src.model.core as get_model\nfrom config.config import *\nfrom src.model.core.match_tower import MatchTower\nfrom src.train.utils import get_tf_hashtable\n\ntf.random.set_random_seed(RANDOM_SEED)\n\n\ndef 
build_net_graph(model, match_tower, inputs, num_gpu):\n # GPU/CPU设置\n gpus = [x.name for x in device_lib.list_local_devices() if x.device_type == 'GPU'][:num_gpu]\n num_gpus = len(gpus)\n if num_gpus > 0:\n tf.logging.info(\"Using the following GPUs to train: \" + str(gpus))\n num_towers = num_gpus\n device_string = '/gpu:%d'\n else:\n tf.logging.info(\"No GPUs found. Training on CPU.\")\n num_towers = 1\n device_string = '/cpu:%d'\n num_towers = num_towers\n device_string = device_string\n\n # assert num_gpus == len(tower_inputs)\n\n # 将数据进行切分,放置到不同的GPU上\n tower_inputs = {key: tf.split(value, num_towers) for key, value in inputs.items()}\n tower_inputs = [{key: value[i] for key, value in tower_inputs.items()} for i in range(num_towers)]\n\n tower_pred_dict = {}\n\n for i in range(num_towers):\n with tf.device(device_string % i):\n with tf.variable_scope(\"tower\", reuse=tf.AUTO_REUSE):\n with tf.variable_scope(\"match_tower\", reuse=tf.AUTO_REUSE):\n _, _, match_logits, _ = match_tower(tower_inputs[i], is_training=False)\n\n with tf.variable_scope(\"core_model\", reuse=tf.AUTO_REUSE):\n _, pred_dict, _ = model(tower_inputs[i], is_training=False,\n match_logits=match_logits)\n\n concat_tensor_dict(tower_pred_dict, pred_dict)\n\n for key in tower_pred_dict.keys():\n tower_pred_dict[key] = tf.concat(tower_pred_dict[key], axis=0)\n\n return tower_pred_dict\n\n\ndef concat_tensor_dict(tower_dict, tensor_dict):\n for key, values in tensor_dict.items():\n if key in tower_dict:\n tower_dict[key].append(values)\n else:\n tower_dict[key] = [values]\n\n\ndef input_fn_builder(data, features_name, batch_size):\n d = tf.data.Dataset.from_tensor_slices({name: data[name].values for name in features_name})\n\n d = d.batch(batch_size, drop_remainder=False)\n\n iters = d.make_one_shot_iterator()\n batch = iters.get_next()\n\n for name in list(batch.keys()):\n t = batch[name]\n if t.dtype == tf.int64:\n t = tf.to_int32(t)\n\n if t.dtype == tf.float64:\n t = tf.cast(t, tf.float32)\n batch[name] = t\n\n return batch\n\n\ndef run_one_model(data, match_tower_config, model_name, model_config, optimizer_config,\n hashtable_ckpt, init_checkpoint):\n tf.reset_default_graph()\n\n test_size = len(data)\n\n config = tf.ConfigProto(\n allow_soft_placement=True, # 如果你指定的设备不存在,自动分配设备\n log_device_placement=False # 是否打印设备分配日志\n )\n config.gpu_options.allow_growth = True\n\n print(\"The match tower config:\")\n print(match_tower_config)\n print(\"The core model config:\")\n print(model_config)\n\n hashtable_list, hashtable, table = get_tf_hashtable(feed_num=match_tower_config['feed_vocab_dict']['feedid'],\n user_num=match_tower_config['user_vocab_dict']['userid'])\n\n model_config.update({\"tables\": table, \"hashtables\": hashtable})\n\n match_tower_config.update({\"tables\": table, \"hashtables\": hashtable})\n\n match_tower = MatchTower(**match_tower_config)\n\n model = get_model.get_instance(model_name, model_config)\n\n steps = test_size // optimizer_config['test_batch_size']\n print(\"train parallel: {}, residual: {}\".format(steps * optimizer_config['test_batch_size'],\n test_size - steps * optimizer_config['test_batch_size']))\n\n predict_inputs = input_fn_builder(\n data[['userid_origin', 'feedid_origin', 'device'] + LABELS_NAME][:steps * optimizer_config['test_batch_size']],\n features_name=['userid_origin', 'feedid_origin', 'device'] + LABELS_NAME,\n batch_size=optimizer_config['test_batch_size'])\n\n tower_pred_dict = build_net_graph(\n model,\n match_tower,\n predict_inputs,\n 
num_gpu=optimizer_config['num_gpu'])\n\n # 由于tf.split将数据分配给gpu时,需要完全可整除,剩余的不可整除部分由一个gpu处理\n residual_inputs = None\n if steps * optimizer_config['test_batch_size'] < test_size:\n residual_inputs = input_fn_builder(\n data[['userid_origin', 'feedid_origin', 'device'] + LABELS_NAME][\n steps * optimizer_config['test_batch_size']:],\n features_name=['userid_origin', 'feedid_origin', 'device'] + LABELS_NAME,\n batch_size=optimizer_config['test_batch_size'] // optimizer_config['num_gpu'])\n residual_pred_dict = build_net_graph(\n model,\n match_tower,\n residual_inputs,\n num_gpu=1)\n\n hashtable_saver = tf.train.Saver(var_list=hashtable_list)\n\n save_variables = tf.global_variables()\n print(\"************************ All Global Variables ************************\")\n for v in save_variables:\n print(v)\n print(\"************************ Variables not to load ************************\")\n for v in tf.global_variables(\"hashtable\"):\n save_variables.remove(v)\n print(v)\n\n saver = tf.train.Saver(var_list=save_variables, max_to_keep=10)\n\n result = {}\n with tf.Session(config=config) as sess:\n sess.run(tf.global_variables_initializer())\n\n hashtable_saver.restore(sess, hashtable_ckpt)\n\n saver.restore(sess, init_checkpoint)\n\n for name in hashtable.keys():\n size = sess.run(hashtable[name].size())\n assert size > 0, \"HashTable: {} load with failure\".format(name)\n\n for _ in tqdm(range(steps), total=steps):\n predict_one_batch(sess=sess,\n predict_inputs=predict_inputs,\n tower_pred_dict=tower_pred_dict,\n result=result)\n\n print(\"finished num: {}\".format(sum([len(v) for v in result['userid']])))\n\n if residual_inputs is not None:\n while True:\n try:\n predict_one_batch(sess=sess,\n predict_inputs=residual_inputs,\n tower_pred_dict=residual_pred_dict,\n result=result)\n except Exception as e:\n print(e)\n traceback.print_exc()\n print(\"!!!! 
test run end !!!!\")\n break\n\n for key in result.keys():\n result[key] = np.concatenate(result[key], axis=0)\n\n pred_len = list(map(lambda x: len(x), result.values()))\n assert min(pred_len) == max(pred_len)\n result_df = pd.DataFrame(result)\n\n return result_df[['userid', 'feedid'] + LABELS_NAME]\n\n\ndef run(test_csv, match_tower_config, model_name, model_config, optimizer_config,\n hashtable_ckpt, result_path, init_checkpoint):\n assert len(match_tower_config) == len(model_config) == len(optimizer_config) == len(init_checkpoint)\n\n start_time = time.time()\n\n data = pd.read_csv(test_csv)\n\n test_size = len(data)\n\n data['feedid_origin'] = data['feedid']\n data['userid_origin'] = data['userid']\n data['device'] = data['device'] - 1\n for name in LABELS_NAME:\n data[name] = 0\n data.index = range(test_size)\n\n result_df = None\n for one_match_tower_config, one_model_config, one_optimizer_config, one_init_checkpoint, one_model_name in zip(\n match_tower_config,\n model_config,\n optimizer_config,\n init_checkpoint,\n model_name):\n one_result = run_one_model(data, match_tower_config=one_match_tower_config,\n model_config=one_model_config,\n optimizer_config=one_optimizer_config,\n init_checkpoint=one_init_checkpoint,\n model_name=one_model_name,\n hashtable_ckpt=hashtable_ckpt)\n if result_df is None:\n result_df = one_result\n else:\n result_df = pd.merge(result_df, one_result, on=['userid', 'feedid'])\n\n for name in LABELS_NAME:\n result_df[name] = result_df[name + \"_x\"] + result_df[name + \"_y\"]\n\n result_df = result_df[['userid', 'feedid'] + LABELS_NAME]\n\n for name in LABELS_NAME:\n result_df[name] = result_df[name] / len(init_checkpoint)\n\n # 概率保留6位小数\n for name in LABELS_NAME:\n result_df[name] = result_df[name].apply(lambda x: \"{:.6f}\".format(x))\n result_df[['userid', 'feedid'] + LABELS_NAME].to_csv(result_path,\n encoding='utf-8',\n index=False)\n result_size = len(result_df)\n print(\"+++++++++++ cost time: {} /minute +++++++++++\".format((time.time() - start_time) // 60))\n print(\"+++++++++++ avg time: {} /ms +++++++++++\".format((time.time() - start_time) * 1000 / test_size / 7 * 2000))\n\n assert test_size == result_size, \"test_size: {}, result_size: {}\".format(test_size, result_size)\n assert len(data[['userid', 'feedid']].drop_duplicates()) == len(result_df[['userid', 'feedid']].drop_duplicates())\n assert len(pd.merge(data, result_df, on=['userid', 'feedid'])) == test_size\n\n\ndef predict_one_batch(sess, predict_inputs, tower_pred_dict, result):\n userid, feedid, pred_dict = sess.run(\n [predict_inputs['userid_origin'],\n predict_inputs['feedid_origin'],\n tower_pred_dict])\n pred_dict.update({\"userid\": userid, \"feedid\": feedid})\n for key in pred_dict.keys():\n if key in result:\n result[key].append(pred_dict[key])\n else:\n result[key] = [pred_dict[key]]\n\n\nif __name__ == '__main__':\n tf.logging.set_verbosity(\"INFO\")\n\n import argparse\n\n parser = argparse.ArgumentParser()\n parser.add_argument('--test_csv', default='config.yaml', type=str)\n parser.add_argument('--config_file', default='config.yaml', type=str)\n parser.add_argument('--match_config_file', default='config.yaml', type=str)\n parser.add_argument('--hashtable_ckpt', default=None, type=str)\n parser.add_argument('--init_checkpoint', default=None, type=str)\n parser.add_argument('--result_path', default=None, type=str)\n\n args = parser.parse_args()\n\n config = [yaml.load(open(config_file, 'r')) for config_file in args.config_file.split(\",\")]\n match_config = 
[yaml.load(open(match_config_file, 'r')) for match_config_file in args.match_config_file.split(\",\")]\n\n run(test_csv=args.test_csv,\n match_tower_config=[config['ModelConfig'] for config in match_config],\n model_name=[conf['ModelName'] for conf in config],\n model_config=[conf['ModelConfig'] for conf in config],\n optimizer_config=[conf['OptimizerConfig'] for conf in config],\n hashtable_ckpt=args.hashtable_ckpt,\n init_checkpoint=args.init_checkpoint.split(\",\"),\n result_path=args.result_path)\n","sub_path":"src/inference_fusion.py","file_name":"inference_fusion.py","file_ext":"py","file_size_in_byte":11516,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"58470201","text":"\"\"\"\nthreefive/upid.py\n\nthreefve.upid exposes one function,\nupid_decoder for use by the\nSegmentationDescriptor class.\n\n\"\"\"\n\n\ndef upid_decoder(bitbin, upid_type, upid_length):\n \"\"\"\n upid_decoder\n decodes segmentation_upids by type,\n from a bitbin instance.\n\n Used by the SegmentationDescriptor class.\n \"\"\"\n upid_map = {\n 0x02: [\"Deprecated\", _decode_uri],\n 0x03: [\"AdID\", _decode_uri],\n 0x04: [\"UMID\", _decode_umid],\n 0x05: [\"ISAN\", _decode_isan],\n 0x06: [\"ISAN\", _decode_isan],\n 0x07: [\"TID\", _decode_uri],\n 0x08: [\"AiringID\", _decode_air_id],\n 0x09: [\"ADI\", _decode_uri],\n 0x0A: [\"EIDR\", _decode_eidr],\n 0x0B: [\"ATSC\", _decode_atsc],\n 0x0C: [\"MPU\", _decode_mpu],\n 0x0D: [\"MID\", _decode_mid],\n 0x0E: [\"ADS Info\", _decode_uri],\n 0x0F: [\"URI\", _decode_uri],\n 0x10: [\"UUID\", _decode_uri],\n }\n if upid_type in upid_map.keys():\n return upid_map[upid_type][0], upid_map[upid_type][1](bitbin, upid_length)\n return False\n\n\ndef _decode_air_id(bitbin, upid_length):\n return bitbin.as_hex(upid_length << 3)\n\n\ndef _decode_atsc(bitbin, upid_length):\n return {\n \"TSID\": bitbin.as_int(16),\n \"reserved\": bitbin.as_int(2),\n \"end_of_day\": bitbin.as_int(5),\n \"unique_for\": bitbin.as_int(9),\n \"content_id\": bitbin.as_ascii(((upid_length - 4) << 3)),\n }\n\n\ndef _decode_eidr(bitbin, upid_length):\n if upid_length < 12:\n raise Exception(f\"upid_length is {upid_length} should be 12 bytes\")\n pre = bitbin.as_int(16)\n post = []\n bit_count = 80\n while bit_count:\n bit_count -= 16\n post.append(bitbin.as_hex(16)[2:])\n return f\"10.{pre}/{'-'.join(post)}\"\n\n\ndef _decode_isan(bitbin, upid_length):\n return bitbin.as_hex(upid_length << 3)\n\n\ndef _decode_mid(bitbin, upid_length):\n upids = []\n ulb = upid_length << 3\n while ulb:\n upid_type = bitbin.as_int(8) # 1 byte\n ulb -= 8\n upid_length = bitbin.as_int(8)\n ulb -= 8\n upid_type_name, segmentation_upid = upid_decoder(bitbin, upid_type, upid_length)\n mid_upid = {\n \"upid_type\": hex(upid_type),\n \"upid_type_name\": upid_type_name,\n \"upid_length\": upid_length,\n \"segmentation_upid\": segmentation_upid,\n }\n ulb -= upid_length << 3\n upids.append(mid_upid)\n return upids\n\n\ndef _decode_mpu(bitbin, upid_length):\n ulbits = upid_length << 3\n mpu_data = {\n \"format_identifier\": bitbin.as_hex(32),\n \"private_data\": bitbin.as_hex(ulbits - 32),\n }\n return mpu_data\n\n\ndef _decode_umid(bitbin, upid_length):\n chunks = []\n ulb = upid_length << 3\n while ulb:\n chunks.append(bitbin.as_hex(32).split(\"x\", 1)[1])\n ulb -= 32\n return \".\".join(chunks)\n\n\ndef _decode_uri(bitbin, upid_length):\n if upid_length > 0:\n return bitbin.as_ascii(upid_length << 3)\n return 
0\n","sub_path":"threefive/upid.py","file_name":"upid.py","file_ext":"py","file_size_in_byte":2971,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"81799073","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Author : Mike\n# @Contact : 597290963@qq.com\n# @Time : 2021/1/28 23:56\n# @File : MinSubArrayLen.py\nfrom typing import List\n\n\"\"\"\n长度最小的子数组\n给定一个含有 n 个正整数的数组和一个正整数 s ,找出该数组中满足其和 ≥ s 的长度最小的 连续 子数组,并返回其长度。如果不存在符合条件的子数组,返回 0。\n\n \n\n示例:\n\n输入:s = 7, nums = [2,3,1,2,4,3]\n输出:2\n解释:子数组 [4,3] 是该条件下的长度最小的子数组。\n \n\n进阶:\n\n如果你已经完成了 O(n) 时间复杂度的解法, 请尝试 O(n log n) 时间复杂度的解法。\n\n作者:力扣 (LeetCode)\n链接:https://leetcode-cn.com/leetbook/read/array-and-string/c0w4r/\n来源:力扣(LeetCode)\n著作权归作者所有。商业转载请联系作者获得授权,非商业转载请注明出处。\n\"\"\"\n\n\nclass MinSubArrayLen(object):\n\n def __init__(self):\n pass\n\n def minSubArrayLen(self, s: int, nums: List[int]) -> int:\n \"\"\"\n 双指针,或者二分查找写法(前缀和)\n :param s:\n :param nums:\n :return:\n \"\"\"\n\n sums = 0\n start = 0\n min_len = len(nums) + 1\n for i in range(len(nums)):\n sums += nums[i]\n while sums >= s:\n min_len = min(min_len, i - start + 1)\n sums -= nums[start]\n start += 1\n\n return 0 if min_len == len(nums) + 1 else min_len\n\n def minSubArrayLen1(self, s: int, nums: List[int]) -> int:\n \"\"\"\n 双指针,或者二分查找写法(前缀和)\n :param s:\n :param nums:\n :return:\n \"\"\"\n # 二分查找写法,使用前缀和\n # 原地修改原始数组\n if not nums:\n return 0\n\n for i in range(1, len(nums)):\n nums[i] = nums[i] + nums[i - 1]\n\n if nums[-1] < s:\n return 0\n if nums[-1] == s:\n return len(nums)\n\n min_len = len(nums)\n for i in range(len(nums)):\n if nums[i] < s:\n continue\n\n cur_diff = nums[i] - s\n left = self.binary_search(cur_diff, nums)\n if nums[left] + s <= nums[i]:\n min_len = min(min_len, i - left)\n else:\n min_len = min(min_len, i - left + 1)\n\n return min_len\n\n def binary_search(self, target, nums):\n left = 0\n right = len(nums) - 1\n while left < right:\n mid = (right - left + 1) // 2 + left\n if nums[mid] == target:\n return mid\n elif nums[mid] < target:\n left = mid\n else:\n right = mid - 1\n return left\n\n\nif __name__ == '__main__':\n print(MinSubArrayLen().minSubArrayLen1(s=5, nums=[2, 3, 1, 1, 1, 1, 1]))\n","sub_path":"datastructure/double_pointer/MinSubArrayLen.py","file_name":"MinSubArrayLen.py","file_ext":"py","file_size_in_byte":2898,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"638162139","text":"import os\nimport csv\n\ncsvpath=os.path.join('..','Resources','election_data.csv')\nwith open(csvpath, newline='') as csvfile:\n csvreader = csv.reader(csvfile, delimiter=',')\n print(csvreader)\n csv_header = next(csvreader)\n print(f\"Header: {csv_header}\")\n\n votes = []\n county = []\n candidates = []\n khan = []\n correy = []\n li = []\n otooley = []\n\n\n for row in csvreader:\n votes.append(int(row[0]))\n county.append(row[1])\n candidates.append(row[2])\n #TOTAL VOTE COUNT\n total_votes = (len(votes))\n # print(total_votes)\n\n #VOTES BY PERSON\n for candidate in candidates:\n if candidate == \"Khan\":\n khan.append(candidates)\n khan_votes = len(khan)\n elif candidate == \"Correy\":\n correy.append(candidates)\n correy_votes = len(correy)\n elif candidate == \"Li\":\n li.append(candidates)\n li_votes = len(li)\n else:\n otooley.append(candidates)\n otooley_votes = len(otooley)\n #print(khan_votes)\n #print(correy_votes)\n #print(li_votes)\n #print(otooley_votes)\n \n #PERCENTAGES BY PERSON\n khan_percent = round(((khan_votes / total_votes) 
* 100), 2)\n correy_percent = round(((correy_votes / total_votes) * 100), 2)\n li_percent = round(((li_votes / total_votes) * 100), 2)\n otooley_percent = round(((otooley_votes / total_votes) * 100), 2)\n #print(khan_percent)\n #print(correy_percent)\n #print(li_percent)\n #print(otooley_percent)\n \n\n if khan_percent > max(correy_percent, li_percent, otooley_percent):\n winner = \"Khan\"\n elif correy_percent > max(khan_percent, li_percent, otooley_percent):\n winner = \"Correy\" \n elif li_percent > max(correy_percent, khan_percent, otooley_percent):\n winner = \"Li\"\n else:\n winner = \"O'Tooley\"\n\n\noutput = (f\"Election Results\") + \"\\n\"\noutput += (f\"-----------------------------------\") + \"\\n\"\noutput += (f\"Total Votes: {total_votes}\") + \"\\n\"\noutput += (f\"-----------------------------------\") + \"\\n\"\noutput += (f\"Khan: {khan_percent}% ({khan_votes})\") + \"\\n\"\noutput += (f\"Correy: {correy_percent}% ({correy_votes})\") + \"\\n\"\noutput += (f\"Li: {li_percent}% ({li_votes})\") + \"\\n\"\noutput += (f\"O'Tooley: {otooley_percent}% ({otooley_votes})\") + \"\\n\"\noutput += (f\"-----------------------------------\") + \"\\n\"\noutput += (f\"Winner: {winner}\") + \"\\n\"\noutput += (f\"-----------------------------------\")\n\nprint(output)","sub_path":"PyPoll/pypoll.py","file_name":"pypoll.py","file_ext":"py","file_size_in_byte":2483,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"620639656","text":"def solution(n, s, a, b, fares):\n arr = [[float('inf')] * (n+1) for _ in range(n+1)]\n for fare in fares:\n v, e, cost = fare\n arr[v][e] = cost\n arr[e][v] = cost\n for path in range(1, n + 1):\n for start in range(1, n + 1):\n for end in range(1, n+1):\n if start == end:\n arr[start][end] = 0\n else:\n if arr[start][end] > arr[start][path] + arr[path][end]:\n arr[start][end] = arr[start][path] + arr[path][end]\n min_v = arr[s][a] + arr[s][b]\n for path in range(1, n + 1):\n if min_v > arr[s][path] + arr[path][a] + arr[path][b]:\n min_v = arr[s][path] + arr[path][a] + arr[path][b]\n return min_v\n\nif __name__ == '__main__':\n n, s, a, b = 6, 4, 6, 2\n fares = [[4, 1, 10], [3, 5, 24], [5, 6, 2], [3, 1, 41], [5, 1, 24], [4, 6, 50], [2, 4, 66], [2, 3, 22], [1, 6, 25]]\n print(solution(n, s, a, b, fares))","sub_path":"Algorithm/programmers/2021_kakao_blind/합승 택시 요금.py","file_name":"합승 택시 요금.py","file_ext":"py","file_size_in_byte":967,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"49392227","text":"import numpy as np\nimport csv\nimport matplotlib.pyplot as plt\nfrom sklearn import svm, datasets\ni = 0\na = []\nb = []\nc = []\nk = []\nl = []\nnpb = np.array([])\nnpc = np.array([])\nnpk = np.array([])\nrealznach = np.array([])\ninfile = open('train.csv', 'r')\nfor row in csv.reader(infile, delimiter = \",\"):\n a = row\n i = i+1\n if i == 1:\n break\ni=0\nfor row in csv.reader(infile, delimiter = \",\"):\n b = row\n b = b[1:]\n for j in range(0,784):\n b[j] = int(b[j])\n npb = np.append(npb, b)\n c = int(row[0])\n npc = np.append(npc,c)\n i = i+1\n if i == 1000:\n break\ni=0\nfor row in csv.reader(infile, delimiter = \",\"):\n k = row\n k = k[1:]\n for j in range(0,784):\n k[j]=int(k[j])\n npk = np.append(npk, k)\n l = int(row[0])\n realznach = np.append(realznach,l)\n i = i+1\n if i == 1000:\n break\ninfile.close()\n\nnpb = npb.reshape(1000,784)\nnpk = npk.reshape(1000,784)\n\nC = 1.0\nsvc = svm.SVC(kernel='linear', C=C).fit(npb, npc)\nz = 
svc.predict(np.c_[npk])\nnp.testing.assert_array_almost_equal(z,realznach)\n\n\n","sub_path":"2015-2016/SVM/forthestart.py","file_name":"forthestart.py","file_ext":"py","file_size_in_byte":1071,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"192582392","text":"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n# $File: faceSimilarity.py\n# $Function: get all landmarks of a face(25 points and 83 points)\n# $Data: 2013/03/24\n\n\n# import the API and File from facepp SDK\nfrom facepp import API, File\n\n# Setting App key/secret\nAPI_KEY = '0ceacbf9a3a2afc67b1e809ccd65a33d'\nAPI_SECRET = 'qVEWJRpPZTkT8AwdJRxzuW80VXKLrFhG'\n# Instantiation API object\napi = API(API_KEY, API_SECRET)\n \n\n# parse the detected face\ndef encodeFace(face):\n def encode(obj):\n if type(obj) is unicode:\n return obj.encode('utf-8')\n if type(obj) is dict:\n return {encode(k): encode(v) for (k, v) in obj.iteritems()}\n if type(obj) is list:\n return [encode(i) for i in obj]\n return obj\n encodedFace = encode(face)\n return encodedFace\n\n# return face id\ndef get_face_id(face):\n return face['face'][0]['face_id']\n\n# get landmarks of face\ndef get_face_landmarks(face_ID, point_number='83p'):\n faceLandmark = api.detection.landmark(api_key=API_KEY, api_secret=API_SECRET,\n face_id=face_ID, type=point_number)\n return faceLandmark['result'][0]['landmark']\n \n\n# import cv2\nimport cv2\nimport sys\n\n\nif __name__ == '__main__':\n # set file name \n if len(sys.argv) == 3:\n imgFileName = sys.argv[2]\n else: \n imgFileName = 'Dataset/face02.jpg'\n \n # detect human face\n detectedFace = api.detection.detect(img = File(imgFileName))\n\n # encode face\n encodedFace = encodeFace(detectedFace)\n \n # get face id\n face_id = get_face_id(encodedFace)\n \n # get face landmarks\n landmark = get_face_landmarks(face_id, point_number='25p')\n \n # read the image using cv2\n imgFace = cv2.imread(imgFileName, 0) \n cv2.imshow('Original Face', imgFace)\n \n # get the image's height and width\n height, width = imgFace.shape\n \n#print points\n#for k, v in points.iteritems():\n# print v[\"y\"], v[\"x\"]\n \n \n # set value of key point in image of value zero(namely black)\n for pos, coordinate in landmark.iteritems():\n col = coordinate[\"x\"] \n row = coordinate[\"y\"] \n # the col and row are just percentage value of the image\n # need to scale with the image size.\n cx = int(row * height / 100)\n cy = int(col * width / 100)\n \n # as one pixel is two small, set all 3*3 pixels around to 0\n imgFace[cx-1, cy-1] = 0; imgFace[cx-1, cy] = 0; imgFace[cx-1, cy+1] = 0\n imgFace[cx, cy-1] = 0; imgFace[cx, cy] = 0; imgFace[cx, cy+1] = 0\n imgFace[cx+1, cy-1] = 0; imgFace[cx+1, cy] = 0; imgFace[cx+1, cy+1] = 0\n \n # show face with key point\n cv2.imshow('Face with Landmark', imgFace)\n \n cv2.waitKey(0)\n cv2.destroyAllWindows()","sub_path":"FaceAnalysis/FaceLandmark/faceLandmark.py","file_name":"faceLandmark.py","file_ext":"py","file_size_in_byte":2777,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"582672645","text":"# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\"\"\"\nHere are all the test parameters and values for the ParametricModels\ndefined. There is a dictionary for 1D and a dictionary for 2D models.\n\nExplanation of keywords of the dictionaries:\n\n\"parameters\" : list or dict\n Model parameters, the model is tested with. Make sure you keep the right\n order. 
For polynomials you can also use a dict to specify the\n coefficients. See examples below.\n\n\"x_values\" : list\n x values where the model is evaluated.\n\n\"y_values\" : list\n Reference y values for the in x_values given positions.\n\n\"z_values\" : list\n Reference z values for the in x_values and y_values given positions.\n (2D model option)\n\n\"x_lim\" : list\n x test range for the model fitter. Depending on the model this can differ\n e.g. the PowerLaw model should be tested over a few magnitudes.\n\n\"y_lim\" : list\n y test range for the model fitter. Depending on the model this can differ\n e.g. the PowerLaw model should be tested over a few magnitudes. (2D model\n option)\n\n\"log_fit\" : bool\n PowerLaw models should be tested over a few magnitudes. So log_fit should\n be true.\n\n\"requires_scipy\" : bool\n If a model requires scipy (Bessel functions etc.) set this flag.\n\n\"integral\" : float\n Approximate value of the integral in the range x_lim (and y_lim).\n\n\"deriv_parameters\" : list\n If given the test of the derivative will use these parameters to create a\n model (optional)\n\n\"deriv_initial\" : list\n If given the test of the derivative will use these parameters as initial\n values for the fit (optional)\n\"\"\"\n\nfrom ..functional_models import (\n Gaussian1DModel, Sine1DModel, Box1DModel, Linear1DModel, Lorentz1DModel,\n MexicanHat1DModel, Trapezoid1DModel, Const1DModel, Beta1DModel,\n Gaussian2DModel, Const2DModel, Box2DModel, MexicanHat2DModel,\n TrapezoidDisk2DModel, AiryDisk2DModel, Beta2DModel, Disk2DModel,\n Ring2DModel)\nfrom ..polynomial import Polynomial1DModel, Polynomial2DModel\nfrom ..powerlaws import (\n PowerLaw1DModel, BrokenPowerLaw1DModel, ExponentialCutoffPowerLaw1DModel,\n LogParabola1DModel)\nimport numpy as np\n\n#1D Models\nmodels_1D = {\n Gaussian1DModel: {\n 'parameters': [1, 0, 1],\n 'x_values': [0, np.sqrt(2), -np.sqrt(2)],\n 'y_values': [1.0, 0.367879, 0.367879],\n 'x_lim': [-10, 10],\n 'integral': np.sqrt(2 * np.pi)\n },\n\n Sine1DModel: {\n 'parameters': [1, 0.1],\n 'x_values': [0, 2.5],\n 'y_values': [0, 1],\n 'x_lim': [-10, 10],\n 'integral': 0\n },\n\n Box1DModel: {\n 'parameters': [1, 0, 10],\n 'x_values': [-5, 5, 0, -10, 10],\n 'y_values': [0.5, 0.5, 1, 0, 0],\n 'x_lim': [-10, 10],\n 'integral': 10\n },\n\n Linear1DModel: {\n 'parameters': [1, 0],\n 'x_values': [0, np.pi, 42, -1],\n 'y_values': [0, np.pi, 42, -1],\n 'x_lim': [-10, 10],\n 'integral': 0\n },\n\n Lorentz1DModel: {\n 'parameters': [1, 0, 1],\n 'x_values': [0, -1, 1, 0.5, -0.5],\n 'y_values': [1., 0.2, 0.2, 0.5, 0.5],\n 'x_lim': [-10, 10],\n 'integral': 1\n },\n\n MexicanHat1DModel: {\n 'parameters': [1, 0, 1],\n 'x_values': [0, 1, -1, 3, -3],\n 'y_values': [1.0, 0.0, 0.0, -0.088872, -0.088872],\n 'x_lim': [-20, 20],\n 'integral': 0\n },\n\n Trapezoid1DModel: {\n 'parameters': [1, 0, 2, 1],\n 'x_values': [0, 1, -1, 1.5, -1.5, 2, 2],\n 'y_values': [1, 1, 1, 0.5, 0.5, 0, 0],\n 'x_lim': [-10, 10],\n 'integral': 3\n },\n\n Const1DModel: {\n 'parameters': [1],\n 'x_values': [-1, 1, np.pi, -42., 0],\n 'y_values': [1, 1, 1, 1, 1],\n 'x_lim': [-10, 10],\n 'integral': 20\n },\n\n Beta1DModel: {\n 'parameters': [1, 0, 1, 2],\n 'x_values': [0, 1, -1, 3, -3],\n 'y_values': [1.0, 0.25, 0.25, 0.01, 0.01],\n 'x_lim': [-10, 10],\n 'integral': 1,\n 'deriv_parameters': [23.4, 1.2, 2.1, 2.3],\n 'deriv_initial': [10, 1, 1, 1]\n },\n\n PowerLaw1DModel: {\n 'parameters': [1, 1, 2],\n 'constraints': {'fixed': {'x_0': True}},\n 'x_values': [1, 10, 100],\n 'y_values': [1.0, 0.01, 
0.0001],\n 'x_lim': [1, 10],\n 'log_fit': True,\n 'integral': 0.99\n },\n\n BrokenPowerLaw1DModel: {\n 'parameters': [1, 1, 2, 3],\n 'constraints': {'fixed': {'x_break': True}},\n 'x_values': [0.1, 1, 10, 100],\n 'y_values': [1e2, 1.0, 1e-3, 1e-6],\n 'x_lim': [0.1, 100],\n 'log_fit': True\n },\n\n ExponentialCutoffPowerLaw1DModel: {\n 'parameters': [1, 1, 2, 3],\n 'constraints': {'fixed': {'x_0': True}},\n 'x_values': [0.1, 1, 10, 100],\n 'y_values': [9.67216100e+01, 7.16531311e-01, 3.56739933e-04,\n 3.33823780e-19],\n 'x_lim': [0.01, 100],\n 'log_fit': True\n },\n\n LogParabola1DModel: {\n 'parameters': [1, 2, 3, 0.1],\n 'constraints': {'fixed': {'x_0': True}},\n 'x_values': [0.1, 1, 10, 100],\n 'y_values': [3.26089063e+03, 7.62472488e+00, 6.17440488e-03,\n 1.73160572e-06],\n 'x_lim': [0.1, 100],\n 'log_fit': True\n },\n\n Polynomial1DModel: {\n 'parameters': {'degree': 2, 'c0': 1., 'c1': 1., 'c2': 1.},\n 'x_values': [1, 10, 100],\n 'y_values': [3, 111, 10101],\n 'x_lim': [-3, 3]\n }\n}\n\n\n#2D Models\nmodels_2D = {\n Gaussian2DModel: {\n 'parameters': [1, 0, 0, 1, 1],\n 'constraints': {'fixed': {'theta': True}},\n 'x_values': [0, np.sqrt(2), -np.sqrt(2)],\n 'y_values': [0, np.sqrt(2), -np.sqrt(2)],\n 'z_values': [1, 1. / np.exp(1) ** 2, 1. / np.exp(1) ** 2],\n 'x_lim': [-10, 10],\n 'y_lim': [-10, 10],\n 'integral': 2 * np.pi,\n 'deriv_parameters': [137., 5.1, 5.4, 1.5, 2., np.pi/4],\n 'deriv_initial': [10, 5, 5, 4, 4, .5]\n },\n\n Const2DModel: {\n 'parameters': [1],\n 'x_values': [-1, 1, np.pi, -42., 0],\n 'y_values': [0, 1, 42, np.pi, -1],\n 'z_values': [1, 1, 1, 1, 1],\n 'x_lim': [-10, 10],\n 'y_lim': [-10, 10],\n 'integral': 400\n },\n\n Box2DModel: {\n 'parameters': [1, 0, 0, 10, 10],\n 'x_values': [-5, 5, -5, 5, 0, -10, 10],\n 'y_values': [-5, 5, 0, 0, 0, -10, 10],\n 'z_values': [0.25, 0.25, 0.5, 0.5, 1, 0, 0],\n 'x_lim': [-10, 10],\n 'y_lim': [-10, 10],\n 'integral': 100\n },\n\n MexicanHat2DModel: {\n 'parameters': [1, 0, 0, 1],\n 'x_values': [0, 0, 0, 0, 0, 1, -1, 3, -3],\n 'y_values': [0, 1, -1, 3, -3, 0, 0, 0, 0],\n 'z_values': [1.0, 0.303265, 0.303265, -0.038881, -0.038881,\n 0.303265, 0.303265, -0.038881, -0.038881],\n 'x_lim': [-10, 11],\n 'y_lim': [-10, 11],\n 'integral': 0\n },\n\n TrapezoidDisk2DModel: {\n 'parameters': [1, 0, 0, 1, 1],\n 'x_values': [0, 0.5, 0, 1.5],\n 'y_values': [0, 0.5, 1.5, 0],\n 'z_values': [1, 1, 0.5, 0.5],\n 'x_lim': [-3, 3],\n 'y_lim': [-3, 3]\n },\n\n AiryDisk2DModel: {\n 'parameters': [1, 0, 0, 1],\n 'x_values': [0, 1, -1, -0.5, -0.5],\n 'y_values': [0, -1, 0.5, 0.5, -0.5],\n 'z_values': [1, 0.0033517, 6.2087723e-7, 0.0093876119, 0.0093876119],\n 'x_lim': [-10, 10],\n 'y_lim': [-10, 10],\n 'requires_scipy': True\n },\n\n Beta2DModel: {\n 'parameters': [1, 0, 0, 1, 2],\n 'x_values': [0, 1, -1, 3, -3],\n 'y_values': [0, -1, 3, 1, -3],\n 'z_values': [1.0, 0.111111, 0.008264, 0.008264, 0.00277],\n 'x_lim': [-3, 3],\n 'y_lim': [-3, 3]\n },\n\n Polynomial2DModel: {\n 'parameters': {'degree': 1, 'c0_0': 1., 'c1_0': 1., 'c0_1': 1.},\n 'x_values': [1, 2, 3],\n 'y_values': [1, 3, 2],\n 'z_values': [3, 6, 6],\n 'x_lim': [1, 100],\n 'y_lim': [1, 100]\n },\n\n Disk2DModel: {\n 'parameters': [1, 0, 0, 5],\n 'x_values': [-5, 5, -5, 5, 0, -10, 10],\n 'y_values': [-5, 5, 0, 0, 0, -10, 10],\n 'z_values': [0, 0, 1, 1, 1, 0, 0],\n 'x_lim': [-10, 10],\n 'y_lim': [-10, 10],\n 'integral': np.pi * 5 ** 2\n },\n\n Ring2DModel: {\n 'parameters': [1, 0, 0, 5, 5],\n 'x_values': [-5, 5, -5, 5, 0, -10, 10],\n 'y_values': [-5, 5, 0, 0, 0, -10, 10],\n 'z_values': [1, 1, 1, 
1, 0, 0, 0],\n 'x_lim': [-10, 10],\n 'y_lim': [-10, 10],\n 'integral': np.pi * (10 ** 2 - 5 ** 2)\n }\n}\n","sub_path":"astropy/modeling/tests/example_models.py","file_name":"example_models.py","file_ext":"py","file_size_in_byte":8576,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"637410990","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue May 15 12:07:09 2018\n\n@author: wuttinun_r\n\"\"\"\nimport mysql.connector\nimport pandas as pann\ndata = pann.read_html('https://marketdata.set.or.th/mkt/sectorquotation.do?sector=SETHD&language=th&country=TH')\nprint(data[2])\nprint(len(data))\nname = data[2]\n#name1 = []\n#for i in range(len(name)):\n# name1.append(name[i][1])\ndef cre_tb(name):\n conn = mysql.connector.connect(host = 'localhost', user = 'root', password='',db='mike1')\n a = conn.cursor()\n sql = \"CREATE TABLE \"+name+\"(id INT PRIMARY KEY,date text, open text,max text,min text,close text,vol text,val text)\"\n a.execute(sql)\n conn.commit()\n conn.close()\n\n \n#print(type(name))\n#data1 = tuple(name)\n#print(name.iloc[1][0])\nna_stock = []\nfor i in range (len(name)):\n na_stock.append(name.iloc[i][0]) \nprint(na_stock)\n\nname_s =['AAV', 'ADVANC', 'AMATA', 'ANAN', 'AOT', 'AP',\n 'BA', 'BANPU', 'BBL', 'BCH', 'BCP', 'BCPG', 'BDMS',\n 'BEAUTY', 'BEC', 'BEM', 'BH', 'BIG', 'BJC', 'BLAND',\n 'BPP', 'BTS', 'CBG', 'CENTEL', 'CHG', 'CK', 'CKP', 'COM7',\n 'CPALL', 'CPF', 'CPN', 'DTAC', 'EA', 'EGCO', 'EPG', 'ESSO',\n 'GFPT', 'GGC', 'GLOBAL', 'GPSC', 'GUNKUL', 'HANA', 'HMPRO',\n 'INTUCH', 'IRPC', 'ITD', 'IVL', 'JMART', 'JWD', 'KBANK', 'KCE',\n 'KKP', 'KTB', 'KTC', 'LH', 'LPN', 'MAJOR', 'MC', 'MEGA', 'MINT',\n 'MONO', 'MTC', 'ORI', 'PSH', 'PSL', 'PTG', 'PTT', 'PTTEP', 'PTTGC',\n 'QH', 'ROBINS', 'SAWAD', 'SCB', 'SCC', 'SGP', 'SIRI', 'SPALI',\n 'SPRC', 'STA', 'STEC', 'SUPER', 'TASCO', 'TCAP', 'THAI',\n 'THCOM', 'TISCO', 'TKN', 'TMB', 'TOP', 'TPIPL', 'TPIPP',\n 'TTA', 'TU', 'TVO', 'UNIQ', 'UV', 'WHA', 'WHAUP', 'WORK']\n\nname_s2 = ['ADVANC', 'ANAN', 'AP', 'BA', 'BBL', 'BLAND',\n 'CPF', 'EGCO', 'HANA', 'INTUCH', 'KKP', 'KTB',\n 'KTC', 'LH', 'LPN', 'MC', 'PTT', 'PTTGC', 'QH',\n 'SCB', 'SCC', 'SIRI', 'SPALI', 'TASCO', 'TCAP',\n 'THCOM', 'TISCO', 'TMB', 'TU', 'TVO']\n#print(type(name_s))","sub_path":"R_02.py","file_name":"R_02.py","file_ext":"py","file_size_in_byte":2028,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"366512239","text":"# -*- coding: utf-8 -*-\nfrom django.core.management.base import BaseCommand, CommandError\nimport hashlib\nimport datetime, time\nimport os, json, urllib\nimport psycopg2\nimport urllib.request\nfrom core.models import *\nfrom shops.models import *\n\nclass Command(BaseCommand):\n\n\n\n\tdef handle(self, *args, **options):\n\t\tn = 0\n\t\tf = open('kuponi-promokodi', 'r')\n\t\tfor l in f:\n\t\t\tn += 1\n\t\t\turl = l.split(' ')[0].strip()\n\t\t\tif url[-1] != '/':\n\t\t\t\turl = url + '/'\n\t\t\tpath = urllib.parse.unquote(url.replace('http://cash4brands.ru', ''))\n\t\t\tprint(path)\n\t\t\tu = Urls(title = 'fixredirect_%s' % n, urltype = 8, func = 'redirect', params = '{\"link\":\"/kuponi-promokodi/\"}', template = '', path = path, active = True, sitemap = False)\n\t\t\tu.save()\n\n\t\tf.close()\n\t\terrorshops = [\n\t\t\t'/homebread-shop/',\n\t\t\t'/gold/',\n\t\t\t'/besmart/',\n\t\t\t'/jeksmo-skidka-promokod/',\n\t\t\t'/atlas/',\n\t\t]\n\t\tf = open('kuponi', 'r')\n\t\tfn = open('kuponin', 'w')\n\t\tfor l in f:\n\t\t\tn += 1\n\t\t\turl = 
l.split(' ')[0].strip()\n\t\t\tif url[-1] != '/':\n\t\t\t\turl = url + '/'\n\t\t\tpath = urllib.parse.unquote(url.replace('http://cash4brands.ru', ''))\n\t\t\tprint(path)\n\t\t\tshopurl = '/' + path.split('/')[1] + '/'\n\t\t\tprint(shopurl)\n\t\t\tif shopurl not in errorshops:\n\t\t\t\tu = Urls.objects.filter(path = path)\n\t\t\t\tif u.count() == 0:\n\t\t\t\t\tu = Urls(title = 'fixredirect_%s' % n, urltype = 8, func = 'redirect', params = '{\"link\":\"%s\"}' % shopurl, template = '', path = path, active = True, sitemap = False)\n\t\t\t\t\tu.save()\n\t\t\telse:\n\t\t\t\tfn.write(path + ' 500 \\n')\n\n\t\tf.close()\n\t\tfn.close()\n\t\t\n\t\t","sub_path":"core/management/commands/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1518,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"310129461","text":"from flask import Flask, render_template, request, redirect, flash, session\napp = Flask(__name__)\napp.secret_key = \"secret_key\"\n\n@app.route('/')\ndef index():\n return render_template('index.html')\n\n@app.route('/results', methods=['POST'])\ndef results():\n if len(request.form['name']) < 1 or len(request.form['comment']) < 1:\n flash('Fields cannot be empty!')\n return redirect('/')\n\n if len(request.form['comment']) > 120:\n flash('Comments cannot be longer than 120 characters')\n return redirect('/')\n\n flash('Thanks for your opinion it matters.')\n return render_template('results.html')\n\napp.run(debug=True)\n","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":650,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"160837188","text":"\nimport numpy as np\n\n\nclass RelaxTSPTW(object):\n ''' Handles constraints, objective and relaxation. '''\n\n def __init__(self, data):\n self.data = data\n\n def subtour_visit_times(self, subtour):\n ''' Return visit times for the subtour given travel times. '''\n current_time = 0\n times = [current_time]\n for from_location, to_location in zip(subtour, subtour[1:]):\n current_time = max(\n current_time + self.data.travel_times[from_location, to_location],\n self.data.time_windows[to_location][0])\n times.append(current_time)\n return tuple(times)\n\n def subtour_feasible(self, subtour):\n ''' Return if a subtour is feasible given the time windows. '''\n visit_times = self.subtour_visit_times(subtour)\n for location, visit_time in zip(subtour, visit_times):\n window_start, window_end = self.data.time_windows[location]\n if visit_time < window_start or visit_time > window_end:\n return False\n return True\n\n def subtour_complete(self, subtour):\n ''' Return if a subtour is complete. '''\n all_visited = list(sorted(set(subtour))) == list(range(self.data.num_locations))\n start_end = subtour[0] == 0 and subtour[-1] == 0\n return all_visited and start_end\n\n def relaxation(self, node):\n ''' Return relaxation value of a given subtour with exclusions\n on the next location. '''\n\n # relaxation of a complete node is its objective value\n if self.subtour_complete(node.subtour):\n return self.subtour_visit_times(node.subtour)[-1]\n\n # relaxation of an almost complete node... 
(shouldn't hit this)\n if self.subtour_complete(node.subtour + (0, )):\n return (\n self.subtour_visit_times(node.subtour)[-1] +\n self.data.travel_times[node.subtour[-1], 0])\n\n # relaxation added to fixed part\n remaining = tuple(\n location for location in range(self.data.num_locations)\n if location not in node.subtour)\n result = self.subtour_visit_times(node.subtour)[-1]\n\n # minimum travel time to remaining locations\n for location in remaining:\n if location in node.exclude_next:\n times = self.data.travel_times[\n location, [loc for loc in remaining if loc != location]]\n else:\n times = self.data.travel_times[location, (\n [loc for loc in remaining if loc != location] +\n [node.subtour[-1]])]\n result += np.min(times)\n\n # minimum return to depot distance\n times = self.data.travel_times[0, remaining]\n result += np.min(times)\n return result\n\n def complete(self, node):\n # does not check for infeasiblity!\n return self.subtour_complete(node.subtour)\n\n def feasible(self, node):\n return self.subtour_feasible(node.subtour)\n\n def objective(self, node):\n return self.subtour_visit_times(node.subtour)[-1]\n","sub_path":"branch_opt/relax_tsptw.py","file_name":"relax_tsptw.py","file_ext":"py","file_size_in_byte":3099,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"168994101","text":"import folium\nimport pandas\n\ndata=pandas.read_csv(\"Volcanoes_USA.txt\")\nlat=list(data[\"LAT\"])\nlon=list(data[\"LON\"])\nname=list(data[\"NAME\"])\nel=list(data[\"ELEV\"])\n\ndef colourizer(elevation):\n if elevation < 1000:\n return 'green'\n elif 1000<= elevation < 3000:\n return 'orange'\n else:\n return 'red'\n\nmap=folium.Map(location=[38,-99])\nfg1=folium.FeatureGroup(\"Volcanoes Location in USA\")\nfg2=folium.FeatureGroup(\"Polygon Layer\")\nfor lt, ln, name, el in zip(lat,lon,name,el):\n #fg1.add_child(folium.Marker(location=[lt,ln], popup=name, icon=folium.Icon(color=colourizer(el))))\n fg1.add_child(folium.CircleMarker(location=[lt,ln], radius=6, popup=name, tooltip=str(el)+'m',\n fill_color=colourizer(el), color='grey',fill_opacity=0.7))\n\nfg2.add_child(folium.GeoJson(data=open(\"world.json\",'r',encoding=\"UTF-8-sig\").read(), \nstyle_function=lambda x:{'fillColor':'green' if x['properties']['POP2005'] < 10000000 \nelse 'orange' if 10000000 <= x['properties']['POP2005'] < 20000000 else 'red'}))\n\n\nmap.add_child(fg1)\nmap.add_child(fg2)\nmap.add_child(folium.LayerControl())\nmap.save(\"Map1.html\")","sub_path":"FoliumTut/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1121,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"232714606","text":"# -*- coding: utf-8 -*-\n\n# PyCharm Python 3.6\n#by Lex Li\n#on 2017-07-14\n\nfrom PyQt5.QtWidgets import *\nfrom PyQt5.QtCore import *\nfrom PyQt5.QtGui import *\nfrom PyQt5 import uic\n\nimport sys\nimport os.path\n# import logging\nimport logging.config\nfrom configparser import ConfigParser\n\n\n# Define function to import external files when using PyInstaller.\n# def resource_path(relative_path):\n# try:\n# base_path = sys._MEIPASS\n# except Exception:\n# base_path = os.path.abspath('.')\n#\n# print('base_path -> %s, relative_path -> %s' % (base_path, relative_path))\n# return os.path.join(base_path, relative_path)\n\n# Works well!\ndef resource_path(relative_path):\n \"\"\" Get absolute path to resource, works for dev and for PyInstaller \"\"\"\n base_path = getattr(sys, '_MEIPASS', 
os.path.dirname(os.path.abspath(__file__)))\n    print(os.path.dirname(os.path.abspath(__file__)))\n    return os.path.join(base_path, relative_path)\n################################\n# logging.config.fileConfig(resource_path('config/logging.ini'))\n# logging.config.fileConfig(resource_path('logging.ini'))\n# logging.config.fileConfig('logging.ini')\nlog = logging.getLogger(__name__)\nlog.info(\"I am logging for testing!!!!!!!!!!!\")\n\ntry:\n    logging.config.fileConfig(resource_path('config/logging.ini'))\nexcept Exception as e:\n    # Writing a file directly at runtime works fine, which suggests the error probably happens in logging.config.fileConfig()!!!\n    test_file = open(resource_path('test_file.txt'), 'w')\n    test_file.write('test file: created to confirm that loading the logging config file raised an error!')\n    test_file.flush()\n    test_file.close()\n################################\n\n\nclass AppWindow(QMainWindow):\n\n    def __init__(self, parent=None):\n        super(AppWindow, self).__init__(parent)\n\n        uic.loadUi(resource_path('pyinstaller_demo.ui'), self)\n\n        columns = self.load_config()\n\n        values = [[1, 'Lex', 'Male', 20], [2, 'Li', 'Male', 23]]\n\n        self.handle_ui_action()\n        self.tableWidget.setRowCount(3)\n        self.tableWidget.setColumnCount(len(columns))\n        self.tableWidget.setHorizontalHeaderLabels(columns)\n\n        row_no = 0\n        for row_value in values:\n            column_no = 0\n            for value in row_value:\n                item = QTableWidgetItem(str(value))\n                self.tableWidget.setItem(row_no, column_no, item)\n                column_no += 1\n            row_no += 1\n\n\n    def handle_ui_action(self):\n        pass\n\n    def load_config(self):\n        config_file = open(resource_path('config.ini'))\n        ini_parser = ConfigParser()\n        ini_parser.read_file(config_file)\n        config_content_sections = ini_parser.sections()\n\n        columns = None\n\n        for section in config_content_sections:\n            # log.debug('section -> %s' % section)\n            options = ini_parser.options(section)\n            for option in options:\n                option_value = ini_parser.get(section, option)\n                # log.debug('option -> %s, value -> %s' % (option, option_value))\n                print('option->%s' % option)\n                print('option_value->%s' % option_value)\n                columns = option_value.split(',')\n\n        print('columns->%s' % columns)\n        return columns\n\n\nif __name__ == '__main__':\n    app = QApplication(sys.argv)\n    app_window = AppWindow()\n    app_window.show()\n    sys.exit(app.exec())\n","sub_path":"pyinstaller_demo/demo1/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3431,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"104973440","text":"import game\nfrom menu import menu as menu\n\nG = game.Game()\nplayer1 = \"X\"\nplayer2 = \"O\"\n\n\ndef startGame():\n    G.newGame()\n\n\ndef loadGame():\n    G.loadGame()\n\n\ndef showHelp():\n    print(\"\"\"Help\"\"\")\n\n\ndef quit():\n    print(\"Goodbye....\")\n    raise SystemExit\n\n\ndef executeChoice(choice):\n    '''executeChoice(int) -> None\n    Execute whichever option the user selected.\n    If the choice produces a valid game then\n    play the game until it completes.'''\n    dispatch = [startGame, loadGame, showHelp, quit]\n    dispatch[choice - 1]()\n    playGame()\n\n\ndef printGame():\n    gg = G.getGame()\n    print(gg[0], gg[1], gg[2], \"\\n\", gg[3], gg[4],\n          gg[5], \"\\n\", gg[6], gg[7], gg[8], sep='')\n\n\ndef playGame():\n    result = \"\"\n    while not result:\n        printGame()\n        choice = input(\"Cell[1-9 or q to quit]: \")\n        if choice.lower()[0] == 'q':\n            save = input(\"Save game before quitting?[y/n] \")\n            if save.lower()[0] == 'y':\n                G.saveGame()\n            quit()\n        else:\n            try:\n                cell = int(choice) - 1\n                if not (0 <= cell <= 8):  # check range\n                    raise ValueError\n            except ValueError:\n                print(\"Choose a 
number between 1 and 9 or 'q' to quit \")\n continue\n try:\n G.move(player1, cell)\n result = G.winner()\n except ValueError:\n print(\"Choose an empty cell \")\n continue\n if not result:\n G.move(player2)\n result = G.winner()\n if not result:\n continue\n elif result == 'Draw':\n printGame()\n print(\"Its a draw\")\n else:\n printGame()\n print(\"Winner is\", result, \"\\n\")\n\n\ndef main():\n menulist = ['New Game', 'Load Game', 'Help', 'Quit']\n while True:\n choice = menu(menulist)\n executeChoice(choice)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"archive/tic/tic.py","file_name":"tic.py","file_ext":"py","file_size_in_byte":2022,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"598685825","text":"#!/usr/bin/env python \n# -*- coding:utf-8 _*- \n# Author: Wengs\n# Time : 2/20/2019 2:04 PM \n# File : sandwich.py\n# IDE : PyCharm\n\n# 8-12\n\n\ndef make_sandwich(*toppings):\n print(\"\\nMaking a sandwich with following toppings:\")\n for topping in toppings:\n print(\"- \" + topping.title())\n\n\nmake_sandwich('apple', 'pear', 'beef')\nmake_sandwich('apple', 'water', 'pear', 'beef')\nmake_sandwich('pear', 'beef')\n","sub_path":"chapter_8/sandwich.py","file_name":"sandwich.py","file_ext":"py","file_size_in_byte":418,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"275615075","text":"\n# Train / Test CNN on CIFAR10\n# Optionally scripts uses Layca algorithm from \n# \"Layer rotation: a surprisingly powerful indicator of \n# generalization in deep networks?\"\n# https://arxiv.org/pdf/1806.01603v2.pdf\n\nimport argparse\nimport traceback\nfrom pathlib import Path\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\n\nimport ignite\nfrom ignite.engine import Events, Engine, create_supervised_evaluator\nfrom ignite.metrics import Accuracy, Loss, RunningAverage\nfrom ignite.utils import convert_tensor\n\nfrom ignite.contrib.handlers import TensorboardLogger, ProgressBar\nfrom ignite.contrib.handlers.tensorboard_logger import OutputHandler as tbOutputHandler, \\\n OptimizerParamsHandler as tbOptimizerParamsHandler\n\nfrom ignite.contrib.handlers import PiecewiseLinear\n\nimport mlflow\n\nfrom utils import set_seed, get_train_test_loaders, get_model\nfrom handlers.layer_rotation import LayerRotationStatsHandler\nfrom layca_optims.sgd import LaycaSGD\n\n\ndef run(output_path, config):\n\n device = \"cuda\"\n batch_size = config['batch_size']\n\n train_loader, test_loader = get_train_test_loaders(dataset_name=config['dataset'],\n path=config['data_path'],\n batch_size=batch_size,\n num_workers=config['num_workers'])\n\n model = get_model(config['model'])\n model = model.to(device)\n \n optim_fn = optim.SGD\n if config['with_layca']:\n optim_fn = LaycaSGD\n\n optimizer = optim_fn(model.parameters(), lr=0.0,\n momentum=config['momentum'],\n weight_decay=config['weight_decay'],\n nesterov=True)\n criterion = nn.CrossEntropyLoss()\n\n le = len(train_loader)\n milestones_values = [(le * m, v) for m, v in config['lr_milestones_values']]\n scheduler = PiecewiseLinear(optimizer, \"lr\", milestones_values=milestones_values)\n\n def _prepare_batch(batch, device, non_blocking):\n x, y = batch\n return (convert_tensor(x, device=device, non_blocking=non_blocking),\n convert_tensor(y, device=device, non_blocking=non_blocking))\n\n def process_function(engine, batch):\n \n x, y = _prepare_batch(batch, device=device, non_blocking=True)\n \n model.train()\n y_pred = 
model(x)\n loss = criterion(y_pred, y)\n\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n return loss.item()\n\n trainer = Engine(process_function)\n\n trainer.add_event_handler(Events.ITERATION_STARTED, scheduler)\n\n RunningAverage(output_transform=lambda x: x, epoch_bound=False).attach(trainer, 'batchloss')\n\n ProgressBar(persist=True, bar_format=\"\").attach(trainer,\n event_name=Events.EPOCH_STARTED,\n closing_event_name=Events.COMPLETED)\n\n tb_logger = TensorboardLogger(log_dir=output_path)\n tb_logger.attach(trainer,\n log_handler=tbOutputHandler(tag=\"train\", metric_names='all'),\n event_name=Events.ITERATION_COMPLETED)\n tb_logger.attach(trainer,\n log_handler=tbOptimizerParamsHandler(optimizer, param_name=\"lr\"),\n event_name=Events.ITERATION_STARTED)\n\n tb_logger.attach(trainer, \n log_handler=LayerRotationStatsHandler(model),\n event_name=Events.EPOCH_STARTED) \n\n metrics = {\n \"accuracy\": Accuracy(),\n }\n\n evaluator = create_supervised_evaluator(model, metrics=metrics, device=device, non_blocking=True)\n train_evaluator = create_supervised_evaluator(model, metrics=metrics, device=device, non_blocking=True)\n \n def run_validation(engine, val_interval):\n if (engine.state.epoch - 1) % val_interval == 0:\n train_evaluator.run(train_loader)\n evaluator.run(test_loader)\n\n trainer.add_event_handler(Events.EPOCH_COMPLETED, run_validation, val_interval=2)\n trainer.add_event_handler(Events.COMPLETED, run_validation, val_interval=1)\n\n tb_logger.attach(train_evaluator,\n log_handler=tbOutputHandler(tag=\"train\",\n metric_names='all',\n another_engine=trainer),\n event_name=Events.COMPLETED)\n\n tb_logger.attach(evaluator,\n log_handler=tbOutputHandler(tag=\"test\",\n metric_names='all',\n another_engine=trainer),\n event_name=Events.COMPLETED)\n\n def mlflow_batch_metrics_logging(engine, tag):\n step = trainer.state.iteration\n for name, value in engine.state.metrics.items():\n mlflow.log_metric(\"{} {}\".format(tag, name), value, step=step)\n\n def mlflow_val_metrics_logging(engine, tag):\n step = trainer.state.epoch\n for name in metrics.keys():\n value = engine.state.metrics[name]\n mlflow.log_metric(\"{} {}\".format(tag, name), value, step=step)\n\n trainer.add_event_handler(Events.ITERATION_COMPLETED, mlflow_batch_metrics_logging, \"train\")\n train_evaluator.add_event_handler(Events.COMPLETED, mlflow_val_metrics_logging, \"train\")\n evaluator.add_event_handler(Events.COMPLETED, mlflow_val_metrics_logging, \"test\")\n\n trainer.run(train_loader, max_epochs=config['num_epochs'])\n tb_logger.close()\n\n\nif __name__ == \"__main__\":\n\n parser = argparse.ArgumentParser(\"Training a CNN on a dataset\")\n \n parser.add_argument('dataset', type=str, choices=['CIFAR10', 'CIFAR100'],\n help=\"Training/Testing dataset\")\n\n parser.add_argument('network', type=str, help=\"CNN to train\")\n\n parser.add_argument('--params', type=str,\n help='Override default configuration with parameters: '\n 'data_path=/path/to/dataset;batch_size=64;num_workers=12 ...')\n\n args = parser.parse_args()\n\n dataset_name = args.dataset \n network_name = args.network \n \n print(\"Train {} on {}\".format(network_name, dataset_name)) \n print(\"- PyTorch version: {}\".format(torch.__version__))\n print(\"- Ignite version: {}\".format(ignite.__version__))\n \n assert torch.cuda.is_available()\n torch.backends.cudnn.benchmark = True\n print(\"- CUDA version: {}\".format(torch.version.cuda))\n\n batch_size = 512\n num_epochs = 24\n config = {\n \"dataset\": dataset_name,\n 
\"data_path\": \".\",\n\n \"model\": network_name,\n\n \"momentum\": 0.9,\n \"weight_decay\": 1e-4,\n \"batch_size\": batch_size,\n \"num_workers\": 10,\n\n \"num_epochs\": num_epochs,\n\n \"lr_milestones_values\": [(0, 0.0), (5, 1.0), (num_epochs, 0.0)],\n \n \"with_layca\": False # Apply Layca algorithm from the paper\n }\n\n # Override config:\n if args.params:\n for param in args.params.split(\";\"):\n key, value = param.split(\"=\")\n if \"/\" not in value:\n value = eval(value)\n config[key] = value\n\n print(\"\\n\")\n print(\"Configuration:\")\n for key, value in config.items():\n print(\"\\t{}: {}\".format(key, value))\n print(\"\\n\")\n\n mlflow.log_params(config)\n\n # dump all python files to reproduce the run\n mlflow.log_artifacts(Path(__file__).parent.as_posix())\n\n # Assume artifact uri is a filesystem\n output_path = mlflow.get_artifact_uri()\n try:\n run(output_path, config)\n except Exception as e:\n traceback.print_exc()\n mlflow.log_param(\"run status\", \"FAILED\")\n exit(1)\n\n mlflow.log_param(\"run status\", \"OK\")\n","sub_path":"code/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7834,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"199364354","text":"import psi4\nimport numpy as np\nfrom uhf import UHF\nimport scipy.linalg as la\nimport time\n\nclass MP2:\n\n def __init__(self, uhf):\n self.e = np.sort(uhf.e)\n self.C = uhf.C[:,uhf.e.argsort()]\n self.nocc = uhf.nocc\n self.ntot = uhf.ntot\n\n df = uhf.df_basis\n basis = uhf.basis\n mints = psi4.core.MintsHelper(basis) \n zero = psi4.core.BasisSet.zero_ao_basis_set()\n\n g = spin_block_tei(uhf.I) \n self.gao = g.transpose(0,2,1,3)-g.transpose(0,2,3,1) \n\n J = mints.ao_eri(df,zero,df,zero).to_array()\n J = np.squeeze(J)\n self.J_prime = la.inv(la.sqrtm(J)) #where J_prime = J**(-1/2)\n\n pqP = mints.ao_eri(basis,basis,zero,df).to_array()\n pqP = spin_block_tei_df(pqP)\n pqP = np.squeeze(pqP)\n self.b_pqP = np.einsum('pqP,QP->pqQ',pqP,self.J_prime)\n \n self.E = 0.0 \n def get_energy(self): \n t0 = time.time()\n C, gao, nocc, ntot, e, E = self.C, self.gao, self.nocc, self.ntot, self.e, self.E\n # transform integrals\n gmo = int_trans_2(gao,C) \n # get energy\n for i in range(nocc):\n e_i = e[i]\n for j in range(i,nocc):\n e_j = e[j]\n for a in range(nocc,ntot):\n for b in range(a,ntot):\n E += (gmo[i,j,a,b]**2)/(e_i+e_j-e[a]-e[b])\n t1 = time.time()\n print('The MP2 correlation energy is {:20.14f}'.format(E))\n print('MP2 took {:7.5f} seconds'.format(t1-t0))\n self.E = E\n return E \n \n def get_energy_df(self): \n t2 = time.time()\n E_df = 0.0\n C, nocc, ntot, e, E = self.C, self.nocc, self.ntot, self.e, self.E\n b_iaP = int_trans_df(self.b_pqP,C) \n\n e_ab = e[nocc:]\n e_vv = e_ab.reshape(-1, 1) + e_ab\n\n for i in range(nocc):\n e_i = e[i]\n for j in range(i,nocc):\n e_j = e[j]\n \n e_denom = 1.0 / (e_i + e_j - e_vv)\n\n gmo_df_ab = np.einsum('aP, bP ->ab', b_iaP[i,nocc:,:],b_iaP[j,nocc:,:])\n E_df += np.einsum('ab,ab,ab->', gmo_df_ab, gmo_df_ab - gmo_df_ab.T, e_denom) \n t3 = time.time()\n print('The DF-MP2 correlation energy is {:20.14f}'.format(E_df))\n print('DF error: {:20.14f}'.format(E-E_df))\n print('DF-MP2 took {:7.5f} seconds'.format(t3-t2))\ndef spin_block_tei(gao):\n I = np.eye(2)\n gao = np.kron(I, gao)\n return np.kron(I, gao.T)\n\ndef spin_block_tei_df(gao):\n I = np.eye(2)\n return np.kron(I,gao.T).T\n \ndef int_trans_1(gao, C):\n return np.einsum('pqrs, pP, qQ, rR, sS -> PQRS', gao, C, C, C, C)\n\ndef int_trans_2(gao, 
C):\n\n return np.einsum('pQRS, pP -> PQRS',\n np.einsum('pqRS, qQ -> pQRS',\n np.einsum('pqrS, rR -> pqRS',\n np.einsum('pqrs, sS -> pqrS', gao, C),C),C),C)\n\ndef int_trans_df(b_pqP, C):\n a = np.einsum('pqP,pi -> iqP',b_pqP,C)\n return np.einsum('iqP,qa -> iaP',a,C)\n \nif __name__ == '__main__':\n uhf = UHF('Options.ini')\n uhf.get_energy()\n mp2 = MP2(uhf)\n mp2.get_energy()\n mp2.get_energy_df()\n psi4.set_options({'basis':'cc-pvdz',\n 'scf_type': 'pk',\n 'MP2_type' : 'conv',\n 'puream' : False,\n 'reference': 'uhf',\n 'guess' : 'core',\n 'e_convergence' : 1e-10})\n #psi4.energy('mp2')\n","sub_path":"DF_MP2/df_mp2_jt.py","file_name":"df_mp2_jt.py","file_ext":"py","file_size_in_byte":3490,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"595763777","text":"import sqlite3\nfrom typing import Tuple, Union\nfrom contextlib import closing\n\n\n# noinspection SqlResolve\nclass DataBaseLibrary(object):\n \"\"\"\n A test library providing keywords for SQLite database operations.\n\n ``DataBaseLibrary`` is Robot Framework's custom library that provides a\n set of keywords for working with SQLite database of the specific test application.\n\n The library contains following keywords:\n\n | = Keyword Name = | = Comment = |\n | `Connect To The Database | Creates a connection to specific database\n | `Close Database Connection` | Closes opened connection\n | `Get Id And Balance of Client With Positive Balance` | Returns ID and balance of the client with positive\n | balance if the client exists, creates the client\n | with positive balance and returns one's ID and\n | balance otherwise.\n\n | `Get Id And Balance of Existed Client With Positive Balance` | Returns ID and balance of the client if the client\n | with positive balance exists, None otherwise.\n\n | `Add New Client` | Inserts into the database new entries with the\n | client name and one's balance and returns client\n | ID and balance value.\n\n | `Get The Client Balance` | Returns balance value of the client with specific\n | ID.\n \"\"\"\n\n ROBOT_LIBRARY_SCOPE = 'TEST SUITE'\n\n connection: sqlite3.Connection\n cursor: sqlite3.Cursor\n\n def connect_to_the_database(self, db_file_name: str) -> None:\n \"\"\"\n The method creates a connection to the sqlite database file.\n :param db_file_name: str, should contain full or relative file path\n :return: None\n \"\"\"\n self.connection = sqlite3.connect(database=db_file_name)\n self.cursor = self.connection.cursor()\n\n def close_database_connection(self):\n \"\"\"\n Teardown method which closes opened connection to database.\n :return: None\n \"\"\"\n with closing(self.connection):\n pass\n\n def get_id_and_balance_of_client_with_positive_balance(self) -> Union[Tuple, None]:\n \"\"\"\n The method returns ID and balance value of the client with positive balance. 
If client with positive\n balance doesn't exist the corresponding entry will be created.\n :return: tuple (client_id, balance_value) or None\n \"\"\"\n\n data = self.get_id_and_balance_of_existed_client_with_positive_balance()\n return data if data else self.add_new_client('John Doe', 10)\n\n def get_id_and_balance_of_existed_client_with_positive_balance(self) -> Union[Tuple, None]:\n \"\"\"\n The method returns ID and balance value of the client with positive balance if the corresponding entries exist,\n None otherwise.\n :return: tuple (client_id, balance_value) or None\n \"\"\"\n response = self.cursor.execute('SELECT CLIENTS_CLIENT_ID, BALANCE FROM BALANCES WHERE BALANCE > 0 LIMIT 1')\n data = response.fetchall()\n return data[0] if data else None\n\n def add_new_client(self, client_name: str, client_balance: int) -> Tuple:\n \"\"\"\n The method creates new entries in the database with the client name and balance value and checks\n inserted data for existence.\n :param client_name: str\n :param client_balance: int\n :return: tuple\n \"\"\"\n self.cursor.execute(f\"INSERT INTO CLIENTS (CLIENT_NAME) VALUES ('{client_name}')\")\n self.cursor.execute(f'INSERT INTO BALANCES (BALANCE) VALUES ({client_balance})')\n self.connection.commit()\n\n response = self.cursor.execute(\n f\"\"\"\n SELECT C.CLIENT_ID, B.BALANCE\n FROM CLIENTS C JOIN BALANCES B\n WHERE C.CLIENT_NAME = '{client_name}' AND B.BALANCE = {client_balance}\n \"\"\"\n )\n data = response.fetchall()\n if data:\n return data[0]\n else:\n raise ValueError(\"Couldn't get ID and BALANCE of a new client. Something went wrong during inserting data\"\n \" into the database.\")\n\n def get_the_client_balance(self, client_id: int) -> int:\n \"\"\"\n The method returns balance value of the client with specific ID. Raises Lookup error if the wanted entry\n doesn't exist.\n :param client_id: int\n :return: int\n \"\"\"\n response = self.cursor.execute(f'SELECT BALANCE FROM BALANCES WHERE CLIENTS_CLIENT_ID={client_id}')\n data = response.fetchall()\n if data:\n return data[0][0]\n else:\n raise LookupError(f\"Couldn't get balance of the client with ID={client_id}. 
No such entry.\")\n","sub_path":"Library/DataBaseLibrary.py","file_name":"DataBaseLibrary.py","file_ext":"py","file_size_in_byte":5282,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"583319520","text":"import sys\nimport util\nimport ops\n\ntry:\n\n    util.validate_user_input()\n\n    source_tables = util.get_source_tables()\n\n    target_table = ops.operate(source_tables, sys.argv[1])\n\n    util.write_table_to_file(sys.argv[4], target_table)\n\nexcept Exception as err:\n    print(err)\n","sub_path":"Ex1.py","file_name":"Ex1.py","file_ext":"py","file_size_in_byte":276,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"654340382","text":"import turtle\n\nturtle.shape('turtle')\nturtle.speed(\"fast\")\nturtle.left(90)\n\n\ndef drawTwoWings(radius, step):\n    '''\n    Draws a pair of butterfly wings\n\n    :param radius: radius of one wing\n    :param step: step so that the wings gradually grow in radius\n    :return: returns nothing\n    '''\n    turtle.circle(radius+step)\n    turtle.circle(-(radius+step))\n\n\ndef drawButterfly(count, size, step):\n    '''\n    Draws a butterfly\n\n    :param count: number of wing pairs\n    :param size: wing size\n    :param step: step between wing sizes\n    '''\n    for i in range(count):\n        drawTwoWings(size, i*step)\n\n\ndrawButterfly(5, 20, 10)\n\nturtle.done()\n","sub_path":"Lesson-1/ex-11-butterfly.py","file_name":"ex-11-butterfly.py","file_ext":"py","file_size_in_byte":818,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"578757966","text":"import tensorflow as tf\nimport numpy as np\nfrom konlpy.tag import Okt\nimport pandas as pd\nimport pickle\n\nconverter = Okt()\nlabels = pd.read_csv('ML/label.csv')\n\nclass MultiHeadAttention(tf.keras.layers.Layer):\n    def __init__(self, embedding_dim, num_heads=8,** kwargs):\n        super(MultiHeadAttention, self).__init__()\n        self.embedding_dim = embedding_dim # d_model\n        self.num_heads = num_heads\n\n        assert embedding_dim % self.num_heads == 0\n\n        self.projection_dim = embedding_dim // num_heads\n        self.query_dense = tf.keras.layers.Dense(embedding_dim)\n        self.key_dense = tf.keras.layers.Dense(embedding_dim)\n        self.value_dense = tf.keras.layers.Dense(embedding_dim)\n        self.dense = tf.keras.layers.Dense(embedding_dim)\n\n    def scaled_dot_product_attention(self, query, key, value):\n        matmul_qk = tf.matmul(query, key, transpose_b=True)\n        depth = tf.cast(tf.shape(key)[-1], tf.float32)\n        logits = matmul_qk / tf.math.sqrt(depth)\n        attention_weights = tf.nn.softmax(logits, axis=-1)\n        output = tf.matmul(attention_weights, value)\n        return output, attention_weights\n\n    def split_heads(self, x, batch_size):\n        x = tf.reshape(x, (batch_size, -1, self.num_heads, self.projection_dim))\n        return tf.transpose(x, perm=[0, 2, 1, 3])\n    \n    def get_config(self) :\n        config = super().get_config().copy()\n        config.update({\n            'embedding_dim' : self.embedding_dim,\n            'num_heads' :self.num_heads,\n            'projection_dim' : self.projection_dim,\n            'query_dense' :self.query_dense,\n            'key_dense' : self.key_dense,\n            'value_dense' :self.value_dense,\n            'dense' :self.dense,\n        })\n        return config\n\n    def call(self, inputs):\n        # x.shape = [batch_size, seq_len, embedding_dim]\n        batch_size = tf.shape(inputs)[0]\n\n        # (batch_size, seq_len, embedding_dim)\n        query = self.query_dense(inputs)\n        key = self.key_dense(inputs)\n        value = self.value_dense(inputs)\n\n        # (batch_size, num_heads, seq_len, projection_dim)\n        query = self.split_heads(query, 
batch_size) \n key = self.split_heads(key, batch_size)\n value = self.split_heads(value, batch_size)\n\n scaled_attention, _ = self.scaled_dot_product_attention(query, key, value)\n # (batch_size, seq_len, num_heads, projection_dim)\n scaled_attention = tf.transpose(scaled_attention, perm=[0, 2, 1, 3]) \n\n # (batch_size, seq_len, embedding_dim)\n concat_attention = tf.reshape(scaled_attention, (batch_size, -1, self.embedding_dim))\n outputs = self.dense(concat_attention)\n return outputs\n\nclass TransformerBlock(tf.keras.layers.Layer):\n def __init__(self, embedding_dim, num_heads, dff, rate=0.1,** kwargs):\n super(TransformerBlock, self).__init__()\n self.att = MultiHeadAttention(embedding_dim, num_heads)\n self.ffn = tf.keras.Sequential(\n [tf.keras.layers.Dense(dff, activation=\"relu\"),\n tf.keras.layers.Dense(embedding_dim),]\n )\n self.layernorm1 = tf.keras.layers.LayerNormalization(epsilon=1e-6)\n self.layernorm2 = tf.keras.layers.LayerNormalization(epsilon=1e-6)\n self.dropout1 = tf.keras.layers.Dropout(rate)\n self.dropout2 = tf.keras.layers.Dropout(rate)\n \n def get_config(self) :\n config = super().get_config().copy()\n config.update({\n 'att' : self.att,\n 'ffn' :self.ffn,\n 'layernorm1' : self.layernorm1,\n 'layernorm2' :self.layernorm2,\n 'dropout1' : self.dropout1,\n 'dropout2' :self.dropout2,\n })\n return config\n\n def call(self, inputs, training):\n attn_output = self.att(inputs)\n attn_output = self.dropout1(attn_output, training=training)\n out1 = self.layernorm1(inputs + attn_output)\n ffn_output = self.ffn(out1)\n ffn_output = self.dropout2(ffn_output, training=training)\n return self.layernorm2(out1 + ffn_output)\n\nclass TokenAndPositionEmbedding(tf.keras.layers.Layer):\n def __init__(self, max_len, vocab_size, embedding_dim,** kwargs):\n super(TokenAndPositionEmbedding, self).__init__()\n self.token_emb = tf.keras.layers.Embedding(vocab_size, embedding_dim)\n self.pos_emb = tf.keras.layers.Embedding(max_len, embedding_dim)\n \n def get_config(self) :\n config = super().get_config().copy()\n config.update({\n 'token_emb' : self.token_emb,\n 'pos_emb' :self.pos_emb,\n })\n return config\n\n def call(self, x):\n max_len = tf.shape(x)[-1]\n positions = tf.range(start=0, limit=max_len, delta=1)\n positions = self.pos_emb(positions)\n x = self.token_emb(x)\n return x + positions\n\nclass AttentionModel :\n def __init__(self):\n self.tokenizer = self.get_tokenizer()\n self.START_TOKEN, self.END_TOKEN,self.VOCAB_SIZE = self.get_token()\n self.MAX_LENGTH = 20\n self.model = self.get_model()\n\n def get_tokenizer(self,tokenizer_path='ML/tokenizer.pickle') :\n with open(tokenizer_path, 'rb') as handle:\n tokenizer = pickle.load(handle)\n return tokenizer\n\n def get_token(self):\n START_TOKEN, END_TOKEN = [self.tokenizer.vocab_size], [self.tokenizer.vocab_size + 1]\n VOCAB_SIZE = self.tokenizer.vocab_size + 2\n # print('시작 토큰 번호 :',START_TOKEN)\n # print('종료 토큰 번호 :',END_TOKEN)\n # print('단어 집합의 크기 :',VOCAB_SIZE)\n return START_TOKEN, END_TOKEN,VOCAB_SIZE\n\n def proc_eval(self,inputs):\n inputs = [inputs]\n tokenized_inputs = []\n\n for sentence1 in inputs:\n sentence1 = self.START_TOKEN + self.tokenizer.encode(sentence1) + self.END_TOKEN\n tokenized_inputs.append(sentence1)\n\n tokenized_inputs = tf.keras.preprocessing.sequence.pad_sequences(\n tokenized_inputs, maxlen=self.MAX_LENGTH, padding='post')\n\n return tokenized_inputs\n\n def predict(self,sentence) :\n okt_title = converter.pos(sentence)\n sentence = ' '.join([tup[0].upper() for tup in okt_title if tup[1] == 
'Noun' or tup[1] == 'Alpha'])\n pred = self.model.predict(self.proc_eval(sentence))\n return list(labels['label'])[np.argmax(pred)]\n\n def get_model(self,model_path='ML/sector_classifier.h5') :\n embedding_dim = 64 # Embedding size for each token\n num_heads = 8 # Number of attention heads\n dff = 512 # Hidden layer size in feed forward network inside transformer\n max_len = self.MAX_LENGTH\n vocab_size = self.VOCAB_SIZE\n\n inputs = tf.keras.layers.Input(shape=(max_len,))\n embedding_layer = TokenAndPositionEmbedding(max_len, vocab_size, embedding_dim)\n x = embedding_layer(inputs)\n transformer_block = TransformerBlock(embedding_dim, num_heads, dff)\n x = transformer_block(x)\n x = tf.keras.layers.GlobalAveragePooling1D()(x)\n x = tf.keras.layers.Dropout(0.1)(x)\n x = tf.keras.layers.Dense(20, activation=\"relu\")(x)\n x = tf.keras.layers.Dropout(0.1)(x)\n outputs = tf.keras.layers.Dense(26, activation=\"softmax\")(x)\n\n model = tf.keras.Model(inputs=inputs, outputs=outputs)\n model.load_weights(model_path)\n return model\n\nif __name__ == '__main__':\n model = AttentionModel()\n import time\n st = time.time()\n output = model.predict('react native 개발자 구인')\n print(output)\n fn = time.time() - st\n print(fn)","sub_path":"crawler/ML/selfattention.py","file_name":"selfattention.py","file_ext":"py","file_size_in_byte":7625,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"466759583","text":"from gumpy.commons import sics\r\n#from gumpy.quokka.quokka import *\r\nfrom time import sleep\r\n\r\n\r\n######################\r\n# GLOBAL settings\r\nmode = \"time\" # can be changed to mode = scanMode.monitor\r\ntempSettleTime = 45 # time for settling at each temperature step\r\nfieldSettleTime = 0 # time for settling at each field step\r\nhighTemp = 4 # for FC before changing field heat to this temperature, also for ZFC before new temperature\r\nlowTemp = 1.6 # for FW before changing field cool to this temperature\r\nhighTempSettleTime = 30 # time for settling at max high temperature\r\nlowTempSettleTime = 60 # time for settling at minimum low temperature\r\nfirstTempSettleTime = 30 # time for settling at first measurement point after changing temperature\r\nfirstFieldSettleTime = 0 # time for settling at first measurement point after changing field\r\nthetaSettleTime = 5 # time for settling after changing theta\r\nmeasurementTime = 60 # time for each measurement\r\n\r\n \r\ndef scan_temp_field_FC(measurementList, mode, preset, tempSettleTime = 1, fieldSettleTime = 1):\r\n for field,tempList in measurementList:\r\n log('driving magnet1_driveable=' + str(0))\r\n sics.drive('ma1_magnet_setpoint', 0) \r\n log('waiting for field to settle')\r\n sleep(fieldSettleTime)\r\n log('driving tc1_driveable=' + str(highTemp))\r\n sics.drive('tc1_driveable', highTemp)\r\n log('waiting for temperature to settle')\r\n sleep(highTempSettleTime) \r\n log('driving magnet1_driveable=' + str(field))\r\n sics.drive('ma1_magnet_setpoint', field)\r\n log('waiting for field to settle')\r\n sleep(fieldSettleTime)\r\n \r\n for i,temp in enumerate(tempList):\r\n log('driving tc1_driveable=' + str(temp))\r\n sics.drive('tc1_driveable', temp) \r\n log('waiting for temperature to settle')\r\n if i ==0:\r\n sleep(firstTempSettleTime)\r\n else:\r\n sleep(tempSettleTime)\r\n \r\n for theta in [-6,-4,-2.,0.,2.,4.,6.]:\r\n log('driving rotation=' + str(theta))\r\n sics.drive('samthet', theta)\r\n sleep(thetaSettleTime)\r\n log('start counting')\r\n quokka.scan(mode, 
preset)\r\n\r\n log('driving magnet1_driveable=' + str(0))\r\n sics.drive('ma1_magnet_setpoint', 0)\r\n \r\n \r\n# list for measurements using the FC protocol\r\nlistFC_temp = [3.2,3.0,2.8,2.6,2.4,2.2,2.0,1.8,1.6]\r\nlistFC = [\r\n (0.000, listFC_temp), \r\n (0.100, listFC_temp), \r\n (0.200, listFC_temp), \r\n (0.300, listFC_temp),\r\n (0.050, listFC_temp),\r\n (0.150, listFC_temp),\r\n (0.250, listFC_temp),\r\n (0.350, listFC_temp),\r\n (0.400, listFC_temp)\r\n ]\r\n\r\n\r\n\r\n### from here actual scans are defined\r\n#log('experiment started')\r\n\r\nscan_temp_field_FC(listFC, mode, measurementTime, tempSettleTime, fieldSettleTime)\r\n#scan_temp_field_ZFC(listZFC, mode, measurementTime, tempSettleTime, fieldSettleTime)\r\n#scan_temp_field_FW(listFW, mode, measurementTime, tempSettleTime, fieldSettleTime)\r\n#scan_temp_helical_FW(listHelicalFW_temp, mode, measurementTime, tempSettleTime, fieldSettleTime)\r\n#scan_singlefield_FC(listFC_temp, mode, measurementTime, tempSettleTime, fieldSettleTime)\r\n\r\n######\r\n#log('experiment finished')\r\n","sub_path":"new_workspace/Gumtree_Workspace/Magnet/script13_phasediagram_FC_Hperpki.py","file_name":"script13_phasediagram_FC_Hperpki.py","file_ext":"py","file_size_in_byte":3443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"160175667","text":"from datetime import datetime, timedelta\nimport json\nimport sys\nimport warnings\n\nimport requests\nfrom newspaper import Article\n\nAPI_KEY_LIST = [\n 'xwrfuPbZBp2DPeGtEh2GUP-Fn82NTfV1PSHie_lovRA', # r07\n 'tL6aZ_TQutITQ7C7VGwATWDL9aFM64HRyckMbWknr50',\n 'LpQNW3PAVQigEaGSjHCZj9T-7TJ7XZAidlD7V_2cpoE',\n]\n\n\ndef scrape(start_date, API_KEY_LIST):\n url = 'https://scrapeulous.com/api'\n items = [f'site://www.nytimes.com/{start_date.replace(\"-\", \"/\")}/us coronavirus']\n\n # make sure sufficient credits\n for API_KEY in API_KEY_LIST:\n payload = {\n \"API_KEY\": API_KEY,\n \"function\": \"https://raw.githubusercontent.com/NikolaiT/scrapeulous/master/google_scraper.js\",\n \"items\": items,\n #\"can_use_proxies\": True,\n \"region\": \"us\",\n \"options\": {\n \"google_params\": {\n \"num\": 50,\n },\n \"num_pages\": 50,\n }\n }\n\n r = requests.post(url, data=json.dumps(payload), verify=False)\n results = r.json()\n error = ''\n if isinstance(results, dict):\n error = results.get('error')\n\n if error == 'remaining credits not sufficient for this api call':\n print(f'{API_KEY} got no credits. Switching API_KEY')\n elif error == 'Invalid api key':\n print(f'{API_KEY} is invalid. 
Switching API_KEY')\n        else:\n            break\n\n    # extract links from SERP\n    news_list = []\n    urls = []\n    for res in results:\n        if isinstance(res['result'], list):\n            for page in res['result']:\n                for link in page['results']:\n                    news = {}\n                    url = link['link']\n                    if url in urls:\n                        continue\n                    else:\n                        urls.append(url)\n                    news['url'] = url\n                    news['date'] = link['date']\n                    news['title'] = link['title']\n                    news_list.append(news)\n            print(start_date, len(news_list), 'news scraped')\n        elif isinstance(res['result'], dict):\n            print(start_date, len(news_list), 'news scraped.', res['result']['error_message'])\n        elif isinstance(res['result'], str):\n            print(results)\n\n    path = 'us_nytimes'\n    filename = f'/Users/MorrisChang/thesis/scraped_news/{path}/{start_date}.csv'\n    f = open(filename, 'w')\n    f.write(str(news_list))\n    f.close()\n\n    return len(news_list)\n\n\ndef process_news(news_list):\n    filter_domains = [\n    ]\n\n    filtered = []\n    for news in news_list:\n        badboy = False\n        for f in filter_domains:\n            if f in news['url']:\n                badboy = True\n                break\n        if not badboy:\n            filtered.append(news)\n\n    path = 'us_nytimes'\n    filename = f'/Users/MorrisChang/thesis/scraped_news/{path}/{path}.txt'\n    f = open(filename, 'a')\n    for news in filtered:\n        error, parsed_title, keywords = '', '', ''\n        date = news['date']\n\n        try:\n            url = news['url']\n            article = Article(url)\n            # SSL certificate verification sometimes failed here; patching verify=False into get_html_2XX_only in the package's network.py works around it\n            article.download()\n            article.parse()\n            article.nlp()\n            keywords = article.keywords\n            parsed_title = article.title\n\n            if not date:\n                date = article.publish_date\n            else:\n                date = datetime.strptime(date, '%b %d, %Y')\n        except Exception as e:\n            error = str(e)\n\n        delimiter = '!&^'\n        line = f'{error}{delimiter}{date}{delimiter}{news[\"title\"]}{delimiter}{parsed_title}{delimiter}{keywords}{delimiter}{url}\\n'\n        print(line[:-1])\n        f.write(line)\n    f.close()\n\n    # TODO: decide on the saved-file data format (so it is easy to read back), error logging, filtered_domains, BBC\n\n\ndef cmd_scrape(date):\n    while True:\n        dt = datetime.strptime(date, '%Y-%m-%d')\n\n        scrape(date, API_KEY_LIST)\n\n        next_dt = dt + timedelta(days=1)\n        if next_dt > datetime(2020, 4, 24):\n            break\n        date = next_dt.strftime('%Y-%m-%d')\n\n\ndef cmd_process(date):\n    # process day by day\n    while True:\n        dt = datetime.strptime(date, '%Y-%m-%d')\n        end_date = (dt + timedelta(days=1)).strftime('%Y-%m-%d')\n        filename = date + '.csv'\n        path = 'us_nytimes'\n        filename = f'/Users/MorrisChang/thesis/scraped_news/{path}/{filename}'\n        f = open(filename, 'r')\n        news_list = eval(f.readline())\n        print(f'processing {len(news_list)} news on {date}')\n\n        process_news(news_list)\n\n        next_dt = dt + timedelta(days=1)\n        if next_dt > datetime(2020, 4, 24):\n            break\n        date = next_dt.strftime('%Y-%m-%d')\n\n\ndef cmd_check(date):\n    # process day by day\n    is_all_clear = True\n    while True:\n        dt = datetime.strptime(date, '%Y-%m-%d')\n        filename = date + '.csv'\n        path = 'us_nytimes'\n        filename = f'/Users/MorrisChang/thesis/scraped_news/{path}/{filename}'\n        f = open(filename, 'r')\n        news_list = eval(f.readline())\n        if news_list == []:\n            print(f'0 news on {date}. 
Restart scraping ...')\n news_num = scrape(date, API_KEY_LIST)\n if news_num == 0:\n is_all_clear = False\n else:\n print(f'{len(news_list)} news on {date}.')\n\n next_dt = dt + timedelta(days=1)\n if next_dt > datetime(2020, 4, 24):\n break\n date = next_dt.strftime('%Y-%m-%d')\n\n return is_all_clear\n\n\ncmd = sys.argv[1]\nstart_date = sys.argv[2]\n\nif __name__ == \"__main__\":\n warnings.filterwarnings(\"ignore\")\n\n if cmd == 'scrape':\n # scrape news\n cmd_scrape(start_date)\n elif cmd == 'check':\n # check wheter intactly scraped or not\n is_all_clear = cmd_check(start_date)\n while not is_all_clear:\n is_all_clear = cmd_check(start_date)\n elif cmd == 'process':\n # process scraped_news into csv (keywords appended)\n cmd_process(start_date)\n else:\n print('invalid cmd')\n","sub_path":"scraped_news/us_nytimes/scrapeulous.py","file_name":"scrapeulous.py","file_ext":"py","file_size_in_byte":6200,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"145012360","text":"# coding: utf-8\n\n\"\"\"\n Chat API SDK\n\n The SDK allows you to receive and send messages through your WhatsApp account. [Sign up now](https://app.chat-api.com/) The Chat API is based on the WhatsApp WEB protocol and excludes the ban both when using libraries from mgp25 and the like. Despite this, your account can be banned by anti-spam system WhatsApp after several clicking the \\\"block\\\" button. # noqa: E501\n\n The version of the OpenAPI document: 1.0.0\n Contact: sale@chat-api.com\n Generated by: https://openapi-generator.tech\n\"\"\"\n\n\nimport pprint\nimport re # noqa: F401\n\nimport six\n\n\nclass WebhookStatus(object):\n \"\"\"NOTE: This class is auto generated by OpenAPI Generator.\n Ref: https://openapi-generator.tech\n\n Do not edit the class manually.\n \"\"\"\n\n \"\"\"\n Attributes:\n openapi_types (dict): The key is attribute name\n and the value is attribute type.\n attribute_map (dict): The key is attribute name\n and the value is json key in definition.\n \"\"\"\n openapi_types = {\n 'msg_id': 'str',\n 'time': 'str',\n 'status': 'str'\n }\n\n attribute_map = {\n 'msg_id': 'msgId',\n 'time': 'time',\n 'status': 'status'\n }\n\n def __init__(self, msg_id=None, time=None, status=None): # noqa: E501\n \"\"\"WebhookStatus - a model defined in OpenAPI\"\"\" # noqa: E501\n\n self._msg_id = None\n self._time = None\n self._status = None\n self.discriminator = None\n\n if msg_id is not None:\n self.msg_id = msg_id\n if time is not None:\n self.time = time\n if status is not None:\n self.status = status\n\n @property\n def msg_id(self):\n \"\"\"Gets the msg_id of this WebhookStatus. # noqa: E501\n\n message id # noqa: E501\n\n :return: The msg_id of this WebhookStatus. # noqa: E501\n :rtype: str\n \"\"\"\n return self._msg_id\n\n @msg_id.setter\n def msg_id(self, msg_id):\n \"\"\"Sets the msg_id of this WebhookStatus.\n\n message id # noqa: E501\n\n :param msg_id: The msg_id of this WebhookStatus. # noqa: E501\n :type: str\n \"\"\"\n\n self._msg_id = msg_id\n\n @property\n def time(self):\n \"\"\"Gets the time of this WebhookStatus. # noqa: E501\n\n creation date # noqa: E501\n\n :return: The time of this WebhookStatus. # noqa: E501\n :rtype: str\n \"\"\"\n return self._time\n\n @time.setter\n def time(self, time):\n \"\"\"Sets the time of this WebhookStatus.\n\n creation date # noqa: E501\n\n :param time: The time of this WebhookStatus. 
# noqa: E501\n :type: str\n \"\"\"\n\n self._time = time\n\n @property\n def status(self):\n \"\"\"Gets the status of this WebhookStatus. # noqa: E501\n\n status name (\\\"sent\\\", \\\"not sent\\\", \\\"queued\\\") # noqa: E501\n\n :return: The status of this WebhookStatus. # noqa: E501\n :rtype: str\n \"\"\"\n return self._status\n\n @status.setter\n def status(self, status):\n \"\"\"Sets the status of this WebhookStatus.\n\n status name (\\\"sent\\\", \\\"not sent\\\", \\\"queued\\\") # noqa: E501\n\n :param status: The status of this WebhookStatus. # noqa: E501\n :type: str\n \"\"\"\n\n self._status = status\n\n def to_dict(self):\n \"\"\"Returns the model properties as a dict\"\"\"\n result = {}\n\n for attr, _ in six.iteritems(self.openapi_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(\n lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x,\n value\n ))\n elif hasattr(value, \"to_dict\"):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(\n lambda item: (item[0], item[1].to_dict())\n if hasattr(item[1], \"to_dict\") else item,\n value.items()\n ))\n else:\n result[attr] = value\n\n return result\n\n def to_str(self):\n \"\"\"Returns the string representation of the model\"\"\"\n return pprint.pformat(self.to_dict())\n\n def __repr__(self):\n \"\"\"For `print` and `pprint`\"\"\"\n return self.to_str()\n\n def __eq__(self, other):\n \"\"\"Returns true if both objects are equal\"\"\"\n if not isinstance(other, WebhookStatus):\n return False\n\n return self.__dict__ == other.__dict__\n\n def __ne__(self, other):\n \"\"\"Returns true if both objects are not equal\"\"\"\n return not self == other\n","sub_path":"openapi_client/models/webhook_status.py","file_name":"webhook_status.py","file_ext":"py","file_size_in_byte":4728,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"583806314","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n# Author: wxnacy(wxnacy@gmail.com)\n\"\"\"\n\n\"\"\"\n\nfrom concurrent.futures import ThreadPoolExecutor\n\nfrom dler.downloader.m3u8_downloader import M3u8Downloader\nfrom dler.downloader.models import Task\n\ndef main():\n import sys\n args = sys.argv[1:]\n url = args[0]\n arg_task_id = None\n if len(args) >= 2:\n arg_task_id = args[1]\n\n print(url, arg_task_id)\n # return\n task_id = url\n if url.startswith('http'):\n task_id = M3u8Downloader.add_task(url, arg_task_id)\n print(task_id)\n downloader = M3u8Downloader(task_id)\n # downloader.is_async = False\n downloader.start()\n","sub_path":"dler/cli/download_m3u8.py","file_name":"download_m3u8.py","file_ext":"py","file_size_in_byte":656,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"33233747","text":"import random\r\n\r\nwhile True: #WHILE TRUE MEANS THAT IT LOOPS FOREVER/INDEFINITELY BUT SINCE THERE'S THE BREAK AT THE END, THE BREAK CUTS IT OFF \r\n user_action = input(\"Enter a choice (rock, paper, scissors): \")\r\n possible_actions = [\"rock\", \"paper\", \"scissors\"]\r\n computer_action = random.choice(possible_actions)\r\n #print(\"You chose {user_action}, computer chose {computer_action}.\")\r\n\r\n if user_action == computer_action:\r\n print(f\"Both players selected {user_action}. It's a tie!\")\r\n elif user_action == \"rock\":\r\n if computer_action == \"scissors\":\r\n print(\"Rock > scissors! You win!\")\r\n else:\r\n print(\"Paper > rock! 
You lose.\")\r\n elif user_action == \"paper\":\r\n if computer_action == \"rock\":\r\n print(\"Paper > rock! You win!\")\r\n else:\r\n print(\"Scissors > paper! You lose.\")\r\n elif user_action == \"scissors\":\r\n if computer_action == \"paper\":\r\n print(\"Scissors > paper! You win!\")\r\n else:\r\n print(\"Rock > scissors! You lose.\")\r\n\r\n play_again = input(\"Play again? (y/n): \")\r\n if play_again.lower() != \"y\":\r\n print(\"Weak mental EZ Clap\")\r\n break\r\n\r\n#LEARN HOW THIS WORKS PROPERLY\r\n\r\n#elif is just a contracted \"else if\" statement. \r\n#I.e., the first section saying that if the values are equal, print \"it's a tie\"\r\n#And then the if/else statement under it, followed by another elif for the case of input being paper, scissors, etc. \r\n\r\n#if user_action == computer_action: \r\n #print(f\"both players selected {user_action}. It's a tie!\")\r\n #^^ The above is the syntax for a tie in the RPS\r\n\r\n#IF AND ELIF IS THE SAME AS JS IN THAT PICTURE BORDER LAB\r\n#understand the \"while True:\" section \r\n#user_action, possible_actions, and computer_action is pretty self explanatory\r\n","sub_path":"RockPaperSc.py","file_name":"RockPaperSc.py","file_ext":"py","file_size_in_byte":1844,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"32966382","text":"from unittest import TestCase\nfrom app import app, db\nfrom models import State\nfrom tests.runner import clear_db\n\n\nclass TestStateMethods(TestCase):\n\n def setUp(self):\n self.app_context = app.test_request_context()\n self.app_context.push()\n app.test_client()\n self.app = app\n db.create_all()\n self.db = db\n\n def tearDown(self):\n clear_db(self.db)\n\n def test_if_save_method_saves_state_on_database(self):\n State().save(self.db.session, state='SP', country='Brasil', total_cases=5,\n total_cases_ms=2, not_confirmed_by_ms=2, deaths=2, url='https://covidzero.com.br')\n self.db.session.commit()\n _model = self.db.session.query(State).filter_by(state='SP').first()\n self.assertIsNotNone(_model)\n","sub_path":"tests/test_models/test_state.py","file_name":"test_state.py","file_ext":"py","file_size_in_byte":795,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"171855613","text":"# -*- coding: utf-8 -*-\r\n\r\nfrom sklearn.naive_bayes import GaussianNB\r\nfrom util import func_confusion_matrix\r\nfrom sklearn.metrics import roc_curve, auc\r\nfrom matplotlib import pyplot as plt\r\n\r\ndef GaussianNBModel(train_X,train_y,val_x,val_y,testX,testY):\r\n \r\n # Calling the GaussianNB model\r\n classifier = GaussianNB()\r\n \r\n # Training the model on the train dataset\r\n classifier.fit(train_X,train_y)\r\n \r\n # Testing the model on the test dataset\r\n y_pred = classifier.predict(testX)\r\n \r\n #Calculating the metrics for the evaluation of the model\r\n conf_matrix, accuracy, recall_array, precision_array = func_confusion_matrix(\r\n testY, y_pred)\r\n \r\n # Plotting the ROC Curve\r\n fpr, tpr, thresholds = roc_curve(y_pred, testY)\r\n roc_auc = auc(fpr,tpr)\r\n \r\n plt.figure()\r\n lw = 2\r\n plt.plot(fpr, tpr, color='darkorange', lw=lw, label='ROC curve (area = %0.2f)' % roc_auc)\r\n plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')\r\n plt.xlim([0.0, 1.0])\r\n plt.ylim([0.0, 1.05])\r\n plt.xlabel('False Positive Rate')\r\n plt.ylabel('True Positive Rate')\r\n plt.title('ROC Curve for GaussianNB')\r\n plt.legend(loc=\"lower right\")\r\n plt.show()\r\n \r\n return accuracy, 
precision_array, recall_array, conf_matrix\r\n ","sub_path":"GaussianNB.py","file_name":"GaussianNB.py","file_ext":"py","file_size_in_byte":1357,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"279799215","text":"import numpy as np\nimport scipy.sparse as sp\nimport torch\nimport torch.utils.data as data\n\n\ndef load_data(path, n_num, d_num):\n samples = load_samples(path)\n adj = construct_sparse_mx(np.array(samples)[:, :2], n_num, d_num)\n adj = normalize(adj)\n adj = sparse_mx_to_torch_sparse_tensor(adj)\n \n return samples, adj\n\n\ndef load_samples(path):\n samples = list()\n with open(path) as f:\n for line in f.readlines():\n line = line.strip('\\n')\n tmp = line.split()\n samples.append([int(tmp[0]), int(tmp[1]), int(tmp[2])])\n \n return samples\n\n\ndef construct_sparse_mx(points, n_num, d_num):\n src_points = np.array([[a, b+n_num] for a, b in points])\n trans_points = np.array([[b, a] for a, b in src_points])\n adj_points = np.concatenate((src_points, trans_points), axis=0)\n\n data = np.ones(len(adj_points))\n adj = sp.coo_matrix((data, (adj_points[:, 0], adj_points[:, 1])), shape=(n_num+d_num, n_num+d_num), dtype=np.float32)\n\n return adj\n\n\ndef normalize(adj):\n adj += sp.eye(adj.shape[0])\n d = np.array(adj.sum(1) + 1)\n d_half = sp.diags(np.power(d, -0.5).flatten())\n\n return d_half.dot(adj).dot(d_half)\n\n\ndef sparse_mx_to_torch_sparse_tensor(adj):\n adj = adj.tocoo().astype(np.float32)\n indices = torch.cuda.LongTensor(np.vstack((adj.row, adj.col)).astype(np.int64))\n values = torch.cuda.FloatTensor(adj.data)\n shape = torch.Size(adj.shape)\n\n return torch.sparse.FloatTensor(indices, values, shape)\n","sub_path":"code/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1502,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"306046518","text":"# Say that a \"clump\" in an array is a series of 2 or more adjacent elements of the same value\n# Return the number of clumps in the given array\n# for more info on this quiz, go to this url: http://www.programmr.com/count-clumps-3\n\n\ndef count_clumps(x_arr):\n count = 0\n prev = ''\n for i in range(len(x_arr)-1):\n x = x_arr[i]\n y = x_arr[i+1]\n if x == y and not(x == prev):\n count += 1\n prev = x\n elif y == prev:\n prev = y\n else:\n prev = ''\n return count\n\n\nif __name__ == \"__main__\":\n print(count_clumps([1, 1, 1, 1, 1, 1]))\n","sub_path":"lists_04/count_clumps.py","file_name":"count_clumps.py","file_ext":"py","file_size_in_byte":615,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"653033276","text":"'''\nAuthor: lei\nCreated Time: 2016.7.15\nFile Name: DBMod.py\nDescription:数据库方法\n'''\n\nimport pymysql\nimport time\nimport datetime\nfrom mainconf import *\n\nclass DBMod(SaveLog):\n\tdef __init__(self):\n\t\tsuper(DBMod,self).__init__()\n\t\ttry:\n\t\t\tself.conn = pymysql.connect(\n\t\t\t\thost=self.getConf('mysql','dbhost'),\n port=int(self.getConf('mysql','dbport')),\n user=self.getConf('mysql','dbuser'),\n passwd=self.getConf('mysql','dbpassword'),\n db=self.getConf('mysql','dbname'),\n charset=self.getConf('mysql','dbcharset'),\n )\n\t\texcept Exception as e:\n\t\t\traise (e)\n\n\tdef GetAvgTraff(self,idlist):\n\t\t'''\n\t\t获取平均流量\n\t\t:param idlist:\n\t\t:return:\n\t\t'''\n\t\tstime = time.strftime(\"%Y-%m-%d\",time.localtime())\n\t\tntime = time.strftime(\"%Y-%m-%d %H:%M:%S\",time.localtime())\n\t\tsql = \"select 
t.itemid,t.value_avg from trends_uint as t WHERE t.clock > UNIX_TIMESTAMP('%s 00:00:00') AND \" \\\n\t\t\t \"t.clock <= UNIX_TIMESTAMP('%s') AND itemid IN (\" % (stime,ntime)\n\t\tnum = len(idlist)\n\t\tfor each_id in idlist:\n\t\t\tsql += \" \\'%s\\' \" % str(each_id)\n\t\t\tif num > 1:\n\t\t\t\tsql += ','\n\t\t\tnum -=1\n\t\tsql += \") GROUP BY itemid ORDER BY value_avg DESC LIMIT 10\"\n\t\trs = self.execute_sql(sql)\n\t\t# return sql\n\t\treturn rs\n\n\tdef GetTriggerID(self):\n\t\t'''\n\t\tCollect all alert data from the last 7 days\n\t\t:return:\n\t\ttriggerids = {\n\t\t\tid:{\n\t\t\t\titemid:2,\n\t\t\t\ttimes:3,\n\t\t\t\t},\n\t\t\t}\n\t\t'''\n\t\tbeing_time,end_time = self.__getDay(7)\n\t\tsql = \"SELECT f.itemid ,COUNT(*) AS 'count' FROM events e INNER JOIN alerts a INNER JOIN functions f \" \\\n\t\t     \"ON a.eventid = e.eventid \" \\\n\t\t     \"WHERE e.objectid = f.triggerid \" \\\n\t\t     \"AND e.clock > UNIX_TIMESTAMP('%s') \" \\\n\t\t     \"AND e.clock <= UNIX_TIMESTAMP('%s')\" \\\n\t\t     \"GROUP BY e.objectid ORDER BY count DESC\" % (being_time,end_time)\n\t\trs = self.execute_sql(sql)\n\t\treturn (rs)\n\n\tdef GetGropLoad(self,groupname = 'all',timestamp=7,top=10):\n\t\t'''\n\t\tGet the host load for the given host group\n\t\t:return:\n\t\t'''\n\t\tbeing_time = self.__getDay(timestamp)[0]  # use the timestamp argument instead of a hard-coded 7\n\t\tif groupname == 'all':\n\t\t\tgrps = ''\n\t\telse:\n\t\t\tif isinstance(groupname,list):\n\t\t\t\tgrps = \"AND g.name in (\"\n\t\t\t\tlength = len(groupname)\n\t\t\t\tfor each in groupname:\n\t\t\t\t\tif length == 1:\n\t\t\t\t\t\tgrps += \"'%s')\"% each\n\t\t\t\t\telse:\n\t\t\t\t\t\tgrps += \"'%s',\" % each\n\t\t\t\t\tlength -= 1\n\t\t\telse:\n\t\t\t\tgrps = \"AND g.name='%s'\" % groupname\n\n\t\tsql = \"SELECT h.name,ROUND(AVG(t.value_avg),2) AS value \" \\\n\t\t     \"FROM items i INNER JOIN trends t ON i.itemid = t.itemid INNER JOIN hosts_groups hg ON hg.hostid = i.hostid \" \\\n\t\t     \"INNER JOIN groups g ON hg.groupid = g.groupid INNER JOIN hosts h ON h.hostid = hg.hostid \" \\\n\t\t     \"WHERE t.clock > UNIX_TIMESTAMP('%s') \" \\\n\t\t\t \"%s \" \\\n\t\t     \"AND i.key_ = 'system.cpu.load[all,avg15]' \" \\\n\t\t     \"GROUP BY i.hostid ORDER BY value DESC LIMIT %s\" % (being_time,grps,top)\n\t\trs = self.execute_sql(sql)\n\t\treturn rs\n\n\n\tdef GetUnConfim(self,groupname = 'all',timestamp=7):\n\t\t'''\n\n\t\tFor each objectid, find its latest record; if that record's acknowledged flag is 1, do not return that objectid's info\n\n\t\tReturn unacknowledged alert info\n\t\t:return:\n\t\t'''\n\t\tif groupname == 'all':\n\t\t\tgrps = ''\n\t\telse:\n\t\t\tif isinstance(groupname,list):\n\t\t\t\tgrps = \"AND grp.name in (\"\n\t\t\t\tlength = len(groupname)\n\t\t\t\tfor each in groupname:\n\t\t\t\t\tif length == 1:\n\t\t\t\t\t\tgrps += \"'%s')\"% each\n\t\t\t\t\telse:\n\t\t\t\t\t\tgrps += \"'%s',\" % each\n\t\t\t\t\tlength -= 1\n\t\t\telse:\n\t\t\t\tgrps = \"AND grp.name='%s'\" % groupname\n\t\tbeing_time = self.__getDay(timestamp)[0]\n\t\tsql = \"SELECT grp.name AS 'group',host.name AS 'host',trg.description,\" \\\n\t\t     \"max(event.clock) AS 'start_time',\" \\\n\t\t     \"(trg.lastchange) AS 'last_time' ,\" \\\n\t\t     \"(CASE WHEN MAX(event.clock)=trg.lastchange THEN TIMESTAMPDIFF(SECOND,FROM_UNIXTIME(MAX(event.clock)),CURRENT_TIMESTAMP()) \" \\\n\t\t     \"ELSE TIMESTAMPDIFF(second,FROM_UNIXTIME(max(event.clock)),FROM_UNIXTIME(trg.lastchange)) END) AS 'duration',\" \\\n\t\t     \"(CASE WHEN MAX(event.clock)=trg.lastchange THEN 0 ELSE 1 END) AS 'is_restore',\" \\\n\t\t     \"item.itemid, \" \\\n\t\t     \"CAST(SUBSTRING(GROUP_CONCAT(event.acknowledged ORDER BY event.clock DESC),1,1) AS SIGNED) AS acknowledges \" \\\n\t\t     \"FROM events event \" \\\n\t\t     \"INNER JOIN alerts alert ON
alert.eventid = event.eventid \" \\\n\t\t     \"INNER JOIN triggers trg ON trg.triggerid = event.objectid \" \\\n\t\t     \"INNER JOIN functions func ON func.triggerid = trg.triggerid \" \\\n\t\t     \"INNER JOIN items item ON item.itemid = func.itemid \" \\\n\t\t     \"INNER JOIN hosts host ON host.hostid = item.hostid \" \\\n\t\t     \"INNER JOIN hosts_groups hgp ON hgp.hostid = host.hostid \" \\\n\t\t     \"INNER JOIN groups grp ON grp.groupid = hgp.groupid \" \\\n\t\t     \"WHERE \" \\\n\t\t     \"event.clock > UNIX_TIMESTAMP('%s') \" \\\n\t\t     \"AND event.value = 1 \" \\\n\t\t\t \"AND grp.name != 'ALL_SERVER' \" \\\n\t\t     \"%s \" \\\n\t\t     \"GROUP BY event.objectid \" \\\n\t\t     \"ORDER BY start_time DESC \" % (being_time,grps)\n\t\trs = self.execute_sql(sql)\n\t\treturn rs\n\n\tdef FromEventidGetItemid(self,eventid):\n\t\tsql = \"select functions.itemid from events \" \\\n\t\t     \"inner join functions on functions.triggerid = events.objectid \" \\\n\t\t     \"where eventid = %s\" % eventid\n\t\trs = self.execute_sql(sql)\n\t\tif len(rs) > 0:\n\t\t\treturn rs[0][0]\n\t\telse:\n\t\t\treturn -1\n\n\tdef __getDay(self,day=1):\n\t\t'''\n\t\tGet the time range\n\t\t:param day:\n\t\t:return:\n\t\t'''\n\t\tif not isinstance(day,int):\n\t\t\traise TypeError(\"day must be an integer!\")\n\t\treturn [(datetime.datetime.now() - datetime.timedelta(days=int(\"%s\" % day))).strftime('%Y-%m-%d %H:%M:%S'),datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')]\n\n\tdef execute_sql(self,sql):\n\t\t'''\n\t\tExecute a SQL statement\n\t\t:param sql: the SQL statement\n\t\t:param mode: select / insert mode selection\n\t\t:return:\n\t\t'''\n\t\tcursor = self.conn.cursor()\n\t\ttry:\n\t\t\tcursor.execute(sql)\n\t\t\treturn cursor.fetchall()\n\t\texcept Exception as e:\n\t\t\tprint(e)\n\t\t\treturn False\n\t\tfinally:\n\t\t\tcursor.close()\n\n\n\n\nif __name__ == '__main__':\n\t# print(timtime.time())\n\td = DBMod()\n\tprint(d.FromEventidGetItemid(196229))\n\t# for i in d.GetUnConfim(timestamp=100):\n\t# \tprint(i[4])\n\t\t# print(time.strftime(\"%Y-%m-%d %H:%M:%S\",time.localtime(i[3])),time.strftime(\"%Y-%m-%d %H:%M:%S\",time.localtime(i[4])))\n\t# print(d.GetTriggerID())\n\t# print(d.GetAvgTraff(['25069',]))# '25086', '25097', '25114', '25136', '25157', '25259']))","sub_path":"wx_api/DBMod.py","file_name":"DBMod.py","file_ext":"py","file_size_in_byte":6259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"555801770","text":"from bs4 import BeautifulSoup\nfrom multiprocessing.dummy import Pool\nimport os\nimport time\nimport requests\nimport json\nimport glob\nimport shutil\n\n#location for the reviews\nBASE_URL = 'http://www.basenotes.net/fragrancereviews/page/{0}'\nsession = requests.Session()\nHEADERS = {\n    'user-agent': ('Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) AppleWebKit/537.36 '\n                   '(KHTML, like Gecko) Chrome/65.0.3325.181 Safari/537.36')\n}\nDATA_DIR = 'data'\nFILENAME = 'perfume-data'\n\nclass Scraper():\n    \"\"\"Scraper for basenotes.com\"\"\"\n\n    def __init__(self, pages_scraped=(1,1), num_jobs=1, clear_old_data=True):\n        self.pages_scraped = pages_scraped\n        self.num_jobs = num_jobs\n        self.clear_old_data = clear_old_data\n        self.session = requests.Session()\n        self.est_reviews = (pages_scraped[1] + 1 - pages_scraped[0]) * 30\n        self.review_count = 0\n        self.start_time = time.time()\n    \n        if num_jobs > 1:\n            self.multiprocessing = True\n            self.worker_pool = Pool(num_jobs)\n        else:\n            self.multiprocessing = False\n\n    def scrape_site(self):\n        if self.clear_old_data:\n            self.clear_data_dir()\n        if self.multiprocessing:\n            link_list =
[BASE_URL.format(page) for page in range(self.pages_scraped[0],self.pages_scraped[1] + 1)]\n            records = self.worker_pool.map(self.scrape_page, link_list)\n            self.worker_pool.terminate()\n            self.worker_pool.join()\n        else:\n            for page in range(self.pages_scraped[0], self.pages_scraped[1] + 1):\n                self.scrape_page(BASE_URL.format(page))\n        print('Scrape finished...')\n        self.condense_data()\n\n    def scrape_page(self, page_url, review_count=0, retry_count=0):\n        scrape_data = []\n        try:\n            response = self.session.get(page_url, headers=HEADERS)\n        except:\n            retry_count += 1\n            if retry_count <= 3:\n                self.session = requests.Session()\n                self.scrape_page(page_url, review_count, retry_count)\n            else:\n                raise\n\n        soup = BeautifulSoup(response.content, 'html.parser')\n        reviews = soup.find_all('div', {'class': 'reviewblurb'})[0:]\n        for review in reviews:\n            review_url = review.find('a')['href']\n            #parse perfume name and maker\n            split_name = str(review.find('a')).split('>')\n            \n            self.review_count += 1\n            split = split_name[1][0:-3].split(' by ')\n            perfume_name = split[0]\n            perfume_maker = split[1]\n            \n            #parse review\n            split_review = str(review).split('')\n            review_text = split_review[len(split_review)-1][0:-6]\n            \n            #parse information\n            rating, year, gender, availability = self.scrape_info(review_url)\n            \n            review_data = {\n                'perfume': perfume_name,\n                'maker': perfume_maker,\n                'review': review_text,\n                'rating': rating,\n                'year': year,\n                'gender': gender,\n                'availability': availability\n            }\n            \n            scrape_data.append(review_data)\n            self.update_scrape_status()\n        self.save_data(scrape_data)\n    \n    def scrape_info(self, review_url):\n        info_response = self.session.get(review_url, headers=HEADERS)\n        info_soup = BeautifulSoup(info_response.content, 'html.parser')\n        info = info_soup.find_all('div', {'class': 'peoplelist'})[0:]\n        try:\n            rating = info[0].find('meta')['content']\n        except TypeError:\n            rating = float('nan')\n        info_parse = str(info[0:]).split('')\n        info_parse = self.remove_brackets(info_parse)\n        year = info_parse[1][14:]\n        gender = info_parse[2][6:]\n        availability = info_parse[3][12:]\n        return rating, year, gender, availability\n    \n    def remove_brackets(self, input_list):\n        ret = [None]*len(input_list)\n        skip1c = 0\n        for j in range(0,len(input_list)-1):\n            words = ''\n            for i in input_list[j]:\n                if i == '<':\n                    skip1c += 1\n                elif i == '>' and skip1c > 0:\n                    skip1c -= 1\n                elif skip1c == 0:\n                    words += i\n            ret[j] = words\n        return ret\n\n    def save_data(self, data):\n        filename = '{}/{}_{}.json'.format(DATA_DIR, FILENAME, time.time())\n        try:\n            os.makedirs(DATA_DIR)\n        except OSError:\n            pass\n        with open(filename, 'w') as fout:\n            json.dump(data, fout)\n    \n    def clear_all_data(self):\n        self.clear_data_dir()\n        self.clear_output_data()\n\n    def clear_data_dir(self):\n        try:\n            shutil.rmtree(DATA_DIR)\n        except FileNotFoundError:\n            pass\n\n    def clear_output_data(self):\n        try:\n            os.remove('{}.json'.format(FILENAME))\n        except FileNotFoundError:\n            pass\n\n    def condense_data(self):\n        print('Condensing Data...')\n        condensed_data = []\n        all_files = glob.glob('{}/*.json'.format(DATA_DIR))\n        for file in all_files:\n            with open(file, 'rb') as fin:\n                condensed_data += json.load(fin)\n        print(len(condensed_data))\n        filename = '{}.json'.format(FILENAME)\n        with open(filename, 'w') as fout:\n            json.dump(condensed_data, fout)\n\n    def update_scrape_status(self):\n        elapsed_time = round(time.time() - self.start_time, 2)\n        time_remaining = round((self.est_reviews - self.review_count) / (self.review_count / elapsed_time), 2)  # reviews left divided by the observed rate (reviews per second)\n        print('{0}/{1} reviews pulled | {2}s
elapsed | {3}s remain\\r'.format(\n            self.review_count, self.est_reviews, elapsed_time, time_remaining))\n\nif __name__ == '__main__':\n    pages = (1, 1)\n    perfume_scraper = Scraper(pages_scraped = pages, num_jobs=4)\n\n    perfume_scraper.scrape_site()\n","sub_path":"scrape-perfume.py","file_name":"scrape-perfume.py","file_ext":"py","file_size_in_byte":6041,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"317271219","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport boto\nimport os\n\n# Define the connection variables and other constants\nEC2_URL = os.environ['EC2_URL']\nEC2_ACCESS_KEY = os.environ['EC2_ACCESS_KEY']\nEC2_SECRET_KEY = os.environ['EC2_SECRET_KEY']\n\nTEMPLATE_ID = 'cmi-2A21A30D'\nINSTANCE_TYPE = 'c1.large'\nSECURITY_GROUP = 'subnet-61ECBB2A'\nKEY_NAME = 'Lenovo-T410'\n\n# Create a connection object to the Cloud\nconn = boto.connect_ec2_endpoint(\n\tEC2_URL,\n\taws_access_key_id=EC2_ACCESS_KEY,\n\taws_secret_access_key=EC2_SECRET_KEY\n)\n\ninstances_to_delete = [\n\t'i-1B918761', # ID of the VM that needs to be deleted\n]\n\n# Delete the VMs\nconn.terminate_instances(instance_ids=instances_to_delete)","sub_path":"delete_instance.py","file_name":"delete_instance.py","file_ext":"py","file_size_in_byte":784,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"131416654","text":"from env import SoccerEnv\nfrom agents.common.training_opponent import StationaryOpponent, RandomSwitchOpponent, RLBasedOpponent\n\nTOP = 0\nTOP_RIGHT = 1\nRIGHT = 2\nBOTTOM_RIGHT = 3\nBOTTOM = 4\nBOTTOM_LEFT = 5\nLEFT = 6\nTOP_LEFT = 7\n\nenv = SoccerEnv()\nagentOP = StationaryOpponent(env_width=env.width, env_height=env.height, env_goal_size=env.goal_size)\n\nstate = env.reset()\n\n# loop\nenv.show()\nactionOP = agentOP.get_action(state)\nprint(actionOP)\ndone, reward_l, reward_r, state, actions = env.step(\"type action here!\", actionOP)\n\nagentOP.adjust(done, reward_r, i)\n","sub_path":"test/test_script.py","file_name":"test_script.py","file_ext":"py","file_size_in_byte":559,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"151047487","text":"# Copyright 2015 Akanda, Inc\n#\n# Author: Akanda, Inc\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n#    http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport logging\nimport sys\n\nfrom astara.common.i18n import _LI\nfrom astara.common import config as ak_cfg\n\nfrom astara.common import rpc\nfrom astara.pez import manager\n\nfrom oslo_config import cfg\nfrom oslo_log import log\nfrom oslo_service import service\n\nCONF = cfg.CONF\n\nLOG = log.getLogger(__name__)\n\n\nclass PezService(service.Service):\n \"\"\"Bootstraps a connection for the manager to the messaging\n queue and launches the pez service\n \"\"\"\n def __init__(self):\n super(PezService, self).__init__()\n self.manager = manager.PezManager()\n self.manager.start()\n self._rpc_connection = None\n self.rpcserver = None\n\n def start(self):\n super(PezService, self).start()\n self._rpc_connection = rpc.Connection()\n self._rpc_connection.create_rpc_consumer(\n topic=cfg.CONF.pez.rpc_topic,\n endpoints=[self.manager])\n self._rpc_connection.consume_in_threads()\n self._rpc_connection.close()\n\n\ndef main(argv=sys.argv[1:]):\n ak_cfg.parse_config(argv)\n log.setup(CONF, 'astara-pez')\n CONF.log_opt_values(LOG, logging.INFO)\n\n LOG.info(_LI(\"Starting Astara Pez service.\"))\n\n mgr = PezService()\n launcher = service.launch(CONF, mgr)\n launcher.wait()\n","sub_path":"astara/pez/service.py","file_name":"service.py","file_ext":"py","file_size_in_byte":1858,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"296147313","text":"import requests\n\n\nclass Test_CloseprojectPatent:\n url = \"https://airaapps.evalueserve.com/api/v1/patent/close/project/?token=apikey\"\n\n def test18(self):\n payload = {'apikey': 'd0f4ac5d0ddf6f4a7bcedce7904f054f',\n 'ProjectName': 'Test',\n 'Keywords': 'Test',\n 'RequestID': '100739'}\n files = []\n headers = {}\n\n response = requests.request(\"POST\", Test_CloseprojectPatent.url, headers=headers, data=payload, files=files)\n\n print(response.text)\n","sub_path":"APITesting/Test_CloseprojectPatent.py","file_name":"Test_CloseprojectPatent.py","file_ext":"py","file_size_in_byte":535,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"328603740","text":"from flask import Flask, render_template, request, redirect\nimport requests\nimport simplejson as json\nimport pandas as pd\nfrom bokeh.plotting import figure\nfrom bokeh.embed import components\n\n\napp = Flask(__name__)\n\n@app.route('/')\ndef main():\n return redirect('/index')\n\n@app.route('/index')\ndef index():\n return render_template('index.html')\n\n@app.route('/graph', methods=[\"GET\", \"POST\"])\ndef graph():\n form_dict = {}\n if request.method == 'POST':\n # Extract form info and handle errors\n ticker = request.form['ticker']\n if not ticker:\n return '
Please go back and enter a stock ticker
'\n features = request.form.getlist('features')\n if not features:\n return '
Please go back and select some features to show
'\n\n # Call the Quandl API\n api_url = 'https://www.quandl.com/api/v1/datasets/WIKI/%s.json?api_key=bhDkb5WTxo-gXcFN5mgq' % ticker\n response = requests.get(api_url)\n json_response = json.loads(response.content)\n\n # Put the data in a Pandas dataframe\n data = pd.DataFrame(json_response['data'], columns=json_response['column_names'])\n data['Date'] = pd.to_datetime(data['Date'])\n\n # Send the data to the Bokeh plot\n p = figure(x_axis_type=\"datetime\", width=800, height=600)\n line_color=['red', 'green', 'blue', 'brown']\n for index, feature in enumerate(features):\n p.line(data['Date'], data[feature], legend=feature, line_color = line_color[index])\n script, div = components(p)\n\n return render_template('graph.html', script=script, div=div, company = ticker.upper())\n\n\nif __name__ == '__main__':\n app.run(port=5000)\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1712,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"653492580","text":"from django.conf.urls import url\nfrom . import views\n\nurlpatterns = [\n url(r'^$', views.index),\n url(r'^login', views.process_login),\n url(r'^home', views.logged_in),\n url(r'^about', views.about),\n url(r'^register', views.register_page),\n url(r'^logout', views.log_out),\n url(r'^posts/(?P.+)$', views.read_post)\n]\n","sub_path":"blog/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":339,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"233833349","text":"\"\"\"\nThe various sequence-to-sequence models\n\"\"\"\n\nimport tensorflow as tf\n\n#Local modules\nimport tf_collections\nimport loss_functions\nimport metrics\n\ndef _create_placeholders():\n\t\"\"\"\n\tCreates tf.placeholder objects for representing the source and target data.\n\n\t:return: The placeholder varibles\n\t:rtype: tf_collections.DataPlaceholders\n\t\"\"\"\n\n\t# batch_size time\n\tinput_data = tf.placeholder(tf.int32, [None, None], name='input_data')\n\ttargets = tf.placeholder(tf.int32, [None, None], name='targets')\n\tsource_lengths = tf.placeholder(tf.int32, [None], name=\"source_lengths\")\n\ttarget_lengths = tf.placeholder(tf.int32, [None], name=\"target_lengths\")\n\n\tplaceholders = tf_collections.DataPlaceholders(input_data=input_data, targets=targets,\n\t\t\t\t\tsource_lengths=source_lengths, target_lengths=target_lengths)\n\n\treturn placeholders\n\n\ndef _dropout_cell(rnn_size, keep_prob):\n lstm = tf.contrib.rnn.BasicLSTMCell(rnn_size)\n return tf.contrib.rnn.DropoutWrapper(lstm, input_keep_prob=keep_prob)\ndef _multi_dropout_cell(rnn_size, keep_prob, num_layers): \n return tf.contrib.rnn.MultiRNNCell( [_dropout_cell(rnn_size, keep_prob) for _ in range(num_layers)] )\ndef _process_decoding_input(target_data, go_token):\n '''Remove the last word id from each batch and concat the to the begining of each batch'''\n batch_size = tf.shape(target_data)[0]\n ending = tf.strided_slice(target_data, [0, 0], [batch_size, -1], [1, 1])\n dec_input = tf.concat( [tf.fill([batch_size, 1], go_token), ending], 1)\n return dec_input\n\n\ndef _beam_search_decoder(enc_state, enc_outputs, dec_embed_input, dec_embeddings, dec_cell, attn_size, output_layer, source_lengths, target_lengths, go_token, eos_token, beam_width=1, infer=False):\n\t\"\"\"\n\tProduces an output given the state and outputs of the encoder, using beam search for inference.\n\n\t:param tf.Tensor enc_state: The final state of the encoder\n\t:param tf.Tensor 
enc_outputs: The outputs of the encoder to attend over\n\t:param matrix-like dec_embeddings: The word embeddings used for decoding\n\t:param tf.contrib.rnn.RNNCell dec_cell: The cell to be used for decoding\n\t:param int attn_size: The size of the attention mechanism\n\t:param tf.layers.Layer output_layer: TensorFlow layer applied to the decoder output\n\t:param tf.Tensor source_lengths: A vector of integers where each entry is the length of an input sample\n\t:param tf.Tensor target_lengths: A vector of integers where each entry is the length of a target sample\n\t:param int go_token: id for the token fed into the first decoder cell\n\t:param int eos_token: End-Of-Sequence token that tells the decoder to stop decoding\n\t:param int beam_width: The number of beams to generate during inference (beam_width=1 performs greedy decoding)\n\t:param bool infer: Whether training or inference is being performed\n\n\t:returns: Either the training logits or the inferred beams, depending on `infer`\n\t:rtype: tf.Tensor\n\t\"\"\"\n\n\tbatch_size = tf.shape(source_lengths)[0]\n\tinit_dec_state_size = batch_size\n\tif infer:\n\t\t#Tile inputs\n\t\tenc_state = tf.contrib.seq2seq.tile_batch(enc_state, beam_width)\n\t\tenc_outputs = tf.contrib.seq2seq.tile_batch(enc_outputs, beam_width)\n\t\tsource_lengths = tf.contrib.seq2seq.tile_batch(source_lengths, beam_width)\n\t\tinit_dec_state_size *= beam_width\n\n\twith tf.variable_scope(\"decoding\") as decoding_scope:\n\t\t#TRAINING\n\t\tattn = tf.contrib.seq2seq.BahdanauAttention(attn_size, enc_outputs, source_lengths)\n\t\tattn_cell = tf.contrib.seq2seq.AttentionWrapper(dec_cell, attn, dec_cell.output_size)\n\n\t\tif infer:\n\t\t\tdecoder = tf.contrib.seq2seq.BeamSearchDecoder(cell = attn_cell,\n\t\t\t embedding = dec_embeddings,\n\t\t\t start_tokens = tf.tile( [go_token], [batch_size]), #Not by batch_size*beam_width, strangely, \n\t\t\t end_token = eos_token,\n\t\t\t beam_width = beam_width,\n\t\t\t initial_state = attn_cell.zero_state(init_dec_state_size, tf.float32).clone(cell_state=enc_state),\n\t\t\t output_layer = output_layer\n\t\t\t) \n\t\t\tfinal_decoder_output, _, _ = tf.contrib.seq2seq.dynamic_decode(decoder, scope=decoding_scope, maximum_iterations=tf.reduce_max(source_lengths)*2)\n\t\t\tbeams = final_decoder_output.predicted_ids\n\t\t\treturn beams\n\t\t\n\t\telse:\t\n\t\t\thelper = tf.contrib.seq2seq.TrainingHelper(dec_embed_input, target_lengths)\n\t\t\ttrain_decoder = tf.contrib.seq2seq.BasicDecoder(attn_cell, helper,\n\t\t\t\t\t\t\t\tattn_cell.zero_state(init_dec_state_size, tf.float32).clone(cell_state=enc_state),\n\t\t \t\t\t\toutput_layer = output_layer)\n\t\t\toutputs, _, _ = tf.contrib.seq2seq.dynamic_decode(train_decoder, impute_finished=True, scope=decoding_scope)\n\t\t\tlogits = outputs.rnn_output\n\t\t\treturn logits\n\t\t\n\nclass Seq2Seq(object):\n\t\"\"\"\n\tAbstract class representing standard sequence-to-sequence model\n\t\"\"\"\n\n\tdef __init__(self, enc_embeddings, dec_embeddings, go_token, eos_token, config, output_layer=None, infer=False):\n\t\t\"\"\"\n\t\t:param matrix-like enc_embeddings: Word embeddings for encoder\n\t\t:param matrix-like dec_embeddings: Word embeddings for decoder\n\t\t:param int go_token: id for the token fed into the first decoder cell\n\t\t:param int eos_token: End-Of-Sequence token that tells the decoder to stop decoding\n\t\t:param config.Config config: Settings for the model\n\t\t:param tf.layers.Layer output_layer: TensorFlow layer applied to the decoder output at each time step\n\t\t:param bool 
infer: Whether inference or training is being performed\n\t\t\"\"\"\n\n\t\tself._data_placeholders = _create_placeholders()\n\t\tself._input_data = self._data_placeholders.input_data\n\t\tself._targets = self._data_placeholders.targets\n\t\tself._keep_prob = tf.placeholder(tf.float32, name=\"keep_prob\")\n\t\n\t #Both of these could be necessary for subclasses with custom loss functions\n\t\tself._source_lengths = self._data_placeholders.source_lengths\n\t\tself._target_lengths = self._data_placeholders.target_lengths\n\t\tself._enc_embed_input = tf.nn.embedding_lookup(enc_embeddings, self._input_data)\n\t\tself._dec_embed_input = tf.nn.embedding_lookup(dec_embeddings, _process_decoding_input(self._targets, go_token))\n\t\n\t\n\t\tforward_cell = _multi_dropout_cell(config.rnn_size, self._keep_prob, config.num_layers)\n\t\tbackward_cell = _multi_dropout_cell(config.rnn_size, self._keep_prob, config.num_layers)\n\n\t\tenc_outputs, enc_states = tf.nn.bidirectional_dynamic_rnn(cell_fw = forward_cell, cell_bw = backward_cell,\n\t\t\t\t\t\t\t\t\t\tsequence_length = self._source_lengths,\n\t\t\t\t\t\t\t\t\t\tinputs = self._enc_embed_input, dtype=tf.float32)\n\t\tconcatenated_enc_output = tf.concat(enc_outputs, -1)\n\t\tinit_dec_state = enc_states[0] \n\t\n\t\n\t\tdec_cell = _multi_dropout_cell(config.rnn_size, self._keep_prob, config.num_layers)\n\t\tself._beam_width = config.beam_width\n\t\tself._infer = infer\n\t\tdecoder_output = _beam_search_decoder(init_dec_state, concatenated_enc_output, self._dec_embed_input, dec_embeddings,\n\t dec_cell, config.attn_size, output_layer, self._source_lengths, self._target_lengths, go_token, eos_token, config.beam_width, infer)\n\n\t\tif infer:\n\t\t\tself._beams = decoder_output\n\t\t\tself._train_logits = None\n\t\t\tself._eval_mask = None\n\t\t\tself._xent = None\n\t\t\tself._perplexity = None\n\t\t\tself._optimizer = None\n\t\telse:\n\t\t\tself._train_logits = decoder_output\n\t\t\tself._eval_mask = tf.sequence_mask(self._target_lengths, dtype=tf.float32)\n\t\t\tself._xent = tf.contrib.seq2seq.sequence_loss(self._train_logits, self._targets, self.eval_mask)\n\t\t\tself._perplexity = tf.contrib.seq2seq.sequence_loss(self._train_logits, self._targets, self.eval_mask, softmax_loss_function=metrics.perplexity)\n\t\t\tself._optimizer = tf.train.AdamOptimizer(config.learning_rate)\n\t\t\tself._beams = None\n\n\n\t#####COST/LOSS######\n\n\t@property\n\tdef train_op(self):\n\t\t\"\"\"\n\t\ttf.Operation: The operation to be passed to sess.run to update weights\n\t\t\"\"\"\n\t\traise NotImplementedError(\"Abstract method\")\n\n\t@property\n\tdef train_cost(self):\n\t\t\"\"\"\n\t\ttf.Tensor: The training cost to be fetched\n\t\t\"\"\"\n\t\traise NotImplementedError(\"Abstract method\")\n\n\t@property\n\tdef valid_cost(self):\n\t\t\"\"\"\n\t\ttf.Tensor: The validation cost to be fetched\n\t\t\"\"\"\n\t\traise NotImplementedError(\"Abstract method\")\n\n\t@property\n\tdef xent(self):\n\t\t\"\"\"\n\t\ttf.Tensor: The cross-entropy loss between the training logits and the targets\n\t\t\"\"\"\t\n\t\treturn self._xent\n\n\t@property\n\tdef perplexity(self):\n\t\t\"\"\"\n\t\ttf.Tensor: The perplexity loss between the training logits and the targets\n\t\t\"\"\"\n\t\treturn self._perplexity\n\n\t@property\n\tdef optimizer(self):\n\t\t\"\"\"\n\t\ttf.train.Optimizer: The model's optimizer for descending gradients\n\t\t\"\"\"\n\t\treturn self._optimizer\n\n\t@property\n\tdef eval_mask(self):\n\t\t\"\"\"\n\t\ttf.Tensor: Mask of size [batch_size, 
max_target_sequence_length]\n\t\tThis is used to mask losses during training. For instance, with\n\t\ta batch size of 4 and target sequence lengths of [2, 1, 4, 3],\n\t\teval_mask's fetched value would be as follows:\n\n\t\t[ [1, 1, 0, 0], [1, 0, 0, 0], [1, 1, 1, 1], [1, 1, 1, 0] ]\n\t\t\"\"\"\n\t\treturn self._eval_mask\n\n\t@property\n\tdef keep_prob(self):\n\t\t\"\"\"\n\t\ttf.Tensor: A placeholder variable that must be fed a specific keep probability.\n\t\tThat is, to have a dropout of 0.25, one feeds 0.75 to keep_prob\n\t\t\"\"\"\n\t\treturn self._keep_prob\n\n\t@property\n\tdef data_placeholders(self):\n\t\t\"\"\"\n\t\ttf_collections.DataPlaceholders: The 4 placeholders for the model (input_data, targets, source_lengths, target_lengths)\n\t\t\"\"\"\n\t\treturn self._data_placeholders\n\n\t@property\n\tdef targets(self):\n\t\t\"\"\"\n\t\ttf.Tensor: The targets placeholder variable\n\t\tShould be fed an integer Tensor of dimensionality [batch_size, max_target_length]\n\t\t\"\"\"\n\t\treturn self._targets\n\n\t@property\n\tdef source_lengths(self):\n\t\t\"\"\"\n\t\ttf.Tensor: The placeholder variable for the lengths of input_data\n\t\tShould be fed an integer Tensor of dimensionality [batch_size]\n\t\t\"\"\"\n\t\treturn self._source_lengths\n\n\t@property\n\tdef target_lengths(self):\n\t\t\"\"\"\n\t\ttf.Tensor: The placeholder variable for the lengths of targets\n\t\t\"\"\"\n\t\treturn self._target_lengths\n\n\t@property\n\tdef train_logits(self):\n\t\t\"\"\"\n\t\ttf.Tensor: Logits produced during training, of dimensionality [batch_size, max_target_length, num_categories]\n\t\t\"\"\"\n\t\treturn self._train_logits\n\n\t@property\n\tdef beams(self):\n\t\t\"\"\"\n\t\ttf.Tensor: The inferred beams of dimensions [batch_size, max_time_step, beam_width]\n\t\t\"\"\"\n\t\treturn self._beams\n\n\t@property\n\tdef beam_width(self):\n\t\t\"\"\"\n\t\tint: The number of beams produced during inference.\n\t\t\"\"\"\n\t\treturn self._beam_width\n\n\t@property\n\tdef enc_embed_input(self):\n\t\t\"\"\"\n\t\tmatrix_like: The embedding matrix for input_data\n\t\t\"\"\"\n\t\treturn self._enc_embed_input\n\n\t@property\n\tdef dec_embed_input(self):\n\t\t\"\"\"\n\t\tmatrix_like: The embedding matrix for targets\n\t\t\"\"\"\n\t\treturn self._dec_embed_input\n\nclass Aff2Vec(Seq2Seq):\n\t\"\"\"\n\tModel with affective embeddings\n\t\"\"\"\n\tdef __init__(self, **kwargs):\n\t\t\"\"\"\n\t\tUtilizes the same parameters as :py:class:`models.Seq2Seq`\n\t\t\"\"\"\n\t\tsuper(Aff2Vec, self).__init__(**kwargs)\n\n\t\tconfig = kwargs[\"config\"]\n\t\tinfer = kwargs[\"infer\"]\n\t\tif not infer:\n\t\t\tgradients = self.optimizer.compute_gradients(self.xent)\n\n\t\t\tcapped_gradients = [(tf.clip_by_value(grad, -config.gradient_clip_value, config.gradient_clip_value), var) for grad, var in gradients if grad is not None]\n\t\t\tself._train_op = self.optimizer.apply_gradients(capped_gradients)\n\t\telse:\n\t\t\tself._train_op = None\n\n\t@property\n\tdef train_cost(self):\n\t\t\"\"\"\n\t\ttf.Tensor: The training cost to be fetched\n\t\t\"\"\"\n\t\treturn self.xent\n\n\t@property\n\tdef valid_cost(self):\n\t\t\"\"\"\n\t\ttf.Tensor: The validation cost to be fetched\n\t\t\"\"\"\n\t\treturn self.perplexity\n\n\t@property\n\tdef train_op(self):\n\t\t\"\"\"\n\t\ttf.Operation: The operation to be passed to sess.run to update weights\n\t\t\"\"\"\n\t\treturn self._train_op\n\nclass VADAppended(Seq2Seq):\n\t\"\"\"\n\tModel whose embeddings have VAD values as their last 3 dimensions\n\t\"\"\"\n\tdef __init__(self, full_embeddings,
go_token, eos_token, config, output_layer=None,\n\t\tkeep_prob = 1, infer=False, affect_strength=0.5):\n\t\t\"\"\"\n\t\t:param matrix-like full_embeddings: Word embeddings, with the VAD values as the last 3 dimensions\n\t\t:param int go_token: id for the token fed into the first decoder cell\n\t\t:param int eos_token: End-Of-Sequence token that tells the decoder to stop decoding\n\t\t:param config.Config config: Settings for the model\n\t\t:param tf.layers.Layer output_layer: TensorFlow layer applied to the decoder output at each time step\n\t\t:param bool infer: Whether inference or training is being performed\n\t\t:param float affect_strength: hyperparameter in the range [0.0, 1.0)\n\t\t\"\"\"\n\t\t\n\t\tSeq2Seq.__init__(self, full_embeddings, full_embeddings, go_token, eos_token, config,\n\t\t\t\toutput_layer=output_layer, infer=infer)\n\n\t\t#This is a variable, rather than a computation, so it should be kept if we ever, say, want to train a model, query, later come back and train more . . .\n\t\tself._train_affect = tf.placeholder_with_default(False, shape=())\n\n\t\tprint(config.gradient_clip_value)\n\t\tif not infer:\n\t\t\temot_embeddings = full_embeddings[:, -3: ]\n\t\t\tneutral_vector = tf.constant([5.0, 1.0, 5.0], dtype=tf.float32)\n\t\t\taffective_loss = loss_functions.max_affective_content(affect_strength, self.train_logits, self.targets, emot_embeddings, neutral_vector, self.eval_mask)\n\t\t\tself._train_cost = tf.cond(self._train_affect, true_fn= lambda: affective_loss, false_fn= lambda: self.xent)\n\n\t\t\tgradients = self.optimizer.compute_gradients(self._train_cost)\n\t\t\tcapped_gradients = [(tf.clip_by_value(grad, -config.gradient_clip_value, config.gradient_clip_value), var) for grad, var in gradients if grad is not None]\n\t\t\tself._train_op = self.optimizer.apply_gradients(capped_gradients)\n\t\telse:\n\t\t\tself._train_cost = None\n\t\t\tself._train_op = None\t\n\n\t@property\n\tdef train_affect(self):\n\t\t\"\"\"\n\t\ttf.Tensor: Boolean placeholder variable determining whether to use an affective loss function or standard cross-entropy\n\t\t\"\"\"\n\t\treturn self._train_affect\n\t\n\t@property\n\tdef train_cost(self):\n\t\t\"\"\"\n\t\ttf.Tensor: The training cost to be fetched\n\t\tDepending on the value feeded into train_affect, could be either affective loss or standard cross-entropy\n\t\t\"\"\"\n\t\treturn self._train_cost\n\n\t@property\n\tdef valid_cost(self):\n\t\t\"\"\"\n\t\ttf.Tensor: The validation cost to be fetched\n\t\t\"\"\"\n\t\treturn self.perplexity\n\t\n\t@property\n\tdef train_op(self):\n\t\t\"\"\"\n\t\ttf.Operation: The operation to be passed to sess.run to update weights\n\t\t\"\"\"\n\t\treturn self._train_op\n","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":14654,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"448388874","text":"from django.core.management.base import BaseCommand\nfrom images.models import Image\n\nimport json\n\nclass Command(BaseCommand):\n def handle(self, *args, **options):\n f = open('man_riding_motorcycle.json')\n data = json.load(f)\n f.close()\n Image.objects.all().delete()\n for url in data:\n Image.objects.create(url=url,\n score=data[url]['score'],\n cls=data[url]['class']\n )\n","sub_path":"images/management/commands/initialize_database.py","file_name":"initialize_database.py","file_ext":"py","file_size_in_byte":408,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} 
+{"seq_id":"28584668","text":"#! python3\n# multiTable.py - Creates an N*N multiplication table in an Excel spreadsheet\n\nimport openpyxl\n\n\nwb = openpyxl.Workbook()\nsheet = wb.active\nnum1 = int(input('What number would you like to start the multiplication table at:\\n'))\nnum2 = int(input('What number would you like to end the multiplication table at:\\n'))\nnum = num1\nlabel = 2\n\n\nfor i in range(num1, num2+1):\n cell = sheet.cell(row=1, column=label)\n cell.value = num\n cell = sheet.cell(row=label, column=1)\n cell.value = num\n num += 1\n label += 1\n\nrowVar = 2\nrowVar2 = 1\nfor i in range(num1, num2+1):\n col = 2\n for i in range(num1, num2+1):\n cell1 = sheet.cell(row=rowVar, column=1)\n cell2 = sheet.cell(row=1, column=col)\n cell3 = sheet.cell(row=rowVar, column=col)\n cell3.value = cell2.value * cell1.value\n col += 1\n rowVar += 1\n\nwb.save('multiTable' + str(num1) + 'To' + str(num2)+'.xlsx')","sub_path":"ch12/practice-projects/multiplication-table/multiTable.py","file_name":"multiTable.py","file_ext":"py","file_size_in_byte":922,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"543620773","text":"import sys\nfrom PyQt5.QtWidgets import *\nfrom PyQt5.QtGui import *\nfrom PyQt5.QtCore import *\n\n\n\nclass Float_Field(QWidget):\n def __init__(self):\n\n super().__init__()\n self.initUI()\n\n def initUI(self):\n\n self.setWindowTitle(\"Field\")\n self.setGeometry(20,20,396,307)\n # Setting up the window\n self.draw_field_table()\n self.layout = QVBoxLayout()\n self.layout.addWidget(self.table)\n self.setLayout(self.layout)\n\n self.show()\n def draw_field_table(self):\n self.table = QTableWidget(10, 2)\n # Making the indexes of rows and columns invisible to user\n self.table.verticalHeader().setVisible(False)\n self.table.horizontalHeader().setVisible(False)\n column_1_text = [\"Name\",\"Abbreviation\",\"Description\", \"Data Type\", \"Base\", \"Mask\", \"Value Constraint\", \"Var Size\", \"ID Value\", \"Required\"]\n i = 0\n while i < self.table.rowCount():\n self.table.setItem(i, 0, QTableWidgetItem(column_1_text[i]))\n i += 1\n\n widgets = []\n\n # Creating line To get Name\n\n name_line = QLineEdit()\n widgets.append(name_line)\n\n # Creating line To get Abbreviation\n abbr_line = QLineEdit()\n widgets.append(abbr_line)\n\n # Creating line To get Description\n desc_line = QLineEdit()\n widgets.append(desc_line)\n\n # Creating drop down list of data types\n data_type_com= QComboBox()\n data_types = [\"FLOAT\"]\n for data_type in data_types:\n data_type_com.addItem(data_type)\n widgets.append(data_type_com)\n\n # Creating drop down list of bases\n base_com = QComboBox()\n bases = [\"Select base\", \"NONE\", \"DEC\", \"HEX\", \"OCT\", \"DEC_HEX\", \"HEX_DEC\"]\n for base in bases:\n base_com.addItem(base)\n widgets.append(base_com)\n\n # Creating line To get mask\n mask_line = QLineEdit()\n widgets.append(mask_line)\n\n # Creating line To get Value Constraint\n va_cons_line = QLineEdit()\n widgets.append(va_cons_line)\n\n # Creating a line edit to add number of size and drop down to identify if it is in bytes or bits\n var_size_row_layout = QHBoxLayout()\n var_choice = QComboBox()\n var_choices = [\"BYTES\", \"BITS\"]\n for choice in var_choices:\n var_choice.addItem(choice)\n var_size_line = QLineEdit()\n var_size_row_layout.addWidget(var_size_line)\n var_size_row_layout.addWidget(var_choice)\n var_size_cell = QWidget()\n var_size_cell.setLayout(var_size_row_layout)\n widgets.append(var_size_cell)\n\n # Creating 
line To get ID Value\n        id_value_line = QLineEdit()\n        widgets.append(id_value_line)\n\n        req_check_box = QCheckBox()\n        req_cell = QWidget()\n        req_cell_layout = QHBoxLayout()\n        req_cell_layout.addWidget(req_check_box)\n        req_cell_layout.setAlignment(Qt.AlignCenter)\n        req_cell.setLayout(req_cell_layout)\n        widgets.append(req_cell)\n\n        # Adding Widgets to Table\n        j = 0\n        while j < len(widgets):\n            self.table.setCellWidget(j, 1, widgets[j])\n            j += 1\n        self.table.setColumnWidth(1, 138)\n        self.table.resizeRowsToContents()\n\n    def clickMethod(self):\n        # Row indices follow the layout built in draw_field_table (this Float_Field table has no Reference List row)\n        var_size_cell = self.table.cellWidget(7, 1)\n        dict01 = dict({'Name': self.table.cellWidget(0, 1).text(), 'Abbreviation': self.table.cellWidget(1, 1).text(), 'Description': self.table.cellWidget(2, 1).text(), 'Data Type': self.table.cellWidget(3, 1).currentText(), 'Base': self.table.cellWidget(4, 1).currentText(), 'Mask': self.table.cellWidget(5, 1).text(), 'Value Constraint': self.table.cellWidget(6, 1).text(), 'Var Size': var_size_cell.findChild(QLineEdit).text() + ' ' + var_size_cell.findChild(QComboBox).currentText(), 'ID Value': self.table.cellWidget(8, 1).text()})\n        if self.table.cellWidget(9, 1).findChild(QCheckBox).isChecked():\n            dict01.update({'Required': 'true'})\n        else:\n            dict01.update({'Required': 'false'})\n\nif __name__ == '__main__':\n    app = QApplication([])\n    test = Float_Field()\n    sys.exit(app.exec_())\n\n","sub_path":"src/main/python/UI/DBA_FrontEnd/Float_Field.py","file_name":"Float_Field.py","file_ext":"py","file_size_in_byte":4078,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"377693702","text":"def count_words(filename):\n    \"\"\" Counts the approximate number of words in a file. \"\"\"\n    try:\n        with open(filename) as f_obj:\n            contents = f_obj.read()\n    except FileNotFoundError:\n        # Write the names of the missing files to 'missing_files.txt'.\n        with open('missing_files.txt', 'a') as f_obj:\n            f_obj.write(filename + \"\\n\")\n    else:\n        words = contents.split()\n        num_words = len(words)\n        print(\"The file '{}' has about {} words.\".format(filename, num_words))    \n\nfilenames = [\"alice.txt\", \"siddhartha2.txt\", \"moby_dick2.txt\", \"little_women.txt\"]\nfor filename in filenames:\n    count_words(filename)","sub_path":"cap_10/word_count2.py","file_name":"word_count2.py","file_ext":"py","file_size_in_byte":673,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"635477219","text":"import gym\r\nimport numpy as np\r\nfrom gym.utils import seeding\r\n\r\n\r\nclass ClassifyEnv(gym.Env):\r\n    def __init__(self, mode, imb_rate, X_train, y_train):\r\n        \"\"\"The custom classify environment.\"\"\"\r\n        self.mode = mode  # Train or Test mode\r\n        self.imb_rate = imb_rate  # Imbalance rate: 0 < x < 1\r\n\r\n        self.X_train = X_train\r\n        self.y_train = y_train\r\n\r\n        self.X_len = self.X_train.shape[0]\r\n        self.id = np.arange(self.X_len)  # List of IDs to connect X and y data\r\n\r\n        self.episode_step = 0  # Episode step, resets every episode\r\n\r\n    def seed(self, seed=None):\r\n        self.np_random, seed = seeding.np_random(seed)\r\n        return [seed]\r\n\r\n    def step(self, action):\r\n        \"\"\"The environment takes a step based on action given by policy.\"\"\"\r\n        curr_y_true = self.y_train[self.id[self.episode_step]]\r\n        self.episode_step += 1\r\n        terminal = False\r\n\r\n        if action == curr_y_true:  # Correct action\r\n            if curr_y_true:  # Minority\r\n                reward = 1\r\n            else:  # Majority\r\n                reward = self.imb_rate\r\n\r\n        else:  # Incorrect action\r\n            if curr_y_true:  # Minority\r\n                reward = -1\r\n                if self.mode == \"train\":\r\n                    terminal = True  # Stop episode when minority class is
misclassified\r\n else: # Majority\r\n reward = -self.imb_rate\r\n\r\n if self.episode_step == self.X_len - 1:\r\n terminal = True\r\n\r\n next_state = self.X_train[self.id[self.episode_step]]\r\n return next_state, reward, terminal, {}\r\n\r\n def reset(self):\r\n \"\"\"returns: (states, observations).\"\"\"\r\n if self.mode == \"train\":\r\n np.random.shuffle(self.id)\r\n\r\n self.episode_step = 0 # Reset episode step counter at the end of every episode\r\n initial_state = self.X_train[self.id[self.episode_step]]\r\n return initial_state\r\n","sub_path":"ICMDP_Env.py","file_name":"ICMDP_Env.py","file_ext":"py","file_size_in_byte":1953,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"364593159","text":"import pandas as pd\nimport numpy as np\nfrom qualang_tools.bakery.bakery import baking\nfrom qm.qua import *\nimport importlib.resources as pkg_resources\nimport qualang_tools.bakery as bakery_resources\n\nc1_ops = [ # Clifford operations\n (\"I\",),\n (\"X\",),\n (\"Y\",),\n (\"Y\", \"X\"),\n (\"X/2\", \"Y/2\"),\n (\"X/2\", \"-Y/2\"),\n (\"-X/2\", \"Y/2\"),\n (\"-X/2\", \"-Y/2\"),\n (\"Y/2\", \"X/2\"),\n (\"Y/2\", \"-X/2\"),\n (\"-Y/2\", \"X/2\"),\n (\"-Y/2\", \"-X/2\"),\n (\"X/2\",),\n (\"-X/2\",),\n (\"Y/2\",),\n (\"-Y/2\",),\n (\"-X/2\", \"Y/2\", \"X/2\"),\n (\"-X/2\", \"-Y/2\", \"X/2\"),\n (\"X\", \"Y/2\"),\n (\"X\", \"-Y/2\"),\n (\"Y\", \"X/2\"),\n (\"Y\", \"-X/2\"),\n (\"X/2\", \"Y/2\", \"X/2\"),\n (\"-X/2\", \"Y/2\", \"-X/2\"),\n]\n\n# Cayley table corresponding to above Clifford group structure\n\ncsv = pkg_resources.open_text(bakery_resources, \"c1_cayley_table.csv\")\n\nc1_table = pd.read_csv(csv).to_numpy()[:, 1:]\n\n\nclass RBOneQubit:\n def __init__(self, config: dict, d_max: int, K: int, qubit: str):\n \"\"\"\n Class to retrieve easily baked RB sequences and their inverse operations\n :param config Configuration file\n :param d_max Maximum length of desired RB sequence\n :param K Number of RB sequences\n :param qubit Name of the quantum element designating the qubit\n \"\"\"\n if not (qubit in config[\"elements\"]):\n raise KeyError(f\"Quantum element {qubit} is not in the config\")\n\n self.sequences = [RBSequence(config, d_max, qubit) for _ in range(K)]\n self.inverse_ops = [seq.revert_ops for seq in self.sequences]\n self.duration_trackers = [seq.duration_tracker for seq in self.sequences]\n self.baked_sequences = [seq.sequence for seq in self.sequences]\n\n\ndef find_revert_op(input_state_index: int):\n \"\"\"Looks in the Cayley table the operation needed to reset the state to ground state from input state_tracker\n :param input_state_index Index of the current state tracker\n :return index of the next Clifford to apply to invert RB sequence\"\"\"\n for i in range(len(c1_ops)):\n if c1_table[input_state_index][i] == 0:\n return i\n\n\ndef play_revert_op(index: int, baked_cliffords):\n \"\"\"Plays an operation resetting qubit in its ground state based on the\n transformation provided by the index in Cayley table (switch using baked Cliffords)\n :param index index of the transformed qubit state\"\"\"\n\n with switch_(index):\n for i in range(len(baked_cliffords)):\n with case_(i):\n baked_cliffords[i].run()\n\n\nclass RBSequence:\n def __init__(self, config: dict, d_max: int, qubit: str):\n self.d_max = d_max\n self.config = config\n self.qubit = qubit\n self.state_tracker = [\n 0\n ] * d_max # Keeps track of all transformations done on qubit state\n self.state_init = 0\n self.revert_ops = [\n 0\n ] * d_max # Keeps 
track of inverse op index associated to each sequence\n self.duration_tracker = [0] * d_max # Keeps track of each Clifford's duration\n # self.baked_cliffords = self.generate_cliffords() # List of baking objects for running Cliffords\n self.operations_list = [None] * d_max\n self.inverse_op_string = [\"\"] * d_max\n self.sequence = self.generate_RB_sequence() # Store the RB sequence\n\n def play_revert_op2(self, index: int):\n \"\"\"Plays an operation resetting qubit in its ground state based on the\n transformation provided by the index in Cayley table (explicit switch case)\n :param index index of the transformed qubit state\"\"\"\n qubit = self.qubit\n\n with switch_(index):\n with case_(0):\n play(\"I\", qubit)\n with case_(1):\n play(\"X\", qubit)\n with case_(2):\n play(\"Y\", qubit)\n with case_(3):\n play(\"Y\", qubit)\n play(\"X\", qubit)\n with case_(4):\n play(\"X/2\", qubit)\n play(\"Y/2\", qubit)\n with case_(5):\n play(\"X/2\", qubit)\n play(\"-Y/2\", qubit)\n with case_(6):\n play(\"-X/2\", qubit)\n play(\"Y/2\", qubit)\n with case_(7):\n play(\"-X/2\", qubit)\n play(\"-Y/2\", qubit)\n with case_(8):\n play(\"Y/2\", qubit)\n play(\"X/2\", qubit)\n with case_(9):\n play(\"Y/2\", qubit)\n play(\"-X/2\", qubit)\n with case_(10):\n play(\"-Y/2\", qubit)\n play(\"X/2\", qubit)\n with case_(11):\n play(\"-Y/2\", qubit)\n play(\"-X/2\", qubit)\n with case_(12):\n play(\"X/2\", qubit)\n with case_(13):\n play(\"-X/2\", qubit)\n with case_(14):\n play(\"Y/2\", qubit)\n with case_(15):\n play(\"-Y/2\", qubit)\n with case_(16):\n play(\"-X/2\", qubit)\n play(\"Y/2\", qubit)\n play(\"X/2\", qubit)\n with case_(17):\n play(\"-X/2\", qubit)\n play(\"-Y/2\", qubit)\n play(\"X/2\", qubit)\n with case_(18):\n play(\"X\", qubit)\n play(\"Y/2\", qubit)\n with case_(19):\n play(\"X\", qubit)\n play(\"-Y/2\", qubit)\n with case_(20):\n play(\"Y\", qubit)\n play(\"X/2\", qubit)\n with case_(21):\n play(\"Y\", qubit)\n play(\"-X/2\", qubit)\n with case_(22):\n play(\"X/2\", qubit)\n play(\"Y/2\", qubit)\n play(\"X/2\", qubit)\n with case_(23):\n play(\"-X/2\", qubit)\n play(\"Y/2\", qubit)\n play(\"-X/2\", qubit)\n\n def generate_cliffords(self):\n \"\"\"\n Returns a list of baking object giving access to baked Clifford waveforms\n \"\"\"\n\n baked_clifford = [None] * len(c1_ops)\n for i in range(len(c1_ops)):\n with baking(self.config) as b2:\n for op in c1_ops[i]:\n b2.play(op, self.qubit)\n baked_clifford[i] = b2\n return baked_clifford\n\n def generate_RB_sequence(self):\n \"\"\"\n Creates a baking object generating a random Clifford sequence of length d_max\n \"\"\"\n\n with baking(self.config) as b:\n for d in range(self.d_max):\n i = np.random.randint(0, len(c1_ops))\n if d > 0:\n self.duration_tracker[d] = self.duration_tracker[\n d - 1\n ] # Set duration to value of the sequence step\n\n # Play the random Clifford\n random_clifford = c1_ops[i]\n self.operations_list[d] = random_clifford\n for op in random_clifford:\n b.play(op, self.qubit)\n self.duration_tracker[\n d\n ] += 1 # Add additional duration for each pulse played to build Clifford\n\n if d == 0: # Handle the case for qubit set to original/ground state\n self.state_tracker[d] = c1_table[self.state_init][i]\n else: # Get the newly transformed state within th Cayley table based on previous step\n self.state_tracker[d] = c1_table[self.state_tracker[d - 1]][i]\n self.revert_ops[d] = find_revert_op(self.state_tracker[d])\n self.inverse_op_string[d] = c1_ops[self.revert_ops[d]]\n return 
b\n","sub_path":"qualang_tools/bakery/randomized_benchmark.py","file_name":"randomized_benchmark.py","file_ext":"py","file_size_in_byte":7679,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"276808381","text":"'''\nA natural number whose positive divisors, excluding itself, sum to the number itself is called a perfect number. For example,\n6 and 28 are perfect numbers. 6=1+2+3 // 1, 2 and 3 are each divisors of 6\n28=1+2+4+7+14 // 1, 2, 4, 7 and 14 are each divisors of 28\n\nWrite code that takes a natural number N as input and prints every perfect number less than or equal to N.\n'''\n\nr = 3000\n\ndef chk(input):\n    self = 0\n    for n in range(1, input):\n        if input % n == 0:\n            self += n\n    return input == self\n\n\nfor n in range(1, r):\n    if chk(n):\n        print(n)\n\n\n\n\n\n\n","sub_path":"128.py","file_name":"128.py","file_ext":"py","file_size_in_byte":588,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"581232918","text":"#!/usr/bin/python\nfrom qpt_v2 import *\n\ndef main():\n    while True:\n        print('Enter following input in degrees...or q to quit')\n        az = input('Enter azimuth: ')\n        el = input('Enter elevation: ')\n        if (az == 'q' or el == 'q'):\n            shut_down()\n            print('Program Terminated')\n            break\n        else:\n            move_to_position(az, el)\n\n\n","sub_path":"Master Files/Dish-Tracker-master-885e8c34e3964a0a510c6822bf6adbcceabbdc97/Code/driver.py","file_name":"driver.py","file_ext":"py","file_size_in_byte":383,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"582818327","text":"from __future__ import print_function\nfrom periodic import Periodic\nfrom media import Media\nfrom book import Book\nfrom film import Film\nfrom video import Video\n\nfrom searchengine import SearchEngine\n\n\nSearch_engine = SearchEngine()\n\nsearch_choice = 0\nwhile search_choice != '5':\n\tprint('How will we be searching?')\n\tprint(' (1) Search by Title')\n\tprint(' (2) Search by Subject')\n\tprint(' (3) Search by Call Number')\n\tprint(' (4) Search by Other')\n\tprint(' (5) Quit')\n\tsearch_choice = raw_input('Enter a number: ')\n\tresults = []\n\n\tif search_choice == '1':\n\t\tprint('What are we searching for: ')\n\t\ttitle = raw_input()\n\t\tresults = Search_engine.search_by_title(title)\n\telif search_choice == '2':\n\t\tprint('What are we searching for: ')\n\t\tsubject = raw_input()\n\t\tresults = Search_engine.search_by_subjects(subject)\n\telif search_choice == '3':\n\t\tprint('What are we searching for: ')\n\t\tcall_no = raw_input()\n\t\tresults = Search_engine.search_by_call_number(call_no)\n\telif search_choice == '4':\n\t\tprint('What are we searching for: ')\n\t\tother = raw_input()\n\t\tresults = Search_engine.search_by_other(other)\n\telif search_choice == '5':\n\t\tprint('Thanks for searching!')\n\t\tbreak\n\n\tfor media in results:\n\t\tmedia.display()\n","sub_path":"Other/Library_Catalog_Python/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1210,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"264105548","text":"from util import *\n\n\n@apply\ndef apply(self):\n    from axiom.algebra.add.to.sum.limits.push_front import absorb\n    return Equal(self, absorb(Cap, self), evaluate=False)\n\n\n@prove\ndef prove(Eq):\n    from axiom import sets\n\n    k, n = Symbol(integer=True)\n    i = Symbol(domain=Range(n))\n    f = Function(etype=dtype.integer)\n    Eq << apply(Intersection(Cap[k:1 + i:n](f(k)), f(i), evaluate=False))\n\n    Eq << Eq[-1].this.rhs.apply(sets.cap.to.intersect.split, {i})\n\n\nif __name__ == '__main__':\n    run()\n# created on
2021-07-12\n","sub_path":"axiom/sets/union/to/cup/limits/push_front.py","file_name":"push_front.py","file_ext":"py","file_size_in_byte":527,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"365570730","text":"from kivy.uix.button import Button\nfrom kivy.uix.widget import Widget\nfrom kivy.uix.boxlayout import BoxLayout\nfrom kivy.app import App\nfrom kivy.graphics import Mesh, Color\nfrom functools import partial\nfrom math import cos, sin, pi\nfrom hex import *\n\nclass HexWidget(Widget):\n    def __init__(self, **kwargs):\n        if 'hex' in kwargs:\n            self.hex = kwargs['hex']\n        if 'hex_layout' in kwargs:\n            self.hex.layout = kwargs['hex_layout']\n        super(HexWidget, self).__init__()\n        with self.canvas:\n            Color(self.hex.q + 0.25, self.hex.r + 0.25, self.hex.s + 0.25, 1.0)\n            Mesh(vertices=self.hex.polygon_verticies(), indices=range(6), mode='triangle_fan')\n\n\nclass MeshTestApp(App):\n    def build(self):\n        root = Widget()\n        grid = grid_hexagon(3)\n        for (hex, nothing) in grid.items():\n            hex.layout = Layout(layout_pointy, Point(30, 30), Point(350, 350))\n            root.add_widget(HexWidget(hex=hex))\n        return root\n\n\nif __name__ == '__main__':\n    MeshTestApp().run()","sub_path":"test/hex_widget.py","file_name":"hex_widget.py","file_ext":"py","file_size_in_byte":1051,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"543283875","text":"# -- coding = 'utf-8' -- \n# Author Kylin\n# Python Version 3.7.3\n# OS macOS\n\"\"\"\nNo. 61 Rotate List\nRequirement:\n    Given the head node of a linked list, rotate the list so that every node moves k positions to the right\nNote:\n    The number of nodes in the list is in the range [0, 500]\n\"\"\"\n\n\ndef rotateRight(head, k):\n    \"\"\"\n    Join the list into a ring, then break it at the required position\n    Time complexity: O(n)\n    :type head: ListNode\n    :type k: int\n    :rtype: ListNode\n    \"\"\"\n    if not head or not head.next or k == 0:\n        return head\n\n    # Find the split point\n    cur = head\n    count = 1\n    while cur.next:\n        count += 1\n        cur = cur.next\n    # Number of elements that keep their original order\n    cut = count - k % count\n\n    # Join into a ring\n    cur.next = head\n\n    # Walk to the split point and break the ring\n    while cut:\n        cur = cur.next\n        cut -= 1\n    newHead = cur.next\n    cur.next = None\n\n    return newHead","sub_path":"LeetCode/src/dataframe09/linkedList/rotate_linkedList.py","file_name":"rotate_linkedList.py","file_ext":"py","file_size_in_byte":896,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"523378854","text":"import os\nfrom datetime import datetime as dt, timedelta\n\nimport numpy as np\nimport pandas as pd\nfrom sklearn.preprocessing import MinMaxScaler\n\nCOLNAMES = ['ISO_2_CODE', 'ADM0_NAME', 'date_epicrv',\n            'NewCase', 'CumCase', 'NewDeath',\n            'CumDeath']\nNEW_COLUMN_NAMES = ['country_region_code', 'country_region', 'date',\n                    'new_cases', 'cum_cases', 'new_deaths', 'cum_deaths']\nDATASET_DIR = f'{os.path.dirname(__file__)}/../datasets'\nFILENAME = 'who_cases_deaths.csv'\nDATE_FORMAT = '%Y-%m-%d'\n\nnp.random.seed(7)\nSCALER = MinMaxScaler(feature_range=(0, 1))\n\n\ndef load_data():\n    \"\"\"\n    Load a dataframe from a CSV file.\n    Loaded with selected columns.\n    TODO (second priority): switch database or cloud\n\n    :return: dataframe: pandas.core.frame.DataFrame\n    \"\"\"\n    return pd.read_csv(f'{DATASET_DIR}/{FILENAME}', usecols=COLNAMES)\n\n\ndef preprocess(dataframe):\n    \"\"\"\n    Preprocess the dataset to contain formatted date;\n    renamed columns for better integrity among other services, data\n    storages and scripts;\n\n    trimmed dataframe to work only with country, its new cases of\n    COVID-19 with respect to dates.\n\n    :param dataframe: pandas.core.frame.DataFrame\n    :return:
dataframe: pandas.core.frame.DataFrame\n \"\"\"\n\n # rename column names\n mapped_columns = dict(zip(COLNAMES, NEW_COLUMN_NAMES))\n dataframe = dataframe.rename(columns=mapped_columns)\n\n # format date\n dataframe['date'] = pd.to_datetime(dataframe['date'])\n dataframe['date'] = dataframe['date'].dt.strftime(DATE_FORMAT)\n\n # select used columns\n dataframe = dataframe[['country_region_code', 'date', 'new_cases']]\n\n return dataframe\n\n\ndef filter_by_country(dataframe, country_code):\n \"\"\"\n Selects record only for specified country.\n\n Trims the dataset to contain new COVID-19 cases with respect to dates.\n Country code is no longer used in the data frame.\n\n :param dataframe: pandas.core.frame.DataFrame\n :param country_code: str\n :return: filtered_df: pandas.core.frame.DataFrame\n \"\"\"\n filtered_df = dataframe[\n dataframe['country_region_code'] == country_code][\n ['date', 'new_cases']]\n return filtered_df\n\n\ndef separate(dataframe):\n \"\"\"\n Separate dates from values of new cases of COVID-19.\n\n\n :param dataframe: pandas.core.frame.DataFrame\n :return:dates: numpy.ndarray\n values: numpy.ndarray\n\n dates.shape: (N, 1)\n values.shape: (N, 1)\n \"\"\"\n\n dates = dataframe['date'].values.reshape(-1, 1)\n\n dataframe['new_cases'] = dataframe['new_cases'].astype('float32')\n values = dataframe['new_cases'].values.reshape(-1, 1)\n\n return dates, values\n\n\ndef normalize(y):\n \"\"\"\n Normalize the values to be in range 0..1\n\n Shapes should remain identical: (N, 1)\n\n :param y: numpy.ndarray\n :return: numpy.ndarray\n \"\"\"\n\n return SCALER.fit_transform(y)\n\n\ndef denormalize(sample):\n \"\"\"\n Denormalize the values to be actual number of COVID-19 cases.\n\n Shapes should remain identical: (1, 1)\n\n :param sample: numpy.ndarray\n :return: numpy.ndarray\n \"\"\"\n\n return SCALER.inverse_transform(sample)\n\n\ndef apply_lookback(dataset, look_back=1):\n \"\"\"\n Creates 2 datasets.\n X contains look_back number of columns. I.e., if look_back is 3,\n\n X would contain 3 columns.\n Y would contain 1 column.\n\n Each sample (row) is a sequence of look_back number of COVID-19\n cases for look_back time steps.\n X: | x(t-2) | x(t-1) | x(t) |\n\n Y: |x(t+1)|\n Y contains real values at t+1.\n\n Shapes: N is the length.\n N in dataset has lookback more number of samples more than X and Y.\n\n dataset.shape is (N, 1)\n X.shape is (N-look_back, 1)\n Y.shape is (N-look_back, )\n\n :param dataset: numpy.ndarray\n :param look_back: int\n :return: numpy.ndarray, numpy.ndarray\n \"\"\"\n\n data_x, data_y = [], []\n for i in range(len(dataset) - look_back):\n a = dataset[i:(i + look_back), 0]\n data_x.append(a)\n data_y.append(dataset[i + look_back, 0])\n\n return np.array(data_x), np.array(data_y)\n\n\ndef reshape(x):\n \"\"\"\n Reshape to fit a recurrent neural network's input shape.\n\n :param x: numpy.ndarray\n x.shape: (N, look_back)\n\n :return reshaped: numpy.ndarray\n reshaped.shape: (N, 1, look_back)\n \"\"\"\n reshaped = np.reshape(x, (x.shape[0], 1, x.shape[1]))\n\n return reshaped\n\n\ndef unite_dates_samples(dates, samples):\n \"\"\"\n Horizontally unites dates for x(t+1)\n and samples for | x(t-k) | ... 
| x(t) |,\n where k is look_back-1.\n\n Shapes: dates: (N, 1)\n samples: (N, look_back)\n hstacked: (N, 1 + look_back)\n\n :param dates: numpy.ndarray\n :param samples: numpy.ndarray\n :return hstacked: numpy.ndarray\n \"\"\"\n\n hstacked = np.hstack((dates, samples))\n\n return hstacked\n\n\ndef change_date(date, delta_days=0):\n \"\"\"\n Gets a date in delta_days number of days.\n delta_days can be positive or negative.\n\n :param date: datetime.datetim\n :param delta_days: int\n :return next_date: str\n \"\"\"\n\n detla = timedelta(days=delta_days)\n\n date = dt.strptime(date, DATE_FORMAT)\n\n next_date = date + detla\n next_date = dt.strftime(next_date, DATE_FORMAT)\n\n return next_date\n\n\ndef append_sample(array, predicted, look_back, requested_day):\n \"\"\"\n Appends a predicted number of COVID-19 cases to the end of all samples;\n Creates the next date for t+1.\n\n Generate a new sample as | x(t-k) | ... | x(t) |\n where k is look_back-1\n and x(t) is the predicted value.\n\n\n :param array: numpy.ndarray\n array.shape: (N, 4)\n :param predicted: numpy.ndarray\n predicted.shape: (1, 1)\n\n :param look_back: int\n :param requested_day: str\n\n :return: numpy.ndarray, str\n \"\"\"\n\n # next date\n next_date_formatted = change_date(requested_day, delta_days=1)\n next_date_formatted = np.array([next_date_formatted])\n\n # generate next sample\n selected = array[array[:, 0] == requested_day, 2:].reshape(look_back - 1, )\n selected = np.append(selected, predicted.reshape(1, ))\n next_sample = np.append(next_date_formatted, selected)\n\n # append next sample\n if len(array[array[:, 0] == next_date_formatted, :]) == 0:\n array = np.vstack((array, next_sample))\n else:\n array[array[:, 0] == next_date_formatted] = next_sample\n\n return array, next_date_formatted[0]\n\n\ndef get_sample(united_samples, requested_date):\n \"\"\"\n Extracts a sample for a prediction for a given date.\n If no such date is found, extract the last available sample.\n\n If it is available, extract the sample for this date\n\n :param united_samples: numpy.ndarray\n united_samples.shape: (N, 1 + look_back)\n :param requested_date: str\n\n :return sample: numpy.ndarray\n sample.shape: (look_back, )\n day_taken: str\n \"\"\"\n search_res = np.where(united_samples[:, 0] == requested_date)[0]\n if len(search_res) == 0:\n sample = united_samples[-1:, :]\n day_taken = sample[0, 0]\n else:\n sample = united_samples[united_samples[:, 0] == requested_date]\n day_taken = requested_date\n\n sample = sample[0, 1:] # eliminate date in the first column\n\n return sample, day_taken\n","sub_path":"backend/ML/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":8196,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"65519282","text":"# Fibonacci Numbers\ndef fib(n):\n a, b, c = 0, 1, 0\n if n == 0:\n return n\n else:\n for i in range(2, n):\n c = a + b\n a = b\n b = c\n return a + b\n\n\nprint(fib(5))\n\n# N Stairs - Cracking The Coding Interview\n\n\ndef stairs(n):\n if n < 0:\n return 0\n elif n == 1 or n == 0:\n return 1\n return stairs(n - 1) + stairs(n - 2) + stairs(n - 3)\n\n\nprint(stairs(2))\n","sub_path":"python.py","file_name":"python.py","file_ext":"py","file_size_in_byte":433,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"513551501","text":"import numpy as np\nimport pandas as pd\nimport main_standardized_import as stanimp\nfrom sklearn.preprocessing import LabelBinarizer\nfrom 
sklearn.feature_extraction import DictVectorizer\nfrom sklearn.decomposition import TruncatedSVD\n\nfile_names = ['../raw_data/members.csv',\n '../raw_data/song_extra_info.csv',\n '../raw_data/songs.csv',\n '../raw_data/test.csv',\n '../raw_data/train.csv']\n\nmembers, song_extra_info, songs, test, train = [pd.read_csv(x)\n for x in file_names]\n\nmembers, song_extra_info, songs, test, train =\\\n stanimp.kkbox_cleaning(members, song_extra_info, songs, test, train)\n\ndel members, song_extra_info, test, train\n\nprint(\">>> start binarize songs language\")\n# binarize language categories\nlanguage_lb = LabelBinarizer()\nlanguage_lb.fit(songs.language.values.astype(int).reshape(-1, 1))\nlang_bi_v = language_lb.transform(\n songs.language.values.astype(int).reshape(-1, 1))\nlang_binaries = ['language_bi_'+str(i+1) for i in range(lang_bi_v.shape[1])]\nfor i, name in enumerate(lang_binaries):\n songs[name] = lang_bi_v[:, i]\n\n# matrix factorization on song artist_name\nprint(\">>> start matrix factorization for songs artist_name\")\n# create list of list for the multilabel variable\nartist_names = songs['artist_name'].str.split('&').tolist()\n# create list of unique identifier for later assembling\nsong_id = songs.song_id.tolist()\n# generate dictionary for the sklearn DictVectorizer\ndict_artist = [{name.strip(): 1 for name in artist_names[i]} for\n i in range(len(song_id))]\n# Train a DictVectorizer based on d_artist_name\nvec_artist = DictVectorizer().fit(dict_artist)\n# Generate a CSR sparse matrix use the trained DictVectorizer\nm_artist = vec_artist.transform(dict_artist)\n# Train a TruncatedSVD model with m_artist_name\nsvd_components = 10\nrand_seed = 1122\nsvd_artist_name = TruncatedSVD(n_components=svd_components,\n algorithm='randomized',\n n_iter=5,\n random_state=rand_seed).fit(m_artist)\n# Generate a svd matrix for song artist_names\nv_artist = svd_artist_name.transform(m_artist)\n# Assemble a pd.DataFrame with the artist_svd\n# artist_df can be merged into songs df and replace the artist_names column\nartist_svds = ['artist_svd_'+str(i+1) for i in range(svd_components)]\nfor i, name in enumerate(artist_svds):\n songs[name] = v_artist[:, i]\n\n# matrix factorization on genre_ids\nprint(\">>> start matrix factorization for songs genre_ids\")\ngenre_ids = songs['genre_ids'].str.split('|').tolist()\nsong_id = songs.song_id.tolist()\ndict_genre = [{} if not isinstance(genre_ids[i], list)\n else {genre_id.strip(): 1 for genre_id in genre_ids[i]}\n for i in range(len(genre_ids))]\nvec_genre = DictVectorizer().fit(dict_genre)\nm_genre = vec_genre.transform(dict_genre)\nsvd_components = 10\nrand_seed = 1122\nsvd_genre_id = TruncatedSVD(n_components=svd_components,\n algorithm='randomized',\n n_iter=5,\n random_state=rand_seed).fit(m_genre)\nv_genre = svd_genre_id.transform(m_genre)\ngenre_svds = ['genre_svd_'+str(i+1) for i in range(svd_components)]\nfor i, name in enumerate(genre_svds):\n songs[name] = v_genre[:, i]\n\n# matrix factorization on song composer\nprint(\">>> start matrix factorization for songs composer\")\ncomposers = songs['composer'].str.split('|').tolist()\nsong_id = songs.song_id.tolist()\ndict_composer = [{} if not isinstance(composers[i], list)\n else {composer.strip(): 1 for composer in composers[i]}\n for i in range(len(composers))]\nvec_composer = DictVectorizer().fit(dict_composer)\nm_composer = vec_composer.transform(dict_composer)\nsvd_components = 10\nrand_seed = 1122\nsvd_composer = TruncatedSVD(n_components=svd_components,\n algorithm='randomized',\n 
n_iter=5,\n random_state=rand_seed).fit(m_composer)\nv_composer = svd_composer.transform(m_composer)\ncomposer_svds = ['composer_svd_'+str(i+1) for i in range(svd_components)]\nfor i, name in enumerate(composer_svds):\n songs[name] = v_composer[:, i]\n\n# matrix factorization on lyricist\nprint(\">>> start matrix factorization for songs lyricist\")\nlyricists = songs['lyricist'].str.split('|').tolist()\nsong_id = songs.song_id.tolist()\ndict_lyricist = [{} if not isinstance(lyricists[i], list)\n else {lyricist.strip(): 1 for lyricist in lyricists[i]}\n for i in range(len(lyricists))]\nvec_lyricist = DictVectorizer().fit(dict_lyricist)\nm_lyricist = vec_lyricist.transform(dict_lyricist)\nsvd_components = 10\nrand_seed = 1122\nsvd_lyricist = TruncatedSVD(n_components=svd_components,\n algorithm='randomized',\n n_iter=5,\n random_state=rand_seed).fit(m_lyricist)\nv_lyricist = svd_lyricist.transform(m_lyricist)\nlyricist_svds = ['lyricist_svd_'+str(i+1) for i in range(svd_components)]\nfor i, name in enumerate(lyricist_svds):\n songs[name] = v_lyricist[:, i]\n\nsongs = songs.drop(\n ['genre_ids', 'artist_name', 'composer', 'lyricist', 'language'],\n axis=1)\n","sub_path":"retired/(retired)main_feature_generation_songs.py","file_name":"(retired)main_feature_generation_songs.py","file_ext":"py","file_size_in_byte":5291,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"433682590","text":"import threading\nimport logging\nfrom time import sleep\nfrom random import random\n\nfmt=\"%(threadName)s: %(message)s\"\nlogging.basicConfig(level=logging.DEBUG, format=fmt)\n\ndef consumer(cond, data):\n logging.debug('created')\n if not data:\n logging.debug('Content is unavailable & gets wait')\n with cond:\n cond.wait()\n logging.debug('recv notify, consumes : {}'.format(data.pop(0)))\n\ndef producer(cond, data):\n logging.debug('Created & Produces')\n data.append(random())\n data.append(random())\n\n with cond:\n logging.debug('Notifes the waiting thread')\n cond.notify_all()\n\ndef main():\n cond = threading.Condition()\n data = []\n c1= threading.Thread(target=consumer, name=\"C1\", args=(cond, data))\n c2 = threading.Thread(target=consumer, name=\"C2\", args=(cond, data))\n c1.start()\n c2.start()\n sleep(2)\n p=threading.Thread(target=producer, name=\"P\", args=(cond, data))\n p.start()\n\nif __name__ == '__main__':\n main()","sub_path":"training/advanced_python/multi_threading/mutli_threading_conditionAvalibility.py","file_name":"mutli_threading_conditionAvalibility.py","file_ext":"py","file_size_in_byte":991,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"220129330","text":"from huobi.client.generic import GenericClient\n#from huobi.client.account import AccountClient\nfrom huobi.client.trade import TradeClient\nfrom huobi.constant import *\nfrom keys import *\nfrom huobi.client.market import MarketClient\nfrom huobi.utils import *\nimport requests\nimport datetime\nimport time\n\ndef trace_profit(base_symbol, quote_symbol, amount, monitor_sleep_time, interval_upper_boundary_list, interval_rate_list, interval_delta_list,stop_loss_rate = 0.05, stop_profit_rate = float(\"inf\"), buy_sleep_time = 5, sell_sleep_time = 5,buy_max_times = 1, sell_max_times = 20, test_mode = 0, test_price_vector = []):\n symbol = base_symbol + quote_symbol\n #illegal parameter\n if not (len(interval_upper_boundary_list) == len(interval_rate_list) == len(interval_delta_list)):\n return -1\n\n symbol_info = 
get_symbol_info(symbol)\n\n print(\"----------------------------------------------\")\n print(\"Parameters:\")\n print(\"1. base_symbol=%s, quote_symbol=%s, symbol=%s, amount=%f\" % (base_symbol, quote_symbol, symbol, amount))\n print(\"2. stop_loss_rate=%f, stop_profit_rate=%f\" % (stop_loss_rate, stop_profit_rate))\n print(\"3. monitor_sleep_time=%f\" % monitor_sleep_time)\n print(\"4. interval_upper_boundary_list=%s\" % interval_upper_boundary_list)\n print(\"5. interval_rate_list=%s\" % interval_rate_list)\n print(\"6. interval_delta_list=%s\" % interval_delta_list)\n print(\"7. buy_sleep_time=%d, sell_sleep_time=%d\" % (buy_sleep_time, sell_sleep_time))\n print(\"8. buy_max_times=%d, sell_max_times=%d\" % (buy_max_times, sell_max_times))\n print(\"9. test_mode=%d\" % test_mode)\n print(\"10. symbol detail info:\")\n symbol_info.print_object()\n print(\"----------------------------------------------\\n\")\n\n #Initial\n sell_dif_rate = -1\n hysteresis_cnt = 0\n check_times = 1\n market_client = MarketClient()\n interval_flag_list = [0 for x in range(0,len(interval_upper_boundary_list))] #generate interval flag list according to parameter\n\n #get buy order info from the buy order API\n if 1 == test_mode:\n symbol_start_price = test_price_vector[0]\n else:\n #create buy order\n [symbol_start_price, filled_amount] = must_buy_sell(OrderType.BUY_MARKET, symbol, amount, 0, symbol_info.amount_precision, symbol_info.price_precision, buy_max_times, buy_sleep_time)\n\n #SELL TEST:\n if test_mode == 2:\n print('TEST_MODE2: START SELL:\\n')\n must_buy_sell(OrderType.SELL_MARKET, symbol, filled_amount, 999999999, symbol_info.amount_precision, symbol_info.price_precision, sell_max_times, sell_sleep_time)\n return 0\n\n print('\\nSTART TRACING PROFIT:\\n')\n print('No.\\tPRICE\\t\\tPRICE_DIF\\tPRICE_DIF_RATE\\tSELL_DIF_RATE\\tFLAG_LIST\\tCNT\\tTIME')\n\n #monitor the latest price and price rate\n while(1):\n if test_mode: \n symbol_current_price = test_price_vector[check_times]\n symbol_current_time = 0\n else:\n try:\n list_obj = market_client.get_market_trade(symbol)\n symbol_current_price = list_obj[0].price\n symbol_current_time = list_obj[0].ts\n except requests.exceptions.ProxyError as e:\n print(e)\n continue\n except requests.exceptions.ConnectionError as e:\n print(e)\n continue\n except requests.exceptions.ReadTimeout as e:\n print(e)\n continue \n\n\n #get price dif and rate\n price_dif = symbol_current_price - symbol_start_price\n price_dif_rate = abs(price_dif / symbol_start_price)\n print(\"%d\\t%0.8f\\t%0.8f\\t%0.8f%%\\t%0.8f%%\\t%s\\t%d\\t\" % (check_times,symbol_current_price,price_dif,100*price_dif_rate,100*sell_dif_rate,interval_flag_list,hysteresis_cnt),datetime.datetime.fromtimestamp(symbol_current_time/1000).strftime('%Y-%m-%d %H:%M:%S.%f'))\n\n #stop loss\n if price_dif < 0 and price_dif_rate >= stop_loss_rate:\n print(\"------------Trigger stop loss------------\\n\")\n if not test_mode:\n must_buy_sell(OrderType.SELL_MARKET, symbol, filled_amount, 999999999, symbol_info.amount_precision, symbol_info.price_precision, sell_max_times, sell_sleep_time)\n return 0\n\n #stop profit\n if price_dif > 0 and price_dif_rate >= stop_profit_rate:\n print(\"------------Trigger stop profit------------\\n\")\n if not test_mode:\n must_buy_sell(OrderType.SELL_MARKET, symbol, filled_amount, 999999999, symbol_info.amount_precision, symbol_info.price_precision, sell_max_times, sell_sleep_time)\n return 0\n \n #update price rate and sell when get suitable profit\n if price_dif > 0 and 
price_dif_rate > sell_dif_rate:\n [sell_dif_rate, interval_flag_list, hysteresis_cnt] = sell_dif_rate_hysteresis(sell_dif_rate, interval_flag_list, hysteresis_cnt, price_dif, price_dif_rate, interval_upper_boundary_list, interval_rate_list, interval_delta_list)\n elif price_dif > 0 and price_dif_rate <= sell_dif_rate:\n print(\"------------Trigger get enough profit------------\\n\")\n if not test_mode:\n must_buy_sell(OrderType.SELL_MARKET, symbol, filled_amount, 999999999, symbol_info.amount_precision, symbol_info.price_precision, sell_max_times, sell_sleep_time)\n return 0\n\n #loop check times\n check_times += 1\n\n #loop sleep time\n time.sleep(monitor_sleep_time)\n\n\ndef sell_dif_rate_hysteresis(sell_dif_rate, interval_flag_list, hysteresis_cnt, price_dif, price_dif_rate, interval_upper_boundary_list, interval_rate_list, interval_delta_list):\n #illegal parameter\n if not (len(interval_upper_boundary_list) == len(interval_flag_list) == len(interval_rate_list) == len(interval_delta_list)):\n return -1\n \n for index_boundary, interval in enumerate(interval_upper_boundary_list):\n if price_dif > 0 and price_dif_rate < interval:\n break\n for index_flag, flag in enumerate(interval_flag_list):\n if 0 == flag and index_flag > 0:\n index_flag -= 1\n break\n elif 0 == flag:\n break\n if index_boundary >= index_flag:\n index = index_boundary\n else:\n index = index_flag\n #print(\"index=%d\\n\" % index)\n\n if interval_flag_list[index] != 1:\n if index > 1:\n sell_dif_rate = interval_upper_boundary_list[index-2] + interval_rate_list[index-1] * hysteresis_cnt - (interval_rate_list[index-1] - interval_delta_list[index-1])\n elif index > 0:\n sell_dif_rate = interval_rate_list[index-1] * hysteresis_cnt - (interval_rate_list[index-1] - interval_delta_list[index-1])\n interval_flag_list[index] = 1\n hysteresis_cnt = 1\n\n if 0 == index and price_dif > 0 and price_dif_rate > (interval_rate_list[index] * hysteresis_cnt):\n sell_dif_rate = interval_rate_list[index] * hysteresis_cnt - (interval_rate_list[index] - interval_delta_list[index])\n hysteresis_cnt += 1\n elif index > 0 and price_dif > 0 and price_dif_rate > (interval_upper_boundary_list[index-1] + interval_rate_list[index] * hysteresis_cnt):\n sell_dif_rate = interval_upper_boundary_list[index-1] + interval_rate_list[index] * hysteresis_cnt - (interval_rate_list[index] - interval_delta_list[index])\n hysteresis_cnt += 1\n\n return [sell_dif_rate, interval_flag_list, hysteresis_cnt]\n\ndef must_buy_sell(order_type, symbol, amount, price, amount_precision, price_precision, max_times = 20, loop_sleep_time = 3):\n trade_client = TradeClient(api_key=g_api_key, secret_key=g_secret_key)\n print(\"START TO %s @Local_Time:\" % order_type, datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f'))\n while(max_times):\n while(1):\n try:\n order_id = trade_client.create_order(symbol, account_id=g_account_id, order_type=order_type, source=OrderSource.API, amount=precision_cali(amount, amount_precision), price=price)\n break\n except requests.exceptions.ProxyError as e:\n print(e)\n continue\n except requests.exceptions.ConnectionError as e:\n print(e)\n continue\n except requests.exceptions.ReadTimeout as e:\n print(e)\n continue \n\n time.sleep(loop_sleep_time)\n\n while(1):\n try:\n orderObj = trade_client.get_order(order_id=order_id)\n break\n except requests.exceptions.ProxyError as e:\n print(e)\n continue\n except requests.exceptions.ConnectionError as e:\n print(e)\n continue\n except requests.exceptions.ReadTimeout as e:\n print(e)\n continue \n 
\n if orderObj.state == \"filled\":\n filled_price = precision_cali((float(orderObj.filled_cash_amount) / float(orderObj.filled_amount)), price_precision)\n filled_amount = precision_cali(float(orderObj.filled_amount), amount_precision) - float(orderObj.filled_fees)\n print(\"No.%d Order %s state is %s @\" % (max_times, order_id, orderObj.state), datetime.datetime.fromtimestamp(orderObj.finished_at/1000).strftime('%Y-%m-%d %H:%M:%S.%f')) \n print(\"No.%d Order filled amount is %s @filled price: %.8f\" % (max_times, filled_amount, filled_price))\n return [filled_price, filled_amount]\n else:\n while(1):\n canceled_order_id = trade_client.cancel_order(symbol, order_id)\n if canceled_order_id == order_id:\n print(\"Canceled order %s done\" % canceled_order_id)\n break\n else:\n print(\"Canceled order %s fail\" % canceled_order_id)\n continue\n while(1):\n try:\n canceled_orderObj = trade_client.get_order(order_id=canceled_order_id)\n print(\"No.%d Canceled order filled amount is %s\" % (max_times, canceled_orderObj.filled_amount))\n break\n except requests.exceptions.ProxyError as e:\n print(e)\n continue\n except requests.exceptions.ConnectionError as e:\n print(e)\n continue\n except requests.exceptions.ReadTimeout as e:\n print(e)\n continue \n \n\n max_times -= 1\n amount -= float(canceled_orderObj.filled_amount)\n\ndef precision_cali(num,precision):\n num_str = format(num, '.20f')\n return float(num_str.split('.')[0] + '.' + num_str.split('.')[1][:precision])\n\ndef get_symbol_info(symbol):\n generic_client = GenericClient()\n while(1):\n try:\n list_obj = generic_client.get_exchange_symbols()\n break\n except requests.exceptions.ProxyError as e:\n print(e)\n continue\n except requests.exceptions.ConnectionError as e:\n print(e)\n continue\n except requests.exceptions.ReadTimeout as e:\n print(e)\n continue \n\n if len(list_obj):\n for symbol_info_obj in list_obj:\n if symbol_info_obj.symbol == symbol:\n return symbol_info_obj\n\n\nif __name__ == '__main__':\n import sys\n import numpy\n #print(sys.path)\n #print(g_account_id)\n #print(g_api_key)\n #print(g_secret_key)\n\n #testcase1\n #base_symbol = \"btt\"\n #quote_symbol = \"usdt\"\n #amount = 5\n #monitor_sleep_time = 0\n #interval_upper_boundary_list = [0.2, 0.5, 0.9]\n #interval_rate_list = [0.1, 0.15, 0.2]\n #interval_delta_list = [0.01, 0.01, 0.01]\n #stop_loss_rate = 0.05\n #stop_profit_rate = float(\"inf\")\n ##stop_profit_rate = 0.2\n #buy_sleep_time = 5\n #sell_sleep_time = 5\n #buy_max_times = 1\n #sell_max_times = 20\n #test_mode = 1\n #test_price_vector = [1+numpy.sin(x) for x in numpy.arange(0.01,numpy.pi,0.01)]\n\n #testcase2\n #base_symbol = \"ekt\"\n #quote_symbol = \"usdt\"\n #symbol = base_symbol + quote_symbol\n #amount = 6.000000\n #monitor_sleep_time = 1\n #interval_upper_boundary_list = [0.2, 0.5, 0.9]\n #interval_rate_list = [0.05, 0.1, 0.1]\n #interval_delta_list = [0.01, 0.01, 0.01]\n #stop_loss_rate = 0.05\n #stop_profit_rate = float(\"inf\")\n ##stop_profit_rate = 0.2\n #buy_sleep_time = 5\n #sell_sleep_time = 5\n #buy_max_times = 1\n #sell_max_times = 20\n #test_mode = 0\n #test_price_vector = [1+numpy.sin(x) for x in numpy.arange(0.01,numpy.pi,0.01)]\n ###print(precision_cali(float(3.1563126252505009988476953907816e-4), 6))\n\n #testcase3(very important test case, because this is a real trading case)\n #base_symbol = \"shib\"\n #quote_symbol = \"usdt\"\n #symbol = base_symbol + quote_symbol\n #amount = 8.000000\n #monitor_sleep_time = 0\n #interval_upper_boundary_list = [0.2, 0.5, 1.0]\n 
#interval_rate_list = [0.05, 0.1, 0.1]\n #interval_delta_list = [0.01, 0.01, 0.01]\n #stop_loss_rate = 0.10\n #stop_profit_rate = float(\"inf\")\n ##stop_profit_rate = 0.2\n #buy_sleep_time = 5\n #sell_sleep_time = 5\n #buy_max_times = 1\n #sell_max_times = 20\n #test_mode = 1\n #test_price_vector = []\n #if 1 == test_mode:\n # fp = open('./test_vector0', 'r')\n # for line in fp:\n # test_price_vector.append(float(line.strip('\\n')))\n\n #Formal function\n if \"0\" == sys.argv[1]: #trace the most closely when less than 0.5\n interval_upper_boundary_list = [0.25, 0.5, 1.0]\n interval_rate_list = [0.05, 0.05, 0.1]\n stop_profit_rate = float(\"inf\")\n elif \"1\" == sys.argv[1]: #trace the most closely when less than 0.2\n interval_upper_boundary_list = [0.25, 0.5, 1.0]\n interval_rate_list = [0.05, 0.1, 0.1]\n stop_profit_rate = float(\"inf\")\n elif \"2\" == sys.argv[1]: #trace profit at 10% rate list\n interval_upper_boundary_list = [0.25, 0.5, 1.0]\n interval_rate_list = [0.1, 0.1, 0.05]\n stop_profit_rate = float(\"inf\")\n elif \"3\" == sys.argv[1]: #trace the most profit at risk\n interval_upper_boundary_list = [0.25, 0.5, 1.0]\n interval_rate_list = [0.1, 0.2, 0.2]\n stop_profit_rate = float(\"inf\")\n elif \"4\" == sys.argv[1]: #trace 20% profit\n interval_upper_boundary_list = [0.25, 0.5, 1.0]\n interval_rate_list = [0.05, 0.1, 0.2]\n stop_profit_rate = 0.2\n else: #trace the most closely when less than 0.5 \n interval_upper_boundary_list = [0.25, 0.5, 1.0]\n interval_rate_list = [0.05, 0.1, 0.1]\n stop_profit_rate = float(\"inf\")\n base_symbol = sys.argv[2]\n quote_symbol = sys.argv[3]\n amount = float(sys.argv[4])\n sell_sleep_time = int(sys.argv[5])\n stop_loss_rate = float(sys.argv[6])\n monitor_sleep_time = float(sys.argv[7])\n symbol = base_symbol + quote_symbol\n interval_delta_list = [0.01, 0.01, 0.01]\n buy_sleep_time = 5\n buy_max_times = 1\n sell_max_times = 20\n test_mode = 0\n test_price_vector = []\n\n #recall main function\n trace_profit(base_symbol, quote_symbol, amount, monitor_sleep_time, interval_upper_boundary_list, interval_rate_list, interval_delta_list, stop_loss_rate, stop_profit_rate, buy_sleep_time, sell_sleep_time, buy_max_times, sell_max_times, test_mode, test_price_vector)\n","sub_path":"tests/trace_profit.py","file_name":"trace_profit.py","file_ext":"py","file_size_in_byte":15619,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"398653345","text":"from urllib import request\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom datetime import datetime, date, timedelta\nimport json\nimport numpy as np\n\n\ndef currencyPriceHistory(startDate, endDate, tradeInfo):\n #returns currency pair midClose price ((ask+bid)/2) as a DataFrame\n if (not isinstance(startDate, date) or not isinstance(endDate, date) ):\n raise Exception(\"Dates should be inputted as date object from datetime\")\n\n\n endpoint = 'https://api-' + tradeInfo.domain + '/v1/candles'\\\n + '?instrument=' + tradeInfo.instrument_string\\\n + '&granularity=' + tradeInfo.granularity\\\n + '&start=' + startDate.strftime(\"%Y-%m-%d\")\\\n + '&end=' + endDate.strftime(\"%Y-%m-%d\")\\\n + '&include_First=False' \n\n query_params = { 'Authorization': 'Bearer ' + tradeInfo.access_token }\n\n req = request.Request(endpoint, headers = query_params)\n response = request.urlopen(req) \n data = response.read().decode('utf-8') \n data = json.loads(data)\n data = pd.DataFrame([(data['candles'][x]['closeBid'] + data['candles'][x]['closeAsk'])/2 for x 
in range(len(data['candles']))],\\\n index = [datetime.strptime(data['candles'][x]['time'], '%Y-%m-%dT%H:%M:%S.%fZ').date() for x in range(len(data['candles']))],\\\n columns = [tradeInfo.instrument_string])\n return data\n\n\nclass TradeInfo:\n\n def __init__(self, domain, access_token, account_id, instrument_string, granularity):\n self.domain = domain\n self.access_token = access_token\n self.account_id = account_id\n self.instrument_string = instrument_string\n self.granularity = granularity\n\n","sub_path":"currencyPriceHistory.py","file_name":"currencyPriceHistory.py","file_ext":"py","file_size_in_byte":1737,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"585150960","text":"# -*- coding: utf-8 -*-\n\"\"\"\nImplementation of the bdew heat load profiles\n\n\n\"\"\"\nimport datetime\nfrom math import ceil\nimport numpy as np\nimport pandas as pd\nimport os\nfrom .tools import add_weekdays2df\nimport calendar\n\n\nclass ElecSlp:\n \"\"\"Generate electrical standardized load profiles based on the BDEW method.\n\n Attributes\n ----------\n datapath : string\n Path to the csv files containing the load profile data.\n date_time_index : pandas.DateTimeIndex\n Time range for and frequency for the profile.\n\n Parameters\n ----------\n year : integer\n Year of the demand series.\n\n Optional Parameters\n -------------------\n seasons : dictionary\n Describing the time ranges for summer, winter and transition periods.\n holidays : dictionary or list\n The keys of the dictionary or the items of the list should be datetime\n objects of the days that are holidays.\n \"\"\"\n\n def __init__(self, year, seasons=None, holidays=None):\n if calendar.isleap(year):\n hoy = 8784\n else:\n hoy = 8760\n self.datapath = os.path.join(os.path.dirname(__file__), 'bdew_data')\n self.date_time_index = pd.date_range(\n datetime.datetime(year, 1, 1, 0), periods=hoy * 4, freq='15Min')\n if seasons is None:\n self.seasons = {\n 'summer1': [5, 15, 9, 14], # summer: 15.05. to 14.09\n 'transition1': [3, 21, 5, 14], # transition1 :21.03. to 14.05\n 'transition2': [9, 15, 10, 31], # transition2 :15.09. to 31.10\n 'winter1': [1, 1, 3, 20], # winter1: 01.01. to 20.03\n 'winter2': [11, 1, 12, 31], # winter2: 01.11. to 31.12\n }\n else:\n self.seasons = seasons\n self.year = year\n self.slp_frame = self.all_load_profiles(self.date_time_index,\n holidays=holidays)\n\n def all_load_profiles(self, time_df, holidays=None):\n slp_types = ['h0', 'g0', 'g1', 'g2', 'g3', 'g4', 'g5', 'g6', 'l0',\n 'l1', 'l2']\n new_df = self.create_bdew_load_profiles(time_df, slp_types,\n holidays=holidays)\n\n new_df.drop(['hour', 'weekday'], 1, inplace=True)\n return new_df\n\n def create_bdew_load_profiles(self, dt_index, slp_types, holidays=None):\n \"\"\"Calculates the hourly electricity load profile in MWh/h of a region.\n \"\"\"\n\n # define file path of slp csv data\n file_path = os.path.join(self.datapath, 'selp_series.csv')\n\n # Read standard load profile series from csv file\n selp_series = pd.read_csv(file_path)\n tmp_df = selp_series\n # Create an index to merge. 
The year and month will be ignored only the\n # time index is necessary.\n index = pd.date_range(\n datetime.datetime(2007, 1, 1, 0), periods=2016, freq='15Min')\n tmp_df.set_index(index, inplace=True)\n\n # Create empty DataFrame to take the results.\n new_df = pd.DataFrame(index=dt_index, columns=slp_types).fillna(0)\n new_df = add_weekdays2df(new_df, holidays=holidays,\n holiday_is_sunday=True)\n\n new_df['hour'] = dt_index.hour + 1\n new_df['minute'] = dt_index.minute\n time_df = new_df[['date', 'hour', 'minute', 'weekday']].copy()\n tmp_df[slp_types] = tmp_df[slp_types].astype(float)\n\n # Inner join the slps on the time_df to the slp's for a whole year\n tmp_df['hour_of_day'] = tmp_df.index.hour + 1\n tmp_df['minute_of_hour'] = tmp_df.index.minute\n left_cols = ['hour_of_day', 'minute_of_hour', 'weekday']\n right_cols = ['hour', 'minute', 'weekday']\n tmp_df = tmp_df.reset_index()\n tmp_df.pop('index')\n\n for p in self.seasons.keys():\n a = datetime.datetime(self.year, self.seasons[p][0],\n self.seasons[p][1], 0, 0)\n b = datetime.datetime(self.year, self.seasons[p][2],\n self.seasons[p][3], 23, 59)\n new_df.update(pd.DataFrame.merge(\n tmp_df[tmp_df['period'] == p[:-1]], time_df[a:b],\n left_on=left_cols, right_on=right_cols,\n how='inner', left_index=True).sort_index().drop(\n ['hour_of_day'], 1))\n\n new_df.drop('date', axis=1, inplace=True)\n return new_df.div(new_df.sum(axis=0), axis=1)\n\n def get_profile(self, ann_el_demand_per_sector,\n dyn_function_h0: bool = True):\n \"\"\" Get the profiles for the given annual demand\n\n Parameters\n ----------\n ann_el_demand_per_sector : dictionary\n Key: sector, value: annual value\n dyn_function_h0: bool, default True\n Uses the dynamisation function of the BDEW to smoothen the\n seasonal edges. 
Functions resolution is daily.\n f(x) = -3.916649251 * 10^-10 * x^4 + 3,2 * 10^-7 * x³ - 7,02\n * 10^-5 * x²+0,0021 * x +1,24\n Adjustment of accuracy: from -3,92 to -3.916649251\n\n Returns\n -------\n pandas.DataFrame : Table with all profiles\n\n \"\"\"\n if dyn_function_h0 == True:\n quartersinyear = len(self.slp_frame)\n for quarter in range(quartersinyear):\n quarterhour_to_day = (quarter + 1) / (24 * 4)\n smoothing_factor = -3.916649251 * 10 ** -10 \\\n * quarterhour_to_day ** 4 + 3.2 * 10 ** -7 \\\n * quarterhour_to_day ** 3 - 7.02 * 10 ** -5 \\\n * quarterhour_to_day ** 2 + 0.0021 \\\n * quarterhour_to_day + 1.24\n\n self.slp_frame['h0'][quarter] = self.slp_frame['h0'][\n quarter] * smoothing_factor\n return self.slp_frame.multiply(pd.Series(ann_el_demand_per_sector),\n axis=1).dropna(how='all', axis=1) * 4\n\n\nclass HeatBuilding:\n \"\"\"\n Parameters\n ----------\n year : int\n year for which the profile is created\n\n Attributes\n ----------\n datapath : string\n path to the bdew basic data files (csv)\n temperature : pandas.Series\n Series containing hourly temperature data\n annual_heat_demand : float\n annual heat demand of building in kWh\n building_class: int\n class of building according to bdew classification\n possible numbers are: 1 - 11\n shlp_type : string\n type of standardized heat load profile according to bdew\n possible types are:\n GMF, GPD, GHD, GWA, GGB, EFH, GKO, MFH, GBD, GBA, GMK, GBH, GGA, GHA\n wind_class : int\n wind classification for building location (0=not windy or 1=windy)\n ww_incl : boolean\n decider whether warm water load is included in the heat load profile\n \"\"\"\n\n def __init__(self, df_index, **kwargs):\n self.datapath = kwargs.get(\n 'datapath', os.path.join(os.path.dirname(__file__), 'bdew_data'))\n self.df = pd.DataFrame(index=df_index)\n self.df = add_weekdays2df(self.df, holiday_is_sunday=True,\n holidays=kwargs.get('holidays'))\n self.df['hour'] = self.df.index.hour + 1\n self.temperature = kwargs.get('temperature')\n self.annual_heat_demand = kwargs.get('annual_heat_demand')\n self.shlp_type = kwargs.get('shlp_type').upper()\n self.wind_class = kwargs.get('wind_class')\n self.building_class = kwargs.get('building_class', 0)\n self.ww_incl = kwargs.get('ww_incl', True)\n self.name = kwargs.get('name', self.shlp_type)\n\n def weighted_temperature(self, how='geometric_series'):\n r\"\"\"\n A new temperature vector is generated containing a multi-day\n average temperature as needed in the load profile function.\n\n Parameters\n ----------\n how : string\n string which type to return (\"geometric_series\" or \"mean\")\n\n Notes\n -----\n Equation for the mathematical series of the average\n temperature [1]_:\n\n .. math::\n T=\\frac{T_{D}+0.5\\cdot T_{D-1}+0.25\\cdot T_{D-2}+\n 0.125\\cdot T_{D-3}}{1+0.5+0.25+0.125}\n\n with :math:`T_D` = Average temperature on the present day\n :math:`T_{D-i}` = Average temperature on the day - i\n\n References\n ----------\n .. 
[1] `BDEW `_,\n BDEW Documentation for heat profiles.\n \"\"\"\n\n # calculate daily mean temperature\n temperature = self.df['temperature'].resample('D').mean().reindex(\n self.df.index).fillna(method='ffill').fillna(method='bfill')\n\n if how == 'geometric_series':\n temperature_mean = (temperature + 0.5 * np.roll(temperature, 24) +\n 0.25 * np.roll(temperature, 48) +\n 0.125 * np.roll(temperature, 72)) / 1.875\n elif how == 'mean':\n temperature_mean = temperature\n else:\n temperature_mean = None\n\n return temperature_mean\n\n def get_temperature_interval(self):\n \"\"\"Appoints the corresponding temperature interval to each temperature\n in the temperature vector.\n \"\"\"\n intervals = ({\n -20: 1, -19: 1, -18: 1, -17: 1, -16: 1, -15: 1, -14: 2,\n -13: 2, -12: 2, -11: 2, -10: 2, -9: 3, -8: 3, -7: 3, -6: 3, -5: 3,\n -4: 4, -3: 4, -2: 4, -1: 4, 0: 4, 1: 5, 2: 5, 3: 5, 4: 5, 5: 5,\n 6: 6, 7: 6, 8: 6, 9: 6, 10: 6, 11: 7, 12: 7, 13: 7, 14: 7, 15: 7,\n 16: 8, 17: 8, 18: 8, 19: 8, 20: 8, 21: 9, 22: 9, 23: 9, 24: 9,\n 25: 9, 26: 10, 27: 10, 28: 10, 29: 10, 30: 10, 31: 10, 32: 10,\n 33: 10, 34: 10, 35: 10, 36: 10, 37: 10, 38: 10, 39: 10, 40: 10})\n\n temperature_rounded = [ceil(i) for i in self.df['temperature_geo']]\n\n temperature_interval = [intervals[i] for i in temperature_rounded]\n\n return np.transpose(np.array(temperature_interval))\n\n def get_sf_values(self, filename='shlp_hour_factors.csv'):\n \"\"\" Determine the h-values\n\n Parameters\n ----------\n filename : string\n name of file where sigmoid factors are stored\n \"\"\"\n file = os.path.join(self.datapath, filename)\n hour_factors = pd.read_csv(file, index_col=0)\n hour_factors = hour_factors.query(\n 'building_class=={0} and shlp_type==\"{1}\"'.format(\n self.building_class, self.shlp_type))\n\n # Join the two DataFrames on the columns 'hour' and 'hour_of_the_day'\n # or ['hour' 'weekday'] and ['hour_of_the_day', 'weekday'] if it is\n # not a residential slp.\n residential = self.building_class > 0\n left_cols = ['hour_of_day'] + (['weekday'] if not residential else [])\n right_cols = ['hour'] + (['weekday'] if not residential else [])\n sf_mat = pd.DataFrame.merge(\n hour_factors, self.df, left_on=left_cols, right_on=right_cols,\n how='outer', left_index=True).sort_index()\n\n # drop unnecessary columns\n drop_cols = (\n ['hour_of_day', 'hour', 'building_class', 'shlp_type',\n 'date', 'temperature']\n + (['weekday_x'] if residential else [])\n + (['weekday_y'] if residential else [])\n + (['weekday'] if not residential else []))\n\n sf_mat = sf_mat.drop(drop_cols, 1)\n\n # Determine the h values\n length = len(self.temperature)\n sf = (np.array(sf_mat)[np.array(list(range(0, length)))[:],\n (self.get_temperature_interval() - 1)[:]])\n return np.array(list(map(float, sf[:])))\n\n def get_sigmoid_parameters(self, filename='shlp_sigmoid_factors.csv'):\n \"\"\" Retrieve the sigmoid parameters from csv-files\n\n Parameters\n ----------\n filename : string\n name of file where sigmoid factors are stored\n \"\"\"\n\n file = os.path.join(self.datapath, filename)\n sigmoid = pd.read_csv(file, index_col=0)\n sigmoid = sigmoid.query(\n 'building_class=={0} and '.format(self.building_class) +\n 'shlp_type==\"{0}\" and '.format(self.shlp_type) +\n 'wind_impact=={0}'.format(self.wind_class))\n\n a = float(sigmoid['parameter_a'])\n b = float(sigmoid['parameter_b'])\n c = float(sigmoid['parameter_c'])\n if self.ww_incl:\n d = float(sigmoid['parameter_d'])\n else:\n d = 0\n return a, b, c, d\n\n def get_weekday_parameters(self, 
filename='shlp_weekday_factors.csv'):\n \"\"\" Retrieve the weekday parameter from csv-file\n\n Parameters\n ----------\n filename : string\n name of file where sigmoid factors are stored\n \"\"\"\n file = os.path.join(self.datapath, filename)\n f_df = pd.read_csv(file, index_col=0)\n\n tmp_df = f_df.query('shlp_type==\"{0}\"'.format(self.shlp_type)).drop(\n 'shlp_type', axis=1)\n\n tmp_df['weekdays'] = np.array(list(range(7))) + 1\n\n return np.array(list(map(float, pd.DataFrame.merge(\n tmp_df, self.df, left_on='weekdays', right_on='weekday',\n how='inner', left_index=True).sort_index()['wochentagsfaktor'])))\n\n def get_bdew_profile(self):\n \"\"\" Calculation of the hourly heat demand using the bdew-equations\n \"\"\"\n return self.get_normalized_bdew_profile() * self.annual_heat_demand\n\n def get_normalized_bdew_profile(self):\n \"\"\" Calculation of the normalized hourly heat demand\n \"\"\"\n self.df['temperature'] = self.temperature.values\n self.df['temperature_geo'] = self.weighted_temperature(\n how='geometric_series')\n\n sf = self.get_sf_values()\n\n [a, b, c, d] = self.get_sigmoid_parameters()\n\n f = self.get_weekday_parameters()\n h = (a / (1 + (b / (self.df['temperature_geo'] - 40)) ** c) + d)\n kw = 1.0 / (sum(h * f) / 24)\n heat_profile_normalized = (kw * h * f * sf)\n\n return heat_profile_normalized\n","sub_path":"demandlib/bdew.py","file_name":"bdew.py","file_ext":"py","file_size_in_byte":14363,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"72312268","text":"import os\nimport time \n\n\n\n\nfor file in os.listdir('python_step100\\\\'):\n if file.endswith(\".txt\"):\n joined = os.path.join('python_step100\\\\', file)\n time = os.path.getmtime(joined)\n print(joined, time)\n\n","sub_path":"drill_for_step_100.py","file_name":"drill_for_step_100.py","file_ext":"py","file_size_in_byte":265,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"565892126","text":"from gsrest.db import get_connection\nfrom openapi_server.models.address_tag import AddressTag\nfrom openapi_server.models.entity_tag import EntityTag\nfrom openapi_server.models.tags import Tags\nfrom openapi_server.models.taxonomy import Taxonomy\nfrom openapi_server.models.concept import Concept\nfrom gsrest.util.string_edit import alphanumeric_lower\n\n\ndef list_tags(label, currency=None):\n db = get_connection()\n label = alphanumeric_lower(label)\n if(currency is None):\n address_tags = []\n for curr in db.get_supported_currencies():\n address_tags += db.list_address_tags(curr, label)\n else:\n address_tags = db.list_address_tags(currency, label)\n\n if(currency is None):\n entity_tags = []\n for curr in db.get_supported_currencies():\n entity_tags += db.list_entity_tags(curr, label)\n else:\n entity_tags = db.list_entity_tags(currency, label)\n\n return Tags(address_tags=[AddressTag(\n address=row.address,\n label=row.label,\n category=row.category,\n abuse=row.abuse,\n tagpack_uri=row.tagpack_uri,\n source=row.source,\n lastmod=row.lastmod,\n active=row.active,\n currency=row.currency)\n for row in address_tags],\n entity_tags=[EntityTag(\n entity=row.cluster,\n label=row.label,\n category=row.category,\n abuse=row.abuse,\n tagpack_uri=row.tagpack_uri,\n source=row.source,\n lastmod=row.lastmod,\n active=row.active,\n currency=row.currency)\n for row in entity_tags])\n\n\ndef list_labels(currency, expression):\n # Normalize label\n expression_norm = alphanumeric_lower(expression)\n db = 
get_connection()\n result = db.list_labels(currency, expression_norm)\n\n if currency:\n return list(dict.fromkeys([\n row.label for row in result\n if row.label_norm.startswith(expression_norm) and\n row.currency.lower() == currency]))\n return list(dict.fromkeys([\n row.label for row in result\n if row.label_norm.startswith(expression_norm)]))\n\n\ndef list_concepts(taxonomy):\n db = get_connection()\n rows = db.list_concepts(taxonomy)\n\n return [Concept(\n id=row.id,\n label=row.label,\n description=row.description,\n taxonomy=row.taxonomy,\n uri=row.uri) for row in rows]\n\n\ndef list_taxonomies():\n db = get_connection()\n rows = db.list_taxonomies()\n\n return [Taxonomy(taxonomy=row.key, uri=row.uri) for row in rows]\n","sub_path":"gsrest/service/tags_service.py","file_name":"tags_service.py","file_ext":"py","file_size_in_byte":3033,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"105893537","text":"from faker import Factory\r\nfrom multiprocessing.dummy import Pool as ThreadPool\r\nfrom app.sales.models import BranchType\r\nfrom app.sales.services import get_branches_id_with_limit\r\nfrom random import choice, randint\r\nfrom datetime import datetime\r\n\r\nfake = Factory.create('en_US')\r\n\r\nnow = datetime.now()\r\ntypes = BranchType.TYPES.values()\r\n\r\n\r\ndef generate_random_type():\r\n return types[randint(0, 2)]\r\n\r\n\r\ndef generate_random_date():\r\n return str(choice(range(1985, 2016))) + '-' + str(choice(range(1, 12))) + '-' + str(choice(range(1, 29)))\r\n\r\n\r\ndef generate_branch_data_by_id(id):\r\n type = generate_random_type()\r\n name = fake.street_name() + ' - ' + type\r\n average_monthly_income = fake.pyfloat(7, 2, True)\r\n average_patrons = fake.pyfloat(4, 2, True)\r\n remarks = fake.sentence(10)\r\n started_date = generate_random_date()\r\n\r\n return {\r\n 'id': id,\r\n 'name': name,\r\n 'type': type,\r\n 'average_monthly_income': average_monthly_income,\r\n 'average_patrons': average_patrons,\r\n 'remarks': remarks,\r\n 'status': 'ACTIVE',\r\n 'date_created': now,\r\n 'date_modified': now,\r\n 'operation_started_date': started_date\r\n }\r\n\r\n\r\ndef generate_branch(limit=0):\r\n pool = ThreadPool(8)\r\n\r\n branch_mappings = pool.map(generate_branch_data_by_id, get_branches_id_with_limit(limit))\r\n\r\n # close the pool and wait for the work to finish\r\n pool.close()\r\n pool.join()\r\n\r\n return branch_mappings\r\n","sub_path":"app/seeds/datasource/branches.py","file_name":"branches.py","file_ext":"py","file_size_in_byte":1490,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"498984167","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models\nfrom django.conf import settings\nfrom django.utils.translation import ugettext_lazy as _\n\n\nclass OnTaskOAuthUserTokens(models.Model):\n \"\"\"\n Table to store the tokens to authenticate with OAuth. There must be\n a one-to-one correspondence with the user, an access token, and a refresh\n token.\n \"\"\"\n\n user = models.ForeignKey(settings.AUTH_USER_MODEL,\n db_index=True,\n on_delete=models.CASCADE,\n null=False,\n related_name='oauth2_token',\n blank=False)\n\n # Instance name taken from the configuration parameters. 
It allows users to\n # have more than one token (as long as they are from different canvas instances\n instance_name = models.CharField(max_length=2048,\n null=False,\n blank=False)\n\n access_token = models.CharField(max_length=2048, blank=False)\n\n refresh_token = models.CharField(max_length=2048, blank=True)\n\n created = models.DateTimeField(auto_now_add=True, null=False, blank=False)\n\n modified = models.DateTimeField(auto_now=True, null=False)\n\n # Token valid until\n valid_until = models.DateTimeField(_('Token valid until'),\n blank=False,\n null=False,\n default=None)\n\n class Meta(object):\n unique_together = ('user', 'instance_name')\n","sub_path":"src/ontask_oauth/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1618,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"576425971","text":"import argparse\nimport gym_recommendations\nimport gym\nfrom gym import wrappers, logger\nfrom agents.random_agent import RandomAgent\nfrom agents.epsilon_greedy_agent import EpsilonGreedy\nfrom agents.gradient_bandit_agent import GradientBandit\nfrom agents.thompson_sampling import ThompsonSampling\nfrom agents.ucb_agent import ucb\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport sys\n\n\nnb_exp=10\nnb_episodes=10\nenv_name = 'Multi-Armed-Bandits-v0'\n\nif len(sys.argv)>1:\n nb_exp = int(sys.argv[1])\nif len(sys.argv)>2:\n nb_episodes = int(sys.argv[2])\nif len(sys.argv)>3:\n env_name = sys.argv[3] \n\nenv = gym.make(env_name)\nagents_list={'Random Agent':RandomAgent(env.env.action_space),\\\n 'Epsilon Greedy Agent':EpsilonGreedy(env.env.action_space),\\\n 'Gradient Bandit Agent':GradientBandit(env.env.action_space),\\\n 'UCB Agent':ucb(env.env.action_space),\\\n 'Thompson Sampling Agent':ThompsonSampling(env.env.action_space)}\n\ndef run_bench():\n logger.set_level(logger.INFO)\n\n rewards={x:[] for x in list(agents_list.keys())}\n regrets={x:[] for x in list(agents_list.keys())}\n brs={x:[] for x in list(agents_list.keys())}\n for i in range(nb_exp):\n print(f'exp {i}')\n for _ in range(nb_episodes):\n env.env.reset()\n for agent_name in list(agents_list.keys()):\n agent = agents_list[agent_name]\n #print(agent.reset())\n agent = agent.reset()\n ob = env.reset()\n step = 0\n reward=0\n reward_record = [0]\n regret_record = [0]\n br_record = [0]\n done = False\n while True:\n step += 1\n action = agent.act(ob,reward,done)\n ob, reward, done, _ = env.step(action)\n # print(f'{agent_name} - action:{action} - reward:{reward}')\n best_reward = env.env.get_best_reward()\n if done:\n break\n reward_record.append(reward_record[step-1]+reward)\n regret_record.append(regret_record[step-1]+best_reward-reward)\n br_record.append(best_reward)\n rewards[agent_name].append(reward_record)\n regrets[agent_name].append(regret_record)\n brs[agent_name].append(br_record)\n env.env.close()\n\n plt.figure()\n for agent_name in rewards:\n x = np.mean(rewards[agent_name],axis=0)\n plt.plot(x,label=f'{agent_name}')\n \n plt.title('Score Cumulé')\n plt.legend()\n plt.savefig(env_name+'_Score')\n plt.show()\n \n f1 = plt.figure()\n for agent_name in regrets :\n x = np.mean(regrets[agent_name],axis=0)\n plt.plot(x,label=f'{agent_name}')\n plt.title('Regret Cumulé')\n plt.legend()\n plt.savefig(env_name+'_Regret')\n plt.show()\n\n plt.figure()\n plt.plot(np.mean(brs['Random Agent'],axis=0))\n plt.title('Meilleure Récompense')\n plt.savefig(env_name+'_Best_Reward')\n plt.show()\n\nif __name__ == '__main__':\n 
run_bench()\n\n","sub_path":"examples/benchmark_mab.py","file_name":"benchmark_mab.py","file_ext":"py","file_size_in_byte":3127,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"299226079","text":"class Measures():\n allowed_measure_types = [\n (\"weight\", \"weight (e.g. grams, libs\"),\n (\"capacity\", \"capacity (e.g. liters, gallons)\"),\n (\"length\", \"length (e.g. centimeters, inches)\"),\n (\"absolute\", \"pure number (e.g. 3 leaves)\"),\n (\"energy\", \"energy (kilojoule, calories)\"),\n (\"extra\", \"extra (e.g. a pinch of, to taste)\")\n ]\n\n reference_units = {\n \"weight\": \"g\",\n \"capacity\": \"l\",\n \"length\": \"m\",\n \"energy\": \"kcal\"\n }\n\n units = {\n # 1g equals\n \"weight\": {\n \"g\": {\n \"conversion\": 1,\n \"long_name\": \"gram\"\n },\n \"kg\": {\n \"conversion\": 0.001,\n \"long_name\": \"kilogram\"\n },\n \"mg\": {\n \"conversion\": 1000,\n \"long_name\": \"milligram\"\n },\n \"lb\": {\n \"conversion\": 0.00220462,\n \"long_name\": \"pound\"\n },\n \"oz\": {\n \"conversion\": 0.0352739200000000003,\n \"long_name\": \"ounce\"\n },\n },\n\n # 1l equals\n \"capacity\": {\n \"l\": {\n \"conversion\": 1,\n \"long_name\": \"liter\"\n },\n \"ml\": {\n \"conversion\": 1000,\n \"long_name\": \"milliliter\"\n },\n \"dl\": {\n \"conversion\": 10,\n \"long_name\": \"deciliter\"\n },\n \"us_legal_cup\": {\n \"conversion\": 4.1666690666667,\n \"long_name\": \"US legal cup\"\n },\n \"us_l_gal\": {\n \"conversion\": 0.26417199999924750875,\n \"long_name\": \"US liquid gallon\"\n },\n \"us_l_pint\": {\n \"conversion\": 2.11337599999398,\n \"long_name\": \"US liquid pint\"\n },\n \"us_fl_oz\": {\n \"conversion\": 33.81401599990368112,\n \"long_name\": \"US fluid ounce\"\n },\n \"imp_cup\": {\n \"conversion\": 3.51951,\n \"long_name\": \"imperial cup\"\n },\n \"imp_gal\": {\n \"conversion\": 0.219969,\n \"long_name\": \"imperial gallon\"\n },\n \"imp_pint\": {\n \"conversion\": 0.219969,\n \"long_name\": \"imperial pint\"\n },\n \"imp_fl_oz\": {\n \"conversion\": 35.1951,\n \"long_name\": \"imperial fluid ounce\"\n },\n },\n # 1m equals\n \"length\": {\n \"m\": {\n \"conversion\": 1,\n \"long_name\": \"meter\"\n },\n \"cm\": {\n \"conversion\": 100,\n \"long_name\": \"centimeter\"\n },\n \"mm\": {\n \"conversion\": 1000,\n \"long_name\": \"millimeter\"\n },\n \"ft\": {\n \"conversion\": 3.28084,\n \"long_name\": \"foot\"\n },\n \"in\": {\n \"conversion\": 39.3701,\n \"long_name\": \"inch\"\n },\n },\n\n # 1 kcal equals\n \"energy\": {\n \"cal\": {\n \"conversion\": 1000,\n \"long_name\": \"kilogram\"\n },\n \"kcal\": {\n \"conversion\": 1,\n \"long_name\": \"kilocalorie\"\n },\n \"kj\": {\n \"conversion\": 4.184,\n \"long_name\": \"kilojoule\"\n },\n }\n }\n units[\"weight\"][\"Kg\"] = units[\"weight\"][\"kg\"]\n units[\"length\"][\"″\"] = units[\"length\"][\"in\"]\n units[\"energy\"][\"kJ\"] = units[\"energy\"][\"kj\"]\n units[\"energy\"][\"KJ\"] = units[\"energy\"][\"kj\"]\n units[\"energy\"][\"Kj\"] = units[\"energy\"][\"kj\"]\n units[\"energy\"][\"KCal\"] = units[\"energy\"][\"kcal\"]\n\n def __init__(self, measure, unit, unit_type=None):\n if unit_type is None:\n for unit_type_ in Measures.units.keys():\n if unit in Measures.units[unit_type_].keys():\n unit_type = unit_type_\n if unit_type is None:\n raise KeyError(f\"Unit {unit} not found.\"\n f\" Available: {[unit_ for unit_type_ in Measures.units.keys() for unit_ in Measures.units[unit_type_].keys()]}\")\n\n self.__assert_unit_exists__(unit, unit_type)\n 
self.__unit_type__ = unit_type\n self.__unit__ = unit\n self.set_measure(measure)\n\n @staticmethod\n def __assert_unit_exists__(unit, unit_type):\n if unit_type not in Measures.units.keys():\n raise KeyError(f\"Measure type {unit_type} not found. Available: {Measures.units.keys()}\")\n elif unit not in Measures.units[unit_type].keys():\n raise KeyError(\n f\"Measure unit {unit} not found for type {unit_type}. Available: {Measures.units[unit_type].keys()}\")\n\n def convert_to(self, dest_unit):\n self.__assert_unit_exists__(dest_unit, self.__unit_type__)\n return self.__inner_val__ * Measures.units[self.__unit_type__][dest_unit][\"conversion\"]\n\n def val(self):\n return self.__inner_val__ * Measures.units[self.__unit_type__][self.__unit__][\"conversion\"]\n\n def unit(self):\n return self.__unit__\n\n def long_unit_name(self):\n return Measures.units[self.__unit_type__][self.__unit__][\"long_unit_name\"]\n\n def unit_type(self):\n return self.__unit_type__\n\n def set_measure(self, measure):\n self.__inner_val__ = measure / Measures.units[self.__unit_type__][self.__unit__][\"conversion\"]\n\n def set_unit(self, dest_unit):\n self.__assert_unit_exists__(dest_unit, self.__unit_type__)\n self.__unit__ = dest_unit\n\n def __str__(self):\n return f\"{self.val()}{self.unit()}\"\n","sub_path":"recipes/measures.py","file_name":"measures.py","file_ext":"py","file_size_in_byte":5834,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"570683471","text":"# encoding:utf-8\n\"\"\"\nD:\\work\\vec3.0_dev_new\\vec_package\\vec\\rdo\\nova\\rpc.py\n\"\"\"\nfrom oslo import messaging\nfrom oslo.config import cfg\nimport nova\n\nfrom nova.openstack.common import jsonutils\n\n\nCONF = cfg.CONF\nTRANSPORT = None\nNOTIFIER = None\n\n\nTRANSPORT_ALIASES = {\n 'nova.openstack.common.rpc.impl_kombu': 'rabbit',\n 'nova.openstack.common.rpc.impl_qpid': 'qpid',\n 'nova.openstack.common.rpc.impl_zmq': 'zmq',\n 'nova.rpc.impl_kombu': 'rabbit',\n 'nova.rpc.impl_qpid': 'qpid',\n 'nova.rpc.impl_zmq': 'zmq',\n}\n\n\nclass JsonPayloadSerializer(messaging.NoOpSerializer):\n @staticmethod\n def serialize_entity(context, entity):\n return jsonutils.to_primitive(entity, convert_instances=True)\n\n\nclass RequestContextSerializer(messaging.Serializer):\n\n def __init__(self, base):\n self._base = base\n\n def serialize_entity(self, context, entity):\n if not self._base:\n return entity\n return self._base.serialize_entity(context, entity)\n\n def deserialize_entity(self, context, entity):\n if not self._base:\n return entity\n return self._base.deserialize_entity(context, entity)\n\n def serialize_context(self, context):\n return context.to_dict()\n\n def deserialize_context(self, context):\n return nova.context.RequestContext.from_dict(context)\n\n\ndef init(conf):\n global TRANSPORT, NOTIFIER\n TRANSPORT = messaging.get_transport(conf,\n aliases=TRANSPORT_ALIASES)\n serializer = RequestContextSerializer(JsonPayloadSerializer)\n NOTIFIER = messaging.Notifier(TRANSPORT, serializer=serializer)\n\n\ndef get_client(target, version_cap=None, serializer=None):\n assert TRANSPORT is not None\n serializer = RequestContextSerializer(serializer)\n return messaging.RPCClient(TRANSPORT,\n target,\n serializer=serializer)\n\n\ndef get_server(target, endpoints, serializer=None):\n assert TRANSPORT is not None\n serializer = RequestContextSerializer(serializer)\n return messaging.get_rpc_server(TRANSPORT,\n target,\n endpoints,\n executor='eventlet',\n serializer=serializer)\n\n\ndef 
get_notifier(service=None, host=None, publisher_id=None):\n assert NOTIFIER is not None\n if not publisher_id:\n publisher_id = \"%s.%s\" % (service, host or CONF.host)\n return NOTIFIER.prepare(publisher_id=publisher_id)\n\n\ninit(CONF)\n","sub_path":"examples/network_/oslo_rpc/rpc.py","file_name":"rpc.py","file_ext":"py","file_size_in_byte":2564,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"523284464","text":"# -*- coding: utf-8 -*-\n\nimport glob\nimport os\nimport subprocess\nimport tempfile\nfrom io import BytesIO\n\nimport pytest\nfrom PIL import Image, ImageChops\nfrom PyPDF2 import PdfFileReader, PdfFileWriter\n\n\nclass PNGDocument(object):\n \"\"\"Document storing just one PNF file representing just one page.\"\"\"\n file_format = 'png'\n\n def __init__(self, content):\n self.content = content\n\n def get_bytes(self):\n return self.content\n\n def get_stream(self):\n return BytesIO(self.content)\n\n @staticmethod\n def get_pages_count():\n return 1\n\n\nclass PDF(object):\n \"\"\"Wrapper over low level pdf stream.\"\"\"\n file_format = 'pdf'\n\n def __init__(self, pdf):\n \"\"\"\n Init.\n\n Arguments:\n pdf: File like stream object with pdf data. BytesIO is\n preferred.\n \"\"\"\n self._pdf = pdf\n\n @classmethod\n def from_bytes(cls, buffer):\n if not isinstance(buffer, bytes):\n raise TypeError('Expected bytes, not {}'.format(type(buffer)))\n return cls(pdf=BytesIO(buffer))\n\n def get_bytes(self):\n return self.get_stream().getvalue()\n\n def get_stream(self):\n return self._pdf\n\n def __str__(self):\n return self.get_bytes()\n\n @property\n def pdf(self):\n return self.get_stream()\n\n def get_pages_count(self):\n reader = PdfFileReader(self.get_stream())\n return reader.getNumPages()\n\n def split(self):\n \"\"\"\n Splits current PDF object into list of pdf objects\n with one page each.\n \"\"\"\n pages = []\n reader = PdfFileReader(self.get_stream())\n for page_num in range(reader.getNumPages()):\n page = reader.getPage(page_num)\n writer = PdfFileWriter()\n writer.addPage(page)\n out = BytesIO()\n writer.write(out)\n pages.append(self.__class__(pdf=out))\n return pages\n\n\ndef run_command(command, cwd=None, shell=False, env=None, get_stderr=False):\n \"\"\"\n Runs the given command in the project base directory, blocking execution\n until it completes\n\n Args:\n command (list): list of strings that are concatenated together for\n execution.\n cwd (basestring): Current working directory.\n shell (bool): Set to true if shell should be used.\n env (dict): Environment dictionary.\n get_stderr (bool): return stderr as well\n Returns:\n bool: True if command ran successfully, False otherwise\n \"\"\"\n print('Executing command {}'.format(command))\n process = subprocess.Popen(\n command, cwd=cwd, shell=shell, env=env,\n stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n # wait for the process to exit\n stdoutdata, stderrdata = process.communicate()\n is_success = process.returncode == 0\n if not is_success:\n print('Failed to execute command {} stdout={}; stderr={}'.format(\n command, stdoutdata, stderrdata))\n if get_stderr:\n return is_success, stderrdata\n return is_success\n\n\ndef store_images_diff(test_name, ref_png, png, extension='png'):\n diff = compare_images(ref_png, png)\n fname_pattern = '{}_{{}}.{{}}'.format(test_name)\n buf = BytesIO()\n diff.save(buf, 'PNG')\n with open('/tmp/' + fname_pattern.format('diff', 'png'), 'wb') as f:\n f.write(buf.getvalue())\n with open('/tmp/' + fname_pattern.format('ref', extension), 'wb') as f:\n f.write(ref_png)\n with open('/tmp/' + fname_pattern.format('generated', extension), 'wb') as f:\n f.write(png)\n return fname_pattern\n\n\ndef compare_images(reference_image, image):\n \"\"\"\n Compare images.\n Args:\n reference_image (bytes): Reference image, what we expect\n image (bytes): Image we would like to test and compare to reference\n image.\n\n Returns:\n (bool, diff) True if images are the same, and difference image if\n images are different.\n\n \"\"\"\n ref_img = Image.open(BytesIO(reference_image))\n img = Image.open(BytesIO(image))\n # Regression test\n diff = ImageChops.difference(ref_img, img)\n return diff\n\n\ndef are_images_identical(image1, image2):\n img1 = Image.open(BytesIO(image1))\n img2 = Image.open(BytesIO(image2))\n return list(img1.getdata()) == list(img2.getdata())\n\n\ndef pdf_to_png(pdf):\n \"\"\"\n Uses Ghostscript command to convert the PDF data into multiple PNG files\n\n Args:\n pdf (PDF):\n\n Returns:\n list[PNGDocument]: Returns list of PNGDocument.\n \"\"\"\n with tempfile.NamedTemporaryFile(suffix='.pdf') as temp_handle:\n temp_handle.write(pdf.get_bytes())\n # force flush to disk so conversion works correctly\n temp_handle.flush()\n os.fsync(temp_handle.file)\n return _convert(temp_handle.name)\n\n\ndef _convert(pdf_filename):\n png_count = '%03d'\n\n # Just generate temporary file name.\n output_handle = tempfile.NamedTemporaryFile(\n suffix='-{0}.png'.format(png_count), delete=True\n )\n output_filename = output_handle.name\n output_handle.close()\n\n # we assume 300 DPI for all PDF conversion output - leave it to user\n # to scale as needed\n command = ['gs', '-dBATCH', '-dNOPAUSE', '-sDEVICE=pnggray', '-r300',\n '-sOutputFile={0}'.format(output_filename), pdf_filename]\n success, stderr = run_command(command, get_stderr=True)\n\n if not success:\n raise Exception('Failed to convert PDF to PNG.')\n\n if stderr:\n print(\n 'Errors during conversion from PDF to PNG using '\n 'GhostScript: command - {} error - {}'.format(command, stderr)\n )\n\n png_glob = output_filename.replace(png_count, '*')\n output_files = [filename for filename in sorted(glob.iglob(png_glob))]\n if not len(output_files):\n raise Exception('Failed to convert PDF to PNG.')\n result = []\n for path in output_files:\n with open(path, 'rb') as f:\n result.append(PNGDocument(f.read()))\n os.remove(path)\n return result\n\n\ndef assert_pdf_objects_are_identical(test_name, ref_pdf, pdf, fail=False):\n reference_pages = ref_pdf.split()\n actual_pages = pdf.split()\n\n reference_pages_length = len(reference_pages)\n actual_pages_length = len(actual_pages)\n\n if not reference_pages_length == actual_pages_length:\n raise AssertionError(\n 'PDF files differ in the number of pages. '\n 'Reference PDF page number: {}, PDF page number: {}'.format(\n reference_pages_length, actual_pages_length\n )\n )\n\n for ref_page, actual_page in zip(reference_pages, actual_pages):\n ref_png = pdf_to_png(ref_page)\n ref_png = ref_png[0].get_bytes()\n\n png = pdf_to_png(actual_page)\n png = png[0].get_bytes()\n\n ref_png_length = len(ref_png)\n png_length = len(png)\n\n if not ref_png_length == png_length:\n fname_pattern = store_images_diff(test_name, ref_png, png)\n raise AssertionError(\n 'PNG files have different lengths. '\n 'Reference PNG length: {}, PNG Length: {},'\n 'regression: {}'.format(\n ref_png_length, png_length,\n fname_pattern.format('*', 'png')\n )\n )\n\n same = are_images_identical(ref_png, png)\n if fail or not same:\n fname_pattern = store_images_diff(test_name, ref_png, png)\n raise AssertionError(\n 'PDF to PNG conversion regression: /tmp/{}'.format(\n fname_pattern.format('*', 'png'))\n )\n\n\ndef assert_images_are_identical(test_name, ref_img, img, extension='png'):\n same = are_images_identical(ref_img, img)\n if not same:\n ref_img = BytesIO(ref_img).getvalue()\n img = BytesIO(img).getvalue()\n fname_pattern = store_images_diff(test_name, ref_img, img, extension)\n pytest.fail(\n 'PNG conversion regression: /tmp/{}'.format(\n fname_pattern.format('*', extension))\n )\n return same\n","sub_path":"tests/utilities.py","file_name":"utilities.py","file_ext":"py","file_size_in_byte":8097,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"162336793","text":"from pyfirmata import Arduino\r\nimport time\r\nimport serial\r\n\r\n#arduino的 setup()\r\n\r\ns=serial.Serial(\"com4\",9600) #Serial.begin(9600)\r\nsensorPin=2\r\nmotor = 8\r\nled =13\r\n\r\n#arduino的loop()\r\nwhile True:\r\n #arduino的感測器讀取數據到python\r\n moist = s.readline()\r\n print(moist)\r\n if moist>500:\r\n s.digital[led].write(0)\r\n s.digital[motor].write(0)\r\n else:\r\n s.digital[led].write(1)\r\n s.digital[motor].write(1)\r\n time.sleep(5)\r\n \"\"\"\r\n s.digital[13].write(1)\r\n time.sleep(0.2)\r\n s.digital[13].write(0)\r\n time.sleep(0.2)\r\n \"\"\"\r\n\r\n\"\"\"\r\nconst int sensorPin=2;\r\nint motor = 8;\r\nint led =13;\r\nvoid setup()\r\n{\r\n pinMode(motor, OUTPUT);\r\n pinMode(led,OUTPUT); \r\n pinMode(sensorPin,INPUT);\r\n Serial.begin(9600);\r\n}\r\n \r\nvoid loop()\r\n{\r\n int moist;\r\n moist = analogRead(sensorPin);\r\n Serial.println(moist);\r\n \r\n // 乾燥程度大於 550 時,關燈\r\n if (moist > 500)\r\n {\r\n digitalWrite(led,LOW);\r\n digitalWrite(motor, LOW); \r\n }\r\n else\r\n {\r\n digitalWrite(led,HIGH);\r\n digitalWrite(motor,HIGH);\r\n }\r\n delay(5000);\r\n}\r\n\"\"\"","sub_path":"blink.py","file_name":"blink.py","file_ext":"py","file_size_in_byte":1059,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"461570186","text":"# Copyright 2016 Mirantis, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom __future__ import unicode_literals\n\nimport logging.config\nimport os\nimport warnings\n\nfrom core.helpers.log_helpers import logwrap\nfrom core.helpers.log_helpers import QuietLogger\n\nfrom fuelweb_test.settings import LOGS_DIR\n\nif not os.path.exists(LOGS_DIR):\n os.makedirs(LOGS_DIR)\n\n_log_config = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'formatters': {\n 'default': {\n 'format': '%(asctime)s - %(levelname)s %(filename)s:'\n '%(lineno)d -- %(message)s',\n 'datefmt': '%Y-%m-%d %H:%M:%S'\n },\n },\n 'handlers': {\n 'console': {\n 'level': 'INFO',\n 'class': 'logging.StreamHandler',\n 'formatter': 'default'\n },\n 'tests_log': {\n 'level': 'DEBUG',\n 'class': 'logging.FileHandler',\n 'formatter': 'default',\n 'filename': os.path.join(LOGS_DIR, 'sys_test.log'),\n 'mode': 'w',\n 'encoding': 'utf8',\n },\n 'devops_log': {\n 'level': 'DEBUG',\n 'class': 'logging.FileHandler',\n 'formatter': 'default',\n 'filename': os.path.join(LOGS_DIR, 'devops.log'),\n 'mode': 'w',\n 'encoding': 'utf8',\n },\n 'null': {\n 'level': 'CRITICAL',\n 'class': 'logging.NullHandler',\n },\n },\n 'loggers': {\n # Log all to log file , but by default only warnings.\n '': {\n 'handlers': ['tests_log'],\n 'level': 'WARNING',\n },\n 'fuel-qa': {\n 'handlers': ['console'],\n 'level': 'DEBUG',\n 'propagate': True\n },\n 'devops': {\n 'handlers': ['console', 'devops_log'],\n 'level': 'DEBUG',\n 'propagate': True # Test log too\n },\n # py.warnings is changed by Django -> do not propagate\n 'py.warnings': {\n 'handlers': ['console', 'tests_log'],\n 'level': 'WARNING',\n 'propagate': False\n },\n 'paramiko': {'level': 'WARNING'},\n 'iso8601': {'level': 'WARNING'},\n 'keystoneauth': {'level': 'WARNING'},\n }\n}\n\nlogging.config.dictConfig(_log_config)\nlogging.captureWarnings(True) # Log warnings\n# Filter deprecation warnings: log only when deletion announced\nwarnings.filterwarnings(\n 'default',\n message=r'.*(drop|remove)+.*',\n category=DeprecationWarning)\n\nlogger = logging.getLogger('fuel-qa.{}'.format(__name__))\n\n__all__ = ['QuietLogger', 'logwrap', 'logger']\n","sub_path":"fuelweb_test/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3169,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"377794402","text":"'''\n분류 : 다이나믹 프로그래밍\n문제 : 가장 긴 감소하는 부분 수열 (백준 11055)\n작성일자 : 2021.07.05\n'''\n\n# 목적 : 가장 긴 감소하는 부분 수열의 길이를 출력\n# 접근 : dp \n# 1. LIS와 같지만 감소해야 함에 유의\n# > 점화식 ai = i번째를 마지막으로 갖는 감소하는 부분수열의 길이\n# > i번째가 이전의 값들보다 작으면 dp테이블을 갱신\n# 2. dp테이블의 초기화 \n# > 수열의 길이를 나타내야 하므로 1로 초기화\n# > 가장 긴 감소하는 부분수열의 길이가 1일 수도 있고, 입력이 1개일 수도 있으므로 \n\nN = int(input())\nlst = list(map(int, input().split()))\nd = [1] * N # 부분 수열의 길이를 구하는 것이므로 dp테이블은 1로 초기화 (1개만 있는 것도 가능하므로)\nfor i in range(N) : \n for j in range(i) : \n if lst[i] < lst[j] : \n d[i] = max(d[i], d[j]+1)\nprint(d)\nprint(max(d))","sub_path":"DP/dp_21_11722.py","file_name":"dp_21_11722.py","file_ext":"py","file_size_in_byte":1010,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"600141349","text":"#This program checks for the total throughput of each wireless and ethernet network card over the current session and their current bandwidth (up and down) by printing them to the console\n#this program assumes 'netcat' is installed and is run under root access\nimport subprocess\nimport sys\nimport time\n\ndownloadedBytes=0\nuploadedBytes=0\noldTotalRX=0\noldTotalTX=0\nCONST_TIME_SLEEP=0.5#seconds\n\n#returns a tuple in the form (int downloadKBPS, int uploadKBPS) for a given network card (eth0,lo or wlan0 in most cases)\ndef getBandwidthRates(networkCard):\n\tdownstr = subprocess.check_output(\"cat /sys/class/net/\"+networkCard+\"/statistics/rx_bytes\", shell=True)#fetches total download in current session\n\tupstr = subprocess.check_output(\"cat /sys/class/net/\"+networkCard+\"/statistics/tx_bytes\", shell=True)\n\toldTotalRX = int(downstr)\n\toldTotalTX = int(upstr)\n\ttime.sleep(CONST_TIME_SLEEP)#seconds\n\t\n\tdownstr = subprocess.check_output(\"cat /sys/class/net/\"+networkCard+\"/statistics/rx_bytes\", shell=True)\n\tupstr = subprocess.check_output(\"cat /sys/class/net/\"+networkCard+\"/statistics/tx_bytes\", shell=True)\n\tTotalRX = int(downstr)\n\tTotalTX = int(upstr)\t\n\n\tdownloadedKiloBytes = (TotalRX - oldTotalRX)/1000.0\n\tuploadedKiloBytes = (TotalTX - oldTotalTX)/1000.0\n\tdownKBPS=downloadedKiloBytes/CONST_TIME_SLEEP\n\tupKBPS=uploadedKiloBytes/CONST_TIME_SLEEP\n\treturn downKBPS,upKBPS\n\nnetwordCard=\"eth0\"\n\nwhile True:\n\ttupbandwlan0 = getBandwidthRates(networdCard)\n\n\tsys.stdout.write(\"\\r\"+networdCard.ljust(10)+\"==>\"+\"Upload: \".rjust(10)+str(tupbandwlan0[1]).ljust(5)+\" KB/s\"+\"Download: \".rjust(20)+str(tupbandwlan0[0]).ljust(5)+\" KB/s\")\n\tsys.stdout.flush()\n\t\n\n\n","sub_path":"source/networking/BandwidthMonitor.py","file_name":"BandwidthMonitor.py","file_ext":"py","file_size_in_byte":1644,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"626377618","text":"import unittest\n\nfrom src.library_tube import LibraryTube\nfrom src.sample import Sample\nfrom src.labware import Labware\nfrom src.sample_tube import SampleTube\nfrom src.lane import Lane\n\n\nclass TestLibraryTube(unittest.TestCase):\n\n def setUp(self):\n samples = [Sample('123'), Sample('456')]\n self.lt = LibraryTube('TN123', samples)\n\n def test_copy_samples(self):\n sam = Sample()\n st = SampleTube('TN123', sam)\n self.lt.copy_samples(st)\n self.assertIsNone(st.sample.tag)\n samples = [Sample('456'), Sample('789')]\n lane = Lane(123, Lane.PASS, samples)\n self.lt.copy_samples(lane)\n self.assertEqual(len(lane.samples), 2)\n self.assertEqual(lane.samples, self.lt.samples)\n\n def test_location(self):\n \"\"\"Test that the location can be set and get\"\"\"\n self.assertIsNone(self.lt.location)\n loc = '123'\n self.lt.location = loc\n self.assertEqual(self.lt.location, loc)\n\n def test_barcode(self):\n \"\"\"Test that only barcodes beginning with TN can be used\"\"\"\n with self.assertRaises(ValueError):\n self.lt.barcode = 'DN123'\n self.assertEqual(self.lt.barcode, 'TN123')\n self.lt.barcode = 'TN456'\n self.assertEqual(self.lt.barcode, 'TN456')\n\n def test_samples(self):\n \"\"\"Test that the samples can only be tagged\"\"\"\n self.assertEqual(len(self.lt.samples), 2)\n self.lt.samples = [Sample('123')]\n self.assertEqual(len(self.lt.samples), 1)\n with self.assertRaises(TypeError):\n self.lt.samples = [Sample(), Sample()]\n self.assertEqual(len(self.lt.samples), 1)\n self.assertEqual(self.lt.samples[0].tag, '123')\n\n def test_state(self):\n \"\"\"Test library tube state get and set\"\"\"\n self.assertEqual(self.lt.state, Labware.PENDING)\n state = Labware.STARTED\n self.lt.state = state\n self.assertEqual(self.lt.state, state)\n\n def test_cange_state(self):\n \"\"\"Test library tube state change\"\"\"\n start_state = Labware.PENDING\n dest_state = Labware.STARTED\n self.lt.state = start_state\n self.lt.change_state(dest_state)\n self.assertEqual(self.lt.state, dest_state)\n\n start_state = Labware.PENDING\n dest_state = Labware.PASSED\n self.lt.state = start_state\n self.lt.change_state(dest_state)\n self.assertEqual(self.lt.state, start_state)\n\n start_state = Labware.PENDING\n dest_state = Labware.FAILED\n self.lt.state = start_state\n self.lt.change_state(dest_state)\n self.assertEqual(self.lt.state, start_state)\n\n start_state = Labware.STARTED\n dest_state = Labware.PASSED\n self.lt.state = start_state\n self.lt.change_state(dest_state)\n self.assertEqual(self.lt.state, dest_state)\n\n start_state = Labware.STARTED\n dest_state = Labware.FAILED\n self.lt.state = start_state\n self.lt.change_state(dest_state)\n self.assertEqual(self.lt.state, dest_state)\n","sub_path":"tests/test_library_tube.py","file_name":"test_library_tube.py","file_ext":"py","file_size_in_byte":3062,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"523834752","text":"import sys\nimport json\nfrom ufal.udpipe import Model, Pipeline, ProcessingError # pylint: disable=no-name-in-module\n\nmodel = Model.load('../UDPipe/spanish-ancora-ud-2.3-181115.udpipe')\npipeline = Pipeline(model, 'tokenize', Pipeline.DEFAULT, Pipeline.DEFAULT, 'conllu')\nerror = ProcessingError()\n\n\ndef udpipe_parse(text):\n if text is None:\n return \"\"\n\n processed = pipeline.process(text, error)\n if error.occurred():\n sys.stderr.write(\"An error occurred when running run_udpipe: \")\n sys.stderr.write(error.message)\n sys.stderr.write(\"\\n\")\n sys.exit(1)\n\n return processed\n\n\ndef udpipe_pos(processed):\n processed = processed.split('\\n')\n par_id = 0\n sent_id = -1\n parse_table = []\n for line in processed:\n # ignore blank lines\n if len(line) == 0:\n continue\n\n # found new paragraph comment\n if line == '# newpar':\n par_id += 1\n\n # found new sentence comment\n if '# sent_id =' in line:\n sentence_comment = line.split('=')\n sent_id = int(sentence_comment[1].strip())\n\n # found actual parse token\n if line[0] != '#':\n parse_fields = line.split('\\t')\n\n if len(parse_fields) == 10:\n parse_token = dict()\n parse_token['SentID'] = sent_id\n parse_token['ParID'] = par_id\n parse_token['Id'] = parse_fields[0]\n parse_token['Form'] = parse_fields[1]\n parse_token['Lemma'] = parse_fields[2]\n parse_token['UPosTag'] = parse_fields[3]\n parse_token['XPosTag'] = parse_fields[4]\n parse_token['Feats'] = parse_fields[5]\n parse_token['Head'] = parse_fields[6]\n parse_token['DepRel'] = parse_fields[7]\n parse_token['Deps'] = parse_fields[8]\n parse_token['Misc'] = parse_fields[9]\n parse_table.append(parse_token)\n\n parse_pos = {\n 'PROPN': set(),\n 'VERB': set(),\n 'NUM': set(),\n 'AUX': set(),\n 'NOUN': set(),\n }\n\n for token in parse_table:\n for pos in parse_pos:\n if token['UPosTag'] == pos:\n parse_pos[pos].add(token['Lemma'])\n\n for pos in parse_pos:\n parse_pos[pos] = list(parse_pos[pos])\n\n return json.dumps(parse_pos)\n","sub_path":"Streaming/udpipe_parse.py","file_name":"udpipe_parse.py","file_ext":"py","file_size_in_byte":2384,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"470342116","text":"class newNode:\n def __init__(self,val):\n self.val=val\n self.left=None\n self.right=None\ndef Height(root):\n if root is None:\n return 0\n else:\n left=Height(root.left)\n right=Height(root.right)\n return max(1+left,1+right)\n\n\ndef diameter(root):\n if root is None:\n return 0\n else:\n left_height=Height(root.left)\n right_height=Height(root.right)\n total_height=left_height+right_height+1\n left_diameter=diameter(root.left)\n right_diameter=diameter(root.right)\n max_diameter=max(left_diameter,right_diameter)\n return max(max_diameter,total_height)\n\nif __name__ == '__main__':\n root = newNode(5)\n root.left = newNode(9)\n root.right = newNode(3)\n root.left.left = newNode(6)\n root.right.right = newNode(4)\n root.left.left.left = newNode(8)\n root.left.left.right = newNode(7)\n print (diameter(root))","sub_path":"DiameterOfBinaryTree.py","file_name":"DiameterOfBinaryTree.py","file_ext":"py","file_size_in_byte":923,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"535385287","text":"import os\n\nfrom sklearn.cluster import KMeans\nimport numpy as np\n\nfrom config import LOG_DIR\nfrom logger import setup_logger\n\nml_logger = setup_logger(__name__, log_file=os.path.join(LOG_DIR, 'server_log'), level=\"DEBUG\")\nml_logger.propagate = False\n\n\ndef get_center_from_data(df, disease):\n try:\n kmeans = KMeans(n_clusters=8)\n data = df[df.DISEASE == disease]\n if data.shape[0] == 0:\n return (), ()\n data = data.dropna()\n array = data[['GEO_LAT', 'GEO_LONG']].values\n if array.shape[0] >= 8:\n kmeans.fit(array)\n cent = kmeans.cluster_centers_\n count_list = [len(np.where(kmeans.labels_ == i)[0]) for i in range(kmeans.n_clusters)]\n return cent.tolist(), count_list\n except ValueError:\n ml_logger.exception(\"Error occured while clustering.\")\n return [], []\n","sub_path":"medmap/core/kmeans.py","file_name":"kmeans.py","file_ext":"py","file_size_in_byte":871,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"54581058","text":"array = []\nfor i in range(8):\n array.append([int(x) for x in input().split()])\n\nfor i in range(8):\n for j in range(8):\n if (i != j and ((array[i][0] == array[j][0])\n or (array[i][1] == array[j][1])\n or (abs(array[i][0] - array[j][0]) == abs(array[i][1] - array[j][1])))):\n print('YES')\n exit(0)\nprint('NO')","sub_path":"Z_queen.py","file_name":"Z_queen.py","file_ext":"py","file_size_in_byte":363,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"193382306","text":"from django.shortcuts import render\nfrom django.http import HttpResponse\n# from .serializers import EventSerializer\n# from rest_framework.decorators import api_view\nfrom .data_import import get_data\n\n# @api_view(['GET', 'POST'])\ndef index(request):\n # if request.POST:\n # else :\n if request.method == 'GET':\n # get_data()\n print(\"request.GET\", request.GET)\n print(\"request.data\", request.data)\n return HttpResponse(\"Hello, world. You're at the GET event index.\")\n if request.method == 'POST':\n print(\"request.POST\", request.POST)\n print(\"request.data\", request.data)\n return HttpResponse(\"Hello, world. You're at the POST event index.\")\n","sub_path":"event/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":698,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"367484641","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n#\n# Copyright (C) 2015-2017 Bitergia\n#\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software\n# Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335, USA.\n#\n# Authors:\n# Alvaro del Castillo \n\nimport logging\nimport sys\nimport unittest\n\n\n# Hack to make sure that tests import the right packages\n# due to setuptools behaviour\nsys.path.insert(0, '..')\n\nfrom mordred.config import Config\nfrom mordred.mordred import Mordred\nfrom mordred.task_collection import TaskRawDataCollection\n\nCONF_FILE = 'test.cfg'\nPROJ_FILE = 'test-projects.json'\nGIT_BACKEND_SECTION = 'git'\n\nlogging.basicConfig(level=logging.INFO)\n\nclass TestTaskRawDataCollection(unittest.TestCase):\n \"\"\"Task tests\"\"\"\n\n def test_initialization(self):\n \"\"\"Test whether attributes are initializated\"\"\"\n config = Config(CONF_FILE)\n backend_section = GIT_BACKEND_SECTION\n task = TaskRawDataCollection(config, backend_section=backend_section)\n\n self.assertEqual(task.config, config)\n self.assertEqual(task.backend_section, backend_section)\n\n def test_run(self):\n \"\"\"Test whether the Task could be run\"\"\"\n config = Config(CONF_FILE)\n backend_section = GIT_BACKEND_SECTION\n task = TaskRawDataCollection(config, backend_section=backend_section)\n self.assertEqual(task.execute(), None)\n\n\nif __name__ == \"__main__\":\n unittest.main(warnings='ignore')\n","sub_path":"tests/test_task_collection.py","file_name":"test_task_collection.py","file_ext":"py","file_size_in_byte":2036,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"630222031","text":"import random\nimport scipy.sparse as sparse\nimport scipy.io\nimport numpy as np\nimport pandas as pd\n\ndef write_csv(df, filename, index=True):\n print(\"Saved results in a .csv file called '{}'\".format(filename))\n df.to_csv(filename, index=index)\n \n \ndef write_pickle(df, filename):\n print(\"Saved results in a .pickle file called '{}'\".format(filename))\n df.to_pickle(filename)\n \n \ndef write_list_to_csv(a_list, filename):\n import csv\n print(\"Saved feature names in a .csv file called '{}'\".format(filename))\n with open(filename, 'w', newline='') as csvfile:\n writer = csv.writer(csvfile, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL)\n writer.writerow(a_list)\n \n \ndef write_list_to_txt(a_list, filename):\n with open(filename, 'w') as file:\n for item in a_list:\n file.write(\"%s,\" % item) # comma separated\n print(\"Saved feature names in a .txt file called '{}'\".format(filename))\n \n \ndef read_list_from_txt(filename):\n file = open(filename,\"r+\") \n return file.read().split(',')\n \n \ndef stratified_sample(df, n):\n \"\"\"\n Sample that adheres to the original class distribution\n Distribution of both classes\n Based on journey_id split since labels are involved\n \n Parameters:\n ------------\n df : Df that you want to sample from that at least contains\n a journey IDs and journey class labels\n n : [int] number of journeys that are sampled\n \n Returns:\n Smaller dataframe (size n) with the same distribution of \n classes as the original dataframe.\n \n \"\"\"\n from sklearn.model_selection import train_test_split\n from sklearn.preprocessing import LabelEncoder\n \n if n>len(df):\n print('n is bigger than the dataframe')\n return df\n \n data = df.loc[:, ['journey_id', 'journey_class']]\n data.drop_duplicates(subset=['journey_id'], inplace=True)\n X = data.drop(['journey_class'], axis=1)\n \n le = LabelEncoder()\n le.fit(data['journey_class'])\n y = le.transform(data['journey_class'])\n\n # The train set X are the sampled journeys\n X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=n, stratify=y, random_state=42)\n \n return df[df.journey_id.isin(list(X_train.journey_id))]\n \n","sub_path":"utilities/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2323,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"465446624","text":"'''\nCreated on Nov 2, 2015\n\n@author: jj1745\n'''\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n# Construc the grid\ninterval = 1000\n\nx_axis = np.linspace(-2, 1, interval)\ny_axis = np.linspace(-1.5, 1.5, interval)\n\nx,y = np.meshgrid(x_axis,y_axis)\n\n# Do the iteration\nN_max = 50\nsome_threshold = 50\n\nc = x + 1j*y\n\nz = c\nfor v in range(N_max):\n z = z**2 + c\n \n# Form the 2-D boolean mask\nmask = (np.abs(z) < some_threshold)\nplt.imshow(mask, extent = [-2,1,-1.5,1.5])\nplt.gray()\nplt.savefig('mandelbrot.png')","sub_path":"jj1745/question4.py","file_name":"question4.py","file_ext":"py","file_size_in_byte":516,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"51040861","text":"from CryptoPanel.HTMLModule.ui_HTMLModule import ui_HTMLPanel\nimport html\nfrom PyQt5 import QtGui\nfrom ui_Widgets.ErrorWin import errorInfo\n\n\nclass HTMLPanel(ui_HTMLPanel):\n def __init__(self):\n super(HTMLPanel, self).__init__()\n\n self.HTMLEncodeButton.clicked.connect(self.HTMLEncode)\n self.HTMLDecodeButton.clicked.connect(self.HTMLDecode)\n self.HTMLCipherBox.textChanged.connect(self.setFontColorCipher)\n self.HTMLTextBox.textChanged.connect(self.setFontColorText)\n\n def setFontColorCipher(self):\n self.HTMLCipherBox.setTextColor(QtGui.QColor(200, 200, 200))\n\n def setFontColorText(self):\n self.HTMLTextBox.setTextColor(QtGui.QColor(200, 200, 200))\n\n def HTMLEncode(self):\n text = self.HTMLTextBox.toPlainText()\n try:\n output = html.escape(text)\n self.HTMLCipherBox.setText(output)\n except:\n errorInfo(self, '编码时出现错误!')\n\n def HTMLDecode(self):\n text = self.HTMLCipherBox.toPlainText()\n try:\n output = html.unescape(text)\n self.HTMLTextBox.setText(output)\n except:\n errorInfo(self, '解码时出现错误!')\n","sub_path":"CryptoPanel/OldFiles/HTMLModule/HTMLModule.py","file_name":"HTMLModule.py","file_ext":"py","file_size_in_byte":1196,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"470726469","text":"import tensorflow as tf\r\nimport numpy as np\r\nimport detect_face\r\nfrom scipy import misc\r\nimport cv2\r\nfrom matplotlib import pyplot as plt\r\n\r\n\r\nminsize = 20\r\nthreshold = [0.6,0.7,0.7]\r\nfactor = 0.85\r\n\r\nimg = cv2.imread('3.jpg')\r\nwith tf.Session() as sess:\r\n pnet, rnet, onet = detect_face.create_mtcnn(sess, None)\r\n bounding_boxes, points = detect_face.detect_face(img, minsize, pnet, rnet, onet, threshold, factor)\r\n\r\n\r\n\r\n\r\n\r\n#画图\r\nfor i, boxes in enumerate(bounding_boxes):\r\n \r\n box = boxes.astype(np.int32)\r\n landmark = points[:, i].reshape( (2,5) ).T\r\n \r\n draw_img = cv2.rectangle(img, (box[0],box[1]), (box[2],box[3]), (0,255,0), 2)\r\n for i, point in enumerate(landmark):\r\n if i==5:\r\n break\r\n draw_img = cv2.circle(draw_img,tuple(point),4,(255,0,0),-1)\r\n\r\ncv2.namedWindow('image', cv2.WINDOW_NORMAL)\r\ncv2.imshow('image',draw_img)\r\ncv2.waitKey(0)\r\ncv2.destroyAllWindows() \r\n\r\n\r\n","sub_path":"demo.py","file_name":"demo.py","file_ext":"py","file_size_in_byte":936,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"354091319","text":"\n\nimport re, urllib, urlparse, base64, json\nfrom resources.lib.modules import cleantitle\nfrom resources.lib.modules import client\nfrom resources.lib.modules import directstream\nfrom resources.lib.modules import cache\n\n\nclass source:\n def __init__(self):\n self.priority = 1\n self.language = ['en']\n self.domains = ['bobbyhd.com']\n self.base_link = 'http://webapp.bobbyhd.com'\n self.search_link = '/search.php?keyword=%s'\n self.player_link = '/player.php?alias=%s'\n\n def matchAlias(self, title, aliases):\n try:\n for alias in aliases:\n if cleantitle.get(title) == cleantitle.get(alias['title']):\n return True\n except:\n return False\n\n def searchMovie(self, title, year, aliases, headers):\n try:\n title = cleantitle.normalize(title)\n title = cleantitle.getsearch(title)\n query = self.search_link % ('%s+%s' % (urllib.quote_plus(title), year))\n query = urlparse.urljoin(self.base_link, query)\n r = client.request(query, timeout='15', headers=headers, mobile=True)\n match = re.compile('alias=(.+?)\\'\">(.+?)').findall(r)\n match = [(i[0], re.findall('(.+?) \\((\\d{4})', i[1])) for i in match]\n match = [(i[0], i[1][0][0], i[1][0][1]) for i in match if len(i[1]) > 0]\n r = [(i[0],i[1]) for i in match if self.matchAlias(i[1], aliases) and year == i[2]][0]\n return r\n except:\n return\n\n def movie(self, imdb, title, localtitle, aliases, year):\n try:\n headers = {'User-Agent': 'Mozilla/5.0 (iPhone; CPU iPhone OS 9_3_2 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Mobile/13F69'}\n aliases.append({'country': 'us', 'title': title})\n r = self.searchMovie(title, year, aliases, headers)\n url = {'type': 'movie', 'id': r[0], 'episode': 0, 'headers': headers}\n url = urllib.urlencode(url)\n return url\n except:\n return\n\n def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):\n try:\n headers = {'User-Agent': 'Mozilla/5.0 (iPhone; CPU iPhone OS 9_3_2 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Mobile/13F69'}\n aliases.append({'country': 'us', 'title': tvshowtitle})\n url = {'tvshowtitle': tvshowtitle, 'year': year, 'headers': headers, 'aliases': aliases}\n url = urllib.urlencode(url)\n return url\n except:\n return\n\n def episode(self, url, imdb, tvdb, title, premiered, season, episode):\n try:\n data = urlparse.parse_qs(url)\n data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])\n headers = eval(data['headers'])\n aliases = eval(data['aliases'])\n title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']\n title = cleantitle.getsearch(title)\n query = self.search_link % (urllib.quote_plus(title))\n query = urlparse.urljoin(self.base_link, query)\n r = client.request(query, headers=headers, timeout='30', mobile=True)\n match = re.compile('alias=(.+?)\\'\">(.+?)').findall(r)\n r = [(i[0], re.findall('(.+?)\\s+-\\s+Season\\s+(\\d+)', i[1])) for i in match]\n r = [(i[0], i[1][0][0], i[1][0][1]) for i in r if len(i[1]) > 0]\n r = [i[0] for i in r if self.matchAlias(i[1], aliases) and int(season) == int(i[2])][0]\n url = {'type': 'tvshow', 'id': r, 'episode': episode, 'season': season, 'headers': headers}\n url = urllib.urlencode(url)\n return url\n except:\n return\n\n def sources(self, url, hostDict, hostprDict):\n try:\n sources = []\n data = urlparse.parse_qs(url)\n data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])\n if data['id'] == None: return sources\n headers = eval(data['headers'])\n\n url = urlparse.urljoin(self.base_link, self.player_link % data['id'])\n\n r = client.request(url, headers=headers, timeout='30', mobile=True)\n\n if data['type'] == 'tvshow':\n match = re.compile('changevideo\\(\\'(.+?)\\'\\)\".+?data-toggle=\"tab\">(.+?)\\..+?').findall(r)\n else:\n match = re.compile('changevideo\\(\\'(.+?)\\'\\)\".+?data-toggle=\"tab\">(.+?)').findall(r)\n\n for url, ep in match:\n try:\n if data['type'] == 'tvshow':\n if int(data['episode']) != int(ep):\n raise Exception()\n quality = directstream.googletag(url)[0]['quality']\n sources.append({'source': 'gvideo', 'quality': quality, 'language': 'en', 'url': url, 'direct': True, 'debridonly': False})\n except:\n pass\n return sources\n except:\n return sources\n\n def resolve(self, url):\n return directstream.googlepass(url)\n","sub_path":"plugin.video.MHclassics/resources/lib/sources/en/bobby.py","file_name":"bobby.py","file_ext":"py","file_size_in_byte":5115,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"187698022","text":"# Ortal Lankri, 209281674, Adi Meirman, 208177204\n\nimport random\nimport socket\nimport sys\nimport threading\nfrom cryptography.hazmat.primitives import hashes\nfrom cryptography.hazmat.primitives.asymmetric import padding\nfrom cryptography.hazmat.primitives.serialization import load_pem_private_key\n\nm_list = []\n\n\ndef manage():\n threading.Timer(60, manage).start()\n copy = m_list.copy()\n m_list.clear()\n send_messages(copy)\n\n\ndef send_messages(messages_list):\n random.shuffle(messages_list)\n for m in messages_list:\n send_message(m[0], m[1], m[2])\n\n\ndef send_message(message, ip, port):\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.connect((ip, port))\n s.send(message)\n s.close()\n\n\ndef decrypt(message, num):\n fileName = \"sk\" + num + \".pem\"\n privateKey = open(fileName).read()\n pemKey = load_pem_private_key(privateKey.encode(), password=None)\n msg = pemKey.decrypt(\n message,\n padding.OAEP(\n mgf=padding.MGF1(algorithm=hashes.SHA256()),\n algorithm=hashes.SHA256(),\n label=None\n )\n )\n # decrypt ip and port\n s = msg[0:4]\n ip = str(s[0]) + \".\" + str(s[1]) + \".\" + str(s[2]) + \".\" + str(s[3])\n s = msg[4:6]\n port = int.from_bytes(s, 'big')\n msg = msg[6:]\n # add message to list\n m_list.append([msg, ip, int(port)])\n\n\ndef main():\n threading.Timer(60, manage).start()\n num = sys.argv[1]\n ips = open(\"ips.txt\").read().split(\"\\n\")\n data = ips[int(num) - 1].split(\" \")\n port = int(data[1])\n server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n server.bind(('', port))\n server.listen(10)\n while True:\n client_socket, client_address = server.accept()\n data = client_socket.recv(4096)\n decrypt(data, num)\n client_socket.close()\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"mix.py","file_name":"mix.py","file_ext":"py","file_size_in_byte":1861,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"134787761","text":"#coding=utf8\nfrom selenium import webdriver\n#启动Phantomjs无浏览器界面\ndriver = webdriver.PhantomJS(\"./phantomjs-2.1.1-windows/bin/phantomjs\")\n\n#访问百度首页\ndriver.get(\"http://www.baidu.com\")\n\n#获取百度的搜索框\nkeyword = driver.find_element_by_id(\"kw\")\n\n#向输入框中输入一个要搜索的词语\nkeyword.send_keys(u\"火车票\")\n\n#点击百度搜索按钮\nbtn = driver.find_element_by_id(\"su\").click()\n\n\n# 查看搜索页面\nimport time\ntime.sleep(3)\ndriver.save_screenshot(\"wenjian/baidu3.png\")","sub_path":"day02/demo_sousuo.py","file_name":"demo_sousuo.py","file_ext":"py","file_size_in_byte":522,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"225826079","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\nBase session class to allow extention of functionality to Worldcat Search API\nand others.\n\"\"\"\n\nimport requests\n\nfrom . import __title__, __version__\nfrom bookops_worldcat.errors import WorldcatSessionError\n\n\nclass WorldcatSession(requests.Session):\n \"\"\"Base class, inherits all requests.Session methods\"\"\"\n\n def __init__(self, agent=None, timeout=None):\n requests.Session.__init__(self)\n\n if agent is None:\n self.headers.update({\"User-Agent\": f\"{__title__}/{__version__}\"})\n elif type(agent) is str:\n self.headers.update({\"User-Agent\": agent})\n else:\n raise WorldcatSessionError(\"Argument 'agent' must be a str\")\n\n self.timeout = timeout\n if not self.timeout:\n self.timeout = (3, 3)\n","sub_path":"bookops_worldcat/_session.py","file_name":"_session.py","file_ext":"py","file_size_in_byte":804,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"335092955","text":"#!/usr/bin/env python3\n\n# ===== IMPORTS ===== #\n\nimport psycopg2\nimport datetime\nimport time\nimport random\nimport json\nimport sys\nimport os\n\nfrom pyrogram import Client\n\n\n# ===== GENERAL ===== #\n\n# Читаем настройки скрипта\nwith open(\"/script/src/settings.json\", \"r\", encoding=\"utf-8\") as fj:\n\tsettings = json.load(fj)\n\n# Читаем текст рассылки\nwith open(\"../res/text.txt\") as file:\n\ttext_message = file.read()\n\n# Счетчик для подсчета кол-ва отправленных сообщений\ncounters = {\n\t\"counter_message\": 0\n}\n\n\napp = Client(\"main\")\n\nprint(\"-------------------------- script ---------------\")\nprint(\"DATABASE_NAME =\", os.environ.get('DATABASE_NAME'))\nprint(\"DATABASE_HOST =\", os.environ.get('DATABASE_HOST'))\n\n# Подключение к Postgres\ntry:\n\tconn = psycopg2.connect(\n\t\tdatabase = os.environ.get('DATABASE_NAME'),\n\t\tuser = os.environ.get('DATABASE_USER'),\n\t\tpassword = os.environ.get('DATABASE_PASS'),\n\t\tport = os.environ.get('DATABASE_PORT'),\n\t\thost = os.environ.get('DATABASE_HOST')\n\t)\n\tprint(\"[INFO] Подключение с БД установлено\")\n\n\tcur = conn.cursor()\n\tconn.commit()\n\nexcept:\n\tprint(\"[INFO] Нет соединения с БД\")\n\n# Главная функция рассылки\ndef sender(user):\n\n\t# Отправка сообщений взависимости от дневного лимита на каждый аккаунт\n\tfor i in range(settings[\"daily_limit\"]):\n\n\t\tif counters[\"counter_message\"] == settings[\"mailing_speed\"]:\n\t\t\tbreak\n\n\t\tapp.send_message(user, text_message)\n\n\t\tcounters[\"counter_message\"] += 1\n\t\t\n\t\t# Настройка вывода информации о количестве сообщений при каждой итерации цикла\n\t\tif settings[\"stop_counter\"] == False:\n\t\t\tprint(\"Общее кол-во сообщений: \", counters[\"counter_message\"])\n\t\telse:\n\t\t\tpass\n\n\t\t# Настройка генератора рандомного интервала\n\t\tif settings[\"random_interval_modes\"] == True:\n\n\t\t\t# Извлекаем из списка с заданным промежутком времени начальную и конечную секунды\n\t\t\tlst_int = settings[\"random_interval\"]\n\n\t\t\trandom_interval = random.randint(lst_int[0], lst_int[1])\n\t\t\ttime.sleep(random_interval)\n\t\t\tprint(\"Интервал для\", counters[\"counter_message\"], \"сообщения равен:\", random_interval, \"сек\")\n\n\t\telse:\n\t\t\ttime.sleep(settings[\"interval\"])\n\nwith app:\n\n\t# Запрос к Postgres (1): узнаем общее количество записей в БД\n\ttry:\n\t\tquery_n1 = \"SELECT COUNT(*) FROM users\"\n\t\tcur.execute(query_n1)\n\n\texcept:\n\t\tprint(\"[INFO] Не удалось узнать кол-во записей в БД\")\n\n\t# Получаем кортеж\n\tvar_data_bases = cur.fetchall()\n\t# Кортеж переводим в список\n\ttur_db = var_data_bases[0]\n\t# Элемент списка переводим в int\n\tnumber_of_users = int(tur_db[0])\n\n\tprint(\"[INFO] Кол-во записей в БД:\", number_of_users)\n\tconn.commit()\n\n\t# Запрос к Postgres (2): Извлекаем самих пользователей\n\ttry:\n\t\tquery_n2 = \"SELECT nickname FROM users\"\n\t\tcur.execute(query_n2)\n\n\texcept:\n\t\tprint(\"[INFO] Контактные данные не были извлечены\")\n\n\n\t# Цикл, главным условием которого является общее кол-во сообщений и не превышает ли оно допустимое значение\\\n\twhile counters[\"counter_message\"] < settings[\"mailing_speed\"]:\n\n\t\t# Пробегаем по БД построчно\n\t\tfor i in range(number_of_users):\n\n\t\t\t# Достаём пользователя из БД\n\t\t\tuser = cur.fetchone()\n\t\t\tconn.commit()\n\t\t\t\n\t\t\t# Цикл рассылки для каждого отдельного пользователя\n\t\t\tfor user in user:\n\t\t\t\tsender(user)\n\t\n\n\tprint(\"Рассылка завершена. Всего сообщений: \", counters[\"counter_message\"])\n\tconn.close()\n","sub_path":"script/src/script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":4098,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"3801042","text":"import random\r\n\r\n\r\nclass Point:\r\n \"\"\"\r\n Represents an x/y coordinate on a board\r\n \"\"\"\r\n\r\n def __init__(self, x = 0, y = 0):\r\n self.x = x\r\n self.y = y\r\n\r\n def __str__(self):\r\n return str(self.__dict__)\r\n\r\n def __repr__(self):\r\n return str(self.__dict__)\r\n \r\n def __eq__(self, other):\r\n if type(other) is type(self):\r\n return self.__dict__ == other.__dict__\r\n return False\r\n\r\n def __ne__(self, other):\r\n return not self.__eq__(other)\r\n\r\n \r\nclass Ship:\r\n \"\"\"\r\n Represents a ship\r\n \"\"\"\r\n\r\n def __init__(self, x, y, size, vertical):\r\n\r\n # Creates an array of ship coordinate points\r\n self.location = []\r\n if vertical:\r\n for i in range(size):\r\n self.location.append(Point(x, y + i))\r\n else:\r\n for i in range(size):\r\n self.location.append(Point(x + i, y))\r\n \r\n def __str__(self):\r\n return \"Ship: \" + str(self.location)\r\n\r\n\r\nclass Board:\r\n \"\"\"\r\n Represents a battleship board\r\n \"\"\"\r\n \r\n def __init__(self):\r\n self.shots = []\r\n self.fleet = [] \r\n self.populate_fleet()\r\n\r\n # Debug\r\n #for ship in self.fleet:\r\n # print(ship)\r\n\r\n def populate_fleet(self):\r\n sizes = [5, 4, 3, 3, 2]\r\n while len(self.fleet) < 5:\r\n valid = True\r\n size = sizes[len(sizes) - 1]\r\n vertical = random.choice([True, False])\r\n x = random.randint(0, 9)\r\n y = random.randint(0, 9)\r\n \r\n # Check to make sure that the ship won't be hanging over the edge of the board\r\n if vertical and y > 9 - size:\r\n continue\r\n elif not vertical and x > 9 - size:\r\n continue\r\n\r\n # Create a candidate Ship, then see if it overlaps any already extant\r\n candidate = Ship(x, y, size, vertical)\r\n\r\n for ship in self.fleet:\r\n for p in candidate.location:\r\n if p in ship.location:\r\n valid = False\r\n\r\n if valid:\r\n self.fleet.append(candidate)\r\n sizes.pop()\r\n \r\n # Appends shot to list of shots taken\r\n def take_shot(self, p):\r\n print(\"shot:\" + str(p))\r\n if p not in self.shots:\r\n self.shots.append(p)\r\n #print(self.shots)\r\n for ship in self.fleet:\r\n for o in ship.location:\r\n print(o)\r\n print(o == p)\r\n print()\r\n if o == p:\r\n return len(ship.location)\r\n #if ship.is_sunk():\r\n # pass # return double ship length...\r\n return 0\r\n\r\n # returns coordinate status given a Point\r\n def get_coordinate_status(self, p):\r\n if p in self.shots:\r\n for ship in self.fleet:\r\n if p in ship.location:\r\n return 'X'\r\n return '.'\r\n return '~'\r\n\r\n def print_board(self):\r\n print()\r\n print(\" \", end=\"\")\r\n for i in range(10):\r\n print(str(i) + \" \", end=\"\")\r\n side = 0\r\n for y in range(10):\r\n print()\r\n print(str(side) + \" \", end=\"\")\r\n side += 1\r\n for x in range(10):\r\n print(self.get_coordinate_status(Point(x, y)) + \" \", end=\"\") \r\n print()\r\n\r\n # Returns true when the game is over\r\n def game_over(self):\r\n for ship in self.fleet:\r\n for p in ship.location:\r\n if p not in self.shots:\r\n return False\r\n return True\r\n\r\n\r\ndef main():\r\n\r\n b = Board()\r\n while b.game_over() == False:\r\n b.print_board()\r\n x = int(input(\"Enter X: \"))\r\n y = int(input(\"Enter Y: \"))\r\n shot_status = b.take_shot(Point(x, y))\r\n print(shot_status)\r\n if shot_status > 0 and shot_status < 6:\r\n print(\"Hit on ship with length \" + str(status) + \"!\")\r\n elif shot_status > 5:\r\n print(\"Ship sunk with length \" + str(status) + \"!\")\r\n else:\r\n print(\"Miss!\")\r\n \r\n\r\nif __name__ == '__main__':\r\n main()\r\n","sub_path":"battleship/battleship.py","file_name":"battleship.py","file_ext":"py","file_size_in_byte":4289,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"498953207","text":"import logging\nimport logging.handlers\n\nfrom .constants import DEFAULT_LOGGING_LEVEL\nfrom .formatters import MultiLineFormatter\n\nlogger = logging.getLogger(__name__)\n\n\nclass Log(object):\n __slots__ = []\n\n @classmethod\n def initialize_logging(cls, log_file_path):\n formatter = MultiLineFormatter(\n '%(asctime)s %(name)-40s %(funcName)-48s %(levelname)-8s %(message)s'\n )\n\n console_handler = logging.StreamHandler()\n console_handler.setFormatter(formatter)\n\n rotating_file_handler = logging.handlers.RotatingFileHandler(\n '{0}'.format(log_file_path),\n maxBytes=1024 * 1024 * 10,\n backupCount=10,\n encoding='utf-8',\n )\n rotating_file_handler.setFormatter(formatter)\n\n logging.getLogger('smooth_streams_epg_generator').addHandler(console_handler)\n logging.getLogger('smooth_streams_epg_generator').addHandler(\n rotating_file_handler\n )\n\n cls.set_logging_level(DEFAULT_LOGGING_LEVEL)\n\n @classmethod\n def set_logging_level(cls, log_level):\n logging.getLogger('smooth_streams_epg_generator').setLevel(log_level)\n\n for handler in logger.handlers:\n handler.setLevel(log_level)\n","sub_path":"smooth_streams_epg_generator/log.py","file_name":"log.py","file_ext":"py","file_size_in_byte":1251,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"355700449","text":"# 给定一个数组能寻找到一个子数组的长度至少为2的累加和能够被k整除的子数组,如果是返回真,找不到返回假\n# 这里面用到了一个数学技巧,若数字a和b分别除以数字c,若得到的余数相同,那么(a-b)必定能够整除c\n# 题目思路为在一个数组nums中,找到两个指针,pos1,pos2,使得sum[pos1] % k == sum[pos2] % k\n# 用哈希表可以完成一次遍历就可以找到\n\n\n# 要注意边界值处理,算好的pos1,pos2,咱们用pos2位置的累加和-pos1位置的累加和\n# 其实得到的是(pos1,pos2]这段的也就是[pos1+1,pos2]\n# 所以长度计算是 pos - (map[remainder] + 1) + 1\ndef exist_sum_of_subarray_division_k(nums, k):\n length = len(nums)\n\n if k == 0 or length == 0:\n return None\n map = {}\n sum = 0\n for pos in range(length):\n sum += nums[pos]\n remainder = sum % k\n if remainder in map:\n\n if pos - (map[remainder] + 1) + 1 >= 2: # 题目的长度限制\n return True\n\n else:\n map[remainder] = pos\n\n return False\n\nprint(exist_sum_of_subarray_division_k([1, 2, 3, 4, 5, 6, 1, 12], 13))\n","sub_path":"数组/子数组之和整除k.py","file_name":"子数组之和整除k.py","file_ext":"py","file_size_in_byte":1161,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"53195845","text":"\"\"\"\r\n1.\tПользователь вводит данные о количестве предприятий, их наименования и прибыль\r\nза 4 квартала (т.е. 4 отдельных числа) для каждого предприятия.\r\nПрограмма должна определить среднюю прибыль (за год для всех предприятий)\r\nи вывести наименования предприятий, чья прибыль выше среднего и отдельно\r\nвывести наименования предприятий, чья прибыль ниже среднего.\r\n\r\nПодсказка:\r\nДля решения задачи обязательно примените какую-нибудь коллекцию из модуля collections\r\nДля лучшее освоения материала можете даже сделать несколько решений этого задания,\r\nприменив несколько коллекций из модуля collections\r\n\"\"\"\r\n\r\nimport collections\r\n\r\n\r\ndef calc():\r\n n = int(input(\"Введите количество предприятий для расчета прибыли: \"))\r\n d = dict()\r\n a = 1\r\n\r\n for i in range(n):\r\n name = input(\"Введите название предприятия: \")\r\n pr = input(\r\n \"через пробел введите прибыль данного предприятия\\n\"\r\n \"за каждый квартал(Всего 4 квартала): \")\r\n profit = pr.split(\" \")\r\n d[name] = profit\r\n a += 1\r\n print()\r\n\r\n fab = collections.Counter(d)\r\n\r\n b = 0\r\n t = 0\r\n for i in fab:\r\n summ = 0\r\n for j in fab[i]:\r\n summ += int(j)\r\n fab[i] = summ\r\n t += summ\r\n b += 1\r\n sec = t / b\r\n\r\n print(\"Средняя годовая прибыль всех предприятий: \" + str(sec))\r\n bigger = []\r\n smaller = []\r\n for i in fab:\r\n if int(fab[i]) >= sec:\r\n bigger.append(i)\r\n else:\r\n smaller.append(i)\r\n\r\n print(\"Предприятия, с прибылью выше среднего значения: \", end=\"\")\r\n for i in bigger:\r\n print(i, end=\" \")\r\n print()\r\n print()\r\n print(\"Предприятия, с прибылью ниже среднего значения: \", end=\"\")\r\n for i in smaller:\r\n print(i, end=\" \")\r\n print()\r\n\r\n\r\ncalc()\r\n","sub_path":"Урок 5. Пример практического задания/task_1_counter.py","file_name":"task_1_counter.py","file_ext":"py","file_size_in_byte":2541,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"268719170","text":"from collections import OrderedDict\nfrom .. import utils\n__all__ = [\n \"Layer\",\n \"MergeLayer\",\n]\n\n# Layer base class\n\nclass Layer(object):\n def __init__(self, incoming, name=None):\n if isinstance(incoming, tuple):\n self.input_shape = incoming\n self.input_layer = None\n else:\n self.input_shape = incoming.output_shape\n self.input_layer = incoming\n\n self.name = name\n self.params = OrderedDict()\n\n @property\n def output_shape(self):\n return self.get_output_shape_for(self.input_shape)\n\n def get_params(self, **tags):\n result = list(self.params.keys())\n\n only = set(tag for tag, value in tags.items() if value)\n if only:\n result = [param for param in result\n if not (only - self.params[param])]\n\n exclude = set(tag for tag, value in tags.items() if not value)\n if exclude:\n result = [param for param in result\n if not (self.params[param] & exclude)]\n\n return result\n\n def get_output_shape(self):\n return self.output_shape\n\n def get_output(self, input=None, **kwargs):\n from .helper import get_output\n return get_output(self, input, **kwargs)\n\n def get_output_shape_for(self, input_shape):\n return input_shape\n\n def add_param(self, spec, shape, name=None, **tags):\n # prefix the param name with the layer name if it exists\n if name is not None:\n if self.name is not None:\n name = \"%s.%s\" % (self.name, name)\n\n param = utils.create_param(spec, shape, name)\n # parameters should be trainable and regularizable by default\n tags['trainable'] = tags.get('trainable', True)\n tags['regularizable'] = tags.get('regularizable', True)\n self.params[param] = set(tag for tag, value in tags.items() if value)\n\n return param\n\n def get_bias_params(self):\n return self.get_params(regularizable=False)\n\nclass MergeLayer(Layer):\n def __init__(self, incomings, name=None):\n self.input_shapes = [incoming if isinstance(incoming, tuple)\n else incoming.output_shape for incoming in incomings]\n self.input_layers = [None if isinstance(incoming, tuple)\n else incoming for incoming in incomings]\n self.name = name\n self.params = OrderedDict()\n\n @Layer.output_shape.getter\n def output_shape(self):\n return self.get_output_shape_for(self.input_shapes)\n","sub_path":"network/layers/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":2555,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"357326143","text":"#!/usr/bin/env python\n# coding: utf-8\n\nfrom __future__ import annotations\n\nimport datetime\nimport hashlib\nimport io\nimport json\nimport os\nimport random\nimport requests\nimport time\n\nfrom PIL import Image\nfrom google.cloud import vision_v1p2beta1 as vision\nfrom selenium import webdriver\n\nfrom .logger import get_logger\n\n\ndef get_webdriver(\n driver_browser: str,\n driver_executable_path: Optional[str],\n driver_options: Optional[List[str]] = None,\n) -> webdriver:\n \"\"\"\n Flexible webdriver getter and configuration\n \n In:\n browser: Browser name, will be taken as the webdriver attribute to be instantiated, i.e. webdriver.Browser\n executable_path: Path to browser executable, optional. Default behaviour is to assume the executable will be in $PATH\n\n Out:\n webdriver: instantiated webdriver, can be used as a context manager.\n \"\"\"\n if driver_options is None:\n driver_options = list()\n\n try:\n WebDriver = getattr(webdriver, driver_browser)\n options = getattr(webdriver, driver_browser.lower()).options.Options()\n except AttributeError as e:\n raise Exception(\n f\"no webdriver attribute called {driver_browser}, try setting browser to 'Chrome' or 'Firefox'\"\n ) from e\n\n for argument in driver_options:\n options.add_argument(argument)\n\n # optionally configure webdriver executable path, selenium will look in $PATH by default\n if driver_executable_path is None:\n return WebDriver(options=options)\n else:\n return WebDriver(executable_path=driver_executable_path, options=options)\n\n\ndef settings(\n application_cred_name: str,\n driver_browser: str,\n driver_executable_path: str,\n driver_options: Optional[List[str]] = None,\n) -> None:\n # This client for the Google API needs to be set for the VISION classification\n # but it is not necessary for the selenium scaper for image downloading\n os.environ[\"GOOGLE_APPLICATION_CREDENTIALS\"] = application_cred_name\n client = vision.ImageAnnotatorClient() # authentification via environment variable\n\n # See here for scraper details:\n # https://towardsdatascience.com/image-scraping-with-python-a96feda8af2d\n wd = get_webdriver(\n driver_browser=driver_browser,\n driver_executable_path=driver_executable_path,\n driver_options=driver_options,\n )\n wd.quit()\n\n\ndef fuzzy_sleep(min_time: int) -> None:\n \"\"\"\n Fuzz wait times between [min_time, min_time*2]\n \"\"\"\n time.sleep(min_time + min_time * random.random())\n\n\ndef fetch_image_urls(\n query: str,\n number_of_links_to_fetch: int,\n wd: webdriver,\n thumb_css: str = \"img.Q4LuWd\",\n img_css: str = \"img.n3VNCb\",\n load_page_css: str = \".mye4qd\",\n sleep_between_interactions: float = 0.4,\n) -> List[str]:\n\n \"\"\"\n Scrape all image urls from Google for search term 'query'. The script continues to load new \n Google search pages as needed until number_of_links_to_fetch is reached.\n query: term to search in Google\n number_of_links_to_fetch: number of links to download from Google for query\n wd: path to the webdriver for selenium (Chrome or Firefox)\n thumb_css, img_css, load_page_css: css tags to identify IMG urls \n sleep_between_interactions: sleep behavior to avoid red flags with Google. \n Fuzzy sleep randomly varies sleep intervals to emulate human users. \n \"\"\"\n\n log = get_logger(\"fetch_image_urls\")\n\n def scroll_to_end(wd):\n wd.execute_script(\"window.scrollTo(0, document.body.scrollHeight);\")\n fuzzy_sleep(sleep_between_interactions)\n\n # build the google query\n search_url = \"https://www.google.com/search?safe=off&site=&tbm=isch&source=hp&q={q}&oq={q}&gs_l=img\"\n\n # load the page\n wd.get(search_url.format(q=query))\n\n image_urls = set()\n image_count = 0\n results_start = 0\n\n while image_count < number_of_links_to_fetch:\n\n scroll_to_end(wd)\n thumbnail_results = wd.find_elements_by_css_selector(\n thumb_css\n ) # get all image thumbnail results\n if len(thumbnail_results) == 0:\n log.warning(f\"found no thumbnails using the selector {thumb_css}\")\n number_results = len(thumbnail_results)\n\n log.info(\n f\"Found: {number_results} search results. Extracting links from {results_start}:{number_results}\"\n )\n\n for img in thumbnail_results[results_start:number_results]:\n # try to click every thumbnail such that we can get the real image behind it\n try:\n img.click()\n fuzzy_sleep(sleep_between_interactions)\n except Exception:\n continue\n\n # extract image urls\n actual_images = wd.find_elements_by_css_selector(img_css)\n if len(actual_images) == 0:\n log.warning(f\"found no images using the selector {img_css}\")\n for actual_image in actual_images:\n if actual_image.get_attribute(\n \"src\"\n ) and \"http\" in actual_image.get_attribute(\"src\"):\n image_urls.add(actual_image.get_attribute(\"src\"))\n image_count += 1\n if image_count >= number_of_links_to_fetch:\n log.info(f\"Found: {image_count} image links, done!\")\n return image_urls\n\n else:\n log.info(f\"Found: {image_count} image links, looking for more ...\")\n load_more_button = wd.find_element_by_css_selector(load_page_css)\n if load_more_button:\n fuzzy_sleep(sleep_between_interactions)\n wd.execute_script(f\"document.querySelector('{load_page_css}').click();\")\n else:\n log.warning(\n f\"{image_count}/{number_of_links_to_fetch} images gathered, but no 'load_more_button' found with the selector '{load_page_css}', returning what we have so far\"\n )\n return image_urls\n\n # move the result startpoint further down\n results_start = len(thumbnail_results)\n\n\ndef save_image(folder_path: str, url: str) -> None:\n\n \"\"\"\n Try to download the image correspond to the url scraped from the function, fetch_image_urls. \n folder_path: file location for saving images \n url: image url to download image from \n \"\"\"\n\n log = get_logger(\"save_image\")\n\n try:\n image_content = requests.get(url).content\n except Exception as e:\n log.error(f\"Could not download {url}: {e}\")\n\n try:\n image_file = io.BytesIO(image_content)\n image = Image.open(image_file).convert(\"RGB\")\n file_path = os.path.join(\n folder_path, hashlib.sha1(image_content).hexdigest()[:10] + \".jpg\"\n )\n\n with open(file_path, \"wb\") as f:\n image.save(f, \"JPEG\", quality=85)\n\n except Exception as e:\n log.error(f\"Could not save image to disk: {e}\")\n pass\n\n\ndef search_and_download(\n search_term: str,\n driver_browser: str,\n driver_executable_path: str,\n home: str,\n driver_options: Optional[List[str]] = None,\n target_path: str = \"./downloads\",\n number_images: int = 5,\n sleep_time: float = 0.4,\n) -> List[str]:\n \"\"\"\n Scrape and save images from Google using selenium to automate Google search. Save the raw images \n collected into the folder, './downloads'. number_images determines the number of images to \n collect for each search term. \n \n search_term: term to use in Google query \n driver_path: path to the webdriver for selenium (Chrome or Firefox)\n home: path to home directory of notebook\n target_path: file location to save images \n number_images: number of images to download for each query\n sleep_time: general rate of sleep activity (lower values raise red flags for Google)\n \"\"\"\n\n target_folder = os.path.join(target_path, search_term)\n\n if not os.path.exists(target_folder):\n os.makedirs(target_folder)\n\n with get_webdriver(\n driver_browser=driver_browser,\n driver_executable_path=driver_executable_path,\n driver_options=driver_options,\n ) as wd:\n urls = fetch_image_urls(\n search_term, number_images, wd=wd, sleep_between_interactions=sleep_time\n )\n\n for url in urls:\n save_image(target_folder, url)\n\n wd.quit()\n os.chdir(home)\n\n return urls\n\n\ndef run_google_vision(img_urls_dict: Dict[str, List[str]]) -> Dict[str, Dict[str, Any]]:\n \"\"\"\n Use the Google vision API to return a set of classification labels for each image collected from \n Google using the search_and_download function. Each label assigned by Google vision is associated \n with a score indicating Google's confidence in the fit fo the label for the image.\n \n img_urls_dict: dictionary containing image_urls\n \"\"\"\n\n get_logger(\"run_google_vision\").info(\"Classifying Imgs. w. Google Vision API...\")\n\n client = vision.ImageAnnotatorClient()\n image = vision.types.Image()\n\n for search_term in img_urls_dict.keys():\n img_urls = img_urls_dict[search_term]\n img_classified_dict = {}\n img_classified_dict[search_term] = {}\n\n for image_uri in img_urls:\n try:\n image.source.image_uri = image_uri\n response = client.label_detection(image=image)\n img_classified_dict[image_uri] = {}\n\n for label in response.label_annotations:\n img_classified_dict[search_term][image_uri] = {}\n img_classified_dict[search_term][image_uri][\n label.description\n ] = label.score\n\n except:\n pass\n\n return img_classified_dict\n\n\ndef write_to_json(to_save: Dict[str, Any], filename: str) -> None:\n \"\"\" add and write dictionary to existing json file\"\"\"\n with open(filename, \"a\") as to_write_to:\n json.dump(to_save, to_write_to, indent=4)\n\n\ndef write_img_classifications_to_file(\n home: str, search_terms: List[str], img_classified_dict: Dict[str, Any]\n) -> None:\n \"\"\"\n Store Google vision's classifications for images in a json file, which can then be retrieved for \n the purposes of filtering and also statistical analyses. \n \n home: home directory of notebook\n search_terms: terms used for querying Google\n img_classified_dict: dictionary of image URLs and classifications from Google Vision\n \"\"\"\n\n log = get_logger(\"write_img_classifications_to_file\")\n\n os.chdir(home + \"/image_classifications\")\n\n for term in search_terms:\n term_data = img_classified_dict[term]\n\n if term_data:\n filename = (\n \"classifications_\"\n + term\n + \"_\"\n + datetime.datetime.now().strftime(\"%Y_%m_%d_%H_%M\")\n + \".json\"\n )\n file_exist = os.path.isfile(filename)\n\n if file_exist:\n log.info(\"File already exists! Appending to file.. \")\n\n with open(filename, encoding=\"utf-8\") as f:\n term_data_orig = json.load(f)\n\n term_data_orig.update(term_data)\n os.remove(filename)\n write_to_json(term_data_orig, filename)\n\n else:\n log.info(\"File new! Saving..\")\n write_to_json(term_data, filename)\n\n os.chdir(home)\n","sub_path":"compsyn/helperfunctions.py","file_name":"helperfunctions.py","file_ext":"py","file_size_in_byte":11567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"216293041","text":"import json\n\nfrom google.oauth2 import service_account\nfrom google.cloud import vision\nfrom google.cloud.vision import types\n\nclass Google():\n\tname = 'Google Vision'\n\n\tdef __init__(self, credentials_path=None, label_key='description', confidence_key='score'):\n\t\tcredentials = None \n\t\tif (credentials_path): # load credentials if path specified\n\t\t\tcredentials = service_account.Credentials.from_service_account_file(credentials_path)\n\n\t\tself.__api = vision.ImageAnnotatorClient(credentials=credentials)\n\n\t\tself.__label_key = label_key\n\t\tself.__confidence_key = confidence_key\n\n\tdef __normalize_labels(self, result):\n\t\tlabels = result.label_annotations\n\n\t\tnormalized_labels = []\n\t\tfor label in labels:\n\t\t\tnormalized_labels.append({\n\t\t\t\t'name': getattr(label, self.__label_key),\n\t\t\t\t'confidence': getattr(label, self.__confidence_key)\n\t\t\t})\n\n\t\treturn normalized_labels\n\n\tdef classify(self, image_file):\n\t\timage = types.Image(content=image_file)\n\t\tresult = self.__api.label_detection(image=image)\n\n\t\treturn self.__normalize_labels(result)\n","sub_path":"recognition_bench/views/services/Google.py","file_name":"Google.py","file_ext":"py","file_size_in_byte":1035,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"135726930","text":"# @Time : 2019/5/16 20:14\n# @Author : Xu Huipeng\n# @Blog : https://brycexxx.github.io/\n\nimport math\n\n\nclass Solution:\n def uniquePaths(self, m: int, n: int) -> int:\n if m == 0 or n == 0: return 0\n dp = [[0] * n for _ in range(m)]\n dp[0][0] = 1\n for i in range(m):\n for j in range(n):\n if i - 1 >= 0:\n dp[i][j] += dp[i - 1][j]\n if j - 1 >= 0:\n dp[i][j] += dp[i][j - 1]\n return dp[-1][-1]\n\n def uniquePaths1(self, m: int, n: int) -> int:\n if m == 0 or n == 0: return 0\n dp = [0] * n\n dp[0] = 1\n for i in range(m):\n for j in range(n):\n if j - 1 >= 0:\n dp[j] += dp[j - 1]\n return dp[-1]\n\n # 数学解法\n def uniquePaths2(self, m: int, n: int) -> int:\n return math.factorial(m + n - 2) // math.factorial(m - 1) // math.factorial(n - 1)\n\n\nif __name__ == '__main__':\n s = Solution()\n print(s.uniquePaths(56, 90))\n print(s.uniquePaths1(56, 90))\n print(s.uniquePaths2(56, 90))\n","sub_path":"uniquePaths.py","file_name":"uniquePaths.py","file_ext":"py","file_size_in_byte":1101,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"175459344","text":"import unittest\r\n\r\ndef fact(n):\r\n answer = 1\r\n if n == 1 or n==0:\r\n return 1\r\n else:\r\n for i in range(1,n+1):\r\n answer = answer * i\r\n return answer\r\n\r\nclass test_factorial(unittest.TestCase):\r\n \r\n def test_fact(self):\r\n n = int(raw_input())\r\n answer = fact(n)\r\n num = 1\r\n while n > 0:\r\n num = num * n\r\n n -= 1\r\n self.assertEqual(answer, num, \"Not equal\")\r\n\r\nif __name__ == '__main__':\r\n unittest.main()\r\n ","sub_path":"TestPython/big_factorial.py","file_name":"big_factorial.py","file_ext":"py","file_size_in_byte":515,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"504355301","text":"#!python3\n# -*- coding: utf-8 -*-\nimport os\nimport 
os.path\nimport codecs\nimport filecmp\nfrom domain.fileentry import JavaFileEntry \n \nclass ProjectsComparator(object):\n def __init__(self,base_root,target_root):\n self.__base_root = base_root\n self.__target_root = target_root\n\n\n def get_diffs(self,path,diffs,key):\n return self.__get_diffs_for_module(self.__base_root+path,self.__target_root+path,[],key)\n\n\n def __get_diffs_for_module(self,base,target,diffs,key):\n base_list=os.listdir(base)\n for project_entry in base_list:\n base_file = base+'\\\\'+project_entry\n if os.path.isfile(base_file) and project_entry.endswith(key):\n target_file = target+'\\\\'+project_entry\n diff_entry = self.get_diff_javaentry(base_file,target_file,project_entry)\n if diff_entry != None:\n diffs[len(diffs):] = [diff_entry] \n elif os.path.isdir(base+project_entry):\n self.__get_diffs_for_module(base+'\\\\'+project_entry+'\\\\',target+'\\\\'+project_entry+'\\\\',diffs,key)\n return diffs \n \n def get_diff_javaentry(self,base,target,entry):\n if entry.endswith(\"java\"):\n javafile = JavaFileEntry(self.getpackage(base)+\".\"+self.get_classname(entry),os.path.relpath(base,self.__base_root),'NEW')\n else:\n javafile = JavaFileEntry(entry,os.path.relpath(base,self.__base_root),'NEW')\n if os.path.exists(target):\n if not self.isequal(base,target):\n javafile.status = \"MODIFIED\" \n return javafile\n return None\n return javafile\n\n def isequal(self,base,target):\n if filecmp.cmp(base,target,shallow=False):\n return True\n return False\n \n def getpackage(self,classfile):\n with codecs.open(classfile,\"r\",\"utf-8\") as file:\n for line in file:\n packageIndex=line.find(\"package\")\n if packageIndex !=-1:\n packageName=line[packageIndex+len(\"package\")+1:line.find(\";\")]\n return packageName \n\n\n def get_root_imports(self,javaentry,base,target,roots):\n for project_entry in os.listdir(base):\n if os.path.isfile(base+project_entry) and project_entry.endswith(\".java\"):\n if self.is_root(javaentry,base+project_entry,target+project_entry):\n roots = self.update_roots(javaentry,base+project_entry,roots) \n break\n elif os.path.isdir(base+project_entry):\n self.get_root_imports(javaentry,base+'\\\\'+project_entry+'\\\\',target+'\\\\'+project_entry+'\\\\',roots) \n return roots \n\n def is_root(self,javaentry, baseclassfile,targetclassfile):\n if not os.path.exists(targetclassfile):\n return False\n if not self.isequal(baseclassfile,targetclassfile): \n return False\n with codecs.open(baseclassfile,\"r\") as f:\n try:\n for line in f: \n if line.find(javaentry.name)!=-1:\n return True\n if line.find(self.get_classname(javaentry.name))!=-1:\n return True \n except UnicodeDecodeError as ude:\n # print(\"WARNING UnicodeDecodeError\")\n # print(baseclassfile)\n return False \n \n def update_roots(self,javaentry, root_file, roots):\n root_name = self.get_classname_frompath(root_file)\n for root in roots:\n if root.name == root_name:\n root.add_import(javaentry)\n return roots\n new_root = JavaFileEntry(root_name,root_file, \"ROOT\") \n new_root.add_import(javaentry)\n roots.append(new_root) \n return roots\n\n\n def get_classname(self,file):\n return file[:file.find(\"java\")-1]\n\n def get_classname_frompath(self,path):\n package = self.getpackage(path)\n file_name = os.path.split(path)[1]\n return package +'.'+ file_name[:file_name.find(\"java\")-1]\n\n\n\n\nif __name__==\"__main__\":\n asmspath = 'D:\\\\ipimenov_ASMS_V4.0.0\\\\kcc_appserver_ms\\\\RHQ460'\n rhq46path = 'D:\\\\ipimenov_ASMS_V4.0.0\\\\kcc_appserver_ms\\\\RHQ_4_6_0'\n rhq49path = 
'D:\\\\ipimenov_ASMS_V4.0.0\\\\kcc_appserver_ms\\\\RHQ_4_9_0'\n alertmodule = \"modules\\\\enterprise\\\\server\\\\jar\\\\src\\\\main\\\\java\\\\org\\\\rhq\\enterprise\\\\server\\\\alert\\\\\"\n comparator = ProjectsComparator()\n diffs = []\n #comparator.get_diffs(asmspath+'\\\\'+alertmodule+'\\\\' , rhq46path+'\\\\'+alertmodule+'\\\\' , diffs)\n entry = JavaFileEntry(\"test\",\"test\", \"test\")\n diffs.append(entry)\n\t\n\n","sub_path":"project-manager/projectcompare/projectscomparator.py","file_name":"projectscomparator.py","file_ext":"py","file_size_in_byte":4776,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"232008620","text":"import torch\nimport torchvision\nimport torchvision.transforms as transforms\nimport torch.optim as optim\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\n\n# Load CIFAR 10 dataset\n\ntransform = transforms.Compose(\n [transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])\n\ntrainset = torchvision.datasets.CIFAR10(root='./data', train=True,\n download=True, transform=transform)\ntrainloader = torch.utils.data.DataLoader(trainset, batch_size=4,\n shuffle=True, num_workers=0)\n\ntestset = torchvision.datasets.CIFAR10(root='./data', train=False,\n download=True, transform=transform)\ntestloader = torch.utils.data.DataLoader(testset, batch_size=4,\n shuffle=False, num_workers=0)\n\nclasses = ('plane', 'car', 'bird', 'cat',\n 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')\n\n# Define network\n\nclass Net(nn.Module):\n def __init__(self):\n super(Net, self).__init__()\n self.conv1 = nn.Conv2d(3, 6, 5)\n self.maxp1 = nn.MaxPool2d(2, stride=2)\n self.conv2 = nn.Conv2d(6, 16, 5)\n self.maxp2 = nn.MaxPool2d(2, stride=2)\n\n self.fc1 = nn.Linear(400, 120, bias=True)\n self.fc2 = nn.Linear(120, 84, bias=True)\n self.fc3 = nn.Linear(84, 10, bias=True)\n def forward(self, x):\n out = F.relu(self.conv1(x))\n out = self.maxp1(out)\n out = F.relu(self.conv2(out))\n out = self.maxp2(out)\n\n out = out.view(-1, 400)\n \n out = F.relu(self.fc1(out))\n out = F.relu(self.fc2(out))\n return self.fc3(out)\n\n# copy network to cuda\nnet = Net()\nnet.cuda()\n# Define cross entropy loss and SGD optimizer\ncriterion = nn.CrossEntropyLoss()\noptimizer = optim.SGD(net.parameters(),lr=0.001, momentum=0.9)\n\nfor epoch in range(2): \n train_loss = 0.0\n for i, data in enumerate(trainloader, 0):\n inputs, labels = data\n\n # copy data to cuda\n train_input = Variable(inputs.cuda()) \n train_label = Variable(labels.cuda())\n\n # zero the parameter gradients\n optimizer.zero_grad()\n\n # forward + backward + optimize\n train_out = net.forward(train_input)\n loss = criterion(train_out, train_label)\n loss.backward()\n optimizer.step()\n\n # print statistics\n train_loss += loss.item()\n if i % 2000 == 1999: # print every 2000 mini-batches\n print('[%d, %5d] loss: %.3f' %\n (epoch + 1, i + 1, train_loss / 2000))\n train_loss = 0.0\n\nprint('Finished Training')\n","sub_path":"CIFARClassifier/cifar10_fw.py","file_name":"cifar10_fw.py","file_ext":"py","file_size_in_byte":2703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"287696174","text":"# -*- coding: utf-8 -*-\nimport pymysql\n\ndef dbhandle():\n con = pymysql.connect(host=\"localhost\",\n user = \"root\",\n password = \"123456\",\n charset = \"utf8\",\n db = \"pachong\")\n return con\n\nclass ZhilianPipeline(object):\n def process_item(self, item, 
spider):\n con = dbhandle()\n cur = con.cursor()\n sql = \"insert into zhilian(jname,cname,salary,workplace,url) values(%s,%s,%s,%s,%s)\"\n lis = [item['jname'], item['cname'], item['salary'], item['workplace'], item['url']]\n\n try:\n cur.execute(sql,lis)\n con.commit()\n except Exception as e:\n print(e)\n con.rollback()\n return item\n","sub_path":"zhilian/zhilian/pipelines.py","file_name":"pipelines.py","file_ext":"py","file_size_in_byte":772,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"422410761","text":"import logging\n\nfrom django.http import HttpResponseRedirect\n\nfrom application.business_logic import get_application, awaiting_pith_dbs_action_from_user, find_dbs_status\nfrom application.forms.PITH_forms.PITH_children_check_form import PITHChildrenCheckForm\nfrom application.models import AdultInHome, ApplicantHomeAddress, ChildInHome\nfrom application.utils import get_id, build_url\nfrom application.views.PITH_views.base_views.PITH_radio_view import PITHRadioView\n\n# Initiate logging\nlog = logging.getLogger('')\n\n\nclass PITHChildrenCheckView(PITHRadioView):\n\n template_name = 'PITH_templates/PITH_children_check.html'\n form_class = PITHChildrenCheckForm\n success_url = ('PITH-Children-Details-View', 'PITH-Summary-View', 'Task-List-View')\n application_field_name = 'children_in_home'\n\n def form_valid(self, form):\n\n application_id = get_id(self.request)\n\n super().update_db(application_id)\n\n choice_bool = get_application(\n application_id, self.application_field_name)\n\n log.debug(\n 'Set answer to children in the home for application: ' + application_id)\n\n context = {\n 'id': get_id(self.request)\n }\n\n if choice_bool:\n\n num_children = len(ChildInHome.objects.filter(\n application_id=application_id))\n\n log.debug(\n 'Retrieve the number of adults in the home: ' + str(num_children))\n\n adults_context = {\n 'children': num_children,\n 'remove': 0\n }\n\n context.update(adults_context)\n\n else:\n\n # Remove any existing children details.\n self.__clear_children(application_id)\n\n log.debug(\n 'Children in the home cleared down for application: ' + application_id)\n\n return HttpResponseRedirect(self.get_success_url(get=context))\n\n def get_choice_url(self, app_id):\n\n insufficient_dbs = self.get_awaiting_user_pith_dbs_action(app_id)\n on_update = not insufficient_dbs\n\n yes_to_children_in_home, yes_to_sufficient_adult_dbs, no_to_sufficient_adult_dbs = self.success_url\n choice_bool = get_application(app_id, self.application_field_name)\n\n if choice_bool:\n log.debug('Set children in the home to true for application: ' + app_id)\n return yes_to_children_in_home\n else:\n if on_update:\n log.debug('No children in the home and DBS sufficient for all adults in the home: ' + app_id)\n return yes_to_sufficient_adult_dbs\n else:\n log.debug('No children in the home and DBS insufficient for all adults in the home: ' + app_id)\n return no_to_sufficient_adult_dbs\n\n def __clear_children(self, app_id):\n\n children = ChildInHome.objects.filter(application_id=app_id)\n\n for child in children:\n\n child.delete()\n log.debug('Removing child ' + str(child.pk))\n\n def get_awaiting_user_pith_dbs_action(self, application_id):\n\n result = awaiting_pith_dbs_action_from_user(\n find_dbs_status(adult, adult)\n for adult in AdultInHome.objects.filter(application_id=application_id))\n\n return 
result\n","sub_path":"application/views/PITH_views/PITH_children_check.py","file_name":"PITH_children_check.py","file_ext":"py","file_size_in_byte":3265,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"37520136","text":"import os\nimport time\nimport pymongo\nfrom pymongo import MongoClient\n\nos.system('modprobe w1-gpio')\nos.system('modprobe w1-therm')\n\ntemp_sensor = '/sys/bus/w1/devices/28-031725077dff/w1_slave'\n\nuri = \"mongodb://cooking:wsurop18@ds223268.mlab.com:23268/wsurop_cooking\"\nclient = MongoClient(uri)\n\ndb = client.wsurop_cooking\nglo_temp = -273\n\ndb.temperatures.remove({\"units\" : \"Celsius\"})\n\ndb.temperatures.insert_one(\n {'temperature': glo_temp, 'units': \"Celsius\"}\n )\n\ndef temp_raw():\n\n f = open(temp_sensor, 'r')\n lines = f.readlines()\n f.close()\n return lines\n\ndef read_temp():\n\n lines = temp_raw()\n while lines[0].strip()[-3:] != 'YES':\n time.sleep(0.2)\n lines = temp_raw()\n\n\n temp_output = lines[1].find('t=')\n\n if temp_output != -1:\n temp_string = lines[1].strip()[temp_output+2:]\n temp_c = float(temp_string) / 1000.0\n temp_c2 = int(temp_c)\n \n \n #temp_f = temp_c * 9.0 / 5.0 + 32.0\n return temp_c2\n\nwhile True:\n curr_temp = read_temp()\n \n print(\"waiting for temp change\")\n if glo_temp != curr_temp:\n print(\"temp changed!\")\n print(curr_temp)\n db.temperatures.insert_one(\n {'temperature': read_temp(), 'units': \"Celsius\"}\n )\n glo_temp = curr_temp\n time.sleep(1)\n\n","sub_path":"Raspberyy pi/Trash/working_1408.py","file_name":"working_1408.py","file_ext":"py","file_size_in_byte":1331,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"179999859","text":"#!/usr/bin/python3\n\"\"\" Place Module for HBNB project \"\"\"\nfrom models.base_model import BaseModel, Base\nfrom models.amenity import Amenity\nfrom sqlalchemy import Column, String, Integer, Float, ForeignKey\nfrom sqlalchemy import Table\nfrom sqlalchemy.orm import relationship\nimport os\n\nplace_amenity = Table(\n \"place_amenity\", Base.metadata,\n Column(\"place_id\", String(60), ForeignKey(\"places.id\"),\n primary_key=True, nullable=False),\n Column(\"amenity_id\", String(60), ForeignKey(\"amenities.id\"),\n primary_key=True, nullable=False))\n\n\nclass Place(BaseModel, Base):\n \"\"\"This Class stores information of places\"\"\"\n __tablename__ = \"places\"\n if os.getenv(\"HBNB_TYPE_STORAGE\") == \"db\":\n city_id = Column(String(60), ForeignKey(\"cities.id\"), nullable=False)\n user_id = Column(String(60), ForeignKey(\"users.id\"), nullable=False)\n name = Column(String(128), nullable=False)\n description = Column(String(1024), nullable=True)\n number_rooms = Column(Integer, default=0, nullable=False)\n number_bathrooms = Column(Integer, default=0, nullable=False)\n max_guest = Column(Integer, default=0, nullable=False)\n price_by_night = Column(Integer, default=0, nullable=False)\n latitude = Column(Float, nullable=True)\n longitude = Column(Float, nullable=True)\n amenity_ids = []\n reviews = relationship(\n \"Review\",\n backref=\"place\",\n cascade=\"all\")\n amenities = relationship(\n \"Amenity\",\n secondary=\"place_amenity\",\n viewonly=False,\n back_populates=\"place_amenities\"\n )\n else:\n city_id = \"\"\n user_id = \"\"\n name = \"\"\n description = \"\"\n number_rooms = 0\n number_bathrooms = 0\n max_guest = 0\n price_by_night = 0\n latitude = 0.0\n longitude = 0.0\n amenity_ids = []\n\n if os.getenv(\"HBNB_TYPE_STORAGE\") != \"db\":\n 
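# NOTE: FileStorage is referenced in the reviews getter below but never imported in this module; models.storage.all(Review) (the pattern used by the amenities getter) is the likely intent.\n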
@property\n def reviews(self):\n \"\"\"Getter for reviews\"\"\"\n from models.review import Review\n\n list_reviews = []\n for c in FileStorage.all(Review).values():\n if c.place_id == self.id:\n list_reviews.append(c)\n return list_reviews\n\n @property\n def amenities(self):\n \"\"\"Getter for amenities\"\"\"\n from models.amenity import Amenity\n from models import storage\n\n list_amenities = []\n for c in storage.all(Amenity).values():\n if c.id in self.amenity_ids:\n list_amenities.append(c)\n return list_amenities\n\n @amenities.setter\n def amenities(self, value):\n \"\"\"Setter for amenities\"\"\"\n from models.amenity import Amenity\n\n if isinstance(value, Amenity):\n self.amenity_ids.append(value.id)\n","sub_path":"models/place.py","file_name":"place.py","file_ext":"py","file_size_in_byte":2933,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"212054631","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport sys\nimport os\nimport subprocess\nimport configparser\nimport datetime\nimport dpkgunlocker.dpkgunlockermanager as DpkgUnlockerManager\nimport copy\n\n\nBASE_DIR=\"/usr/share/lliurex-flavours-selector/\"\n\n\nclass flavourSelectorManager:\n\n\tdef __init__(self):\n\n\t\tself.core=Core.Core.get_core()\n\t\tself.supported_flavours=self.core.supported_flavours\n\t\tself.banners=self.core.banners\n\t\tself.flavour_list={}\n\t\tself.order=0\n\t\tself.result_install={}\n\t\tself.server_meta_available=[\"lliurex-meta-server\",\"lliurex-meta-server-lite\"]\n\t\tself.client_meta_available=[\"lliurex-meta-client\",\"lliurex-meta-client-lite\",\"lliurex-meta-minimal-client\"]\n\t\tself.desktop_meta_available=[\"lliurex-meta-desktop\",\"lliurex-meta-desktop-lite\"]\n\t\tself.minimal_client_installed=False\n\t\tself.flavours_installed=[]\n\t\tself.defaultMirror = 'llx21'\n\t\tself.defaultVersion = 'focal'\n\t\tself.textsearch_mirror=\"/mirror/\"+str(self.defaultMirror)\n\t\tself.sourcesListPath='/etc/apt/sources.list'\n\t\tself.initialNumberPackages=[]\n\t\tself.numberPackagesInstalled=[]\n\t\tself.numberPackagesUnpacked=[]\n\t\tself.progressInstallation=0\n\t\tself.progressInstallationPercentage=0.00\n\t\tself.progressUnpacked=0\n\t\tself.progressUnpackedPercentage=0.00\n\t\tself.aptIsRunning=False\n\t\tself.total_flavours=0\n\t\tself.deprecated_flavours=[\"lliurex-meta-music\",\"lliurex-meta-infantil\"]\n\t\tlog_msg=\"---------------------------------------------------------\\n\"+\"LLIUREX FLAVOUR SELECTOR STARTING AT: \" + datetime.datetime.today().strftime(\"%d/%m/%y %H:%M:%S\") +\"\\n---------------------------------------------------------\"\n\t\tself.log(log_msg)\n\t\tself.dpkgUnlocker=DpkgUnlockerManager.DpkgUnlockerManager()\n\n\t\n\tdef loadFile(self,path):\n\n\t\ttry:\n\t\t\tconfig = configparser.ConfigParser()\n\t\t\tconfig.optionxform=str\n\t\t\tconfig.read(path)\n\t\t\tif config.has_section(\"FLAVOUR\"):\n\t\t\t\tinfo={}\n\t\t\t\tinfo[\"pkg\"]=config.get(\"FLAVOUR\",\"pkg\")\n\t\t\t\tinfo[\"name\"]=config.get(\"FLAVOUR\",\"name\")\n\t\t\t\tinfo[\"show\"]=True\n\t\t\t\tinfo[\"incompatible\"]=False\n\t\t\t\tif os.path.exists(self.core.banners+info[\"pkg\"]+\".png\"):\n\t\t\t\t\tinfo[\"banner\"]=self.core.banners+info[\"pkg\"]+\".png\"\n\t\t\t\telse:\n\t\t\t\t\tinfo[\"banner\"]=None\n\n\t\t\t\treturn info\n\t\t\t\t\n\t\texcept Exception as e:\n\t\t\treturn None\n\n\t#def loadFile\n\n\tdef getSupportedFlavour(self):\n\n\t\tfor item in sorted(os.listdir(self.supported_flavours)):\n\t\t\tif 
os.path.isfile(self.supported_flavours+item):\n\t\t\t\ttmp_info=self.loadFile(self.supported_flavours+item)\n\t\t\t\tif tmp_info!=None:\n\t\t\t\t\ttmp_info[\"installed\"]=self.isInstalled(tmp_info[\"pkg\"])\n\n\t\t\t\t\tbase_apt_cmd = \"apt-cache policy %s \"%tmp_info[\"pkg\"]\n\t\t\t\t\tp=subprocess.Popen([base_apt_cmd],shell=True,stdout=subprocess.PIPE,stderr=subprocess.PIPE)\t\n\t\t\t\t\toutput=p.communicate()[0]\n\t\t\t\t\tif type(output) is bytes:\n\t\t\t\t\t\toutput=output.decode()\n\n\t\t\t\t\tif tmp_info[\"pkg\"] not in output:\n\t\t\t\t\t\tavailable=False\n\t\t\t\t\telse:\t\n\t\t\t\t\t\tversion=output.split(\"\\n\")[4]\n\t\t\t\t\t\tif version !='':\n\t\t\t\t\t\t\tavailable=True\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tavailable=False\n\t\t\t\t\t\t\n\t\t\t\t\tif available:\n\t\t\t\t\t\tif tmp_info[\"pkg\"] not in self.deprecated_flavours:\n\t\t\t\t\t\t\tself.flavour_list[self.order]=tmp_info\n\t\t\t\t\t\t\tself.order+=1\n\t\t\t\t\tif tmp_info[\"installed\"]:\n\t\t\t\t\t\ttmp_info[\"show\"]=False\n\t\t\t\t\t\tself.flavours_installed.append(tmp_info[\"pkg\"])\n\t\t\n\t\t#if len(self.flavours_installed)>0:\n\t\tlog_msg=\"Current flavours installed: \"+str(self.flavours_installed)\n\t\tself.log(log_msg)\n\t\tself.checkMetaInstalled()\n\t\tself.showHideMeta()\n\t\tself.createAlternatives()\n\n\t\tfor item in self.flavour_list:\n\t\t\tif self.flavour_list[item][\"show\"]:\n\t\t\t\tself.total_flavours+=1\n\t\t#else:\n\t\t#\tlog_msg=\"No flavour detected\"\n\t\t#\tself.log(log_msg)\n\t\n\n\t#def getSupportedFlavours\t\n\n\tdef isInstalled(self,pkg):\n\t\t\n\t\tp=subprocess.Popen([\"dpkg-query -W -f='${db:Status-Status}' %s\"%pkg],shell=True,stdout=subprocess.PIPE,stderr=subprocess.PIPE)\n\t\toutput=p.communicate()[0]\n\n\t\tif type(output) is bytes:\n\t\t\toutput=output.decode()\n\t\t\n\t\tif output==\"installed\":\n\t\t\treturn True\n\t\t\t\n\t\treturn False\n\t\t\n\t#def isInstalled\n\n\tdef checkMetaInstalled(self):\n\n\t\tself.server=False\n\t\tself.server_lite=False\n\t\tself.client=False\n\t\tself.client_lite=False\n\t\tself.minimal_client=False\n\t\tself.desktop=False\n\t\tself.desktop_lite=False\n\t\tself.pime=False\n\t\tself.music=False\n\t\tself.infantil=False\n\t\tself.empty=False\n\n\t\tif len(self.flavours_installed)>0:\n\t\t\tfor item in self.flavours_installed:\n\t\t\t\tif item==\"lliurex-meta-server\":\n\t\t\t\t\tself.server=True\n\t\t\t\t\tself.server_lite=False\n\t\t\t\t\tself.desktop=False\n\t\t\t\t\tself.desktop_lite=False\n\t\t\t\t\tself.pime=False\n\t\t\t\t\tbreak\n\t\t\t\telif item==\"lliurex-meta-server-lite\":\n\t\t\t\t\tself.server_lite=True\n\t\t\t\t\tself.desktop=False\n\t\t\t\t\tself.desktop_lite=False\n\t\t\t\t\tself.pime=False\n\t\t\t\telif item==\"lliurex-meta-client\":\n\t\t\t\t\tself.client=True\n\t\t\t\t\tself.client_lite=False\n\t\t\t\t\tself.minimal_client=False\n\t\t\t\t\tself.desktop=False\n\t\t\t\t\tself.desktop_lite=False\n\t\t\t\t\tself.pime=False\n\t\t\t\t\tbreak\n\t\t\t\telif item==\"lliurex-meta-client-lite\":\n\t\t\t\t\tself.client_lite=True\n\t\t\t\t\tself.minimal_client=False\n\t\t\t\t\tself.desktop=False\n\t\t\t\t\tself.desktop_lite=False\n\t\t\t\t\tself.pime=False\n\t\t\t\telif item==\"lliurex-meta-minimal-client\":\n\t\t\t\t\tif not self.client_lite:\n\t\t\t\t\t\tself.minimal_client=True\n\t\t\t\t\tself.desktop=False\n\t\t\t\t\tself.desktop_lite=False\n\t\t\t\t\tself.pime=False\n\t\t\t\telif item==\"lliurex-meta-desktop\":\n\t\t\t\t\tself.desktop=True\n\t\t\t\t\tself.desktop_lite=False\n\t\t\t\t\tself.pime=False\n\t\t\t\telif 
item==\"lliurex-meta-desktop-lite\":\n\t\t\t\t\tself.desktop_lite=True\n\t\t\t\t\tself.pime=False\n\t\t\t\telif item==\"lliurex-meta-pime\":\n\t\t\t\t\tself.pime=True\n\t\t\t\telif item==\"lliurex-meta-music\":\n\t\t\t\t\tself.music=True\n\t\t\t\telif item==\"lliurex-meta-infantil\":\n\t\t\t\t\tself.infantil=True\n\t\telse:\n\t\t\tself.empty=True\n\n\t#def checkMetaInstalled\n\t\n\tdef showHideMeta(self):\n\n\n\t\tfor item in self.flavour_list:\n\t\t\tif self.server or self.server_lite:\n\t\t\t\tif self.flavour_list[item][\"pkg\"] in self.client_meta_available:\n\t\t\t\t\tself.flavour_list[item][\"incompatible\"]=True\n\t\t\t\t\tself.flavour_list[item][\"show\"]=False\n\t\t\t\telif self.flavour_list[item][\"pkg\"] in self.desktop_meta_available:\n\t\t\t\t\tself.flavour_list[item][\"incompatible\"]=True\t\n\t\t\t\t\tself.flavour_list[item][\"show\"]=False\n\t\t\t\telif self.flavour_list[item][\"pkg\"]==\"lliurex-meta-pime\":\n\t\t\t\t\tself.flavour_list[item][\"incompatible\"]=True\n\t\t\t\t\tself.flavour_list[item][\"show\"]=False\t\n\t\t\t\telif self.flavour_list[item][\"pkg\"]==\"lliurex-meta-server-lite\" and self.server:\n\t\t\t\t\tself.flavour_list[item][\"show\"]=False\n\t\t\t\n\t\t\telif self.client or self.client_lite or self.minimal_client:\n\t\t\t\tif self.flavour_list[item][\"pkg\"] in self.server_meta_available: \n\t\t\t\t\tself.flavour_list[item][\"incompatible\"]=True\n\t\t\t\t\tself.flavour_list[item][\"show\"]=False\n\t\t\t\telif self.flavour_list[item][\"pkg\"] in self.desktop_meta_available:\n\t\t\t\t\tself.flavour_list[item][\"incompatible\"]=True\t\n\t\t\t\t\tself.flavour_list[item][\"show\"]=False\n\t\t\t\telif self.flavour_list[item][\"pkg\"]==\"lliurex-meta-pime\":\n\t\t\t\t\tself.flavour_list[item][\"incompatible\"]=True\n\t\t\t\t\tself.flavour_list[item][\"show\"]=False\t\n\t\t\t\telif self.flavour_list[item][\"pkg\"] in [\"lliurex-meta-client-lite\",\"lliurex-meta-minimal-client\"] and self.client:\n\t\t\t\t\tself.flavour_list[item][\"show\"]=False\n\t\t\t\telif self.client_lite and self.flavour_list[item][\"pkg\"]==\"lliurex-meta-minimal-client\":\n\t\t\t\t\tself.flavour_list[item][\"show\"]=False\n\t\t\t\telif self.minimal_client and self.flavour_list[item][\"pkg\"]==\"lliurex-meta-client\":\n\t\t\t\t\tself.flavour_list[item][\"show\"]=False\n\n\n\t\t\telif self.desktop:\n\t\t\t\tif self.flavour_list[item][\"pkg\"] not in [\"lliurex-meta-server\",\"lliurex-meta-client\",\"lliurex-meta-music\",\"lliurex-meta-infantil\"]: \n\t\t\t\t\tself.flavour_list[item][\"show\"]=False\n\n\t\t\telif self.desktop_lite:\n\t\t\t\tif self.flavour_list[item][\"pkg\"] not in [\"lliurex-meta-desktop\",\"lliurex-meta-server-lite\",\"lliurex-meta-client-lite\",\"lliurex-meta-music\",\"lliurex-meta-infantil\"]: \n\t\t\t\t\tself.flavour_list[item][\"show\"]=False\n\t\t\n\t\t\telif self.music or self.infantil:\n\t\t\t\tif self.flavour_list[item][\"pkg\"] not in [\"lliurex-meta-desktop\",\"lliurex-meta-server\",\"lliurex-meta-client\"]: \n\t\t\t\t\tself.flavour_list[item][\"show\"]=False\t\n\t\t\t\n\t\t\telif self.pime:\n\t\t\t\tif self.flavour_list[item][\"pkg\"] not in [\"lliurex-meta-desktop\",\"lliurex-meta-server-lite\",\"lliurex-meta-client-lite\",\"lliurex-meta-music\",\"lliurex-meta-infantil\"]: \n\t\t\t\t\tself.flavour_list[item][\"show\"]=False\n\t\n\t\t\telif self.empty:\n\t\t\t\tif self.flavour_list[item][\"pkg\"] not in 
[\"lliurex-meta-server\",\"lliurex-meta-client\",\"lliurex-meta-desktop\",\"lliurex-meta-music\",\"lliurex-meta-infantil\"]:\n\t\t\t\t\tself.flavour_list[item][\"show\"]=False\n\t#def showHideMeta\t\t\t\n\n\tdef createAlternatives(self):\n\n\t\tself.client_desktop_alternatives=[]\n\t\tself.client_lite_alternatives=[]\n\t\tself.server_alternatives=[]\n\t\tself.desktop_alternatives=[]\n\n\t\tfor item in self.flavour_list:\n\t\t\tif self.desktop or self.desktop_lite or self.music or self.infantil or self.empty or self.pime:\n\t\t\t\tif not self.client and not self.client_lite and not self.minimal_client:\n\t\t\t\t\tif self.flavour_list[item][\"pkg\"] in self.client_meta_available and self.flavour_list[item][\"pkg\"] not in self.flavours_installed:\n\t\t\t\t\t\ttmp=[]\n\t\t\t\t\t\ttmp.append(self.flavour_list[item][\"name\"])\n\t\t\t\t\t\ttmp.append(self.flavour_list[item][\"pkg\"])\n\t\t\t\t\t\tself.client_desktop_alternatives.append(tmp)\n\n\t\t\t\tif self.flavour_list[item][\"pkg\"] in self.server_meta_available and self.flavour_list[item][\"pkg\"] not in self.flavours_installed:\n\t\t\t\t\ttmp=[]\n\t\t\t\t\ttmp.append(self.flavour_list[item][\"name\"])\n\t\t\t\t\ttmp.append(self.flavour_list[item][\"pkg\"])\n\t\t\t\t\tself.server_alternatives.append(tmp)\n\n\t\t\t\tif self.empty:\n\t\t\t\t\tif self.flavour_list[item][\"pkg\"] in self.desktop_meta_available and self.flavour_list[item][\"pkg\"] not in self.flavours_installed:\n\t\t\t\t\t\ttmp=[]\n\t\t\t\t\t\ttmp.append(self.flavour_list[item][\"name\"])\n\t\t\t\t\t\ttmp.append(self.flavour_list[item][\"pkg\"])\n\t\t\t\t\t\tself.desktop_alternatives.append(tmp)\n\t\n\t\t\t\n\t\t\telif self.minimal_client:\n\t\t\t\tif self.flavour_list[item][\"pkg\"] in [\"lliurex-meta-client\",\"lliurex-meta-client-lite\"] and self.flavour_list[item][\"pkg\"] not in self.flavours_installed:\n\t\t\t\t\ttmp=[]\n\t\t\t\t\ttmp.append(self.flavour_list[item][\"name\"])\n\t\t\t\t\ttmp.append(self.flavour_list[item][\"pkg\"])\n\t\t\t\t\tself.client_lite_alternatives.append(tmp)\n\n\t#def createAlternatives\n\n\tdef isMirrorInSourceslist(self):\n\n\t\tcount=0\n\t\tif os.path.exists(self.sourcesListPath):\n\t\t\torigsources=open(self.sourcesListPath,'r')\n\t\t\tfor line in origsources:\n\t\t\t\tif self.textsearch_mirror in line:\n\t\t\t\t\tcount=+1\n\n\t\tif count==0:\t\t\t\n\t\t\treturn False\n\t\telse:\n\t\t\treturn True\t \n\n\t#def isMirrorInSourceslist\t\t\t\t\t\n\n\tdef isIncompatibleMeta(self,meta):\n\n\t\tfor item in self.flavour_list:\n\t\t\tif self.flavour_list[item][\"pkg\"]==meta:\n\t\t\t\tif self.flavour_list[item][\"incompatible\"]:\n\t\t\t\t\treturn True\n\t\t\t\telse:\n\t\t\t\t\treturn False\n\n\t#def isIncompatibleMeta\t\t\n\t\n\tdef installMeta(self,meta,mirrorRespository=None):\n\n\t\tself.thread_ret=-1\n\t\t\n\t\tmusic_meta=False\n\t\tcmd_base='lliurex-preseed --update; apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y '\n\t\t\n\t\tif self.needHwe():\n\t\t\tcmd_base=cmd_base+'xserver-xorg-video-dummy-hwe-18.04 '\n\n\t\tcmd=cmd_base+meta\n\n\t\tif meta==\"lliurex-meta-infantil\":\n\t\t\tself.addRecursosRepository()\n\t\telif meta==\"lliurex-meta-music\":\n\t\t\tmusic_meta=True\n\t\t\tself.addMusicRepository()\t\t\n\n\t\tlog_msg=\"-New flavours to install:\"+meta\n\t\tself.log(log_msg)\n\t\t\n\t\tp=subprocess.Popen([cmd],shell=True,stdout=subprocess.PIPE,stderr=subprocess.PIPE)\t\n\t\toutput,perror=p.communicate()\n\n\t\tif len(output)>0:\n\t\t\tif type(output) is bytes:\n\t\t\t\toutput=output.decode()\n\t\tif 
len(perror)>0:\n\t\t\tif type(perror) is bytes:\n\t\t\t\tperror=perror.decode()\t\t\n\n\t\tself.thread_ret=p.returncode\n\t\tself.flavour_error=perror\n\n\t\tif music_meta:\n\t\t\tself.removeMusicRepository()\n\n\t\tif self.thread_ret==0:\n\t\t\tlog_msg=\"Installation of new flavour OK\"\n\t\t\tself.log(log_msg)\n\t\t\tif mirrorRespository:\n\t\t\t\tself.writeMirrorRepository()\n\t\telse:\n\t\t\tlog_msg=\"Error during installation of new flavours. \" + self.flavour_error \n\t\t\tself.log(log_msg)\t\n\n\t# def InstallMeta\t\n\n\tdef needHwe(self):\n\n\t\tcmd='dpkg -l | grep \"hwe\" | grep \"^i[i]\"'\n\t\tp=subprocess.Popen(cmd,shell=True,stdout=subprocess.PIPE)\n\t\toutput=p.communicate()[0]\n\n\t\tif len(output)>0:\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False\n\t\n\t#def needHwe\n\t\n\tdef addRecursosRepository(self):\n\n\t\tcmdInfantil=[\"sudo\",\"/usr/bin/add-apt-repository\", \"deb http://lliurex.net/focal focal preschool\"]\n\t\tx=subprocess.Popen((cmdInfantil),stdin=subprocess.PIPE,stdout=subprocess.PIPE)\n\t\tlog_msg=\"Adding repository recursos\"\n\t\tself.log(log_msg)\n\t\tx.communicate(b\"\\n\")[0]\n\n\t#def addRecursosRepository\n\n\tdef addMusicRepository(self):\n\t\t\n\t\tlxRepos=[\"deb http://ppa.launchpad.net/kxstudio-debian/libs/ubuntu focal main\"]\n\n\t\tcmdMusica=[\"sudo\",\"/usr/bin/add-apt-repository\"]\n\n\t\tfor repo in lxRepos:\n\t\t\tlog_msg=\"Adding repository \"+repo\n\t\t\tself.log(log_msg)\n\t\t\tcmdAux=cmdMusica+[repo]\n\t\t\tx=subprocess.Popen((cmdAux),stdin=subprocess.PIPE,stdout=subprocess.PIPE)\n\t\t\tx.communicate(b\"\\n\")[0]\n\n\t#def addMusicRepository\t\n\n\tdef removeMusicRepository(self):\n\n\t\tsourcesFile=open('/etc/apt/sources.list','r')\n\t\trepos=sourcesFile.readlines()\n\t\trepos_orig=[]\n\t\tfor repo in repos:\n\t\t\tif 'kxstudio' not in repo:\n\t\t\t\trepos_orig.append(repo)\n\t\tsourcesFile.close()\n\t\ttry:\n\t\t\tsourcesFile=open('/etc/apt/sources.list','w')\n\t\t\tsourcesFile.writelines(repos_orig)\n\t\t\tsourcesFile.close()\n\t\texcept Exception as e:\n\t\t\tmsg_log=\"Couldn't open sources.list for writing\"\n\t\t\tself.log(msg_log)\n\n\t #def removeMusicRepository\t\t\n\n\tdef writeMirrorRepository(self):\n\n\t\tsourcesFile=open(self.sourcesListPath,'r')\n\t\trepos=sourcesFile.readlines()\n\t\trepos_orig=[]\n\t\tfor repo in repos:\n\t\t\tif self.textsearch_mirror not in repo:\n\t\t\t\trepos_orig.append(repo)\n\t\tsourcesFile.close()\n\t\ttry:\n\t\t\tf = open(self.sourcesListPath,'w')\n\t\t\tf.write('deb http://mirror/{version_mirror} {version} main restricted universe multiverse\\n'.format(version_mirror=self.defaultMirror,version=self.defaultVersion))\n\t\t\tf.write('deb http://mirror/{version_mirror} {version}-updates main restricted universe multiverse\\n'.format(version_mirror=self.defaultMirror,version=self.defaultVersion))\n\t\t\tf.write('deb http://mirror/{version_mirror} {version}-security main restricted universe multiverse\\n'.format(version_mirror=self.defaultMirror,version=self.defaultVersion))\n\t\t\tf.writelines(repos_orig)\n\t\t\tf.close()\n\t\t\tlog_msg=\"Added local mirror repository to sources list\"\n\t\t\tself.log(log_msg)\n\t\texcept Exception as e:\n\t\t\tmsg_log=\"Couldn't open sources.list for writing\"\n\t\t\tself.log(msg_log)\t\n\n\t#def writeMirrorRepository\n\n\tdef getNumberPackages(self,meta):\n\n\t\tcmd=\"LANG=C LANGUAGE=en apt-get update; apt-get install --simulate %s\"%meta\n\t\tpsimulate = subprocess.Popen(cmd,shell=True,stdout=subprocess.PIPE)\n\t\trawoutputpsimulate = 
psimulate.stdout.readlines()\n\t\trawpackagestoinstall = [ aux.decode().strip() for aux in rawoutputpsimulate if aux.decode().startswith('Inst') ]\n\t\tr = [ aux.replace('Inst ','') for aux in rawpackagestoinstall ]\n\t\tfor allinfo in r :\n\t\t\tself.initialNumberPackages.append(allinfo.split(' ')[0])\n\n\t\tself.numberPackagesUnpacked=copy.deepcopy(self.initialNumberPackages)\n\t\tself.numberPackagesInstalled=copy.deepcopy(self.initialNumberPackages)\n\n\t#def getNumberPackages\n\n\tdef isAptRunning(self):\n\n\t\tlocks_info=self.dpkgUnlocker.isDpkgLocked()\n\t\tif locks_info==3:\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False\n\n\t#def isAptRunning\n\n\tdef checkProgressUnpacked(self):\n\n\t\tfor i in range(len(self.numberPackagesUnpacked)-1,-1,-1):\n\t\t\tstatus=self.checkStatus(self.numberPackagesUnpacked[i])\n\t\t\tif status==1:\n\t\t\t\tself.numberPackagesUnpacked.pop(i)\n\t\t\telif status==0:\n\t\t\t\tself.numberPackagesUnpacked.pop(i)\n\t\t\t\tself.numberPackagesInstalled.pop(i)\t\n\n\t\tself.progressUnpacked=len(self.initialNumberPackages)-len(self.numberPackagesUnpacked)\n\t\tself.progressUnpackedPercentage=\"{:.2f}\".format(1-float(len(self.numberPackagesUnpacked)/len(self.initialNumberPackages)))\n\t#def checkProgressUnpacked\n\n\tdef checkProgressInstallation(self):\n\n\t\tfor i in range(len(self.numberPackagesInstalled)-1,-1,-1):\n\t\t\tstatus=self.checkStatus(self.numberPackagesInstalled[i])\n\t\t\tif status==0:\n\t\t\t\tself.numberPackagesInstalled.pop(i)\n\n\t\tself.progressInstallation=len(self.initialNumberPackages)-len(self.numberPackagesInstalled)\n\t\tself.progressInstallationPercentage=\"{:.2f}\".format(1-float(len(self.numberPackagesInstalled)/len(self.initialNumberPackages)))\n\t\n\t#def checkProgressInstallation\n\t\n\tdef checkStatus(self,pkg):\n\t\t\n\t\tp=subprocess.Popen([\"dpkg-query -W -f='${db:Status-Status}' %s\"%pkg],shell=True,stdout=subprocess.PIPE,stderr=subprocess.PIPE)\n\t\toutput=p.communicate()[0]\n\n\t\tif type(output) is bytes:\n\t\t\toutput=output.decode()\n\t\t\n\t\tif output==\"installed\":\n\t\t\treturn 0\n\n\t\telif output==\"unpacked\":\n\t\t\treturn 1\n\t\t\n\t\treturn -1\n\t\n\t#def checkStatus\n\t\n\n\tdef log(self,log_msg):\n\t\tlog_file=\"/var/log/lliurex-flavour-selector.log\"\n\t\tf=open(log_file,\"a+\")\n\t\tf.write(log_msg + '\\n')\n\t\tf.close()\n\t\t\n\t# def log\t\t\n\nfrom . 
import Core\n","sub_path":"lliurex-flavours-selector/python3-lliurexflavourselector/flavourSelectorManager.py","file_name":"flavourSelectorManager.py","file_ext":"py","file_size_in_byte":16726,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"636294378","text":"from data_unit.windowQueue import *\n\n# simulate stream flow using [streamSample.txt]\nstream : int = None\nflow : list = []\n\nwith open(\"streamSample.txt\", \"r\") as streamSample:\n for N in streamSample.read().replace(' ', '').replace('\\n', ''):\n newData = mData([N])\n flow.append(newData)\n\n\n# stream processing starts HERE\nQueue = windowQueue(\"window\")\nQueue.clear()\nfrequency = []\ndetermination = []\n#\n# *****\n# type of result must be changed as class mData (-> windowQueue.py) modified\n# *****\nresult = 0\n# flag to check if N is processed inside below while loop\nisPushed = False\n\n# type(x) = mData\nfor x in flow:\n # each element from flow is put into windowQueue\n Queue.enqueue(x)\n print(Queue)\n\n # check frequency of each number in windowQueue\n # type(N) = int\n for N in Queue.Q:\n # condition : frequency List is not empty\n if len(frequency) != 0:\n # temporary variable for index counting\n index = 0\n freqLength = len(frequency)\n # flag to check if N is processed inside below while loop\n isPushed = False\n ##### start while loop #####\n while (index < freqLength):\n if N != frequency[index][0]:\n index += 1\n else:\n frequency[index][1] += 1\n isPushed = True\n break\n ##### end while loop #####\n if isPushed is False:\n frequency.append([N, 1])\n # else : frequency List is Empty\n else:\n frequency.append([N, 1])\n\n # eliminate the element which is not in windowQueue from frequency dict\n for every in frequency:\n if every[0] not in Queue.Q:\n frequency.remove(every)\n print(\"Frequency: {}\".format(str(frequency)))\n # result of decreasing-orderly-sorted frequency List\n # sorting key = \"frequency value of each element\"\n determination = sorted(frequency, key=lambda innerList: innerList[1], reverse=True)\n print(\"DET: \" + str(determination))\n # Nested-IF for configuring any duplicate frequency\n # if freq value of the 1st and 2nd are the same\n # choose more recently appeared one i.e. 
second one\n if len(determination) > 1:\n if determination[0][1] == determination[1][1]:\n result = determination[1][0]\n else:\n result = determination[0][0]\n else:\n result = determination[0][0]\n\n print(\"\\t\\t\\tRESULT -> {}\".format(result))\n determination.clear()\n frequency.clear()\n\n\n","sub_path":"pytest001/src/mytest04/streamprocessing.py","file_name":"streamprocessing.py","file_ext":"py","file_size_in_byte":2575,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"177112092","text":"from sklearn.model_selection import train_test_split\r\nfrom torch import optim\r\nfrom torch.optim import lr_scheduler\r\nfrom torch.utils.data import DataLoader\r\nfrom tqdm import tqdm\r\n\r\nfrom config import Config\r\nfrom dataset import CarDataset\r\nfrom model import CentResnet\r\nimport pandas as pd\r\nimport torch\r\nimport matplotlib\r\n\r\nmatplotlib.use('Agg')\r\n\r\nSWITCH_LOSS_EPOCH = Config.SWITCH_LOSS_EPOCH\r\nPATH = Config.PATH\r\ntrain_images_dir = PATH + 'train_images/{}.jpg'\r\n\r\ntrain = pd.read_csv(PATH + 'train.csv')\r\n\r\nBATCH_SIZE = 2\r\n\r\ndf_train, df_dev = train_test_split(train, test_size=0.01, random_state=118)\r\n\r\n# Create dataset objects\r\ntrain_dataset = CarDataset(df_train, train_images_dir)\r\ndev_dataset = CarDataset(df_dev, train_images_dir)\r\n\r\n# Create data generators - they will produce batches\r\ntrain_loader = DataLoader(dataset=train_dataset, batch_size=BATCH_SIZE, shuffle=True, num_workers=4)\r\ndev_loader = DataLoader(dataset=dev_dataset, batch_size=BATCH_SIZE, shuffle=False, num_workers=4)\r\n\r\nn_epochs = 12 # 6\r\ndevice = Config.device\r\nmodel = CentResnet(8).to(device)\r\noptimizer = optim.AdamW(model.parameters(), lr=0.001)\r\n# optimizer = RAdam(model.parameters(), lr = 0.001)\r\nexp_lr_scheduler = lr_scheduler.StepLR(optimizer, step_size=max(n_epochs, 10) * len(train_loader) // 3, gamma=0.1)\r\n\r\nhistory = pd.DataFrame()\r\n\r\n\r\ndef _sigmoid(x):\r\n y = torch.clamp(x.sigmoid_(), min=1e-4, max=1 - 1e-4)\r\n return y\r\n\r\n\r\ndef focal_loss(pred, gt):\r\n pred = _sigmoid(pred)\r\n pos_inds = gt.eq(1).float()\r\n pos_inds = pos_inds.unsqueeze(1)\r\n # print(pos_inds.size())\r\n neg_inds = gt.lt(1).float().unsqueeze(1)\r\n\r\n neg_weights = torch.pow(1 - gt, 4).unsqueeze(1)\r\n\r\n loss = 0\r\n # print(neg_weights)\r\n pos_loss = torch.log(pred + 1e-7) * torch.pow(1 - pred, 2) * pos_inds\r\n neg_loss = torch.log(1 - pred + 1e-7) * torch.pow(pred, 2) * neg_weights * neg_inds\r\n\r\n # .float().sum()\r\n pos_loss = pos_loss.view(pred.size(0), -1).sum(-1)\r\n neg_loss = neg_loss.view(gt.size(0), -1).sum(-1)\r\n # neg_loss.sum(-1)\r\n num_pos = pos_inds.sum()\r\n if num_pos == 0:\r\n loss = loss - neg_loss\r\n else:\r\n loss = loss - (pos_loss + neg_loss) # / num_pos\r\n num_pos = pos_inds.view(gt.size(0), -1).sum(-1)\r\n # print('loss',loss.size(),pos_loss.size(),loss.size(),'loss_sum',loss.sum(-1).mean(0),num_pos.size())\r\n return loss.mean(0)\r\n\r\n\r\ndef criterion(prediction, mask, regr, weight=0.4, size_average=True):\r\n # Binary mask loss\r\n # pred_mask = torch.sigmoid(prediction[:, 0])\r\n # # mask_loss = mask * (1 - pred_mask)**2 * torch.log(pred_mask + 1e-12) + (1 - mask) * pred_mask**2 * torch.log(1 - pred_mask + 1e-12)\r\n # mask_loss = mask * torch.log(pred_mask + 1e-12) + (1 - mask) * torch.log(1 - pred_mask + 1e-12)\r\n pred_mask = _sigmoid(prediction[:, 0])\r\n # mask_loss = mask * (1 - pred_mask)**2 * torch.log(pred_mask + 1e-12) + (1 - mask) * pred_mask**2 * 
torch.log(1 - pred_mask + 1e-12)\r\n mask_loss = mask * torch.log(pred_mask + 1e-12) + (1 - mask) * torch.log(1 - pred_mask + 1e-12)\r\n mask_loss = -mask_loss.mean(0).sum()\r\n\r\n # Regression L1 loss\r\n pred_regr = prediction[:, 1:]\r\n regr_loss = (torch.abs(pred_regr - regr).sum(1) * mask).sum(1).sum(1) / mask.sum(1).sum(1)\r\n regr_loss = regr_loss.mean(0)\r\n\r\n # Sum\r\n loss = weight * mask_loss + (1 - weight) * regr_loss\r\n if not size_average:\r\n loss *= prediction.shape[0]\r\n return loss, mask_loss, regr_loss\r\n\r\n\r\ndef train(epoch, history=None):\r\n model.train()\r\n t = tqdm(train_loader)\r\n for batch_idx, (img_batch, mask_batch, regr_batch) in enumerate(t):\r\n img_batch = img_batch.to(device)\r\n mask_batch = mask_batch.to(device)\r\n regr_batch = regr_batch.to(device)\r\n\r\n optimizer.zero_grad()\r\n output = model(img_batch)\r\n if epoch < SWITCH_LOSS_EPOCH:\r\n loss, mask_loss, regr_loss = criterion(output, mask_batch, regr_batch, 1)\r\n else:\r\n loss, mask_loss, regr_loss = criterion(output, mask_batch, regr_batch, 0.5)\r\n\r\n t.set_description(f'train_loss (l={loss:.3f})(m={mask_loss:.2f}) (r={regr_loss:.4f}')\r\n\r\n if history is not None:\r\n history.loc[epoch + batch_idx / len(train_loader), 'train_loss'] = loss.data.cpu().numpy()\r\n\r\n loss.backward()\r\n\r\n optimizer.step()\r\n exp_lr_scheduler.step()\r\n\r\n print('Train Epoch: {} \\tLR: {:.6f}\\tLoss: {:.6f}\\tMaskLoss: {:.6f}\\tRegLoss: {:.6f}'.format(\r\n epoch,\r\n optimizer.state_dict()['param_groups'][0]['lr'],\r\n loss.data,\r\n mask_loss.data,\r\n regr_loss.data))\r\n\r\n\r\ndef evaluate(epoch, history=None):\r\n model.eval()\r\n loss = 0\r\n valid_loss = 0\r\n valid_mask_loss = 0\r\n valid_regr_loss = 0\r\n with torch.no_grad():\r\n for img_batch, mask_batch, regr_batch in dev_loader:\r\n img_batch = img_batch.to(device)\r\n mask_batch = mask_batch.to(device)\r\n regr_batch = regr_batch.to(device)\r\n\r\n output = model(img_batch)\r\n\r\n if epoch < SWITCH_LOSS_EPOCH:\r\n loss, mask_loss, regr_loss = criterion(output, mask_batch, regr_batch, 1, size_average=False)\r\n valid_loss += loss.data\r\n valid_mask_loss += mask_loss.data\r\n valid_regr_loss += regr_loss.data\r\n else:\r\n loss, mask_loss, regr_loss = criterion(output, mask_batch, regr_batch, 0.5, size_average=False)\r\n valid_loss += loss.data\r\n valid_mask_loss += mask_loss.data\r\n valid_regr_loss += regr_loss.data\r\n\r\n valid_loss /= len(dev_loader.dataset)\r\n valid_mask_loss /= len(dev_loader.dataset)\r\n valid_regr_loss /= len(dev_loader.dataset)\r\n\r\n if history is not None:\r\n history.loc[epoch, 'dev_loss'] = valid_loss.cpu().numpy()\r\n history.loc[epoch, 'mask_loss'] = valid_mask_loss.cpu().numpy()\r\n history.loc[epoch, 'regr_loss'] = valid_regr_loss.cpu().numpy()\r\n\r\n print('Dev loss: {:.4f}'.format(valid_loss))\r\n\r\n\r\nimport gc\r\nimport matplotlib.pyplot as plt\r\n\r\nhistory = pd.DataFrame()\r\nif __name__ == '__main__':\r\n\r\n for epoch in range(n_epochs):\r\n torch.cuda.empty_cache()\r\n gc.collect()\r\n train(epoch, history)\r\n evaluate(epoch, history)\r\n torch.save(model.state_dict(), './models/model_f_%d.pth' % epoch)\r\n\r\n series1 = history.dropna()['mask_loss']\r\n plt.plot(series1.index, series1, label='mask loss')\r\n series2 = history.dropna()['regr_loss']\r\n plt.plot(series2.index, 30 * series2, label='regr loss')\r\n series3 = history.dropna()['dev_loss']\r\n plt.plot(series3.index, series3, label='dev loss')\r\n plt.legend()\r\n 
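# matplotlib uses the non-interactive Agg backend (selected near the top of this file), so the combined loss curves are written straight to disk below rather than displayed.\r\n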
plt.savefig(\"loss.jpg\")\r\n","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":6763,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"595597195","text":"\"\"\"Contains the raw gameboard data\"\"\"\n\n\nclass Ship:\n \"\"\"Ships used on gameboard\"\"\"\n\n def __init__(self, name, length):\n \"\"\"Set name and length\"\"\"\n\n self.name = name\n self.length = length\n\n\nclass Tile:\n \"\"\"Building block of the grid\"\"\"\n\n def __init__(self, x, y, symbol, ship=None):\n \"\"\"Set coords, symbol and ship (if any)\"\"\"\n\n self.x = x\n self.y = y\n self.symbol = symbol\n self.ship = ship\n\n\nclass Grid:\n \"\"\"Gameboard and related methods\"\"\"\n\n def __init__(self, board_size=10):\n \"\"\"Set board size and ship symbols; generate grid\"\"\"\n\n self.board_size = board_size\n\n self.vertical_ship_symbol = '|'\n self.horizontal_ship_symbol = '—'\n self.empty_symbol = 'O'\n self.miss_symbol = '.'\n self.hit_symbol = '*'\n self.sunk_symbol = '#'\n\n self.grid = self.generate_grid()\n\n def generate_grid(self):\n \"\"\"Return a square grid the size of board_size\"\"\"\n\n # Validate board_size isn't over the size of the alphabet\n if self.board_size > 26:\n self.board_size = 26\n\n grid = []\n digits = range(0, self.board_size)\n letters = [chr(c) for c in range(ord('a'), ord('a') + self.board_size)]\n\n for digit in digits:\n row = []\n for letter in letters:\n row.append(Tile(letter, digit, self.empty_symbol))\n\n grid.append(row)\n\n return grid\n\n def print_grid(self):\n \"\"\"Print the grid (labeled axes included)\"\"\"\n\n # heading\n print(' ' + ' '.join([chr(c) for c in range(\n ord('a'), ord('a') + self.board_size)]))\n\n row_number = 0\n col_width = max(\n len(tile.symbol) for row in self.grid for tile in row) + 1\n for row in self.grid:\n print(str(row_number) + ' ' + ''.join(\n tile.symbol.ljust(col_width) for tile in row))\n row_number += 1\n\n print()\n","sub_path":"gameboard.py","file_name":"gameboard.py","file_ext":"py","file_size_in_byte":1985,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"184015008","text":"import socket\nfrom threading import Thread\nimport os\nimport sys\n\nPORT = 50000\n\n# initialize list of topic and value\ntopic_client_list = {}\n\n\ndef handle(data, addr):\n global topic_client_list\n command, payload = data.decode('utf-8').split(' ', 1)\n\n # If command equal subscribe\n if command == 'subscribe':\n ip, topic = payload.split(' ')\n print(ip, ' subscribe topic: ', topic)\n\n # If topic is already exist\n if topic_client_list.get(topic, False) != False:\n # Append client addr in topic_client_list\n topic_client_list[topic].append(addr)\n else:\n # Create new topic and set client addr in list\n topic_client_list.update({topic: [addr]})\n\n # If command equal publish\n elif command == 'publish':\n # split payload into topic and value\n ip, topic, value = payload.split(' ')\n print(ip, ' publish topic: ', topic, ' value: ', value)\n\n # If topic is already exist\n if topic_client_list.get(topic, False) != False:\n for client_addr in topic_client_list.get(topic):\n s.sendto(value.encode('utf-8'), client_addr)\n else:\n topic_client_list.update({topic: []})\n\n\nif __name__ == '__main__':\n addr = ('127.0.0.1', PORT)\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n s.bind(addr)\n print('UDP broker connected ..\\n')\n\n while True:\n data, addr = s.recvfrom(1024)\n try:\n handle(data, addr)\n except:\n 
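# NOTE: handle() is invoked inline in the loop above (no worker thread is actually started, despite the Thread import and the message below), so a malformed datagram is simply logged and skipped.\n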
print('Cannot start thread..\\n')\n","sub_path":"broker_udp.py","file_name":"broker_udp.py","file_ext":"py","file_size_in_byte":1559,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"338002805","text":"from point import Point\n\n\nclass Rectangle:\n\tdef __init__(self, x1, y1, x2, y2):\n\t\tself.so = Point(x1, y1)\n\t\tself.ne = Point(x2, y2)\n\n\tdef aire(self):\n\t\tlargeur = self.ne.x() - self.so.x()\n\t\thauteur = self.ne.y() - self.so.y()\n\t\treturn abs(largeur * hauteur)\n\n\tdef __str__(self):\n\t\treturn str(self.so) + '@' + str(self.ne)\n\nif __name__ == '__main__':\n\ta = Rectangle(1, 2, 3, 4)\n\tprint('a:', a)\n\tprint('aire:', a.aire())\n","sub_path":"srcCours1/rectangle.py","file_name":"rectangle.py","file_ext":"py","file_size_in_byte":419,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"296180107","text":"import csv\nimport os\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import scoped_session, sessionmaker\nengine = create_engine(\"postgres://postgres:happy!@#123@localhost:5432/postgres\", echo=True)\ndb=scoped_session(sessionmaker(bind=engine))\ndef main():\n f=open(\"flights.csv\")\n reader=csv.reader(f)\n for origin, destination, duration in reader:\n db.execute(\"INSERT INTO flightS (origin, destination, duration) VALUES (:origin,:destination,:duration)\",{\"origin\":origin,\"destination\":destination,\"duration\":duration})\n print(f\"Added flight from {origin} to {destiantion} during {duration} \")\n db.commit()\n\nif __name__==\"__name__\":\n main()\n","sub_path":"import.py","file_name":"import.py","file_ext":"py","file_size_in_byte":676,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"75569168","text":"import os\nimport glob\nimport time\nimport RPi.GPIO as GPIO\nimport datetime\nimport random\n\nfrom DHT22_Sensor import cekTempHum\nfrom DS18B20_Sensor import cekTemp as ct\nfrom HCSR04_Sensor import cekJarak\nfrom urllib.request import urlopen\n\nbuzzerPin = 25\nfastBeep = 0.2\nlongBeep = 0.5\nbeepInterval = 0.2\n\nGPIO.setmode(GPIO.BCM)\nGPIO.setup(buzzerPin, GPIO.OUT)\n\nos.system('modprobe w1-gpio')\nos.system('modprobe w1-therm')\n\nbase_dir = '/sys/bus/w1/devices/'\n\ndevice_folder = glob.glob(base_dir + '28*')[0]\ndevice_file = device_folder + '/w1_slave'\n\n\ndef read_rom():\n name_file = device_folder + '/name'\n f = open(name_file, 'r')\n return f.readline()\n\ndef read_temp_raw():\n f = open(device_file, 'r')\n lines = f.readlines()\n f.close()\n return lines\n\ndef read_temp():\n lines = read_temp_raw()\n while lines[0].strip()[-3:] != 'YES':\n time.sleep(0.2)\n lines = read_temp_raw()\n equals_pos = lines[1].find('t=')\n if equals_pos != -1:\n temp_string = lines[1][equals_pos+2:]\n temp_c = float(temp_string)/1000.0\n return temp_c\n\ndef twoBeep():\n GPIO.output(buzzerPin, GPIO.HIGH)\n time.sleep(fastBeep)\n GPIO.output(buzzerPin, GPIO.LOW)\n time.sleep(beepInterval)\n GPIO.output(buzzerPin, GPIO.HIGH)\n time.sleep(fastBeep)\n GPIO.output(buzzerPin, GPIO.LOW)\n\ntry:\n while True:\n waktu = datetime.datetime.now()\n now = waktu.strftime('%M')\n if(now == '54' or now == '55'):\n water_temp = read_temp()\n humidtemp = cekTempHum.read_temp_humidity()\n air_hum = humidtemp[0]\n air_temp = humidtemp[1]\n distance = round(42.7-cekJarak.read_distance(), 2)\n ph = round(random.uniform(6.4, 7.3), 1)\n ec = round(random.uniform(1.5, 2.1), 3)\n \n curr_time = datetime.datetime.now()\n 
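# Format the reading's timestamp; the space between date and time is URL-encoded as %20 before being placed in the GET query string below.\n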
now = curr_time.strftime('%Y-%m-%d %H:%M:%S')\n            record = now.replace(\" \", \"%20\")\n            \n            rescode = urlopen(\"http://192.168.0.101/myquaponic/public/api/insertdata?recorded_at=\" + record + \"&suhu_udara=\" + str(air_temp) + \"&kelembapan_udara=\" + str(air_hum) + \"&suhu_air=\" + str(water_temp) + \"&ph=\" + str(ph) + \"&ec=\" + str(ec) + \"&ketinggian_air=\" + str(distance))\n            \n            if(rescode.getcode() == 200):\n                sent_time = datetime.datetime.now()\n                sent = sent_time.strftime('%H:%M:%S')\n                twoBeep()\n                print(\"Respond Ok\\nData sent successfully at \"+ sent)\n            else:\n                print(\"Error: \" + str(rescode.getcode()))\n                twoBeep()\n                time.sleep(0.5)\n                twoBeep()\n            \n            # print(\"Water temperature = \", water_temp)\n            # print(\"Air temperature = \", air_temp)\n            # print(\"Air humidity = \", air_hum)\n            # print(\"Distance = \", distance)\n            # print(\"pH = \", ph)\n            # print(\"EC = \", ec)\n            # print(\"\\n\")\n        time.sleep(1)\n        \nexcept KeyboardInterrupt:\n    print(\"Selesai\")\n    GPIO.cleanup()\n\n","sub_path":"send_server_rev1.py","file_name":"send_server_rev1.py","file_ext":"py","file_size_in_byte":3146,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"98849913","text":"#! /usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport os\n\nfrom lab.environments import LocalEnvironment, MaiaEnvironment\nfrom lab.reports import Attribute, arithmetic_mean, geometric_mean\n\nimport common_setup\nfrom common_setup import IssueConfig, IssueExperiment\nfrom relativescatter import RelativeScatterPlotReport\nfrom csv_report import CSVReport\n\nDIR = os.path.dirname(os.path.abspath(__file__))\nBENCHMARKS_DIR = os.environ[\"DOWNWARD_BENCHMARKS\"]\nREVISIONS = [\"issue705-base\", \"issue705-v8\", \"issue705-v9\", \"issue705-v10\", \"issue705-v11\"]\nCONFIGS = [\n    IssueConfig(\n        'astar-blind',\n        ['--search', 'astar(blind())'],\n    )\n]\nSUITE = list(sorted(set(common_setup.DEFAULT_OPTIMAL_SUITE) |\n                    set(common_setup.DEFAULT_SATISFICING_SUITE)))\nENVIRONMENT = MaiaEnvironment(\n    priority=0, email=\"florian.pommerening@unibas.ch\")\n\nif common_setup.is_test_run():\n    SUITE = IssueExperiment.DEFAULT_TEST_SUITE\n    ENVIRONMENT = LocalEnvironment(processes=1)\n\nexp = IssueExperiment(\n    revisions=REVISIONS,\n    configs=CONFIGS,\n    environment=ENVIRONMENT,\n)\n\nexp.add_suite(BENCHMARKS_DIR, SUITE)\nexp.add_resource('sg_parser', 'sg-parser.py', dest='sg-parser.py')\nexp.add_command('sg-parser', ['{sg_parser}'])\n\nexp.add_fetcher('data/issue705-v4-eval')\n\nexp.add_comparison_table_step()\n\ndef add_sg_peak_mem_diff_per_task_size(run):\n    mem = run.get(\"sg_peak_mem_diff\")\n    size = run.get(\"translator_task_size\")\n    if mem and size:\n        run[\"sg_peak_mem_diff_per_task_size\"] = mem / float(size)\n    return run\n\n\nfor attr in [\"total_time\", \"search_time\", \"sg_construction_time\", \"memory\", \"sg_peak_mem_diff_per_task_size\"]:\n    for rev1, rev2 in [(\"base\", \"v11\"), (\"v8\", \"v9\"), (\"v9\", \"v10\"), (\"v10\", \"v11\")]:\n        exp.add_report(RelativeScatterPlotReport(\n            attributes=[attr],\n            filter_algorithm=[\"issue705-%s-astar-blind\" % rev1, \"issue705-%s-astar-blind\" % rev2],\n            filter=add_sg_peak_mem_diff_per_task_size,\n            get_category=lambda r1, r2: r1[\"domain\"],\n        ),\n            outfile=\"issue705-%s-%s-%s.png\" % (attr, rev1, rev2))\n\nexp.add_report(CSVReport(\n    filter_algorithm=\"issue705-v11-astar-blind\",\n    attributes=[\"algorithm\", \"domain\", \"sg_*\", \"translator_task_size\"]),\n               
outfile=\"csvreport.csv\")\n\nexp.run_steps()\n","sub_path":"fastdownward/experiments/issue705/v7.py","file_name":"v7.py","file_ext":"py","file_size_in_byte":2345,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"647634024","text":"from django.urls import path, re_path\n\nfrom test_player.views import TestPlayer, StartTest\n\napp_name = 'test_player'\n\nurlpatterns = [\n\n path('', StartTest.as_view(), name='start_test'),\n re_path(r'^(?P[0-9]+)/(?P[0-9]+)/',\n TestPlayer.as_view(), name='test_player'),\n]\n","sub_path":"test_player/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":312,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"192924294","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Mar 29 13:23:40 2017\r\n\r\n@author: echtpar\r\n\"\"\"\r\n\r\nimport numpy as np\r\nimport range\r\n\r\nX = [1,2,3,4,5,6,7,8,9]\r\n\r\nY = np.array(X)\r\n\r\nprint (Y)\r\nprint (X)\r\n\r\nA = [1,2,3]\r\nB = np.array(A)\r\nprint(A)\r\nprint(B)\r\n\r\n\r\nZ = np.convolve(B, X, 'full')\r\nprint(Z)\r\n\r\n\r\ndef fc_forward(x,w,b):\r\n \"\"\"\r\n computes the forward pass for an affine layer (fully connected layer)\r\n \r\n Inputs:\r\n - x: Input Tensor (N, d_1,....d_k)\r\n - w: Weights (D, M)\r\n - b: Bias(M,)\r\n \r\n N: Mini-batch size\r\n M: Number of outputs of fully connected layer\r\n D: Input Dimension\r\n \r\n Returns a tuple of\r\n - out: output, of shape(N, M)\r\n - cache: (x, w, b)\r\n \"\"\"\r\n \r\n out = None\r\n \r\n # get batch size (first dimension)\r\n N = x.shape[0]\r\n \r\n # Reshape activations to [N X (d_1),...., d_k], which will be a 2d matrix \r\n # [NXD]\r\n \r\n reshaped_input = x.reshape(N, -1)\r\n \r\n #calculate output\r\n out = np.dot(reshaped_input, w) + b.T\r\n \r\n # Save inputs for backward propagation\r\n cache = (x, w, b)\r\n return out, cache\r\n \r\ndef fc_backward(dout, cache):\r\n \r\n \"\"\"\r\n Inputs:\r\n - dout: Layer partial derivative w.r.t loss of shape (N,M) (same as output)\r\n - cache: (x,w,b) inputs from previous forwards computation\r\n \r\n N: Mini batch size\r\n M: Number of outputs of fully connected layer\r\n D: Input dimension\r\n d_1,...., d_k: Single input dimension\r\n \r\n Returns a tuple of:\r\n - dx: gradient w.r.t x, of shape (N, d_1,...., d_k)\r\n - dw: gradient w.r.t w of shape (D, M)\r\n - db: gradient w.r.t b of shape (M,)\r\n \r\n \"\"\"\r\n x, w, b = cache\r\n dx, dw, db = None, None, None\r\n \r\n # Get batch size (first dimension)\r\n \r\n N = x.shape[0]\r\n \r\n dx = np.dot(dout, w.T)\r\n dx = dx.reshape(x.shape)\r\n \r\n # Get dW (same format as w)\r\n # reshape activations to [NX(d_1,....d_k)], which will be a 2d matrix\r\n # [NXD]\r\n\r\n reshaped_input = x.reshape(N, -1)\r\n \r\n # Transpose then dot product with dout\r\n dw = reshaped_input.T.dot(dout)\r\n \r\n # Get dB (Same format as b)\r\n db = np.sum(dout, axis = 0)\r\n \r\n # Return outputs\r\n dx, dw, db\r\n \r\n\r\ndef relu_forward(x): \r\n \"\"\"\r\n Computes the forward pass for ReLU\r\n Input:\r\n - x: Inputs, of any shape\r\n \r\n Returns a tuple of: (out, cache)\r\n The shape on the output is the same as the input\r\n \"\"\"\r\n \r\n out = None\r\n \r\n # Create a function that receives x and returns x if x is bigger than\r\n # zero, or zero if x is negative\r\n \r\n relu = lambda x: x*(x > 0).astype(float)\r\n out = relu(x)\r\n \r\n \r\n # Cache input and return outputs\r\n cache = x\r\n return out, cache\r\n \r\n \r\ndef 
relu_backward (dout, cache):\r\n \"\"\"\r\n Computes the backward pass for ReLU\r\n Input:\r\n - dout: Upstream derivates, of any shape\r\n - cache: Previous input (used on forward pass)\r\n \r\n Returns:\r\n - dx: gradient w.r.t x\r\n \"\"\"\r\n \r\n # Initialize dx with None and x with cache\r\n dx, x = None, cache\r\n \r\n # Make all positive elements in x equal to dout while all the other elements \r\n # become zero\r\n dx = dout *(x>=0)\r\n \r\n # Return dx (gradient with respect to x)\r\n return dx\r\n \r\n#experiment with drop out algorigthm - uncomment below to understand\r\n#x = [[1,2,3,4,5,6], [7,8,9,10,11,12]]\r\n#x = np.array(x)\r\n#mask1 = np.random.rand(*x.shape)\r\n#mask = (mask1 < 0.5) / 0.5\r\n#\r\n##mask2 = np.random.rand(x.shape)\r\n##print (mask2)\r\n#print (*x.shape)\r\n#print(x)\r\n#print (mask1)\r\n#print (mask) \r\n#print (x.shape) \r\n#out = x*mask \r\n#print(out)\r\n\r\n\r\ndef dropout_forward(x, dropout_param):\r\n \"\"\"\r\n Performs the forward pass for (inverted) dropout.\r\n Inputs:\r\n - x: Input data, of any shape\r\n - dropout_param: A dictionary with the following keys: (p, test/train, seed)\r\n \r\n Ouputs:\r\n (out, cache)\r\n \"\"\"\r\n \r\n# Get the current dropout mode, p, and seed\r\n p, mode = dropout_param['p'], dropout_param['mode']\r\n if 'seed' in dropout_param:\r\n np.random.seed(dropout_param['seed'])\r\n \r\n# Initialization of outputs and mask\r\n mask = None\r\n out = None\r\n \r\n if mode == 'train':\r\n # create and apply mask (normally p = 0.5for half of neurons), we scale all\r\n # by p to avoid having to multiply by p on backpropagation, this is called \r\n # inverted dropout\r\n\r\n mask = (np.random.rand(*x.shape) < p) / p\r\n \r\n # Apply mask\r\n out = x * mask\r\n elif mode == 'test':\r\n # during prediction no mask is used\r\n mask = None\r\n out = x\r\n \r\n # Save mask and dropout parameters for backprop\r\n cache = (dropout_param, mask)\r\n \r\n # convert \"out\" type and return ouput and cache\r\n out = out.astype(x.dtype, copy = False)\r\n return out, cache\r\n \r\n \r\ndef dropout_backward(dout, cache):\r\n \"\"\"\r\n Perform the backward pass for (inverted) dropout.\r\n Inputs:\r\n - dout: upstream derivates, of any shape\r\n - cache: (dropout_param, mask) from dropout_forward.\r\n \"\"\"\r\n \r\n # recover dropout parameters (p, mask, mode) from cache\r\n dropout_param, mask = cache\r\n mode = dropout_param['mode']\r\n\r\n dx = None\r\n # Back propagate (Dropout laer has not parameters just input X)\r\n \r\n if mode == 'train':\r\n # just back propagate dout from teh neurons that were used during dropout\r\n dx = dout*mask\r\n elif mode =='test':\r\n # disable droput dring prediction / test\r\n dx = dout\r\n \r\n # return dx\r\n return dx\r\n\r\n\r\n#experiment with padding functionality of numpy\r\n#x=[[[7,8,9],[9,10,11], [1,2,3]]]\r\n#P = 3\r\n#y = np.lib.pad(x,((2,1),(0,1),(1,1)), 'constant', constant_values = 0)\r\n#print (y)\r\n\r\n \r\ndef conv_forward_naive(x, w, b, conv_params):\r\n \"\"\"\r\n computes the forward pass for the convolution layer (naive)\r\n Input:\r\n - x: Input data of shape (N, C, H, W)\r\n - w: Filter weights of shape (F, C, HH, WW)\r\n - b: Biases, of shape (F,)\r\n - conv_param: A Dictionary with the following keys:\r\n - 'stride': How much pixels the sliding window will travel\r\n - 'pad': The number of pixels that will be used to zero pad the input\r\n \r\n N: Mini-batch size\r\n C: Input depth (i.e. 
3 for RGB images)\r\n    H/W: Image height / width\r\n    F: number of filters on convolution layer (will be the output depth)\r\n    HH/WW: Kernel Height / Width\r\n    \r\n    Returns a tuple of:\r\n    - out: output data, of shape (N, F, H', W') where H' and W' are given by\r\n    H' = 1 + (H + 2 * pad - HH) / stride\r\n    W' = 1 + (W + 2 * pad - WW) / stride\r\n    - cache: (x, w, b, conv_params)\r\n    \r\n    \"\"\"\r\n    \r\n    out = None\r\n    N, C, H, W = x.shape\r\n    F, C, HH, WW = w.shape\r\n    \r\n    # Get parameters\r\n    \r\n    P = conv_params[\"pad\"]\r\n    S = conv_params[\"stride\"]\r\n\r\n    #Calculate output size and initialize output volume\r\n    \r\n    H_R = 1 + (H + 2 * P - HH) // S\r\n    W_R = 1 + (W + 2 * P - WW) // S\r\n\r\n    out = np.zeros((N, F, H_R, W_R))\r\n    \r\n    #pad images with zeroes on the border (used to keep spatial information)\r\n    \r\n    x_pad = np.lib.pad(x, ((0,0),(0,0),(P,P),(P,P)), 'constant', constant_values = 0)\r\n    \r\n# Apply the convolution\r\n    for n in range(N): # for each element on batch\r\n        for depth in range(F): # for each filter\r\n            for r in range(0,H,S): # slide vertically taking stride into account\r\n                for c in range(0,W,S): # slide horizontally taking stride into account\r\n                    out[n, depth, r//S, c//S] = np.sum(x_pad[n, :, r:r+HH, c:c+WW] * w[depth, :, :,:]) + b[depth]\r\n                    \r\n    # cache parameters and input for backpropagation and return output volume\r\n    cache = (x,w,b,conv_params)\r\n    return out,cache\r\n    \r\ndef conv_backward_naive(dout, cache):\r\n    \r\n    \"\"\"\r\n    computes the backward pass for the convolution layer. (naive)\r\n    Inputs:\r\n    - dout: upstream derivatives.\r\n    - cache: a tuple of (x,w,b,conv_param)\r\n    \r\n    Returns a tuple of (dw, dx, db) gradients\r\n    \"\"\"\r\n    \r\n    dx, dw, db = None, None, None\r\n    x, w, b, conv_param = cache\r\n    N, F, H_R, W_R = dout.shape\r\n    N, C, H, W = x.shape\r\n    F, C, HH, WW = w.shape\r\n    P = conv_param[\"pad\"]\r\n    S = conv_param[\"stride\"]\r\n\r\n    # Do zero padding on x_pad\r\n    \r\n    x_pad = np.lib.pad(x,((0,0),(0,0),(P,P),(P,P)), 'constant', constant_values=0)\r\n    \r\n    # initialise outputs\r\n    dx = np.zeros(x_pad.shape)\r\n    dw = np.zeros(w.shape)\r\n    db = np.zeros(b.shape)\r\n    \r\n    # Calculate dx with 2 extra col/row that will be deleted\r\n    for n in range(N): # for each element on batch\r\n        for depth in range(F): # for each filter\r\n            for r in range(0,H,S): # slide vertically taking stride into account\r\n                for c in range(0,W,S): # slide horizontally taking stride into account\r\n                    dx[n, :, r:r+HH, c:c+WW] += dout[n,depth,r//S,c//S] * w[depth,:,:,:]\r\n\r\n    #deleting padded rows to match real dx\r\n    delete_rows = list(range(P)) + list(range(H+P, H+2*P, 1))\r\n    delete_columns = list(range(P)) + list(range(W+P, W+2*P, 1))\r\n    dx = np.delete(dx, delete_rows, axis=2) #height\r\n    dx = np.delete(dx, delete_columns, axis=3) #width\r\n    \r\n    # Calculate dw\r\n    for n in range(N): # for each element on batch\r\n        for depth in range(F): # for each filter\r\n            for r in range(H_R): #slide vertically taking stride into account\r\n                for c in range(W_R): # slide horizontally taking stride into account\r\n                    dw[depth,:,:,:] += dout[n, depth, r,c] * x_pad[n,:,r*S:r*S+HH, c*S:c*S+WW]\r\n\r\n    # Calculate db, 1 scalar bias per filter, so its just a matter of summing\r\n    for depth in range(F):\r\n        db[depth] = np.sum(dout[:, depth, :, :])\r\n        \r\n    return dx, dw, db\r\n    
","sub_path":"LearnConvNetBasics.py","file_name":"LearnConvNetBasics.py","file_ext":"py","file_size_in_byte":10221,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"617182128","text":"#coding:utf-8\n\n\"\"\"\n目的:画画,画各种各样的圆,Circle.\n - 标准的圆,Circle,\n - 椭圆,ellipse,\n - 等高线,Contour line.\n\"\"\"\n\n\nimport matplotlib.pyplot as plt\nimport com.youngz.python.basic.FileUtils as FileUtils\n\ncircle_demo = \"data/output/circle_demo.png\"\n\n\"\"\"\n Circle 参数((圆心坐标), 圆型半径)\n\"\"\"\n\ncircle_1 = plt.Circle((0, 0), 0.2)\ncircle_2 = plt.Circle((1, 1), 0.5, color=\"blue\")\ncircle_3 = plt.Circle((5, -2), 1, color=\"g\", clip_on=False)\n\n\nfig, ax = plt.subplots()\n\n\n\nax.add_artist(circle_1)\nax.add_artist(circle_2)\nax.add_artist(circle_3)\n\nplt.xlim(-5, 5)\nplt.ylim(-5, 5)\n\nfig.savefig(FileUtils.get_full_path(circle_demo))\n","sub_path":"src/python/com/youngz/python/pkgs/use_matploglib/zy_draw_circles.py","file_name":"zy_draw_circles.py","file_ext":"py","file_size_in_byte":679,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"83301499","text":"\"\"\"\n* user: VR437653\n* fname: LUCA\n* lname: PESERICO\n* task: game2stacks\n* score: 0.0\n* date: 2018-12-05 12:47:24.756284\n\"\"\"\n#!/usr/bin/env python3\n# -*- coding: latin-1 -*-\n\n# Template di soluzione di game2stacks, written by Romeo Rizzi 2018.11.28\n\nn1 = int(input())\nn2 = int(input())\n\ndef play(n1, n2): # questa è la funzione che devi perfezionare\n somma=n1+n2\n if n1==n2:\n return (0,0)\n elif n1>n2:\n if (n1+n2)%2==0:\n return (2,0) \n else:\n return (1,0)\n elif n2>n1:\n if (n2+n1)%2==0:\n return (0,2)\n else:\n return (0,1) \n \n \n \n \n\ntogli1, togli2 = play(n1, n2)\nprint(togli1)\nprint(togli2)\n","sub_path":"2018.12.05.provetta/all-CMS-submissions-2018-12-05/2018-12-05.12:47:24.756284.VR437653.game2stacks.py","file_name":"2018-12-05.12:47:24.756284.VR437653.game2stacks.py","file_ext":"py","file_size_in_byte":721,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"108604641","text":"from global_setting import *\n\ndef run_uniform():\n randomizer.set_budget(epsilon/omega)\n for step in range(total_step):\n print(f'STEP {step+1} :')\n generator.transit()\n # randomize and aggregate\n sample_data = generator.sample()\n private_counts = randomizer.randomize_group(sample_data)\n estimate_counts = randomizer.aggregate(private_counts)\n # record metrics\n true_counts= calc_counts(generator.data, itemset_len)\n record[step] = utility_metrics(estimate_counts, true_counts, k)\n result = record.mean(0)\n print('Mean:', result[0], result[1], result[2], result[3], result[4], result[5])\n \nif __name__=='__main__':\n randomizer = RAPPOR(itemset_len, epsilon/omega)\n randomizer.enable_render(False)\n run_uniform()","sub_path":"uniform.py","file_name":"uniform.py","file_ext":"py","file_size_in_byte":797,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"387665587","text":"# coding=utf-8\nfrom Point import Point\nfrom barycenter import body_barycenter, head_barycenter\nfrom size import body_size, head_size\nfrom labels import Labels\nimport numpy as np\n\n\nclass Corp(object):\n \"\"\"\n implemente la sauvegarde des donnee d'un corps\n \"\"\"\n\n def __init__(self, ligne):\n # print(\"hd size : \" + str(head_size(ligne)))\n if (len(ligne) >= 41):\n self.hd_bary = Point(ligne[Labels.X_BARY_HD], ligne[Labels.Y_BARY_HD])\n 
self.bd_bary = Point(ligne[Labels.X_BARY_BD], ligne[Labels.Y_BARY_BD])\n else:\n self.hd_bary = head_barycenter(ligne)\n self.bd_bary = body_barycenter(ligne)\n # self.hd_size = head_size(ligne)\n # self.bd_size = body_size(ligne)\n self.cache = pt_cache(ligne)\n\n self.coef_dist_hd = 1\n self.coef_dist_bd = 1\n # self.coef_size_hd = 0\n # self.coef_size_bd = 0\n self.coef_pt_cache = 50\n self.exposant = 0.3\n self.nb_of_use = 0\n\n def dist(self, other):\n cout = self.hd_bary.dist(other.hd_bary) ** self.exposant * self.coef_dist_hd\n cout += self.bd_bary.dist(other.bd_bary) ** self.exposant * self.coef_dist_bd\n # cout += self.hd_size.dist(other.hd_size)**self.exposant * self.coef_size_hd\n # cout += self.bd_size.dist(other.bd_size)**self.exposant * self.coef_size_bd\n cout += abs(self.dist_pt_cache(other)) ** self.exposant * self.coef_pt_cache\n # print( \"comp : \" + str(self) + \" \\n et \\n\" + str(other))\n # print(\"cout :\" + str(cout))\n # print(\"\\n\")\n return cout\n\n def comp(self, other):\n return np.array([self.hd_bary.dist(other.hd_bary),\n self.bd_bary.dist(other.bd_bary),\n abs(self.dist_pt_cache(other))])\n\n def config(self, par_dict=None, **args):\n \"\"\"\n configuration du corp\n :param args: coef_dist_hd, coef_dist_bd, coef_size_hd, coef_size_bd, coef_pt_cache\n \"\"\"\n if (par_dict is not None):\n args = par_dict\n if \"coef_dist_hd\" in args:\n self.coef_dist_hd = args[\"coef_dist_hd\"]\n if \"coef_dist_bd\" in args:\n self.coef_dist_bd = args[\"coef_dist_bd\"]\n # if \"coef_size_hd\" in args:\n # self.coef_size_hd = args[\"coef_size_hd\"]\n # if \"coef_size_bd\" in args:\n # self.coef_size_bd = args[\"coef_size_bd\"]\n if \"coef_pt_cache\" in args:\n self.coef_pt_cache = args[\"coef_pt_cache\"]\n if \"exposant\" in args:\n self.exposant = args[\"exposant\"]\n\n def __hash__(self):\n return hash((self.hd_bary, self.bd_bary, len(self.cache)))\n\n def __eq__(self, other):\n return self.bd_bary == other.bd_bary and self.hd_bary == other.hd_bary and self.cache == other.cache\n\n def __str__(self):\n return (\"Corp : \" + \"hd : \" + str(self.hd_bary) + \" size : \" +\n \"bd : \" + str(self.bd_bary) + \" size :\" + \" cachee : \" + str(self.cache) + \"\\n\" +\n \"Coef : \" + \" pt :\" + str(self.coef_pt_cache) + \" sbd \" + \" hd \" + str(self.coef_dist_hd) +\n \" bd \" + str(self.coef_dist_bd))\n\n def dist_pt_cache(self, other):\n return len(self.cache.symmetric_difference(other.cache))\n\n\ndef pt_cache(ligne):\n c = 0\n s = set()\n for i in range(2, 37):\n if ligne[i] == -1:\n s.add(i)\n return s\n\n\nclass CorpCol(Corp):\n\n def __init__(self, ligne, col=None):\n \"\"\"\n crée un corp avec gestion de la couleur en la précisant\n :param ligne:\n :param col: un triplet (R, G, B)\n \"\"\"\n Corp.__init__(self, ligne)\n if col != None:\n self.col = col\n else:\n self.col = (ligne[Labels.COL_R], ligne[Labels.COL_G], ligne[Labels.COL_B])\n self.coef_col = 1\n\n def dist_col(self, other):\n dist = np.sqrt(sum([(self.col[i] - other.col[i]) ** 2 for i in (0, 1, 2)])) ** self.exposant\n return dist\n\n def config(self, par_dict=None, **args):\n \"\"\"\n configuration du corp\n :param args: coef_dist_hd, coef_dist_bd, coef_size_hd, coef_size_bd, coef_pt_cache\n \"\"\"\n if par_dict is not None:\n args = par_dict\n\n super(CorpCol, self).config(args)\n\n if \"coef_col\" in args:\n self.coef_col = 
args[\"coef_col\"]\n","sub_path":"src/utils/Corp.py","file_name":"Corp.py","file_ext":"py","file_size_in_byte":4359,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"369736225","text":"import os\nimport sys\n\nif sys.platform == 'linux':\n sys.path.append('/root/trajectory_handle/')\n\nimport torch\nfrom torch import nn\nimport torchvision.datasets as dsets\nimport torchvision.transforms as transforms\nimport numpy as np\nimport torch.utils.data as Data\nimport torch.nn.functional as F\nimport random\nimport logger\nfrom demand_prediction.evaluation.Evaluate import Evaluate\nfrom demand_prediction.Demand_Conv_3x3 import Net\n\n# torch.manual_seed(1) # reproducible\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0\" # gpu\ngpu_avaliable = torch.cuda.is_available()\n\n# Hyper Parameters\nEPOCH = 100 # train the training data n times, to save time, we just train 1 epoch\nBATCH_SIZE = 128\nTIME_STEP = 6 # rnn time step / image height\nINPUT_SIZE = 19 # rnn input size / image width\nHIDDEN_SIZE = 256\nLR = 0.0001 # learning rate\nLAYER_NUM = 2\nWEEKDAY_NUM = 7\nTIME_SLOT = 48\nREGION_NUM = 918\nSEQ_LENGTH = 6\n\nlinux_path = \"/root/taxiData\"\nwindows_path = \"K:\\毕业论文\\TaxiData\"\nmac_path = \"/Volumes/MyZone/毕业论文/TaxiData\"\nbase_path = windows_path\n\nelogger = logger.Logger(\"demand_lstm_prediction_conv3x3\")\n\n\ndef load_data():\n net_dataset = np.load(base_path + \"/demand/net_data_without_filter.npy\").tolist()\n\n # 打乱\n random.shuffle(net_dataset)\n\n single_region_dataset = []\n for data in net_dataset:\n if data[-1] > 10:\n single_region_dataset.append(data)\n net_dataset = single_region_dataset\n\n print(\"all data sample num : {}\".format(len(net_dataset)))\n elogger.log(\"all trajectories num : {}\".format(len(net_dataset)))\n count = len(net_dataset) * 0.8\n\n def flatten(o):\n new_seq = []\n\n for d, conv in o[3]:\n new_o = []\n # new_o.append(o[0])\n if o[0] < 5:\n new_o.append(0)\n else:\n new_o.append(1)\n new_o.append(o[1])\n new_o.append(o[2])\n new_o.append(d)\n for row, i in enumerate(conv):\n if not 2 <= row <= 4:\n continue\n for col, j in enumerate(i):\n if not 2 <= col <= 4:\n continue\n new_o.append(j)\n new_seq.append(new_o)\n return new_seq\n\n train_data = []\n train_labels = []\n test_data = []\n test_labels = []\n\n train_count = 0\n for obj in net_dataset:\n if train_count < count:\n train_data.append(flatten(obj))\n train_labels.append(obj[-1])\n else:\n test_data.append(flatten(obj))\n test_labels.append(obj[-1])\n train_count += 1\n return train_data, train_labels, test_data, test_labels\n\n\n# trajectory dataset\ntrain_data, train_labels, test_data, test_labels = load_data()\n\ntrain_data = torch.LongTensor(train_data)\ntrain_labels = torch.FloatTensor(train_labels)\ntest_data = torch.LongTensor(test_data)\ntest_labels = torch.FloatTensor(test_labels)\n\ntorch_dataset = Data.TensorDataset(train_data, train_labels)\nloader = Data.DataLoader(\n dataset=torch_dataset, # torch TensorDataset format\n batch_size=BATCH_SIZE, # mini batch size\n shuffle=True # 要不要打乱数据 (打乱比较好)\n)\n\ntest_dataset = Data.TensorDataset(test_data, test_labels)\ntest_loader = Data.DataLoader(\n dataset=test_dataset,\n batch_size=BATCH_SIZE,\n shuffle=True\n)\n\n\nclass RNN(nn.Module):\n def __init__(self):\n super(RNN, self).__init__()\n\n self.rnn = nn.LSTM( # if use nn.RNN(), it hardly learns\n input_size=INPUT_SIZE,\n hidden_size=HIDDEN_SIZE, # rnn hidden unit\n num_layers=LAYER_NUM, # number of rnn layer\n batch_first=True, # input & 
output will has batch size as 1s dimension. e.g. (batch, time_step, input_size)\n )\n\n # self.out = nn.Linear(HIDDEN_SIZE, label_size)\n self.region_embeds = nn.Embedding(REGION_NUM, 8)\n self.week_embeds = nn.Embedding(WEEKDAY_NUM, 3)\n self.time_embeds = nn.Embedding(TIME_SLOT, 4)\n if gpu_avaliable:\n self.conv = Net().cuda()\n else:\n self.conv = Net()\n\n # self.fc = nn.Linear(HIDDEN_SIZE, 1)\n self.fc = nn.Sequential(\n nn.Linear(HIDDEN_SIZE, 128),\n nn.Linear(128, 64),\n nn.Linear(64, 32),\n nn.Linear(32, 1)\n )\n\n def forward(self, x):\n before_conv = []\n for batches in x:\n for seqs in batches:\n before_conv.extend(seqs[-9:].tolist())\n if gpu_avaliable:\n before_conv = torch.cuda.FloatTensor(before_conv)\n else:\n before_conv = torch.FloatTensor(before_conv)\n before_conv = before_conv.view(-1, SEQ_LENGTH, 3, 3)\n\n convs = None\n for item in torch.split(before_conv, 1, 1):\n if convs is None:\n convs = torch.unsqueeze(self.conv(item), 1)\n else:\n convs = torch.cat((convs, torch.unsqueeze(self.conv(item), 1)), 1)\n new_x = torch.cat((self.week_embeds(x[:, :, 0]), self.time_embeds(x[:, :, 1])), 2)\n new_x = torch.cat((new_x, self.region_embeds(x[:, :, 2])), 2)\n x = torch.cat((new_x, convs), 2)\n\n if gpu_avaliable:\n x = x.cuda()\n r_out, (h_n, h_c) = self.rnn(x, None) # None represents zero initial hidden state\n\n # choose r_out at the last time step\n out = torch.squeeze(self.fc(r_out[:, -1, :]), 1)\n del x, r_out, h_c, h_n\n # out = F.softmax(out, 1)\n return out\n\n\nrnn = RNN()\nif gpu_avaliable:\n rnn.cuda()\nprint(rnn)\nelogger.log(str(rnn))\n\noptimizer = torch.optim.Adam(rnn.parameters(), lr=LR) # optimize all cnn parameters\n# loss_func = nn.MSELoss()\nloss_func = nn.L1Loss()\n\n# training and testing\nfor epoch in range(EPOCH):\n for step, (b_x, b_y) in enumerate(loader): # gives batch data\n if gpu_avaliable:\n b_x = b_x.cuda()\n b_y = b_y.cuda()\n\n b_x = b_x.view(-1, TIME_STEP, 13) # reshape x to (batch, time_step, input_size)\n\n output = rnn(b_x) # rnn output\n loss = loss_func(output, b_y) # cross entropy loss\n optimizer.zero_grad() # clear gradients for this training step\n loss.backward() # backpropagation, compute gradients\n optimizer.step() # apply gradients\n del b_x, b_y\n\n if step % 1000 == 0:\n all_pred_y = []\n all_test_y = []\n for t_step, (t_x, t_y) in enumerate(test_loader):\n if gpu_avaliable:\n t_x = t_x.cuda()\n t_y = t_y.cuda()\n\n t_x = t_x.view(-1, TIME_STEP, 13)\n test_output = rnn(t_x) # (samples, time_step, input_size)\n if gpu_avaliable:\n pred_y = test_output.cuda().data\n else:\n pred_y = test_output.data.numpy()\n all_pred_y.extend(pred_y)\n all_test_y.extend(list(t_y.data.cpu().numpy()))\n print_out = 'Epoch: ' + str(epoch) + '| train loss: %.4f' % loss.data.cpu().numpy() + \\\n '| test MAPE: %.4f' % Evaluate.MAPE(all_pred_y, all_test_y) + \\\n '| test RMSE: %.4f' % Evaluate.RMSE(all_pred_y, all_test_y)\n print(print_out)\n elogger.log(str(print_out))\n\ntorch.save(rnn.state_dict(), 'params_without_filter.pkl')\n\n# print 10 predictions from test data\n# test_output = rnn(test_data[:10].view(-1, 10, 5))\n# if gpu_avaliable:\n# pred_y = torch.max(test_output, 1)[1].cuda().data\n# else:\n# pred_y = torch.max(test_output, 1)[1].data.numpy()\n# print(pred_y, 'prediction number')\n# print(test_labels[:10], 'real 
number')\n","sub_path":"demand_prediction/lstm_prediction_conv3x3.py","file_name":"lstm_prediction_conv3x3.py","file_ext":"py","file_size_in_byte":7697,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"313539541","text":"#coding:utf-8\nfrom datetime import datetime\nfrom flask import render_template, redirect, request, url_for, flash\nfrom . import admin\nfrom ..models import User, Article, ArticleType, Comment, db\nfrom flask_login import current_app, current_user, login_required\nfrom .forms import SubmitArticleForm\nfrom werkzeug.local import LocalProxy\n\n\n@admin.route('/')\ndef manageadmin():\n return render_template('admin/admin_base.html')\n\n\n@admin.route('/submit-article', methods=['GET', 'POST'])\n@login_required\ndef submitarticle():\n form = SubmitArticleForm()\n types = [(t.id, t.name) for t in ArticleType.query.all()]\n form.types.choices = types\n\n if form.validate_on_submit():\n title = form.title.data\n content = form.content.data\n type_id = form.types.data\n summary = form.summary.data\n\n articleType = ArticleType.query.get(type_id)\n\n if articleType:\n article = Article(title=title, content=content, summary=summary,\n articleType=articleType)\n db.session.add(article)\n db.session.commit()\n flash(u'发表博文成功!', 'success')\n article_id = Article.query.filter_by(title=title).first().id\n return redirect(url_for('main.articleMessage', id=article_id))\n if form.errors:\n flash(u'发表博文失败', 'danger')\n return render_template('admin/submit_article.html', form=form)\n\n\n@admin.route('/article-comment')\n@login_required\ndef articlecomment():\n page = request.args.get('page', 1, type=int)\n pagination = Comment.query.order_by(Comment.timestamp.desc()).paginate(\n page, per_page=current_app.config['FLASKY_COMMENT_PER_PAGE'],\n error_out=False)\n comments = pagination.items\n return render_template('admin/article_comment.html', comments=comments, page=page,\n pagination=pagination, endpoint='.articlecomment')","sub_path":"app/admin/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1932,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"549987276","text":"# Sales Prediction \r\n# February 5, 2019\r\n# CTI-110 P2T1 - Sales Prediction\r\n# Josiah Croel\r\n#\r\n\r\n#Get projected total sales.\r\ntotal_sales = float( input ('Enter the projected sales: '))\r\n\r\n#Calculate the profit as 23% of total sales.\r\nprofit = total_sales * 0.23\r\n\r\n#Display the Profit\r\nprint('The profit is $', format (profit, ',.2f')) \r\n","sub_path":"P2T1_SalesPrediction_JosiahCroel.py","file_name":"P2T1_SalesPrediction_JosiahCroel.py","file_ext":"py","file_size_in_byte":339,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"481250565","text":"import unittest\nimport subprocess\n\nclass ExeTest(unittest.TestCase):\n def execute(self, path, stdin, stdout):\n proc = subprocess.Popen(\n path,\n stdout=subprocess.PIPE,\n stdin=subprocess.PIPE)\n proc.stdin.write(bytes(stdin))\n proc.stdin.close()\n result = proc.stdout.read()\n result = result.decode()\n result = result.strip(' \\n')\n result = ''.join(result.split())\n stdout = ''.join(stdout.split())\n assert result == stdout, bytes(result)\n proc.wait()\n\n def test_palindrome1(self):\n self.execute(path='./test_me',\n stdin='1',\n stdout='TAK TAK')\n\n def test_palindrome2(self):\n self.execute(path='./test_me',\n stdin='2',\n stdout='TAK NIE')\n\n def 
test_palindrome3(self):\n self.execute(path='./test_me',\n stdin='3',\n stdout='TAK TAK')\n\n def test_palindrome4(self):\n self.execute(path='./test_me',\n stdin='999',\n stdout='TAK NIE')\n\n def test_palindrome5(self):\n self.execute(path='./test_me',\n stdin='919',\n stdout='TAK NIE')","sub_path":"2.009/tests/test_functional.py","file_name":"test_functional.py","file_ext":"py","file_size_in_byte":1284,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"87708730","text":"import glob\nfrom fingerprints.fingerprint import FingerprintManager\n\nclass DetectionModule():\n \"\"\"\n This class is responsible to compare fingerprints and identify potentially malicious requests.\n \"\"\"\n\n def __init__(self):\n self.background_threshold = 2.0\n self.browser_threshold = 1.5\n\n\n\n def similarity_check(self, new_f1, old_f2):\n \"\"\"\n Verify if two fingerprints are similar\n\n Parameter\n -----------\n new_f1, old_f2 : Fingerprint\n\n\n Result:\n -----------\n True : if new_f1 and old_f2 are similar\n False: otherwise\n \"\"\"\n score = 0.0\n\n if not new_f1 or not old_f2:\n return False\n\n # Fingerprints are not similar if they represents two different type of application\n if new_f1.label != old_f2.label:\n return False\n\n # Check if Background-type fingerprints are similar\n if new_f1.label == \"Background\":\n score = self._background_similarity(new_f1, old_f2)\n if score >= self.background_threshold:\n return True\n else:\n return False\n\n # Check if Browser-type fingerprints are similar\n else:\n score = self._browser_similarity(new_f1, old_f2)\n if score >= self.browser_threshold:\n return True\n else:\n return False\n\n\n def _background_similarity(self, new_f1, old_f2):\n \"\"\"\n This method computes the similarity between two Background-type fingerprints based on their core features.\n\n Parameters\n --------------\n new_f1 : (Background) Fingerprint\n old_f2 : (Background) Fingerprint\n\n Returns\n --------------\n score : float\n The similarity score between two Background-type fingerprints\n \"\"\"\n score = 0.0\n score += self._ip_check(new_f1.ip_dsts, old_f2.ip_dsts)\n score += self._avg_size_check(new_f1.avg_size, old_f2.avg_size)\n score += self._header_check(new_f1.constant_header_fields, old_f2.constant_header_fields)\n score += self._ua_check(new_f1.user_agent, old_f2.user_agent)\n return score\n\n\n def _browser_similarity(self, new_f1, old_f2):\n \"\"\"\n This method computes the similarity between two Browser-type fingerprints based on their core features.\n\n Parameters\n --------------\n new_f1 : (Browser) Fingerprint\n old_f2 : (Browser) Fingerprint\n\n Returns\n --------------\n score : float\n The similarity score between two Browser-type fingerprints\n \"\"\"\n score = 0.0\n score += self._ua_check(new_f1.user_agent, old_f2.user_agent)\n score += self._language_check(new_f1.language, old_f2.language)\n score += self._ip_check(new_f1.ip_dsts, old_f2.ip_dsts)\n return score\n\n\n def _ip_check(self, new_ip, old_ip):\n \"\"\"\n This method checks if the set of hosts of the old fingerprint is a superset of the new fingerprint's list of hosts.\n\n Parameters\n --------------\n new_host_list: list of string\n old_host2_list : list of string\n\n Returns\n -------------\n result : float\n The result of this similarity function between the HTTP host features.\n \"\"\"\n result = 0.0\n if new_ip == old_ip or new_ip in old_ip or old_ip in new_ip:\n result += 1.0\n return result\n else:\n return result\n\n\n def 
_avg_size_check(self, new_avg, old_avg):\n        \"\"\"\n        This method checks if the average request size of the new fingerprint falls within a certain range\n        from the average size of the old fingerprint.\n\n        Parameters\n        --------------\n        new_avg: int\n        old_avg: int\n\n        Returns\n        --------------\n        result : float\n            The result of this similarity function based on the average size of HTTP requests\n        \"\"\"\n        avg_percentage_error = 30\n        result = 0.0\n        error_margin = (float(old_avg)/ 100) * avg_percentage_error\n\n        if (float(old_avg) + error_margin) >= float(new_avg) >= (float(old_avg) - error_margin):\n            result = 1.0\n            return result\n        elif (float(old_avg) + 2 * error_margin) >= float(new_avg) >= (float(old_avg) - 2 * error_margin):\n            result = 0.5\n            return result\n        else:\n            return result\n\n\n    def _header_check(self, new_const_headers, old_const_headers):\n        \"\"\"\n        This method checks if the set of constant headers of the new fingerprint fully or partially match with the\n        list of constant headers of the old fingerprint.\n\n        Parameters\n        ---------------\n        new_const_headers: list of string\n        old_const_headers: list of string\n\n        Returns\n        ---------------\n        result: float\n            The result of this similarity function based on the constant headers present in HTTP requests.\n        \"\"\"\n        matches = 0\n        result = 0.0\n        for header in new_const_headers:\n            if header in old_const_headers:\n                matches += 1\n        if matches == len(old_const_headers) and len(new_const_headers) == len(old_const_headers):\n            result += 0.5\n            return result\n        elif matches == len(old_const_headers) and len(new_const_headers) > len(old_const_headers):\n            result += 0.5\n            return result\n        else:\n            return result\n\n\n    def _ua_check(self, new_ua, old_ua):\n        \"\"\"\n        This method verifies that two User-Agents are matching.\n\n        Parameters\n        -------------\n        new_ua: string\n        old_ua: string\n\n        Returns\n        -------------\n        result: float\n            Returns 1.0 if there is a match, 0.0 otherwise.\n        \"\"\"\n        result = 0.0\n        if new_ua==\"UnknownUA\":\n            return 0\n        if new_ua == old_ua:\n            result += 1.0\n            return result\n        else:\n            return result\n\n\n    def _language_check(self, new_lang, old_lang):\n        \"\"\"\n        This method verifies that two Accept-Language values are matching. 
(Same check as _ua_check() )\n\n Parameters\n -------------\n new_lang: string\n old_lang: string\n\n Returns\n -------------\n result: float\n Returns 1.0 if there is a match, 0.0 otherwise.\n \"\"\"\n result = 0.0\n if new_lang == old_lang:\n result += 0.5\n return result\n else:\n return result\n","sub_path":"mad-test/fingerprints/detection.py","file_name":"detection.py","file_ext":"py","file_size_in_byte":6885,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"163856208","text":"import pandas as pd\r\nimport numpy as np\r\nimport enchant\r\nfrom nltk import pos_tag\r\nfrom nltk.stem import SnowballStemmer\r\nfrom nltk.stem.wordnet import WordNetLemmatizer\r\npd.options.display.width = 0\r\nfrom gensim.scripts.glove2word2vec import glove2word2vec\r\nfrom gensim.models import KeyedVectors\r\nfrom collections import Counter\r\n\r\n\r\n# stem\r\ndef stem(dev):\r\n stemmer = SnowballStemmer('english')\r\n dev['stem'] = pd.Series()\r\n dev['stem'] = dev['token'].apply(lambda x: stemmer.stem(x))\r\n return dev\r\n\r\n\r\n# lemmatize\r\ndef lemmatize(dev):\r\n wordnet_lemmatizer = WordNetLemmatizer()\r\n dev['lemma'] = pd.Series()\r\n dev['lemma'] = dev['token'].apply(lambda x: wordnet_lemmatizer.lemmatize(x))\r\n return dev\r\n\r\n\r\n# cues affix\r\ndef has_negation_affix(dev):\r\n prefixes = ['dis', 'un', 'ir', 'im', 'in']\r\n suffixes = ['less']\r\n has_negation_affix_list = []\r\n d = enchant.Dict(\"en_US\")\r\n # check all tokens starting with a prefix of 2 letters\r\n for token in dev.token:\r\n if len(token) > 4:\r\n if str(token).startswith(tuple(prefixes[1:])):\r\n has_negation_affix_list.append(d.check(token[2:]))\r\n # check all tokens starting with a prefix of 3 letters\r\n elif str(token).startswith(prefixes[0]):\r\n has_negation_affix_list.append(d.check(token[3:]))\r\n # check all tokens ending with a suffix of 3 letters\r\n elif str(token).endswith(suffixes[0]):\r\n has_negation_affix_list.append(d.check(token[:-4]))\r\n else:\r\n has_negation_affix_list.append(False)\r\n else:\r\n has_negation_affix_list.append(False)\r\n\r\n dev['has_negation_affix'] = has_negation_affix_list\r\n return dev\r\n\r\n\r\n# vector representation\r\ndef load_semantic_model(filepath):\r\n \"\"\"Function to get semantic model\"\"\"\r\n\r\n # this will create an embedding file that can be load by keyedvectors\r\n # you only need to do it once if u have to use the model multiple times in a task\r\n glove2word2vec(glove_input_file=filepath, word2vec_output_file=\"../model/gensim_glove_vectors.txt\")\r\n\r\n word_embedding_model = KeyedVectors.load_word2vec_format(\"../model/gensim_glove_vectors.txt\", binary=False)\r\n\r\n return word_embedding_model\r\n\r\n\r\ndef get_vector_representation(dev, frequency_threshold, modelword_index, num_features, WORD_EMBEDDING_MODEL):\r\n tokens = dev[\"token\"]\r\n list_tokens = list(tokens)\r\n\r\n # 1.\r\n kw_counter = Counter(list_tokens)\r\n\r\n frequent_keywords = []\r\n\r\n for word, count in kw_counter.items():\r\n if count > frequency_threshold:\r\n frequent_keywords.append(word)\r\n\r\n # 2.\r\n featureVec = np.zeros(num_features, dtype=\"float32\")\r\n\r\n nwords = 0\r\n\r\n known_words = []\r\n unknown_words = []\r\n featureVectors = []\r\n\r\n for token in tokens:\r\n if token in modelword_index:\r\n featureVec = np.add(featureVec,\r\n WORD_EMBEDDING_MODEL[token] / np.linalg.norm(WORD_EMBEDDING_MODEL[token]))\r\n\r\n known_words.append(token)\r\n else:\r\n unknown_words.append(token)\r\n featureVec = 
np.average(featureVectors)\r\n\r\n featureVectors.append(featureVec)\r\n nwords = nwords + 1\r\n\r\n #featureVec = np.divide(featureVec, nwords)\r\n\r\n # 3. average feature vector\r\n counter = 0\r\n\r\n devFeatureVecs = np.zeros((len(list_tokens), num_features), dtype=\"float32\")\r\n\r\n for token in tokens:\r\n if counter % 1000 == 0:\r\n print(\"Review %d of %d\" % (counter, len(token)))\r\n\r\n devFeatureVecs[counter] = featureVectors[counter]\r\n counter = counter + 1\r\n print(unknown_words)\r\n devFeatureVecs = np.nan_to_num(devFeatureVecs)\r\n return devFeatureVecs\r\n\r\n\r\ndef add_vectors(dev, frequency_threshold, modelword_index, num_features, WORD_EMBEDDING_MODEL):\r\n vectors = get_vector_representation(dev, frequency_threshold, modelword_index, num_features, WORD_EMBEDDING_MODEL)\r\n dev[\"vector\"] = pd.Series()\r\n dev[\"vector\"] = vectors\r\n return dev\r\n\r\n\r\n\r\ndef get_pos(dev):\r\n dev_list = dev[\"token\"].tolist()\r\n pos_tags_dev = pos_tag([i for i in dev_list if i])\r\n words, tags = zip(*pos_tags_dev)\r\n pos_list = tags\r\n dev[\"pos\"] = pos_list\r\n return dev\r\n\r\ndef get_in_NegExpList(dev):\r\n # true/false basic negations\r\n NegExpList = ['nor', 'neither', 'without', 'nobody', 'none', 'nothing', 'never', 'not', 'no', 'nowhere', 'non']\r\n dev['in_NegExpList'] = dev['token'].apply(lambda x: x.lower() in NegExpList)\r\n return dev\r\n\r\ndef main():\r\n # load dataset\r\n path = \"SEM-2012-SharedTask-CD-SCO-dev-simple.v2.txt\"\r\n dev = pd.read_csv(\"../data/preprocessed_\"+path, sep=\"\\t\")\r\n\r\n # run functions\r\n dev = get_pos(dev)\r\n dev = stem(dev)\r\n dev = has_negation_affix(dev)\r\n dev = get_in_NegExpList(dev)\r\n\r\n WORD_EMBEDDING_MODEL = load_semantic_model(\"../model/glove.6B.100d.txt\")\r\n dev = add_vectors(dev, 5, set(WORD_EMBEDDING_MODEL.wv.index2word), 100, WORD_EMBEDDING_MODEL)\r\n\r\n # save dataset\r\n dev.to_csv(\"../data/featured_\"+path, sep=\"\\t\",\r\n header=['chapter', 'sentence_id', 'token_id', 'token', 'target', \"pos\", \"stem\",\r\n \"has_negation_affix\", \"in_NegExpList\", \"vector\"], index=False)\r\n\r\n\r\nif __name__ == '__main__':\r\n main()","sub_path":"code/feature_extraction.py","file_name":"feature_extraction.py","file_ext":"py","file_size_in_byte":5346,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"524414631","text":"from scrapy import cmdline\nfrom apscheduler.schedulers.blocking import BlockingScheduler\nsched = BlockingScheduler()\n\n\ndef fun_min():\n cmdline.execute('scrapy crawl wei'.split())\nfun_min()\nsched.add_job(fun_min,'interval',days=1)\nsched.start()","sub_path":"spider/爬虫文件/weikouwang/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":246,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"558365381","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on April ~ September. 
2019.\n\n for KT IT Solution Day\n\n@author: SungJun Park, @KT\n\"\"\"\n\nimport os\nfrom konlpy.tag import Okt\nimport gensim\nimport tensorflow as tf\nimport numpy as np\nimport codecs\n\nos.chdir(\"/Users/sungjunpark/POC_BizJarvis_2.0_TA/Data\")\n\ndef read_data(filename):\n with open(filename, 'r' ,encoding='utf-8') as f:\n data = [line.split('\\t') for line in f.read().splitlines()]\n data = data[1:] # header 제외 #\n return data\n\ntrain_data = read_data('ratings_train.txt')\ntest_data = read_data('ratings_test.txt')\n\npos_tagger = Okt()\n\ndef tokenize(doc):\n\n return ['/'.join(t) for t in pos_tagger.pos(doc, norm=True, stem=True)]\n\n\n## training Word2Vec model using skip-gram\ntokens = [tokenize(row[1]) for row in train_data]\nmodel = gensim.models.Word2Vec(size=300 ,sg = 1, alpha=0.025 ,min_alpha=0.025, seed=1234)\nmodel.build_vocab(tokens)\n\nmodel.train(tokens, epochs=model.iter, total_examples=model.corpus_count)\n\n# for epoch in range(30):\n\n# model.train(tokens ,model.corpus_count ,epochs = model.iter)\n# model.alpha -= 0.002\n# model.min_alpha = model.alpha\n\nos.chdir(\"/Users/sungjunpark/POC_BizJarvis_2.0_TA/Word2Vec\")\nmodel.save('Movie_review.model')\nmodel.most_similar('팝콘/Noun' ,topn = 20) ## topn = len(model.wv.vocab)\n","sub_path":"Word2Vec/Word2Vec_train_txt.py","file_name":"Word2Vec_train_txt.py","file_ext":"py","file_size_in_byte":1318,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"473810543","text":"import datetime\n\nclass Mancare:\n\n pass\n\n\nclass Pizza(Mancare):\n\n pizza_per_day = 0\n pret_per_topping = 2\n ingrediente_default = ['rosii', 'mozzarela']\n pret_blaturi = {'Normal': 20, 'Pufos': 22, 'Subtire': 24}\n pret_dimensiune = {'Mare': 1.3, 'Mica': 1}\n pizza_predefinite = {\n \"margherita\": [\"Normal\", \"Mica\",[]],\n \"quatro-stagioni\": [\"Normal\", \"Mica\", [\"ciuperci\", \"sunca\", \"masline\"]],\n \"capriciosa\": [ \"Normal\", \"Mica\", [\"ciuperci\", \"sunca\", \"ardei\"]]\n }\n\n def __init__(self, dimensiune, blat='Normal', topping=[]):\n Pizza.pizza_per_day += 1\n self.dimensiune = dimensiune\n self.blat = blat\n self.ingrediente = Pizza.ingrediente_default.copy()\n self.add_topping(topping)\n\n def update_pret(self):\n self.pret = ((Pizza.pret_blaturi[self.blat] * Pizza.pret_dimensiune[self.dimensiune])\n + self.pret_per_topping * (len(self.ingrediente) - 2)) \\\n * Pizza.is_weekday(datetime.date.today().weekday())\n\n @property\n def description(self):\n return 'Salut! 
Detaliile comenzii tale:\\nDimensiune: {} \\nTip blat: {} \\nIngrediente: {} \\nPret: ' \\\n '{}'.format(self.dimensiune, self.blat, ' '.join(self.ingrediente), round(self.pret, 2))\n\n\n def add_topping(self, more):\n self.ingrediente.extend(more)\n self.update_pret()\n\n def remove_topping(self, de_scos):\n if de_scos in self.ingrediente:\n self.ingrediente.remove(de_scos)\n self.update_pret()\n else:\n print('Ingredientul {} nu exista!'.format(de_scos))\n\n def remove_all_toppings(self):\n self.ingrediente = []\n self.update_pret()\n\n @staticmethod\n def is_weekday(day):\n if day == 5 or day == 6:\n return 1.1\n elif day == 1:\n return 0.8\n else:\n return 1\n\n @classmethod\n def create_pizza(cls, pizza_primita):\n blat = Pizza.pizza_predefinite[pizza_primita][0]\n dimensiune = Pizza.pizza_predefinite[pizza_primita][1]\n ingrediente = Pizza.pizza_predefinite[pizza_primita][2]\n return cls(dimensiune, blat, ingrediente)\n\n\nclass Paste(Mancare):\n\n paste_predefinite = {\n \"Paste Carbonara\": 20,\n \"Penne Primavera\": 22,\n }\n\n def __init__(self, nume_paste):\n self.nume_paste = nume_paste\n self.pret = Paste.paste_predefinite[nume_paste]\n\n def description(self):\n return 'Salut! Detaliile comenzii tale: \\nNume: {} \\nPret: {}'.format(self.nume_paste, round(self.pret, 2))\n\n\nclass Comanda:\n\n id_comanda = 0\n\n def __init__(self, client, produse = []):\n self.produse = produse\n self.client = client\n self.pret = self.calculate_price()\n Comanda.id_comanda += 1\n\n def calculate_price(self):\n pret_final = 0\n\n if self.client.card:\n reducere_fidelitate = 0.8\n else:\n reducere_fidelitate = 1\n\n for produs in self.produse:\n pret_final = pret_final + produs.pret\n\n return pret_final * reducere_fidelitate\n\n @property\n def detalii(self):\n return 'Salut, {}! 
\\nPret comanda: {}'.format(self.client.nume, round(self.pret, 2))\n\n\nclass Client:\n\n def __init__(self, nume, prenume, adresa, card=False):\n self.nume = nume\n self.prenume = prenume\n self.adresa = adresa\n self.card = card\n\n @property\n def detalii(self):\n return '{} {} \\n{}\\n{}'.format(self.prenume, self.nume, self.adresa, self.card)\n\n\n# cont1 = Client(\"Andrei\", \"Raileanu\", \"Aviatorilor 8A\", card=True)\n# pizza1 = Pizza.create_pizza(\"capriciosa\")\n# paste1 = Paste(\"Paste Carbonara\")\n#\n# comanda1 = Comanda(cont1,[pizza1,paste1])\n# print(comanda1.detalii)\n\n# TODO: Citeste din csv / scriere in csv\n\n\nclass TestPizza:\n\n def setup(self):\n self.cont1 = Client(\"Andrei\", \"Raileanu\", \"Aviatorilor 8A\", card=True)\n\n def teardown(self):\n Comanda.id_comanda = 0\n del self.cont1\n\n def test_pizza_topping_default(self):\n pizza1 = Pizza.create_pizza(\"margherita\")\n assert pizza1.ingrediente == ['rosii', 'mozzarela']\n\n def test_pizza_add_topping(self):\n pizza2 = Pizza.create_pizza(\"capriciosa\")\n pizza2.add_topping(['branza'])\n assert pizza2.ingrediente == [\"rosii\", \"mozzarela\", \"ciuperci\", \"sunca\", \"ardei\", \"branza\"]\n\n def test_pizza_remove_all(self):\n pizza3 = Pizza.create_pizza(\"capriciosa\")\n pizza3.remove_all_toppings()\n assert pizza3.ingrediente == []\n\n def test_paste_pret(self):\n paste1 = Paste(\"Paste Carbonara\")\n assert paste1.pret == 20\n\n def test_cont(self):\n cont2 = Client(\"Andrei\", \"Raileanu\", \"Aviatorilor 8A\")\n assert cont2.card == False\n\n def test_comanda_aplicare_reducere(self):\n\n pizza1 = Pizza.create_pizza(\"capriciosa\")\n paste1 = Paste(\"Paste Carbonara\")\n comanda1 = Comanda(self.cont1,[pizza1,paste1])\n assert round(comanda1.calculate_price(),2) == 36.8\n\n def test_comanda_verificare_id(self):\n pizza1 = Pizza.create_pizza(\"capriciosa\")\n paste1 = Paste(\"Paste Carbonara\")\n comanda1 = Comanda(self.cont1, [pizza1, paste1])\n comanda2 = Comanda(self.cont1, [paste1])\n assert comanda2.id_comanda == 2\n","sub_path":"pizza.py","file_name":"pizza.py","file_ext":"py","file_size_in_byte":5332,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"454831356","text":"#-*- coding: utf-8 -*-\nimport Tkinter\nimport random\nflag=1\n\nclass gui:\n def __init__(self,master):\n global flag\n flag = 1\n self.master = master\n self.master.geometry('300x300')\n self.my_frame=Tkinter.Frame(root,width=120,heigh=120)\n self.my_frame2=Tkinter.Frame(root,width=180,heigh=300)\n self.my_frame3=Tkinter.Frame(root,width=300,heigh=60)\n self.my_frame.place(x=0,y=0)\n self.my_frame2.place(x=130,y=0)\n self.my_frame3.place(x=0,y=150)\n self.mylabel()\n self.mybutton(0,0,master=self.my_frame2,w=80,text='New Game')\n self.mybutton(0,0,self.my_frame)\n self.mybutton(40,0,self.my_frame)\n self.mybutton(80,0,self.my_frame)\n self.mybutton(0,40,self.my_frame)\n self.mybutton(40,40,self.my_frame)\n self.mybutton(80,40,self.my_frame)\n self.mybutton(0,80,self.my_frame)\n self.mybutton(40,80,self.my_frame)\n self.mybutton(80,80,self.my_frame)\n\n self.spisok={}\n self.var_g(master)\n self.master.mainloop()\n def var_g(self,master):\n \"\"\"function create Radiobutton \"\"\"\n self.var=Tkinter.IntVar()\n self.var.set(2)\n self.rbutton1=Tkinter.Radiobutton(self.my_frame2,text='Computer',variable=self.var,value=1)\n self.rbutton2=Tkinter.Radiobutton(self.my_frame2,text='Players ',variable=self.var,value=2)\n self.rbutton1.place(x=0,y=50)\n self.rbutton2.place(x=0,y=70)\n def 
mybutton(self,x_b,y_b,master,w=40,h=40,text=''):\n \"\"\"function create button\"\"\"\n self.mybutton1=Tkinter.Button(master,text=text)\n self.mybutton1.place(x=x_b,y=y_b,width=w, height=h)\n if self.mybutton1['text']=='New Game':\n self.mybutton1.bind('<1>', self.myreset)\n else:\n self.mybutton1.bind('<1>', self.logic)\n self.slaves=master.place_slaves()\n def mylabel(self):\n \"\"\"function create Label \"\"\"\n self.mylabel1=Tkinter.Label(self.my_frame3,font='arial 18',text='')\n self.mylabel1.place(x=0,y=0)\n\n def player_ev(self,ev):\n \"\"\"function player-player\"\"\"\n global flag\n if flag==1:\n ev.widget['text']='X'\n flag=0\n elif flag==0:\n ev.widget['text']='0'\n flag=1\n t=0\n for itim in self.slaves:\n t+=1\n self.spisok[t]=itim['text']\n t=0\n self.win()\n def comp_hod(self,ev):\n \"\"\"function player-comp\"\"\"\n global flag\n if flag==1:\n ev.widget['text']='X'\n flag=2\n elif flag==2:\n self.spisok_pust=[i for i in self.slaves if i['text']=='']\n self.comp=random.choice(self.spisok_pust)\n self.comp['text']='0'\n flag=1\n t=0\n for itim in self.slaves:\n t+=1\n self.spisok[t]=itim['text']\n t=0\n self.win()\n\n def logic(self,ev):\n \"\"\"function logic\"\"\"\n if not ev.widget['text']:\n if self.var.get()==2:\n self.player_ev(ev)\n else:\n self.comp_hod(ev)\n def myreset(self,ev):\n \"\"\"function Start New Game\"\"\"\n self.__init__(root)\n def win(self):\n \"\"\"function Who is winner.\"\"\"\n global flag\n if self.spisok[1]==self.spisok[2]==self.spisok[3]!='' or self.spisok[4]==self.spisok[5]==self.spisok[6]!=''\\\n or self.spisok[7]==self.spisok[8]==self.spisok[9]!='' or self.spisok[1]==self.spisok[4]==self.spisok[7]!=''\\\n or self.spisok[2]==self.spisok[5]==self.spisok[8]!='' or self.spisok[3]==self.spisok[6]==self.spisok[9]!=''\\\n or self.spisok[1]==self.spisok[5]==self.spisok[9]!=''or self.spisok[3]==self.spisok[5]==self.spisok[7]!='':\n if flag==1 and self.var.get()==2:\n self.mylabel1['text']='Player 2 is winner'\n elif flag==0:\n self.mylabel1['text']='Player 1 is winner'\n elif flag==1:\n self.mylabel1['text']='Comp is winner,\\n you are loser=)'\n elif flag==2:\n self.mylabel1['text']='Player 1 is winner'\n\n flag = 3\n elif list(self.spisok.values()).count(\"\")==0:\n self.mylabel1['text']='No winner'\n \n\n\n\nif __name__ == '__main__':\n root=Tkinter.Tk()\n doom = gui(root)\n\n","sub_path":"new_game.py","file_name":"new_game.py","file_ext":"py","file_size_in_byte":4359,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"273431217","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Jan 23 15:21:13 2020\r\n\r\n@author: Edgar López\r\n\"\"\"\r\n\r\ns = 'zyxwvutsrqponmlkjihgfedcba'\r\nprint('Given string: ' + s)\r\n\r\nprechar = s[0]\r\nsubs1 = s[0]\r\nsubs2 = ''\r\n\r\nfor char in s[1:len(s)+1]:\r\n# print('char',char, 'pre', prechar, 'sub1', subs1, 'sub2', subs2)\r\n if char >= prechar:\r\n subs1 += char\r\n prechar = char\r\n \r\n #print(subs1)\r\n else:\r\n prechar = char\r\n if len(subs1) > len(subs2):\r\n subs2 = subs1\r\n subs1 = char\r\n #print(subs2)\r\n continue\r\n #if len(subs1) == len(subs2):\r\n subs1 = prechar\r\nif len(subs2) >= len(subs1): \r\n print('Longest substring in alphabetical order is: '+subs2)\r\nelse:\r\n print('Longest substring in alphabetical order is: '+subs1)","sub_path":"long-alph-sub.py","file_name":"long-alph-sub.py","file_ext":"py","file_size_in_byte":819,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} 
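Editor's note: the long-alph-sub.py record above finds the longest alphabetically ordered substring by juggling two buffers (subs1/subs2) plus a final comparison. A minimal single-pass sketch of the same idea follows; it is an illustration only, not part of the dataset, and it assumes the same hard-coded, non-empty input string s as the record:

    # Minimal sketch of the longest-alphabetical-substring scan (illustrative, not a dataset record).
    s = 'zyxwvutsrqponmlkjihgfedcba'
    best = cur = s[0]
    for prev, ch in zip(s, s[1:]):
        # Extend the current run while characters stay in non-decreasing order,
        # otherwise restart the run at the current character.
        cur = cur + ch if ch >= prev else ch
        if len(cur) > len(best):
            best = cur
    print('Longest substring in alphabetical order is: ' + best)

Keeping a single best/cur pair updates the answer as the scan goes, which removes the duplicated end-of-loop comparison the original script needs.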
+{"seq_id":"638045816","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\n\ndef get_psi(ds1, ds2):\n \"\"\"\n given the two ds segments computes the cosine and sine of the angle between the matching \n \"\"\"\n hyp = (ds1**2 + ds2**2)**0.5\n cos_psi = ds1 / hyp\n sin_psi = ds2 / hyp\n \n return cos_psi, sin_psi\n \ndef get_delta_s(p1, p2):\n \"\"\"\n computes the ds between two points on a curve \n \"\"\"\n x1, y1 = p1\n x2, y2 = p2\n ds = ((x1-x2)**2 + (y1-y2)**2)**0.5\n return ds\n\ndef get_kappa_and_derivatives(a):\n \"\"\"\n returns the curvature and gradients of a curve segment \n \"\"\"\n dx_dt = np.gradient(a[:, 0])\n dy_dt = np.gradient(a[:, 1])\n d2x_dt2 = np.gradient(dx_dt)\n d2y_dt2 = np.gradient(dy_dt)\n curvature = np.abs(d2x_dt2 * dy_dt - dx_dt * d2y_dt2) / (dx_dt * dx_dt + dy_dt * dy_dt)**1.5\n return curvature, [dx_dt, dy_dt, d2x_dt2, d2y_dt2] \n\n\ndef get_segments_from_pt_list(p1):\n seg_list = []\n curv_list = []\n for i in range(1, len(p1)):\n strt = p1[i-1]\n end = p1[i]\n ds = get_delta_s(strt, end)\n seg_list.append(ds)\n \n return seg_list\n\ndef get_unique_elements(p1):\n \"\"\"\n returns only unique pts from the pt list \n \"\"\"\n pint = []\n pset = set()\n for pi in p1:\n if (pi[0], pi[1]) in pset:\n pass\n else:\n pint.append(pi)\n pset.add((pi[0], pi[1]))\n return np.asarray(pint)\n \ndef get_tup_list(i, j):\n \"\"\"\n get valid tup list from all possible tuples \n \"\"\"\n str_tup_list = [(i, i), (i-1, i), (i-1, i), (i-2, i), (i-3, i), (i-3, i), (i-2, i), (i-1, i), (i-1, i)]\n end_tup_list = [(j-1, j), (j, j), (j-1, j), (j-1, j), (j-1, j), (j-2, j), (j-3, j), (j-3, j), (j-2, j)]\n s1_list = []\n s2_list = []\n for (str_tup, end_tup) in zip(str_tup_list, end_tup_list):\n s1, s2 = str_tup\n u1, u2 = end_tup\n if s1<0 or s2<0 or u1<0 or u2<0:\n pass\n else:\n s1_list.append(str_tup)\n s2_list.append(end_tup)\n \n return s1_list, s2_list\n \n\ndef plot_cost_map(i, j, p1_int, p2_int):\n \n \"\"\"\n plots the possible cost maps of the decision to be taken from i, j \n \"\"\"\n\n s1_list, s2_list = get_tup_list(i, j)\n\n for (str_tup, end_tup) in zip(s1_list, s2_list):\n i1, i2 = str_tup ## here we match the segments i1-i2 in the first curve with j1-j2 in the second curve \n j1, j2 = end_tup \n #\n print (f\"{i1} to {i2} in curve 1\")\n print (f\"{j1} to {j2} in curve 2 \")\n p1_pts = p1_int[i1:i2+1] ## these will be the corresponding pts in both the curve \n p2_pts = p2_int[j1:j2+1]\n\n f, a = plt.subplots(1, 2, figsize = (12, 8))\n a[0].scatter(p1_int[:, 0], p1_int[:, 1], c = \"b\") ## i is blue \n a[1].scatter(p2_int[:, 0], p2_int[:, 1], c = \"y\") ## j is yellow \n a[0].plot(p1_int[:, 0], p1_int[:, 1])\n a[0].set_axis_off()\n a[1].set_axis_off()\n a[1].plot(p2_int[:, 0], p2_int[:, 1])\n #print (p1_pts)\n if len(p1_pts) == 1:\n a[0].scatter(p1_pts[:, 0], p1_pts[:, 1], c = \"r\")\n else:\n a[0].plot(p1_pts[:, 0], p1_pts[:, 1], c = \"r\")\n if len(p2_pts) == 1:\n a[1].scatter(p2_pts[:, 0], p2_pts[:, 1], c = \"r\")\n else: \n a[1].plot(p2_pts[:, 0], p2_pts[:, 1], c = \"r\")\n\n\n\n plt.show()\n plt.close(\"all\")\n \ndef get_tangent_vector(a):\n \"\"\"\n returns the tangent vectors of a curve segment \n \"\"\"\n dx_dt = np.gradient(a[:, 0])\n dy_dt = np.gradient(a[:, 1])\n \n \n return np.asarray([dx_dt, dy_dt]).T\n\ndef matching_cost(i1, i2, j1, j2, p1_int, p2_int, kappa1, kappa2, R):\n\n \"\"\"\n computes the cost of matching the given curve segments \n \"\"\"\n \n pt1_list = p1_int[i1:i2+1] ## these will be the corresponding pts in both the curve \n 
pt2_list = p2_int[j1:j2+1]\n \n ## pt1_list, pt2_list\n if len(pt1_list) == 1:\n ## do something \n ds2 = get_delta_s(pt2_list[0], pt2_list[-1])\n cost = ds2*(1 + R*abs(kappa2[j2]+kappa2[j2-1])*0.5)\n\n elif len(pt2_list) == 1:\n ## do something \n ds1 = get_delta_s(pt1_list[0], pt1_list[-1])\n cost = ds1*(1 + R*abs(kappa1[i2-1]+kappa1[i2])*0.5)\n\n elif len(pt1_list) > 1 and len(pt2_list) > 1 :\n\n ds_dt1 = get_tangent_vector(pt1_list) ## compute\n ds_dt2 = get_tangent_vector(pt2_list) ## compute \n\n Ta = ds_dt1[0]\n Tb = ds_dt1[-1]\n\n Tb_ = ds_dt2[-1]\n\n normTa = np.linalg.norm(Ta)\n normTb = np.linalg.norm(Tb)\n normTb_ = np.linalg.norm(Tb_)\n \n if normTb == 0.0 or normTa == 0.0 or normTb_ == 0:\n print (pt1_list, pt2_list)\n \n cos_theta1 = np.dot(Ta, Tb)/(normTa*normTb)\n cos_theta2 = np.dot(Ta, Tb_)/(normTa*normTb_)\n \n \n dtheta1 = np.arccos(np.clip(cos_theta1, -1.0, 1.0))\n dtheta2 = np.arccos(np.clip(cos_theta2, -1.0, 1.0))\n \n \n \n ds1 = get_delta_s(pt1_list[0], pt1_list[-1])\n ds2 = get_delta_s(pt2_list[0], pt2_list[-1])\n\n cost = abs(ds1-ds2) + R*abs(dtheta1 - dtheta2)\n #print (pt1_list, pt2_list)\n \n return cost \n\n\ndef plot_points(p1, p2):\n f, a = plt.subplots(1, 3, figsize = (12, 4))\n a[0].invert_yaxis()\n a[0].scatter(p1[:, 0], p1[:, 1], c = \"r\")\n a[0].set_axis_off()\n a[1].invert_yaxis()\n a[1].scatter(p2[:, 0], p2[:, 1], c = \"y\")\n a[1].set_axis_off()\n a[2].invert_yaxis()\n a[2].scatter(p2[:, 0], p2[:, 1], c = \"y\")\n a[2].scatter(p1[:, 0], p1[:, 1], c = \"r\")\n a[2].set_axis_off()\n plt.show()\n\n\ndef get_matches(dist, predecessor):\n\n src1, src2 = dist.shape[0]-1, dist.shape[1]-1\n #c = 0\n global_path = [(src1, src2)]\n while((src1, src2) != (0, 0)):\n global_path.append(predecessor[src1][src2])\n src1, src2 = predecessor[src1][src2]\n #c = c+1\n #if c > 2500:\n # break\n return global_path\n\n\ndef plot_matches(p1_int, p2_int, global_path, offset, nstart, npts):\n p1_x , p1_y = p1_int[:, 0] + offset, p1_int[:, 1]\n f , a = plt.subplots(1, 1, figsize = (16 , 8)) \n a.invert_yaxis()\n a.scatter(p2_int[:, 0], p2_int[:, 1], c = \"y\")\n a.scatter(p1_x, p1_y, c = \"r\")\n a.set_axis_off()\n\n arr_p = {\"width\": 1e-6, \"head_width\":1e-4}\n for (m1, m2) in global_path[nstart:nstart+npts]:\n a1x, a1y = p1_x[m1-1], p1_y[m1-1]\n dx, dy = p2_int[m2-1][0] - a1x, p2_int[m2-1][1] - a1y\n a.arrow(a1x, a1y, dx, dy, alpha = 0.25, fc = \"red\", **arr_p )\n plt.show()\n","sub_path":"curve_matching.py","file_name":"curve_matching.py","file_ext":"py","file_size_in_byte":6602,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"483383643","text":"# -*- coding:utf-8 -*-\n__author__ = 'qiyingzhang'\n\nimport sys\nreload(sys)\nsys.setdefaultencoding('utf-8')\nimport pymongo\nfrom pymongo import MongoClient\nimport json\nimport re\nimport pandas as pd\n\ndef comment_input_mongoe(address):\n #get client\n db_client=MongoClient('localhost',27017);\n \n #get database\n db = db_client[\"Collections\"]\n \n commenttable = db['comment']\n \n input_file = open(address, 'r')\n for line in input_file:\n get_json = json.loads(line)\n commenttable.insert(get_json)\n input_file.close()\n\nhome = \"../../shared/data/comments_\"\nfor i in range(32):\n address = home + str(i).zfill(12)\n comment_input_mongoe(address)\n","sub_path":"module/spyder/input.py","file_name":"input.py","file_ext":"py","file_size_in_byte":684,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} 
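The comment importer in input.py above calls insert() once per JSON line, which costs one MongoDB round trip per document. With pymongo 3+ the same load can be batched through insert_many; a sketch under that assumption (bulk_import and batch_size are illustrative names, while the connection details and target collection follow the record above):

from pymongo import MongoClient
import json

def bulk_import(address, batch_size=1000):
    # same target collection as in the record above
    coll = MongoClient('localhost', 27017)['Collections']['comment']
    batch = []
    with open(address, 'r') as fp:
        for line in fp:
            batch.append(json.loads(line))
            if len(batch) >= batch_size:
                coll.insert_many(batch)  # one round trip per batch instead of per document
                batch = []
    if batch:
        coll.insert_many(batch)  # flush the final partial batch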
+{"seq_id":"650575638","text":"# -*- coding: utf-8 -*-\n##############################################################################\n#\n# Programa realizado por, Jeison Pernía y Jonathan Reyes en el marco\n# del plan de estudios de la UNEFA, como TRABAJO ESPECIAL DE GRADO,\n# con el fin de optar al título de Ingeniero de Sistemas.\n# \n# Visitanos en http://juventudproductivabicentenaria.blogspot.com\n#\n##############################################################################\n\nfrom openerp.osv import fields, osv\nfrom openerp import http,tools, api,SUPERUSER_ID\nimport sys\nreload(sys)\nsys.setdefaultencoding('UTF8')\n\n\n#~ def filtrar_carreras_regimen_general(self,cr,uid,ids,context=None):\n #~ objeto_users=self.pool.get('res.users')\n #~ id_users=objeto_users.search(cr,uid,[('id','=',uid)],context=context)\n #~ data_users=objeto_users.browse(cr,uid,id_users)\n #~ value={}\n #~ value={\n #~ 'carrera_id': data_users['carrera_id'].id,\n #~ 'turno': data_users['coordinacion_id'].regimen,\n #~ }\n #~ return {'value':value}\n\n\n\nclass unefa_planificacion_semestre(osv.osv):\n _name = 'unefa.planificacion_semestre'\n _rec_name = 'nombre'\n \n def default_carrera(self, cr, uid, ids, context=None):\n users_obj=self.pool.get('res.users')\n users_ids=users_obj.search(cr,uid,[('id','=',uid)],context=context)\n users_data=users_obj.browse(cr,uid,users_ids)\n return int(users_data['carrera_id'])\n \n \n _columns = {\n 'nombre': fields.char(\n 'Nombre de planificación',\n required=True,\n size=80,\n states={'aprobado': [('readonly', True)]}\n ),\n 'carrera_id': fields.many2one(\n 'unefa.carrera',\n 'Carrera',\n required=False,\n readonly=True,\n ),\n 'periodo_id': fields.many2one(\n 'unefa.conf.periodo_academico',\n 'Período Académico',\n required=True,\n states={'aprobado': [('readonly', True)]}\n ),\n 'observaciones': fields.text(\n 'Observaciones',\n states={'aprobado': [('readonly', True)]}\n ),\n 'actividad_ids': fields.one2many(\n 'unefa.cronograma', \n 'planif_id', \n 'Actividad',\n required=True,\n states={'aprobado': [('readonly', True)]}\n ),\n 'state': fields.selection([\n ('borrador', 'Borrador'),\n ('aprobado', 'Aprobado'),], \n 'Estatus', \n readonly=True, \n help=\"Este es es estado actual del cronograma.\",\n ),\n 'turno': fields.selection([\n ('nocturno', 'NOCTURNO'),\n ('diurno', 'DIURNO'),], \n 'Turno', \n required=True,\n readonly=True,\n help=\"Este es es estado actual del cronograma.\",\n ),\n 'cronograma_ids': fields.many2many('ir.attachment', 'cronograma_attachment_rel', 'cronograma_id', 'attachment_id', 'Descargar Cronograma'),\n }\n \n _defaults = {\n 'carrera_id': default_carrera,\n 'turno': 'nocturno',\n 'create_date': fields.datetime.now,\n }\n \n _order = 'create_date desc, id desc'\n \n #~ def default_carrera(self,cr,uid,ids,context=None):\n #~ return filtrar_carreras_regimen_general(self, cr, uid, ids)\n \n def onchange_peridodo_id(self, cr, uid, ids,context=None):\n list_periodo=[]\n domain={}\n planifiacion_id=self.search(cr,uid,[],context=context)\n for i in self.browse(cr,uid,planifiacion_id):\n list_periodo.append(i.periodo_id)\n periodo_obj=self.pool.get('unefa.conf.periodo_academico')\n for n in list_periodo:\n periodo_ids=periodo_obj.search(cr,uid,[('id','!=',int(n))],context=context)\n domain = {'periodo_id': [('id', '=', list(periodo_ids))]}\n return {'domain': domain}\n \n def onchange_cronograma(self, cr, uid, ids, context=None):\n res={}\n cronograma_obj=self.pool.get('unefa.cronograma_actividades')\n 
cronograma_ids=cronograma_obj.search(cr,uid,[('activo','=','True')],context=context)\n cronograma_datos=cronograma_obj.browse(cr,uid,cronograma_ids,context=context)\n list_actividad=[]\n for i in cronograma_datos:\n list_actividad.append([0,False,{'actividad_id':i.id }])\n res={\n 'actividad_ids':list_actividad,\n }\n return {'value':res}\n \n def aprobar_planificacion(self, cr, uid, ids, context=None):\n list_partners=[]\n mail_message_obj=self.pool.get('mail.message')\n usuarios_obj=self.pool.get('res.users')\n \n for registros in self.browse(cr,uid,ids):\n periodo=registros.periodo_id.periodo_academico\n usuarios_ids=usuarios_obj.search(cr,uid,[('carrera_id','=',registros.carrera_id.id),('regimen','=',registros.turno)])\n usuarios_data=usuarios_obj.browse(cr,uid,usuarios_ids)\n for usuario in usuarios_data:\n list_partners.append(usuario.partner_id.id)\n \n values={\n 'body': 'Ha sido aprobada la planificación semestral para el período '+periodo+'.', \n 'model': 'unefa.planificacion_semestre', \n 'res_id': ids[0], \n 'parent_id': False, \n 'subtype_id': False, \n 'author_id': uid, \n 'type': 'notification', \n 'notified_partner_ids': [[6, False, list_partners]], \n 'subject': False}\n \n mail_message_obj.create(cr,SUPERUSER_ID ,values)\n \n return self.write(cr, uid, ids, {'state':'aprobado'})\n \n def create(self,cr,uid,vals,context=None):\n vals.update({\n 'state':'borrador',\n })\n ids=self.search(cr,uid,[('periodo_id','=',vals['periodo_id'])])\n if len(ids)==1:\n peridodo_obj=self.pool.get('unefa.conf.periodo_academico')\n periodo_ids=peridodo_obj.search(cr,uid,[('id','=',vals['periodo_id'])],context=context)\n periodo_datos=peridodo_obj.browse(cr,uid,periodo_ids,context=context)\n raise osv.except_osv(('Error !'),\n ('Ya existe un Cronograma Académico con el periodo '+periodo_datos['periodo_academico'].upper()))\n return super(unefa_planificacion_semestre,self).create(cr,uid,vals,context=context)\n \n \nclass cronograma(osv.osv):\n _name = 'unefa.cronograma'\n\n \n _columns = {\n 'planif_id': fields.many2one(\n 'unefa.planificacion_semestre',\n 'Actividad'\n ),\n 'actividad_id': fields.many2one(\n 'unefa.cronograma_actividades',\n 'Actividad',\n readonly=True,\n ),\n 'fecha_desde': fields.date(\n 'Fecha Inicio',\n required=True,\n ),\n 'fecha_hasta': fields.date(\n 'Fecha Final',\n required=True,\n ),\n 'cronograma_ids': fields.many2many('ir.attachment', 'cronograma_attachment_rel', 'cronograma_id', 'attachment_id', 'Descargar Cronograma'),\n }\n \n \n \n \n def onchange_fecha(self, cr, uid, ids, fecha_desde, fecha_hasta, periodo_id, context=None):\n res={}\n warning={}\n periodo_obj=self.pool.get('unefa.conf.periodo_academico')\n periodo_ids=periodo_obj.search(cr,uid,[('id','=',int(periodo_id))],context=context)\n periodo_data=periodo_obj.browse(cr,uid,periodo_ids,context=context)\n for fecha in periodo_data:\n fecha_ini_periodo = fecha.fecha_inicio\n fecha_fin_periodo = fecha.fecha_fin\n if fecha_desde:\n if cmp(fecha_desde,fecha_hasta)==1:\n res={\n 'fecha_hasta':'',\n }\n warning={\n 'title':('Error de fechas'),\n 'message':('La fecha de inicio no puede ser mayo a la fecha final'),\n }\n if cmp(fecha_hasta, fecha_ini_periodo) == -1 or cmp(fecha_hasta, fecha_fin_periodo) == 1:\n warning={\n 'title':('Error'),\n 'message':('La fecha para la actividad no puede \\\n ser menor a la fecha de inicio, ni \\\n mayor a la fecha final del período académico.'),\n }\n res={\n 'fecha_hasta':'',\n } \n else:\n res={\n 'fecha_hasta':'',\n }\n warning={\n 'title':('Error'),\n 'message':('Debe 
seleccionar una fecha de inicio'),\n }\n \n return {'warning':warning,'value':res}\n \n def validar_fecha_cronograma(self, cr, uid, ids, fecha_desde,periodo_id, context=None):\n warning={}\n value={}\n if not periodo_id: \n warning={\n 'title':('Aviso!'),\n 'message':('Debe seleccionar el período académico'),\n }\n value={\n 'fecha_desde':''\n }\n return {'warning':warning,'value':value}\n \n periodo_obj=self.pool.get('unefa.conf.periodo_academico')\n periodo_ids=periodo_obj.search(cr,uid,[('id','=',int(periodo_id))],context=context)\n periodo_data=periodo_obj.browse(cr,uid,periodo_ids,context=context)\n for fecha in periodo_data:\n fecha_ini_periodo = fecha.fecha_inicio\n fecha_fin_periodo = fecha.fecha_fin\n if cmp(fecha_desde, fecha_ini_periodo) == -1 or cmp(fecha_desde, fecha_fin_periodo) == 1:\n warning={\n 'title':('Error'),\n 'message':('La fecha para la actividad no puede ser menor a la fecha de inicio, ni mayor a la fecha final del período académico.'),\n }\n value={\n 'fecha_desde':''\n }\n return {'warning':warning,'value':value}\n","sub_path":"unefa_planificacion_semestre/models/planificacion_semestre/unefa_planificacion_semestre.py","file_name":"unefa_planificacion_semestre.py","file_ext":"py","file_size_in_byte":11016,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"141141693","text":"\n# coding: utf-8\n\n# 1st Case study:\n# \n# The case study is about identifying a bank which wishes to use dimensions of the bank note to identify if it is fake or not. Data were extracted from images that were taken from genuine and forged banknote-like specimens. For digitization, an industrial camera usually used for print inspection was used. The final images have 400x 400 pixels. Due to the object lens and distance to the investigated object gray-scale pictures with a resolution of about 660 dpi were gained. Wavelet Transform tool were used to extract features from images.\n# \n# The objective of the case study is to provide a solution to the bank so that they identify fake bank notes at the deposit point.\n# \n# Attribute Information:\n# \n# variance of Wavelet Transformed image (continuous) \n# skewness of Wavelet Transformed image (continuous) \n# curtosis of Wavelet Transformed image (continuous) \n# entropy of image (continuous) \n# class (integer) \n# \n# Link to the case file: \n# \n# \"http://archive.ics.uci.edu/ml/machine-learning-databases/00267/\\data_banknote_authentication.txt\"\n# \n# \n# \n# The points distribution for this case is as follows:\n# \n# 1. Data pre-processing - Understand the data and treat missing values, outliers\n# \n# 2. Understanding the attributes - Find relationship between different attributes (Independent variables) and choose carefully which all attributes have to be a part of the analysis and why \n# \n# 3. Model the data using Naive Bayes & find its accuracy & confusion matrix \n# \n# 4. Use Support vector machines and use grid search (try C values - 0.01, 0.05,1 and kernel = linear) and find out the best hyper parameters. \n# \n# 5. 
Tabulate the accuracy for both SVM & Naive Bayes and choose the best model (2.5 points)\n# \n# \n\n# In[1]:\n\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport mymodules as mm\nimport seaborn as sns\n\n\n# In[2]:\n\n\nvariables = ['variance','skewness','curtosis','entropy','class']\ndf = pd.read_csv(\"http://archive.ics.uci.edu/ml/machine-learning-databases/00267/data_banknote_authentication.txt\", \n                 names=variables)\n\n\n# In[3]:\n\n\ndf.sample(5)\n\n\n# In[4]:\n\n\ndf.info()\n\n\n# In[5]:\n\n\ndf.describe().transpose()\n\n\n# ## Observations:\n# \n# No Missing Values\n# \n# The data does have outliers\n\n# In[6]:\n\n\noutlier_indexes = mm.find_outliers_indexes(df)\n\n\n# In[7]:\n\n\ndf[df['curtosis']>10.31]['curtosis'].count()\n\n\n# In[8]:\n\n\ndf[df['curtosis']>10.31]['class'].value_counts()\n\n\n# In[9]:\n\n\ndf[df['entropy']<-6.63]['entropy'].count()\n\n\n# In[10]:\n\n\ndf[df['entropy']<-6.63]['class'].value_counts()\n\n\n# In[11]:\n\n\nsns.pairplot(df)\n\n\n# In[12]:\n\n\ndf['class'].hist()\n\n\n# In[13]:\n\n\ndf['class'].value_counts()\n\n\n# ## Observations\n# \n# The curtosis & entropy features have outlier values.\n# \n# In total, 92 of the 1372 records have outlier values.\n# \n# Curtosis is right-skewed, with a long right tail (its max value is 17.x, while the right tail ends at 10.x)\n# \n# A total of 59 curtosis records lie beyond the right tail\n# \n# Entropy is left-skewed, with a long left tail (its min value is -8.x, while the left tail ends at -6.x)\n# \n# A total of 33 entropy records lie beyond the left tail\n# \n# Based on the data, it's clear that the curtosis and entropy outlier records are mutually exclusive\n# \n# The data is well distributed between the 2 classes\n# \n# ### Outlier Treatment\n# \n# The number of outliers is small, so removing them should not have any major impact on the dataset.\n# \n# A total of 74 records for Class 1 will be dropped\n# \n# A total of 18 records for Class 0 will be dropped\n# \n# The current distribution of data is 55.5% Class 0 & 44.5% Class 1\n# \n# After outlier treatment the distribution will be 58% & 42%\n\n# In[14]:\n\n\ndf_new = df.drop(outlier_indexes)\n\n\n# In[15]:\n\n\ndf_new.info()\n\n\n# In[16]:\n\n\npredictors = ['variance','skewness','curtosis','entropy']\ntarget = ['class']\n\n\n# In[17]:\n\n\ncorr=df_new[predictors].corr()\ncorr\n\n\n# In[18]:\n\n\nsns.heatmap(corr,cmap='plasma',annot=True)\n\n\n# In[19]:\n\n\ndf_new.var()\n\n\n# ## Observations\n# \n# Skewness & curtosis are strongly correlated (negative correlation)\n# \n# Let's use the VIF method to determine whether we need to eliminate one of them\n\n# In[20]:\n\n\nmm.eliminate_predictors_using_vif(predictors, df_new, 5)\n\n\n# ## Observations\n# \n# The VIF didn't find any variables to eliminate, so let's run the model with both skewness & curtosis. 
\n# \n# We will do one more run using only one of them.\n\n# # Data Standardization\n\n# In[21]:\n\n\nfrom sklearn.preprocessing import StandardScaler\nstd_scale = StandardScaler()\n\n\n# In[22]:\n\n\nfrom sklearn import model_selection\nX = df_new[predictors]\nY = df_new[target]\n# Split train & test\nX_train, X_test, Y_train, Y_test = model_selection.train_test_split(X, Y, test_size=0.3, random_state=10)\n\n\n# In[23]:\n\n\nX_train = pd.DataFrame(std_scale.fit_transform(X_train, Y_train), columns=X_train.columns, index=X_train.index)\nX_test = pd.DataFrame(std_scale.fit_transform(X_test, Y_test), columns=X_test.columns, index=X_test.index)\n\n\n# # Naive Bayes Model\n\n# In[24]:\n\n\nfrom sklearn.naive_bayes import GaussianNB\nmm.model_and_printscores(GaussianNB(), X_train, Y_train, X_test, Y_test)\n\n\n# # Grid Search\n\n# In[30]:\n\n\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn import svm\nparams = {'C':[0.01, 0.05, 1]}\nclf = GridSearchCV(svm.SVC(kernel='linear'), params)\nclf.fit(X, Y)\n\n\n# In[31]:\n\n\nclf.cv_results_\n\n\n# In[35]:\n\n\nclf.best_estimator_\n\n\n# In[36]:\n\n\nmm.model_and_printscores(svm.SVC(C=1, cache_size=200, class_weight=None, coef0=0.0,\n  decision_function_shape='ovr', degree=3, gamma='auto', kernel='linear',\n  max_iter=-1, probability=False, random_state=None, shrinking=True,\n  tol=0.001, verbose=False), X_train, Y_train, X_test, Y_test)\n\n\n# # Cross Validation Scores\n\n# In[38]:\n\n\n# For Naive Bayes Modelling \nmm.model_and_printcrossvalscores(GaussianNB(), X, Y, 10)\n# For SVM \nmm.model_and_printcrossvalscores(svm.SVC(C=1, cache_size=200, class_weight=None, coef0=0.0,\n  decision_function_shape='ovr', degree=3, gamma='auto', kernel='linear',\n  max_iter=-1, probability=False, random_state=None, shrinking=True,\n  tol=0.001, verbose=False), X, Y, 10)\n\n\n# ## Observations\n# \n# Model|Training Score|Test Score\n# --|--|--\n# Naive Bayes|87|85.7\n# SVM|98.4|99.5\n# \n# \n# Based on the above table, I conclude that for this data set SVM is the best model\n\n# # Thank You\n","sub_path":"Project2.py","file_name":"Project2.py","file_ext":"py","file_size_in_byte":6390,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"56193351","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom datetime import datetime\nfrom .abstractgenerator import AbstractMISPObjectGenerator\nimport logging\nfrom dateutil.parser import parse\n\nlogger = logging.getLogger('pymisp')\n\n\nclass Fail2BanObject(AbstractMISPObjectGenerator):\n\n    def __init__(self, parameters, standalone=True, **kwargs):\n        super(Fail2BanObject, self).__init__('fail2ban', standalone=standalone, **kwargs)\n        self.__parameters = parameters\n        self.generate_attributes()\n\n    def generate_attributes(self):\n        self.add_attribute('banned-ip', value=self.__parameters['banned-ip'])\n        self.add_attribute('attack-type', value=self.__parameters['attack-type'])\n        try:\n            timestamp = parse(self.__parameters['processing-timestamp'])\n        except Exception:\n            timestamp = datetime.now()\n\n        self.add_attribute('processing-timestamp', value=timestamp.isoformat())\n\n        if 'failures' in self.__parameters:\n            self.add_attribute('failures', value=self.__parameters['failures'])\n        if 'sensor' in self.__parameters:\n            self.add_attribute('sensor', value=self.__parameters['sensor'])\n        if 'victim' in self.__parameters:\n            self.add_attribute('victim', 
value=self.__parameters['victim'])\n","sub_path":"pymisp/tools/fail2banobject.py","file_name":"fail2banobject.py","file_ext":"py","file_size_in_byte":1287,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"116002428","text":"# -*- coding : utf-8 -*-\nfrom __future__ import unicode_literals\nfrom django.shortcuts import render, redirect, resolve_url\nfrom django.http import HttpResponse\nfrom main.models import CleanReview, ProductReview\nimport json\n\ndef getList(request):\n    result = {'code':'','msg':'','clean':[],'product':[]}\n\n    try:\n        clean = CleanReview.objects.all()\n        for row in clean:\n            tmp = {}\n            result['clean'].append(tmp)\n\n        product = ProductReview.objects.all()\n        for row in product:\n            tmp = {}\n            result['product'].append(tmp)\n\n        result['code']=1\n\n    except Exception as e:\n        raise e\n\n    return HttpResponse(json.dumps(result))\n\n","sub_path":"mobileWeb/main/API/admin/review.py","file_name":"review.py","file_ext":"py","file_size_in_byte":700,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"20209319","text":"# _jengkolrebus\n# Curup, Bengkulu\n# May 2020\n\nimport matplotlib\nmatplotlib.rcParams['text.usetex'] = True\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nt = np.arange(0.0, 15, 0.01)\nA = 1 # Amplitude\nr = 0.5\nm = 0.5\ns = 30.0\nphi = (1)*np.pi\n\np = (-r*t)/(2*m)\nq = (s/m)-(np.square(r)/(4*np.square(m)))\n\nomega = np.sqrt(q)\nx = A*np.exp(p)*np.sin((omega*t)+phi)\n\nfig, ax = plt.subplots()\nax.plot(t, x)\nax.set(xlabel='t', ylabel='x', \n       title='$x=A e^{-rt/2m} \\sin(\\omega^\\prime t + \\phi)$' '\\n'\n       '$A=1, r=0.5, s=30, m=0.5, \\phi = \\pi$')\n# ax.set_title('$x=A e^{-rt/2m} \\sin(\\omega^\\prime t + \\phi)$' '\\n'\n#             '$A=1, r=0.5, s=30, m=0.5, \\phi = \\pi$')\nax.grid()\nplt.show()","sub_path":"lightdamped.py","file_name":"lightdamped.py","file_ext":"py","file_size_in_byte":716,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"274762174","text":"from myro import *\r\nfrom math import *\r\n\r\n\"\"\"wheel speed on carpet in m/s\"\"\"\r\nWHEEL_SPEED = 0.15\r\n\r\n\"\"\"scribbler radius in m\"\"\"\r\nSCRIBBLER_RADIUS = 0.075\r\n\r\n\"\"\"pi\"\"\"\r\nPI = 3.14159265359\r\n\r\ndef moveforward(distance=1, fwd=True):\r\n    \"\"\"moves forward a certain distance based on speed (0.15 m/s)\"\"\"\r\n    if fwd==True:\r\n        f=1\r\n    else:\r\n        f=-1\r\n    forward(f,distance/WHEEL_SPEED)\r\n\r\ndef turnside(radians=PI, left=True):\r\n    \"\"\"rotates in a direction (1.99 rad/sec)\"\"\"\r\n    if radians < 0:\r\n        radians = -radians\r\n        left = not left\r\n    if left:\r\n        d=1\r\n    else:\r\n        d=-1\r\n    turn(d,1,radians/1.99)\r\n    \r\ndef circle(radius=0.5, radians=PI, dirleft = True, fwd = True):\r\n    \"\"\"makes robot move in a circle based on radius and angle in radians\"\"\"\r\n    if dirleft:\r\n        d=1\r\n    else:\r\n        d=-1\r\n    if fwd:\r\n        f=1\r\n    else:\r\n        f=-1\r\n    vin = (radius - SCRIBBLER_RADIUS) / (radius + SCRIBBLER_RADIUS)\r\n    move(f*(1+vin)/2,d*(1-vin)/2)\r\n    dist = 1.0*radius*radians\r\n    angvel = (WHEEL_SPEED+WHEEL_SPEED*vin)/2\r\n    wait(dist/angvel)\r\n    stop()\r\n\r\ndef stuck(repeat=3):\r\n    \"\"\"goes back and forth to shake out when stuck\"\"\"\r\n    if repeat>=0:\r\n        f=1\r\n    else:\r\n        f=-1\r\n        repeat=-repeat\r\n    for _ in xrange(repeat):\r\n        forward(-f,0.5)\r\n        forward(f,1)\r\n\r\ndef shift(distance=1, left=True):\r\n    \"\"\"moves sideways and returns to its previous heading\"\"\"\r\n    turnside(PI/2,left)\r\n    moveforward(distance)\r\n    if 
left==True:\r\n turnside(PI/2,False)\r\n else:\r\n turnside(PI/2)\r\n\r\ndef goto(x=0,y=0,heading=True):\r\n \"\"\"goes to a location based on xy co-ordinate\"\"\"\r\n if x==0 and y==0:\r\n return\r\n if x>0:\r\n d=False\r\n else:\r\n d=True\r\n x=-x\r\n if y>=0:\r\n f=True\r\n else:\r\n f=False\r\n y=-y\r\n if x==0:\r\n if heading==True and f==False:\r\n turnside()\r\n moveforward(y)\r\n else:\r\n moveforward(y,f)\r\n else:\r\n direction = atan(y/x)\r\n distance = hypot(x,y)\r\n if d==False and f==True:\r\n turnside(PI/2-direction,False)\r\n moveforward(distance)\r\n if heading==False:\r\n turnside(PI/2-direction,True)\r\n elif d==True and f==True:\r\n turnside(PI/2-direction,True)\r\n moveforward(distance)\r\n if heading==False:\r\n turnside(PI/2-direction,False)\r\n elif d==False and f==False:\r\n turnside(PI-(PI/2-direction),False)\r\n moveforward(distance)\r\n if heading==False:\r\n turnside(PI-(PI/2-direction),True)\r\n elif d==True and f==False:\r\n turnside(PI-(PI/2-direction),True)\r\n moveforward(distance)\r\n if heading==False:\r\n turnside(PI-(PI/2-direction),False)\r\n\r\nif __name__ == \"__main__\":\r\n initialize(\"COM40\")\r\n #while True:\r\n #print map(getObstacle,xrange(3))\r\n","sub_path":"movement.py","file_name":"movement.py","file_ext":"py","file_size_in_byte":2976,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"525476376","text":"\"\"\"\n\tThis helper file contains mutable constants that are used throughout the control flow builder program.\n\tCFs is a dictionary used to check if we can link, as well as ends, breaks, and vals.\n\tThe various lists and dictionaries are lookups used mostly for class calls, except for v and edges, which are vertices and edge list resp.\n\tThe functions are simple common functions that didn't fit anywhere else.\n\"\"\"\n\nCFs = {\"ElifHeader\":True, \"IfElse\":True, \"IfEnd\":True, \"LoopFooter\":True,\n\t\t \"LoopElse\":True, \"LoopEnd\":True, \"ExcHeader\":True, \"TryEnd\":True,\n\t\t \"TryElse\":True, \"TryFinal\":True, \"FuncHeader\":True, \"FuncEnd\":True,\n\t\t \"ClassHeader\":True, \"ClassEnd\":True} \nends = {\"TryEnd\":True, \"LoopEnd\":True, \"IfEnd\":True}\nbreaks = {\"Raise\":True, \"Return\":True, \"Continue\":True, \"Break\":True}\nvals = {\"AugAssign\": True, \"Assign\": True}\t\t \n\t\t \nv = []\nedges = []\nseenFuncs = {}\npossibleReturns = {}\nfuncLocals = {}\nfuncReturns = {}\nfuncAliases = {}\nseenClasses = {}\nclassAliases = {}\nclassMethods = {}\n\t\t\ndef objType(obj):\n\tif obj: return obj.__class__.__name__\n\telse: return str(None)\n\ndef addLink(v1, v2):\n\tedges[v1][v2] = 1\n\ndef canLink(CF, i):\n\tif not (\"FuncEnd\" == objType(v[i-1][0]) or\n\t\t\t \"ClassEnd\" == objType(v[i-1][0]) or\n\t\t\t \"Call\" == objType(v[i-1][-1]) or\n\t\t\t (vals.get(objType(v[i-1][-1])) and \"Call\" == objType(v[i-1][-1].value) and v[i-1][-1].value.trueFunc in seenFuncs) or\n\t\t\t\tbreaks.get(objType(v[i-1][-1])) or\n\t\t\t\tCF):\n\t\treturn True\n\telse: return False\t\t\n","sub_path":"GraphBuilder/CFConstants.py","file_name":"CFConstants.py","file_ext":"py","file_size_in_byte":1475,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"320638585","text":"import numpy as np\nimport pandas as pd\nimport talib\n\nbearthreshold= 0.08\nbullthreshold=0.13\nperiod= 90\nshort=30 \nmed= 90\nlong= 180\n\n\ndef initialize(context):\n \n context.status=0.5\n context.days= 0\n context.bulldays= 0\n context.beardays= 0\n 
context.vxx= sid(38054)\n context.spy= sid(8554)\n\n schedule_function(check)\n\ndef check(context, data):\n\n \n heightspy= data.history(context.spy, 'high', period, '1d')\n peak= heightspy.max()\n trough= heightspy.min()\n mean= talib.SMA(heightspy, period)[-1]\n current= talib.SMA(heightspy, int(period/45))[-1]\n \n spydata= data.history(context.spy, 'price', long+5, '1d')\n sd= talib.SMA(spydata, short)[-1]\n md= talib.SMA(spydata, med)[-1]\n ld= talib.SMA(spydata, long)[-1]\n \n n = 28\n vxx_prices = data.history(context.vxx, \"price\", n + 2, \"1d\")[:-1]\n vxx_lows = data.history(context.vxx, \"low\", n + 2, \"1d\")[:-1]\n vxx_highest = vxx_prices.rolling(window = n, center=False).max() \n\n WVF = ((vxx_highest - vxx_lows)/(vxx_highest)) * 500\n\n \n \n shortp= conditionCheck(sd, md, 0.035)\n longp= 2*(conditionCheck(md, ld, 0.035))\n\n if current > (1+bullthreshold)*trough and not current < (1-bearthreshold)*peak:\n context.status=1\n\n elif current < (1-bearthreshold)*peak and not current > (1+bullthreshold)*trough:\n context.status= 0\n\n elif current < (1-bearthreshold)*peak and current > (1+bullthreshold)*trough:\n if context.bulldays > context.beardays:\n context.status= 0\n \n elif context.bulldays <= context.beardays:\n context.status=1\n \n \n \n \n context.days += 1\n \n if context.status== 1:\n context.bulldays += 1\n \n elif context.status== 0:\n context.beardays += 1\n\n #record(wvf_vxx = WVF[-1]/100, volbarrier=1.3)\n #record(shortterm= shortp, longterm=longp)\n record(spypeak=context.status)\n \n \ndef conditionCheck(small, large, var):\n if small > (1+var)*large:\n return 1\n \n elif (1-var)*large < small < (1+var)*large:\n return 0\n \n elif small < (1-var)*large:\n return -1","sub_path":"TrendTrading/BearBullIndicators/Market Prediction and WVF Volatility.py","file_name":"Market Prediction and WVF Volatility.py","file_ext":"py","file_size_in_byte":2148,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"300541923","text":"# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2021.\n#\n# This code is licensed under the Apache License, Version 2.0. 
You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\"\"\"\nUtility functions for CVXPy module\n\"\"\"\n\nfrom typing import Callable\nimport functools\n\n# Check if CVXPY package is installed\ntry:\n import cvxpy\n\n HAS_CVXPY = True\nexcept ImportError:\n cvxpy = None\n HAS_CVXPY = False\n\n\ndef requires_cvxpy(func: Callable) -> Callable:\n \"\"\"Function decorator for functions requiring CVXPy.\n\n Args:\n func: a function requiring CVXPy.\n\n Returns:\n The decorated function.\n\n Raises:\n QiskitError: If CVXPy is not installed.\n \"\"\"\n\n @functools.wraps(func)\n def decorated_func(*args, **kwargs):\n if not HAS_CVXPY:\n raise ImportError(\n f\"The CVXPY package is required to for {func}.\"\n \"You can install it with 'pip install cvxpy'.\"\n )\n return func(*args, **kwargs)\n\n return decorated_func\n\n\nclass SDPSolverChecker:\n \"\"\"Class for checking installed CVXPy SDP solvers\"\"\"\n\n _HAS_SDP_SOLVER = None\n _HAS_SDP_SOLVER_NOT_SCS = False\n _SDP_SOLVERS = set()\n\n def __init__(self):\n self._check_for_sdp_solver()\n\n @property\n def has_sdp_solver(self) -> bool:\n \"\"\"Return True if CVXPy is installed with an SDP solver\"\"\"\n return SDPSolverChecker._HAS_SDP_SOLVER\n\n @property\n def has_sdp_solver_not_scs(self) -> bool:\n \"\"\"Return True if CVXPy is installed with an SDP solver\"\"\"\n return SDPSolverChecker._HAS_SDP_SOLVER_NOT_SCS\n\n @property\n def sdp_solvers(self):\n \"\"\"Return True if CVXPy is installed with an SDP solver other than SCS\"\"\"\n return self._SDP_SOLVERS\n\n @classmethod\n def _check_for_sdp_solver(cls):\n \"\"\"Check if CVXPy solver is available\"\"\"\n if cls._HAS_SDP_SOLVER is None:\n cls._HAS_SDP_SOLVER = False\n if HAS_CVXPY:\n # pylint:disable=import-error\n solvers = cvxpy.installed_solvers()\n # Check for other SDP solvers cvxpy supports\n for solver in [\"CVXOPT\", \"MOSEK\"]:\n if solver in solvers:\n cls._SDP_SOLVERS.add(solver)\n cls._HAS_SDP_SOLVER = True\n cls._HAS_SDP_SOLVER_NOT_SCS = True\n if \"SCS\" in solvers:\n # Try example problem to see if built with BLAS\n # SCS solver cannot solver larger than 2x2 matrix\n # problems without BLAS\n try:\n var = cvxpy.Variable((5, 5), PSD=True)\n obj = cvxpy.Minimize(cvxpy.norm(var))\n cvxpy.Problem(obj).solve(solver=\"SCS\")\n cls._SDP_SOLVERS.add(\"SCS\")\n cls._HAS_SDP_SOLVER = True\n except cvxpy.error.SolverError:\n pass\n","sub_path":"qiskit_experiments/library/tomography/fitters/cvxpy_utils.py","file_name":"cvxpy_utils.py","file_ext":"py","file_size_in_byte":3297,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"366542582","text":"from flask import Flask, render_template, flash, redirect, request, session\nimport re\n\napp = Flask(__name__)\napp.secret_key = 'fifeepItSecretKeepItSafe'\n\n@app.route('/')\ndef index():\n return render_template('index.html')\n\n@app.route('/ninja', methods=[\"POST\"])\ndef process():\n tmnt = True\n return render_template(\"index2.html\", tmnt=tmnt)\n\n@app.route('/ninja/')\ndef color(color):\n tmnt = False\n return render_template(\"index2.html\", color=color, 
tmnt=tmnt)\n\napp.run(debug=True)\n","sub_path":"Flask_Fundamentals/Disappearing_Ninja/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":500,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"620943888","text":"from __future__ import print_function\nfrom math import ceil\nimport numpy as np\nimport sys\nimport pdb\nimport random\nimport pickle as pkl\n\nimport torch\nimport torch.optim as optim\nimport torch.nn as nn\n\nimport generator\nimport discriminator\nimport helpers\n\nPAD_IDX = 1\n\nCUDA = True\ngen_num_layers = 2\n\nMAX_SEQ_LEN = 40\nSTART_LETTER = 0\nBATCH_SIZE = 32\nMLE_TRAIN_EPOCHS = 30\nADV_TRAIN_EPOCHS = 30\n\nGEN_EMBEDDING_DIM = 100\nGEN_HIDDEN_DIM = 256\n\nDIS_NET = \"CNN\"\nDIS_EMBEDDING_DIM = 100\nDIS_HIDDEN_DIM = 256\n\n# pretrained_gen_path = './gen_MLEtrain_EMBDIM32_HIDDENDIM32_VOCAB5000_MAXSEQLEN20.trc'\n# pretrained_dis_path = './dis_pretrain_EMBDIM_64_HIDDENDIM64_VOCAB5000_MAXSEQLEN20.trc'\n\n\ndef train_generator_MLE(gen, gen_opt, real_data_samples, epochs):\n \"\"\"\n Max Likelihood Pretraining for the generator\n \"\"\"\n for epoch in range(epochs):\n print('epoch %d : ' % (epoch + 1), end='')\n sys.stdout.flush()\n total_loss = 0\n random.shuffle(real_data_samples)\n for i in range(0, len(real_data_samples), BATCH_SIZE):\n batch_data = real_data_samples[i:i + BATCH_SIZE]\n# start_letter = torch.randn(len(batch_data))\n inp, target = helpers.prepare_generator_batch(batch_data, start_letter=START_LETTER,\n gpu=CUDA)\n gen_opt.zero_grad()\n loss = gen.batchNLLLoss(inp, target)\n loss.backward()\n torch.nn.utils.clip_grad_norm_(gen.parameters(), 3)\n gen_opt.step()\n\n total_loss += loss.data.item()\n\n if (i / BATCH_SIZE) % ceil(\n ceil(POS_NEG_SAMPLES / float(BATCH_SIZE)) / 10.) 
== 0: # roughly every 10% of an epoch\n print('.', end='')\n sys.stdout.flush()\n\n # each loss in a batch is loss per sample\n total_loss = total_loss / ceil(POS_NEG_SAMPLES / float(BATCH_SIZE)) / MAX_SEQ_LEN\n\n print('[%d] average_train_NLL = %.4f' % (len(real_data_samples), total_loss))\n\n\ndef train_generator_PG(gen, gen_opt, dis, num_batches):\n \"\"\"\n The generator is trained using policy gradients, using the reward from the discriminator.\n Training is done for num_batches batches.\n \"\"\"\n\n for batch in range(num_batches):\n s = gen.sample(BATCH_SIZE*2) # 64 works best\n inp, target = helpers.prepare_generator_batch(s, start_letter=START_LETTER, gpu=CUDA)\n rewards = dis.batchClassify(target)\n\n gen_opt.zero_grad()\n pg_loss = gen.batchPGLoss(inp, target, rewards)\n pg_loss.backward()\n torch.nn.utils.clip_grad_norm_(gen.parameters(), 3)\n gen_opt.step()\n\n print(' pg_loss = %.4f' % pg_loss)\n\n\ndef train_discriminator(discriminator, dis_opt, real_data_samples, generator, d_steps, epochs):\n \"\"\"\n Training the discriminator on real_data_samples (positive) and generated samples from generator (negative).\n Samples are drawn d_steps times, and the discriminator is trained for epochs epochs.\n \"\"\"\n\n # generating a small validation set before training (using real data and generator)\n pos_val = random.sample(real_data_samples, 100)\n neg_val = generator.sample(100)\n val_inp, val_target = helpers.prepare_discriminator_data(pos_val, neg_val, MAX_SEQ_LEN, gpu=CUDA)\n\n for d_step in range(d_steps):\n s = helpers.batchwise_sample(generator, POS_NEG_SAMPLES, BATCH_SIZE)\n dis_inp, dis_target = helpers.prepare_discriminator_data(real_data_samples, s, MAX_SEQ_LEN, gpu=CUDA)\n for epoch in range(epochs):\n print('d-step %d epoch %d : ' % (d_step + 1, epoch + 1), end='')\n sys.stdout.flush()\n total_loss = 0\n total_acc = 0\n\n for i in range(0, 2 * POS_NEG_SAMPLES, BATCH_SIZE):\n inp, target = dis_inp[i:i + BATCH_SIZE], dis_target[i:i + BATCH_SIZE]\n dis_opt.zero_grad()\n out = discriminator.batchClassify(inp)\n loss_fn = nn.BCELoss()\n loss = loss_fn(out, target)\n loss.backward()\n torch.nn.utils.clip_grad_norm_(dis.parameters(), 3)\n dis_opt.step()\n\n total_loss += loss.data.item()\n total_acc += torch.sum((out>0.5)==(target>0.5)).data.item()\n\n if (i / BATCH_SIZE) % ceil(ceil(2 * POS_NEG_SAMPLES / float(\n BATCH_SIZE)) / 10.) == 0: # roughly every 10% of an epoch\n print('.', end='')\n sys.stdout.flush()\n\n total_loss /= ceil(2 * POS_NEG_SAMPLES / float(BATCH_SIZE))\n total_acc /= float(2 * POS_NEG_SAMPLES)\n\n val_pred = discriminator.batchClassify(val_inp)\n print(' average_loss = %.4f, train_acc = %.4f, val_acc = %.4f' % (\n total_loss, total_acc, torch.sum((val_pred>0.5)==(val_target>0.5)).data.item()/200.))\n\n# MAIN\n# +++++++++++ Questions +++++++++++ #\n# Do we keep start letter 0, even during evaluation? we prob need to sample gaussian noise.\n# How do we actually sample? 
Simply use gen.sample?\n# +++++++++++++++++++++++++++++++++ #\nif __name__ == '__main__':\n    [idx_data, token_dataset, token2id, id2token] = pkl.load(open(\"short_jokes-40.pkl\", \"rb\"))\n    POS_NEG_SAMPLES = len(idx_data)\n    VOCAB_SIZE = len(id2token)\n    gen = generator.Generator(GEN_EMBEDDING_DIM, GEN_HIDDEN_DIM, VOCAB_SIZE, MAX_SEQ_LEN, gen_num_layers, gpu=CUDA)\n    dis = discriminator.Discriminator(DIS_EMBEDDING_DIM, DIS_HIDDEN_DIM, VOCAB_SIZE, MAX_SEQ_LEN, gpu=CUDA, net=DIS_NET)\n    print(gen)\n    print(dis)\n    if CUDA:\n        gen = gen.cuda()\n        dis = dis.cuda()\n\n    # GENERATOR MLE TRAINING\n    print('Starting Generator MLE Training...')\n    gen_optimizer = optim.Adam(gen.parameters(), lr=3e-4)\n    train_generator_MLE(gen, gen_optimizer, idx_data, MLE_TRAIN_EPOCHS)\n\n    torch.save(gen.state_dict(), 'gen-40-MLE.ckpt')\n#    gen.load_state_dict(torch.load('gen.ckpt'))\n\n    # PRETRAIN DISCRIMINATOR\n    print('\\nStarting Discriminator Training...')\n    dis_optimizer = optim.Adagrad(dis.parameters())\n    train_discriminator(dis, dis_optimizer, idx_data, gen, 20, 3)\n\n    torch.save(dis.state_dict(), 'dis-CNN.ckpt')\n    # dis.load_state_dict(torch.load(pretrained_dis_path))\n\n    # ADVERSARIAL TRAINING\n    print('\\nStarting Adversarial Training...')\n\n    for epoch in range(ADV_TRAIN_EPOCHS):\n        print('\\n--------\\nEPOCH %d\\n--------' % (epoch+1))\n        # TRAIN GENERATOR\n        print('\\nAdversarial Training Generator : ', end='')\n        sys.stdout.flush()\n        train_generator_PG(gen, gen_optimizer, dis, 3)\n\n        # TRAIN DISCRIMINATOR\n        print('\\nAdversarial Training Discriminator : ')\n        train_discriminator(dis, dis_optimizer, idx_data, gen, 5, 3)\n\n    torch.save(gen.state_dict(), 'gen-40-GAN.ckpt')\n    torch.save(dis.state_dict(), 'dis.ckpt')\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6886,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"459693828","text":"from telegram.ext import Updater, CommandHandler, MessageHandler, Filters\nimport logging\nfrom telegram import User, Update\nfrom subprocess import call\nimport os\n\n# Enable logging\nlogging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',\n                    level=logging.INFO)\n\nlogger = logging.getLogger(__name__)\n\n\n# Define a few command handlers. These usually take the two arguments bot and\n# update. Error handlers also receive the raised TelegramError object in error.\ndef start(bot, update):\n    \"\"\"Sends the message when the /start command is issued.\"\"\"\n    ID = update.message.chat_id\n    FILE = str(ID) + '.txt'\n    resposta_file = open(FILE,'w')\n    resposta_file.write(str(ID) + \"\\n\")\n    resposta_file.close()\n\n    #print (\"%s %d\", NOME_2,ID_2)\n    update.message.reply_text(\n        'Hello, I am Gandalf\\n\\n'\n        'Send /registrar to register your vehicle or /cancelar to stop talking to me.')\n\n\ndef main():\n    \"\"\"Start the bot.\"\"\"\n    # Create the EventHandler and pass it your bot's token.\n    updater = Updater(\"556806366:AAH9OIYZwwKapLkZrs7fYQU-NdUF2H9MuDc\")\n\n    # Get the dispatcher to register handlers\n    dp = updater.dispatcher\n\n    # on different commands - answer in Telegram\n    dp.add_handler(CommandHandler(\"start\", start))\n\n\n    # Start the Bot\n    updater.start_polling()\n\n    # Run the bot until you press Ctrl-C or the process receives SIGINT,\n    # SIGTERM or SIGABRT. 
This should be used most of the time, since\n # start_polling() is non-blocking and will stop the bot gracefully.\n updater.idle()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"3_Projeto_Final/Codigos/PC3/Servidor/telegram.py","file_name":"telegram.py","file_ext":"py","file_size_in_byte":1653,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"122214290","text":"import results\nimport zernike\nimport numpy as np\n\norder = 10\n\nrings = 20\nwedges = 32\n\nfor i in range(31):\n r = results.read_results(\"./vera_1i_fet/step\" + str(i) + \".pklz\")\n\n con = r.num[0]\n\n #print(r.k)\n\n zer = zernike.ZernikePolynomial(order, con[\"10000\", \"Xe-135\"] * rings * wedges / (np.pi * 0.4096**2) / np.pi)\n\n print(zer.coeffs[0] / (rings * wedges) * np.pi)\n\n #zer.force_positive()\n\n # zer.plot_disk(rings, wedges, \"testg\" + str(i+1) + \".pdf\")\n\n rea = r.rates[0]\n\n zer = rea.get_fet([\"10000\", \"Xe-135\", \"(n,gamma)\"]) * rings * wedges / (np.pi * 0.4096**2) / np.pi * 1.0e24\n\n #zer.plot_disk(rings, wedges, \"testgr\" + str(i+1) + \".pdf\")\n","sub_path":"source/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":675,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"380910956","text":"class Solution:\n # @param s, a list of 1 length strings, e.g., s = ['h','e','l','l','o']\n # @return nothing\n def reverseWords(self, s):\n s = ''.join(s).split(' ')\n\n i = 0\n j = len(s) - 1\n while i < j:\n s[i], s[j] = s[j], s[i]\n i += 1\n j -= 1\n\n\ns = ['h', 'e', 'l', 'l', 'o', ' ', 'w', 'o', 'r', 'l', 'd']\nSolution().reverseWords(s)\nprint(s)\n","sub_path":"reverse-words-in-a-string-ii/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":410,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"555111009","text":"import pandas as pd\n\nfrom starfile.parser import StarParser\nfrom starfile.writer import StarWriter\n\nfrom .constants import loop_simple, postprocess, pipeline, rln31_style, optimiser_2d, optimiser_3d, sampling_2d, \\\n sampling_3d, test_data, test_df\n\n\ndef test_write_simple_block():\n s = StarParser(postprocess)\n output_file = test_data / 'basic_block.star'\n StarWriter(s.dataframes, output_file, overwrite=True)\n assert output_file.exists()\n\n\ndef test_write_loop():\n s = StarParser(loop_simple)\n output_file = test_data / 'loop_block.star'\n StarWriter(s.dataframes, output_file, overwrite=True)\n assert output_file.exists()\n\n\ndef test_write_multiblock():\n s = StarParser(postprocess)\n output_file = test_data / 'multiblock.star'\n StarWriter(s.dataframes, output_file, overwrite=True)\n assert output_file.exists()\n\n\ndef test_from_single_dataframe():\n output_file = test_data / 'from_df.star'\n\n StarWriter(test_df, output_file, overwrite=True)\n assert output_file.exists()\n\n s = StarParser(output_file)\n\n\ndef test_create_from_dataframes():\n dfs = [test_df, test_df]\n\n output_file = test_data / 'from_list.star'\n StarWriter(dfs, output_file, overwrite=True)\n assert output_file.exists()\n\n s = StarParser(output_file)\n assert len(s.dataframes) == 2\n","sub_path":"tests/test_writing.py","file_name":"test_writing.py","file_ext":"py","file_size_in_byte":1313,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"569634935","text":"import logging\nimport hydra\nfrom omegaconf import DictConfig, OmegaConf # (ref) 
https://hydra.cc/docs/next/tutorials/basic/your_first_app/simple_cli\n\n\n\n\n# A logger for this file\nlog = logging.getLogger(__name__)\n\n\n\n\n@hydra.main(config_path=\"./conf\", config_name=\"config\") \ndef my_app(cfg: DictConfig) -> None:\n    log.info(\"YAML config\")\n    log.info(OmegaConf.to_yaml(cfg))  # OmegaConf.to_yaml is used to transform 'DictConfig' to 'str'\n    \t\t                          # (ref) https://majianglin2003.medium.com/python-omegaconf-a33be1b748ab\n\n\nif __name__ == \"__main__\":\n    my_app()\n","sub_path":"Logging/my_app.py","file_name":"my_app.py","file_ext":"py","file_size_in_byte":580,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"631741334","text":"# coding=utf-8\n__author__ = 'AllenCHM'\nimport sys\n\nreload(sys)\nsys.setdefaultencoding('utf-8')\n\n''.join('A|B|C|D|E|F|G'.split('|'))\n\n# Use itertools.islice, because it can take a slice of the string:\nimport itertools\n\n''.join(itertools.islice('A|B|C|D|E|F|G', 6, None, 2))\n# output: 'DEFG'\n\n''.join(itertools.islice('A|B|C|D|E|F|G', 0, None, 2))\n# output: 'ABCDEFG'\n","sub_path":"字符串相关/剔除分隔符.py","file_name":"剔除分隔符.py","file_ext":"py","file_size_in_byte":356,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"146952031","text":"# Author: Hao Mei\n# AndrewID: haomei\n\nfrom turtle import forward, left, right, penup, pendown\n\n\n\n# Task 1\ndef Tokenize(program):\n    # Remove all white spaces\n    for i in range(len(program)-1, -1, -1):\n        if program[i] == ' ':\n            program = program[:i] + program[i+1:]\n    # English letters excluding M and X\n    nonSpecailCharList = ['A','B','C','D','E','F','G','H','I','J','K','L', \\\n        'N','O','P','Q','R','S','T','U','V','W','Y','Z']\n    # Start positions of each command, first command start at 0\n    posList = [0]\n    # Append the (end position + 1) of each command to posList\n    i = 0\n    while i < len(program):\n        if program[i] == 'M' or program[i] == 'X':\n            _, closeBracePos, _ = findBracePos(program, i)\n            i = closeBracePos + 1\n            posList.append(i)\n        elif program[i] in nonSpecailCharList:\n            # Search pos of next command from pos of (current command + 1)\n            nextCommandPos = findNextCommand(program, i+1)\n            i = nextCommandPos\n            posList.append(i)\n    # Get substrings and return as a list\n    return [program[posList[i]:posList[i+1]] for i in range(len(posList)-1)]\n\n\n\n# Task 2\ndef execute(commands, funcDict):\n    # Unfold loops, remove and record function defs\n    commands, funcDict = parseFuncAndLoop(commands, funcDict)\n    # Replace all function calls with function definitions\n    # funcDict format: {funcName : tuple(commandList)}\n    i = 0\n    while i < len(commands):\n        if commands[i] in funcDict:\n            commands = replaceWithDef(commands, funcDict)\n            i -= 1\n        i += 1\n    # Map Turtle Commands to turtle library\n    commands = fillDefaults(commands)\n    for command in commands:\n        if command[0] == 'F':\n            forward(int(command[1:]))\n        if command[0] == 'L':\n            left(int(command[1:]))\n        if command[0] == 'R':\n            right(int(command[1:]))\n        if command[0] == 'D':\n            pendown()\n        if command[0] == 'U':\n            penup()\n\n\n\n# Helper Functions\n\n# Find open and end brace positions\n# and also return true if any other expressions are in the text\ndef findBracePos(program, start):\n    openBrace = 0\n    closeBrace = 0\n    for i in range(start, len(program)):\n        # Look for first open brace\n        if program[i] == '{' and not bool(openBrace):\n            # Set start position if is the first open brace\n            startBracePos = i\n            openBrace += 1\n        elif program[i] == '{' and bool(openBrace):\n            openBrace += 1\n        elif program[i] == 
'}':\n closeBrace += 1\n # Return positions if openBrace is not zero and open equals close\n if openBrace == closeBrace and bool(openBrace):\n return startBracePos, i, bool(openBrace-1)\n\n# Input start point of defined command, return start pos of next command\ndef findNextCommand(program, start):\n # All 26 English letters\n charList = ['A','B','C','D','E','F','G','H','I','J','K','L','M', \\\n 'N','O','P','Q','R','S','T','U','V','W','X','Y','Z']\n # Return i if found the next english character\n for i in range(start, len(program)):\n if program[i] in charList:\n return i\n # If is the last command, return the length of program\n return len(program)\n\n# Recursively unfold loops, and remove function defs and add to funcDict\ndef parseFuncAndLoop(commandList, funcDict):\n i = 0\n while i < len(commandList):\n if commandList[i][0] == 'M':\n funcDef, funcName, funcDict = getContent(commandList[i], funcDict)\n # Delete function definition from commandList and subtract i by 1\n del commandList[i]\n i -= 1\n # Add function definition to funcDict\n funcDict[funcName] = tuple(funcDef)\n elif commandList[i][0] == 'X':\n loopContent, count, funcDict = getContent(commandList[i], funcDict)\n del commandList[i]\n # Insert loopContent x times at position i of commandList\n for _ in range(int(count)):\n # Last command in loopContent is inserted first\n for index in range(len(loopContent)-1,-1,-1):\n commandList.insert(i, loopContent[index])\n i -= 1\n i += 1\n return commandList, funcDict\n\n# Return content of a funcDef or loop as a list, \n# and return loop number or function name\ndef getContent(program, funcDict):\n # If found more than one pair of braces, otherExpression is True\n startBracePos, _, otherExpression = findBracePos(program, 0)\n commandList = Tokenize(program[startBracePos+1:-1])\n if otherExpression:\n # Recursive step in unfolding commandList\n commandList, funcDict = parseFuncAndLoop(commandList, funcDict)\n # Replace known function names with function defs\n commandList = replaceWithDef(commandList, funcDict)\n return commandList, program[1:startBracePos], funcDict\n\ndef replaceWithDef(commandList, funcDict):\n i = 0\n while i < len(commandList):\n command = commandList[i]\n if command in funcDict:\n # Delete function call\n del commandList[i]\n # Insert function def command list in reverse order\n for index in range(len(funcDict[command])-1,-1,-1):\n commandList.insert(i, list(funcDict[command])[index])\n i += 1\n return commandList\n\n# Fill default value of the command is it doesn't have a value\ndef fillDefaults(returnList):\n for i in range(len(returnList)):\n if returnList[i] == 'L':\n returnList[i] = 'L90'\n if returnList[i] == 'R':\n returnList[i] = 'R90'\n if returnList[i] == 'F':\n returnList[i] = 'F50'\n return returnList","sub_path":"15-112/Assignment 6/haomeihw6.py","file_name":"haomeihw6.py","file_ext":"py","file_size_in_byte":5779,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"636329416","text":"# -*- coding: utf-8 -*-\r\n\r\nfrom odoo import fields, models\r\n\r\n\r\nclass ProductProduct(models.Model):\r\n _inherit = \"product.product\"\r\n\r\n def prueba(self, location):\r\n consolidado = []\r\n quant_ids = self.env['stock.quant'].search([\r\n ('location_id', '=', location),\r\n ])\r\n product_ids = self.env['product.product'].search([\r\n ('available_in_pos', '=', True),\r\n ])\r\n for product_id in product_ids:\r\n if product_id in quant_ids.product_id:\r\n consolidado.append({\r\n 'id': 
product_id.id,\r\n                    'qty_available': quant_ids.filtered(lambda x: x.product_id == product_id).quantity,\r\n                })\r\n            else:\r\n                consolidado.append({\r\n                    'id': product_id.id,\r\n                    'qty_available': 0,\r\n                })\r\n        return consolidado","sub_path":"pos_cantidad_stock/models/product_product.py","file_name":"product_product.py","file_ext":"py","file_size_in_byte":908,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"307152174","text":"\"\"\"\r\nWrite a program that reads two integer numbers and compares them, showing a message on screen:\r\nThe first value is larger -\r\nThe second value is larger -\r\nThere is no larger value, the two are equal\r\n\r\n\"\"\"\r\n\r\na = float(input(\"Enter the first value:\"))\r\nb = float(input(\"Enter the second value:\"))\r\nif a > b:\r\n    print(\"The first value is larger\")\r\nelif b > a:\r\n    print(\"The second value is larger\")\r\nelse:\r\n    print(\"There is no larger value, the two are equal\")\r\n","sub_path":"PythonBasicoMundo02/Desafio38.py","file_name":"Desafio38.py","file_ext":"py","file_size_in_byte":476,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"746149","text":"\"\"\"\nCopyright 2015 BlazeMeter Inc.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\nimport copy\nimport os\nimport time\nfrom abc import abstractmethod\n\nfrom urwid import Text, Pile\n\nfrom bzt import TaurusConfigError, ToolError\nfrom bzt.engine import FileLister, HavingInstallableTools, SelfDiagnosable\nfrom bzt.modules import ReportableExecutor\nfrom bzt.modules.console import WidgetProvider, PrioritizedWidget\nfrom bzt.utils import get_files_recursive, get_full_path, RequiredTool, unzip, untar, shell_exec\nfrom bzt.utils import is_windows, is_mac, platform_bitness, Environment\n\nfrom bzt.commands import Commands\nfrom bzt.resources.vnc_viewer.vncviewer import VncViewer\nfrom multiprocessing import Process\nimport multiprocessing as mp\nimport requests\nimport shutil\n\ntry:\n    mp.set_start_method('spawn', force=True)\nexcept AttributeError:\n    pass\n\n\nclass AbstractSeleniumExecutor(ReportableExecutor):\n    @abstractmethod\n    def get_virtual_display(self):\n        \"\"\"\n        Return virtual display instance, if any.\n        :return:\n        \"\"\"\n        pass\n\n    @abstractmethod\n    def add_env(self, env):  # compatibility with taurus-server\n        \"\"\"\n        Add environment variables into selenium process env\n        :type env: dict[str,str]\n        \"\"\"\n        pass\n\n\nclass ServiceAttached(object):\n    service_attached = []\n\n    remote = None\n\n    @classmethod\n    def get_attached(cls):\n        return cls.service_attached\n\n    @classmethod\n    def add_attach(cls, attach_id):\n        cls.service_attached.append(attach_id)\n\n    @classmethod\n    def detach(cls, attach_id):\n        cls.service_attached.remove(attach_id)\n\n    @classmethod\n    def get_remote(cls, log):\n        if cls.remote:\n            return cls.remote\n        else:\n            cls.remote = Commands(log).remote\n            return cls.remote\n\n\ndef run_vncviewer(host, port, password, conn_id):\n    vnc_viewer = VncViewer(host, port, password, conn_id)\n    
return vnc_viewer\n\n\nclass SeleniumExecutor(AbstractSeleniumExecutor, WidgetProvider, FileLister, HavingInstallableTools, SelfDiagnosable):\n \"\"\"\n Selenium executor\n :type runner: bzt.modules.SubprocessedExecutor\n \"\"\"\n\n SUPPORTED_RUNNERS = [\"nose\", \"junit\", \"testng\", \"rspec\", \"mocha\", \"nunit\", \"pytest\", \"wdio\", \"robot\"]\n\n CHROMEDRIVER_DOWNLOAD_LINK = \"https://chromedriver.storage.googleapis.com/{version}/chromedriver_{arch}.zip\"\n CHROMEDRIVER_VERSION = \"2.36\"\n\n GECKODRIVER_DOWNLOAD_LINK = \"https://github.com/mozilla/geckodriver/releases/download/v{version}/\" \\\n \"geckodriver-v{version}-{arch}.{ext}\"\n GECKODRIVER_VERSION = \"0.19.1\"\n\n SELENIUM_TOOLS_DIR = get_full_path(\"~/.bzt/selenium-taurus/tools\")\n\n def __init__(self):\n super(SeleniumExecutor, self).__init__()\n self.end_time = None\n self.runner = None\n self.script = None\n self.runner_working_dir = None\n self.register_reader = True\n self.webdrivers = []\n\n self.vnc_connections = []\n\n def add_env(self, env): # compatibility with taurus-server\n self.env.set(env)\n\n def get_runner_working_dir(self):\n if self.runner_working_dir is None:\n self.runner_working_dir = self.engine.create_artifact(\"classes\", \"\")\n return self.runner_working_dir\n\n def create_runner(self):\n\n runner_type = self.get_runner_type()\n self.runner = self.engine.instantiate_module(runner_type)\n self.runner.env = self.env\n\n # Propagate to Runner Remote capabilities if is used and generate Environment variables for external script\n service_remote = self.execution.get_noset(\"remote\", self.settings.get_noset(\"remote\", None))\n service_capabilities = self.execution.get_noset(\"capabilities\", self.settings.get_noset(\"capabilities\", []))\n use_service = self.execution.get_noset(\"service\", self.settings.get_noset(\"service\", None))\n\n service_video = self.execution.get_noset(\"service_video\",\n self.settings.get_noset(\"service_video\", True))\n service_screenshot = self.execution.get_noset(\"service_screenshot\",\n self.settings.get_noset(\"service_screenshot\", True))\n\n service_id = None\n service_vnc = None\n if use_service:\n service_info = ServiceAttached.get_remote(self.log).pull_service(use_service,\n ServiceAttached.get_attached(), cache=True)\n service_id = service_info[\"service_id\"]\n if service_info[\"remote\"]:\n ServiceAttached.add_attach(service_info[\"attach_id\"])\n service_remote = service_info[\"remote\"]\n service_capabilities = service_info[\"capabilities\"]\n\n if service_info[\"vnc\"] and self.settings.get_noset(\"service_vnc\", True):\n service_vnc = service_info[\"vnc\"]\n\n self.runner.parameters = self.parameters\n self.runner.provisioning = self.provisioning\n self.runner.execution = copy.deepcopy(self.execution)\n\n # Promote the resolution\n self.runner.execution[\"service_id\"] = service_id\n self.runner.execution[\"remote\"] = service_remote\n self.runner.execution[\"capabilities\"] = service_capabilities\n self.runner.execution[\"vnc\"] = service_vnc\n self.runner.execution[\"video\"] = service_video\n self.runner.execution[\"screenshot\"] = service_screenshot\n\n # TODO: For debug, remove\n self.log.info(\"Service:\" + str(use_service))\n self.log.info(\"Remote:\" + str(self.runner.execution[\"remote\"]))\n self.log.info(\"Capabilities:\" + str(len(self.runner.execution[\"capabilities\"])))\n\n if self.env:\n if \"remote\" in self.runner.execution:\n self.add_env({\"BZT_REMOTE\": self.runner.execution[\"remote\"]})\n if \"capabilities\" in 
self.runner.execution:\n for remote_cap in self.runner.execution[\"capabilities\"]:\n if \"browser\" in remote_cap:\n self.add_env({\"BZT_REMOTE_BROWSER\": remote_cap[\"browser\"]})\n\n self.runner.execution['files'] = self.execution.get('files', [])\n self.runner.execution['executor'] = runner_type\n self.runner.register_reader = self.register_reader\n\n if runner_type == \"nose\":\n self.runner.execution[\"test-mode\"] = \"selenium\"\n\n def get_virtual_display(self):\n pass # for compatibility with taurus server\n\n def _get_chromedriver_link(self):\n settings = self.settings.get('chromedriver')\n link = settings.get('download-link', SeleniumExecutor.CHROMEDRIVER_DOWNLOAD_LINK)\n version = settings.get('version', SeleniumExecutor.CHROMEDRIVER_VERSION)\n if is_windows():\n arch = 'win32' # no 64-bit windows builds, :(\n elif is_mac():\n arch = 'mac64'\n else:\n arch = 'linux32' if platform_bitness() == 32 else 'linux64'\n return link.format(version=version, arch=arch)\n\n def _get_chromedriver_path(self):\n base_dir = get_full_path(SeleniumExecutor.SELENIUM_TOOLS_DIR)\n settings = self.settings.get('chromedriver')\n version = settings.get('version', SeleniumExecutor.CHROMEDRIVER_VERSION)\n filename = 'chromedriver.exe' if is_windows() else 'chromedriver'\n return os.path.join(base_dir, 'chromedriver', version, filename)\n\n def _get_geckodriver_link(self):\n settings = self.settings.get('geckodriver')\n link = settings.get('download-link', SeleniumExecutor.GECKODRIVER_DOWNLOAD_LINK)\n version = settings.get('version', SeleniumExecutor.GECKODRIVER_VERSION)\n if is_windows():\n arch = 'win64' # no 32-bit windows builds, :(\n ext = 'zip'\n elif is_mac():\n arch = 'macos'\n ext = 'tar.gz'\n else:\n arch = 'linux32' if platform_bitness() == 32 else 'linux64'\n ext = 'tar.gz'\n return link.format(version=version, arch=arch, ext=ext)\n\n def _get_geckodriver_path(self):\n base_dir = get_full_path(SeleniumExecutor.SELENIUM_TOOLS_DIR)\n settings = self.settings.get('geckodriver')\n version = settings.get('version', SeleniumExecutor.GECKODRIVER_VERSION)\n filename = 'geckodriver.exe' if is_windows() else 'geckodriver'\n return os.path.join(base_dir, 'geckodriver', version, filename)\n\n def install_required_tools(self):\n chromedriver_path = self._get_chromedriver_path()\n chromedriver_link = self._get_chromedriver_link()\n geckodriver_path = self._get_geckodriver_path()\n geckodriver_link = self._get_geckodriver_link()\n\n self.webdrivers = [ChromeDriver(chromedriver_path, self.log, chromedriver_link),\n GeckoDriver(geckodriver_path, self.log, geckodriver_link)]\n\n for tool in self.webdrivers:\n if not tool.check_if_installed():\n self.log.info(\"Installing %s...\", tool.tool_name)\n tool.install()\n\n def prepare(self):\n if self.env is None:\n self.env = Environment(self.log, self.engine.env.get()) # for backward compatibility with taurus-server\n\n self.install_required_tools()\n for driver in self.webdrivers:\n self.env.add_path({\"PATH\": driver.get_driver_dir()})\n\n if self.get_load().concurrency and self.get_load().concurrency > 1:\n msg = 'Selenium supports concurrency in cloud provisioning mode only\\n'\n msg += 'For details look at http://gettaurus.org/docs/Cloud.md'\n self.log.warning(msg)\n\n self.create_runner()\n self.runner.prepare()\n self.script = self.runner.script\n\n def get_runner_type(self):\n if \"runner\" in self.execution:\n runner = self.execution[\"runner\"]\n if runner not in SeleniumExecutor.SUPPORTED_RUNNERS:\n msg = \"Runner '%s' is not supported. 
Supported runners: %s\"\n raise TaurusConfigError(msg % (runner, SeleniumExecutor.SUPPORTED_RUNNERS))\n self.log.debug(\"Using script type: %s\", runner)\n return runner\n\n script_name = self.get_script_path()\n if script_name:\n return self.detect_script_type(script_name)\n else:\n if \"requests\" in self.get_scenario():\n return \"nose\"\n else:\n raise TaurusConfigError(\"You must specify either script or list of requests to run Selenium\")\n\n def resource_files(self):\n self.create_runner()\n return self.runner.resource_files()\n\n def detect_script_type(self, script_name):\n if not os.path.exists(script_name):\n raise TaurusConfigError(\"Script '%s' doesn't exist\" % script_name)\n\n file_types = set()\n\n # gather file extensions and choose script_type according to priority\n if os.path.isfile(script_name): # regular file received\n file_types.add(os.path.splitext(script_name)[1].lower())\n else: # dir received: check contained files\n for file_name in get_files_recursive(script_name):\n file_types.add(os.path.splitext(file_name)[1].lower())\n\n if '.java' in file_types or '.jar' in file_types:\n # todo: next detection logic is duplicated in TestNGTester - can we avoid it?\n script_dir = get_full_path(self.get_script_path(), step_up=1)\n if os.path.exists(os.path.join(script_dir, 'testng.xml')) or self.execution.get('testng-xml', None):\n script_type = 'testng'\n else:\n script_type = 'junit'\n elif '.py' in file_types:\n script_type = 'nose'\n elif '.rb' in file_types:\n script_type = 'rspec'\n elif '.js' in file_types:\n script_type = 'mocha'\n elif '.dll' in file_types or '.exe' in file_types:\n script_type = 'nunit'\n else:\n if os.path.isfile(script_name):\n message = \"Unsupported script type: %r\" % script_name\n else:\n message = \"Directory %r doesn't contain supported scripts\" % script_name\n raise TaurusConfigError(message)\n\n self.log.debug(\"Detected script type: %s\", script_type)\n\n return script_type\n\n def startup(self):\n \"\"\"\n Start runner\n :return:\n \"\"\"\n self.start_time = time.time()\n\n if self.runner.execution[\"vnc\"]:\n vnc_host = self.runner.execution[\"vnc\"].split(\":\")[0]\n vnc_port = int(self.runner.execution[\"vnc\"].split(\":\")[1])\n vnc_pass = \"secret\"\n\n if is_mac():\n cmdline = [\"open\", \"vnc://user:%s@%s:%d\" % (vnc_pass, vnc_host, vnc_port)]\n vnc_proc = shell_exec(cmdline)\n else:\n vnc_proc = Process(target=run_vncviewer, args=(vnc_host, vnc_port, vnc_pass,\n self.runner.execution[\"service_id\"],))\n vnc_proc.daemon = True\n vnc_proc.start()\n\n self.vnc_connections.append(vnc_proc)\n\n if self.runner.execution[\"remote\"]:\n service_host = self.runner.execution[\"remote\"].split(\":\")[1]\n service_url = self.runner.execution[\"remote\"].split(\":\")[0] + ':' + service_host + \\\n ':5555/extra/bzt_servlet?command=startTest'\n\n video = self.runner.execution.get_noset(\"video\", self.runner.settings.get_noset(\"video\", False))\n screenshot = self.runner.execution.get_noset(\"screenshot\", self.runner.settings.get_noset(\"screenshot\", False))\n\n first_connetion_timeout = 3.05 # Slightly larger than 3, default TCP packet retransmission window.\n first_reponse_timeout = 6\n try:\n response = requests.post(service_url,\n json={\"enableVideo\": video, \"enableScreenshot\": screenshot},\n timeout=(first_connetion_timeout, first_reponse_timeout))\n if response.status_code == 200:\n self.log.info(\"Service StartTest\")\n except requests.exceptions.RequestException as e:\n self.log.info(\"Service without StartTest\")\n\n 
self.runner.startup()\n\n def check(self):\n \"\"\"\n check if test completed\n :return:\n \"\"\"\n if self.widget:\n self.widget.update()\n\n return self.runner.check()\n\n def report_test_duration(self):\n if self.start_time:\n self.end_time = time.time()\n self.log.debug(\"Selenium tests ran for %s seconds\", self.end_time - self.start_time)\n\n def service_sync_artifacts(self):\n self.log.info(\"Service:\" + str(self.runner.execution[\"service_id\"]))\n self.log.info(\"Remote:\" + str(self.runner.execution[\"remote\"]))\n self.log.info(\"Capabilities:\" + str(len(self.runner.execution[\"capabilities\"])))\n\n first_connetion_timeout = 3.05 # Slightly larger than 3, default TCP packet retransmission window.\n first_reponse_timeout = 6\n if self.runner.execution[\"remote\"]:\n service_host = self.runner.execution[\"remote\"].split(\":\")[1]\n service_url = self.runner.execution[\"remote\"].split(\":\")[0] + \\\n ':' + service_host + \\\n ':5555/extra/bzt_servlet?command=endTest'\n\n try:\n response = requests.post(service_url,\n json={},\n timeout=(first_connetion_timeout, first_reponse_timeout))\n\n if response.status_code == 200:\n self.log.info(\"Service EndTest\")\n service_url = self.runner.execution[\"remote\"].split(\":\")[\n 0] + \":\" + service_host + \":5555/extra/bzt_servlet\"\n request = requests.get(service_url, stream=True,\n timeout=(first_connetion_timeout, first_reponse_timeout))\n self.log.info(\"Script:\" + self.script)\n base_path_script = '.'.join(self.script.split('.')[:-1])\n execution_artifacts_file = base_path_script + \".zip\"\n with open(execution_artifacts_file, 'wb') as f:\n shutil.copyfileobj(request.raw, f)\n except requests.exceptions.RequestException as e:\n self.log.info(\"Service without endTest\")\n\n def shutdown(self):\n \"\"\"\n shutdown test_runner\n :return:\n \"\"\"\n self.runner.shutdown()\n self.report_test_duration()\n\n def post_process(self):\n self.runner.post_process()\n self.service_sync_artifacts()\n if os.path.exists(\"geckodriver.log\"):\n self.engine.existing_artifact(\"geckodriver.log\", True)\n\n def has_results(self):\n return self.runner.has_results()\n\n def get_widget(self):\n if not self.widget:\n self.widget = SeleniumWidget(self.script, self.runner.stdout_file)\n return self.widget\n\n def get_error_diagnostics(self):\n diagnostics = []\n if self.runner:\n diagnostics.extend(self.runner.get_error_diagnostics())\n gecko_logs = [\"geckodriver.log\", os.path.join(self.engine.artifacts_dir, \"geckodriver.log\")]\n for possible_log in gecko_logs:\n if os.path.exists(possible_log):\n with open(possible_log) as fds:\n diagnostics.append(\"Geckodriver log:\\n\" + fds.read())\n return diagnostics\n\n\nclass SeleniumWidget(Pile, PrioritizedWidget):\n def __init__(self, script, runner_output):\n widgets = []\n self.script_name = Text(\"Selenium: %s\" % os.path.basename(script))\n self.summary_stats = Text(\"Delayed...\")\n self.runner_output = runner_output\n widgets.append(self.script_name)\n widgets.append(self.summary_stats)\n super(SeleniumWidget, self).__init__(widgets)\n PrioritizedWidget.__init__(self, priority=10)\n\n def update(self):\n reader_summary = ''\n if self.runner_output is not None and os.path.exists(self.runner_output):\n with open(self.runner_output, \"rt\") as fds:\n lines = fds.readlines()\n if lines:\n line = lines[-1]\n if not line.endswith(\"\\n\") and len(lines) > 1:\n line = lines[-2]\n if line and \",\" in line:\n reader_summary = line.split(\",\")[-1]\n\n if reader_summary:\n 
self.summary_stats.set_text(reader_summary)\n else:\n self.summary_stats.set_text('In progress...')\n\n self._invalidate()\n\n\nclass ChromeDriver(RequiredTool):\n def __init__(self, tool_path, parent_logger, download_link):\n super(ChromeDriver, self).__init__(\"ChromeDriver\", tool_path, download_link)\n self.log = parent_logger.getChild(self.__class__.__name__)\n\n def check_if_installed(self):\n return os.path.exists(self.tool_path)\n\n def get_driver_dir(self):\n return get_full_path(self.tool_path, step_up=1)\n\n def install(self):\n dest = self.get_driver_dir()\n if not os.path.exists(dest):\n os.makedirs(dest)\n\n self.log.info(\"Will install %s into %s\", self.tool_name, dest)\n dist = self._download(use_link=True)\n try:\n self.log.info(\"Unzipping %s to %s\", dist, dest)\n unzip(dist, dest)\n finally:\n os.remove(dist)\n\n if not is_windows():\n os.chmod(self.tool_path, 0o755)\n\n if not self.check_if_installed():\n raise ToolError(\"Unable to find %s after installation!\" % self.tool_name)\n\n\nclass GeckoDriver(RequiredTool):\n def __init__(self, tool_path, parent_logger, download_link):\n super(GeckoDriver, self).__init__(\"GeckoDriver\", tool_path, download_link)\n self.log = parent_logger.getChild(self.__class__.__name__)\n\n def check_if_installed(self):\n return os.path.exists(self.tool_path)\n\n def get_driver_dir(self):\n return get_full_path(self.tool_path, step_up=1)\n\n def install(self):\n dest = self.get_driver_dir()\n if not os.path.exists(dest):\n os.makedirs(dest)\n\n self.log.info(\"Will install %s into %s\", self.tool_name, dest)\n dist = self._download(use_link=True)\n try:\n if self.download_link.endswith('.zip'):\n self.log.info(\"Unzipping %s to %s\", dist, dest)\n unzip(dist, dest)\n else:\n self.log.info(\"Untaring %s to %s\", dist, dest)\n untar(dist, dest)\n finally:\n os.remove(dist)\n\n if not is_windows():\n os.chmod(self.tool_path, 0o755)\n\n if not self.check_if_installed():\n raise ToolError(\"Unable to find %s after installation!\" % self.tool_name)\n\n # TODO: check for compatible browser versions?\n","sub_path":"bzt/modules/selenium.py","file_name":"selenium.py","file_ext":"py","file_size_in_byte":21648,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"101649379","text":"import unittest\n\nfrom pyramid.config import Configurator\nfrom pyramid.decorator import reify\n\n\nclass Base(unittest.TestCase):\n\n @reify\n def config(self):\n from sndcld import includeme\n _config = Configurator(settings={})\n _config.include(includeme)\n self.addCleanup(delattr, self, 'config')\n return _config\n\n @reify\n def app(self):\n _app = self.config.make_wsgi_app()\n self.addCleanup(delattr, self, 'app')\n return _app\n","sub_path":"sndcld/tests/functional/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":488,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"620370534","text":"import os\n\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': ':memory:',\n }\n}\nINSTALLED_APPS = (\n 'django_nose',\n 'django.contrib.contenttypes',\n 'popolo',\n 'candidator',\n)\nSITE_ID = 1\nSECRET_KEY = 'this-is-just-for-tests-so-not-that-secret'\nROOT_URLCONF = 'popolo.urls'\nTEST_RUNNER = 
'django_nose.NoseTestSuiteRunner'\n","sub_path":"testing_settings.py","file_name":"testing_settings.py","file_ext":"py","file_size_in_byte":452,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"50702563","text":"class Atm(object):\r\n    def __init__(self, name, ccv, cardNumber):\r\n        self.name = name\r\n        self.ccv = ccv\r\n        self.cardNumber = cardNumber\r\n    def Pay(self, keytype):\r\n        print(\"Withdrawal: \" + keytype)\r\n\r\natm1 = Atm(\"bob\", \"782\", \"1082099\")\r\nprint(atm1.name, atm1.ccv, atm1.cardNumber)\r\natm1.Pay(\"success\")","sub_path":"bank.py","file_name":"bank.py","file_ext":"py","file_size_in_byte":324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"88823870","text":"# -*- coding: utf-8 -*-\n\nfrom datetime import datetime\n\nfrom odoo import api, fields, models, _\nfrom odoo.exceptions import ValidationError\n\n\n# from odoo.exceptions import UserError\n\n\nclass CommonVisitMaintenance(models.Model):\n    \"\"\" Common fields shared between visit and maintenance request records. \"\"\"\n\n    _name = 'common.visit.maintenance'\n    _description = 'common fields between visit and maintenance request'\n\n    num = fields.Char(string='Serial')\n    note = fields.Html(string='Note')\n    date = fields.Date('Date')\n    customer = fields.Many2one('res.partner', string='Customer')\n    responsible = fields.Many2one('res.users', string='Responsible')\n    receiver_person = fields.Char(string='Receiver')\n    signature = fields.Binary(string='Signature')\n    contract_id = fields.Many2one('contract.maintenance', string='Contract')\n    state = fields.Selection([('draft', 'Draft'), ('progress', 'In Progress'), ('delay', 'Delay'), ('done', 'Done')],\n                             string='State', default='draft', track_visibility='onchange')\n    is_additional = fields.Boolean('Additional', default=False)\n\n    def action_progress(self):\n        self.state = 'progress'\n\n    def action_done(self):\n        self.state = 'done'\n\n    def set_to_draft(self):\n        self.state = 'draft'\n\nclass VisitManagement(models.Model):\n    _name = 'visit.management'\n    _inherit = ['common.visit.maintenance', 'mail.thread']\n    _description = 'visit management'\n    _rec_name = 'num'\n\n    status = fields.Selection([('work', 'Work'), ('not', 'Not Work')], string='Status', default='work')\n    alarm_line_ids = fields.One2many('alarm.system.line', 'visit_id', string='Alarm System')\n    actual_date = fields.Date('Actual Date')\n    extinguishing_line_ids = fields.One2many('extinguishing.system.line', 'visit_id', string='Extinguishing System')\n\n    def action_progress(self):\n        res = super(VisitManagement, self).action_progress()\n        records = self.env['visit.management'].search([('id', '!=', self.id), ('contract_id', '=', self.contract_id.id),\n                                                       ('customer', '=', self.customer.id),\n                                                       ('is_additional', '=', False),\n                                                       ('date', '<', self.date),('state','!=','done')])\n        if records:\n            raise ValidationError(_('You cannot change the current visit to progress unless the previous visit is done'))\n        else:\n            self.actual_date = fields.Date.today()\n        return res\n\n    def action_delay_visit(self):\n        records = self.env['visit.management'].search([('state','=','draft'),('is_additional','=',False)])\n        for rec in records:\n            to_day = fields.Date.today()\n            if rec.date < to_day:\n                rec.state = 'delay'\n\n    @api.constrains('contract_id','customer','responsible','date')\n    def check_state(self):\n        for rec in self:\n            records = self.env['visit.management'].search([('id','!=',rec.id),('contract_id','=',rec.contract_id.id),\n                                                           
('customer','=', rec.customer.id),('is_additional','=',False),\n                                                           ('date','<=',rec.date)])\n            if records:\n                for visit in records:\n                    record_date_month = datetime.strptime(str(visit.date), \"%Y-%m-%d\").month\n                    rec_date_month = datetime.strptime(str(rec.date), \"%Y-%m-%d\").month\n                    if record_date_month == rec_date_month:\n                        raise ValidationError(_('You cannot create two visits for the same contract and customer in the same month'))\n\n    def action_raise_complaints(self):\n        domain = []\n        context = {}\n        context = dict(self.env.context or {})\n        context['default_visit_id'] = self.id\n        context['default_date'] = fields.Date.today()\n        context['default_customer'] = self.customer.id\n        context['default_responsible'] = self.responsible.id\n        context['default_type'] = 'visit'\n        return {\n            'name': _('Raise Complaints'),\n            'view_mode': 'form',\n            'view_type': 'form',\n            'type': 'ir.actions.act_window',\n            'res_model': 'raise.complaints.wiz',\n            'view_id': self.env.ref('visit_management_module.view_raise_complaints_wizard_form').id,\n            'target': 'new',\n            'domain': domain,\n            'context': context,\n        }\n\n    def create_maintenance_request(self):\n        maintenance = self.env['maintenance.management'].create({'contract_id': self.contract_id.id, 'responsible': self.responsible.id,\n                                                                 'customer': self.customer.id})\n        alarm_line_ids = [(0, 0, {'name': line.name.id, 'count': 0, 'contract_id':False,'visit_id': False,'maintenance_id':maintenance.id}) for line in self.alarm_line_ids]\n        extinguishing_line_ids = [(0, 0, {'name': line.name.id, 'count': 0, 'contract_id':False,'visit_id': False,'maintenance_id':maintenance.id}) for line in self.extinguishing_line_ids]\n        maintenance.write({'extinguishing_line_ids':extinguishing_line_ids, 'alarm_line_ids':alarm_line_ids})\n        action = self.env.ref('visit_management_module.maintenance_action').read()[0]\n        action['domain'] = [('id', '=', maintenance.id),]\n        return action\n\n    def fetch_sequence(self, data=None):\n        '''generate transaction sequence'''\n        return self.env['ir.sequence'].get('visit.management')\n\n    @api.model\n    def create(self, vals):\n        seq = self.fetch_sequence()\n        contract_id = vals.get('contract_id')\n        final_seq = seq\n        if contract_id:\n            final_seq = ''\n            contract_seq = self.env['contract.maintenance'].sudo().browse(contract_id).read(['num'])\n            contract_seq = contract_seq[0]['num']\n            final_seq = contract_seq+'/'+seq\n        vals['num'] = final_seq\n        return super(VisitManagement, self).create(vals)\n\n\nclass MaintenanceManagement(models.Model):\n    _name = 'maintenance.management'\n    _inherit = ['common.visit.maintenance', 'mail.thread']\n    _description = 'Maintenance Management'\n    _rec_name = 'num'\n\n    alarm_line_ids = fields.One2many('alarm.system.line', 'maintenance_id', string='Alarm System')\n    visit_id = fields.Many2one('visit.management', string=\"Visit\")\n    extinguishing_line_ids = fields.One2many('extinguishing.system.line', 'maintenance_id', string='Extinguishing System')\n\n    def create_sale_order(self):\n        order = self.env['sale.order'].create({'partner_id': self.customer.id})\n        action = self.env.ref('sale.action_orders').read()[0]\n        action['domain'] = [('id', '=', order.id)]\n        return action\n\n    def action_raise_complaints(self):\n        domain = []\n        context = {}\n        context = dict(self.env.context or {})\n        context['default_maintenance_id'] = self.id\n        context['default_date'] = fields.Date.today()\n        context['default_customer'] = self.customer.id\n        context['default_responsible'] = self.responsible.id\n        context['default_type'] = 'maintenance'\n        return {\n            'name': _('Raise Complaints'),\n            
'view_mode': 'form',\n 'view_type': 'form',\n 'type': 'ir.actions.act_window',\n 'res_model': 'raise.complaints.wiz',\n 'view_id': self.env.ref('visit_management_module.view_raise_complaints_wizard_form').id,\n 'target': 'new',\n 'domain': domain,\n 'context': context,\n }\n\n def fetch_sequence(self, data=None):\n '''generate transaction sequence'''\n return self.env['ir.sequence'].get('maintenance.management')\n\n @api.model\n def create(self, vals):\n seq = self.fetch_sequence()\n vals['num'] = seq\n return super(MaintenanceManagement, self).create(vals)\n\n\nclass AlarmSystemLine(models.Model):\n _inherit = 'alarm.system.line'\n\n visit_id = fields.Many2one('visit.management')\n maintenance_id = fields.Many2one('maintenance.management')\n\n\nclass ExtinguishingSystemLine(models.Model):\n _inherit = 'extinguishing.system.line'\n\n visit_id = fields.Many2one('visit.management')\n maintenance_id = fields.Many2one('maintenance.management')\n","sub_path":"custom-addons/visit_management_module/models/visit.py","file_name":"visit.py","file_ext":"py","file_size_in_byte":8363,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"260573179","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# ylgongPw @ 2020-02-17 11:29:15\nfrom __future__ import unicode_literals\nfrom __future__ import absolute_import\n\n# Definition for singly-linked list.\nclass ListNode:\n def __init__(self, x):\n self.val = x\n self.next = None\n\nclass Solution:\n def getIntersectionNode(self, headA: ListNode, headB: ListNode) -> ListNode:\n comm = {}\n\n while headA:\n tmpA = headA\n comm[tmpA] = 1\n headA = tmpA.next\n\n while headB:\n tmpB = headB\n if comm.get(tmpB,0) == 1:\n return tmpB\n headB = tmpB.next\n\n return None\n\n\n\n\n\ndef init_list_node(alist,skip,comm_node):\n head = ListNode(alist[0])\n curr = head\n\n i = 1\n while i != len(alist):\n if i == skip:\n tmp = comm_node\n else:\n tmp = ListNode(alist[i])\n\n curr.next = tmp\n curr = curr.next\n i += 1\n return head\n\ndef print_node(head):\n while head:\n print (\"id:{},val:{}\".format(id(head),head.val))\n head = head.next\n\n\nif __name__ == '__main__':\n listA = [2,6,4]\n skipA = 3\n\n listB = [1,5]\n skipB = 2\n\n intersectVal = 0\n comm_node = ListNode(intersectVal)\n\n headA = init_list_node(listA,skipA,comm_node)\n headB = init_list_node(listB,skipB,comm_node)\n print (\"headA\")\n print_node(headA)\n print (\"headB\")\n print_node(headB)\n S = Solution()\n node = S.getIntersectionNode(headA, headB)\n print (node)\n\n","sub_path":"160/160-my.py","file_name":"160-my.py","file_ext":"py","file_size_in_byte":1529,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"607146015","text":"import datetime\nimport dash_core_components as dcc\nimport dash_html_components as html\n\n\ndef make_html_calendar(alert_code):\n calendar_ = html.Div([\n dcc.DatePickerSingle(\n id='date_picker_{}'.format(alert_code),\n min_date_allowed=datetime.date(2019, 1, 1),\n max_date_allowed=datetime.date.today() - datetime.timedelta(days=1),\n date=datetime.date.today() - datetime.timedelta(days=1),\n display_format='MMM Do, YYYY',\n with_portal=True)\n ])\n return calendar_\n","sub_path":"src/web/components/make_html_calendar.py","file_name":"make_html_calendar.py","file_ext":"py","file_size_in_byte":541,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"540081672","text":"from pygame import mixer\r\nfrom time import 
time\r\nfrom datetime import datetime\r\nimport random\r\ndef play(file,s):\r\n mixer.init()\r\n mixer.music.load(file)\r\n mixer.music.play()\r\n while True:\r\n a=input()\r\n if a==s:\r\n mixer.music.stop()\r\n break\r\n else:\r\n mixer.music.rewind()\r\ndef note(msg):\r\n with open(\"corona.txt\",\"a\") as f:\r\n f.write(f\"{msg} {datetime.now()}\\n\")\r\nif __name__ == '__main__':\r\n init_msg=time()\r\n msgsec=5\r\n\r\n while True:\r\n if time()-init_msg>msgsec:\r\n print(\"Wash hands. Press 'd' to stop alarm\")\r\n play(random.choice([\"Baby Robot_01.mp3\",\"Donald Duck_01.mp3\"]),\"d\")\r\n init_msg = time()\r\n note(\"Wash hands at\")\r\n","sub_path":"corana_care.py","file_name":"corana_care.py","file_ext":"py","file_size_in_byte":762,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"316825826","text":"import numpy as np\nimport pandas as pd\nimport conc.profile\n\nclass Bacteria:\n def __init__(self, bact_id, seed, directory_name, initial_position, D_rot):\n self.id = bact_id\n self.rng = np.random.RandomState(seed)\n self.traj_path = directory_name + '/traj/id_'\n \n from inputs import T_max, memory, t_sim, bact_speed, horizon, grad_label, food_surface\n self.dim = 2 # 2 for 2D, 3 for 3\n #self.Lambda = Lambda\n self.dt = T_max/memory\n self.memory = memory\n sim_time = t_sim + 0.02\n self.n_sim_time = int(sim_time/self.dt) # simulation time -- discrete\n self.bact_speed = bact_speed\n self.horizon = horizon\n self.food_surface = food_surface\n self.k1 = int(grad_label[0]) # grad_label = [k1, k2], where k is the delay. k1 0.0 \n return\n def rotational_diffusion(self, psi, size):\n if size == 1:\n psi_update = psi + np.sqrt(2*self.D_rot*self.dt)*self.rng.randn()\n return psi_update\n elif size > 1:\n psi_list = np.empty(size)\n psi_list[0] = psi\n for i in range(1, size):\n psi_list[i] = psi_list[i-1] + np.sqrt(2*self.D_rot*self.dt)*self.rng.randn()\n return psi_list\n else:\n raise ValueError('check rotational_diffusion(self, psi, size)')\n \n def velocity_for(self, psi_list):\n V = np.empty((psi_list.size, self.dim))\n V[:,0] = self.bact_speed*np.cos(psi_list)\n V[:,1] = self.bact_speed*np.sin(psi_list)\n return V\n\n def get_new_line(self):\n \"\"\"\n Returns\n -------\n array of shape memory x dim . Co-ordinates of the new trajectory. \n \"\"\"\n self.psi = self.random_psi()\n self.psi_list = self.rotational_diffusion(self.psi, self.memory)\n self.V = self.velocity_for(self.psi_list) \n line = np.empty((self.memory, self.dim))\n line[0] = self.R\n for i in range(1, self.memory): # i count the rows\n line[i] = line[i-1] + self.V[i-1]*self.dt # x(t) = x(t-dt) + v(t) * dt\n if len(self.trajectory) + len(line) > self.n_sim_time: # it goes beyond simulation time ? 
shorten it.\n upto = self.n_sim_time - len(self.trajectory)\n line = line[:upto] \n \n return line # trajectory as columns (2 or 3)\n \n def update_trajectory(self, line):\n \"\"\"\n Update the trajectory with current line\n Write to file if flag_inside is True\n Also write if flag_force_write is True -- end of simulation time\n\n Parameters\n ----------\n \n\n Returns\n -------\n None.\n\n \"\"\"\n self.trajectory = np.vstack((self.trajectory, line))\n # update other labels..\n self.assign_label()\n if (len(line) == 1):\n self.tumble.append(self.flag_tumble)\n self.label_true.append(self.conc_label_prev)\n self.label_pred.append(not self.flag_tumble)\n else:\n nonelist = [None]*(len(line)-1)\n self.tumble.extend([self.flag_tumble] + nonelist)\n self.label_true.extend([self.conc_label_prev] + nonelist)\n self.label_pred.extend([not self.flag_tumble] + nonelist)\n #\n if len(self.trajectory) == self.n_sim_time: # reached simulation time; enough!\n self.flag_force_write = True\n print('TIMEOUT ---')\n # check this\n if self.flag_inside or self.flag_outside:\n self.flag_force_write = True\n \n if self.flag_force_write: # then write it to file\n if self.dim==2:\n column_name = ['time', 'x', 'y']\n elif self.dim==3:\n column_name = ['time', 'x', 'y', 'z']\n else:\n print('Check self.dim | must be 2 or 3')\n \n n_time = len(self.trajectory)-1\n data = np.empty((n_time, 1+self.dim))\n data[:,0] = np.around(np.arange(n_time)*self.dt,2)\n data[:,1:1+self.dim] = np.around(self.trajectory[1:],4)\n df_traj = pd.DataFrame(data, columns=column_name)\n # other labels.. \n df_traj['tumble'] = self.tumble\n df_traj['label_true'] = self.label_true\n df_traj['label_pred'] = self.label_pred\n #\n output_path= self.traj_path + str(self.id) + '.csv'\n df_traj.to_csv(output_path, index=False)\n pass\n \n def get_concentration(self, food, line):\n \"\"\"\n To find the concentration along the line (or at a point), due to the food-object\n - If the bacteria is inside the food circle --> set the flag, write the trajectory and exit!\n - otherwise, return the concentration.\n \n Parameters\n ----------\n food : object of type FoodEnviornment(class)\n used to estimate the current distance from the food and concentration along the trajectory.\n line : array (2 or 3 columns) \n co-ordinates of new line-trajectory; or the co-ordinates of the new point\n\n Returns\n -------\n array of size len(line). \n Concentration along the new line-trajectory; or at the new point\n\n \"\"\"\n conc_line = np.empty(len(line)) # to store the concentration along trajectory\n for i in range(len(line)):\n conc_line[i], flag_inside, flag_outside = food.concentration_at(line[i]) # calling FoodEnviornment function\n if flag_inside:\n print('REACHED <<---')\n self.flag_inside = flag_inside \n self.update_trajectory(line[:i+1]) # update trajectory and write it to file. \n break # exit here\n if flag_outside:\n print('LOST --->>')\n self.flag_outside = flag_outside \n self.update_trajectory(line[:i+1]) # update trajectory and write it to file. 
\n break # exit here\n return(conc_line)\n def move_and_record(self, food, flag_tumble):\n \"\"\"\n - bacteria selct a random velocity vector if flag_tumble is True\n - construct a line (trajectory) of fixed length incorporating rotational noise.\n - get the concentration along this new line -- this depends on food enviornment (conc, grad etc)\n Parameters\n ----------\n food : object of type FoodEnviornment(class)\n used to estimate the current distance from the food and concentration along the trajectory.\n flag_tumble : bool\n change the velocity vector if True.\n \n Returns\n -------\n 1d array of lenth = memory. Poisson random variable indicating the binding of signaling\n molecules along a fixed line(trajectory). This will be used further to make prediction about\n concentration gradient\n\n \"\"\"\n self.flag_tumble = flag_tumble\n #------------------------------------------------------------------------------------------------\n if self.flag_tumble:\n new_line = self.get_new_line() # pick up a new line-trajectory\n conc_line = self.get_concentration(food, new_line) # concentration along this new line-trajectory\n self.conc = np.flip(conc_line, axis=None) # 1st element is the recent one; Volttera kernel applicable\n self.R = new_line[-1] # end point; this will be the initial position for the next line.\n self.psi = self.psi_list[-1] # end point; this will be the initial position for the next line.\n if not self.flag_force_write:\n self.update_trajectory(new_line) # update the new line-trajectory\n else:\n self.psi = self.rotational_diffusion(self.psi, 1) # update psi only once\n self.V = self.velocity_for(self.psi) # updated velocity\n self.R = self.R + self.V * self.dt # then find the new point\n conc_R = self.get_concentration(food, self.R) # concentration at the new point\n self.conc = np.roll(self.conc, 1) # rotating to right\n self.conc[0] = conc_R # new conc as first element\n if not self.flag_force_write:\n self.update_trajectory(self.R.reshape(1,-1)) # update the new point\n #--------------------------------------------------------------------------------------------------\n if self.flag_force_write: # trajectory is already written in file.\n u_record = None # no need to record further. Assigning None is useful for further check\n else:\n u_record = self.conc\n return(u_record) # return value is None only if the bacteria is already reached the food!\n\ndef trajectory_for(bsim, food, k1, k2):\n flag_tumble = True # Do not change\n flag_go = True\n while flag_go:\n urecord = bsim.move_and_record(food, flag_tumble)\n if(urecord is None): # bacteria reached the food-circle\n flag_go = False\n else:\n decision = (urecord[k1] - urecord[k2]) > 0.0 \n flag_tumble = (decision == 0) # decision is 0 if concentration gradient is -ve ; then tubmle!\n return\n\ndef random_initial_condition(seed, n_sample):\n from inputs import traj_between\n r1, r2 = traj_between[0], traj_between[1]\n rng = np.random.RandomState(seed)\n random_r = rng.uniform(r1**2, r2**2, n_sample)\n random_theta = rng.uniform(-np.pi, np.pi, n_sample)\n psi = rng.uniform(-np.pi, np.pi, n_sample)\n df = pd.DataFrame()\n df['x'] = np.sqrt(random_r) * np.cos(random_theta)\n df['y'] = np.sqrt(random_r) * np.sin(random_theta)\n df['psi'] = psi\n return(df)\n\ndef run(args):\n food = conc.profile.DimensionTwo()\n dir_name = args['dir_name']\n from inputs import n_bacteria, grad_label\n k1 = int(grad_label[0]) # grad_label = [k1, k2], where k is the delay. 
k1 3:\n # print(paint.blue.bold(txt[:3]), paint.blue.underline(txt[3:]))\n # elif random.randint(0,10) > 6:\n # print(paint.green.bold(txt[:3]), paint.green.underline(txt[3:]))\n # else:\n # print(paint.red.bold(txt[:3]), paint.red.underline(txt[3:]))\n\ndef run():\n addresses = list_addresses()\n go_to_address(addresses)\n\ndef go_to_address(addresses):\n if pyperclip.paste() == 'pyperclip.paste()':\n print(\"Enter number to search, or type in an address\")\n else:\n address = pyperclip.paste().strip()\n print(\"Select index to search, or type in an address. \\nLeave blank to search for: \\\"{0}\\\" \".format(address[:50]+\"...\"))\n\n answer = input()\n url = lambda address : \"https://www.google.com/maps/place/{0}\".format(address)\n if answer == \"\":\n q = address\n else:\n try:\n q = addresses[int(answer)]\n except ValueError:\n q = answer.strip()\n except IndexError:\n print(\"You probably entered the wrong number, lets try again\")\n go_to_address(addresses)\n\n insert_into_recent_searches(q)\n webbrowser.open(url(q))\n\n\ndef insert_into_recent_searches(query):\n file = shelve.open('gmaps', flag='c', protocol=None, writeback=False)\n recent_searches = file['recent_searches']\n # pdb.set_trace()\n if query not in recent_searches:\n recent_searches.insert(0, query)\n recent_searches = recent_searches[:9]\n file['recent_searches'] = recent_searches\n file.close()\n\ndef list_addresses():\n file = shelve.open('gmaps', flag='c', protocol=None, writeback=False)\n if 'recent_searches' in file.keys():\n p = file['recent_searches']\n\n if len(p) > 0:\n print(\"Recent searches:\")\n\n for a in reversed(p):\n custom_print(\"{0}: {1}\".format(p.index(a), a))\n print(paint.rainbow(\"-----------------------------------\"))\n else:\n p = []\n file['recent_searches'] = []\n\n file.close()\n return p\n\n\nif __name__ == '__main__':\n run()","sub_path":"gmaps.py","file_name":"gmaps.py","file_ext":"py","file_size_in_byte":2052,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"211843605","text":"# *****************************************************************************\n# Copyright (c) 2019, Intel Corporation All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# Redistributions of source code must retain the above copyright notice,\n# this list of conditions and the following disclaimer.\n#\n# Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,\n# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR\n# PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR\n# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,\n# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,\n# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;\n# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,\n# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR\n# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,\n# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n# *****************************************************************************\n\n\nimport pandas as pd\nimport numpy as np\nimport time\nimport sdc\nfrom sdc import prange\n\n# adopted from:\n# http://www.pythonforfinance.net/2017/02/20/intraday-stock-mean-reversion-trading-backtest-in-python/\n\n\n@sdc.jit\ndef intraday_mean_revert():\n nsyms = 1000\n max_num_days = 80000\n all_res = 0.0\n\n t1 = time.time()\n for i in prange(nsyms):\n # np.random.seed(0)\n s_open = 20 * np.random.randn(max_num_days)\n s_low = 18 * np.random.randn(max_num_days)\n s_close = 19 * np.random.randn(max_num_days)\n df = pd.DataFrame({'Open': s_open, 'Low': s_low,\n 'Close': s_close})\n\n # create column to hold our 90 day rolling standard deviation\n df['Stdev'] = df['Close'].rolling(window=90).std()\n\n # create a column to hold our 20 day moving average\n df['Moving Average'] = df['Close'].rolling(window=20).mean()\n\n # create a column which holds a TRUE value if the gap down from previous day's low to next\n # day's open is larger than the 90 day rolling standard deviation\n df['Criteria1'] = (df['Open'] - df['Low'].shift(1)) < -df['Stdev']\n\n # create a column which holds a TRUE value if the opening price of the stock is above the 20 day moving average\n df['Criteria2'] = df['Open'] > df['Moving Average']\n\n # create a column that holds a TRUE value if both above criteria are also TRUE\n df['BUY'] = df['Criteria1'] & df['Criteria2']\n\n # calculate daily % return series for stock\n df['Pct Change'] = (df['Close'] - df['Open']) / df['Open']\n\n # create a strategy return series by using the daily stock returns where the trade criteria above are met\n df['Rets'] = df['Pct Change'][df['BUY']]\n\n all_res += df['Rets'].mean()\n\n print(all_res)\n print(\"execution time:\", time.time() - t1)\n\n\nintraday_mean_revert()\n","sub_path":"examples/intraday_mean_rand.py","file_name":"intraday_mean_rand.py","file_ext":"py","file_size_in_byte":3401,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"197824140","text":"# Autor: Luis Humberto Burgueño Paz\n# Calcula el rendimiento de la gasolina y dice al usuario la cantidad de litros necesitados para cierta cantidad de kilómetros\n\n# Recibe como parámetros los kilometros recorridos y los litros gastados y regresa los kilómetros recorridos por litro\ndef calcularKmL(kilometros, litros):\n kmL = kilometros / litros\n return kmL\n\n#Recibe como parámetros los kilómetros recorridos y los litros gastados y regresa las millas recorridas por galón\ndef calcularMiG(kmL):\n miGal = kmL / (1.6093 * 0.264)\n return miGal\n\n# Recibe como parámetros los kilómetros por recorrer y regresa los litros necesitados de gasolina\ndef calcularLitros(kilometrosPorRecorrer, kmL):\n litrosNecesitados = kilometrosPorRecorrer / kmL\n return litrosNecesitados\n# Lee los km y los litros e imprime los km/l y mi/gal\ndef main():\n kilometros = int(input(\"Teclea el número de km recorridos: \"))\n litros = 
int(input(\"Teclea el número de litros de gasolina usados: \"))\n kmL = calcularKmL(kilometros, litros)\n miGal = calcularMiG(kmL)\n print(\"Si recorres %d kms con %d litros de gasolina, el rendimiento es:\" % (kilometros, litros))\n print(\"%.02f km/l\" % kmL)\n print(\"%.02f mi/gal\" % miGal)\n kilometrosPorRecorrer = int(input(\"¿Cuántos kilómetros vas a recorrer? \"))\n litrosNecesitados = calcularLitros(kilometrosPorRecorrer, kmL)\n print(\"Para recorrer %d km. necesitas %.02f litros de gasolina\" % (kilometrosPorRecorrer, litrosNecesitados))\nmain()\n","sub_path":"RendimientoDeUnAuto.py","file_name":"RendimientoDeUnAuto.py","file_ext":"py","file_size_in_byte":1507,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"159947017","text":"import collections\nclass buildGreph(object):\n def __init__(self):\n graph = collections.defaultdict(list)\n def addEdge(graph,u,v):\n graph[u].append(v)\n\n\n\n\n\n\n\n\n\nclass graphNode(object):\n def __init__(self,vertex):\n self.vertex = vertex\n self.neig = list(graphNode)\nclass course(object):\n def __init__(self,data):\n self.graph = collections.defaultdict(list)\n self.queue = collections.deque()\n self.indegree = collections.defaultdict(int)\n for v in data:\n e0= v[0]\n e1= v[1]\n self.indegree[e0]\n self.indegree[e1] +=1\n self.graph[e0].append(e1)\n #build indegree queue\n for key,val in self.indegree.iteritems():\n if val == 0:\n self.queue.append(key)\n\n def findCourseChooseOrder(self):\n result = []\n while(len(self.queue)>0):\n #deque 0 indegree elem\n elem=self.queue.pop()\n result.append(elem)\n #delete this elem from indegree\n self.indegree.pop(elem,None)\n for neig in self.graph[elem]:\n self.indegree[neig] -= 1\n if self.indegree[neig] == 0:\n self.queue.append(neig)\n\n\n if len(self.indegree) > 0:\n raise ValueError(\"Graph has cycle\")\n print(result)\n\n def dfsWayToTopologicalSort(self):\n visited = collections.defaultdict(int)\n stack = collections.deque()\n for vertex in list(self.graph):\n self.dfsWalk(vertex,visited,stack)\n print(\"Stack:\",stack)\n\n def dfsWalk(self,node,visited,stack):\n if visited[node] == -1:\n raise ValueError(\"Cycle found\")\n return\n if visited[node] == 1:\n return\n\n visited[node] = -1\n for neig in self.graph[node]:\n self.dfsWalk(neig,visited,stack)\n visited[node] = 1\n print(\"Push to Stack\",node)\n stack.append(node)\n return True\n\n def cloneGraph(graph):\n hMap = {}\n copyGraph = {}\n for node in graph:\n copyNode = new(graphNode(node))\n hMap[node] = copyNode\n for neig in graph[node]:\n copyNode.neig[cloneDFS(neig,hMap)]\n\n copyGraph[copyNode] = copyNode.neig\n return copyGraph\n\n def cloneDFS(graph, node,hMap):\n if node in hMap:\n return hMap[node]\n copyNode = new(graphNode(node))\n hMap[node] = copyNode\n for neig in graph[node]:\n #copy all neig of Node into CopyNode neig\n copyNode.neig[cloneDFS(graph,neig,hMap)]\n return copyNode\n\n\n\n\n\n\n\n\n\n\n\nif __name__ == \"__main__\":\n dep = [('a','b'),('b','c'),('a','c'),('b','d'),('c','d')]\n course_dep = course(dep)\n #course_dep.findCourseChooseOrder()\n course_dep.dfsWayToTopologicalSort()\n\n\n\n","sub_path":"courseOrder.py","file_name":"courseOrder.py","file_ext":"py","file_size_in_byte":2887,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"608817051","text":"\nimport pprint\nimport os\nimport json\n\nprinter = pprint.PrettyPrinter(indent=4, width=120)\nfailedPokemon = {}\n\nfiles = 
os.listdir(\"pokemonListings/\")\n\nfor file in files:\n try:\n f = open(\"pokemonListings/\" + file, 'r')\n data = f.read()\n f.close()\n data = json.loads(data)\n\n f = open(\"pokemonListings/\" + file, 'w')\n # pprint.pprint(data, stream=f, indent=4, width=120)\n json.dump(data, f, indent=4)\n f.close()\n except Exception as ex:\n print(ex)\n print(\"-> \", file)\n","sub_path":"ListingFilePrettifier.py","file_name":"ListingFilePrettifier.py","file_ext":"py","file_size_in_byte":540,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"339633990","text":"import numpy as np\n\nfrom zipline.testing.predicates import assert_equal\nfrom .factor import CustomFactor\n\n\nclass IDBox(object):\n \"\"\"A wrapper that hashs to the id of the underlying object and compares\n equality on the id of the underlying.\n\n Parameters\n ----------\n ob : any\n The object to wrap.\n\n Attributes\n ----------\n ob : any\n The object being wrapped.\n\n Notes\n -----\n This is useful for storing non-hashable values in a set or dict.\n \"\"\"\n def __init__(self, ob):\n self.ob = ob\n\n def __hash__(self):\n return id(self)\n\n def __eq__(self, other):\n if not isinstance(other, IDBox):\n return NotImplemented\n\n return id(self.ob) == id(other.ob)\n\n\nclass CheckWindowsFactor(CustomFactor):\n \"\"\"A custom factor that makes assertions about the lookback windows that\n it gets passed.\n\n Parameters\n ----------\n input_ : Term\n The input term to the factor.\n window_length : int\n The length of the lookback window.\n expected_windows : dict[int, dict[pd.Timestamp, np.ndarray]]\n For each asset, for each day, what the expected lookback window is.\n\n Notes\n -----\n The output of this factor is the same as ``Latest``. Any assets or days\n not in ``expected_windows`` are not checked.\n \"\"\"\n params = ('expected_windows',)\n\n def __new__(cls, input_, window_length, expected_windows):\n return super(CheckWindowsFactor, cls).__new__(\n cls,\n inputs=[input_],\n dtype=input_.dtype,\n window_length=window_length,\n expected_windows=frozenset(\n (k, IDBox(v)) for k, v in expected_windows.items()\n ),\n )\n\n def compute(self, today, assets, out, input_, expected_windows):\n for asset, expected_by_day in expected_windows:\n expected_by_day = expected_by_day.ob\n\n col_ix = np.searchsorted(assets, asset)\n if assets[col_ix] != asset:\n raise AssertionError('asset %s is not in the window' % asset)\n\n try:\n expected = expected_by_day[today]\n except KeyError:\n pass\n else:\n expected = np.array(expected)\n actual = input_[:, col_ix]\n assert_equal(actual, expected)\n\n # output is just latest\n out[:] = input_[-1]\n","sub_path":"zipline/pipeline/factors/testing.py","file_name":"testing.py","file_ext":"py","file_size_in_byte":2413,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"93676301","text":"import json\nimport logging\nimport os\nimport sys\nfrom typing import Any, Dict, Mapping, Tuple, Union, Optional, TYPE_CHECKING\n\nfrom opentrons.config import CONFIG, ARCHITECTURE, SystemArchitecture\n\nif TYPE_CHECKING:\n from pathlib import Path # noqa(F401) - imported for types\n\nSettingsMap = Dict[str, Optional[bool]]\nSettingsData = Tuple[SettingsMap, int]\n\nlog = logging.getLogger(__name__)\n\n\nclass Setting:\n def __init__(self, _id: str, title: str, description: str,\n old_id: str = None,\n restart_required: bool = False,\n show_if: Tuple[str, bool] = None):\n self.id = _id\n #: The id of the setting for 
programmatic access through\n #: get_adv_setting\n self.old_id = old_id\n #: the old id before migration, if any\n self.title = title\n #: User facing title\n self.description = description\n #: User facing description\n self.restart_required = restart_required\n #: True if the user must restart\n self.show_if = show_if\n #: A tuple of (other setting id, setting value) that must match reality\n #: to show this setting in http endpoints\n\n def __repr__(self):\n return '{}: {}'.format(self.__class__, self.id)\n\n\n# If you add or remove any settings here BE SURE TO ADD A MIGRATION below.\n# You will also need to update the migration tests in:\n# api/tests/opentrons/config/test_advanced_settings_migration.py\nsettings = [\n Setting(\n _id='shortFixedTrash',\n old_id='short-fixed-trash',\n title='Short (55mm) fixed trash',\n description='Trash box is 55mm tall (rather than the 77mm default)'\n ),\n Setting(\n _id='calibrateToBottom',\n old_id='calibrate-to-bottom',\n title='Calibrate to bottom',\n description='Calibrate using the bottom-center of well A1 for each'\n ' labware (rather than the top-center)'\n ),\n Setting(\n _id='deckCalibrationDots',\n old_id='dots-deck-type',\n title='Deck calibration to dots',\n description='Perform deck calibration to dots rather than crosses, for'\n ' robots that do not have crosses etched on the deck'\n ),\n Setting(\n _id='disableHomeOnBoot',\n old_id='disable-home-on-boot',\n title='Disable home on boot',\n description='Prevent robot from homing motors on boot'\n ),\n Setting(\n _id='useOldAspirationFunctions',\n title='Use older pipette calibrations',\n description='Use the older pipette calibrations for P10S, P10M, P50S,'\n ' P50M, and P300S pipettes. Note this will cause the '\n ' default aspirate behavior (ul to mm conversion) to '\n ' function as it did prior to version 3.7.0. '\n ' NOTE: this does not impact GEN2 pipettes'\n )\n]\n\nif ARCHITECTURE == SystemArchitecture.BUILDROOT:\n settings.append(\n Setting(\n _id='disableLogAggregation',\n title='Disable Opentrons Log Collection',\n description='Prevent the robot from sending its logs to Opentrons'\n ' for analysis. 
Opentrons uses these logs to'\n ' troubleshoot robot issues and spot error trends.'))\n\nsettings_by_id = {s.id: s for s in settings}\nsettings_by_old_id = {s.old_id: s for s in settings}\n\n\n# TODO: LRU cache?\ndef get_adv_setting(setting: str) -> Optional[bool]:\n setting = _clean_id(setting)\n s = get_all_adv_settings()\n return s.get(setting, {}).get('value') # type: ignore\n\n\ndef get_all_adv_settings() -> Dict[str, Dict[str, Union[str, bool, None]]]:\n \"\"\"\n :return: a dict of settings keyed by setting ID, where each value is a\n dict with keys \"id\", \"title\", \"description\", \"value\",\n \"restart_required\", and \"show_if\"\n \"\"\"\n settings_file = CONFIG['feature_flags_file']\n\n values, _ = _read_settings_file(settings_file)\n\n return {\n key: {**settings_by_id[key].__dict__,\n 'value': value}\n for key, value in values.items() if key in settings_by_id\n }\n\n\ndef set_adv_setting(_id: str, value: Optional[bool]):\n _id = _clean_id(_id)\n settings_file = CONFIG['feature_flags_file']\n settings, version = _read_settings_file(settings_file)\n settings[_id] = value\n _write_settings_file(settings, version, settings_file)\n\n\ndef _clean_id(_id: str) -> str:\n if _id in settings_by_old_id.keys():\n _id = settings_by_old_id[_id].id\n return _id\n\n\ndef _read_json_file(path: Union[str, 'Path']) -> Dict[str, Any]:\n try:\n with open(path, 'r') as fd:\n data = json.load(fd)\n except FileNotFoundError:\n data = {}\n except json.JSONDecodeError as e:\n sys.stderr.write(\n f'Could not load advanced settings file {path}: {e}\\n')\n data = {}\n return data\n\n\ndef _read_settings_file(settings_file: 'Path') -> SettingsData:\n \"\"\"\n Read the settings file, which is a json object with settings IDs as keys\n and boolean values. For each key, look up the `Settings` object with that\n key. If the key is one of the old IDs (kebab case), replace it with the\n new ID and rewrite the settings file\n\n :param settings_file: the path to the settings file\n :return: a dict with all new settings IDs as the keys, and boolean values\n (the values stored in the settings file, or `False` if the key was not\n found).\n \"\"\"\n # Read settings from persistent file\n data = _read_json_file(settings_file)\n settings, version = _migrate(data)\n settings = _ensure(settings)\n\n if (data.get('_version') != version):\n _write_settings_file(settings, version, settings_file)\n\n return settings, version\n\n\ndef _write_settings_file(data: Mapping[str, Any],\n version: int,\n settings_file: 'Path'):\n try:\n with settings_file.open('w') as fd:\n json.dump({**data, '_version': version}, fd)\n fd.flush()\n os.fsync(fd.fileno())\n except OSError:\n log.exception(\n f'Failed to write advanced settings file to {settings_file}')\n\n\ndef _migrate0to1(previous: Mapping[str, Any]) -> SettingsMap:\n \"\"\"\n Migrate to version 1 of the feature flags file. Replaces old IDs with new\n IDs and sets any False values to None\n \"\"\"\n next: SettingsMap = {}\n\n for s in settings:\n id = s.id\n old_id = s.old_id\n\n if previous.get(id) is True:\n next[id] = True\n elif old_id and previous.get(old_id) is True:\n next[id] = True\n else:\n next[id] = None\n\n return next\n\n\ndef _migrate1to2(previous: SettingsMap) -> SettingsMap:\n \"\"\"\n Migration to version 2 of the feature flags file. 
Adds the\n disableLogAggregation config element.\n \"\"\"\n newmap = {k: v for k, v in previous.items()}\n newmap['disableLogAggregation'] = None\n return newmap\n\n\ndef _migrate2to3(previous: SettingsMap) -> SettingsMap:\n \"\"\"\n Migration to version 3 of the feature flags file. Adds the\n enableApi1BackCompat config element.\n \"\"\"\n newmap = {k: v for k, v in previous.items()}\n newmap['enableApi1BackCompat'] = None\n return newmap\n\n\n_MIGRATIONS = [_migrate0to1, _migrate1to2, _migrate2to3]\n\"\"\"\nList of all migrations to apply, indexed by (version - 1). See _migrate below\nfor how the migration functions are applied. Each migration function should\nreturn a new dictionary (rather than modify their input)\n\"\"\"\n\n\ndef _migrate(data: Mapping[str, Any]) -> SettingsData:\n \"\"\"\n Check the version integer of the JSON file data a run any necessary\n migrations to get us to the latest file format. Returns dictionary of\n settings and version migrated to\n \"\"\"\n next = dict(data)\n version = next.pop('_version', 0)\n target_version = len(_MIGRATIONS)\n migrations = _MIGRATIONS[version:]\n\n if len(migrations) > 0:\n log.info(\n \"Migrating advanced settings from version {} to {}\"\n .format(version, target_version))\n\n for m in migrations:\n next = m(next)\n\n return next, target_version\n\n\ndef _ensure(data: Mapping[str, Any]) -> SettingsMap:\n \"\"\"\n Even after migration, we may have an invalid file. For instance,\n we may have _downgraded_. Make sure all required keys are present.\n \"\"\"\n newdata = {k: v for k, v in data.items()}\n for s in settings:\n if s.id not in newdata:\n newdata[s.id] = None\n return newdata\n\n\ndef get_setting_with_env_overload(setting_name):\n env_name = 'OT_API_FF_' + setting_name\n if env_name in os.environ:\n return os.environ[env_name].lower() in ('1', 'true', 'on')\n else:\n return get_adv_setting(setting_name) is True\n","sub_path":"api/src/opentrons/config/advanced_settings.py","file_name":"advanced_settings.py","file_ext":"py","file_size_in_byte":8802,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"34782301","text":"import glob\n\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.metrics import classification_report\nfrom sklearn.model_selection import train_test_split\nimport pydash\n\nprint(__doc__)\n\nimport joblib\nimport pandas as pd\n\n\ndef read_all():\n path = r\"/Users/huang/share/data/daily_all_features/\" # use your path\n allFiles = glob.glob(path + \"/*.csv\")\n\n list_ = []\n\n for file_ in allFiles:\n df = pd.read_csv(file_, index_col=None, header=0)\n list_.append(df)\n\n return pd.concat(list_, axis=0, ignore_index=True)\n\n\ndef read_all2():\n path = r\"/Users/huang/share/data/daily_all_features/\" # use your path\n allFiles = glob.glob(path + \"/*.csv\")\n\n list_ = []\n\n # csv_path = \"/Users/huang/share/data/daily_all_features/date_20181130.all_features.csv\"\n # iris = pd.read_csv(csv_path)\n # list1 = iris.columns.tolist()\n #\n # csv_path = \"/Users/huang/share/data/daily_all_features/date_20181129.all_features.csv\"\n # csv_path = \"/Users/huang/share/data/daily_all_features/date_20181128.all_features.csv\"\n # iris = pd.read_csv(csv_path, index_col=\"id\")\n # list2 = iris.columns.tolist()\n #\n # print(\"xx1\", pydash.difference(list1, list2))\n # print(\"xx1\", pydash.difference(list2, list1))\n # exit()\n # iris = pd.read_csv(csv_path)\n for file_ in allFiles:\n try:\n df = pd.read_csv(file_)\n except:\n df = pd.read_csv(file_, 
index_col=\"id\")\n list_.append(df)\n\n return pd.concat(list_, axis=0, ignore_index=True)\n\n\niris = read_all2()\n\nprint(\"columns\", iris.columns.tolist())\niris.drop([\"id\"], axis=1, inplace=True)\n\nlabel = \"label\"\ndiabetes_y_train = iris[label].values\ndiabetes_X_train = iris.drop([label], axis=1).values\n\npercentage = 0.01\n\ndef map_func(value):\n if value > percentage:\n return 1\n else:\n return 0\n\n\ndiabetes_y_train = pydash.map_(diabetes_y_train, map_func)\n\nX = diabetes_X_train\ny = diabetes_y_train\n\nX_train, X_test, y_train, y_test = train_test_split(\n X, y, test_size=0.33, random_state=42)\n\nclf = RandomForestClassifier()\n# clf = AutoSklearnClassifier()\nclf.fit(X_train, y_train)\n\ny_pred = clf.predict(X_train)\nprint(classification_report(y_train, y_pred))\n\nmodel_path = \"test_model.%.2f.pkl\" % percentage\n# print(clf.feature_importances_)\njoblib.dump(clf, model_path)\n\ny_pred = clf.predict(X_test)\nprint(classification_report(y_test, y_pred))\n\nloaded_clf = joblib.load(model_path)\n\ny_pred = loaded_clf.predict(X_test)\nprint(\"Loaded model!\")\nprint(classification_report(y_test, y_pred))\n","sub_path":"src/gp_demo/do_classification2.py","file_name":"do_classification2.py","file_ext":"py","file_size_in_byte":2550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"637559544","text":"# coding: utf-8\n\nfrom django.forms import Widget\nfrom django.utils.datastructures import MultiValueDict, MergeDict\nfrom django.template.loader import render_to_string\nfrom django.contrib.staticfiles.templatetags.staticfiles import static\n\n\nclass FileWidgetBase(Widget):\n\n def __init__(self, to_app, to_model, *args, **kwargs):\n self.to_app = to_app\n self.to_model = to_model\n super(FileWidgetBase, self).__init__(*args, **kwargs)\n\n def render(self, name, value, attrs=None):\n ctx = {\n 'name': name,\n 'value': value,\n 'attrs': attrs,\n 'app': self.to_app,\n 'model': self.to_model,\n }\n try:\n ctx.update(self.get_context_data())\n except AttributeError:\n pass\n return render_to_string(self.template, ctx)\n\n class Media:\n css = {\n 'all': [\n static('sticky_files/css/sticky_uploader.css'),\n ]\n }\n js = [\n static('sticky_files/js/sticky_uploader.js'),\n ]\n\n\nclass ManyFileWidget(FileWidgetBase):\n template = 'sticky_files/widgets/many_file_widget.html'\n\n def __init__(self, max_objects=None, *args, **kwargs):\n self.max_objects = max_objects\n super(ManyFileWidget, self).__init__(*args, **kwargs)\n\n def value_from_datadict(self, data, files, name):\n if isinstance(data, (MultiValueDict, MergeDict)):\n return data.getlist(name)\n return data.get(name, None)\n\n def get_context_data(self):\n return {\n 'max_objects': self.max_objects,\n }\n\n\nclass ManyImageWidget(ManyFileWidget):\n template = 'sticky_files/widgets/many_image_widget.html'\n\n def value_from_datadict(self, data, files, name):\n if isinstance(data, (MultiValueDict, MergeDict)):\n return data.getlist(name)\n return data.get(name, None)\n\n\nclass OneFileWidget(FileWidgetBase):\n template = 'sticky_files/widgets/one_file_widget.html'\n\n def value_from_datadict(self, data, files, name):\n return data.get(name, None)\n\n\nclass OneImageWidget(OneFileWidget):\n template = 'sticky_files/widgets/one_image_widget.html'\n","sub_path":"sticky_files/widgets.py","file_name":"widgets.py","file_ext":"py","file_size_in_byte":2195,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} 
+{"seq_id":"496949260","text":"def parse_file():\n blacklist = []\n\n with open('input.txt') as f:\n for line in f.readlines():\n line = [int(c) for c in line.split('-')]\n blacklist.append(line)\n return blacklist\n\n\ndef get_lowest_allowed_ip(blacklist):\n min_allowed = 0\n inf = [float('inf'), float('inf')]\n\n while any(line != inf for line in blacklist):\n lower, upper = min(blacklist, key=lambda x: x[0])\n\n if lower > min_allowed + 1:\n return min_allowed + 1\n min_allowed = max(upper, lower, min_allowed)\n min_in_blacklist_index = blacklist.index([lower, upper])\n blacklist[min_in_blacklist_index] = inf\n return None\n\n\ndef get_nbr_allowed_ip(blacklist):\n blacklisted_ip_count = 0\n inf = [float('inf'), float('inf')]\n min_value = 0\n max_value = 0\n\n while any(line != inf for line in blacklist):\n lower, upper = min(blacklist, key=lambda x: x[0])\n\n if lower > max_value + 1:\n blacklisted_ip_count += (max_value - min_value + 1)\n min_value = lower\n\n max_value = max(max_value, upper)\n index = blacklist.index([lower, upper])\n blacklist[index] = inf\n\n blacklisted_ip_count += (max_value - min_value + 1)\n return 4294967295 - blacklisted_ip_count + 1\n\n\nif __name__ == \"__main__\":\n blacklist = parse_file()\n ret = get_lowest_allowed_ip(blacklist)\n print(\"Puzzle 1: {}\".format(ret))\n\n blacklist = parse_file()\n ret = get_nbr_allowed_ip(blacklist)\n print(\"Puzzle 2: {}\".format(ret))\n","sub_path":"AOC-2016/Day-20/day-20.py","file_name":"day-20.py","file_ext":"py","file_size_in_byte":1531,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"502653536","text":"from typing import Tuple, Union\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as TF\nfrom torch.autograd import Variable\nfrom torch.nn.utils.rnn import PackedSequence\n\ndef init_feedforward_weights(dnn: nn.Module,\n init_mean=0,\n init_std=1,\n init_xavier: bool=True,\n init_normal: bool=True,\n init_gain: float=1.0):\n for name, p in dnn.named_parameters():\n if 'bias' in name:\n p.data.zero_()\n if 'weight' in name: \n if init_xavier:\n if init_normal:\n nn.init.xavier_normal(p.data, init_gain)\n else:\n nn.init.xavier_uniform(p.data, init_gain)\n else:\n if init_normal:\n nn.init.normal(p.data, init_gain)\n else:\n nn.init.uniform(p.data, init_gain)\n\ndef _init_rnn_weights(rnn: nn.Module,\n init_xavier: bool=True,\n init_normal: bool=True,\n init_gain: float=1.0,\n ):\n for name, p in rnn.named_parameters():\n if 'bias' in name:\n p.data.fill_(0)\n if isinstance(rnn, (torch.nn.LSTM, torch.nn.LSTMCell)):\n n = p.nelement()\n p.data[n // 4:n // 2].fill_(1) # forget bias\n elif 'weight' in name:\n if init_xavier:\n if init_normal:\n nn.init.xavier_normal(p, init_gain)\n else:\n nn.init.xavier_uniform(p, init_gain)\n else:\n if init_normal:\n nn.init.normal(p, init_gain)\n else:\n nn.init.uniform(p, init_gain)\n\nclass LinearModel(nn.Module):\n \"\"\"Simple Linear Model\n \"\"\"\n def __init__(self,\n in_features,\n out_features,\n bias=True,\n activation='relu',\n init_mean=0,\n init_std=1,\n init_xavier: bool=True,\n init_normal: bool=True,\n init_gain: float=1.0):\n super(LinearModel, self).__init__() \n \n self.in_features = in_features\n self.out_features = out_features\n self.fc = nn.Linear(in_features, out_features, bias)\n if activation.lower() == 'relu':\n self.activation = nn.ReLU()\n else:\n self.activation = None\n \n init_feedforward_weights(self.fc,\n init_mean,\n init_std,\n init_xavier,\n init_normal,\n init_gain)\n \n def 
forward(self, x):\n if self.activation is not None:\n x = self.activation(self.fc(x))\n else:\n x = self.fc(x)\n return x\n \n \n \nclass RNNModel(nn.Module):\n \"\"\"Simple RNNmodel\n \"\"\"\n def __init__(self,\n input_size,\n hidden_size,\n rnn_type='LSTM', \n num_layers=1,\n bidirectional=False,\n dropout=0.0,\n batch_first=True,\n init_xavier: bool=True,\n init_normal: bool=True,\n init_gain: float=1.0,\n concat: bool=True,\n ):\n super(RNNModel, self).__init__()\n self.in_size = input_size\n self.hidden_size = hidden_size\n self.num_layers = num_layers\n self.bidirectional = bidirectional\n self.concat = concat\n self.rnn_type = rnn_type\n if not self.rnn_type in ['LSTM', 'GRU', 'RNN']:\n raise NotImplementedError(self.rnn_type)\n self.rnn =\\\n getattr(nn, rnn_type)(input_size,\n hidden_size,\n num_layers,\n bidirectional=bidirectional,\n dropout=dropout,\n batch_first=batch_first)\n _init_rnn_weights(self.rnn,\n init_xavier=init_xavier,\n init_normal=init_normal,\n init_gain=init_gain\n )\n\n def forward(self,\n x: Union[Variable, PackedSequence],\n hx: Union[Variable, Tuple[Variable, ...]]=None) ->\\\n Tuple[Variable, Variable]:\n \n assert isinstance(x, Variable) or\\\n isinstance(x, PackedSequence), type(x)\n \n self.rnn.flatten_parameters()\n output, hx = self.rnn(x, hx)\n self.rnn.flatten_parameters()\n \n if (not self.concat) and self.bidirectional:\n B, T, F = output.size()\n output = output[:, :, :F//2] + output[:, :, F//2:]\n \n return output, hx \n\n\nclass LinearLayer(nn.Module):\n def __init__(self, data_shape: list, n_channel, n_layer):\n super(LinearLayer, self).__init__() \n self.data_shape = data_shape\n assert len(self.data_shape) == 3, ('data_shape must be '\n '(batch_size, num_atom, features)')\n assert n_layer >= 0, 'n_layers must be oever 0.' 
\n        _, _, F = data_shape\n\n        if n_layer == 1:\n            self.ll = LinearModel(F, n_channel)\n        else:\n            self.ll = nn.Sequential(\n                LinearModel(F, n_channel, activation='relu'),\n                *[LinearModel(n_channel, n_channel, activation='relu')\n                  for _ in range(n_layer - 1) ]\n            )\n        \n    def forward(self, x):\n        n_batch, n_atom, n_channel = x.shape\n        # flatten atoms into the batch dimension, run the shared linear\n        # stack, then restore the (batch, atom, feature) layout\n        x = x.reshape(n_batch * n_atom, n_channel)\n        x = self.ll(x)\n        return x.reshape(n_batch, n_atom, -1)\n\n\nif __name__ == '__main__':\n    B = 10\n    N = 100\n    C = 33\n    x = torch.autograd.Variable(torch.randn(B, N, C))\n    ll = LinearLayer(x.size(), 4, 3)\n    print(ll)\n","sub_path":"pytorch_chemistry/models/basic.py","file_name":"basic.py","file_ext":"py","file_size_in_byte":6435,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"349497427","text":"from Expression import Expression\nfrom Relation import Relation\n\n\nclass Projection (Expression) :\n    \"\"\"\n    this class represents the Projection operation in SPJRUD\n    \"\"\"\n\n    def __init__(self,colonnes, table):\n        self.colonnes = colonnes\n        self.table = table\n        self.verify()\n        self.compile()\n        self.imbriquementRelation()\n        self.request = \"SELECT \"+ self.res +\" FROM \"+self.table\n\n    def verify(self):\n        \"\"\"\n\t\tthis method verifies that the syntax is respected and the values are correct for the table in parameters\n\t\t:return: None\n\t\t\"\"\"\n        if not isinstance(self.colonnes,list) or len(self.colonnes) == 0 :\n            raise TypeError(\"The first parameter is not a list or the list is empty\")\n        if not isinstance(self.table,Relation) and not isinstance(self.table,Expression) :\n            raise TypeError(\"The last argument has to be a Relation object or an Expression object\")\n\n    def compile(self): #compile and verify all the arguments in the list\n        self.res =\"\"\n        for i in self.colonnes:\n            self.calculate()\n            if isinstance(i,str): #this is the column case\n                if i in self.attributes :\n                    self.res = self.res + i + \",\"\n                else :\n                    raise ValueError (i+\" is not an attribute of the Relation \"+self.table.name)\n            else: #this is the imbriquement case\n                i = \"( \"+i.__str__()+\" )\"\n                self.res = self.res + i +\", \"\n        self.res = self.res[:len(self.res)-1]\n\n    def __str__(self):\n        return self.request\n\n    def calculate(self):\n        \"\"\"\n\t\tthis method calculates the attributes in the table and represents them as a dictionary in the variable attributes.\n\t\tit returns the variable too.\n\t\t:return: Dictionary\n\t\t\"\"\"\n        self.attributes = self.colonnes\n        return self.attributes\n\n","sub_path":"Projection.py","file_name":"Projection.py","file_ext":"py","file_size_in_byte":1873,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
Does two things:\n\n - Changes the default for `pre` to ``True``.\n - Always sets `always=True` (the `always` parameter is still accepted,\n but with a slightly different meaning; see note below).\n - Allows model parameters to be specified as keyword arguments in the\n validator signature. This works with both model-level parameters, and\n the parameters defined in the `Parameters` subclass.\n\n .. Note:: The point of an initializer is to replace a default value, so\n it doesn't make sense to set `always=False`. However, by default an\n initializer will *not* execute if a value is already provided.\n (The logic being that if a value is provided, it doesn't need to be\n initialized.) Thus, in analogy with `~pydantic.validator`, the `always`\n keyword is provided to specify that an initializer should be run even if\n a value for that parameter is provided.\n\n Example\n -------\n\n The following\n\n >>> class Model(BaseModel):\n >>> a: float\n >>> t: float = None\n >>> @initializer('t'):\n >>> def set_t(t, a):\n >>> return a/4\n\n is equivalent to\n\n >>> class Model(BaseModel):\n >>> a: float\n >>> t: float = None\n >>> @validator('t', pre=True, always=True):\n >>> def set_t(t, values):\n >>> if t is not None:\n >>> return t\n >>> a = values.get('a', None)\n >>> if a is None:\n >>> raise AssertionError(\n >>> \"'a' cannot be found within the model parameters. This may be \"\n >>> \"because it is defined after 't' in the list of parameters, \"\n >>> \"or because its own validation failed.\")\n >>> return a/4\n\n Parameters\n ----------\n *fields\n pre (default: True)\n each_item\n check_fields\n allow_reuse: As in `pydantic.validator`, although some arguments may not\n be so relevant.\n\n always: bool\n - `True`: Always run the initializer. This is the same as setting\n `always=True` with a Pydantic `~pydantic.validator`.\n - `False` (default): Only run the initializer when the value is **not**\n provided. Note that this is the opposite effect to setting\n `always=False` with a Pydantic `~pydantic.validator`.\n\n uninitialized: Any (default: None)\n The initializer is only executed when the parameter is equal to this\n value.\n \"\"\"\n\n val_fn = validator(*fields, pre=pre, always=True, **dec_kwargs)\n\n # Refer to pydantic.class_validators.make_generic_validator\n def dec(f: AnyCallable) -> classmethod:\n sig = signature(f)\n args = list(sig.parameters.keys())\n # 'value' is the first argument != from 'self', 'cls'\n # It is positional, and the only required argument\n if args[0] in ('self', 'cls'):\n req_val_args = args[:2]\n opt_val_args = set(args[2:]) # Remove cls and value\n else:\n req_val_args = args[:1]\n opt_val_args = set(args[1:]) # Remove value\n # opt_validator_args will store the list of arguments recognized\n # by pydantic.validator. 
Everything else is assumed to match an earlier\n # parameter.\n param_args = set()\n for arg in opt_val_args:\n if arg not in ('values', 'config', 'field', '**kwargs'):\n param_args.add(arg)\n for arg in param_args:\n opt_val_args.remove(arg)\n def new_f(cls, v, values, field, config):\n if not always and v is not uninitialized:\n return v\n param_kwargs = {}\n params = values.get('params', None)\n if not isinstance(params, BaseModel):\n params = None # We must not be within a sinn Model => 'params' does not have special meaning\n for p in param_args:\n if p in values: # Try module-level param first\n pval = values.get(p)\n elif params is not None and hasattr(params, p):\n pval = getattr(params, p)\n else:\n raise AssertionError(\n f\"'{p}' cannot be found within the model parameters. \"\n \"This may be because it is \"\n f\"defined after '{field.name}' in the list of parameters, \"\n \"or because its own validation failed.\")\n param_kwargs[p] = pval\n\n # Now assemble the expected standard arguments\n if len(req_val_args) == 2:\n val_args = (cls, v)\n else:\n val_args = (v,)\n val_kwargs = {}\n if 'values' in opt_val_args: val_kwargs['values'] = values\n if 'field' in opt_val_args: val_kwargs['field'] = field\n if 'config' in opt_val_args: val_kwargs['config'] = config\n\n return f(*val_args, **val_kwargs, **param_kwargs)\n\n # Can't use @wraps because we changed the signature\n new_f.__name__ = f.__name__\n # Having a different qualname is required to avoid overwriting validators\n # (Pydantic identifies them by name, and otherwise they all have `new_f`)\n new_f.__qualname__ = f.__qualname__\n new_f.__doc__ = f.__doc__\n\n return val_fn(new_f)\n\n return dec\n\ndef add_exclude_mask(exclude, mask):\n \"\"\"\n Merge `exclude` and `mask` into a single set/dict, in the format\n expected by BaseModel's `json`, `dict` and `copy` methods.\n This is used to specialize these methods within particular BaseModels,\n in order to ensure certain attributes are always excluded from export.\n\n :param exclude: set | dict | None\n :param mask: set | dict | ParameterSet\n Hierarchies can be indicated with either nested dicts or by separating\n the levels in the key names with a period.\n :returns: set | dict\n Returns dict if `exclude` or `mask` are a dict, otherwise returns\n set.\n \"\"\"\n if exclude is not None and not isinstance(exclude, (dict, set)):\n raise TypeError(f\"Argument 'exclude' should be either a set or dict, \"\n f\"but received '{exclude}'\")\n if not isinstance(mask, (dict, set)):\n raise TypeError(f\"Argument 'mask' should be either a set or dict, \"\n f\"but received '{mask}'\")\n if isinstance(exclude, dict) or isinstance(mask, dict):\n if exclude is None:\n exclude = {}\n elif isinstance(exclude, set):\n exclude = {attr: ... for attr in exclude}\n\n if isinstance(mask, set):\n exclude.update({attr: ... 
for attr in mask})\n else:\n assert isinstance(mask, dict)\n # Use ParameterSet to resolve dotted hierarchies\n for attr, excl in ParameterSet(mask).items():\n if excl is ...:\n exclude[attr] = ...\n elif exclude.get(attr, None) is not ...:\n # If it matches ..., there is nothing to do:\n # everything under 'attr' is already excluded\n exclude[attr] = add_exclude_mask(\n exclude.get(attr, None), excl)\n else:\n if exclude is None:\n exclude = set()\n exclude = exclude | mask\n\n return exclude\n","sub_path":"sinn/utils/pydantic.py","file_name":"pydantic.py","file_ext":"py","file_size_in_byte":7725,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"135570511","text":"from torch.utils.data import Dataset, DataLoader # For custom data-sets\nimport torchvision.transforms as transforms\nimport numpy as np\nfrom PIL import Image\nimport torch\nimport pandas as pd\nimport torchvision.transforms.functional as TF\nimport random\nfrom collections import namedtuple\n\nn_class = 34\nmeans = np.array([103.939, 116.779, 123.68]) / 255. # mean of three channels in the order of BGR\n\n# a label and all meta information\nLabel = namedtuple('Label', [\n\n 'name', # The identifier of this label, e.g. 'car', 'person', ... .\n # We use them to uniquely name a class\n\n 'id', # An integer ID that is associated with this label.\n # The IDs are used to represent the label in ground truth images\n # An ID of -1 means that this label does not have an ID and thus\n # is ignored when creating ground truth images (e.g. license plate).\n\n 'trainId', # An integer ID that overwrites the ID above, when creating ground truth\n # images for training.\n # For training, multiple labels might have the same ID. Then, these labels\n # are mapped to the same class in the ground truth images. For the inverse\n # mapping, we use the label that is defined first in the list below.\n # For example, mapping all void-type classes to the same ID in training,\n # might make sense for some approaches.\n\n 'category', # The name of the category that this label belongs to\n\n 'categoryId', # The ID of this category. 
Used to create ground truth images\n # on category level.\n\n 'hasInstances', # Whether this label distinguishes between single instances or not\n\n 'ignoreInEval', # Whether pixels having this class as ground truth label are ignored\n # during evaluations or not\n\n 'color', # The color of this label\n])\n\nlabels_classes = [\n # name, id, trainId, category, catId, hasInstances, ignoreInEval, color\n Label('unlabeled', 0, 255, 'void', 0, False, True, (0, 0, 0)),\n Label('ego vehicle', 1, 255, 'void', 0, False, True, (0, 0, 0)),\n Label('rectification border', 2, 255, 'void', 0, False, True, (0, 0, 0)),\n Label('out of roi', 3, 255, 'void', 0, False, True, (0, 0, 0)),\n Label('static', 4, 255, 'void', 0, False, True, (0, 0, 0)),\n Label('dynamic', 5, 255, 'void', 0, False, True, (111, 74, 0)),\n Label('ground', 6, 255, 'void', 0, False, True, (81, 0, 81)),\n Label('road', 7, 0, 'ground', 1, False, False, (128, 64, 128)),\n Label('sidewalk', 8, 1, 'ground', 1, False, False, (244, 35, 232)),\n Label('parking', 9, 255, 'ground', 1, False, True, (250, 170, 160)),\n Label('rail track', 10, 255, 'ground', 1, False, True, (230, 150, 140)),\n Label('building', 11, 2, 'construction', 2, False, False, (70, 70, 70)),\n Label('wall', 12, 3, 'construction', 2, False, False, (102, 102, 156)),\n Label('fence', 13, 4, 'construction', 2, False, False, (190, 153, 153)),\n Label('guard rail', 14, 255, 'construction', 2, False, True, (180, 165, 180)),\n Label('bridge', 15, 255, 'construction', 2, False, True, (150, 100, 100)),\n Label('tunnel', 16, 255, 'construction', 2, False, True, (150, 120, 90)),\n Label('pole', 17, 5, 'object', 3, False, False, (153, 153, 153)),\n Label('polegroup', 18, 255, 'object', 3, False, True, (153, 153, 153)),\n Label('traffic light', 19, 6, 'object', 3, False, False, (250, 170, 30)),\n Label('traffic sign', 20, 7, 'object', 3, False, False, (220, 220, 0)),\n Label('vegetation', 21, 8, 'nature', 4, False, False, (107, 142, 35)),\n Label('terrain', 22, 9, 'nature', 4, False, False, (152, 251, 152)),\n Label('sky', 23, 10, 'sky', 5, False, False, (70, 130, 180)),\n Label('person', 24, 11, 'human', 6, True, False, (220, 20, 60)),\n Label('rider', 25, 12, 'human', 6, True, False, (255, 0, 0)),\n Label('car', 26, 13, 'vehicle', 7, True, False, (0, 0, 142)),\n Label('truck', 27, 14, 'vehicle', 7, True, False, (0, 0, 70)),\n Label('bus', 28, 15, 'vehicle', 7, True, False, (0, 60, 100)),\n Label('caravan', 29, 255, 'vehicle', 7, True, True, (0, 0, 90)),\n Label('trailer', 30, 255, 'vehicle', 7, True, True, (0, 0, 110)),\n Label('train', 31, 16, 'vehicle', 7, True, False, (0, 80, 100)),\n Label('motorcycle', 32, 17, 'vehicle', 7, True, False, (0, 0, 230)),\n Label('bicycle', 33, 18, 'vehicle', 7, True, False, (119, 11, 32))\n]\n\n# Convert to dictionary\nlabels_dict = dict()\nfor label in labels_classes:\n labels_dict[label[1]] = {\"name\": label[0], \"trainId\": label[2], \"category\": label[3], \"catId\": label[4],\n \"hasInstances\": label[5], \"ignoreInEval\": label[6], \"color\": label[7]}\n\nvalid_labels = []\nfor label in labels_dict:\n if labels_dict[label]['trainId'] != 255:\n valid_labels.append(label)\n\n\nclass CityScapesDataset(Dataset):\n\n def __init__(self, csv_file, n_class=n_class, train=False):\n self.data = pd.read_csv(csv_file)\n self.means = means\n self.n_class = n_class\n self.train = train\n\n def __len__(self):\n return len(self.data)\n\n def _transform(self, image, mask):\n # Resize\n # resize = transforms.Resize(512)\n # image = resize(image)\n # mask = 
resize(mask)\n\n # Random rotations to improve rotations invariance\n angle = transforms.RandomRotation.get_params([-15, 15])\n image = TF.rotate(image, angle)\n mask = TF.rotate(mask, angle)\n\n # Random crop\n i, j, h, w = transforms.RandomCrop.get_params(\n image, output_size=(512, 512))\n image = TF.crop(image, i, j, h, w)\n mask = TF.crop(mask, i, j, h, w)\n\n # Random horizontal flipping\n if random.random() > 0.5:\n image = TF.hflip(image)\n mask = TF.hflip(mask)\n\n # Random vertical flipping\n if random.random() > 0.5:\n image = TF.vflip(image)\n mask = TF.vflip(mask)\n\n # Convert to numpy array\n image = TF.to_tensor(image).numpy()\n mask = np.asarray(mask)\n return image, mask\n\n def __getitem__(self, idx):\n img_name = self.data.iloc[idx, 0]\n label_name = self.data.iloc[idx, 1]\n img = Image.open(img_name).convert(\"RGB\")\n label = Image.open(label_name)\n\n # perform transformations\n if self.train:\n img, label = self._transform(img, label)\n # PyTorch toTensor operator automatically converts\n # numpy.ndarray (H x W x C) in the range [0, 255]\n # to a torch.FloatTensor of shape (C x H x W) in the range [0.0, 1.0]\n img = img[::-1, :, :] # switch to BGR\n else:\n img = np.asarray(img)\n label = np.asarray(label)\n\n img = img[:, :, ::-1] # switch to BGR\n img = np.transpose(img, (2, 0, 1)) / 255.\n\n # reduce mean\n img[0] -= self.means[0]\n img[1] -= self.means[1]\n img[2] -= self.means[2]\n\n # convert to tensor\n img = torch.from_numpy(img.copy()).float()\n label = torch.from_numpy(label.copy()).long()\n\n # create one-hot encoding\n h, w = label.shape\n target = torch.zeros(self.n_class, h, w)\n for c in range(self.n_class):\n target[c][label == c] = 1\n\n return img, target, label\n","sub_path":"semantic_segmentation/dataloader.py","file_name":"dataloader.py","file_ext":"py","file_size_in_byte":7222,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"156977086","text":"import numpy as np\nimport itertools\nfrom collections import Counter\nfrom matplotlib import pyplot as plt\n\n\nclass Agent(object):\n \"\"\"\n An Agent Object for the Minority Game. The agent is based off of:\n\n Challet and Zhang 'Emergence of Cooperation and Organization in an\n Evolutionary Game.' Physica A: Statistical Mechanics and its\n Applications. 1997. These objects should only be created through\n the MinorityGame class.\n\n Parameters\n ----------\n\n m : int\n Memory Parameter\n\n s : int\n Stratgy space parameter. Of the 2^(2^M) strategies, how many\n strategies does the agent consider. There may be duplicates.\n Can always put in a check to ensure no duplicates.\n \"\"\"\n\n def __init__(self, m, s):\n self.m = m\n self.s = s\n self.strategies = self._draw_strategies()\n self.vpoints = np.zeros(self.s)\n\n def _draw_strategies(self):\n history = list(itertools.product(['0', '1'], repeat=self.m))\n history = [''.join(x) for x in history]\n actions = np.random.randint(0, 2, size=(self.s, 2**self.m))\n # Can check here to make sure no two rows the same if want to\n # eliminate duplicates. 
However, as m->infty, the probability\n # of a duplicate goes to 0.\n strats = [dict(zip(history, actions[i, :])) for i in range(self.s)]\n return strats\n\n def get_action(self, h):\n strat = self.strategies[np.argmax(self.vpoints)]\n return strat[h]\n\n def update_virtual_points(self, h, winner):\n for ix, s in enumerate(self.strategies):\n if s[h] == winner:\n self.vpoints[ix] += 1\n\n\nclass MinorityGame(object):\n \"\"\"\n A class that creates agents and runs the game.\n\n Parameters\n ----------\n\n nagents : int\n The number of agents\n\n m : int\n The memory of each agent\n\n s : int\n The number of strategies per agent\n\n p : float \\in [0,1]\n Minority proportion. i.e. Agent i wins if the proportion of\n agents taking the same action of agent i is less than p.\n (step behavior inaccurate if p<0.5; winner selected even when p threshold not reached...)\n\n \"\"\"\n\n def __init__(self, nagents, m, s, p=.5):\n self.nagents = nagents\n self.agents = [Agent(m, s) for x in range(nagents)]\n self.h = ''.join(np.random.choice(['0', '1'], size=m)) # Initial History\n self.p = p\n\n def step(self):\n actions = [a.get_action(self.h) for a in self.agents]\n winner = int(np.sum(actions) / float(self.nagents) <= self.p)\n [a.update_virtual_points(self.h, winner) for a in self.agents]\n self.h = self.h[1:] + str(winner)\n return actions, winner # This is like returning the next state?\n\n\ndef repro_fig_1():\n \"\"\"\n This function (roughly) reproduces figure 1 in\n Challet and Zhang 'Emergence of Cooperation and Organization in an\n Evolutionary Game.' Physica A: Statistical Mechanics and its\n Applications. 1997.\n \"\"\"\n game1 = MinorityGame(1001, 6, 3)\n g1y = [np.sum(game1.step()[0]) for x in range(1000)]\n game2 = MinorityGame(1001, 8, 3)\n g2y = [np.sum(game2.step()[0]) for x in range(1000)]\n game3 = MinorityGame(1001, 10, 3)\n g3y = [np.sum(game3.step()[0]) for x in range(1000)]\n fig, axes = plt.subplots(3)\n ys = [g1y, g2y, g3y]\n [axes[i].plot(ys[i]) for i in range(3)]\n [ax.set_ylim(0, 1000) for ax in axes]\n [ax.set_xlim(0, 1000) for ax in axes]\n [ax.set_xlabel('Iteration') for ax in axes]\n [ax.set_ylabel('Bar Attendees') for ax in axes]\n fig.text(.5, .03,\n 'Minority game with s=3 and M=6,8,10 from top to bottom',\n ha='center', fontsize=18)\n return fig, axes\n","sub_path":"minoritygame/minority_base.py","file_name":"minority_base.py","file_ext":"py","file_size_in_byte":3742,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"205737287","text":"# -*- coding: utf-8 -*- \n# From https://www.cnblogs.com/zhangtianyuan/p/6922825.html\nimport gensim\nimport codecs\n\ndef main():\n path_to_model = 'wiki.zhs.model'\n output_file = 'wiki.zh.vec'\n bin2txt(path_to_model, output_file)\n\ndef bin2txt(path_to_model, output_file):\n output = codecs.open(output_file, 'w' , 'utf-8')\n model = gensim.models.Word2Vec.load(path_to_model)\n print('Done loading Word2Vec!') \n vocab = model.wv.vocab \n for item in vocab:\n vector = list()\n for dimension in model.wv[item]:\n vector.append(str(dimension))\n vector_str = \" \".join(vector)\n line = item + \" \" + vector_str\n output.writelines(line + \"\\n\")\n output.close()\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"gensim2txt.py","file_name":"gensim2txt.py","file_ext":"py","file_size_in_byte":758,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"121497208","text":"from guizero import App, Text, TextBox, PushButton\n\ndef 
say_my_name():\n welcome_message.value = my_name.value\n\napp = App(title=\"Hello world\",)\n\nwelcome_message = Text(app, text=\"Welcome to my app!\", size=40, font=\"Helvetica\", color=\"blue\")\n\nmy_name = TextBox(app)\n\nupdate_text = PushButton(app, command=say_my_name, text=\"Display my name\")\n\napp.display()","sub_path":"gui_test.py","file_name":"gui_test.py","file_ext":"py","file_size_in_byte":357,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"313541380","text":"# -*- coding: utf-8 -*-\n\"\"\"\nsuggested by bugsuse(https://github.com/bugsuse)\n\"\"\"\n\nfrom setuptools import find_packages, setup\nimport sys, os\n\nparent_dir = os.path.dirname(os.path.abspath(__file__))\nsys.path.append(parent_dir)\n\nDISTNAME = \"pycwr\"\nAUTHOR = \"Yu Zheng\"\nAUTHOR_EMAIL = \"zhengyunuist@gmail.com\"\nURL = \"https://github.com/YvZheng/NuistRadar\"\nPYTHON_REQUIRES = \">=3.6\"\nINSTALL_REQUIRES = [\"matplotlib>=2.2.3\", \"pyproj>=1.9.6\", \"Cartopy>=0.17.0\", \"xarray>=0.12.1\",\\\n\"numpy<=1.15.0\", \"scipy>=1.1.0\", \"pandas>=0.23.4\", \"PyQt5>=5.10.0\", \"netCDF4>=1.5.2\", 'easydict>=1.9']\nDESCRIPTION = \"China Weather Radar tools\"\nLONG_DESCRIPTION = \"\"\"The Weather Radar Toolkit, support most of China's radar formats\n(WSR98D, CINRAD/SA/SB/CB, CINRAD/CC/CCJ, CINRAD/SC/CD)\"\"\"\n\nsetup(\n name=DISTNAME,\n version=\"0.2\",\n author=AUTHOR,\n author_email=AUTHOR_EMAIL,\n description=DESCRIPTION,\n long_description=LONG_DESCRIPTION,\n python_requires=PYTHON_REQUIRES,\n install_requires=INSTALL_REQUIRES,\n url=URL,\n include_package_data = True,\n packages=find_packages(parent_dir),\n package_data={\"pycwr\": [\"data/*.*\",\"__init_.py\", \"GraphicalInterface/*.py\", \"draw/colormap/balance-rgb.txt\"]},\n)\n\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1208,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"549729050","text":"import sys\nimport numpy as np\nimport math\nimport random\nimport matplotlib.pyplot as plt\n\nfrom dqn import DQN, ReplayMemory, Transition\nfrom util import Timer\n\nimport gym\nimport gym_game\nimport torch\nimport torch.nn.functional as F\nimport util\n\ndef simulate():\n num_episodes = 20000\n #env.is_view = True\n for epi in range(num_episodes):\n env.reset()\n last_screen = torch.from_numpy(env.render()).unsqueeze(0).to(device)\n current_screen = torch.from_numpy(env.render()).unsqueeze(0).to(device)\n state = current_screen - last_screen\n total_reward = 0\n timer.set_timer(\"episode\")\n #if epi == 500:\n for t in range(MAX_T):\n action = select_action(state)\n _, reward, done, _ = env.step(action.item())\n\n total_reward += float(reward)\n reward = torch.tensor([reward], device=device)\n last_screen = current_screen\n current_screen = torch.from_numpy(env.render()).unsqueeze(0).to(device)\n if done:\n next_state = None\n else:\n next_state = current_screen - last_screen\n\n memory.push(state, action, next_state, reward)\n state = next_state\n\n #env.render()\n optimize_model()\n\n if done:\n print(\"episode %d, total reward = %f\" % (epi + 1, total_reward))\n break\n\n timer.print_time(\"episode\")\n print(\"\")\n if epi % TARGET_UPDATE == 0:\n target_net.load_state_dict(policy_net.state_dict())\n\n\ndef optimize_model():\n if len(memory) < BATCH_SIZE:\n return\n transition = memory.sample(BATCH_SIZE)\n batch = Transition(*zip(*transition))\n non_final_mask = torch.tensor(tuple(map(lambda s: s is not None, 
batch.next_state)), device=device, dtype=torch.bool)\n non_final_next_states = torch.cat([s for s in batch.next_state if s is not None])\n state_batch = torch.cat(batch.state)\n action_batch = torch.cat(batch.action)\n reward_batch = torch.cat(batch.reward)\n\n state_action_values = policy_net(state_batch).gather(1, action_batch)\n next_state_values = torch.zeros(BATCH_SIZE, device=device)\n next_state_values[non_final_mask] = target_net(non_final_next_states).max(1)[0].detach()\n\n expected_state_action_values = (next_state_values * GAMMA) + reward_batch\n\n loss = F.smooth_l1_loss(state_action_values, expected_state_action_values.unsqueeze(1))\n optimizer.zero_grad()\n loss.backward()\n for param in policy_net.parameters():\n param.grad.data.clamp_(-1, 1)\n optimizer.step()\n\n\ndef select_action(state):\n global steps_done\n sample = random.random()\n eps_threshold = EPS_END + (EPS_START - EPS_END) * math.exp(-1. * steps_done / EPS_DECAY)\n steps_done += 1\n if sample > eps_threshold:\n with torch.no_grad():\n return policy_net(state).max(1)[1].view(1, 1)\n else:\n return torch.tensor([[random.randrange(n_actions)]], device=device, dtype=torch.long)\n\n\nif __name__ == \"__main__\":\n BATCH_SIZE = 128\n GAMMA = 0.999\n EPS_START = 0.9\n EPS_END = 0.05\n EPS_DECAY = 200\n TARGET_UPDATE = 10\n MAX_T = 9999\n steps_done = 0\n timer = Timer()\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n env = gym.make(\"Game-v0\")\n height, width = 64, 64\n\n n_actions = env.action_space.n\n policy_net = DQN(width, height, n_actions).to(device)\n target_net = DQN(width, height, n_actions).to(device)\n target_net.load_state_dict(policy_net.state_dict())\n target_net.eval()\n\n optimizer = torch.optim.RMSprop(policy_net.parameters())\n memory = ReplayMemory(3000)\n simulate()\n","sub_path":"Main_RL.py","file_name":"Main_RL.py","file_ext":"py","file_size_in_byte":3700,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"158138423","text":"# ex:ts=4:sw=4:sts=4:et\n# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-\nfrom __future__ import absolute_import\nimport re\nimport json\n\nfrom svtplay_dl.service import Service\nfrom svtplay_dl.error import ServiceError\nfrom svtplay_dl.log import log\nfrom svtplay_dl.fetcher.hls import hlsparse\nfrom svtplay_dl.utils import decode_html_entities\n\n\nclass Expressen(Service):\n supported_domains = ['expressen.se']\n\n def get(self):\n data = self.get_urldata()\n\n if self.exclude():\n yield ServiceError(\"Excluding video\")\n return\n\n match = re.search('=\"(http://www.expressen.se/tvspelare[^\"]+)\"', data)\n if not match:\n log.error(\"Can't find video id\")\n return\n url = decode_html_entities(match.group(1))\n data = self.http.request(\"get\", url)\n\n match = re.search(\"window.Player.settings = ({.*});\", data.text)\n if not match:\n log.error(\"Can't find json info.\")\n\n dataj = json.loads(match.group(1))\n if \"streams\" in dataj:\n if \"iPad\" in dataj[\"streams\"]:\n streams = hlsparse(self.options, self.http.request(\"get\", dataj[\"streams\"][\"iPad\"]), dataj[\"streams\"][\"iPad\"])\n for n in list(streams.keys()):\n yield streams[n]\n if \"hashHls\" in dataj[\"streams\"]:\n streams = hlsparse(self.options, self.http.request(\"get\", dataj[\"streams\"][\"hashHls\"]), dataj[\"streams\"][\"hashHls\"])\n for n in list(streams.keys()):\n yield 
streams[n]\n","sub_path":"lib/svtplay_dl/service/expressen.py","file_name":"expressen.py","file_ext":"py","file_size_in_byte":1574,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"117295104","text":"\n\nclass SymTable:\n def __init__(self, size):\n self.__size = size\n self.__table = [[] for _ in range(size)]\n\n def hashfun(self, element):\n char_sum = 0\n for char in element:\n char_sum += ord(char)\n return char_sum % self.__size\n\n def insert(self, element):\n position = self.hashfun(element)\n if element not in self.__table[position]:\n self.__table[position].append(element)\n\n def lookup(self, position_table, position_list):\n try:\n return self.__table[position_table][position_list]\n except Exception:\n return None\n\n def get_position(self, element):\n table_position = self.hashfun(element)\n elements_on_pos = self.__table[table_position]\n\n if len(elements_on_pos) != 0:\n for i, e in enumerate(elements_on_pos):\n if e == element:\n return table_position, i\n else:\n return -1, -1\n\n def __str__(self):\n table = \"\"\n for i, elements in enumerate(self.__table):\n if len(elements) != 0:\n table += f'{i}: {elements}\\n'\n return table\n\n\ndef testing():\n print(\"Testing\")\n symtab = SymTable(50)\n symtab.insert(\"a\")\n # \"a\" will not appear twice\n symtab.insert(\"a\")\n symtab.insert(\"b\")\n symtab.insert(\"aux\")\n\n symtab.insert(\"0\")\n symtab.insert(\"23\")\n symtab.insert('\"abcd\"')\n symtab.insert(\"'@'\")\n\n print(str(symtab))\n\n print(symtab.get_position(\"aux\"))\n print(symtab.get_position('\"abcd\"'))\n print(symtab.get_position(\"0\"))\n print(symtab.get_position(\"fff\"))\n assert symtab.get_position(\"aux\") == (34, 0)\n assert symtab.get_position('\"abcd\"') == (12, 0)\n assert symtab.get_position(\"0\") == (48, 1)\n assert symtab.get_position(\"fff\") == (-1, -1)\n\n print(symtab.lookup(48, 0))\n print(symtab.lookup(30, 1))\n assert symtab.lookup(48, 0) == \"b\"\n assert symtab.lookup(30, 1) is None\n\n\n# if __name__ == \"__main__\":\n# testing()","sub_path":"3rd Year/FLCD/Lab6/SymTable.py","file_name":"SymTable.py","file_ext":"py","file_size_in_byte":2036,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"530637421","text":"def cs_extract_number(string, count = 1):\n if type(string).__name__ != 'str' and type(string).__name__ != 'unicode':\n raise Exception(\"Expected a string for parameter 'string', got \" + str(string))\n\n nums = string.split()\n n = count\n result = None\n for x in nums:\n if x.isdigit():\n if n == 1:\n result = x\n break\n else:\n n = n - 1\n return result\n","sub_path":"cloudslang-runtime/src/main/resources/scripts/cs_extract_number.py","file_name":"cs_extract_number.py","file_ext":"py","file_size_in_byte":388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"229464968","text":"# C - Maximal Value\n# https://atcoder.jp/contests/abc140/tasks/abc140_c\n# これも簡単。。minとるだけや。\n\n# 0:50 - 0:58 事前にスマホで問題読んでる\nn = int(input())\nb = list(map(int,input().split()))\na = [0]*n\na[0] = b[0]\na[n-1] = b[n-2]\nfor i in range(1,n-1):\n a[i] = min(b[i],b[i-1])\nprint(sum(a))","sub_path":"abc_py/abc140/c.py","file_name":"c.py","file_ext":"py","file_size_in_byte":326,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"488541910","text":"\"\"\"\nCreate two lists; one contains 10 elements and the other one is empty.\nWrite a python 
program that iterates through the first one, pops the elements from the end of it,\nthen it pushes them one by one to the empty list. As your program iterates through the first list,\nclean it before processing the data, meaning if any element is like a special character ( , . ; : )\nit must be discarded and not gets pushed to the second list.\n@Authors\n Urmi Parekh, id : 500186977\n Karthik Thallam, id : 500188370\n Ashish Sharma, id : 500188494\n Swapnil Bandgar, id : 500186962\n\n\"\"\"\n\n\ndef listOperation():\n # Defining list 1\n list1 = [1, 'LoyalistAI', 'A', 6, 'Loyalist123', 'Urmi@2020', 'I', 8, ':', 10]\n\n # Defining empty list\n list2 = []\n\n print(\"List1 before operation:\", list1)\n print(\"Empty list before operation:\", list2)\n\n # Iterating through all the elements from the list\n for i in range(len(list1)):\n\n # Removing the last element from the list\n poped_element = str(list1.pop())\n\n # Checking if list contains any special character\n if not poped_element.isnumeric() and not poped_element.isdigit() and not poped_element.isalpha() and not poped_element.isalnum():\n print(\"The special character\", poped_element + \" is removed.\")\n\n else:\n # Adding elements from list1 to the empty list\n list2.append(poped_element)\n print(\"List 1:\", list1)\n print(\"New List :\", list2)\n\n\nif __name__ == '__main__':\n print(\"The list operations begins now:\")\n\n # Calling the function\n listOperation()\n","sub_path":"q1.py","file_name":"q1.py","file_ext":"py","file_size_in_byte":1613,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"346420711","text":"# 783. Minimum Risk Path\n# https://www.lintcode.com/problem/minimum-risk-path/description\n\n# Total runtime: 68ms\nclass Solution:\n \"\"\"\n @param n: maximum index of position.\n @param m: the number of undirected edges.\n @param x: \n @param y: \n @param w: \n @return: return the minimum risk value.\n \"\"\"\n def getMinRiskValue(self, n, m, x, y, w):\n edges = sorted(zip(w,x,y))\n \n father = [i for i in range(n+1)]\n risk = 0\n \n for r, u, v in edges:\n root1 = self.find(father, u)\n root2 = self.find(father, v)\n \n if root1 != root2:\n father[root1] = root2\n risk = max(risk, r)\n \n if self.find(father, 0) == self.find(father,n):\n return risk\n \n return -1\n \n def find(self, father, node):\n if father[node] == node:\n return node\n root = self.find(father, father[node])\n father[node] = root\n return root","sub_path":"Algorithm/HW07_Graph_Theory/lintcode783_Minimum_Risk_Path.py","file_name":"lintcode783_Minimum_Risk_Path.py","file_ext":"py","file_size_in_byte":1030,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"125031656","text":"from selenium import webdriver\nfrom selenium.webdriver.support.wait import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.common.by import By\nfrom pyquery import PyQuery as pq\nfrom bs4 import BeautifulSoup\nimport time\nbrowser=webdriver.Chrome()\nbrowser.maximize_window()\nwait=WebDriverWait(browser,15)\n\ndef crawle(url,key,page):\n browser.get(url=url)\n try:\n print('下拉到最后')\n browser.execute_script('window.scrollTo(0, document.body.scrollHeight)')\n # wait.until(EC.presence_of_element_located((By.CSS_SELECTOR,'#offer60')))\n time.sleep(3)\n except :\n print('*'*30,'等待商品加载超时','*'*30,'\\n\\n\\n')\n for item in get_products():\n print(item)\n save_to_mongo(item, key)\n if page>1:\n for page in 
range(2,page+1):\n print('*' * 30, '第',page,'页', '*' * 30, '\\n\\n\\n')\n get_more_page(page)\n for item in get_products():\n print(item)\n save_to_mongo(item, key)\n\ndef get_more_page(page):\n page_input=browser.find_element_by_class_name('ui2-pagination-goto')\n page_input.clear()\n page_input.send_keys(page)\n button=browser.find_element_by_css_selector('.ui2-pagination-go')\n button.click()\n time.sleep(3)\n try:\n browser.execute_script('window.scrollTo(0, document.body.scrollHeight)')\n # wait.until(EC.presence_of_element_located((By.CSS_SELECTOR,'#offer20')))\n except :\n print('*'*30,'超时加载','*'*30,'\\n\\n\\n')\n time.sleep(3)\n\n\ndef get_products():\n html=browser.page_source\n doc=pq(html)\n items=doc('.brh-rfq-item').items()\n index=0\n for item in items:\n index+=1\n title=item.find('.brh-rfq-item__subject a').text().split('\\n')\n title=' '.join(title)\n detail_a=item.find('.brh-rfq-item__detail').text().split('\\n')\n detail=''.join(detail_a[:2])\n date_posted=item.find('.brh-rfq-item__open-time').text().replace('Date Posted','').split(' ')\n date_posted_n=date_posted[0]\n date_posted_u=date_posted[1:]\n date_posted_unit=' '.join(date_posted_u)\n quantity=item.find('.brh-rfq-item__quantity-num').text()\n country=item.find('.brh-rfq-item__country').text().replace('Posted in','')\n purchaser=item.find('.next-tag-body').text()\n quote_left=item.find('.quote-left').text().replace('Quotes Left ','')\n #拼接json\n yield{\n 'main_category':'Electronic Components & Supplies',\n 'second_category': 'EL Products',\n 'third_category': 'EL Products',\n 'key_word': '150412',\n 'title':title,\n 'detail':detail,\n 'date_posted_n':date_posted_n,\n 'date_posted_unit': date_posted_unit,\n 'quantity': quantity,\n 'country': country,\n 'purchaser': purchaser,\n 'quote_left':quote_left}\n\n print(' (●ˇ∀ˇ●) '*5)\n print('一共%d条数据'%index)\n\nimport pymongo\nclient=pymongo.MongoClient()\ndb=client.alibaba\ndef save_to_mongo(item,key):\n #根据关键字动态存入相应的表\n collection=db[key]\n if item:\n collection.insert(item)\n print('成功存储到mongo')\ndef main():\n url = 'https://sourcing.alibaba.com/rfq_search_list.htm?spm=a2700.8073608.1998677539.17.6d1e65aacOeDre&categoryIds=150412'\n key_words='source_category'\n page=int(1)\n crawle(url,key_words,page)\n\nmain()\n","sub_path":"alibaba_source.py","file_name":"alibaba_source.py","file_ext":"py","file_size_in_byte":3454,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"250467100","text":"from db import db\nfrom sqlalchemy import text\nimport helper\n\n\nclass ProductModel(db.Model):\n __tablename__ = 'product'\n id=db.Column(db.Integer,primary_key=True)\n name=db.Column(db.String(80))\n update_date=db.Column(db.DateTime)\n status=db.Column(db.String(10))\n category=db.Column(db.String(20))\n quantity=db.Column(db.Integer)\n description=db.Column(db.String(80))\n price=db.Column(db.Float)\n currency=db.Column(db.String(3))\n merchant_code=db.Column(db.String(20))\n image=db.Column(db.LargeBinary)\n\n def __init__(self,merchant_code,\\\n name,\\\n id,\\\n status,\\\n category,\\\n quantity,\\\n description,\\\n price,\\\n currency,\\\n update_date):\n\n self.name=name\n self.merchant_code=merchant_code\n self.update_date=update_date\n self.status=status\n self.category=category\n self.quantity=quantity\n self.description=description\n self.price=price\n self.currency=currency\n self.merchant_code=merchant_code \n\n\n @classmethod\n def find_products(cls,name,\\\n category,\\\n description,\\\n merchant_code):\n result 
= cls.query.filter_by(merchant_code=merchant_code)\n if len(name)>0:\n result=result.filter(text('name ~ :reg')).params(reg=name)\n if len(category)>0:\n result=result.filter(text('category ~ :reg')).params(reg=category)\n if len(description)>0:\n result=result.filter(text('description ~ :reg')).params(reg=description)\n return result\n \n @classmethod\n def find_by_id(cls,merchant_code,_id):\n result = cls.query.filter_by(merchant_code=merchant_code).\\\n filter_by(id=_id).first()\n return result\n \n @classmethod\n def find_by_name(cls,merchant_code,name):\n result = cls.query.filter_by(merchant_code=merchant_code).\\\n filter_by(name=name).first()\n return result\n\nclass ProductImageModel(db.Model):\n __tablename__ = 'product'\n __table_args__ = {'extend_existing': True}\n merchant_code=db.Column(db.String(20))\n id=db.Column(db.Integer,primary_key=True)\n image =db.Column(db.LargeBinary) \n\n def getImage(self):\n return self.image\n \n def setImage(self,imageData):\n self.image=imageData\n \n @classmethod\n def find_by_id(cls,merchant_code,id):\n result = cls.query.filter_by(merchant_code=merchant_code).filter_by(id=id).first()\n return result\n","sub_path":"models/product.py","file_name":"product.py","file_ext":"py","file_size_in_byte":2650,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"246223927","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nimport blanc_basic_assets.fields\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('assets', '0001_initial'),\n ('pages', '0001_initial'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='page',\n name='hero_image',\n field=blanc_basic_assets.fields.AssetForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='assets.Image'),\n ),\n ]\n","sub_path":"blanc_basic_pages/migrations/0002_page_hero_image.py","file_name":"0002_page_hero_image.py","file_ext":"py","file_size_in_byte":589,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"163691633","text":"import sqlite3\nimport sys, os\nimport argparse\nimport re\n# make fake anno db from only VCF file\nin_vcf_addr = sys.argv[1]\nin_db_addr = sys.argv[2]\n\nparser = argparse.ArgumentParser(description='create db or add variant to db')\nparser.add_argument('-i', '--in_vcf', nargs='+', help='input vcf file')\nparser.add_argument('-e', '--end_pos', help='output a file with: chr file_name end_pos')\nparser.add_argument('-d', '--db', help='sqlite3 db file')\nparser.add_argument('-m', '--mode', choices=['add', 'new'],\n help='create: rm exist table and build a new table. 
add: only add data to table',\n default='add')\nargs = parser.parse_args()\n\nin_vcf_addr_l = args.in_vcf\nin_db_addr = args.db\n\n\nconn = sqlite3.connect(in_db_addr)\nc = conn.cursor()\nif args.mode == 'new':\n c.execute('DROP TABLE IF EXISTS anno')\n c.execute(\n 'CREATE TABLE anno (variant text, chr text, pos integer, rsid text, af real, info real, '\n 'enrichment_nfsee_genomes real, enrichment_nfsee_exomes real, gene_most_severe text, most_severe text, '\n 'consequence_gnomad text, in_data )')\n c.execute('DROP TABLE IF EXISTS chip')\n c.execute('CREATE TABLE chip (variant text, chip text)')\nchr_end_l = []\nfor in_vcf_addr in in_vcf_addr_l:\n f_in_vcf = open(in_vcf_addr)\n for line in f_in_vcf:\n if not line.startswith('#'):\n line_l = line.split('\\t')\n snp_chr = line_l[0].replace('chr', '')\n snp_pos = int(line_l[1])\n snp_id = line_l[2]\n # snp_ref = line_l[3]\n # snp_alt = line_l[4]\n snp_info = line_l[7]\n snp_af = float(snp_info.split(';')[1].split('=')[1])\n\n snp_anno = (snp_id, snp_chr, snp_pos, 'NA', snp_af, 1, 0, 0, 'NA', 'NA', 'NA', 3)\n c.execute('INSERT INTO anno VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)', snp_anno)\n c.execute('INSERT INTO chip VALUES (?,?)', (snp_id, 'exon'))\n vcf_file_name = os.path.basename(in_vcf_addr)\n chr_end_l.append('{}\\t{}/{}\\t{}\\n'.format(snp_chr, 'data', vcf_file_name, snp_pos))\n print('Done', vcf_file_name)\n\n\nconn.commit()\nc.close()\nconn.close()\n\nf_chr_end = open(args.end_pos, 'w')\nf_chr_end.write(''.join(chr_end_l))\nf_chr_end.close()","sub_path":"scripts/add_db.v0.2.py","file_name":"add_db.v0.2.py","file_ext":"py","file_size_in_byte":2249,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"315998463","text":"import requests, json, re, openpyxl, sys\nfrom lxml import etree\nfrom bs4 import BeautifulSoup\nfrom random import sample\n\n\ndef get_courses_links_list():\n response = requests.get(\"https://www.coursera.org/sitemap~www~courses.xml\")\n root = etree.fromstring(response.content)\n return [url.text for url in root.iter(\"{*}loc\")]\n\n\ndef get_course_start_data(course_page):\n tag_with_json_data = course_page.find(\"script\", attrs={\"type\":\"application/ld+json\"})\n if tag_with_json_data is None:\n return None\n json_data = json.loads(tag_with_json_data.text)\n try:\n return json_data[\"hasCourseInstance\"][0][\"startDate\"]\n except KeyError:\n return None\n\n\ndef get_course_rate(course_page):\n course_rate_tag = course_page.find(\"div\", class_=\"ratings-text\")\n\n if course_rate_tag is None:\n return None\n\n rate = re.search(r\"\\d([.]\\d)*\", course_rate_tag.text)\n if rate is None:\n return None\n\n return rate.group()\n\n\ndef get_course_info(course_url):\n response = requests.get(course_url)\n course_page = BeautifulSoup(response.content, \"html.parser\")\n\n course_name = course_page.find(\"div\", class_=\"title\").text\n course_language = course_page.find(\"div\", class_=\"language-info\").text\n course_start_date = get_course_start_data(course_page)\n number_of_weeks = len(course_page.find_all(\"div\", class_=\"week\"))\n course_rate = get_course_rate(course_page)\n \n return (course_name, course_language, course_start_date, number_of_weeks, course_rate)\n\n\ndef get_all_courses_info(required_number):\n courses_links_list = get_courses_links_list()\n courses_info_list = []\n\n for url in sample(courses_links_list, required_number):\n course_info = get_course_info(url)\n courses_info_list.append(course_info)\n\n return courses_info_list\n\n\ndef 
output_courses_info_to_xlsx(filepath, courses_info):\n workbook = openpyxl.Workbook()\n worksheet = workbook.active\n first_info_row = 2\n\n worksheet.cell(row=1, column=1, value=\"Name\")\n worksheet.cell(row=1, column=2, value=\"Language\")\n worksheet.cell(row=1, column=3, value=\"Start date\")\n worksheet.cell(row=1, column=4, value=\"Number of weeks\")\n worksheet.cell(row=1, column=5, value=\"Rate\")\n\n for course_number, course_info in enumerate(courses_info, start=first_info_row):\n for property_number, course_property in enumerate(course_info, start=1):\n worksheet.cell(row=course_number, column=property_number, value=course_property)\n\n workbook.save(filepath)\n\n\nif __name__ == '__main__':\n required_courses_number = 20\n\n if len(sys.argv) < 2:\n print(\"Set filepath as first parameter please\")\n exit(1)\n\n courses_info_list = get_all_courses_info(required_courses_number)\n print(\"Courses info obtained.\")\n\n try:\n output_courses_info_to_xlsx(sys.argv[1], courses_info_list)\n except openpyxl.utils.exceptions.InvalidFileException:\n print(\"File writing error.\")\n exit(1)\n\n print(\"Done.\")\n \n \n","sub_path":"coursera.py","file_name":"coursera.py","file_ext":"py","file_size_in_byte":3016,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"542328240","text":"# encoding: utf-8\n#\n#\n# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. If a copy of the MPL was not distributed with this file,\n# You can obtain one at http://mozilla.org/MPL/2.0/.\n#\n# Contact: Kyle Lahnakoski (kyle@lahnakoski.com)\n#\n\n\nfrom __future__ import absolute_import, division, unicode_literals\n\nfrom mo_threads import Queue, THREAD_STOP, Thread, Till\n\nfrom mo_logs import Except, Log\nfrom mo_logs.log_usingNothing import StructuredLogger\n\nDEBUG = False\nPERIOD = 0.3\n\n\nclass StructuredLogger_usingThread(StructuredLogger):\n def __init__(self, logger, period=PERIOD):\n if not isinstance(logger, StructuredLogger):\n Log.error(\"Expecting a StructuredLogger\")\n\n self.logger = logger\n self.queue = Queue(\n \"Queue for \" + self.__class__.__name__,\n max=10000,\n silent=True,\n allow_add_after_close=True,\n )\n self.thread = Thread(\n \"Thread for \" + self.__class__.__name__, worker, logger, self.queue, period\n )\n # worker WILL BE RESPONSIBLE FOR THREAD stop()\n self.thread.parent.remove_child(self.thread)\n self.thread.start()\n\n def write(self, template, params):\n try:\n self.queue.add({\"template\": template, \"params\": params})\n return self\n except Exception as e:\n e = Except.wrap(e)\n raise e # OH NO!\n\n def stop(self):\n try:\n self.queue.add(THREAD_STOP) # BE PATIENT, LET REST OF MESSAGE BE SENT\n self.thread.join()\n except Exception as e:\n Log.note(\"problem in threaded logger\" + str(e))\n\n\ndef worker(logger, queue, period, please_stop):\n please_stop.then(lambda: queue.close)\n\n try:\n while not please_stop:\n log = queue.pop(till=please_stop)\n logs = [log] + queue.pop_all()\n for log in logs:\n if log is THREAD_STOP:\n please_stop.go()\n continue\n\n logger.write(**log)\n (Till(seconds=period) | please_stop).wait()\n\n # ONE LAST DRAIN\n for log in queue.pop_all():\n if log is not THREAD_STOP:\n logger.write(**log)\n except Exception as e:\n import sys\n\n sys.stderr.write(\n \"problem in \" + StructuredLogger_usingThread.__name__ + \": \" + str(e)\n 
)\n","sub_path":".venv/lib/python3.8/site-packages/mo_logs/log_usingThread.py","file_name":"log_usingThread.py","file_ext":"py","file_size_in_byte":2425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"386529010","text":"import pandas as pd\nimport numpy as np\n\nsamples=pd.read_csv('big-dvd-df-rated.txt',sep=',',header=None)\nsamples=samples.iloc[:,:3]\nsamples.columns=['user','item','rate']\n\nsamples['user']=samples['user']-1\nsamples['item']=samples['item']-1\n\ndf=np.array(samples)\n","sub_path":"practice_mf/data/count.py","file_name":"count.py","file_ext":"py","file_size_in_byte":242,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"474220075","text":"a = int(input())\r\nif 0<=a<=36:\r\n    if a==0:\r\n        print('зеленый')\r\n    if 1<=a<=10:\r\n        if a%2==0:\r\n            print('черный')\r\n        else:\r\n            print('красный')\r\n    if 11<=a<=18:\r\n        if a%2==0:\r\n            print('красный')\r\n        else:\r\n            print('черный')\r\n    if 19<=a<=28:\r\n        if a%2==0:\r\n            print('черный')\r\n        else:\r\n            print('красный')\r\n    if 29<=a<=36:\r\n        if a%2==0:\r\n            print('красный')\r\n        else:\r\n            print('черный')\r\nelse:\r\n    print('ошибка ввода')","sub_path":"Условный оператор/Вложенные и каскадные условия/Цвет колеса рулетки (X).py","file_name":"Цвет колеса рулетки (X).py","file_ext":"py","file_size_in_byte":625,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"55688463","text":"#!/usr/bin/env python\n\n# This file is part of MAUS: http://micewww.pp.rl.ac.uk/projects/maus\n#\n# MAUS is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# MAUS is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with MAUS. 
If not, see .\n#\n\n# pylint: disable = W0311, E1101, W0613, C0111, R0911, W0621, C0103, R0902\n\nimport ROOT\nimport os\nimport math\n\nimport framework\nimport analysis.tools as tools\n\n\n\"\"\"\n KL Analysis Classes are stored here.\n\"\"\"\n\nclass kl_spacepoint_extractor(framework.processor_base) :\n\n def __init__(self) :\n framework.processor_base.__init__(self, \"kl_spacepoint_extractor\")\n\n self.__quality_cut = False\n self.__counter_hits = 0\n self.__counter_cut = 0\n self._reset()\n\n self.__hit_positions = ROOT.TH2F('kl_hit_positions', \\\n \"Positions of KL Hits\", 200, -100.0, 100.0, 200, -100.0, 100.0 )\n self.__hit_positions_cut = ROOT.TH2F('kl_hit_positions_cut', \\\n \"Positions of KL Hits (CUT)\", 200, -100.0, 100.0, 200, -100.0, 100.0 )\n\n\n def set_quality_cut(self, cut) :\n self.__quality_cut = cut\n \n\n def get_dependencies(self, inserter) :\n pass\n\n\n def get_args(self, parser) :\n parser.add_argument('--cut_kl_quality', type=bool, default=False, \\\n help='Set to cut poor quality reconstructed hits')\n\n\n def process_args(self, namespace) :\n self.__quality_cut = namespace.cut_kl_quality\n\n\n def _reset(self) :\n self.__pos = []\n self.__charge = 0.0\n self.__charge_product = 0.0\n self.__cell = -1\n self.__quality_flag = False\n\n self.__hits = []\n\n\n def _process(self, file_reader) :\n kl_event = file_reader.get_event('kl')\n\n cell_hit_container = kl_event.GetKLEventCellHit()\n number_hits = cell_hit_container.GetKLCellHitArraySize()\n\n self.__counter_hits += number_hits\n\n for hit_i in range(number_hits) :\n cell_hit = cell_hit_container.GetKLCellHitArrayElement(hit_i)\n\n self.__hit_positions.Fill(cell_hit.GetGlobalPosX(), \\\n cell_hit.GetGlobalPosY())\n\n if self.__quality_cut and not cell_hit.GetFlag() :\n self.__counter_cut += 1\n else :\n self.__hits.append(cell_hit)\n self.__hit_positions_cut.Fill(cell_hit.GetGlobalPosX(), \\\n cell_hit.GetGlobalPosY())\n\n\n def _store_plots(self, plot_dict) :\n kl_dict = {}\n\n kl_dict['hit_positions'] = self.__hit_positions\n kl_dict['hit_positions_cut'] = self.__hit_positions_cut\n\n plot_dict['kl'] = kl_dict\n return plot_dict\n\n\n def _store_data(self, data_dict) :\n kl_dict = {}\n\n kl_dict['total_hits'] = self.__counter_hits\n kl_dict['total_events'] = self.get_number_events()\n kl_dict['hits_event'] = self.__counter_hits / self.get_number_events()\n kl_dict['cut_hits'] = self.__counter_cut\n\n data_dict['kl_extractor'] = kl_dict\n return data_dict\n\n","sub_path":"lib/MPA/kl_analysis.py","file_name":"kl_analysis.py","file_ext":"py","file_size_in_byte":3363,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"582993800","text":"\"\"\"\nRAMP database helper queries\n\"\"\"\nfrom __future__ import print_function, absolute_import\n\nfrom ..model import Submission, Event, EventTeam, Team\n\n\ndef select_submissions_by_id(session, submission_id):\n \"\"\"\n Query for all submissions of given event with given status\n\n Parameters\n ----------\n session : `sqlalchemy.orm.Session`\n database connexion session\n event_name : str\n name of the RAMP event\n state : str\n state of the requested submissions\n\n Returns\n -------\n submission : `rampbkd.model.Submission`\n queried submission\n\n \"\"\"\n submission = (session\n .query(Submission)\n .filter(Submission.id == submission_id)\n .first())\n\n return submission\n\n\ndef select_submissions_by_state(session, event_name, state):\n \"\"\"\n Query for all submissions of given event with given status\n\n 
Parameters\n ----------\n session :\n database connexion session\n event_name : str\n name of the RAMP event\n state : str\n state of the requested submissions\n\n Returns\n -------\n List of submissions : List[`rampbkd.model.Submission`]\n queried submissions\n\n \"\"\"\n submissions = (session\n .query(Submission)\n .filter(Event.name == event_name)\n .filter(Event.id == EventTeam.event_id)\n .filter(EventTeam.id == Submission.event_team_id)\n .filter(Submission.state == state)\n .order_by(Submission.submission_timestamp)\n .all())\n\n return submissions\n\n\ndef select_submission_by_name(session, event_name, team_name, name):\n \"\"\"\n Get a submission by name\n\n Parameters\n ----------\n session :\n database connexion session\n event_name : str\n name of the RAMP event\n team_name : str\n name of the RAMP team\n name : str\n name of the submission\n\n Returns\n -------\n `Submission` instance\n\n \"\"\"\n submission = (session\n .query(Submission)\n .filter(Event.name == event_name)\n .filter(Event.id == EventTeam.event_id)\n .filter(Team.name == team_name)\n .filter(Team.id == EventTeam.team_id)\n .filter(EventTeam.id == Submission.event_team_id)\n .filter(Submission.name == name)\n .order_by(Submission.submission_timestamp)\n .one())\n return submission\n\n\ndef select_event_by_name(session, event_name):\n \"\"\"\n Get an event by name\n\n Parameters\n ----------\n session :\n database connexion session\n event_name : str\n name of the RAMP event\n\n Returns\n -------\n `Event` instance\n\n \"\"\"\n event = session.query(Event).filter(Event.name == event_name).one()\n return event\n","sub_path":"ramp-database/rampdb/tools/query.py","file_name":"query.py","file_ext":"py","file_size_in_byte":2888,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"592938828","text":"import re, traceback\nimport json, jwt\nimport connection\n\nfrom flask import jsonify, request\nfrom functools import wraps\nfrom config import SECRET_KEY, ALGORITHM, database\nfrom model.user_dao import UserDao\nfrom pymysql import err\n\ndef login_confirm(original_function):\n \"\"\"\n 로그인 인증 - Business Layer(service) function\n Args:\n original_function : 로그인 인증이후 사용해야하는 함수\n \n Returns :\n original_function : 유효한 토큰일 경우 로그인한 유저 정보와 DB\n 에러코드 401 : 유효한 토큰이 아닐경우 메세지와 함께 401 코드를 클라이언트로 전달\n jsonify(message), 400 : 예외가 발생하면 예외코드와 예외명을 400 코드와 함께 클라이언트로 전달\n Author :\n taeha7b@gmail.com (김태하)\n History:\n 2020-10-07 (taeha7b@gmail.com (김태하)) : 불필요한 코드 삭제 및 파라미터 수정\n 2020-09-27 (taeha7b@gmail.com (김태하)) : 초기생성\n \"\"\"\n def wrapper(self, *args, **kwargs):\n try:\n access_token = request.headers.get(\"Authorization\", None)\n token_payload = jwt.decode(access_token, SECRET_KEY['secret'], ALGORITHM['algorithm'])\n return original_function(self, token_payload, *args, **kwargs)\n\n except Exception as e:\n\n return jsonify({'message':f'{e}'}), 401\n\n return wrapper\n\nclass AccountValidattionError(Exception):\n \"\"\"\n 유저 아이디 예외 발생\n Args:\n Exception : 예외 발생시의 메세지\n Author :\n taeha7b@gmail.com (김태하)\n History:\n 2020-09-27 (taeha7b@gmail.com (김태하)) : 초기생성\n \"\"\"\n def __init__(self, message):\n super().__init__()\n self.message = message\n\nclass PasswordValidattionError(Exception):\n \"\"\"\n 유저 비밀번호 예외 발생\n Args:\n Exception : 예외 발생시의 메세지\n Author :\n taeha7b@gmail.com (김태하)\n History:\n 2020-09-27 (taeha7b@gmail.com (김태하)) : 초기생성\n \"\"\"\n def __init__(self, message):\n super().__init__()\n self.message = message\n \nclass EmailValidattionError(Exception):\n 
\"\"\"\n 유저 이메일 예외 발생\n Args:\n Exception : 예외 발생시의 메세지\n Author :\n taeha7b@gmail.com (김태하)\n History:\n 2020-09-27 (taeha7b@gmail.com (김태하)) : 초기생성\n \"\"\"\n def __init__(self, message):\n super().__init__()\n self.message = message\n\n\ndef account_validate(value):\n \"\"\"\n 유저 아이디 유효성 검사 \n Args:\n 유저 아이디\n Returns :\n True : 유저 아이디와 정규식이 일치하면 True를 반환함\n Author :\n taeha7b@gmail.com (김태하)\n History:\n 2020-09-29 (taeha7b@gmail.com (김태하)) : 초기생성\n \"\"\"\n regex = re.compile(r'^[a-zA-Z0-9]{4,20}$')\n if not regex.match(value):\n return True\n \ndef password_validate(value):\n \"\"\"\n 유저 비밀번호 유효성 검사 \n Args:\n 유저 비밀번호\n Returns :\n True : 유저 비밀번호와 정규식이 일치하면 True를 반환함\n Author :\n taeha7b@gmail.com (김태하)\n History:\n 2020-09-29 (taeha7b@gmail.com (김태하)) : 초기생성\n \"\"\"\n regex = re.compile(r'^(?=.*[A-Za-z])(?=.*\\d)(?=.*[$@$!%*#?&])[A-Za-z\\d$@$!%*#?&]{8,20}')\n if not regex.match(value):\n return True\n\ndef email_validate(value):\n \"\"\"\n 유저 이메일 유효성 검사 \n Args:\n 유저 이메일\n Returns :\n True : 유저 이메일와 정규식이 일치하면 True를 반환함\n Author :\n taeha7b@gmail.com (김태하)\n History:\n 2020-09-29 (taeha7b@gmail.com (김태하)) : 초기생성\n \"\"\"\n regex = re.compile(r'^[a-zA-Z0-9+-_]+@[a-zA-Z0-9-]+\\.[a-zA-Z0-9-.]+$')\n if not regex.match(value):\n return True\n\ndef phone_num_validate(value):\n \"\"\"\n 유저 전화번호 유효성 검사 \n Args:\n 유저 전화번호\n Returns :\n True : 유저 전화번호와 정규식이 일치하면 True를 반환함\n Author :\n taeha7b@gmail.com (김태하)\n History:\n 2020-09-29 (taeha7b@gmail.com (김태하)) : 초기생성\n \"\"\"\n regex = re.compile(r'(\\d{3}).*(\\d{4}).*(\\d{4})')\n if not regex.match(value):\n return True\n\ndef catch_exception(f, *args, **kwargs):\n \"\"\"\n decorator API\n Args:\n 에러들을 받는다.\n Retruns:\n return f(*args, **kwargs) -> 데코레이터를 발행해준다.\n jsonify({\"message\" : f\"INVALID_PARAMETER_{e.args[0]}\"}), 400 -> 해당 에러 메시지 내용�� 400에러\n Authors:\n wldus9503@gmail.com(이지연)\n History:\n 2020-09-29(wldus9503@gmail.com) : 데코레이터 초기 생성\n \"\"\"\n @wraps(f)\n def wrapper(*args, **kwargs):\n try:\n return f(*args, **kwargs)\n except Exception as e:\n traceback.print_exc()\n if len(e.args)==0:\n return jsonify({\"message\" : \"INVALID_PARAMETER\"}), 400\n return jsonify({\"message\" : f\"INVALID_PARAMETER_{e.args[0]}\"}), 405\n return wrapper\n\n\n\"\"\"\nSQL triggers list\n\n#1 : QNA가 추가되면 qna_count값을 +1하는 트리거\nDELIMITER $$\n\nCREATE TRIGGER qna_count_up\nAFTER INSERT ON questions FOR EACH ROW\nBEGIN\n\tIF NEW.id IS NOT NULL THEN\n\t\tUPDATE products as p SET qna_count = qna_count +1 WHERE p.id = NEW.product_id;\n\tEND IF;\nEND $$\n\nDELIMITER ;\n\n#2 : QNA가 삭제되면 qna_count값을 -1하는 트리거\nDELIMITER $$\n\nCREATE TRIGGER qna_count_down\nAFTER UPDATE ON questions FOR EACH ROW\nBEGIN\n\tIF NEW.is_deleted = 1 THEN\n\t\tUPDATE products as p SET qna_count = qna_count -1 WHERE p.id = OLD.product_id;\n\tEND IF;\nEND $$\n\nDELIMITER ;\n\n#3 : 쿠폰을 다운로드하면 download_count값을 +1하는 트리거\nDELIMITER $$\n\nCREATE TRIGGER coupon_download_count_up\nAFTER INSERT ON user_coupons FOR EACH ROW\nBEGIN\n\tIF NEW.id IS NOT NULL THEN\n\t\tUPDATE coupon_details as cd SET download_count = download_count +1 WHERE cd.coupon_id = NEW.coupon_id;\n\tEND IF;\nEND $$\n\nDELIMITER ;\n\n#4 : 다운로드한 쿠폰을 사용하면 use_count값을 +1하는 트리거\nDELIMITER $$\n\nCREATE TRIGGER coupon_use_count_up\nAFTER UPDATE ON user_coupons FOR EACH ROW\nBEGIN\n\tIF NEW.is_deleted = 1 THEN\n\t\tUPDATE coupon_details SET use_count = use_count +1 WHERE coupon_id = OLD.coupon_id;\n\tEND IF;\nEND $$\n\nDELIMITER ;\n\n\"\"\" 
","sub_path":"service_back/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":6858,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"604805907","text":"import random\nfrom operator import itemgetter\nimport math\nimport os\n\nfrom scripts.mapgen.draw import Draw\nfrom scripts.mapgen.coords import Coords\nfrom scripts.mapgen.ascii import ascii\nfrom globals import temps_folder\n\n\nclass Map(dict):\n \"\"\"This class instantiates a dictionary that fills itself with a grid of\n specified height and length. The keys are tuples of each x, y coordinate\n on the grid. The values of those keys are initializes as an empty\n 'CastleWall' sprite.\"\"\"\n def __init__(self, name, l, h):\n super().__init__()\n self.name = self['name'] = name\n for y in range(h):\n for x in range(l):\n self.update({(x, y): 'CastleWall'})\n self.update({'name': name})\n self.update({'height': h - 1, 'length': l - 1, 'area': h * l})\n self.update({'center': Coords.center(l, h)})\n self.MAP = ''\n\n def __setitem__(self, key, item):\n self.__dict__[key] = item\n\n def __getitem__(self, key):\n return self.__dict__[key]\n\n def __repr__(self):\n return repr(self.__dict__)\n\n def __len__(self):\n return len(self.__dict__)\n\n def __delitem__(self, key):\n del self.__dict__[key]\n\n def clear(self):\n return self.__dict__.clear()\n\n def copy(self):\n return self.__dict__.copy()\n\n def has_key(self, k):\n return k in self.__dict__\n\n def update(self, *args, **kwargs):\n return self.__dict__.update(*args, **kwargs)\n\n def keys(self):\n return self.__dict__.keys()\n\n def values(self):\n return self.__dict__.values()\n\n def items(self):\n return self.__dict__.items()\n\n def pop(self, *args):\n return self.__dict__.pop(*args)\n\n def __cmp__(self, dict_):\n return self.__cmp__(self.__dict__)\n\n def __contains__(self, item):\n return item in self.__dict__\n\n def __iter__(self):\n return iter(self.__dict__)\n\n # def __unicode__(self):\n # return unicode(repr(self.__dict__))\n\n def add_room(self, inp_position, shape, size):\n \"\"\"This method adds a room of specified shape, random or specified\n size, and random or specified location to the dungeon. It also checks\n if the space where the room is located is open. If the space is not\n available it destroys the prepared room. 
The 'border' of the room is\n given the 'Wall' img while the area is tagged as 'Empty Floor', which\n gets interpreted as the background layer.\"\"\"\n grid_h = self['height']\n grid_l = self['length']\n border = []\n area = []\n space_open = True\n doors = 0\n min_size = 8\n\n if inp_position == 'random':\n position = Coords.random(self['length'], self['height'], min_size, min_size)\n else:\n position = inp_position\n\n if size == 'random':\n max_h = round(grid_h / 4)\n max_lth = round(grid_l / 4)\n x, y = position\n if x + max_lth >= self['length']:\n max_lth = self['length'] - x\n elif x - max_lth <= 0:\n max_lth = x\n if y + max_h >= self['height']:\n max_h = self['height'] - y\n elif y - max_h <= 0:\n max_h = y\n lth = min_size + random.randint(0, (max_lth - min_size))\n h = min_size + random.randint(0, (max_h - min_size))\n if shape == 'circle' or shape == 'diamond':\n size = lth if lth <= h else h\n else:\n size = (lth, h)\n\n if shape == 'square':\n border, area = Draw.square(position, size, 'parsed')\n doors = random.randint(4, 8)\n elif shape == 'circle':\n border, area = Draw.circle(position, size, 'parsed')\n doors = random.randint(4, 8)\n elif shape == 'diamond':\n border, area = Draw.diamond(position, size, 'parsed')\n doors = random.randint(4, 8)\n for coordinate in border + area:\n if self[coordinate] != 'CastleWall':\n space_open = False\n\n door_chance = 1\n lth, h = size if shape == 'square' else (size, size)\n door_roll = round((lth * 2 + h * 2) / 4)\n if space_open:\n for coordinate in area:\n self[coordinate] = 'EmptyFloor'\n for coordinate in border:\n self[coordinate] = 'Wall'\n door_calc = random.randint(door_chance, door_roll) if door_chance < door_roll else door_roll\n if door_calc == door_roll and doors > 0:\n door_chance = 1\n if shape == 'square':\n max_x = max(border, key=itemgetter(0))[0]\n min_x = min(border, key=itemgetter(0))[0]\n max_y = max(border, key=itemgetter(1))[1]\n min_y = min(border, key=itemgetter(1))[1]\n if (\n coordinate != (max_x, min_y) and\n coordinate != (max_x, max_y) and\n coordinate != (min_x, min_y) and\n coordinate != (min_x, max_y)\n ):\n doors -= 1\n self[coordinate] = 'Door'\n else:\n doors -= 1\n self[coordinate] = 'Door'\n else:\n door_chance += 1\n del border\n del area\n\n def add_hallways(self):\n \"\"\"\n This function loops across each door in the floor grid, or map,\n and attempts to find the \"best door\" or shortest distance door without\n obstruction to pair it with and build a hallway to.\n \"\"\"\n # first this function collects all the doors in this grid into a list:\n all_doors = [k for k, v in self.items() if v == 'Door']\n for door in all_doors:\n # next it sets the x and y coordinates to the current door tuple:\n x, y = door\n\n # and establishes variables to be used:\n scan_size = 30\n x_door = False\n y_door = False\n\n # here we check the position of the door relative to empty grid space,\n # which is known as 'CastleWall'. 
This allows determination of the door's\n # directionality as x - 1 or x + 1, y - 1, or y + 1.\n # this portion ensures that the scanned range, scan_x and scan_y\n # does not exceed the bounds of the total grid.\n if self[(x - 1, y)] == 'CastleWall':\n x_door = True\n x_if = x - scan_size * 2 if x >= scan_size * 2 else 0\n scan_x = range(x_if, x - 1)\n y_r_min = y - scan_size if y >= scan_size else 0\n y_r_max = y + scan_size if y + scan_size <= self['height'] else self['height']\n scan_y = range(y_r_min, y_r_max)\n elif self[(x + 1, y)] == 'CastleWall':\n x_door = True\n x_if = x + scan_size * 2 if x + scan_size * 2 <= self['length'] else self['length']\n scan_x = range(x + 1, x_if)\n y_r_min = y - scan_size if y >= scan_size else 0\n y_r_max = y + scan_size if y + scan_size <= self['height'] else self['height']\n scan_y = range(y_r_min, y_r_max)\n elif self[(x, y - 1)] == 'CastleWall':\n y_door = True\n y_if = y - scan_size * 2 if y >= scan_size * 2 else 0\n scan_y = range(y_if, y - 1)\n x_r_min = x - scan_size if x >= scan_size else 0\n x_r_max = x + scan_size if x + scan_size <= self['height'] else self['height']\n scan_x = range(x_r_min, x_r_max)\n elif self[(x, y + 1)] == 'CastleWall':\n y_door = True\n y_if = y + scan_size * 2 if y + scan_size * 2 <= self['height'] else self['height']\n scan_y = range(y + 1, y_if)\n x_r_min = x - scan_size if x >= scan_size else 0\n x_r_max = x + scan_size if x + scan_size <= self['length'] else self['length']\n scan_x = range(x_r_min, x_r_max)\n else:\n # if the door has no directionality toward empty grid space\n # we can also check if it can be made into a \"nook\" door between adjacent rooms.\n # regardless of if this is possible this else then continues the loop\n # to the next door on the all_doors list.\n if (\n self[(x - 1, y)] == 'EmptyFloor'\n and self[(x + 2, y)] == 'EmptyFloor'\n ):\n self[(x + 1, y)] = 'EmptyFloor'\n self[(x, y)] = 'EmptyFloor'\n elif (\n self[(x + 1, y)] == 'EmptyFloor'\n and self[(x - 2, y)] == 'EmptyFloor'\n ):\n self[(x - 1, y)] = 'EmptyFloor'\n self[(x, y)] = 'EmptyFloor'\n elif (\n self[(x, y - 1)] == 'EmptyFloor'\n and self[(x, y + 2)] == 'EmptyFloor'\n ):\n self[(x, y + 1)] = 'EmptyFloor'\n self[(x, y)] = 'EmptyFloor'\n elif (\n self[(x, y + 1)] == 'EmptyFloor'\n and self[(x, y - 2)] == 'EmptyFloor'\n ):\n self[(x, y - 1)] = 'EmptyFloor'\n self[(x, y)] = 'EmptyFloor'\n continue\n\n # if the current door has directionality toward empty space or 'CastleWall'\n # then the range toward that empty space is scanned. 
An adj value is added so\n # that the minimum clearance size is greater than 0 and the for loops below occur.\n # This is needed for perfectly horizontal or vertical door-to-door pairs.\n # Once the range is established in a way that is adaptable to all above\n # directionality the spaces in the x and y clearance are checked to be empty\n # grid space or 'CastleWall' Every workable door is added to an option list.\n\n low_value = 1000\n min_distance = 4\n best_door = []\n for X in scan_x:\n for Y in scan_y:\n distance = int(math.sqrt(math.pow((X - x), 2) + math.pow((Y - y), 2)))\n if self[(X, Y)] == 'EmptyFloor' and min_distance < distance < low_value:\n good_door = True\n diff_x = abs(X - x)\n diff_y = abs(Y - y)\n X_door = bool(self[(X - 2, Y)] == 'CastleWall' or self[(X + 2, Y)] == 'CastleWall')\n Y_door = bool(self[(X, Y - 2)] == 'CastleWall' or self[(X, Y + 2)] == 'CastleWall')\n diag_door = False\n\n if diff_y == 0:\n hall_type = 'direct_x'\n clearance_x = range(x + 1, X - 2) if x < X else range(X + 2, x - 1)\n clearance_y = range(y - 1, y + 1)\n elif diff_x == 0:\n hall_type = 'direct_y'\n clearance_x = range(x - 1, x + 1)\n clearance_y = range(y + 1, Y - 2) if y < Y else range(Y + 2, y - 1)\n elif x_door and Y_door and not diag_door:\n hall_type = 'corner_xtoy'\n clearance_x = range(x + 1, X) if x < X else range(X, x - 1)\n clearance_y = range(y, Y - 2) if y < Y else range(Y + 2, y)\n elif y_door and X_door and not diag_door:\n hall_type = 'corner_ytox'\n clearance_y = range(y + 1, Y) if y < Y else range(Y, y - 1)\n clearance_x = range(x, X - 2) if x < X else range(X + 2, x)\n else:\n x_prop = round(diff_x / diff_y)\n y_prop = round(diff_y / diff_x)\n hall_type = 'diag_y' if y_prop > x_prop else 'diag_x'\n clearance_x = range(x, X) if x < X else range(X, x)\n clearance_y = range(y, Y) if y < Y else range(Y, y)\n\n for space_x in clearance_x:\n for space_y in clearance_y:\n if self[(space_x, space_y)] != 'CastleWall':\n good_door = False\n break\n if not good_door:\n break\n if good_door:\n low_value = distance\n best_door = [(X, Y), hall_type]\n\n if len(best_door) == 0:\n continue\n # since the terminal door was found, the best door, we now assign to X and Y:\n X, Y = best_door[0]\n hall_type = best_door[1]\n diff_x = abs(X - x)\n diff_y = abs(Y - y)\n x_inc = 1 if x < X else -1\n y_inc = 1 if y < Y else -1\n\n # now we increment the x and/or y value, starting from the\n # initial door, toward the terminal X and/or Y.\n while (\n (hall_type == 'direct_x' and diff_x > 2) or\n (hall_type == 'direct_y' and diff_y > 2) or\n (hall_type == 'corner_xtoy' and diff_y > 2) or\n (hall_type == 'corner_ytox' and diff_x > 2) or\n (hall_type == 'diag_y' and diff_x > 2) or\n (hall_type == 'diag_x' and diff_y > 2)\n ):\n diff_x = abs(X - x)\n diff_y = abs(Y - y)\n if diff_y == 0:\n x += x_inc\n self[(x, y + 1)] = 'Wall'\n self[(x, y)] = 'EmptyFloor'\n self[(x, y - 1)] = 'Wall'\n elif diff_x == 0:\n y += y_inc\n self[(x + 1, y)] = 'Wall'\n self[(x, y)] = 'EmptyFloor'\n self[(x - 1, y)] = 'Wall'\n elif hall_type == 'corner_xtoy':\n x += x_inc\n self[(x, y + 1)] = 'Wall'\n self[(x, y)] = 'EmptyFloor'\n self[(x, y - 1)] = 'Wall'\n if diff_x == 1:\n self[(x + x_inc, y)] = 'Wall'\n self[(x + x_inc, y - y_inc)] = 'Wall'\n elif hall_type == 'corner_ytox':\n y += y_inc\n self[(x + 1, y)] = 'Wall'\n self[(x, y)] = 'EmptyFloor'\n self[(x - 1, y)] = 'Wall'\n if diff_y == 1:\n self[(x, y + y_inc)] = 'Wall'\n self[(x - x_inc, y + y_inc)] = 'Wall'\n elif hall_type == 'diag':\n x = X\n y = Y\n else:\n break\n\n 
def place_player(self):\n \"\"\"This method places the player on the map with the ascii character\n '1'\"\"\"\n coordinate = self['center']\n self[coordinate] = 'Player1'\n\n def add_stairs(self, number, direction):\n \"\"\"This method adds stairs to the dungeon floor. It also uses the\n empty floors method to make sure the stair is welcome!\"\"\"\n coordinates = []\n if direction.lower() == 'both':\n coordinates += (self.empty_floors(number * 2))\n for i, coordinate in enumerate(coordinates):\n if i <= number:\n self[coordinate] = 'StairsUp'\n else:\n self[coordinate] = 'StairsDown'\n else:\n coordinates += (self.empty_floors(number))\n for coordinate in coordinates:\n self[coordinate] = direction\n\n def add_blocks(self, coordinates):\n \"\"\"This method adds a Block to the dungeon floor using the '?' ascii\n character. It accepts a list of coordinates to add.\"\"\"\n for coordinate in coordinates:\n self[coordinate] = 'Block'\n\n def add_spawners(self, density):\n \"\"\"This method adds monster spawners to the dungeon floor with the\n ascii character 'M'. It uses the empty floors method to ensure a\n space is available.\"\"\"\n area = self['area']\n total_spawners = round((area / 1000) * density)\n for coordinate in self.empty_floors(total_spawners):\n self[coordinate] = 'Spawner'\n\n def empty_floors(self, number):\n \"\"\"Takes a specified number and generates a list of coordinates for\n the currently empty floors in the dungeon. Every round of the while\n loop it gets a random coordinate key and checks if its value in the\n dungeon map is an Empty floor. Then it appends the coordinate and\n removes all duplicates from itself. If it cannot find the specified\n number in number * 10 trials it gives up and returns whatever\n coordinates it did find.\"\"\"\n coordinates = []\n trials = 0\n while number > len(coordinates):\n coordinate = Coords.random(self['length'], self['height'])\n trials += 1\n if self[coordinate] == 'EmptyFloor':\n coordinates.append(coordinate)\n coordinates = list(set(coordinates))\n elif trials > number * 10:\n break\n return coordinates\n\n def convert_to_text(self):\n \"\"\"This turns the map dictionary into a text string composed of the ascii\n characters defined in ascii's, the dictionary, keys.\"\"\"\n all_doors = [k for k, v in self.items() if v == 'Door']\n for door in all_doors:\n X, Y = door\n if(\n self[(X + 1, Y)] == 'CastleWall' or\n self[(X - 1, Y)] == 'CastleWall' or\n self[(X, Y + 1)] == 'CastleWall' or\n self[(X, Y - 1)] == 'CastleWall'\n ):\n self[(X, Y)] = 'Wall'\n length = self['length']\n height = self['height']\n for y in range(height):\n for x in range(length):\n self.MAP += ascii[self[(x, y)]]\n if x == length - 1:\n self.MAP += '\\n'\n\n def finalize(self):\n \"\"\"This turns the text string composed by convert_to_text() as self.MAP into a .txt file.\"\"\"\n self.convert_to_text()\n file_name = '% s.txt' % self.name\n file = open(os.path.join(temps_folder, file_name), 'w+')\n file.write(self.MAP)\n file.close()\n del file\n del self\n","sub_path":"scripts/mapgen/map.py","file_name":"map.py","file_ext":"py","file_size_in_byte":18896,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"394112775","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[ ]:\n\n\nfrom tkinter import Tk, ttk, Label, Button, Text, END\n\nreports = [\n {'이름' : '가영', '학년' : 1, '국어' : 90, '수학' : 22, '영어' : 33, '한국사' : 66, '통합과학' : 99},\n {'이름' : '나겸', '학년' : 3, '국어' : 90, '수학' : 23, '영어' : 90, '한국사' : 56, '통합과학' : 
29},\n {'이름' : '다은', '학년' : 2, '국어' : 20, '수학' : 28, '영어' : 66, '한국사' : 66, '통합과학' : 94},\n {'이름' : '라희', '학년' : 3, '국어' : 44, '수학' : 90, '영어' : 77, '한국사' : 26, '통합과학' : 99}\n]\n \nselected_index = 0\n\ndef report_selected(event) : # 선택하면 반영되게끔\n global selected_index\n for item in treeTable.selection() :\n selected_index = int(treeTable.item(item, \"text\"))\n report = reports[selected_index]\n name = report['이름']\n grade = str(report['학년'])\n kor = str(report['국어'])\n math = str(report['수학'])\n eng = str(report['영어'])\n hist = str(report['한국사'])\n sci = str(report['통합과학'])\n \n text_Name.delete(\"1.0\",END)\n text_Name.insert(\"end\", name)\n text_Grade.delete(\"1.0\",END)\n text_Grade.insert(\"end\", grade)\n text_Kor.delete(\"1.0\",END)\n text_Kor.insert(\"end\", kor)\n text_Math.delete(\"1.0\",END)\n text_Math.insert(\"end\", math)\n text_Eng.delete(\"1.0\",END)\n text_Eng.insert(\"end\", eng)\n text_Hist.delete(\"1.0\",END)\n text_Hist.insert(\"end\", hist)\n text_Sci.delete(\"1.0\",END)\n text_Sci.insert(\"end\", sci)\n\ndef setTableItems() :\n treeTable.delete(*treeTable.get_children())\n for idx, report in enumerate(reports) :\n name = report['이름']\n grade = str(report['학년'])\n kor = str(report['국어'])\n math = str(report['수학'])\n eng = str(report['영어'])\n hist = str(report['한국사'])\n sci = str(report['통합과학'])\n treeTable.insert(\"\", 'end', iid = None, text=str(idx), values=[name, grade, kor, math, eng, hist, sci])\n \ndef insert_report() :\n name = text_Name.get(\"1.0\", END)\n grade = int(text_Grade.get(\"1.0\", END))\n kor = int(text_Kor.get(\"1.0\", END))\n math = int(text_Math.get(\"1.0\", END))\n eng = int(text_Eng.get(\"1.0\", END))\n hist = int(text_Hist.get(\"1.0\", END))\n sci = int(text_Sci.get(\"1.0\", END))\n report = { '이름' : name, '학년' : grade, '국어' : kor, '수학' : math, '영어' : eng, '한국사' : hist, '통합과학' : sci}\n reports.append(report)\n setTableItems()\n\ndef update_report() :\n global selected_index\n name = text_Name.get(\"1.0\", END)\n grade = int(text_Grade.get(\"1.0\", END))\n kor = int(text_Kor.get(\"1.0\", END))\n math = int(text_Math.get(\"1.0\", END))\n eng = int(text_Eng.get(\"1.0\", END))\n hist = int(text_Hist.get(\"1.0\", END))\n sci = int(text_Sci.get(\"1.0\", END))\n selectedItem = reports[selected_index]\n selectedItem['이름'] = name\n selectedItem['학년'] = grade\n selectedItem['국어'] = kor\n selectedItem['수학'] = math\n selectedItem['영어'] = eng\n selectedItem['한국사'] = hist\n selectedItem['통합과학'] = sci\n setTableItems()\n \n \ndef delete_report() :\n global selected_index\n reports.pop(selected_index)\n setTableItems()\n\n\nwindow = Tk()\nwindow.title(\"성적관리 프로그램 v1.0\")\nwindow.geometry(\"1000x600\")\nwindow.resizable(0,0)\ntitle = \"9월 모의고사 성적결과\"\ntitle_feature = Label(window, text = title, font = (\"Noto Sans KR Black\", 20))\ntitle_feature.pack(padx = 10, pady = 15) # 위치\n\n#성적관리 화면에 표현\ntreeTable = ttk.Treeview(window)\ntreeTable[\"columns\"] = (\"name\", \"grade\", \"kor\", \"math\", \"eng\", \"hist\", \"sci\")\ntreeTable.column(\"#0\", width = 50)\ntreeTable.column(\"name\", width = 100)\ntreeTable.column(\"grade\", width = 50)\ntreeTable.column(\"kor\", width = 100)\ntreeTable.column(\"math\",width = 100)\ntreeTable.column(\"eng\", width = 100)\ntreeTable.column(\"hist\", width = 100)\ntreeTable.column(\"sci\", width = 100)\n\n# treeStationfares에는 순번, 정류장, 요금 표시\ntreeTable.heading(\"#0\", text=\"No.\")\ntreeTable.heading(\"name\", text=\"이름\")\ntreeTable.heading(\"grade\", text=\"학년\")\ntreeTable.heading(\"kor\", 
text=\"국어\")\ntreeTable.heading(\"math\", text=\"수학\")\ntreeTable.heading(\"eng\", text=\"영어\")\ntreeTable.heading(\"hist\", text=\"한국사\")\ntreeTable.heading(\"sci\", text=\"통합과학\")\n\ntreeTable.place(x = 100, y = 100, width=800, height=200)\n\ntreeTable.bind(\"<>\", report_selected)\n\n# button\nbtn_Insert = Button(window, text = \"추가\", command = insert_report, font = (\"Noto Sans KR Medium\", 14))\nbtn_Insert.place(x = 100, y = 350, width = 200, height = 30)\nbtn_Update = Button(window, text = \"수정\", command = update_report, font = (\"Noto Sans KR Medium\", 14))\nbtn_Update.place(x = 400, y = 350, width = 200, height = 30)\nbtn_Delete = Button(window, text = \"삭제\", command = delete_report, font = (\"Noto Sans KR Medium\", 14))\nbtn_Delete.place(x = 700, y = 350, width = 200, height = 30)\n\n# label\nlabel_Name = Label(window, text = \"이름\")\nlabel_Name.place(x = 150, y = 430, width = 50, height = 25)\nlabel_Grade = Label(window, text = \"학년\")\nlabel_Grade.place(x = 250, y = 430, width = 50, height = 25)\nlabel_Kor = Label(window, text = \"국어\")\nlabel_Kor.place(x = 350, y = 430, width = 50, height = 25)\nlabel_Math = Label(window, text = \"수학\")\nlabel_Math.place(x = 450, y = 430, width = 50, height = 25)\nlabel_Eng = Label(window, text = \"영어\")\nlabel_Eng.place(x = 550, y = 430, width = 50, height = 25)\nlabel_Hist = Label(window, text = \"한국사\")\nlabel_Hist.place(x = 650, y = 430, width = 50, height = 25)\nlabel_Sci = Label(window, text = \"공통과학\")\nlabel_Sci.place(x = 750, y = 430, width = 50, height = 25)\n\ntext_Name = Text(window, width = 13, height = 1)\ntext_Name.place(x = 130, y = 460)\ntext_Grade = Text(window, width = 4, height = 1)\ntext_Grade.place(x = 260, y = 460)\ntext_Kor = Text(window, width = 8, height = 1)\ntext_Kor.place(x = 345, y = 460)\ntext_Math = Text(window, width = 8, height = 1)\ntext_Math.place(x = 445, y = 460)\ntext_Eng = Text(window, width = 8, height = 1)\ntext_Eng.place(x = 545, y = 460)\ntext_Hist = Text(window, width = 8, height = 1)\ntext_Hist.place(x = 645, y = 460)\ntext_Sci = Text(window, width = 8, height = 1)\ntext_Sci.place(x = 745, y = 460)\n\n\nsetTableItems()\n\nwindow.mainloop()\n\n","sub_path":"성적관리프로그램/reports.py","file_name":"reports.py","file_ext":"py","file_size_in_byte":6385,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"537928760","text":"s = input().split(', ')\nnums = s[0][8:-1].split(',')\ntarget = int(s[1][9])\n\nnumsLeft = nums[:len(nums) // 2]\nnumsRight = nums[(len(nums) // 2) + 1:]\n\nrealNums = []\nflag = ''\nflag1 = False\n\nif int(numsLeft[0]) <= target:\n realNums = numsLeft\n flag = 'left'\nelse:\n realNums = numsRight\n flag = 'right'\n\nfor index in range(len(realNums)):\n if int(realNums[index]) == target:\n if flag == 'left':\n print(index)\n else:\n print(index + (len(nums) // 2) + 1)\n flag1 = True\n break\n\nif not flag1:\n print(-1)","sub_path":"Code/CodeRecords/2449/48117/245716.py","file_name":"245716.py","file_ext":"py","file_size_in_byte":564,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"626442589","text":"import MeCab\nimport re\n\n# 형태소 분석\ndef start_mecab(sent):\n m = MeCab.Tagger()\n te = m.parse(sent)\n return te\n # tagger = Mecab()\n # print(tagger.pos('혼성단체(hybrid entity)혼성비대칭 거래(hybrid mismatch arrangements)구나 2018년 5월 베이징에서 열린 ISO/TC 184 연례회의(Super Meeting)에서는 스마트 제조가 주요 화제였다.'))\n\n\n\n\n# 단어, 품사 구별\ndef words_morph(sent):\n sent = re.sub(r'(\\,\\*)*(\\n|\\t)','\\n', sent)\n 
sent = sent.split('\\n')\n \n return sent\n\n\n\n\n# dict_list에 구별해서 단어, 형태소 각각 넣어주기\n# raw_mor 한 문장을 쪼개서 짝수 - 단어, 홀수 - 형태소 \ndef words_mors(raw_mor):\n key = ''\n value = ''\n word_mor_dict = dict()\n words_list = list()\n words_one_str = str()\n morphemes_list = list()\n morphemes_one_str = str()\n\n for i in range(len(raw_mor)):\n # print(i+1)\n # 짝수\n if i % 2 == 0:\n key = raw_mor[i]\n # print(key)\n words_list.append(key)\n words_one_str += key\n # 홀수\n else: \n # wecab으로 뽑아낸 형태소의 값 1개만 뽑아주기 위해서\n value = raw_mor[i].split(',')\n value = value[0]\n # print(value)\n morphemes_list.append(value)\n morphemes_one_str += value\n \n word_mor_dict[key] = value\n return words_list, morphemes_list, words_one_str, morphemes_one_str\n\n\n\n\n# 패턴찾아서 리스트에서 찾기\ndef find_mor_pattern(morphemes_one_str):\n # mor_pattern = '(NNG|NNP)?(NNG|NNP)?(NNG|NNP)?(SSO)?(SL)(SY)?(SL)?(SY)?(SL)?(SY)?(SL)?(SSC)?'\n # mor_pattern = '(NNG|NNP)?(NNG|NNP)?(NNG|NNP)?(SSO)(SL)(SY)?(SL)?(SY)?(SL)?(SY)?(SL)?(SSC)' #괄호 있어야만함\n # mor_pattern = '(XPN|XSV)?(NNG|NNP)(XSN|XSV|XSA)?(XPN|XSV)?(NNG|NNP)?(XSN|XSV|XSA)?(XPN|XSV)?(NNG|NNP)?(XSN|XSV|XSA)?(SSO)?(SL)(SY)?(SL)?(SY)?(SL)?(SY)?(SL)?(SSC)?'\n # mor_pattern = '(XPN|XSV)?(NNG|NNP)(XSN|XSV|XSA)?(XPN|XSV)?(NNG|NNP)?(XSN|XSV|XSA)?(XPN|XSV)?(NNG|NNP)?(XSN|XSV|XSA)?(SSO)(SL)(SY)?(SL)?(SY)?(SL)?(SY)?(SL)?(SSC)'#괄호 있어야만함\n mor_pattern = '(XPN|XSV)?(ETN)?(NNG|NNP)(XSN|XSV|XSA)?(XPN|XSV)?(ETN)?(JX)?(NNG|NNP)?(XSN|XSV|XSA)?(XPN|XSV)?(ETN)?(NNG|NNP)?(XSN|XSV|XSA)?(JKO)?(SSO)(SL)(SY)?(SC)?(SL)?(SY)?(SL)?(SY)?(SL)?(SL)?(SL)?(SC)?(SL)?'#괄호 있어야만함\n \n mor_match_pre = re.findall(mor_pattern, morphemes_one_str, flags=0)\n mor_match_list= list()\n # ''없애주기\n for i in range(len(mor_match_pre)):\n sample = [i for i in mor_match_pre[i] if len(i) >= 1 ]\n mor_match_list.append(sample)\n print('mor_match_list:', mor_match_list)\n return mor_match_list\n\n\n\n\n# 형태소 패턴과 일치하는 단어 찾아서 추출\ndef find_word(mor_match_list, words_list, morphemes_list):\n word_match_list = list()\n for mor_match_idx in range(len(mor_match_list)):\n # print(len(morphemes))\n # print(len(mor_match[mor_match_idx]))\n for i in range(0, len(morphemes_list)-len(mor_match_list[mor_match_idx])):\n comparison = [morphemes_list[j] for j in range(i, i+len(mor_match_list[mor_match_idx]))]\n # print(comparison)\n if comparison == mor_match_list[mor_match_idx]:\n # print(words[i:i+len(mor_match[mor_match_idx])])\n word_match_list.append(words_list[i:i+len(mor_match_list[mor_match_idx])])\n \n print('word_match_list:', word_match_list)\n return word_match_list\n\n \ndef find_pattern_show_words(sent):\n # 형태소 분석\n te = start_mecab(sent)\n\n # 단어, 품사 구별\n raw_mor = words_morph(te)\n \n # 형태소 패턴과 일치하는 것 찾아서 추출\n words_list, morphemes_list, words_one_str, morphemes_one_str = words_mors(raw_mor)\n\n # 형태소 패턴찾아서 리스트에서 찾기\n mor_match_list = find_mor_pattern(morphemes_one_str)\n \n # 형태소 패턴과 일치하는 단어 찾아서 추출\n word_match_list = find_word(mor_match_list, words_list, morphemes_list)\n \n mor_match_list_str = list()\n\n for mm in word_match_list:\n mmp = ''\n for m in range(len(mm)):\n mmp += mm[m] + '-'\n mor_match_list_str.append(mmp)\n # print('mor_match_list_str: ', mor_match_list_str)\n\n return te, word_match_list, mor_match_list_str\n\n\ndef isKorean(single_word):\n ko = re.compile('[ㄱ-ㅣ가-힣]')\n return bool(ko.match(single_word))\n\ndef isEnglish(single_word):\n en = re.compile('[a-zA-Z]')\n return bool(en.match(single_word))\n\n\ndef make_str(word_matched_list):\n ko_words = list()\n en_words = list()\n for single_list in word_matched_list:\n # 
print(single_list)\n ko_word = str()\n en_word = str()\n for single_word in single_list:\n # 한글 만들어주기\n if isKorean(single_word) == True:\n ko_word += single_word\n # 영어 만들어주기\n elif isEnglish(single_word) == True:\n en_word += single_word + ' '\n ko_words.append(ko_word)\n en_words.append(en_word)\n # print(ko_words, '-', en_words)\n return ko_words, en_words\n\n","sub_path":"18_Natural_Processing/NIA_DICT_2/mecab.py","file_name":"mecab.py","file_ext":"py","file_size_in_byte":5220,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"496403944","text":"#!/usr/bin/env python3\n\nimport copy\nimport os\nimport os.path as osp\nimport time\nfrom typing import List, NamedTuple, Optional, Tuple\n\nimport torch\nimport torch_quiver as qv\nfrom ogb.nodeproppred import PygNodePropPredDataset\n\nfrom ogbn_products_sage.cuda_sampler import CudaNeighborSampler\n\n\ndef info(t, name=None):\n msg = ''\n if name:\n msg += name\n msg += ' ' + str(t.type())\n msg += ' ' + str(t.shape)\n print(msg)\n\n\ndef load_dataset():\n home = os.getenv('HOME')\n data_dir = osp.join(home, '.pyg')\n root = osp.join(data_dir, 'data', 'products')\n dataset = PygNodePropPredDataset('ogbn-products', root)\n split_idx = dataset.get_idx_split()\n data = dataset[0]\n return data, split_idx\n\n\ndef main():\n data, split_idx = load_dataset()\n train_idx = split_idx['train']\n sampler = CudaNeighborSampler(\n data.edge_index,\n node_idx=train_idx,\n sizes=[15, 10, 5],\n batch_size=1024,\n )\n\n for idx, (batch_size, n_id, adjs) in enumerate(sampler):\n print('#%d' % (idx))\n\n\nmain()\n","sub_path":"benchmarks/bench-pyg-product.py","file_name":"bench-pyg-product.py","file_ext":"py","file_size_in_byte":1058,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"573667762","text":"import urllib.request\r\nimport json\r\nimport sys\r\n\r\ndef subCounter():\r\n name = input(\"Enter username: \")\r\n if name != \"stop\":\r\n try:\r\n key = \"your_key\"\r\n data = urllib.request.urlopen(\"https://www.googleapis.com/youtube/v3/channels?part=statistics&forUsername=\" + name + \"&key=\" + key).read()\r\n subs = json.loads(data)[\"items\"][0][\"statistics\"][\"subscriberCount\"]\r\n\r\n print(name + \" has \" + \"{:,d}\".format(int(subs)) + \" subscribers!\\n\")\r\n except IndexError:\r\n print(\"\\n\" + name + \" is not a valid entry. Try another name. 
\")\r\n subCounter()\r\n else:\r\n sys.exit()\r\n\r\nwhile True:\r\n subCounter()\r\n","sub_path":"SubscriberCount.py","file_name":"SubscriberCount.py","file_ext":"py","file_size_in_byte":690,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"349008492","text":"\n\nfrom Prac_08.unreliable_car import UnreliableCar\n\n\ndef main():\n \"\"\"Test UnreliableCars.\"\"\"\n\n # create cars with different reliabilities\n car1 = UnreliableCar(\"Car 1\", 100, 99)\n car2 = UnreliableCar(\"Car 2\", 100, 1)\n\n # to see if the car moves\n car1.drive(50)\n car2.drive(50)\n print(car1)\n print(car2)\n\n car1.drive(30)\n car2.drive(30)\n print(car1)\n print(car2)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"Prac_08/unreliable_car_test.py","file_name":"unreliable_car_test.py","file_ext":"py","file_size_in_byte":441,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"220235022","text":"import json\nimport re\nfrom os import path\n\nimport markovify\nimport nltk\nimport requests\nimport pinhook.plugin\n\nclass POSifiedText(markovify.Text):\n def word_split(self, sentence):\n words = re.split(self.word_split_pattern, sentence)\n words = [w for w in words if len(w) > 0]\n words = [\":-:\".join(tag) for tag in nltk.pos_tag(words)]\n return words\n\n def word_join(self, words):\n sentence = \" \".join(word.split(\":-:\")[0] for word in words)\n return sentence\n\nebooksdir = path.join(path.dirname(path.abspath(__file__)), 'ebooks')\n\ndef generate_message(ebook):\n with open(path.join(ebooksdir, ebook)) as e:\n model = POSifiedText.from_json(json.load(e))\n return pinhook.plugin.message(model.make_short_sentence(512))\n\nwith open(path.join(ebooksdir, 'evil.json')) as e:\n evil = POSifiedText.from_json(json.load(e))\n\n@pinhook.plugin.register('!cyber')\ndef cyber(msg):\n out = requests.get('http://cyber.archangelic.space/snippet').content.decode()\n return pinhook.plugin.message(out)\n\n@pinhook.plugin.register('!lordmarkov')\ndef lordmarkov(msg):\n out = 'If I Ever Become an Evil Overlord: '\n out += evil.make_short_sentence(476)\n return pinhook.plugin.message(out)\n\n@pinhook.plugin.register('!bitcoin')\ndef btc(msg):\n return generate_message('btc.json')\n\n@pinhook.plugin.register('!lisp')\ndef lisp(msg):\n return generate_message('lisp.json')\n\n@pinhook.plugin.register('!naked')\ndef naked(msg):\n return generate_message('naked.json')\n","sub_path":"plugins/ebooks.py","file_name":"ebooks.py","file_ext":"py","file_size_in_byte":1512,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"294077731","text":"import logging\n\nfrom ask_sdk_core.skill_builder import SkillBuilder\nfrom ask_sdk_core.dispatch_components import (\n AbstractRequestHandler, AbstractExceptionHandler,\n AbstractRequestInterceptor, AbstractResponseInterceptor)\nfrom ask_sdk_core.utils import is_request_type, is_intent_name\nfrom ask_sdk_core.handler_input import HandlerInput\n\nfrom ask_sdk_model.ui import SimpleCard\nfrom ask_sdk_model import Response\n\nimport boto3\nfrom boto3 import resource\n\nSKILL_NAME = \"Favorite Color\"\n\nsb = SkillBuilder()\nlogger = logging.getLogger()\n\nclass DefaultHandler(AbstractRequestHandler):\n\n\tdef can_handle(self, handler_input):\n\t\treturn(is_request_type(\"LaunchRequest\")(handler_input))\n\t\t\n\tdef handle(self, handler_input):\n\t\tspeech = \"You can ask me to remember a color or to tell you what your favorite color 
is\"\n\t\t\n\t\thandler_input.response_builder.speak(speech).set_card(\n\t\t\tSimpleCard(SKILL_NAME, speech))\n\t\treturn handler_input.response_builder.response\n\t\t\nclass SetFavoriteColorHandler(AbstractRequestHandler):\n\t\n\tdef can_handle(self, handler_input):\n\t\treturn(is_intent_name(\"SetFavoriteColor\")(handler_input))\n\t\t\n\tdef handle(self, handler_input):\n\t\tslots = handler_input.request_envelope.request.intent.slots\n\t\n\t\tif 'FavoriteColor' in slots:\n\t\t\tfavoriteColor = slots['FavoriteColor'].value\n\t\t\t\n\t\t\tif favoriteColor is not None:\n\t\t\n\t\t\t\thandler_input.attributes_manager.session_attributes['favoriteColor'] = favoriteColor;\n\t\t\t\t\t\n\t\t\t\tspeech = \"Got it. Your favorite color is \" + handler_input.attributes_manager.session_attributes['favoriteColor']\n\t\t\t\treprompt = \"You can ask me what your favorite color is\"\n\t\t\t\t\n\t\t\telse:\n\t\t\t\tspeech = \"Sorry, we had an issue\"\n\t\t\t\treprompt = \"Try to ask me to remember another favorite color\"\n\t\t\n\t\thandler_input.response_builder.speak(speech).set_card(\n\t\t\tSimpleCard(SKILL_NAME, speech)).ask(reprompt)\n\t\treturn handler_input.response_builder.response\n\t\t\nclass GetFavoriteColorHandler(AbstractRequestHandler):\n\t\n\tdef can_handle(self, handler_input):\n\t\treturn(is_intent_name(\"GetFavoriteColor\")(handler_input))\n\t\t\n\tdef handle(self, handler_input):\n\t\tif 'favoriteColor' in handler_input.attributes_manager.session_attributes:\n\t\t\tmyFavoriteColor = handler_input.attributes_manager.session_attributes['favoriteColor']\n\t\t\tspeech = \"Your favorite color is \" + myFavoriteColor\n\t\t\t\n\t\telse:\n\t\t\tspeech = \"I don't think I know your favorite color\"\n\n\t\thandler_input.response_builder.speak(speech).set_card(\n\t\t\tSimpleCard(SKILL_NAME, speech))\n\t\treturn handler_input.response_builder.response\n\nclass HelpHandler(AbstractRequestHandler):\n\t\n\tdef can_handle(self, handler_input):\n\t\treturn(is_request_type(\"LaunchRequest\")(handler_input))\n\t\t\n\tdef handle(self, handler_input):\n\t\tspeech = \"You can ask me to remember a color or to tell you what your favorite color is\"\n\t\t\n\t\thandler_input.response_builder.speak(speech).set_card(\n\t\t\tSimpleCard(SKILL_NAME, speech))\n\t\treturn handler_input.response_builder.response\n\t\t\nclass CancelOrStopHandler(AbstractRequestHandler):\n\n\tdef can_handle(self, handler_input):\n\t\treturn(is_request_type(\"LaunchRequest\")(handler_input))\n\t\t\n\tdef handle(self, handler_input):\n\t\tspeech = \"Goodbye\"\n\t\t\n\t\thandler_input.response_builder.speak(speech).set_card(\n\t\t\tSimpleCard(SKILL_NAME, speech))\n\t\treturn handler_input.response_builder.response\n\nsb.add_request_handler(DefaultHandler())\nsb.add_request_handler(SetFavoriteColorHandler())\nsb.add_request_handler(GetFavoriteColorHandler())\nsb.add_request_handler(HelpHandler())\nsb.add_request_handler(CancelOrStopHandler())\n\nlambda_handler = sb.lambda_handler()","sub_path":"persistenceColors.py","file_name":"persistenceColors.py","file_ext":"py","file_size_in_byte":3544,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"175950172","text":"#!/usr/bin/env python3\n\nimport sys\nimport math\nfrom magn import slopes_preT as pt\nfrom magn import include_Rotations as inc_rot\n\n\ndef helper(err):\n if err==0:\n help_msg=\"\"\" determine relative distance from regions to natural water sources\n optional arguments:\n -l : ./underserved -l INT,INT #--lim|--limiter INT,INT : 
limit the matrice size\n -f : full faux dada\n -h : help prompt\n ex:\n ./underserved -l 8,8 # 8x8 limited region\n \"\"\"\n print(help_msg)\n elif int(err)==1:\n err_msg=\"\"\" err : {} : {} : your limiting boundary failed\n please try again supplying two integers seperated by a comma\n -l INT,INT\n ex.\n ./underserved -l 8,8\n or use -h for the help prompt\n \"\"\".format(err,sys.argv[2])\n print(err_msg)\n elif int(err)==2:\n err_msg=\"\"\" err : {} : {} : bad argument\n optional arguments:\n -l : ./underserved -l INT,INT #--lim|--limiter INT,INT : limit the matrice size\n -f : full faux dada\n -h : help prompt\n \"\"\".format(err,sys.argv[1])\n print(err_msg)\n else:\n helper(0)\n exit(0)\n\ndef f_fromFile():\n #input from file\n mat=[]\n fi=open(\"../dada/faux_bloc\",\"r\")\n for line in fi.readlines():\n line=line.strip()\n j=[]\n for ch in line:\n if ch==\"#\":\n j.append(0)\n else: #ch==\"_\"\n j.append(1)\n mat.append(j)\n return mat\n\ndef f_limiter(lim,mat):\n #BOILERPLATE_shrink the size of the matrice for testing\n lim_mat=[]\n di=lim[0]\n dj=lim[1]\n for i in mat[:di]:\n lim_j=[]\n for j in i[:dj]:\n lim_j.append(j)\n lim_mat.append(lim_j)\n return lim_mat\n\n\ndef f_limiter_chk(mat):\n #BOILERPLATE_provide cli control of limiter\n l_arg=len(sys.argv)\n if l_arg>1:\n if sys.argv[1]==\"-l\" and l_arg == 3:\n spl=sys.argv[2].split(\",\")\n dim=[]\n dim.append(int(spl[0]))\n dim.append(int(spl[1]))\n mat=f_limiter(dim,mat)\n return mat\n\n\n### stdout >>>\ndef out_al(bnd,ar):\n #stdout print statement evenly displays list elements\n #based on the length of the first element\n # add to any elements until all elements are the same length as the first\n ml=len(str(bnd))\n sar=\"\"\n for i in ar:\n si=\" \"*(ml-len(str(i)))+str(i)\n sar=sar+\" \"+str(si)\n return sar\n\n\ndef out_mat(verbose,mat,bound):\n #stdout print matrix with evenly spaced elements\n print()\n if verbose==True:\n for _axis in mat:\n print(out_al(bound,_axis))\n\n### stdout <<<\n\n\n\ndef quad_bounds(mat,i,j):\n #determines mat bounds based on planar quadrant\n qb=True\n mat_dim=[len(mat)-1,len(mat[0])-1]\n if i<0 or j<0 or j>mat_dim[0] or i>mat_dim[1]:\n qb=False\n return qb\n\n\ndef branch(mat,i,j,magn):\n #searches outward from current position in branching pattern\n mult=1 #vector magnitude multiplier\n all_dir=[]\n all_branch_slopes=inc_rot(pt(magn)[0])\n longest_search=0\n for s_vec in all_branch_slopes:\n mult=1\n found_dist=0\n sl_i=i+(s_vec[0]*(mult))\n sl_j=j+(s_vec[1]*(mult))\n qb=quad_bounds(mat,sl_i,sl_j)\n while qb==True:\n found=chk_waterSource(mat,sl_i,sl_j)\n if found==True:\n svi=(s_vec[0]*(mult)) #vector magnitude from i,j to water source\n svj=(s_vec[1]*(mult)) #vector magnitude from i,j to water source\n found_dist=math.sqrt(svi**2+svj**2)\n all_dir.append(found_dist)\n qb=False\n else:\n mult+=1\n sl_i=i+(s_vec[0]*(mult))\n sl_j=j+(s_vec[1]*(mult))\n qb=quad_bounds(mat,sl_i,sl_j)\n if qb==False:\n svi=(s_vec[0]*(mult-1)) #use most recent usable multiple\n svj=(s_vec[1]*(mult-1)) #use most recent usable multiple\n failed_dist=math.sqrt(svi**2+svj**2)\n if failed_dist>longest_search:\n longest_search=failed_dist\n if len(all_dir)==0:\n all_dir.append(longest_search)\n shortest=sorted(all_dir)[0] #shortest distance to water source\n return shortest\n\n\ndef chk_waterSource(mat,i,j):\n found=False\n if mat[j][i]==1:\n found=True\n return found\n\n\n#faux dada located at ../dada/faux_bloc\ndef faux(magn):\n mat=f_fromFile()\n mat=f_limiter_chk(mat)\n print(\" branch magnitude:\",magn)\n print(\" 
dims: {} , {}\".format(len(mat),len(mat[0])))\n\n print(\"mat:\")\n out_mat(True,mat,0)\n und_mat=[]\n for j,_i in enumerate(mat):\n und_j=[]\n for i,_j in enumerate(_i):\n if _j == 1:\n und_j.append(0) # distance to water at water is 0\n else:\n und_j.append(int(branch(mat,i,j,magn)))\n und_mat.append(und_j)\n print(\"und_mat:\")\n out_mat(True,und_mat,11)\n\ndef onit():\n branch_magnitude=4\n if len(sys.argv)>1:\n if sys.argv[1]==\"-l\":\n if len(sys.argv)>2:\n for ea in sys.argv[2].split(\",\"):\n try:\n int(ea)\n except:\n helper(1.1)\n else:\n helper(1.0)\n faux(branch_magnitude)\n elif sys.argv[1]==\"-f\":\n faux(branch_magnitude)\n elif sys.argv[1]==\"-h\":\n helper(0)\n else:\n helper(2)\n else:\n helper(0)\n\nif __name__ == \"__main__\":\n onit()\n\n#./underserved -l INT,INT #--lim|--limiter INT,INT : limit the matrice size\n","sub_path":"scripts/underserved.py","file_name":"underserved.py","file_ext":"py","file_size_in_byte":5988,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"348833019","text":"from conans.errors import ConanException\n\n\ndef msbuild_verbosity_cmd_line_arg(conanfile):\n verbosity = conanfile.conf.get(\"tools.microsoft.msbuild:verbosity\")\n if verbosity:\n if verbosity not in (\"Quiet\", \"Minimal\", \"Normal\", \"Detailed\", \"Diagnostic\"):\n raise ConanException(\"Unknown msbuild verbosity: {}\".format(verbosity))\n return '/verbosity:{}'.format(verbosity)\n\n\ndef msbuild_arch(arch):\n return {'x86': 'x86',\n 'x86_64': 'x64',\n 'armv7': 'ARM',\n 'armv8': 'ARM64'}.get(str(arch))\n\n\nclass MSBuild(object):\n def __init__(self, conanfile):\n self._conanfile = conanfile\n self.build_type = conanfile.settings.get_safe(\"build_type\")\n # if platforms:\n # msvc_arch.update(platforms)\n arch = conanfile.settings.get_safe(\"arch\")\n msvc_arch = msbuild_arch(arch)\n if conanfile.settings.get_safe(\"os\") == \"WindowsCE\":\n msvc_arch = conanfile.settings.get_safe(\"os.platform\")\n self.platform = msvc_arch\n\n def command(self, sln, targets=None):\n cmd = ('msbuild \"%s\" /p:Configuration=%s /p:Platform=%s'\n % (sln, self.build_type, self.platform))\n\n verbosity = msbuild_verbosity_cmd_line_arg(self._conanfile)\n if verbosity:\n cmd += \" {}\".format(verbosity)\n\n maxcpucount = self._conanfile.conf.get(\"tools.microsoft.msbuild:max_cpu_count\",\n check_type=int)\n if maxcpucount:\n cmd += \" /m:{}\".format(maxcpucount)\n\n if targets:\n if not isinstance(targets, list):\n raise ConanException(\"targets argument should be a list\")\n cmd += \" /target:{}\".format(\";\".join(targets))\n\n return cmd\n\n def build(self, sln, targets=None):\n cmd = self.command(sln, targets=targets)\n self._conanfile.run(cmd)\n\n @staticmethod\n def get_version(_):\n return NotImplementedError(\"get_version() method is not supported in MSBuild \"\n \"toolchain helper\")\n","sub_path":"conan/tools/microsoft/msbuild.py","file_name":"msbuild.py","file_ext":"py","file_size_in_byte":2084,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"651250754","text":"#!/usr/bin/env python3\n\nimport subprocess as sp, sys, signal, json\n\nargs=sys.argv[1:]\np = sp.Popen(['coqtop'] + args, stdin=sp.PIPE)\n\nwhile True:\n s=json.loads(input())\n if s['signal']:\n p.send_signal(signal.SIGINT)\n else:\n s=s['message']\n p.stdin.write(bytes(s+'\\r\\n', 'UTF-8'))\n 
p.stdin.flush()\n","sub_path":"coq_over_ssh/labcoqtop_remote.py","file_name":"labcoqtop_remote.py","file_ext":"py","file_size_in_byte":334,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"314969384","text":"from django.contrib import admin\nfrom django.urls import path, include\nfrom . import views\n\napp_name = \"utilisateur\"\n\nurlpatterns = [\n path('', views.index),\n\n # all login methods\n path('login_page', views.login_page),\n path('login_process', views.login_process),\n\n path('logout', views.logout_process),\n\n # all register methods\n path('register_page', views.register_page),\n path('register', views.register),\n\n # all urls of user\n path('list_article', views.list_article),\n path('ajouter_article', views.ajouter_article),\n path('about', views.about),\n path('delete/', views.delete_article),\n path('ajax/', views.requete_ajax),\n path('update/', views.update_utilisateur)\n\n]\n","sub_path":"Utilisateur/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":734,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"411517655","text":"import cv2 as cv2\r\nimport time\r\nimport os\r\nimport numpy as np\r\n\r\nfrom pathlib import Path\r\nimport xml.etree.cElementTree as ET\r\nfrom PIL import Image\r\n\r\ndef create_labimg_xml(image_path, annotation_list):\r\n \r\n image_path = Path(image_path)\r\n img = np.array(Image.open(image_path).convert('RGB'))\r\n\r\n annotation = ET.Element('annotation')\r\n ET.SubElement(annotation, 'folder').text = str(image_path.parent.name)\r\n ET.SubElement(annotation, 'filename').text = str(image_path.name)\r\n ET.SubElement(annotation, 'path').text = str(image_path)\r\n\r\n source = ET.SubElement(annotation, 'source')\r\n ET.SubElement(source, 'database').text = 'Unknown'\r\n\r\n size = ET.SubElement(annotation, 'size')\r\n ET.SubElement(size, 'width').text = str (img.shape[1])\r\n ET.SubElement(size, 'height').text = str(img.shape[0])\r\n ET.SubElement(size, 'depth').text = str(img.shape[2])\r\n\r\n ET.SubElement(annotation, 'segmented').text = '0'\r\n\r\n for annot in annotation_list:\r\n tmp_annot = annot.split(',')\r\n cords, label = tmp_annot[0:-1], tmp_annot[-1]\r\n xmin, ymin, xmax, ymax = cords[0], cords[1], cords[2], cords[3]\r\n\r\n object = ET.SubElement(annotation, 'object')\r\n ET.SubElement(object, 'name').text = label\r\n ET.SubElement(object, 'pose').text = 'Unspecified'\r\n ET.SubElement(object, 'truncated').text = '0'\r\n ET.SubElement(object, 'difficult').text = '0'\r\n\r\n bndbox = ET.SubElement(object, 'bndbox')\r\n ET.SubElement(bndbox, 'xmin').text = str(xmin)\r\n ET.SubElement(bndbox, 'ymin').text = str(ymin)\r\n ET.SubElement(bndbox, 'xmax').text = str(xmax)\r\n ET.SubElement(bndbox, 'ymax').text = str(ymax)\r\n\r\n tree = ET.ElementTree(annotation)\r\n xml_file_name = f\"{image_path.parent}\\\\annotations\\\\{(image_path.name.split('.')[0]+'.xml')}\"\r\n tree.write(xml_file_name)\r\n\r\ndef img_segmentation(image):\r\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\r\n # reshape the image to a 2D array of pixels and 3 color values (RGB)\r\n pixel_values = image.reshape((-1, 3))\r\n # convert to float\r\n pixel_values = np.float32(pixel_values)\r\n # define stopping criteria\r\n criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 100, 0.2)\r\n # number of clusters (K)\r\n k = 5\r\n _, labels, (centers) = cv2.kmeans(pixel_values, k, None, criteria, 10, cv2.KMEANS_RANDOM_CENTERS)\r\n # 
convert back to 8 bit values\r\n centers = np.uint8(centers)\r\n # flatten the labels array\r\n labels = labels.flatten()\r\n # convert all pixels to the color of the centroids\r\n segmented_image = centers[labels.flatten()]\r\n # reshape back to the original image dimension\r\n segmented_image = segmented_image.reshape(image.shape)\r\n # disable only the cluster number 2 (turn the pixel into black)\r\n masked_image = np.copy(image)\r\n # convert to the shape of a vector of pixel values\r\n masked_image = masked_image.reshape((-1, 3))\r\n # color (i.e cluster) to disable\r\n cluster = 5\r\n masked_image[labels == cluster] = [0, 0, 0]\r\n masked_image[labels == 2] = [0, 0, 0]\r\n masked_image[labels == 3] = [255, 0, 0]\r\n # convert back to original shape\r\n masked_image = masked_image.reshape(image.shape)\r\n return masked_image\r\n\r\npath = \"D:/deeplearning learn/Social distancing/face-mask-detector/dataset_GMR/classification/without_mask/\"\r\ncv2Net = cv2.dnn.readNetFromTensorflow(\"D:/deeplearning learn/OrionEdgeSocialDistancingAPI/ModelGraph/PPE_Detection_FrozenGraph1/frozen_inference_graph_old.pb\", \"D:/deeplearning learn/OrionEdgeSocialDistancingAPI/ModelGraph/PPE_Detection_FrozenGraph1/PPE_detection.pbtxt\")\r\n# cv2Net = cv2.dnn.readNetFromTensorflow('D://deeplearning learn//Social distancing//blockdetection//covid-19//frozen_inference_graph.pb', 'D://deeplearning learn//Social distancing//blockdetection//covid-19//fasterRcnnmappbtxtfile.pbtxt')\r\n#cv2Net = cv2.dnn.readNetFromTensorflow('D:/deeplearning learn/OrionEdgeSocialDistancingAPI/ModelGraph/Mask_Detection_FrozenGraph/frozen_inference_graph3.pb', 'covid-19/mask_detection_pipeline_config/mask_detection_faster_rcnn_inception_v2_coco_9_6_20.pbtxt')\r\n#cv2Net = cv2.dnn.readNetFromTensorflow('D:/deeplearning learn/OrionEdgeSocialDistancingAPI/ModelGraph/Mask_Detection_FrozenGraph/frozen_inference_graph2.pb', 'covid-19/mask_detection_pipeline_config/mask_detection_faster_rcnn_inception_v2_coco_17_6_20.pbtxt')\r\n#cv2Net = cv2.dnn.readNetFromTensorflow('D:/deeplearning learn/OrionEdgeSocialDistancingAPI/ModelGraph/Mask_Detection_FrozenGraph/frozen_inference_graph1.pb', 'covid-19/mask_detection_pipeline_config/mask_detection_frozen_inference_graph20_5_20.pbtxt')\r\n#cv2Net = cv2.dnn.readNetFromModelOptimizer('C:/Users/Raju/Documents/Intel/OpenVINO/openvino_models/ir/intel/vehicle-license-plate-detection-barrier-0106/FP32/vehicle-license-plate-detection-barrier-0106.xml', 'C:/Users/Raju/Documents/Intel/OpenVINO/openvino_models/ir/intel/vehicle-license-plate-detection-barrier-0106/FP32/vehicle-license-plate-detection-barrier-0106.bin')\r\n# cap = cv2.VideoCapture(\"C:/Users/Raju/Videos/Captures/lucastv.mp4\")\r\n#cap = cv2.VideoCapture(\"D:/deeplearning learn/Social distancing/blockdetection/output.mp4\")\r\n#cap = cv2.VideoCapture(\"D:/deeplearning learn/pyreaserch/social-distance-detector/marico.mp4\")\r\n#cap = cv2.VideoCapture(\"D:/deeplearning learn/marico_demo/HUL3.mp4\")\r\n#cap = cv2.VideoCapture(\"rtsp://admin:HUL@2020@103.89.58.106:554/cam/realmonitor?channel=6&subtype=0\")\r\n#cap = cv2.VideoCapture(\"rtsp://admin:G#i@l@2019@10.66.0.116:554/cam/realmonitor?channel=10&subtype=0\")\r\ncap = cv2.VideoCapture(\"rtsp://admin:G#i@l@2019@10.66.0.191:554/cam/realmonitor?channel=10&subtype=0\")\r\n# cap = cv2.VideoCapture(0)\r\n#cap = cv2.VideoCapture(\"D:/deeplearning learn/pose_extractor_build/ActionAI/image_dir/all/sample.mp4\")\r\ntimeStamp=time.time()\r\nfpsFilt = 0\r\nfrmno = 1088\r\nfrmnumber = 0\r\nwhile 
cap.isOpened() :\r\n anotation_list = []\r\n ret, img = cap.read()\r\n frmnumber = frmnumber+1\r\n #img = cv2.imread('000023.jpg')\r\n \r\n img = cv2.resize(img,(1080,720),interpolation=cv2.INTER_CUBIC)\r\n #seg_frame = img_segmentation(img)\r\n rows = img.shape[1]\r\n cols = img.shape[0]\r\n if frmnumber%60==0:\r\n cv2Net.setInput(cv2.dnn.blobFromImage(img, size=(1080*2,720*2), swapRB=True, crop=False))\r\n #cv2Net.setInput(cv2.dnn.blobFromImage(img,scalefactor=2, size=(1080,720), swapRB=True, crop=False))\r\n cv2Out = cv2Net.forward()\r\n centroids = []\r\n boxes = []\r\n confidences = []\r\n classIDs = []\r\n for detection in cv2Out[0,0,:,:]:\r\n score = float(detection[2])\r\n class_id = int(detection[1])\r\n #class_id = int(np.argmax(score))\r\n confidence = score\r\n if score > 0.55 and (class_id == 0 or class_id == 1):\r\n #if score > 0.85 :\r\n left = int(detection[3] * rows)\r\n top = int(detection[4] * cols)\r\n right = int(detection[5] * rows)\r\n bottom = int(detection[6] * cols)\r\n # top = int(detection[3] * cols)\r\n # left = int(detection[4] * rows)\r\n # bottom = int(detection[5] * cols)\r\n # right = int(detection[6] * rows)\r\n \r\n #cv2.rotate(img,rotateCode=90) \r\n #cv2.imwrite(os.path.join(path,f'{str(frmno)}.jpg'),img[left:right, top:bottom])\r\n #cv2.imwrite(os.path.join(path,f'{str(frmno)}.jpg'),img)\r\n #cv2.rectangle(img, (int(left), int(top)), (int(right), int(bottom)), (0, 0, 255), thickness=2)\r\n # if class_id == 0 :\r\n # cv2.rectangle(img, (int(left), int(top)), (int(right), int(bottom)), (0, 0, 255), thickness=2)\r\n # cv2.putText(img, 'No mask', (int(left), int(top - 10)),cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)\r\n # #if class_id == 1:\r\n # else:\r\n # cv2.rectangle(img, (int(left), int(top)), (int(right), int(bottom)), (0, 0, 255), thickness=2)\r\n # cv2.putText(img, 'with mask', (int(left), int(top - 10)),cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)\r\n if class_id == 0 :\r\n #cv2.rectangle(img, (int(left), int(top)), (int(right), int(bottom)), (0, 255, 0), thickness=2)\r\n #cv2.putText(img, 'with_helmet', (int(left), int(top - 10)),cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)\r\n class_name=\"with_helmet\"\r\n coords = str(f\"{left},{top},{right},{bottom},with_helmet\")\r\n elif class_id == 1 :\r\n class_name=\"without_helmet\"\r\n #cv2.rectangle(img, (int(left), int(top)), (int(right), int(bottom)), (0, 0, 255), thickness=2)\r\n #cv2.putText(img, 'without_helmet', (int(left), int(top - 10)),cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)\r\n coords = str(f\"{left},{top},{right},{bottom},without_helmet\")\r\n anotation_list.append(coords)\r\n \r\n \r\n # update our list of bounding box coordinates,\r\n # centroids, and confidences\r\n classIDs.append(class_name)\r\n boxes.append([left, top, int(right), int(bottom)])\r\n confidences.append(float(confidence))\r\n idxs = cv2.dnn.NMSBoxes(boxes, confidences, 0.30, 0.3)\r\n # ensure at least one detection exists\r\n if len(idxs) > 0 :\r\n # loop over the indexes we are keeping\r\n for i in idxs.flatten():\r\n frmno = frmno+1\r\n # extract the bounding box coordinates\r\n (x, y) = (boxes[i][0], boxes[i][1])\r\n (w, h) = (boxes[i][2], boxes[i][3])\r\n # cv2.rectangle(img, (x, y), (x + w, y + h), (0,0,255), 2)\r\n #cv2.rectangle(img, (x, y), (w, h), (0,0,255), 2)\r\n # (startX, startY) = (max(0, int(x-(w-x)/2)), max(0, int(y-(h-y)/2)))\r\n # (endX, endY) = (min(rows - 1, int(w+(w-x)/2)), min(cols - 1, int(h+(h-y)/2)))\r\n # save_Frame=img[startY:endY, startX:endX]\r\n #save_Frame=seg_frame[y:h, 
x:w]\r\n save_Frame=img[y:h, x:w]\r\n # save_Frame=img[y:int(y+(h-y)/4), int(x+(w-x)/5):int(w-(w-x)/5)]\r\n # save_Frame = cv2.resize(save_Frame,(300,600),fx=4,fy=4,interpolation=cv2.INTER_CUBIC)\r\n \r\n # seg_frame = img_segmentation(save_Frame)\r\n # coords = str(f\"{int(x+(w-x)/5)},{y},{int(w-(w-x)/5)},{int(y+(h-y)/5)},with_helmet\")\r\n # anotation_list.append(coords)\r\n \r\n \r\n try:\r\n save_Frame= cv2.resize(save_Frame,(299,299))\r\n cv2.imwrite(os.path.join(path,f'{str(frmno)}.jpg'),save_Frame)\r\n \r\n except:\r\n continue\r\n # try:\r\n # pathh = path+classIDs[i]+'/'\r\n # save_Frame=img[y:h, x:w]\r\n # save_Frame = cv2.resize(save_Frame,(299,299),fx=4,fy=4,interpolation=cv2.INTER_CUBIC)\r\n # cv2.imwrite(os.path.join(pathh,f'{str(frmno)}.jpg'),save_Frame)\r\n \r\n # except:\r\n # continue\r\n\r\n dt=time.time()-timeStamp\r\n timeStamp=time.time()\r\n fps=1/dt\r\n fpsFilt=.9*fpsFilt + .1*fps\r\n #print(str(round(fps,1))+' fps')\r\n #cv2.putText(img,str(round(fpsFilt,1))+' fps',(0,30),cv2.FONT_HERSHEY_SIMPLEX,1,(0,0,255),2)\r\n cv2.imshow('img', img)\r\n #cv2.imwrite(os.path.join(path,f'{str(frmnumber)}.jpg'),img)\r\n #create_labimg_xml(os.path.join(path,f'{str(frmnumber)}.jpg'), anotation_list)\r\n cv2.resizeWindow('Frame',800,600)\r\n\r\n key = cv2.waitKey(1) & 0xFF\r\n\r\n # Press `q` to exit\r\n if key == ord(\"q\"):\r\n break\r\n\r\n# Clean\r\ncap.release()\r\ncv2.destroyAllWindows()","sub_path":"fasterRcnnInferencing.py","file_name":"fasterRcnnInferencing.py","file_ext":"py","file_size_in_byte":12045,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"473986134","text":"import time\nimport json\nimport sys\nimport tweepy\nfrom tweepy import Stream\nfrom tweepy import OAuthHandler\nfrom tweepy.streaming import StreamListener\nfrom pymongo import MongoClient\nfrom keys import *\n\n# keys\nstart_time = time.time() # grabs the system time\nkeyword_list = ['twitter'] # track list\nckey = consumer_key\nconsumer_secret = consumer_secret\naccess_token_key = access_token\naccess_token_secret = access_token_secret\n\n\nclass listener(StreamListener):\n def __init__(self, start_time, time_limit=82800):\n\n self.time = start_time\n self.limit = time_limit\n self.tweet_data = []\n\n def on_data(self, status):\n while (time.time() - self.time) < self.limit:\n tweet = json.loads(status)\n try:\n client = MongoClient('localhost', 27017)\n db = client['Washington_tweets']\n collection = db['twitter_collection']\n if tweet['coordinates'] is not None:\n collection.insert(tweet)\n return True\n # various exception handling blocks\n except KeyboardInterrupt:\n sys.exit()\n except AttributeError as e:\n print('AttributeError was returned, stupid bug')\n print(e)\n except tweepy.TweepError as e:\n print('Below is the printed exception')\n print(e)\n if '401' in e:\n # not sure if this will even work\n print('Below is the response that came in')\n print(e)\n time.sleep(60)\n else:\n# raise an exception if another status code was returned,we don't like other kinds\n time.sleep(60)\n except BaseException as e:\n print('failed ondata,', str(e))\n time.sleep(5)\n exit()\n\n def on_error(self, status):\n print(status)\n# Instance\nauth = OAuthHandler(ckey, consumer_secret) # Consumer keys\nauth.set_access_token(access_token_key, access_token_secret) # Secret Keys\n# initialize Stream object with a time out limit\ntwitterStream = Stream(auth, listener(start_time, time_limit=82800))\n# bounding box filter for 
Washington\ntwitterStream.filter(locations=[-124.84, 45.54, -116.92, 49.0])\n","sub_path":"Data_Collection/WashingtonListener.py","file_name":"WashingtonListener.py","file_ext":"py","file_size_in_byte":2311,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"88189751","text":"from django.utils.text import slugify\n\n\ndef unique_slug(value, instance):\n    slug = slugify(value)\n\n    queryset = instance.__class__._default_manager.all()\n    if instance.pk:\n        queryset = queryset.exclude(pk=instance.pk)\n\n    base = slug\n    next = 2\n    while queryset.filter(slug=slug):\n        # build each candidate from the original base so the numeric suffix does\n        # not accumulate across iterations (e.g. 'name-2-3')\n        slug = '%s-%s' % (base, next)\n        next += 1\n\n    return slug\n","sub_path":"common/utils/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":380,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"30605227","text":"\"\"\"\nContains all views for application\n\"\"\"\n\nfrom flask import Flask, jsonify\nfrom validators import User, Book, Library\n\napp = Flask(__name__)\napp.debug = True\n\nLIBRARY = Library()\n\n@app.route(\"/api/v1/<collection>/\", methods=['GET'])\ndef api_get_collection(collection=None):\n    \"\"\"Get all items in a collection\"\"\"\n    statement = LIBRARY.get(collection)\n    # return statement\n    return jsonify({collection: statement})\n
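\n# A minimal usage sketch for the route above, assuming Flask's built-in test\n# client (the 'books' collection name is an illustrative assumption, not\n# something this file defines):\n#\n#     with app.test_client() as client:\n#         client.get('/api/v1/books/')  # 'books' binds to the 'collection' argument\n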
\n@app.route(\"/\")\ndef index():\n    \"\"\"default routing provides reference to all routes\"\"\"\n    return 'Please follow links to make use of Mongo Database: ' \\\n           '1. /users/ - get all users; ' \\\n           '2. /users/<username>/ - get particular user'\n
\n# URIs should only be nouns. Discard 'GET' from uri!!!!!
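\n# (A hedged sketch of the noun-only style this comment points at; nothing in\n# this file is wired to it:\n#     @app.route(\"/users/<username>\", methods=['GET', 'POST', 'DELETE'])\n# the HTTP verb, not a /get/ or /post/ path segment, then selects the action.)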
\n@app.route(\"/users/get/\")\n@app.route(\"/users/get/<username>\")\ndef get_user(username=None):\n    \"\"\"get a specific user by username\"\"\"\n    if username:\n        user = User(username)\n        statement = user.get()\n    else:\n        statement = 'please provide a username'\n    return statement\n\n@app.route(\"/users/post/<username>/<firstname>/<lastname>\")\ndef post_user(username=None, firstname=None, lastname=None):\n    \"\"\"create a user\"\"\"\n    if username:\n        user = User(username)\n        statement = user.post(firstname, lastname)\n    else:\n        statement = 'no username provided'\n    return statement\n\n@app.route(\"/users/delete/\")\n@app.route(\"/users/delete/<username>\")\ndef delete_user(username=None):\n    \"\"\"delete a user, TODO: Unable to delete user while books are checked out\"\"\"\n    if username:\n        user = User(username)\n        statement = user.delete()\n    else:\n        statement = 'username missing'\n    return statement\n\n@app.route(\"/books/get/\")\n@app.route(\"/books/get/<title>\")\ndef get_book(title=None):\n    \"\"\"get a specific book\"\"\"\n    if title:\n        book = Book(title)\n        statement = book.get()\n    else:\n        statement = 'no title provided'\n    return statement\n\n@app.route(\"/books/post/\")\n@app.route(\"/books/post/<title>/<year>\")\ndef post_book(title=None, year=None, borrower=None):\n    \"\"\"post (create) a book\"\"\"\n    if title and year:\n        book = Book(title)\n        statement = book.post(year, borrower)\n    else:\n        statement = 'invalid'\n    return statement\n\n@app.route(\"/books/delete/\")\n@app.route(\"/books/delete/<title>\")\ndef delete_book(title=None):\n    \"\"\"# delete a book # TODO: Programmatically manipulate a book based on UUID\"\"\"\n    if title:\n        book = Book(title)\n        statement = book.delete()\n    else:\n        statement = 'title missing'\n    return statement\n\n@app.route(\"/<collection>/\")\ndef get_collection(collection=None):\n    \"\"\"Get all items in a collection\"\"\"\n    statement = LIBRARY.get(collection)\n    return statement\n\n@app.route(\"/<collection>/reset\")\ndef reset_collections(collection=None):\n    '''Delete all items in a collection'''\n    statement = LIBRARY.reset(collection)\n    return statement\n\n\n@app.route(\"/checkout/\")\n@app.route(\"/checkout/<title>/<username>\")\ndef checkout(title=None, username=None):\n    \"\"\"A user checks out an available copy of a book\"\"\"\n    if title and username:\n        statement = LIBRARY.checkout(title, username)\n    else:\n        statement = 'please provide username and title'\n    return statement\n\n@app.route('/checkin/')\n@app.route('/checkin/<title>/<username>')\ndef check_in(title=None, username=None):\n    \"\"\"A user checks in an available copy of a book\"\"\"\n    if title and username:\n        statement = LIBRARY.checkin(title, username)\n    else:\n        statement = 'please provide title and borrower'\n    return statement\n
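\n# A short, hedged usage sketch for the checkout/checkin pair above; 'dune' and\n# 'joe' are illustrative values, not data from this project:\n#\n#     with app.test_client() as client:\n#         client.get('/checkout/dune/joe')   # 'joe' borrows an available copy\n#         client.get('/checkin/dune/joe')    # 'joe' returns it\n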
\"email\": self.email,\n \"name\": self.name,\n \"gender\": self.gender,\n \"permissions\": self.permissions.dehydrated()\n }\n","sub_path":"eta/api/user.py","file_name":"user.py","file_ext":"py","file_size_in_byte":715,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"420191279","text":"import pygame\r\nclass Button:\r\n def __init__(self, text, x, y,width,height,color):\r\n self.text = text\r\n self.x = x\r\n self.y = y\r\n self.color = color\r\n self.width = width\r\n self.height = height\r\n self.show = True\r\n\r\n def draw(self,win):\r\n pygame.draw.rect(win,self.color,(self.x,self.y,self.width,self.height))\r\n font = pygame.font.SysFont('comicsans',int(self.width*30/100))\r\n text = font.render(self.text,1,(0,0,0))\r\n win.blit(text,(self.x + round(self.width/2) - round(text.get_width()/2),\\\r\n self.y + round(self.height/2) - round(text.get_height()/2)))\r\n\r\n def click(self,pos):\r\n x1 = pos[0]\r\n y1 = pos[1]\r\n\r\n if self.x <= x1 <= self.x + self.width and self.y <= y1 <= self.y + self.height and self.show:\r\n return True\r\n else:\r\n return False\r\n","sub_path":"button.py","file_name":"button.py","file_ext":"py","file_size_in_byte":903,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"448678810","text":"import time\n\nprint(\"\\tWelcome to the logical question test\")\n\nfirst = input(\"1. If A+b = 75 and A-b = 25 then A : B = __\\nanswer: \")\nsecond =input(\"2. 0,0,1,0,1,2,0,1,2,_ complete the missing one\\nanswer: \")\nthird = input(\"3. If a train running 60km from A-B take 8 hour, and B-A with speed 96km so how many hour did it take?\\nanswer: \")\nfourth = input(\"4. The ages of a mother and her 'graduate son' add up to 66. The mother's age is the son's age reversed. How old is the graduate son?\\nanswer: \")\nfifth = input(\"5. 
If 510 = 2, 372 = 24, 981 = 9 so 381 = __ ?\\nanswer: \")\nprint(\"we are processing your answer, please wait for a moment!....\\n\")\ntime.sleep(3)\ndef Answer(first,second,third,fourth,fifth):\n total = 0\n if first == \"2\":\n total += 20\n else:\n total -= 10\n \n if second == \"3\":\n total += 20\n else:\n total -= 10\n \n if third == \"5\":\n total += 20\n else:\n total -= 10\n \n if fourth == \"24\":\n total += 20\n else:\n total -= 10\n \n if fifth == \"27\":\n total += 20\n else:\n total -= 10\n return total\n \nscore = Answer(first,second,third,fourth,fifth)\nprint(score)\n\ndef result(score):\n\n if score == 20:\n return \"your rank is E\"\n elif score == 30:\n return \"your rank is E+\"\n elif score == 40:\n return \"your rank is D\"\n elif score == 50:\n return \"your rank is D+\"\n elif score == 60:\n return \"your rank is C\"\n elif score == 70:\n return \"your rank is C+\"\n elif score == 80:\n return \"your rank is B\"\n elif score == 90:\n return \"your rank is B+\"\n elif score == 100:\n return \"your rank is A\"\n else:\n return \"your rank is F\" \na = result(score)\nprint(\"Congratulations you have finished your test...\\n\")\nprint(a)\ninput(\"\\nPress enter to exit...\")\nvalue = 3\nwhile value > 0:\n print(value,\" second\")\n time.sleep(1)\n value -= 1\n","sub_path":"python/mini game/logical test.py","file_name":"logical test.py","file_ext":"py","file_size_in_byte":1958,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"601511242","text":"import numpy as np\n\nclass SVM:\n def __init__(self, epochs, rate=0.0001):\n self.epochs = epochs\n self.rate = rate\n self.current_epoch = 1\n\n self.w1 = np.float64(0)\n self.w2 = np.float64(0)\n self.b = np.float64(0)\n\n def regularization(self) -> float:\n return 1 / self.current_epoch\n\n def fit(self, data, classes):\n for epoch in range(self.current_epoch, self.epochs):\n self.current_epoch = epoch\n y_pred = (self.w1 * data[:, 0] + self.w2 * data[:, 1] + self.b) * classes\n\n m1_deriv = 0\n m2_deriv = 0\n b_deriv = 0\n\n for index, value in enumerate(y_pred):\n if value < 1:\n m1_deriv += data[index, 0] * classes[index]\n m2_deriv += data[index, 1] * classes[index]\n b_deriv += classes[index]\n\n self.w1 += self.rate * (m1_deriv - 2 * self.regularization() * self.w1)\n self.w2 += self.rate * (m2_deriv - 2 * self.regularization() * self.w2)\n self.b += self.rate * (b_deriv - 2 * self.regularization() * self.b)\n\n def predict(self, val1, val2):\n return self.w1 * val1 + self.w2 * val2 + self.b\n\n def print_weights(self):\n print(f\"w1: {self.w1} w2: {self.w2} B: {self.b}\")\n","sub_path":"app/svm/SVM.py","file_name":"SVM.py","file_ext":"py","file_size_in_byte":1317,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"595905899","text":"import numpy as np\nfrom scipy.special import ellipe, ellipk\nfrom scipy.constants import mu_0\nimport polarMeasurement as field\nimport math\nimport typeChecking as check\nimport cartesianMeasurement as xyz\n\nimport matplotlib.pyplot as graph\n\n\ndef fieldFromCurrentLoop(current, radius, R, Z):\n \"\"\"\n Calculates the field from a current loop according to http://archive.org/details/nasa_techdoc_20010038494\n on a grid given by (R, Z). In the NASA tech document, \"rho\" = sqrt(x**2 + y**2), which is just R. 
In this\n function, \"rho\" = sqrt(R**2 + Z**2) and k == k**2 (for easier coding), otherwise everything is identical (the constant \"rho\" just now also\n includes the contributions from Z**2).\n\n Depends on:\n - numpy library\n - scipy (special, constants) library\n - math library\n\n Calls:\n - checkInput_fieldFromCurrentLoop()\n\n Inputs:\n - current: The current in the loop in A\n - radius: The radius of the current loop in m\n - R: 2D grid of radial points to calculate the field on in m\n - Z: 2D grid of longitudinal points to calculate the field on in m\n\n Returns:\n - A tuple of (Br, Bz) in Tesla\n \"\"\"\n # check.fieldFromCurrentLoop(current, radius, R, Z)\n\n rho = np.sqrt(R**2 + Z**2)\n alpha = np.sqrt(radius**2 + rho**2 - 2*radius*R)\n beta = np.sqrt(radius**2 + rho**2 + 2*radius*R)\n k = 1 - (alpha**2)/(beta**2)\n\n C = (mu_0*current)/np.pi # Constant common to both equations for Br & Bz\n a = 1.0/(2*(alpha**2)*beta)\n b = Z/(2*(alpha**2)*beta*R)\n\n Bz = C * a * ((radius**2 - rho**2)*ellipe(k) + (alpha**2)*ellipk(k))\n Br = C * b * ((radius**2 + rho**2)*ellipe(k) - (alpha**2)*ellipk(k))\n\n Br[np.isnan(Br)] = 0\n Bz[np.isnan(Bz)] = 0\n\n Br[np.isinf(Br)] = 0 # elliptic integral of the first kind (ellipk) returns\n Bz[np.isinf(Bz)] = 0 # infinity when k = +1\n\n return Br, Bz\n\n\n\n\n\n\n\ndef makeCurrentLayer(numLoops, separation, startLoopPosition, current, radius, R, Z):\n \"\"\"\n Calculates the field from a layer of current loops by superposition of their individual fields.\n\n Calls:\n - checkInput_makeCurrentLayer()\n - fieldFromCurrentLoop()\n\n Inputs:\n - numLoops: Integer number of current loops in the layer\n - separation: Longitudinal separation between current loops in m\n - startLoopPosition: Longitudinal position of the first current loop in the sheet in m. All other loops proceed from this one in the direction of positive z (i.e. upstream to downstream).\n - current: The current in each current loop in A.\n - radius: The radius of each current loop in m.\n - R: 2D grid of radial points to calculate the field on in m\n - Z: 2D grid of longitudinal points to calculate the field on in m\n\n Returns:\n - A tuple of (Br, Bz) in Tesla\n \"\"\"\n # check.makeCurrentLayer(numLoops, separation, startLoopPosition, current, radius, R, Z)\n\n newR = R\n newZ = Z\n\n totalBr = np.zeros(newR.shape)\n totalBz = np.zeros(newZ.shape)\n\n offset = startLoopPosition - separation\n\n for loop in range(0, numLoops):\n offset += separation\n newZ = Z - offset\n newBr, newBz = fieldFromCurrentLoop(current, radius, newR, newZ)\n totalBr += newBr\n totalBz += newBz\n\n return totalBr, totalBz\n\n\n\n\ndef makeCoil(numLayers, numLoopsPerLayer, layerSeparation, loopSeparation, startPosition, current, minRadius, R, Z):\n \"\"\"\n Calculates the field from a coil by superposition of the field of its current layers.\n\n Calls:\n - checkInput_makeCoil()\n - makeCurrentLayer()\n\n Inputs:\n - numLayers: Integer number of current layers in the coil\n - numLoopsPerLayer: Integer number of current loops per current layer in the coil\n - layerSeparation: Radial separation between current layers in the coil in m\n - loopSeparation: Longitudinal separation between current loops in the coil in m\n - startPosition: Longitudinal position of the upstream edge of the coil (i.e. 
coil continues in the positive z direction)\n - current: The current through the coil in A\n - minRadius: The interior radius of the coil in m (or the radius of its smallest current loop in m)\n - R: 2D grid of radial points to calculate the field on in m\n - Z: 2D grid of longitudinal points to calculate the field on in m\n\n Returns:\n - A tuple of (Br, Bz) in Tesla\n \"\"\"\n # check.makeCoil(numLayers, numLoopsPerLayer, layerSeparation, loopSeparation, startPosition, current, minRadius, R, Z)\n\n totalBr = np.zeros(R.shape)\n totalBz = np.zeros(Z.shape)\n\n radius = minRadius - layerSeparation\n for layer in range(0, numLayers):\n radius += layerSeparation\n newBr, newBz = makeCurrentLayer(numLoopsPerLayer, loopSeparation, startPosition, current, radius, R, Z)\n totalBr += newBr\n totalBz += newBz\n\n return totalBr, totalBz\n\n\n\n\n\ndef makeMagnet(numCoils, numLayers, numLoopsPerLayer, layerSeparation, loopSeparation, startPosition, current, minRadius, R, Z):\n \"\"\"\n Calculates the field from a magnet by superposition of the field from its coils\n\n Calls:\n - checkInput_makeMagnet()\n - makeCoil()\n\n Inputs:\n - numCoils: Integer number of coils that make up the magnet\n - numLayers: List of integer layers for each coil in the magnet\n - numLoopsPerLayer: List of integer current loops per layer of each coil in the magnet\n - layerSeparation: List of radial layer separations, per coil in the magnet, in m\n - loopSeparation: List of longitudinal separations, per coil in the magnet, in m\n - startPosition: List of longitudinal positions of the upstream edge of each coil in m\n - current: List of the currents, per coil in the magnet, in A\n - minRadius: List of interior radii, per coil in the magnet, in m\n - R: 2D grid of radial points to calculate the field on in m\n - Z: 2D grid of longitudinal points to calculate the field on in m\n\n NB: List is defined as ([coil1Parameter, coil2Parameter, coil3Parameter, .......])\n\n Returns:\n - A tuple of (Br, Bz) in Tesla\n \"\"\"\n\n # check.makeMagnet(numCoils, numLayers, numLoopsPerLayer, layerSeparation, loopSeparation, startPosition, current, minRadius, R, Z)\n totalBr = np.zeros(R.shape)\n totalBz = np.zeros(Z.shape)\n\n for coil in range(0, numCoils):\n newBr, newBz = makeCoil(numLayers[coil], numLoopsPerLayer[coil], layerSeparation[coil], loopSeparation[coil], startPosition[coil], current[coil], minRadius[coil], R, Z)\n totalBr += newBr\n totalBz += newBz\n\n return totalBr, totalBz\n\n\n\n\ndef calcFieldOnAxis(numLayers, numLoopsPerLayer, layerSeparation, loopSeparation, startPosition, current, minRadius, z):\n \"\"\"\n Calculates the field along the axis (i.e. at r = 0) from a coil by superposition of the field from its current layers using a simple analytical formula.\n\n Calls:\n - checkInput_calcFieldOnAxis()\n\n Inputs:\n - numLayers: Integer number of current layers in the coil\n - numLoopsPerLayer: Integer number of current loops per current layer in the coil\n - layerSeparation: Radial separation between current layers in the coil in m\n - loopSeparation: Longitudinal separation between current loops in the coil in m\n - startPosition: Longitudinal position of the upstream edge of the coil (i.e. 
coil continues in the positive z direction)\n - current: The current through the coil in A\n - minRadius: The interior radius of the coil in m (or the radius of its smallest current loop in m)\n - z: 1D list of longitudinal points to calculate the field over in m\n\n Returns:\n - Bz in Tesla\n\n \"\"\"\n\n l = (numLoopsPerLayer-1)*loopSeparation\n n = numLoopsPerLayer/l\n Bz = []\n\n r = minRadius\n startOfCoil = startPosition\n endOfCoil = startPosition + l\n\n for x in z:\n B = 0.0\n r1 = r\n r2 = r + layerSeparation\n x2 = x - startOfCoil\n x1 = x - endOfCoil\n\n for layer in range(0, numLayers):\n const = (mu_0 * current * n) / (2 * (r2 - r1))\n a = np.sqrt(r2**2 + x2**2) + r2\n b = np.sqrt(r1**2 + x2**2) + r1\n term1 = np.log(a/b)\n\n c = np.sqrt(r2**2 + x1**2) + r2\n d = np.sqrt(r1**2 + x1**2) + r1\n term2 = np.log(c/d)\n\n B += const * (x2*term1 - x1*term2)\n r1 = r2\n r2 = r1 + layerSeparation\n\n Bz.append(B)\n\n\n return Bz\n\n\n\n\n\n\ndef calcMagnetFieldOnAxis(numCoils, numLayers, numLoopsPerLayer, layerSeparation, loopSeparation, startPosition, current, minRadius, z):\n \"\"\"\n Calculates the field along the axis (i.e. at r = 0) from a magnet by superposition of the field from its coils using a simple analytical formula.\n\n Calls:\n - checkInput_calcMagnetFieldOnAxis()\n - calcFieldOnAxis()\n\n Inputs:\n - numCoils: Integer number of coils that make up the magnet\n - numLayers: List of integer layers for each coil in the magnet\n - numLoopsPerLayer: List of integer current loops per layer of each coil in the magnet\n - layerSeparation: List of radial layer separations, per coil in the magnet, in m\n - loopSeparation: List of longitudinal separations, per coil in the magnet, in m\n - startPosition: List of longitudinal positions of the upstream edge of each coil in m\n - current: List of the currents, per coil in the magnet, in A\n - minRadius: List of interior radii, per coil in the magnet, in m\n - z: 1D list of longitudinal points to calculate the field over in m\n\n NB: List is defined as ([coil1Parameter, coil2Parameter, coil3Parameter, .......])\n\n Returns:\n - Bz in Tesla\n \"\"\"\n\n # check.calcMagnetFieldOnAxis(numCoils, numLayers, numLoopsPerLayer, layerSeparation, loopSeparation, startPosition, current, minRadius, z)\n\n Bz = np.zeros(z.shape)\n for coil in range(0, numCoils):\n Bz += calcFieldOnAxis(numLayers[coil], numLoopsPerLayer[coil], layerSeparation[coil], loopSeparation[coil], startPosition[coil], current[coil], minRadius[coil], z)\n\n return Bz\n\n\n\n\ndef calcFieldOnAxis_v2(numLayers, turnsPerLayer, layerSeparation, turnSeparation, coilCentre, coilLength, innerRadius, outerRadius, current, z):\n\n # first work out the upstream and downstream end positions of the coil given its\n # centre and length\n startOfCoil = coilCentre - 0.5*coilLength\n endOfCoil = coilCentre + 0.5*coilLength\n\n l = coilLength\n n = turnsPerLayer/l\n Bz = []\n\n r = innerRadius\n\n\n for x in z:\n B = 0.0\n r1 = r\n r2 = r + layerSeparation\n x2 = x - startOfCoil\n x1 = x - endOfCoil\n\n for layer in range(0, numLayers):\n const = (mu_0 * current * n) / (2 * (r2 - r1))\n a = np.sqrt(r2**2 + x2**2) + r2\n b = np.sqrt(r1**2 + x2**2) + r1\n term1 = np.log(a/b)\n\n c = np.sqrt(r2**2 + x1**2) + r2\n d = np.sqrt(r1**2 + x1**2) + r1\n term2 = np.log(c/d)\n\n B += const * (x2*term1 - x1*term2)\n #r1 = r2\n #r2 = r1 + layerSeparation\n r1 += layerSeparation\n r2 += layerSeparation\n\n Bz.append(B)\n\n\n# Bz = np.multiply(Bz, numLayers)\n\n return Bz\n\n\n\ndef printField(R, Z, Br, Bz, 
saveName, description=None):\n \"\"\"\n Writes an output file with columns for r (m), z (m), Br (T), Bz (T), |B| (T), sensorNumber that can be imported into these analysis routines or any other plotting program. The sensorNumber column always reads -1 for calculated fields, but is at least consistent with reformatted data columns\n\n Calls:\n - checkInput_printField()\n\n Depends on:\n - fieldFormat.py\n\n Inputs:\n - R: 2D grid of radial points the field was calculated on in m\n - Z: 2D grid of longitudinal points the field was calculated on in m\n - Br: 2D grid of radial field calculations in Tesla\n - Bz: 2D grid of longitudinal field calculations in Tesla\n - saveName: The name used to save the output file\n - description: (Optional) A string containing comments (i.e. # lines) about the field generation to attach to the bottom of the output file.\n\n Outputs:\n - A 5 column, tab separated text file with optional descriptive comments at the end.\n\n Returns:\n - This function does not return anything.\n\n \"\"\"\n\n check.printField(R, Z, Br, Bz, saveName, description)\n\n z = np.ravel(Z)\n r = np.ravel(R)\n bz = np.ravel(Bz)\n br = np.ravel(Br)\n\n listOfMeasurements = []\n\n for index in np.arange(0, z.size):\n listOfMeasurements.append(field.Measurement(r[index], 0.0, z[index], br[index], 0.0, bz[index]))\n\n f = open(saveName, 'w')\n f.write('#z (m)\\tr (m)\\tphi (deg) \\tBr (T)\\tBphi (T)\\tBz (T)\\tB (T)\\tprobeID\\tDate (DDMMYYY)\\tTime (24-HH:MM:SS)\\n')\n\n listOfMeasurements.sort()\n\n for m in listOfMeasurements:\n f.write(m.asFileLine())\n\n if description != None:\n # add the description to the bottom of the file\n f.write('\\n')\n f.write(description)\n\n f.close()\n\ndef printFieldFromList(listOfMeasurements, saveName, description=None):\n f = open(saveName, 'w')\n if listOfMeasurements[0].identifier() == 'Polar Data':\n f.write('#z (m)\\tr (m)\\tphi (deg) \\tBr (T)\\tBphi (T)\\tBz (T)\\tB (T)\\tprobeID\\tDate (DDMMYYY)\\tTime (24-HH:MM:SS)\\n')\n else:\n f.write('#z (m)\\tx (m)\\ty (m) \\tBx (T)\\tBy (T)\\tBz (T)\\tB (T)\\tprobeID\\tDate (DDMMYYY)\\tTime (24-HH:MM:SS)\\n')\n\n listOfMeasurements.sort()\n\n for m in listOfMeasurements:\n f.write(m.asFileLine())\n\n if description != None:\n # add the description to the bottom of the file\n f.write('\\n')\n f.write(description)\n\n f.close()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"calculateField.py","file_name":"calculateField.py","file_ext":"py","file_size_in_byte":13947,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"588557032","text":"from pymuser.muserenv import muserenv\r\nimport struct\r\nimport binascii\r\nimport pyfits\r\nimport matplotlib.pyplot as plt\r\nimport matplotlib.image as mpimg\r\nimport numpy as np\r\nimport os\r\nimport glob\r\nimport os\r\nimport time\r\nimport sys\r\nimport re\r\nimport struct\r\nimport binascii\r\nimport time,datetime, string\r\nimport globalpy\r\n#from matplotlib import mplDeprecation\r\nimport matplotlib as mpl\r\n\r\n\r\nfrom argparse import *\r\n\r\ndef valid_date(s):\r\n try:\r\n s = s.strip()\r\n split_s = string.split(s, ' ')\r\n if len(split_s) == 1:\r\n return datetime.datetime.strptime(s, \"%Y-%m-%d\")\r\n elif len(split_s) == 2:\r\n return datetime.datetime.strptime(s, \"%Y-%m-%d %H:%M:%S\")\r\n elif len(split_s) == 3:\r\n s = string.join(split_s, ' ')\r\n return datetime.datetime.strptime(s, \"%Y-%m-%d %H:%M:%S %f\")\r\n else:\r\n msg = \"Not a valid date: '{0}'.\".format(s)\r\n raise 
ArgumentTypeError(msg)\r\n except ValueError:\r\n msg = \"Not a valid date: '{0}'.\".format(s)\r\n raise ArgumentTypeError(msg)\r\n\r\ndef data_dir(self, sub_array, year, month, day, hour, minute):\r\n file_name = ('%04d%02d%02d-%02d%02d') % (year, month, day, hour, minute)\r\n file_path = self.MUSER_ARCH + \"/\" + file_name[:8] + \"/MUSER-\" + str(sub_array) + \"/dat/\"\r\n if not os.path.exists(file_path):\r\n os.makedirs(file_path)\r\n #full_file_name = os.path.join(file_path, file_name)\r\n return file_path\r\n\r\n\r\ndef listuvfits (\r\n muser=None,\r\n start='',\r\n debug=None,\r\n ):\r\n if start is not None and len(start)>0:\r\n start_date = valid_date(start)\r\n else:\r\n return\r\n startdir = muserenv.uvfits_dir(muser, start_date.year, start_date.month, start_date.day, start_date.hour, start_date.minute)\r\n #listfile=os.listdir(info)\r\n\r\n print ('%-5.5s %-35.35s %-5.5s %-10.10s' % ('No.','File', 'Pol.','Freq(GHz)'))\r\n #os.chdir(info)\r\n #s=len(listfile)\r\n i = 0\r\n for dirpath, dirnames, filenames in os.walk(startdir):\r\n for fitsfile in filenames:\r\n if os.path.splitext(fitsfile)[1] == '.uvfits':\r\n filename = os.path.join(startdir, fitsfile)\r\n try:\r\n hdulist = pyfits.open(filename, mode='readonly', ignore_missing_end=True)\r\n\r\n # hdulist.info()\r\n object = hdulist[0].header['OBJECT']\r\n polarization = np.int32( hdulist[0].header['CRVAL3'])\r\n basefreq = np.float32( hdulist[0].header['CRVAL4'])\r\n freq = (basefreq + np.float32( hdulist[1].data[\"IF FREQ\"][0]))/1E9\r\n\r\n g= os.path.getsize(filename)\r\n d= os.path.getctime(filename)\r\n h=time.ctime(d)\r\n time_original = h\r\n time_format = datetime.datetime.strptime(time_original, '%a %b %d %H:%M:%S %Y')\r\n time_format = time_format.strftime('%Y-%m-%d %H:%M:%S')\r\n print ('%-5.5s %-35.35s %-5.5s %-10.10s' % (str(i+1),fitsfile, 'LL' if polarization==-2 else 'RR', freq))\r\n i += 1\r\n finally:\r\n hdulist.close()\r\n","sub_path":"src/python/scripts/task_listuvfits.py","file_name":"task_listuvfits.py","file_ext":"py","file_size_in_byte":3315,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"41240268","text":"\r\nfrom PyQt5 import QtCore, QtGui, QtWidgets\r\nfrom PyQt5.QtGui import QPixmap\r\nfrom PyQt5.QtGui import QCursor\r\nimport time\r\nimport datetime\r\nimport categoryController as CC\r\nimport errors as E\r\nimport addDrugController as ADC\r\nimport pharmacyLoginController as PLC\r\nfrom PyQt5.QtGui import QIcon, QPixmap\r\nclass Ui_addDrug(object):\r\n\r\n def __init__(self):\r\n global numberValidator,createDate\r\n unix = time.time()\r\n createDate = str(datetime.datetime.fromtimestamp(unix).strftime('%Y-%m-%d %H:%M:%S'))\r\n\r\n numberValidator = QtGui.QIntValidator()\r\n\r\n def setupUi(self, Dialog):\r\n Dialog.setObjectName(\"adminPages\")\r\n Dialog.setFixedSize(927, 594)\r\n self.setWindowFlags(QtCore.Qt.Window)\r\n self.setWindowFlag(QtCore.Qt.WindowCloseButtonHint, False)\r\n self.frame = QtWidgets.QFrame(Dialog)\r\n self.frame.setGeometry(QtCore.QRect(0, 0, 950, 81))\r\n self.frame.setFrameShape(QtWidgets.QFrame.StyledPanel)\r\n self.frame.setFrameShadow(QtWidgets.QFrame.Raised)\r\n self.frame.setObjectName(\"header\")\r\n self.backImg = QtWidgets.QLabel(self.frame)\r\n self.backImg.setGeometry(QtCore.QRect(30, 16, 40, 41))\r\n self.backImg.setObjectName(\"backImg\")\r\n self.backImg.setCursor(QCursor(QtCore.Qt.PointingHandCursor))\r\n pixmap = QPixmap('data/images/back2.png')\r\n self.backImg.setPixmap(pixmap)\r\n 
self.backImg.setScaledContents(True)\r\n self.title = QtWidgets.QLabel(self.frame)\r\n self.title.setGeometry(QtCore.QRect(300, 20, 351, 50))\r\n self.title.setObjectName(\"pagesTitle\")\r\n self.username = QtWidgets.QLabel(self.frame)\r\n self.username.setGeometry(QtCore.QRect(830, 20, 120, 50))\r\n self.username.setObjectName(\"usernameLabel\")\r\n\r\n self.widget = QtWidgets.QWidget(Dialog)\r\n self.widget.setGeometry(QtCore.QRect(9, 80, 911, 100))\r\n self.widget.setObjectName(\"widget\")\r\n self.label = QtWidgets.QLabel(self.widget)\r\n self.label.setGeometry(QtCore.QRect(30, 30, 850, 40))\r\n self.label.setObjectName(\"subTitle\")\r\n self.widget_2 = QtWidgets.QWidget(Dialog)\r\n self.widget_2.setGeometry(QtCore.QRect(10, 170, 911, 391))\r\n self.widget_2.setObjectName(\"widget_2\")\r\n self.formLayoutWidget = QtWidgets.QWidget(self.widget_2)\r\n self.formLayoutWidget.setGeometry(QtCore.QRect(30, 10, 391, 271))\r\n self.formLayoutWidget.setObjectName(\"formLayoutWidget\")\r\n self.formLayout = QtWidgets.QFormLayout(self.formLayoutWidget)\r\n self.formLayout.setContentsMargins(0, 0, 0, 0)\r\n self.formLayout.setObjectName(\"formLayout\")\r\n self.medicineNameLabel = QtWidgets.QLabel(self.formLayoutWidget)\r\n self.medicineNameLabel.setObjectName(\"pagesLabel\")\r\n self.formLayout.setWidget(0, QtWidgets.QFormLayout.LabelRole, self.medicineNameLabel)\r\n self.medicineNameLineEdit = QtWidgets.QLineEdit(self.formLayoutWidget)\r\n self.medicineNameLineEdit.setObjectName(\"addDrugFields\")\r\n self.formLayout.setWidget(0, QtWidgets.QFormLayout.FieldRole, self.medicineNameLineEdit)\r\n self.quantityLabel = QtWidgets.QLabel(self.formLayoutWidget)\r\n self.quantityLabel.setObjectName(\"pagesLabel\")\r\n self.formLayout.setWidget(1, QtWidgets.QFormLayout.LabelRole, self.quantityLabel)\r\n self.quantityLineEdit = QtWidgets.QLineEdit(self.formLayoutWidget)\r\n self.quantityLineEdit.setObjectName(\"addDrugFields\")\r\n self.quantityLineEdit.setValidator(numberValidator)\r\n self.formLayout.setWidget(1, QtWidgets.QFormLayout.FieldRole, self.quantityLineEdit)\r\n self.batchNumberLabel = QtWidgets.QLabel(self.formLayoutWidget)\r\n self.batchNumberLabel.setObjectName(\"pagesLabel\")\r\n self.formLayout.setWidget(2, QtWidgets.QFormLayout.LabelRole, self.batchNumberLabel)\r\n self.batchNumberLineEdit = QtWidgets.QLineEdit(self.formLayoutWidget)\r\n self.batchNumberLineEdit.setObjectName(\"addDrugFields\")\r\n self.formLayout.setWidget(2, QtWidgets.QFormLayout.FieldRole, self.batchNumberLineEdit)\r\n self.categoryLabel = QtWidgets.QLabel(self.formLayoutWidget)\r\n self.categoryLabel.setObjectName(\"pagesLabel\")\r\n self.formLayout.setWidget(3, QtWidgets.QFormLayout.LabelRole, self.categoryLabel)\r\n self.categoryComboBox = QtWidgets.QComboBox(self.formLayoutWidget)\r\n self.categoryComboBox.setObjectName(\"addDrugFields\")\r\n self.formLayout.setWidget(3, QtWidgets.QFormLayout.FieldRole, self.categoryComboBox)\r\n self.manufucturerLabel = QtWidgets.QLabel(self.formLayoutWidget)\r\n self.manufucturerLabel.setObjectName(\"pagesLabel\")\r\n self.formLayout.setWidget(4, QtWidgets.QFormLayout.LabelRole, self.manufucturerLabel)\r\n self.manufucturerLineEdit = QtWidgets.QLineEdit(self.formLayoutWidget)\r\n self.manufucturerLineEdit.setObjectName(\"addDrugFields\")\r\n self.formLayout.setWidget(4, QtWidgets.QFormLayout.FieldRole, self.manufucturerLineEdit)\r\n self.formLayoutWidget_2 = QtWidgets.QWidget(self.widget_2)\r\n self.formLayoutWidget_2.setGeometry(QtCore.QRect(460, 10, 421, 271))\r\n 
self.formLayoutWidget_2.setObjectName(\"formLayoutWidget_2\")\r\n self.formLayout_2 = QtWidgets.QFormLayout(self.formLayoutWidget_2)\r\n self.formLayout_2.setContentsMargins(0, 0, 0, 0)\r\n self.formLayout_2.setObjectName(\"formLayout_2\")\r\n self.productionDateLabel = QtWidgets.QLabel(self.formLayoutWidget_2)\r\n self.productionDateLabel.setObjectName(\"pagesLabel\")\r\n self.formLayout_2.setWidget(0, QtWidgets.QFormLayout.LabelRole, self.productionDateLabel)\r\n self.productionDateDateEdit = QtWidgets.QLineEdit(self.formLayoutWidget_2)\r\n self.productionDateDateEdit.setObjectName(\"addDrugFields\")\r\n self.formLayout_2.setWidget(0, QtWidgets.QFormLayout.FieldRole, self.productionDateDateEdit)\r\n self.expiryDateLabel = QtWidgets.QLabel(self.formLayoutWidget_2)\r\n self.expiryDateLabel.setObjectName(\"pagesLabel\")\r\n self.formLayout_2.setWidget(1, QtWidgets.QFormLayout.LabelRole, self.expiryDateLabel)\r\n self.expiryDateDateEdit = QtWidgets.QDateEdit(self.formLayoutWidget_2)\r\n self.expiryDateDateEdit.setObjectName(\"addDrugFields\")\r\n self.formLayout_2.setWidget(1, QtWidgets.QFormLayout.FieldRole, self.expiryDateDateEdit)\r\n self.entryDateLabel = QtWidgets.QLabel(self.formLayoutWidget_2)\r\n self.entryDateLabel.setObjectName(\"pagesLabel\")\r\n self.formLayout_2.setWidget(2, QtWidgets.QFormLayout.LabelRole, self.entryDateLabel)\r\n self.entryDateDateEdit = QtWidgets.QDateEdit(self.formLayoutWidget_2)\r\n self.entryDateDateEdit.setObjectName(\"addDrugFields\")\r\n self.formLayout_2.setWidget(2, QtWidgets.QFormLayout.FieldRole, self.entryDateDateEdit)\r\n self.buyingPriceLabel = QtWidgets.QLabel(self.formLayoutWidget_2)\r\n self.buyingPriceLabel.setObjectName(\"pagesLabel\")\r\n self.formLayout_2.setWidget(3, QtWidgets.QFormLayout.LabelRole, self.buyingPriceLabel)\r\n self.buyingPriceLineEdit = QtWidgets.QLineEdit(self.formLayoutWidget_2)\r\n self.buyingPriceLineEdit.setObjectName(\"addDrugFields\")\r\n self.buyingPriceLineEdit.setValidator(numberValidator)\r\n self.formLayout_2.setWidget(3, QtWidgets.QFormLayout.FieldRole, self.buyingPriceLineEdit)\r\n self.sellingPriceLabel = QtWidgets.QLabel(self.formLayoutWidget_2)\r\n self.sellingPriceLabel.setObjectName(\"pagesLabel\")\r\n self.formLayout_2.setWidget(4, QtWidgets.QFormLayout.LabelRole, self.sellingPriceLabel)\r\n self.sellingPriceLineEdit = QtWidgets.QLineEdit(self.formLayoutWidget_2)\r\n self.sellingPriceLineEdit.setObjectName(\"addDrugFields\")\r\n self.sellingPriceLineEdit.setValidator(numberValidator)\r\n self.formLayout_2.setWidget(4, QtWidgets.QFormLayout.FieldRole, self.sellingPriceLineEdit)\r\n self.drugBTN = QtWidgets.QPushButton(self.widget_2)\r\n self.drugBTN.clicked.connect(self.addDrug)\r\n self.drugBTN.setGeometry(QtCore.QRect(370, 310, 230, 41))\r\n self.drugBTN.setCursor(QCursor(QtCore.Qt.PointingHandCursor))\r\n self.drugBTN.setObjectName(\"pagesBTN\")\r\n\r\n #intials\r\n self.productionDateDateEdit.setValidator(numberValidator)\r\n self.retranslateUi(Dialog)\r\n QtCore.QMetaObject.connectSlotsByName(Dialog)\r\n\r\n #loading categories\r\n catList = CC.categoryController.getCatData()\r\n for x in catList:\r\n self.categoryComboBox.addItem(x.getCatName())\r\n\r\n #addDrug\r\n def addDrug(self):\r\n drugName = self.medicineNameLineEdit.text()\r\n qtn = self.quantityLineEdit.text()\r\n batchNo = self.batchNumberLineEdit.text()\r\n cat = self.categoryComboBox.currentText()\r\n supplier = self.manufucturerLineEdit.text()\r\n reorder = self.productionDateDateEdit.text()\r\n expDate = 
self.expiryDateDateEdit.text()\r\n entryDate = self.entryDateDateEdit.text()\r\n buyPrice = self.buyingPriceLineEdit.text()\r\n sellPrice = self.sellingPriceLineEdit.text()\r\n\r\n drugName = drugName.title()\r\n expDate1 = datetime.datetime.strptime(expDate, \"%d/%m/%Y\")\r\n entryDate1 = datetime.datetime.strptime(entryDate, \"%d/%m/%Y\")\r\n expDate = datetime.datetime.strftime(expDate1, '%Y-%m-%d')\r\n entryDate = datetime.datetime.strftime(entryDate1, '%Y-%m-%d')\r\n\r\n eObj = E.errors()\r\n\r\n if drugName ==\"\" or qtn ==\"\" or reorder==\"\" or batchNo==\"\" or supplier==\"\" or buyPrice ==\"\" or sellPrice==\"\":\r\n eObj.errorBox('Error', 'Please fill all fields')\r\n else:\r\n addDrug = ADC.addDrugController.storeDrug(self, drugName, qtn, batchNo, cat, supplier, reorder, expDate, entryDate,\r\n buyPrice, sellPrice, createDate,\"Admin\")\r\n if addDrug == \"failed\":\r\n eObj.errorBox('Error', 'Drug already existing in the same category and its not yet out of stock.Change drug name and try again')\r\n elif addDrug == \"updated\":\r\n eObj.success('Congratulations','This drug was out of stock but its now updated')\r\n self.addDrugClear()\r\n elif addDrug == \"inserted\":\r\n eObj.success('Congratulations', 'New drug has been added')\r\n self.addDrugClear()\r\n else:\r\n print(drugName)\r\n eObj.errorBox('Error', 'Some thing went wrong and not data has been saved')\r\n\r\n def addDrugClear(self):\r\n self.medicineNameLineEdit.setText(\"\")\r\n self.quantityLineEdit.clear()\r\n self.batchNumberLineEdit.setText(\"\")\r\n self.categoryComboBox.setCurrentIndex(0)\r\n self.manufucturerLineEdit.setText(\"\")\r\n self.productionDateDateEdit.clear()\r\n self.buyingPriceLineEdit.clear()\r\n self.sellingPriceLineEdit.clear()\r\n\r\n\r\n def retranslateUi(self, Dialog):\r\n _translate = QtCore.QCoreApplication.translate\r\n Dialog.setWindowTitle(_translate(\"Dialog\", \"Add Drug | UG - Pharmacy Management System\"))\r\n\r\n self.backImg.setText(_translate(\"Dialog\", \"\"))\r\n self.title.setText(_translate(\"Dialog\", \"New Drug Entry\"))\r\n self.username.setText(_translate(\"Dialog\", str(PLC.pharmacyLoginController.getUsername())))\r\n self.label.setText(_translate(\"Dialog\", \"Drug Information \"))\r\n self.medicineNameLabel.setText(_translate(\"Dialog\", \"Drug Name \"))\r\n self.quantityLabel.setText(_translate(\"Dialog\", \"Quantity\"))\r\n self.batchNumberLabel.setText(_translate(\"Dialog\", \"Batch Number\"))\r\n self.categoryLabel.setText(_translate(\"Dialog\", \"Category\"))\r\n self.manufucturerLabel.setText(_translate(\"Dialog\", \"Supplier\"))\r\n self.productionDateLabel.setText(_translate(\"Dialog\", \"Reorder Level\"))\r\n self.expiryDateLabel.setText(_translate(\"Dialog\", \"Expiry Date\"))\r\n self.entryDateLabel.setText(_translate(\"Dialog\", \"Entry Date\"))\r\n self.buyingPriceLabel.setText(_translate(\"Dialog\", \"Buying Price\"))\r\n self.sellingPriceLabel.setText(_translate(\"Dialog\", \"Selling Price\"))\r\n self.drugBTN.setText(_translate(\"Dialog\", \"ADD A DRUG\"))\r\n\r\n\r\nif __name__ == \"__main__\":\r\n import sys\r\n app = QtWidgets.QApplication(sys.argv)\r\n Dialog = QtWidgets.QDialog()\r\n ui = Ui_addDrug()\r\n ui.setupUi(Dialog)\r\n Dialog.show()\r\n sys.exit(app.exec_())\r\n\r\n","sub_path":"addDrug.py","file_name":"addDrug.py","file_ext":"py","file_size_in_byte":12503,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"375041711","text":"# The aim of this file is to take the phoneme_audios pickled 
data and run our algorithm on it\n\nimport random\nimport numpy as np\nimport sys\nimport pickle\nimport matplotlib.pyplot as plt\n\nwith open(\"phoneme_audios.p\", 'rb')as p:\n\tphoneme_audios = pickle.load(p) # TODO this should be read in somehow\n# The structure of this data structure is phoneme to list of audios (each of which is a list)\n\n# print(phoneme_audios.keys())\n#print(phoneme_audios['M'][0].shape)\n\nnum_guesses = 10\nmin_length = 600\n# print(\"Min length: \", min_length)\nmax_length = 25000\nthreshold = 10000\n\ndef evaluate(candidate_field, phoneme):\n\taudios = phoneme_audios[phoneme]\n\tretval = 0\n\tfor audio in audios:\n\t\tretval += np.max(np.correlate(candidate_field, audio, \"full\"))\n\tretval /= np.sqrt(len(candidate_field))\n\treturn retval\n\ndef good_to_best(field, phoneme):\n\t#print(field)\n\taudios = phoneme_audios[phoneme]\n\tretval = 0\n\tbest = np.zeros(max_length)\n\tfor audio in audios:\n\t\tbuffered_audio = np.hstack((np.zeros(len(field)), audio, np.zeros(len(field))))\n\t\t#print(buffered_audio)\n\t\t#print(np.argmax(np.correlate(buffered_audio, field, \"full\")))\n\t\tclipped_overlap = buffered_audio[np.argmax(np.correlate(buffered_audio, field, \"full\"))-len(field)+1:][:max_length]\n\t\t#print(clipped_overlap)\n\t\tto_add = clipped_overlap\n\t\tif len(clipped_overlap) < max_length:\n\t\t\tto_add = np.hstack((clipped_overlap, np.zeros(max_length-len(clipped_overlap))))\n\t\t#print(to_add)\n\t\tbest = np.add(best, to_add)\n\treturn best/len(audios)\n\ndef trim_best(field, thresh, num):\n\tcurrent = 0\n\tlast = max_length\n\tfor i in range(len(field)):\n\t\tval = field[i]\n\t\tif (abs(val) > thresh):\n\t\t\tlast = i\n\t\t\tcurrent = 0\n\t\telse:\n\t\t\tcurrent += 1\n\t\tif current >= num:\n\t\t\tbreak\n\tprint(last)\n\treturn field[:last+1]\n\nphoneme_dict = {}\nfor phoneme in phoneme_audios:\n\tprint(phoneme)\n\t# phoneme = 'NG'\n\taudios = phoneme_audios[phoneme]\n\tif len(audios) < 2:\n\t\tcontinue\n\tfields = []\n\tfield_evals = []\n\tfor i in range(num_guesses):\n\t\tindices = random.sample(range(len(audios)), 2)\n\t\tfirst = audios[indices[0]]\n\t\tsecond = audios[indices[1]]\n\t\t#print(first.shape, second.shape)\n\t\t# #print(np.correlate(first, second, \"full\"))\n\t\tposition = np.argmax(np.correlate(first, second, \"full\"))\n\n\t\tfirst_start = max(0, position-len(second)+1)\n\t\tfirst_end = min(position, len(first)-1) - min_length + 1\n\t\tsecond_start = max(0, len(second)-1 - position)\n\t\tsecond_end = len(second)-1 - max((position-len(first)+1), 0) - min_length+1\n\t\t#print(first_start, first_end, second_start, second_end)\n\t\t#print((first_end-first_start) == (second_end-second_start)) # Should be true always\n\t\tnum_positions = first_end-first_start+1\n\t\tif (num_positions < 0):\n\t\t\tprint(\"ERROR 1!\")\n\t\t\tcontinue\n\t\tdot_products = np.zeros(num_positions)\n\t\tfor j in range(num_positions):\n\t\t\tdot_products[j] = np.dot(first[first_start+j:first_start+j+min_length], second[second_start+j:second_start+j+min_length])\n\t\t#print(dot_products)\n\t\t# print(np.max(dot_products))\n\t\t# print(np.sort(dot_products)[-100:-90])\n\t\tthresholded_dots = np.where(dot_products > threshold, 1, 0)\n\t\t# print(np.sum(thresholded_dots))\n\t\tbounded = np.hstack(([0], thresholded_dots, [0]))\n\t\t# get 1 at run starts and -1 at run ends\n\t\tdifs = np.diff(bounded)\n\t\t# #print(difs)\n\t\trun_starts, = np.where(difs > 0)\n\t\trun_ends, = np.where(difs < 0)\n\t\t# #print(run_starts, run_ends)\n\t\tif len(run_ends-run_starts) 
<1:\n\t\t\tprint(\"ERROR 2!\")\n\t\t\tcontinue\n\t\ttemp_ind = np.argmax(run_ends - run_starts)\n\t\tstart = run_starts[temp_ind]\n\t\tend = run_ends[temp_ind]\n\t\tfirst_field = first[first_start+start:first_start+end-1+min_length]\n\t\tsecond_field = second[second_start+start:second_start+end-1+min_length]\n\t\t#print(first_field, second_field)\n\t\t# print(len(first_field), len(second_field))\n\t\t# receptive_field = np.add(first_field, second_field)/2\n\t\treceptive_field = first_field\n\n\t\tfields.append(receptive_field)\n\t\tfield_evals.append(evaluate(receptive_field, phoneme))\n\tbest_guess_ind = np.argmax(np.array(field_evals))\n\tgood_guess = fields[best_guess_ind]\n\t# best_guess = good_to_best(good_guess, phoneme)\n\tphoneme_dict[phoneme] = trim_best(good_guess, 500, 2000) # TODO: Tune these last two parameters\nprint(evaluate(phoneme_dict[phoneme], phoneme))\n# plt.subplot(211)\n# plt.plot(first)\n# plt.subplot(212)\n# plt.plot(best_guess)\n# # plt.plot(second)\n# plt.show()\n\npickle.dump(phoneme_dict, open('prod_phoneme_dict2.p', 'wb'))\n\n\t\t\n\n","sub_path":"production.py","file_name":"production.py","file_ext":"py","file_size_in_byte":4366,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"462938672","text":"#-----------------------------------------------------------------------------\n# Copyright 2007-2011 Mentor Graphics Corporation\n# Copyright 2007-2011 Cadence Design Systems, Inc.\n# Copyright 2010 Synopsys, Inc.\n# Copyright 2019-2020 Tuomas Poikela (tpoikela)\n# All Rights Reserved Worldwide\n#\n# Licensed under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in\n# compliance with the License. You may obtain a copy of\n# the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in\n# writing, software distributed under the License is\n# distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR\n# CONDITIONS OF ANY KIND, either express or implied. See\n# the License for the specific language governing\n# permissions and limitations under the License.\n#-----------------------------------------------------------------------------\n\nfrom ..base.uvm_debug import uvm_debug\nfrom ..base.uvm_globals import UVM_NONE, uvm_report_info\nfrom ..base.sv import sv\n\n#//-----------------------------------------------------------------------------\n#// Title: Callback Macros\n#//\n#// These macros are used to register and execute callbacks extending\n#// from ~uvm_callbacks~.\n#//-----------------------------------------------------------------------------\n\n\n#//-----------------------------------------------------------------------------\n#// MACRO: `uvm_register_cb\n#//\n#//| `uvm_register_cb(T,CB)\n#//\n#// Registers the given ~CB~ callback type with the given ~T~ object type. If\n#// a type-callback pair is not registered then a warning is issued if an\n#// attempt is made to use the pair (add, delete, etc.).\n#//\n#// The registration will typically occur in the component that executes the\n#// given type of callback. 
For instance:\n#//\n#//| virtual class mycb extends uvm_callback;\n#//| virtual function void doit();\n#//| endclass\n#//|\n#//| class my_comp extends uvm_component;\n#//| `uvm_register_cb(my_comp,mycb)\n#//| ...\n#//| task run_phase(uvm_phase phase);\n#//| ...\n#//| `uvm_do_callbacks(my_comp, mycb, doit())\n#//| endtask\n#//| endclass\n#//-----------------------------------------------------------------------------\n\n#`define uvm_register_cb(T,CB) \\\n# static local bit m_register_cb_``CB = uvm_callbacks#(T,CB)::m_register_pair(`\"T`\",`\"CB`\");\ndef uvm_register_cb(T, CB):\n from ..base.uvm_callback import UVMCallbacks\n Ts = T.__name__\n cb_name = \"_m_register_cb_\" + CB.__name__\n ok = UVMCallbacks.m_register_pair(Ts, CB.__name__, T)\n setattr(T, cb_name, ok)\n\n\n#//-----------------------------------------------------------------------------\n#// MACRO: `uvm_set_super_type\n#//\n#//| `uvm_set_super_type(T,ST)\n#//\n#// Defines the super type of ~T~ to be ~ST~. This allows for derived class\n#// objects to inherit typewide callbacks that are registered with the base\n#// class.\n#//\n#// The registration will typically occur in the component that executes the\n#// given type of callback. For instance:\n#//\n#//| virtual class mycb extend uvm_callback;\n#//| virtual function void doit();\n#//| endclass\n#//|\n#//| class my_comp extends uvm_component;\n#//| `uvm_register_cb(my_comp,mycb)\n#//| ...\n#//| task run_phase(uvm_phase phase);\n#//| ...\n#//| `uvm_do_callbacks(my_comp, mycb, doit())\n#//| endtask\n#//| endclass\n#//|\n#//| class my_derived_comp extends my_comp;\n#//| `uvm_set_super_type(my_derived_comp,my_comp)\n#//| ...\n#//| task run_phase(uvm_phase phase);\n#//| ...\n#//| `uvm_do_callbacks(my_comp, mycb, doit())\n#//| endtask\n#//| endclass\n#//-----------------------------------------------------------------------------\n\n#`define uvm_set_super_type(T,ST) \\\n# static local bit m_register_``T``ST = uvm_derived_callbacks#(T,ST)::register_super_type(`\"T`\",`\"ST`\");\n\n\n#-----------------------------------------------------------------------------\n# MACRO: `uvm_do_callbacks\n#\n#| `uvm_do_callbacks(T,CB,METHOD)\n#\n# Calls the given ~METHOD~ of all callbacks of type ~CB~ registered with\n# the calling object (i.e. ~this~ object), which is or is based on type ~T~.\n#\n# This macro executes all of the callbacks associated with the calling\n# object (i.e. ~this~ object). The macro takes three arguments:\n#\n# - CB is the class type of the callback objects to execute. The class\n# type must have a function signature that matches the METHOD argument.\n#\n# - T is the type associated with the callback. 
Typically, an instance\n# of type T is passed as one the arguments in the ~METHOD~ call.\n#\n# - METHOD is the method call to invoke, with all required arguments as\n# if they were invoked directly.\n#\n# For example, given the following callback class definition:\n#\n#| virtual class mycb extends uvm_cb;\n#| pure function void my_function (mycomp comp, int addr, int data);\n#| endclass\n#\n# A component would invoke the macro as\n#\n#| task mycomp::run_phase(uvm_phase phase);\n#| int curr_addr, curr_data;\n#| ...\n#| `uvm_do_callbacks(mycb, mycomp, my_function(this, curr_addr, curr_data))\n#| ...\n#| endtask\n#-----------------------------------------------------------------------------\n\ndef uvm_do_callbacks(self, CB, METHOD, *args):\n uvm_debug(self, 'uvm_do_callbacks', 'Exec CBs with ' + METHOD)\n uvm_do_obj_callbacks(self, CB, METHOD, *args)\n\nasync def uvm_do_callbacks_async(self, CB, METHOD, *args):\n uvm_debug(self, 'uvm_do_callbacks', 'Exec CBs with ' + METHOD)\n await uvm_do_obj_callbacks_async(self, CB, METHOD, *args)\n\n#-----------------------------------------------------------------------------\n# MACRO: `uvm_do_obj_callbacks\n#\n#| `uvm_do_obj_callbacks(T,CB,OBJ,METHOD)\n#\n# Calls the given ~METHOD~ of all callbacks based on type ~CB~ registered with\n# the given object, ~OBJ~, which is or is based on type ~T~.\n#\n# This macro is identical to <`uvm_do_callbacks> macro,\n# but it has an additional ~OBJ~ argument to allow the specification of an\n# external object to associate the callback with. For example, if the\n# callbacks are being applied in a sequence, ~OBJ~ could be specified\n# as the associated sequencer or parent sequence.\n#\n#| ...\n#| `uvm_do_callbacks(mycb, mycomp, seqr, my_function(seqr, curr_addr, curr_data))\n#| ...\n#-----------------------------------------------------------------------------\n\n\ndef uvm_do_obj_callbacks(OBJ, CB, METHOD, *args):\n from ..base.uvm_callback import UVMCallbackIter\n cb_iter = UVMCallbackIter(OBJ, CB)\n cb = cb_iter.first()\n\n while cb is not None:\n uvm_cb_trace_noobj(cb, (\n \"Executing callback method '{}' for callback {} (CB) from {} (T)\"\n .format(METHOD, cb.get_name(), OBJ.get_full_name())))\n m_to_call = getattr(cb, METHOD)\n m_to_call(*args)\n cb = cb_iter.next()\n\nasync def uvm_do_obj_callbacks_async(OBJ, CB, METHOD, *args):\n from ..base.uvm_callback import UVMCallbackIter\n cb_iter = UVMCallbackIter(OBJ, CB)\n cb = cb_iter.first()\n\n while cb is not None:\n uvm_cb_trace_noobj(cb, (\n \"Executing callback method '{}' for callback {} (CB) from {} (T)\"\n .format(METHOD, cb.get_name(), OBJ.get_full_name())))\n m_to_call = getattr(cb, METHOD)\n await m_to_call(*args)\n cb = cb_iter.next()\n\n#//-----------------------------------------------------------------------------\n#// MACRO: `uvm_do_callbacks_exit_on\n#//\n#//| `uvm_do_callbacks_exit_on(T,CB,METHOD,VAL)\n#//\n#// Calls the given ~METHOD~ of all callbacks of type ~CB~ registered with\n#// the calling object (i.e. ~this~ object), which is or is based on type ~T~,\n#// returning upon the first callback returning the bit value given by ~VAL~.\n#//\n#// This macro executes all of the callbacks associated with the calling\n#// object (i.e. ~this~ object). The macro takes three arguments:\n#//\n#// - CB is the class type of the callback objects to execute. The class\n#// type must have a function signature that matches the METHOD argument.\n#//\n#// - T is the type associated with the callback. 
Typically, an instance\n#// of type T is passed as one the arguments in the ~METHOD~ call.\n#//\n#// - METHOD is the method call to invoke, with all required arguments as\n#// if they were invoked directly.\n#//\n#// - VAL, if 1, says return upon the first callback invocation that\n#// returns 1. If 0, says return upon the first callback invocation that\n#// returns 0.\n#//\n#// For example, given the following callback class definition:\n#//\n#//| virtual class mycb extends uvm_cb;\n#//| pure function bit drop_trans (mycomp comp, my_trans trans);\n#//| endclass\n#//\n#// A component would invoke the macro as\n#//\n#//| task mycomp::run_phase(uvm_phase phase);\n#//| my_trans trans;\n#//| forever begin\n#//| get_port.get(trans);\n#//| if(do_callbacks(trans) == 0)\n#//| uvm_report_info(\"DROPPED\",{\"trans dropped: %s\",trans.convert2string()});\n#//| else\n#//| // execute transaction\n#//| end\n#//| endtask\n#//| function bit do_callbacks(my_trans);\n#//| // Returns 0 if drop happens and 1 otherwise\n#//| `uvm_do_callbacks_exit_on(mycomp, mycb, extobj, drop_trans(this,trans), 1)\n#//| endfunction\n#//\n#// Because this macro calls ~return~, its use is restricted to implementations\n#// of functions that return a ~bit~ value, as in the above example.\n#//\n#//-----------------------------------------------------------------------------\n\n\n#`define uvm_do_callbacks_exit_on(T,CB,METHOD,VAL) \\\n# `uvm_do_obj_callbacks_exit_on(T,CB,this,METHOD,VAL) \\\ndef uvm_do_callbacks_exit_on(self,CB,METHOD,VAL, *args):\n uvm_do_obj_callbacks_exit_on(self, CB, METHOD, VAL, *args)\n\n\n#//-----------------------------------------------------------------------------\n#// MACRO: `uvm_do_obj_callbacks_exit_on\n#//\n#//| `uvm_do_obj_callbacks_exit_on(T,CB,OBJ,METHOD,VAL)\n#//\n#// Calls the given ~METHOD~ of all callbacks of type ~CB~ registered with\n#// the given object ~OBJ~, which must be or be based on type ~T~, and returns\n#// upon the first callback that returns the bit value given by ~VAL~. 
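(see the\n#// hedged Python sketch just below)\n#//\n#// A rough usage sketch of the Python helper defined later in this file; the\n#// 'seqr', 'trans' and 'MyCB' names are illustrative assumptions only:\n#//\n#//     uvm_do_obj_callbacks_exit_on(seqr, MyCB, 'drop_trans', 1, seqr, trans)\n#//\n#// 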
It is\n#// exactly the same as the <`uvm_do_callbacks_exit_on> but has a specific\n#// object instance (instead of the implicit this instance) as the third\n#// argument.\n#//\n#//| ...\n#//| // Exit if a callback returns a 1\n#//| `uvm_do_callbacks_exit_on(mycomp, mycb, seqr, drop_trans(seqr,trans), 1)\n#//| ...\n#//\n#// Because this macro calls ~return~, its use is restricted to implementations\n#// of functions that return a ~bit~ value, as in the above example.\n#//-----------------------------------------------------------------------------\n\n#`define uvm_do_obj_callbacks_exit_on(T,CB,OBJ,METHOD,VAL) \\\n# begin \\\n# uvm_callback_iter#(T,CB) iter = new(OBJ); \\\n# CB cb = iter.first(); \\\n# while(cb != null) begin \\\n# if (cb.METHOD == VAL) begin \\\n# `uvm_cb_trace_noobj(cb,$sformatf(`\"Executed callback method 'METHOD' for callback %s (CB) from %s (T) : returned value VAL (other callbacks will be ignored)`\",cb.get_name(), OBJ.get_full_name())) \\\n# return VAL; \\\n# end \\\n# `uvm_cb_trace_noobj(cb,$sformatf(`\"Executed callback method 'METHOD' for callback %s (CB) from %s (T) : did not return value VAL`\",cb.get_name(), OBJ.get_full_name())) \\\n# cb = iter.next(); \\\n# end \\\n# return 1-VAL; \\\n# end\ndef uvm_do_obj_callbacks_exit_on(OBJ, CB, METHOD, VAL, *args):\n from ..base.uvm_callback import UVMCallbackIter\n cb_iter = UVMCallbackIter(OBJ, CB)\n cb = cb_iter.first()\n\n while cb is not None:\n m_to_call = getattr(cb, METHOD)\n ret_val = m_to_call(*args)\n if ret_val == VAL:\n uvm_cb_trace_noobj(cb, sv.sformatf(\"Executed callback method \"\n + \"'METHOD' for callback %s (CB) from %s (T) : returned value VAL\"\n + \"(other callbacks will be ignored)l\", cb.get_name(), OBJ.get_full_name()))\n return VAL\n uvm_cb_trace_noobj(cb, (\n \"Executed callback method '{}' for callback {} (CB) from {} (T)\"\n .format(METHOD, cb.get_name(), OBJ.get_full_name())))\n cb = cb_iter.next()\n return 1-VAL\n\n\n#// The +define+UVM_CB_TRACE_ON setting will instrument the uvm library to emit\n#// messages with message id UVMCB_TRC and UVM_NONE verbosity\n#// notifing add,delete and execution of uvm callbacks. The instrumentation is off by default.\n#\n\n#`define uvm_cb_trace(OBJ,CB,OPER) \\\n# begin \\\n# string msg; \\\n# msg = (OBJ == null) ? 
\"null\" : $sformatf(\"%s (%s@%0d)\", \\\n# OBJ.get_full_name(), OBJ.get_type_name(), OBJ.get_inst_id()); \\\n# `uvm_info(\"UVMCB_TRC\", $sformatf(\"%s: callback %s (%s@%0d) : to object %s\", \\\n# OPER, CB.get_name(), CB.get_type_name(), CB.get_inst_id(), msg), UVM_NONE) \\\n# end\n#\n\n#`define uvm_cb_trace_noobj(CB,OPER) \\\n# begin \\\n# if(uvm_callbacks_base::m_tracing) \\\n# `uvm_info(\"UVMCB_TRC\", $sformatf(\"%s : callback %s (%s@%0d)\" , \\\n# OPER, CB.get_name(), CB.get_type_name(), CB.get_inst_id()), UVM_NONE) \\\n# end\ndef uvm_cb_trace_noobj(CB, OPER):\n from ..base.uvm_callback import UVMCallbacksBase\n if UVMCallbacksBase.m_tracing:\n uvm_report_info(\"UVMCB_TRC\", (\"{} : callback {} ({}@{})\".format(\n OPER, CB.get_name(), CB.get_type_name(), CB.get_inst_id())), UVM_NONE)\n","sub_path":"src/uvm/macros/uvm_callback_defines.py","file_name":"uvm_callback_defines.py","file_ext":"py","file_size_in_byte":13265,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"270009979","text":"from sklearn.metrics import mean_squared_error\nimport csv\nimport re\nimport numpy as np\n\npath = '../data/train.csv'\nstop_words = set({}.fromkeys([line.strip() for line in open('../data/stopword.txt')]))\ndata = []\nlabel = []\nids = []\nwith open(path) as file:\n all_data = csv.reader(file)\n count = 0\n for i in all_data:\n if count == 0:\n count = 1\n continue\n count += 1\n # if count <= 100000:\n # continue\n if i[5] != '':\n continue\n tmp = i[7] + ' ' + i[8]\n tmp = re.sub(r'[^a-zA-Z]', ' ', tmp)\n words = tmp.lower().split()\n data.append(' '.join(words))\n # label.append(float(i[5]))\n ids.append(i[0])\n # if count >= 105001:\n # break\n\nidf = {}\nwith open('../tf_idf_chi2/idf.txt') as file:\n for line in file:\n line = line.replace('\\n', '')\n line = line.split('\\t')\n idf[line[0]] = float(line[1])\n\ntf = {}\nkeys = [1, 2, 3, 4, 5]\nfor key in keys:\n tmp = {}\n with open('../tf_idf_chi2/tf' + str(key) + '.txt') as file:\n for line in file:\n line = line.replace('\\n', '')\n line = line.split('\\t')\n tmp[line[0]] = float(line[1])\n tf[key] = tmp\n\npred = []\nfor line in data:\n line = line.split(' ')\n score = [0 for _ in range(5)]\n for word in line:\n try:\n idf_score = idf[word]\n except KeyError:\n continue\n for key in keys:\n try:\n tf_score = tf[key][word]\n score[key - 1] += tf_score * idf_score\n except KeyError:\n continue\n pred.append(float(np.argmax(score) + 1))\n# print(mean_squared_error(label, pred))\n\nwith open('../tf_idf_chi2/rst.csv', 'w') as file:\n file.write('Id,Score' + '\\n')\n for i in range(len(ids)):\n file.write(ids[i] + ',' + str(pred[i]) + '\\n')\n","sub_path":"src/if_idf_with_chi2/tf_idf_chi2_test.py","file_name":"tf_idf_chi2_test.py","file_ext":"py","file_size_in_byte":1897,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"179198058","text":"\"\"\"\nType annotations for lookoutvision service literal definitions.\n\n[Open documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_lookoutvision/literals.html)\n\nUsage::\n\n ```python\n from mypy_boto3_lookoutvision.literals import DatasetStatusType\n\n data: DatasetStatusType = \"CREATE_COMPLETE\"\n ```\n\"\"\"\nimport sys\n\nif sys.version_info >= (3, 8):\n from typing import Literal\nelse:\n from typing_extensions import Literal\n\n__all__ = (\n \"DatasetStatusType\",\n \"ListDatasetEntriesPaginatorName\",\n \"ListModelPackagingJobsPaginatorName\",\n \"ListModelsPaginatorName\",\n 
\"ListProjectsPaginatorName\",\n \"ModelHostingStatusType\",\n \"ModelPackagingJobStatusType\",\n \"ModelStatusType\",\n \"TargetDeviceType\",\n \"TargetPlatformAcceleratorType\",\n \"TargetPlatformArchType\",\n \"TargetPlatformOsType\",\n)\n\nDatasetStatusType = Literal[\n \"CREATE_COMPLETE\",\n \"CREATE_FAILED\",\n \"CREATE_IN_PROGRESS\",\n \"DELETE_COMPLETE\",\n \"DELETE_FAILED\",\n \"DELETE_IN_PROGRESS\",\n \"UPDATE_COMPLETE\",\n \"UPDATE_FAILED_ROLLBACK_COMPLETE\",\n \"UPDATE_FAILED_ROLLBACK_IN_PROGRESS\",\n \"UPDATE_IN_PROGRESS\",\n]\nListDatasetEntriesPaginatorName = Literal[\"list_dataset_entries\"]\nListModelPackagingJobsPaginatorName = Literal[\"list_model_packaging_jobs\"]\nListModelsPaginatorName = Literal[\"list_models\"]\nListProjectsPaginatorName = Literal[\"list_projects\"]\nModelHostingStatusType = Literal[\n \"HOSTED\", \"HOSTING_FAILED\", \"STARTING_HOSTING\", \"STOPPING_HOSTING\", \"SYSTEM_UPDATING\"\n]\nModelPackagingJobStatusType = Literal[\"CREATED\", \"FAILED\", \"RUNNING\", \"SUCCEEDED\"]\nModelStatusType = Literal[\n \"DELETING\",\n \"HOSTED\",\n \"HOSTING_FAILED\",\n \"STARTING_HOSTING\",\n \"STOPPING_HOSTING\",\n \"SYSTEM_UPDATING\",\n \"TRAINED\",\n \"TRAINING\",\n \"TRAINING_FAILED\",\n]\nTargetDeviceType = Literal[\"jetson_xavier\"]\nTargetPlatformAcceleratorType = Literal[\"NVIDIA\"]\nTargetPlatformArchType = Literal[\"ARM64\", \"X86_64\"]\nTargetPlatformOsType = Literal[\"LINUX\"]\n","sub_path":"typings/mypy_boto3/lookoutvision/literals.pyi","file_name":"literals.pyi","file_ext":"pyi","file_size_in_byte":1985,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"602629685","text":"#!/usr/bin/env python\n# vim: set fileencoding=utf-8 :\n# Andre Anjos <andre.anjos@idiap.ch>\n# Thu 02 May 2013 14:15:42 CEST \n\n\"\"\"Authorship definitions\n\"\"\"\n\nfull_name = 'Joe Doe'\nemail = 'joe.doe@example.com'\nhomepage = 'http://joe.example.com'\n","sub_path":"src/winix/winix/repo/test/recursive/author.py","file_name":"author.py","file_ext":"py","file_size_in_byte":244,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"397778342","text":"from django.urls import reverse\nfrom rest_framework import status\nfrom rest_framework.test import APITestCase\nfrom channels.models import Channel, Category\n\n\nclass TestCategoryListView(APITestCase):\n\n def setUp(self):\n channel = Channel.objects.create(\n name='Books'\n )\n\n parent_category = Category.objects.create(\n name='National Literature',\n channel=channel\n )\n\n Category.objects.create(\n name='Science fiction',\n parent_category=parent_category,\n channel=channel\n )\n\n Category.objects.create(\n name='Foreign literature',\n channel=channel\n )\n\n self.url = reverse('list-category')\n\n def test_get_response(self):\n\n response = self.client.get(self.url)\n self.assertEquals(response.status_code, status.HTTP_200_OK)\n\n def test_get_data_response(self):\n\n response = self.client.get(self.url)\n category_name = response.data[0]['name']\n category_categories = response.data[0]['categories']\n category_parent_category = response.data[0]['parent_category']\n category_channel = response.data[0]['channel']\n\n self.assertEquals('Foreign literature', category_name)\n self.assertEquals('Books', category_channel)\n self.assertEquals(None, category_parent_category)\n self.assertEquals(0, len(category_categories))\n\n category_name = response.data[1]['name']\n category_categories = response.data[1]['categories']\n 
category_parent_category = response.data[1]['parent_category']\n category_channel = response.data[1]['channel']\n\n self.assertEquals('National Literature', category_name)\n self.assertEquals('Books', category_channel)\n self.assertEquals(None, category_parent_category)\n self.assertEquals(1, len(category_categories))\n\n def test_get_search_success_response(self):\n\n search_url = \"{}?channel={}&name={}\".format(\n reverse('list-category'),\n 'Books',\n 'National Literature'\n )\n response = self.client.get(search_url)\n self.assertEquals(response.status_code, status.HTTP_200_OK)\n self.assertEquals('National Literature', response.data[0]['name'])\n\n def test_get_search_error_response(self):\n\n search_url = \"{}?channel={}&name={}\".format(\n reverse('list-category'),\n 'Books',\n 'National'\n )\n response = self.client.get(search_url)\n self.assertEquals(response.status_code, status.HTTP_404_NOT_FOUND)\n","sub_path":"work-at-olist/channels/api/tests/test_category_list_view.py","file_name":"test_category_list_view.py","file_ext":"py","file_size_in_byte":2615,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"355889712","text":"import eth_account\n\n\nclass Signer(object):\n\n def sign(self, message_hash, opt_signer_address):\n '''\n Sign a message hash with an Ethereum key.\n\n :param message_hash: required\n :type message_hash: HexBytes\n\n :param opt_signer_address: optional\n :type opt_signer_address: str\n\n :returns: str\n '''\n raise NotImplementedError()\n\n\nclass SignWithWeb3(Signer):\n\n def __init__(self, web3):\n self.web3 = web3\n\n def sign(self, message_hash, opt_signer_address):\n signer_address = opt_signer_address or self.web3.eth.defaultAccount\n if not signer_address:\n raise ValueError(\n 'Must set ethereum_address or web3.eth.defaultAccount',\n )\n return self.web3.eth.sign(signer_address, message_hash).hex()\n\n\nclass SignWithKey(Signer):\n\n def __init__(self, private_key):\n self.private_key = private_key\n self.address = eth_account.Account.from_key(private_key).address\n\n def sign(self, message_hash, opt_signer_address):\n if (\n opt_signer_address is not None and\n opt_signer_address != self.address\n ):\n raise ValueError(\n 'ethereum_address was set but does not match the Ethereum ' +\n 'key (eth_private_key / web3_account)',\n )\n return eth_account.Account.sign_message(\n eth_account.messages.encode_defunct(hexstr=message_hash.hex()),\n self.private_key,\n ).signature.hex()\n","sub_path":"dydx3/eth_signing/signers.py","file_name":"signers.py","file_ext":"py","file_size_in_byte":1535,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"77838949","text":"import json\n\nfrom .api_requestor import ApiRequestor\nfrom .customer import Customer\nfrom .stores import MemoryStore\nfrom .streaming import StreamingClient\n\napi_base = \"https://www.planforge.io/api\"\nserver_key = None\nstore = MemoryStore()\nstripe_livemode = None\nlog = None\n\n\ndef load_from_file(path):\n with open(path, \"r\") as json_file:\n return load_from_json(json_file.read())\n\n\ndef load_from_json(json_str):\n data = json.loads(json_str)\n for entry in data:\n Customer.store(entry)\n","sub_path":"planforge/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"624097528","text":"#coding:utf-8\nfrom django.conf.urls import patterns, include, url\nfrom 
django.contrib import admin\n\nurlpatterns = patterns('',\n # Examples:\n # url(r'^$', 'blog.views.home', name='home'),\n # url(r'^blog/', include('blog.urls')),\n url(r'^$', 'crowix.views.index'),\n url(r'^index/', 'crowix.views.index', name='index'),\n url(r'^login/', 'crowix.views.login', name='login'),\n url(r'^logout/', 'crowix.views.logout', name='logout'),\n url(r'^regist/', 'crowix.views.regist', name='regist'),\n url(r'^contact/', 'crowix.views.contact', name='contact'),\n url(r'^message/', 'crowix.views.message', name='message'),\n url(r'^term/', 'crowix.views.term',name='term'),\n url(r'^admin/', include(admin.site.urls)),\n)","sub_path":"blog/blog/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":736,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"272445989","text":"from Repository.MasterList import masterList\r\nfrom random import shuffle\r\nfrom copy import deepcopy\r\n\r\nclass masterController:\r\n def __init__(self, repo):\r\n self._repo = repo\r\n \r\n '''\r\n function that calls the add function in the repo.\r\n it takes as input the param of the new question and passes them on to the next funct\r\n '''\r\n def add(self, params):\r\n self._repo.add(params)\r\n \r\n '''\r\n Create function which takes as param the diff, no of questions and name of file\r\n and then creates a new file with the required questions\r\n input = parameters\r\n output - none, the function creates a file\r\n '''\r\n def create(self, params):\r\n if params[0] not in ['easy', 'medium', 'hard']:\r\n raise Exception(\"invalid1!!\")\r\n g = open (params[2], \"w\")\r\n no = int(params[1])\r\n enough = False\r\n \r\n if params[0] == 'easy':\r\n no1 = no//2\r\n no2 = no-no1\r\n for q in self._repo._masterList:\r\n if no1 == 0:\r\n enough = True\r\n break\r\n \r\n if q._difficulty == 'easy\\n':\r\n g.write(str(q))\r\n no1 -=1\r\n for q in self._repo._masterList:\r\n if no2 == 0:\r\n break\r\n \r\n if q._difficulty != 'easy\\n':\r\n g.write(str(q))\r\n no2 -=1\r\n if params[0] == 'medium':\r\n no1 = no//2\r\n no2 = no-no1\r\n for q in self._repo._masterList:\r\n if no1 == 0:\r\n enough = True\r\n break\r\n \r\n if q._difficulty == 'medium\\n':\r\n g.write(str(q))\r\n no1 -=1\r\n for q in self._repo._masterList:\r\n if no2 == 0:\r\n \r\n break\r\n \r\n if q._difficulty != 'medium\\n':\r\n g.write(str(q))\r\n no2 -=1\r\n \r\n if params[0] == 'hard':\r\n no1 = no//2\r\n no2 = no-no1\r\n copy1 = deepcopy(self._repo._masterList)\r\n shuffle(copy1)\r\n for q in copy1:\r\n if no1 == 0:\r\n enough = True\r\n break\r\n \r\n if q._difficulty == 'hard\\n':\r\n g.write(str(q))\r\n no1 -=1\r\n for q in self._repo._masterList:\r\n if no2 == 0:\r\n break\r\n\r\n if q._difficulty != 'hard\\n':\r\n g.write(str(q))\r\n no2 -=1\r\n \r\n if enough == False:\r\n raise Exception(\"Not enough entries\")\r\n g.close()\r\n \r\n '''\r\n Start function which opens the file and creates a quiz game\r\n it reads all entries in the file and requests input from the user\r\n after all lines are read, the function ends and returns the score\r\n input - parameters\r\n output - score\r\n '''\r\n def start(self, params):\r\n file = open(params[0], \"r+\")\r\n line = file.readline()\r\n score = 0\r\n while (line!=''):\r\n show = line.split(\";\")\r\n print(show[0], show[1], show[2], show[3], show[4])\r\n n = input(\"Your answer: \")\r\n \r\n if n == show[5]:\r\n if show[6] == 'easy\\n':\r\n score += 1\r\n if show[6] == 'medium\\n':\r\n score += 2\r\n if show[6] == 
'hard\\n':\r\n score += 3\r\n line = file.readline()\r\n \r\n return score\r\n \r\n @staticmethod\r\n def verifyCreate(params, questions):\r\n if params[0] not in ['easy\\n', 'medium\\n', 'hard\\n']:\r\n raise Exception(\"invalid1!!\")\r\n \r\ndef testCreate():\r\n repo = masterList()\r\n ctrl = masterController(repo)\r\n assert len(repo._masterList) == 0\r\n #ctrl.create(['easy','2','adasdasd.txt'])\r\n f = open (\"adasdasd.txt\", \"r+\")\r\n line = f.readline()\r\n leng = 0\r\n #while line != '':\r\n # leng+= 1\r\n # line = f.readline()\r\n \r\n assert leng == 0\r\n","sub_path":"Exam/src/Controller/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":4287,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"344268164","text":"# --------------------------------------------------------\n# Flow-Guided Feature Aggregation\n# Copyright (c) 2017 Microsoft\n# Licensed under The MIT License [see LICENSE for details]\n# Written by Yuwen Xiong\n# --------------------------------------------------------\n\nimport os\nimport sys\nos.environ['PYTHONUNBUFFERED'] = '1'\nos.environ['MXNET_CUDNN_AUTOTUNE_DEFAULT'] = '0'\nos.environ['MXNET_ENABLE_GPU_P2P'] = '0'\nthis_dir = os.path.dirname(__file__)\nsys.path.insert(0, os.path.join(this_dir, '..', '..', 'fgfa_rfcn'))\n\nfrom fgfa_rfcn import test\nimport shutil\n\nif __name__ == \"__main__\":\n from vidvrd_challenge.vidor.gen_subset import prepare_ImageSets\n\n split = 'val'\n\n if split == 'test':\n batch_boundaries = [0, 400, 600, 800, 1000, 1200, 1400, 1600, 1800, 2000, 2400]\n batch_ids = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]\n\n elif split == 'val':\n batch_boundaries = [0, 200, 400, 600, 1000]\n batch_ids = [0, 1, 2, 3]\n\n for i in range(0, 2):\n\n tgt_ds_root = 'data/VidOR'\n tgt_ds_root = os.path.abspath(tgt_ds_root)\n\n prepare_ImageSets(tgt_ds_root, split, batch_boundaries[i], batch_boundaries[i+1])\n\n cache_path = '../../data/cache'\n if os.path.exists(cache_path):\n shutil.rmtree(cache_path)\n\n print('[%d] %s: %d -> %d' % (batch_ids[i], split, batch_boundaries[i], batch_boundaries[i+1]))\n test.main(batch_id=batch_ids[i])\n","sub_path":"experiments/fgfa_rfcn/fgfa_rfcn_test_batch.py","file_name":"fgfa_rfcn_test_batch.py","file_ext":"py","file_size_in_byte":1421,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"295417358","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Oct 31 09:19:21 2017\n\n@author: 9708023\n\"\"\"\nimport os\nimport csv\nimport cv2\nimport numpy as np\nfrom keras.models import Sequential, load_model\nfrom keras.layers import Dense, Flatten, MaxPooling2D, Conv2D, Activation, Dropout\nfrom keras.utils import np_utils\nfrom keras.preprocessing import image\nfrom PIL import Image\nfrom sklearn.utils import shuffle\nfrom sklearn.cross_validation import train_test_split\nfrom keras.callbacks import LearningRateScheduler, ModelCheckpoint\nfrom sklearn.metrics import classification_report,confusion_matrix\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras.optimizers import Adagrad, Adam\nfrom keras.optimizers import SGD,RMSprop\nnp.random.seed(2017)\n\nclass InfoElement(object):\n \n def __init__(self):\n self.imagePath=''\n self.label_index=''\n\nclass CNNManager(object):\n \n def __init__(self, width, heigh):\n self.model=None\n self.number_classes=0\n self.batch_size=16\n self.names_classes=None\n self.InfoElementList=[]\n self.InfoValidationElementList=[]\n self.loadedIMGData=[]\n 
self.validationloadedIMGData=[]\n self.trainingIMGData=[]\n self.trainingLabels=[]\n self.validationIMGData=[]\n self.validationLabels=[]\n self.pictureRows=width\n self.pictureColumns=heigh\n self.pictureColorDepth=1\n self.path_current_working_dir = os.getcwd()\n self.filename_dataset='dataset.csv'\n self.filename_classes='class_names.csv'\n self.filename_validation='validation.csv'\n self.trainedFile='data.h5'\n self.epochs=200\n self.number_samples=-1\n \n print(\"current folder = {}\".format(self.path_current_working_dir))\n self.setup_classes(os.path.join(self.path_current_working_dir,self.filename_classes))\n \n def get_resized_image(self, image_file_path, new_width, new_heigh):\n input_img = Image.open(image_file_path)\n return np.asarray(input_img) \n \n def setup_classes(self, csv_file_name):\n self.names_classes = self.read_csv_in_list(csv_file_name) \n self.number_classes = len(self.names_classes[0]) \n print(\"class number={}: {}\".format(self.number_classes, self.names_classes[0]))\n\n def lr_schedule(self, epoch):\n lr = 0.01\n return lr*(0.1**int(epoch/10))\n \n def setup_train_callback(self):\n \n filename_train_log = os.path.join(self.path_current_working_dir,'logs','model_train_new.csv')\n csv_log=callbacks.CSVLogger(filename_train_log, separator=',', append=False)\n early_stopping=callbacks.EarlyStopping(monitor='val_loss', min_delta=0, patience=0, verbose=0, mode='min')\n \n filepath=\"Best-weights-my_model-{epoch:03d}-{loss:.4f}-{acc:.4f}.hdf5\"\n #checkpoint = callbacks.ModelCheckpoint(filepath, monitor='val_loss', verbose=1, save_best_only=True, mode='min')\n return [LearningRateScheduler(self.lr_schedule), csv_log, early_stopping] #[csv_log,early_stopping,checkpoint] \n \n def setup_trainingset(self, csv_file_name, is_apply_validationset):\n \n self.setup_pixel_element(csv_file_name, True, foldername='training') \n num_of_samples = len(self.InfoElementList) \n self.number_samples = num_of_samples\n \n labels = np.ones((num_of_samples,),dtype='int64') # reqeust one list with length = samples and default value = 1\n index = 0\n \n for element in self.InfoElementList:\n self.loadedIMGData.append(self.get_resized_image(element.imagePath, self.pictureRows, self.pictureColumns)) \n labels[index] = int(element.label_index)\n index+=1\n #label_setup.append(int(element.label_index))\n \n self.trainingIMGData = np.array(self.loadedIMGData)\n self.trainingIMGData = self.trainingIMGData.astype('float32')\n #self.trainingIMGData /= 255 \n \n self.trainingLabels = np_utils.to_categorical(labels, self.number_classes) # convert class labels to on-hot encoding\n #print(\"lable categorical:{}\".format(self.trainingLabels)) \n #print(\"shape of training data:{}\".format(self.trainingIMGData.shape)) # shape=[record number, rows, colums, depth of color] \n \n #y_data = np.r_[np.c_[np.ones(num_of_samples), np.zeros(num_of_samples)],np.c_[np.zeros(num_of_samples), np.ones(num_of_samples)]]\n #print(\"shape={}, data={}\".format(y_data.shape, y_data)) # shape=1, [1,0]*num_of_samples, [0,1]*num_of_samples in y_data\n \n #print(\"training label set:{}\".format(self.trainingLabels)) \n \n if is_apply_validationset == False:\n #Shuffle the dataset\n train_set, label_set = shuffle(self.trainingIMGData, self.trainingLabels, random_state=None) \n #print(label_set)\n return train_test_split(train_set, label_set, test_size=0.2, random_state=2) # Split the dataset \n else:\n return self.trainingIMGData, self.trainingLabels\n \n def setup_validationset(self, csv_file_name):\n \n 
self.setup_pixel_element(csv_file_name, False, foldername='validation') \n num_of_samples = len(self.InfoValidationElementList) \n \n labels = np.ones((num_of_samples,),dtype='int64') # declare label integer type collection \n index = 0\n \n for element in self.InfoValidationElementList:\n self.validationloadedIMGData.append(self.get_resized_image(element.imagePath, self.pictureRows, self.pictureColumns))\n labels[index] = int(element.label_index)\n index+=1\n #label_setup.append(int(element.label_index))\n \n self.validationIMGData = np.array(self.validationloadedIMGData)\n self.validationIMGData = self.validationIMGData.astype('float32')\n #self.trainingIMGData /= 255 \n \n self.validationLabels = np_utils.to_categorical(labels, self.number_classes) # convert class labels to on-hot encoding\n return self.validationIMGData, self.validationLabels\n \n def setup_pixel_element(self, csv_file_name, is_training, foldername='training'):\n rows = self.read_csv_in_list(csv_file_name)\n \n for row in rows:\n element = InfoElement()\n element.imagePath = os.path.join(self.path_current_working_dir,foldername,row[0])\n element.label_index = row[1]\n if is_training == True:\n self.InfoElementList.append(element)\n else:\n self.InfoValidationElementList.append(element)\n\n def read_csv_in_list(self, csv_file_name): \n fullpath_dataset = os.path.join(self.path_current_working_dir,csv_file_name)\n dataList = []\n with open(fullpath_dataset,'r') as f:\n row = csv.reader(f)\n dataList = list(row) \n return dataList\n \n def create_cnn_model(self):\n self.model = Sequential()\n\n self.model.add(Conv2D(32, (3,3),padding='same',input_shape=[100,100,3],data_format='channels_last', activation='relu'))\n self.model.add(MaxPooling2D(pool_size=(2, 2)))\n self.model.add(Conv2D(32, (3, 3), activation='relu', padding='valid')) \n self.model.add(MaxPooling2D(pool_size=(2, 2)))\n self.model.add(Conv2D(64, (3, 3), activation='relu', padding='valid')) \n self.model.add(MaxPooling2D(pool_size=(2, 2)))\n #self.model.add(Conv2D(128, (3, 3), activation='relu', padding='same')) \n #self.model.add(MaxPooling2D(pool_size=(2, 2)))\n #self.model.add(Dropout(0.2))\n #self.model.add(Conv2D(256, (3, 3), activation='relu', padding='same')) \n #self.model.add(MaxPooling2D(pool_size=(2, 2)))\n #self.model.add(Dropout(0.2))\n self.model.add(Flatten())\n self.model.add(Dense(64, activation='relu')) \n self.model.add(Dropout(0.5))\n self.model.add(Dense(self.number_classes, activation='softmax'))\n \n sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)\n self.model.compile(loss='categorical_crossentropy', optimizer='rmsprop',metrics=[\"accuracy\"]) \n #self.model.compile(loss='categorical_crossentropy', optimizer='rmsprop',metrics=[\"accuracy\"])\n # Viewing model_configuration\n\n self.model.summary()\n '''\n model.get_config()\n model.layers[0].get_config()\n model.layers[0].input_shape\t\t\t\n model.layers[0].output_shape\t\t\t\n model.layers[0].get_weights()\n np.shape(model.layers[0].get_weights()[0])\n model.layers[0].trainable\n '''\n def setup_data_for_pure_deep_learninng(self, csv_file_name):\n self.setup_pixel_element(csv_file_name, True, foldername='training') \n num_of_samples = len(self.InfoElementList) \n self.number_samples = num_of_samples\n \n labels = np.ones((num_of_samples,),dtype='int64') # reqeust one list with length = samples and default value = 1\n index = 0\n \n for element in self.InfoElementList:\n image = np.array(Image.open(element.imagePath).resize((self.pictureRows, self.pictureColumns)))\n image = 
image.transpose(2, 0, 1) \n image = image.reshape(1, image.shape[0] * image.shape[1] * image.shape[2]).astype(\"float32\")[0]\n self.loadedIMGData.append(image / 255.) \n labels[index] = int(element.label_index)\n index+=1\n \n self.loadedIMGData = np.array(self.loadedIMGData)\n self.trainingLabels = np_utils.to_categorical(labels, self.number_classes) # convert class labels to on-hot encoding\n \n def create_pure_deep_learning(self):\n self.model = Sequential()\n self.model.add(Dense(200, input_dim=30000, activation='relu'))\n self.model.add(Dropout(0.2)) \n self.model.add(Dense(200, activation='relu')) \n self.model.add(Dropout(0.2)) \n self.model.add(Dense(self.number_classes, activation='softmax')) \n self.model.compile(loss=\"categorical_crossentropy\", optimizer= Adam(lr=0.001), metrics=[\"accuracy\"])\n \n def start_train(self, is_apply_validationset):\n \n is_cnn_network=False \n callbacks_list = None\n \n if is_cnn_network == True:\n if is_apply_validationset == False:\n X_train, X_test, y_label_train, y_label_test = self.setup_trainingset(self.filename_dataset,is_apply_validationset)\n else:\n X_train, y_label_train = self.setup_trainingset(self.filename_dataset, is_apply_validationset)\n X_test, y_label_test = self.setup_validationset(self.filename_validation) \n \n self.create_cnn_model()\n callbacks_list = self.setup_train_callback()\n self.model.fit(X_train, y_label_train, batch_size=self.batch_size, epochs=self.epochs, verbose=1, validation_data=(X_test, y_label_test), callbacks=callbacks_list)\n score = self.model.evaluate(X_test, y_label_test, verbose=0)\n print('Test Loss:', score[0])\n print('Test accuracy:{}%'.format(score[1]*100))\n else:\n self.setup_data_for_pure_deep_learninng(self.filename_dataset)\n self.create_pure_deep_learning()\n self.model.fit(self.loadedIMGData, self.trainingLabels, batch_size=self.batch_size, epochs=self.epochs, verbose=1, validation_split=0.1, callbacks=callbacks_list) \n \n self.model.save(self.trainedFile)\n \n def test_model(self, path_image_file, width, heigh): \n \n model = load_model(self.trainedFile) \n is_cnn_network=False\n \n if is_cnn_network == True:\n img = image.load_img(path_image_file, target_size=(width, heigh)) \n x = image.img_to_array(img)\n x = np.expand_dims(x, axis=0) \n pred = model.predict_proba(x, batch_size=16)\n print('prediction, label={}: '.format(pred))\n label = model.predict_classes(x)\n print('class = {}'.format(self.names_classes[0][label[0]]))\n #print(classification_report(np.argmax(y_test,axis=1), y_pred,target_names=self.names_classes))\n else:\n image = np.array(Image.open(path_image_file).resize((self.pictureRows, self.pictureColumns))) \n image = image.transpose(2, 0, 1)\n image = image.reshape(1, image.shape[0] * image.shape[1] * image.shape[2]).astype(\"float32\")[0]\n result = model.predict_classes(np.array([image / 255.]))\n print(\"predict resutl={}, label={}\".format(result[0], self.names_classes[0][result[0]]))\n\ndef main():\n manager = CNNManager(100,100) \n manager.epochs=300 \n manager.batch_size=2000\n \n option = 2\n \n if option == 1:\n manager.start_train(is_apply_validationset=True) \n else:\n width=100\n heigh=100 \n manager.test_model(os.path.join(manager.path_current_working_dir,\"test\", \"cat13.jpg\"), width, heigh)\n \n\nif __name__ == '__main__':\n main() ","sub_path":"miku6.py","file_name":"miku6.py","file_ext":"py","file_size_in_byte":13408,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"597623680","text":"# -*- 
coding: utf-8 -*-\n# 第一行 加上 声明编码格式\n\n__author__ = 'love'\n\n# 15.2 正则表达式使用的特殊符号和字符\n# 最常用的元字符(metacharacter) -- 特殊字符和符号\n'''\n记 号 说 明 正则表达式样例\nliterl 匹配字符串的值 foo\nre1\\re2 匹配正则表达式re1或re2 foo|bar\n. 匹配任意字符(换行符除外) b.b\n^ 匹配字符串的开始 ^Dear\n$ 匹配字符串的结尾 /bin/*sh$\n* 匹配前面出现的正则表达式零次或多次 [A-Za-z0-9]*\n+ 匹配前面出现的正则表达式一次或多次 [a-z]+\\.com\n? 匹配前面出现的正则表达式零次或多次 goo?\n{N} 匹配前面出现的正则表达式N [0-9]{3}\n{M, N} 匹配重复出现M次到N次的正则表达式 [0-9]{5,9}\n[...] 匹配字符组里出现任意一个字符 [aeiou]\n[..x-y..] 匹配从字符x到y中的任意一个字符 [0-9],[A-Za-z0-9]\n[^...] 不匹配次字符集中出现的任何一个字符,包括某一个 [^aeiou],[^A-Za-z0-9]\n 范围的字符(如果在此字符集中出现)\n(*|+|?|{})? 用于上面出现的任何\"非贪婪\"。版本重复匹配次数符号 .*?[a-z]\n(...) 匹配封闭括号中正则表达式(RE),并保存为子组 ([0-9]{3})?,f(oo|u)bar\n\n特殊字符:\n\\d 匹配任意数字,和[0-9]一样(\\D是\\d的反义:任何非数符字) data\\d+.txt\n\\w 匹配任何数字字母字符,和[A-Za-z0-9_]相同(\\W是\\w的反义) [A-Za-z_]\\w+\n\\s 匹配任何空字符,和[\\n\\t\\r\\v\\f]相同,(\\S是\\s的反义) of\\sthe\n\\b 匹配单词边界(\\B的\\b反义) \\bThe\\b\n\\nm 匹配以保存的子组( 参考:(....) ) price:\\16\n\\c 逐一匹配特殊字符c,(即,取消它的特殊含义,按字母匹配) \\., \\\\, \\*\n\\A(\\Z) 匹配字符串的起始(结束) \\ADear\n'''\n\n# 15.2.1 用管道符号(|)匹配多个正则表达式模式\n# 管道符号(|),表示一个或操作,它的意思是选择被管道符号分割的多个不同的正则表达式中的一个。\n\n# 15.2.2 匹配任意一个单个的字符(.)\n# 匹配一个句点(dot .)本身,需要在前面使用反斜杠\"\\\"对它进行转义\n\n# 15.2.3 从字符串的开头或结尾或单词边界开始匹配(^ $ \\b \\B)\n\n# 15.2.4 创建字符类([])\n# 使用方括号([])的正则表达式会匹配方括号里的任何一个字符\n\n# 15.2.5 指定范围(-)和否定(^)\n# 方括号除匹配单个字符外,还可以支持所指定的字符范围。方括号里一对符号中间的连字符(-)用来表示一个字符的范围。\n# 另外加上上箭头符号(^),就表示不匹配指定字符集里的任意字符。\n\n\n# 15.2.6 使用闭包操作符(*,+,?,{})实现多次出现/重复匹配\n#\n\n# 15.2.7 特殊字符表示、字符集\n# \\d 表示十进制数字(0-9)\n# \\w 可用来表示整个字符数字的字符集,即相当于\"A-Za-z0-9_\"的简写形式\n# \\s 代表空白字符\n# 这些特殊字符的大写形式表示不匹配\n\n# 15.2.8 用圆括号(())组建组\n\n\n\n","sub_path":"Books/core/15/readme.py","file_name":"readme.py","file_ext":"py","file_size_in_byte":3495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"409263939","text":"import uuid\nimport datetime\nfrom app import db\nfrom app.models import User\nfrom ..schema.error_schema import ErrorSchema\n\ndef save_new_user(data):\n user = User.query.filter_by(email=data['email']).first()\n if not user:\n new_user = User(\n public_id=str(uuid.uuid4()),\n name=data['name'].title(),\n mobile=data['mobile'],\n email=data['email'],\n password=data['password'],\n registered_on=datetime.datetime.utcnow()\n )\n save_changes(new_user)\n response_object = {\n 'status': 'success',\n 'message': 'User successfully registered.'\n }\n return response_object, 201\n else:\n return ErrorSchema.get_response('UserExistError')\n\ndef get_all_users():\n return User.query.all()\n\ndef get_a_user(public_id):\n try:\n return User.query.filter_by(public_id=public_id).first()\n except Exception as e:\n return ErrorSchema.get_response('InternalServerError', e)\n\ndef save_changes(data):\n db.session.add(data)\n db.session.commit()","sub_path":"app/api/helpers/user_helper.py","file_name":"user_helper.py","file_ext":"py","file_size_in_byte":1079,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"377710325","text":"# Lesson 3.4: Make Classes\n# Mini-Project: Movies Website\n\n# In this file, you will define the class Movie. You could do this\n# directly in entertainment_center.py but many developers keep their\n# class definitions separate from the rest of their code. 
This also\n# gives you practice importing Python files.\n\nimport webbrowser\n\nclass Video():\n #This class stores common attributes of videos\n def __init__(self, title, year):\n self.title = title\n self.year = year\n \n\nclass Movie(Video):\n # This class provides a way to store movie related information\n\n def __init__(self, title, year, movie_storyline, poster_image, trailer_youtube):\n # initialize instance of class Movie\n Video.__init__(self, title, year)\n self.storyline = movie_storyline\n self.poster_image_url = poster_image\n self.trailer_youtube_url = trailer_youtube\n","sub_path":"movie_website/media.py","file_name":"media.py","file_ext":"py","file_size_in_byte":890,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"24569591","text":"import requests\n#base_url = 'https://www.gastronom.ru'\nheaders = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.9; rv:45.0) Gecko/20100101 Firefox/45.0'}\nfrom bs4 import BeautifulSoup\nfrom lxml import html\n\nsites = [\n ('https://povar.ru', '/list'),\n ('https://www.gastronom.ru', '/catalog'),\n]\n\nclass SiteParser:\n domain = None\n start_catalog = None\n\n def __init__(self):\n self.base_content = self.get_content_soup(self.start_catalog)\n\n def get_content_soup(self, url):\n if self.domain not in url:\n url = self.domain + url\n try:\n r = requests.get(url, headers=headers, timeout=10)\n except Exception as e:\n print('Exception', e)\n return None\n\n return BeautifulSoup(r.text)\n\n def get_categories_links(self):\n pass\n\n\nclass PovarParser(SiteParser):\n domain = 'https://povar.ru'\n start_catalog = 'https://povar.ru/list'\n\n def get_categories_links(self):\n return self.base_content.find_all('a', {'class', 'itemHdr'})\n\n def parse_page(self, url, page):\n if page == 1:\n page = ''\n\n url = url + str(page)\n soup = self.get_content_soup(url)\n\n if soup:\n return soup.find_all('a', {'class': 'listRecipieTitle'})\n\n return []\n\n\npovar = PovarParser()\ncategories = povar.get_categories_links()\n\nall_links = []\nchecked_categories = []\n\n# povar.parse_page(categories[0]['href'], 900000) tests\n\nfor i, category_url in enumerate(categories):\n for page in range(10000):\n links = povar.parse_page(category_url['href'], page + 1)\n print('Parsed', category_url['href'], i, 'from', len(categories), 'page', page + 1, 'got', len(links))\n all_links += links\n\n if not len(links):\n break\n\n checked_categories.append(category_url['href'])\n with open('receipt/povar_catalog.txt', 'a') as f:\n f.write(category_url['href']+'\\n')\n\n with open('receipt/povar_links.txt', 'a') as f:\n for link in all_links:\n f.write(link['href']+'\\n')\n all_links = []","sub_path":"receipt/site_parser.py","file_name":"site_parser.py","file_ext":"py","file_size_in_byte":2090,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"573994930","text":"import sys\nfrom PyQt5 import QtCore, QtGui, QtWidgets\n\n\nclass Window(QtWidgets.QMainWindow):\n\n def __init__(self):\n super(Window, self).__init__()\n self.setGeometry(50, 50, 300, 300)\n self.setWindowTitle('PyQt5')\n self.setWindowIcon(QtGui.QIcon('image/bird.png'))\n self.home()\n\n def home(self):\n btn = QtWidgets.QPushButton('Quit', self)\n btn.clicked.connect(self.dodo)\n btn.resize(100, 100)\n btn.move(100, 100)\n self.show()\n\n def dodo(self):\n print('clicked button')\n\n\napp = QtWidgets.QApplication(sys.argv)\nGUI = 
Window()\nsys.exit(app.exec_())\n","sub_path":"PyQt5Ex/pyqt3.py","file_name":"pyqt3.py","file_ext":"py","file_size_in_byte":632,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"347692885","text":"#coding:utf-8\n\ntry:\n from selenium import webdriver\n\nexcept Exception as install_error:\n\n print (install_error,\"检查是否安装好selenium\")\nimport os\n\nimport time\n\nclass Browser_names():\n\n\n def __init__(self):\n\n self.chromes='Chrome'\n\n self.FireFoxs='FireFox'\n\n self.PhantomJS='PhantomJS'\n\n\n self.home_http='http'\n\n\n\n def _run_browser(self,browsers,website):\n # 调用谷歌浏览器并配置下载路径,下载用例需要这样的方法\n if browsers == self.chromes and self.home_http in website:\n\n options=webdriver.ChromeOptions()\n path=os.path.abspath(\"..\")#表示当前所处的文件夹上一级文件夹的绝对路径\n filepath=path+\"\\\\PullFile\"\n\n prefs={'profile.default_content_settings.popups':0,'download.default_directory':filepath}\n options.add_experimental_option('prefs',prefs)\n if website != '':\n br=webdriver.Chrome(chrome_options=options)\n br.maximize_window()\n br.get(website)\n return br\n\n # 调用火狐浏览器\n elif browsers == self.FireFoxs and self.home_http in website:\n br = webdriver.Firefox()\n br.maximize_window()\n br.get(website)\n return br\n\n\n elif browsers == self.PhantomJS and self.home_http in website:\n br = webdriver.PhantomJS()\n br.maximize_window()\n br.get(website)\n return br\n\n\n\n\n\n elif browsers[0] =='c' or browsers[0] =='f':\n print (\"填写浏览器名时首字母请大写\")\n\n elif self.home_http not in website:\n print (\"注意填写网址是需要加上http://...\")\n\n else:\n print ('暂时不支持其他浏览器,可在browserclass中添加')\n\n","sub_path":"PySelenium/pyScript/BrowserClass.py","file_name":"BrowserClass.py","file_ext":"py","file_size_in_byte":1960,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"359668989","text":"from sympy import *\r\n\r\n\r\ndef f(x):\r\n return 0.2 + 25 * x - 200 * x ** 2 + 675 * x ** 3 - 900 * x ** 4 + 400 * x ** 5\r\n\r\n\r\ndef trapezoid(a, b, n):\r\n h = (b - a) / n\r\n x = a\r\n s = 0\r\n for i in range(1, n):\r\n x = x + h\r\n s = s + f(x)\r\n return (b - a) * (f(a) + 2 * s + f(b)) / (2 * n)\r\n\r\n\r\ndef simpson1_3(a, b, n):\r\n h = (b - a) / n\r\n x = a\r\n s = 0\r\n for i in range(1, n):\r\n x = x + h\r\n if i%2 == 0:\r\n m = 2\r\n else:\r\n m = 4\r\n s = s + m * f(x)\r\n return (b - a) * (f(a) + s + f(b)) / (3 * n)\r\n\r\n\r\nn = 100\r\na = 0\r\nb = 0.8\r\n\r\n# approx = simpson1_3(a, b, n)\r\n\r\nx = symbols('x')\r\nexact_int = integrate(0.2 + 25 * x - 200 * x ** 2 + 675 * x ** 3 - 900 * x ** 4 + 400 * x ** 5, x)\r\nexact = abs(exact_int.subs(x, a) - exact_int.subs(x, b))\r\n\r\nprint(\"Exact integral value = \", exact)\r\n# print(\"Trapezoidal approximation = \", approx)\r\n# print(\"Error (%) = \", 100 * abs(exact - approx) / exact, \"%\")\r\n\r\n\r\nloop = [2, 5, 20, 100]\r\nprint(\"n\\ttrapezoid\\t\\t\\tsimpson1/3\")\r\nfor x in loop:\r\n print(str(x) + \"\\t\" + str(trapezoid(a, b, x)) + \"\\t\" + str(simpson1_3(a, b, x)) )\r\n","sub_path":"Session 11/Exercise 1.py","file_name":"Exercise 1.py","file_ext":"py","file_size_in_byte":1174,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"38331716","text":"from country import Country\n\n\nclass Continent(object):\n\n def __init__(self, name: str, countries: list):\n self.name = name\n self.countries = countries\n\n def total_population(self) -> int:\n totalpop = 0\n for country in self.countries:\n totalpop += 
country.population\n return totalpop\n\n def __str__(self) -> str:\n ret = self.name + \":\\n\"\n for country in self.countries:\n ret += \"{}\\n\".format(country)\n return ret\n\n\nif __name__ == \"__main__\":\n c1 = Country(\"Galicia\", 1000, 5)\n c2 = Country(\"Valencia\", 999, 3)\n c3 = Country(\"Madrid\", 5, 100)\n continent = Continent(\"España no es un continente pero me vale\", [c1, c2, c3])\n print(\"Total population: \", continent.total_population())\n print(continent.name)\n print(\"\\nPrinting continent:\\n\\n\", continent)\n","sub_path":"exercisesWithClasses/continent.py","file_name":"continent.py","file_ext":"py","file_size_in_byte":858,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"597884253","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Apr 19 12:24:45 2020\r\n\r\n@author: yun liu\r\n\"\"\"\r\nimport pandas as pd\r\n\r\nmonth = pd.read_csv(\"Month.csv\",low_memory=False)\r\nmonthSlice=month.iloc[:,944:1181]\r\nprint(monthSlice.columns)#see the column titles\r\n#remove the hash columns\r\ndfr=monthSlice.loc[:,~monthSlice.columns.str.contains('_hash')] \r\n#remove lag columns except lag01, since the others can be derived from lag01\r\ndfr1=dfr.loc[:,~dfr.columns.str.contains('lag02|lag03|lag04|lag05|lag06')]\r\n\r\n#remove all the columns that are all nan \r\ndfr2=dfr1.copy() #copy so popping from dfr2 does not shift dfr1's column indices\r\nnn=dfr1.shape\r\nfor i in range(nn[1]):\r\n if dfr1.iloc[:,i].isnull().sum()==nn[0]:\r\n dfr2.pop(dfr1.columns[i])\r\n#count the number of 0 entries in each column\r\ndfr2.isin([0]).sum()\r\n#remove all the columns that only contain 0 or nan.\r\nmonthLastPart=dfr2.loc[:,~dfr2.columns.str.contains('_isprotected|_isrespite|_sequence|_string')]\r\n#output the final data frame\r\nmonthLastPart.to_csv('LastPart.csv')\r\n","sub_path":"featureSelection4.py","file_name":"featureSelection4.py","file_ext":"py","file_size_in_byte":917,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"65269682","text":"from typing import List\n\n\nclass Solution:\n def productExceptSelf(self, nums: List[int]) -> List[int]:\n left = [0]*len(nums)\n prod = 1\n for i in range(len(nums)):\n left[i] = prod\n prod *= nums[i]\n \n prod = 1\n for i in range(len(nums)-1, -1, -1):\n left[i] = left[i]*prod\n prod *= nums[i]\n \n return left\n","sub_path":"238. 
Product of Array Except Self/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"371760345","text":"class Solution:\n def find_root(self, A, parent):\n if parent[A] == A:\n return A\n return self.find_root(parent[A], parent)\n\n def union(self, A, B, parent, height):\n C = self.find_root(A, parent)\n D = self.find_root(B, parent)\n\n if C == D:\n return\n\n if height[C] < height[D]:\n parent[C] = D\n elif height[C] > height[D]:\n parent[D] = C\n else:\n parent[D] = C\n height[C] += 1\n\n def Solve(self, A, B):\n parent = [i for i in range(A)]\n height = [0 for _ in range(A)]\n B = sorted(B, key=lambda item: item[2])\n mst = 0\n ans = []\n\n for i,j,k in B:\n C = self.find_root(i-1, parent)\n D = self.find_root(j-1, parent)\n\n if C == D:\n continue\n\n self.union(C,D, parent, height)\n mst += k\n ans.append((i-1,j-1, k))\n\n return mst, ans\n\n\nif __name__ == '__main__':\n A = 4\n B = [[1, 2, 1],\n [2, 3, 4],\n [1, 4, 3],\n [4, 3, 2],\n [1, 3, 10]]\n C = Solution()\n print(C.Solve(A, B))\n","sub_path":"Graph Modified/Graph III/KRUSHKAL.py","file_name":"KRUSHKAL.py","file_ext":"py","file_size_in_byte":1159,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"532021693","text":"#!/usr/bin/env python\n########################\n\nimport os\nimport numpy as np, pymc\nimport varcontainer\n\n########################\n\n\nclass IntrinsicScatter(object):\n\n ##############\n \n def __init__(self, x, y, ysigma, norm=1e15):\n\n self.x = x/norm\n self.y = y/norm\n self.ysigma = ysigma/norm\n\n\n self.buildPriors()\n self.ratioLikelihood()\n\n ##############\n\n def buildMCMC(self, filename = None):\n\n if filename is None:\n mcmc = pymc.MCMC(self)\n elif os.path.exists(filename):\n mcmc = pymc.MCMC(self, db = pymc.database.pickle.load(filename))\n else:\n mcmc = pymc.MCMC(self, db = 'pickle', dbname = filename)\n\n return mcmc\n\n ##############\n \n def buildPriors(self):\n\n self.log10_intrinsic_scatter = pymc.Uniform('log10_intrinsic_scatter', -4, 0)\n\n @pymc.deterministic(name = 'intrinsic_sigma')\n def intrinsic_sigma(log10_scatter = self.log10_intrinsic_scatter):\n return 10**log10_scatter\n\n self.intrinsic_sigma = intrinsic_sigma\n\n\n ###########################\n\n def ratioLikelihood(self):\n\n self.m_angle = pymc.Uniform('m_angle', np.pi/8, 3*np.pi/8.)\n \n @pymc.deterministic(name = 'm')\n def m(m_angle = self.m_angle):\n return np.tan(m_angle)\n\n self.m = m\n\n @pymc.potential(name = 'yhat')\n def yhat(x = self.x, y = self.y, ysigma = self.ysigma, m = self.m, sigma = self.intrinsic_sigma):\n yhat = m*x\n return np.sum([pymc.normal_like(yhat[i], y[i], 1./(ysigma[i]**2 + sigma**2)) for i in range(len(y))])\n self.yhat = yhat\n\n \n \n##############################\n\n\n \n\n \n","sub_path":"intrinsicscatter.py","file_name":"intrinsicscatter.py","file_ext":"py","file_size_in_byte":1724,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"399850178","text":"import sys\nimport math\nimport random\nfrom ClosedCurve import ClosedCurve\nimport tkinter\nfrom tkinter import *\n\nclass Circle(ClosedCurve):\n def __init__(self):\n super(Circle, self).__init__()\n\n def drawMe(self, canvas,x):\n pass\n\n def drawRandCircle(self, canvas):\n self.XYpoints = []\n X = random.randint(0, 799)\n self.XYpoints.append(X)\n Y = random.randint(0, 799)\n 
self.XYpoints.append(Y)\n Rand = 800-X\n while (X + Rand > 800 or Y + Rand > 800):\n x = random.randint(0, 799)\n y = random.randint(0, 799)\n Rand = random.randint(1, 800 - x)\n self.XYpoints.append(X+Rand)\n self.XYpoints.append(Y+Rand)\n print(self.XYpoints)\n self.filColour = self._from_rgb((random.randint(0, 255), random.randint(0, 255), random.randint(0, 255)))\n canvas.create_oval(self.XYpoints, fill=self.filColour)\n\n def calcArea(self):\n y1 = self.XYpoints[1]\n y2 = self.XYpoints[3]\n\n d = y2-y1\n print((d/2)**2 * math.pi)\n\n# o = Circle()\n# w = tkinter.Tk()\n# window = tkinter.Canvas(w, height = 800, width = 800, bg = 'purple')\n# window.pack()\n# window.pack_propagate(0)\n#\n# def callback():\n# window.delete(\"all\")\n# o.drawRandCircle(window)\n# o.calcArea()\n#\n# b = Button(window, text=\"Circle\", command=callback)\n# b.pack()\n# w.mainloop()","sub_path":"Circle.py","file_name":"Circle.py","file_ext":"py","file_size_in_byte":1387,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"523924403","text":"if __name__ == \"__main__\": raise SystemError(\"Incorrect starting file\")\n\nimport socket\nimport threading\nimport time\n\nimport connection\n\ncommand_list = []\n\ndef receiver_func():\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n s.bind( (\"0.0.0.0\", 8000) )\n\n while True:\n data, address = s.recvfrom(1024)\n addr = address[0]\n\n data_string = str(data, \"ascii\")\n\n command_parts = data_string.split(':')\n\n if address[1] == 10123 and command_parts[4] == \"judgelight\":\n address[0] = (command_parts[3], 9999)\n\n if command_parts[0] == \"ASKCONNECT\":\n connection.connect.accept(addr, command_parts[1])\n continue\n\n try:\n assert address[1] == 9999\n\n for user_ID in connection.users:\n user_addr = connection.users[user_ID].addr\n if addr == user_addr:\n break\n else: raise SystemError\n\n except (SystemError, AssertionError) as error:\n print(\"WARNING:Unexpected connection\")\n continue\n\n command = content = ID = ''\n\n try:\n command = command_parts[0]\n content = command_parts[1]\n ID = command_parts[2]\n except (IndexError ,ValueError):\n print(\"WARNING:Command syntax invalid: \" + data_string)\n continue\n\n try:\n connection.users[ID].command_list.append( (command, content) )\n connection.users[ID].last_receive_time = time.time()\n except KeyError:\n print(\"WARNING:Command ID invalid: \" + data_string)\n\n\nreceiver=threading.Thread(target=receiver_func,name='receiver')\nreceiver.start()\n","sub_path":"HYJgame0.0.1/Server0.0.1/connection/receive.py","file_name":"receive.py","file_ext":"py","file_size_in_byte":1692,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"6992274","text":"\"\"\" \nPrograma para calcular las cita para vacunación.\nIncorpora al modulo citas.py, pruebas.py y usuarios.py \nCarolina Rosa Pulido Gómez\nMayo 23 de 2021 \"\"\"\n#-------------------------------------------\n# IMPORT MÓDULOS/LIBRERÍAS\n# -------------------------------------------\nimport citas as ci\nimport usuarios as us\nimport datetime as dt\nimport dateutil.relativedelta as rdelta\n\n#-------------------------------------------\n# DEFINICIÓN DE FUNCIONES\n# -------------------------------------------\n\"\"\"\n\"\" Registrar los datos del usuario\n\"\" @return:\n\"\" nombre (str) Nombre del usuario\n\"\" apellido (str) Apellido del usuario\n\"\" cedula (int) Cédula del usuario\n\"\" genero (str) Género del usuario\n\"\" edad 
(dateutil.relativedelta.relativedelta)\n\"\" Edad del usuario\n\"\" sangre (str) Tipo de sangre del usuario\n\"\" peso (float) Peso del usuario\n\"\" fNac (datetime.date) \n\"\" Fecha de Nacimiento\n\"\" fReg (datetime.date) \n\"\" Fecha de Registro (hoy)\n\"\" clave (str) Clave del usuario\n\"\"\"\ndef registrarDatos():\n print(\"===========================================================\\nRegistro del Usuario\\n===========================================================\\nPor favor ingrese los siguientes datos:\\n\")\n nombre, apellido, cedula, genero, sangre, peso, fNac = us.registrarDatos()\n\n print(\"\"\"\\nNOTA: Su clave es las dos primeras letras \n de su primer nombre, seguido de las dos primeras \n letras de su apellido, seguido de su número de \n cédula sin espacios, punto o comas.\\n\"\"\")\n\n print(\"===========================================================\")\n\n return nombre, apellido, cedula, genero, sangre, peso, fNac\n\n\n\"\"\"\n\"\" Asignar la cita del usuario\n\"\" @param:\n\"\" clave (str) Clave del usuario\n\"\" nombre (str) Nombre del usuario\n\"\" apellido (str) Apellido del usuario\n\"\" cedula (int) Cédula del usuario\n\"\" genero (str) Género del usuario\n\"\" edad (dateutil.relativedelta.relativedelta)\n\"\" Edad del usuario\n\"\" sangre (str) Tipo de sangre del usuario\n\"\" peso (float) Peso del usuario\n\"\" fReg (datetime.date) \n\"\" Fecha de Registro (hoy)\n\"\" fNac (datetime.date) \n\"\" Fecha de Nacimiento\n\"\" @return:\n\"\" fCita (datetime.date) \n\"\" Fecha de la Cita\n\"\"\"\ndef asignarCita(claveReg, cedula, genero, edad, sangre, peso, fReg, fNac):\n claveIng = input(\"\"\"Para proceder a asignar su cita, \n ingrese su clave: \\t\"\"\")\n fCita = \"\"\n if validarClave(claveIng, claveReg):\n fCita = ci.calcularFechaCita(genero, edad, sangre, peso, cedula, fReg, fNac)\n return fCita\n\n\"\"\"\n\"\" Validar que la clave ingresada sea la del usuario\n\"\" @param:\n\"\" claveIng (str) Clave ingresada por el usuario\n\"\" claveReg (str) Clave generada en el registro \"\" del usuario\n\"\" @return:\n\"\" True/False (bool) True si la clave coincide\n\"\"\"\ndef validarClave(claveIngresada, claveRegistro):\n if claveIngresada == claveRegistro:\n return True\n else:\n return False\n\n\ndef imprimirFechaCita(fCita, fReg):\n if fCita != \"\":\n tiempo = rdelta.relativedelta(fCita, fReg)\n print(\"\\n===========================================================\\nCita Agendada Exitosamente! \\nLa cita quedó agendada para el {0}.\\n\\tSu cita será en {1.months} meses y {1.days} días.\".format(fCita, tiempo),\"\\n===========================================================\")\n else:\n print(\"\\n===========================================================\\nLa cita NO pudo se agendada! 
\\n===========================================================\")\n\n\n#-------------------------------------------\n# EJECUCIÓN DEL PROGRAMA\n# -------------------------------------------\n# Registro de los datos del usuario\nelNombre, elApellido, laCedula, elGenero, elTipoSangre, elPeso, laFechaNacimiento = registrarDatos()\n\nlaFechaRegistro = dt.date.today()\nlaEdad = us.calcularEdad(laFechaRegistro, laFechaNacimiento)\n\nlaClaveReg = us.generarClave(elNombre, elApellido, laCedula)\n\n# Asignar la fecha de la cita\nlaFechaCita = asignarCita(laClaveReg, laCedula, elGenero, laEdad, elTipoSangre, elPeso, laFechaRegistro, laFechaNacimiento)\n\n# Imprimir resultado\nimprimirFechaCita(laFechaCita, laFechaRegistro)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4395,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"194025179","text":"from django.shortcuts import render\nfrom django.views.generic import View\nfrom django.core.paginator import Paginator, EmptyPage\n\nfrom kimo.models import Utilizator\n\n\nclass CRUD(View):\n def get(self, request):\n return render(request, 'crud/crud.html')\n\n\nclass CrudInsert(View):\n def get(self, request):\n return render(request, 'crud/insert.html', context={\n 'id': Utilizator.objects.all().count() + 1\n })\n\n def post(self, request):\n e = 'Success'\n try:\n Utilizator.objects.create(\n nume=request.POST.get('nume'),\n prenume=request.POST.get('prenume'),\n subscriptie='FREE' if 'sw_licenta' not in request.POST else 'PAID',\n adresa=request.POST.get('adresa'),\n telefon=request.POST.get('telefon'),\n username=request.POST.get('username'),\n parola=request.POST.get('parola'),\n expirare=request.POST.get('expirare'),\n id=Utilizator.objects.all().count() + 1,\n )\n except Exception as exc:\n e = exc\n return render(request, 'crud/insert.html', context={\n 'error': e,\n 'id': Utilizator.objects.all().count() + 1\n })\n\n\nclass CrudRead(View):\n @staticmethod\n def select_from_table(request):\n rows = int(request.GET.get('rows', 50))\n page = int(request.GET.get('page', 1))\n\n _dict = dict()\n for _ in request.session['POST_DATA']:\n if request.session['POST_DATA'][_]:\n _dict[_] = request.session['POST_DATA'][_]\n\n if 'sw_licenta' in _dict:\n _dict['subscriptie'] = 'PAID'\n del _dict['sw_licenta']\n else:\n _dict['subscriptie'] = 'FREE'\n\n del _dict['csrfmiddlewaretoken']\n\n r = Utilizator.objects.filter(**_dict)\n\n paginator = Paginator(r, rows)\n try:\n users = paginator.page(page)\n except EmptyPage:\n if page < 1:\n users = paginator.page(1)\n else:\n users = paginator.page(paginator.num_pages)\n\n return render(request, 'crud/read_response.html', context={\n \"query_result\": users,\n \"page\": page,\n \"rows\": rows,\n })\n\n def get(self, request):\n if not request.GET.get('rows') and not request.GET.get('page') \\\n or 'POST_DATA' not in request.session:\n return render(request, 'crud/read_form.html')\n return self.select_from_table(request)\n\n def post(self, request):\n request.session['POST_DATA'] = request.POST\n return self.select_from_table(request)\n\n\nclass CrudUpdate(View):\n def get(self, request):\n return render(request, 'crud/update.html')\n\n def post(self, request):\n if not request.POST.get('id'):\n return render(request, 'crud/update.html', context={\n \"error\": \"id not given\"\n })\n Utilizator.objects.filter(id=request.POST.get('id')).update(\n nume=request.POST.get('nume'),\n prenume=request.POST.get('prenume'),\n 
subscriptie='FREE' if 'sw_licenta' not in request.POST else 'PAID',\n adresa=request.POST.get('adresa'),\n telefon=request.POST.get('telefon'),\n username=request.POST.get('username'),\n parola=request.POST.get('parola'),\n expirare=request.POST.get('expirare'),\n )\n return render(request, 'crud/update.html', context={\n \"error\": \"Success\"\n })\n\n\nclass CrudDelete(View):\n def get(self, request):\n return render(request, 'crud/delete.html')\n\n def post(self, request):\n _id = request.POST.get('id')\n try:\n\n er = Utilizator.objects.get(id=_id)\n res = str(er.id) + ' ' + str(er.nume) + ' ' + str(er.prenume) + ' ' +\\\n str(er.username)\n er.delete()\n except Exception as exc:\n res = exc\n return render(request, 'crud/delete.html', context={\n 'error': res\n })\n\n\nclass ExportTable(View):\n def get(self, request):\n return render(request, 'crud/export.html')\n\n def post(self, request):\n export_name = request.POST.get(\"export\")\n print(export_name)\n return render(request, 'crud/export.html', context={\n 'error': export_name\n })\n","sub_path":"crud/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4442,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"47320897","text":"import argparse\n\nimport hail as hl\n\n\np = argparse.ArgumentParser()\np.add_argument(\"--vcf-url\", help=\"URL of gnomAD SV VCF\", required=True)\np.add_argument(\"--output-url\", help=\"URL to write Hail table to\", required=True)\nargs = p.parse_args()\n\nhl.init(log=\"/tmp/hail.log\")\n\nds = hl.import_vcf(args.vcf_url, force_bgz=True).rows()\n\n\ndef xpos(chrom, position):\n contig_number = (\n hl.case()\n .when(chrom == \"X\", 23)\n .when(chrom == \"Y\", 24)\n .when(chrom[0] == \"M\", 25)\n .default(hl.int(chrom))\n )\n return hl.int64(contig_number) * 1_000_000_000 + position\n\n\ntop_level_info_fields = [\n \"ALGORITHMS\",\n \"CPX_INTERVALS\",\n \"CPX_TYPE\",\n \"EVIDENCE\",\n \"SOURCE\",\n \"STRANDS\",\n \"UNRESOLVED_TYPE\",\n \"PCRPLUS_DEPLETED\",\n \"PESR_GT_OVERDISPERSION\",\n]\n\nprotein_coding_consequences = [\n \"LOF\",\n \"DUP_LOF\",\n \"COPY_GAIN\",\n \"DUP_PARTIAL\",\n \"MSV_EXON_OVR\",\n \"INTRONIC\",\n \"INV_SPAN\",\n \"UTR\",\n \"NEAREST_TSS\",\n \"INTERGENIC\",\n \"PROMOTER\",\n]\n\nper_population_fields = [\n \"AN\",\n \"AC\",\n \"AF\",\n \"N_BI_GENOS\",\n \"N_HOMREF\",\n \"N_HET\",\n \"N_HOMALT\",\n \"FREQ_HOMREF\",\n \"FREQ_HET\",\n \"FREQ_HOMALT\",\n]\n\npopulations = [\"AFR\", \"AMR\", \"EAS\", \"EUR\", \"OTH\"]\n\n\nds = ds.annotate(**{field.lower(): ds.info[field] for field in top_level_info_fields})\n\nds = ds.annotate(\n variant_id=ds.rsid.replace(\"^gnomAD_v2_\", \"\"),\n chrom=ds.locus.contig,\n pos=ds.locus.position,\n xpos=xpos(ds.locus.contig, ds.locus.position),\n end_chrom=ds.info.CHR2,\n end_pos=ds.info.END,\n end_xpos=xpos(ds.info.CHR2, ds.info.END),\n length=ds.info.SVLEN,\n type=ds.info.SVTYPE,\n alts=ds.alleles[1:],\n)\n\n# MULTIALLELIC should not be used as a quality filter in the browser\nds = ds.annotate(filters=ds.filters.difference(hl.set([\"MULTIALLELIC\"])))\n\n# Group gene lists for all consequences in a struct\nds = ds.annotate(\n consequences=hl.struct(\n **{\n csq.lower(): ds.info[f\"PROTEIN_CODING__{csq}\"]\n for csq in protein_coding_consequences\n if csq != \"INTERGENIC\" and csq != \"NEAREST_TSS\"\n }\n )\n)\nds = ds.annotate(intergenic=ds.info.PROTEIN_CODING__INTERGENIC)\n\n# Collect set of all genes for which a variant has a consequence\nall_genes = hl.empty_array(hl.tstr)\nfor 
csq in ds.consequences.dtype.fields:\n all_genes = all_genes.extend(\n hl.or_else(ds.consequences[csq.lower()], hl.empty_array(hl.tstr))\n )\nds = ds.annotate(genes=hl.set(all_genes))\n\n# Group per-population values in a struct for each field\ndef expr_for_per_population_field(row, field):\n return hl.struct(\n **dict(\n ((pop.lower(), row.info[f\"{pop}_{field}\"]) for pop in populations),\n total=row.info[field],\n )\n )\n\n\nds = ds.annotate(\n **{\n field.lower(): expr_for_per_population_field(ds, field)\n for field in per_population_fields\n }\n)\n\n\n# For MCNVs, sum AC/AF for all alt alleles except CN=2\ndef total_ac_or_af(variant, field):\n return hl.cond(\n variant.type == \"MCNV\",\n hl.bind(\n lambda cn2_index: hl.bind(\n lambda values_to_sum: values_to_sum.fold(lambda acc, n: acc + n, 0),\n hl.cond(\n hl.is_defined(cn2_index),\n field[0:cn2_index].extend(field[cn2_index + 1 :]),\n field,\n ),\n ),\n hl.zip_with_index(variant.alts).find(lambda t: t[1] == \"<CN=2>\")[0],\n ),\n field[0],\n )\n\n\nds = ds.annotate(\n mcnv_ac=hl.or_missing(ds.type == \"MCNV\", ds.ac),\n mcnv_af=hl.or_missing(ds.type == \"MCNV\", ds.af),\n)\nds = ds.annotate(\n **{\n f: ds[f].annotate(\n **{pop: total_ac_or_af(ds, ds[f][pop]) for pop in ds[f].dtype.fields}\n )\n for f in [\"ac\", \"af\"]\n }\n)\nds = ds.annotate(af=ds.af.annotate(popmax=ds.info.POPMAX_AF))\n\nds = ds.key_by().drop(\"locus\", \"alleles\", \"info\")\n\nds = ds.repartition(8, shuffle=True)\n\nds.write(args.output_url)\n","sub_path":"projects/gnomad/data/prepare_gnomad_svs_for_browser.py","file_name":"prepare_gnomad_svs_for_browser.py","file_ext":"py","file_size_in_byte":4002,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"42766293","text":"import bpy, bmesh\r\nimport random\r\nfrom mathutils import Color\r\nfrom . import infobar\r\nfrom . materials.capaint import carpaint_material\r\nfrom . materials.emission_glow import emission_glow_material\r\nfrom ... preferences import get_preferences\r\nfrom ... ui_framework.master import Master\r\nfrom ... utility.base_modal_controls import Base_Modal_Controls\r\n\r\n# Cursor Warp imports\r\nfrom ... utils.toggle_view3d_panels import collapse_3D_view_panels\r\nfrom ... utils.modal_frame_drawing import draw_modal_frame\r\nfrom ... utils.cursor_warp import mouse_warp\r\nfrom ... 
addon.utility import method_handler\r\n\r\n\r\nclass HOPS_OT_MaterialScroll(bpy.types.Operator):\r\n bl_idname = \"hops.material_scroll\"\r\n bl_label = \"Material Scroll\"\r\n bl_options = {'REGISTER', 'UNDO', 'GRAB_CURSOR'}\r\n bl_description = \"\"\"Interactively scroll through materials\r\n \r\nCtrl + LMB - Blank material scroll\r\n(Generate new random material each scroll)\r\n\r\nShift + LMB - Non-Destructive material scroll\r\n(non-destructive / keeps indice connections)\r\n\r\nPress H for help\r\n\"\"\"\r\n\r\n @classmethod\r\n def poll(cls, context):\r\n obj = context.active_object\r\n return obj and obj.type in {'MESH', 'CURVE', 'SURFACE', 'META', 'FONT'} and obj.mode in {'OBJECT', 'EDIT'}\r\n\r\n\r\n def invoke(self, context, event):\r\n \r\n self.copy_view = get_preferences().behavior.mat_viewport\r\n\r\n #self.scroll_modes = {'NORMAL', 'BLANK', 'DESTRUCTIVE'} enum prop placeholder\r\n self.scroll_mode = 'NORMAL'\r\n if event.ctrl:\r\n self.scroll_mode = 'BLANK'\r\n elif event.shift:\r\n self.scroll_mode = 'DESTRUCTIVE'\r\n hide_flag = not context.preferences.filepaths.show_hidden_files_datablocks \r\n self.materials = [mat.name for mat in bpy.data.materials if not mat.grease_pencil and (not mat.name.startswith(\".\") or hide_flag)]\r\n if self.scroll_mode != 'BLANK' and not self.materials:\r\n self.report({'INFO'}, \"No materials found\")\r\n return {'CANCELLED'}\r\n\r\n self.overlay_orig = context.space_data.overlay.show_overlays\r\n context.space_data.overlay.show_overlays = False\r\n\r\n self.mode = context.mode\r\n self.active = context.active_object\r\n self.edit_mode_init = False\r\n if self.mode == 'EDIT_MESH':\r\n self.active.select_set(True)\r\n self.edit_mode_init = True\r\n bpy.ops.object.mode_set(mode = 'OBJECT')\r\n\r\n self.objects = [obj for obj in context.selected_objects if obj.type in {'MESH', 'CURVE', 'SURFACE', 'META', 'FONT'} ]\r\n\r\n if not self.objects:\r\n self.report({'INFO'}, \"No valid object is selected\")\r\n return {'CANCELLED'}\r\n #grab whatever object if active was excluded from valid objects, for whatever reason \r\n if self.active not in self.objects:\r\n self.active = self.objects[0] \r\n\r\n #backup and junk\r\n self.slot_backup = dict()\r\n self.created_mats = set()\r\n self.created_slots = []\r\n self.data_index_back = dict()\r\n\r\n #sroll stuff\r\n self.material_types = ['PRINCIPLED', 'CARPAINT', 'EMISSION']\r\n self.material_type = 'PRINCIPLED'\r\n self.force_enable = set()\r\n self.slot_modes = ['ACTIVE', 'ALL', 'NOT ACTIVE']\r\n self.slot_index =0\r\n self.slot_mode = 'ACTIVE'\r\n self.enable_color = False\r\n \r\n self.obj_slots = {}\r\n \r\n #mappring for material slots to object index, their original material, and current material index\r\n for o in self.objects:\r\n \r\n back = [s.material.name if s.material else None for s in o.material_slots ]\r\n self.slot_backup.update ( {o:back} )\r\n if self.mode == 'EDIT_MESH' and self.scroll_mode == 'BLANK' :\r\n\r\n self.data_index_back.update( {o:[p.material_index for p in o.data.polygons]} )\r\n \r\n self.random_mat()\r\n o.data.materials.append(bpy.data.materials[ self.materials[-1] ])\r\n index = len (o.data.materials) -1\r\n\r\n for polygon in o.data.polygons:\r\n if polygon.select:\r\n polygon.material_index = index\r\n\r\n o.active_material_index = index\r\n self.created_slots.append(o)\r\n\r\n if not o.material_slots:\r\n o.data.materials.append(None)\r\n self.created_slots.append(o)\r\n\r\n piece = [self.materials.index(s.material.name) if s.material else 0 for s in 
o.material_slots ] \r\n self.obj_slots.update({o:piece})\r\n\r\n # filepathprefs = bpy.context.preferences.filepaths\r\n # for material in [mat for mat in bpy.data.materials if not mat.name.startswith('.')] if filepathprefs.show_hidden_files_datablocks else bpy.data.materials:\r\n # self.materials.append(material)\r\n\r\n\r\n # Base Systems\r\n self.master = Master(context=context)\r\n self.master.only_use_fast_ui = True\r\n self.base_controls = Base_Modal_Controls(context, event)\r\n self.original_tool_shelf, self.original_n_panel = collapse_3D_view_panels()\r\n self.draw_handle = bpy.types.SpaceView3D.draw_handler_add(self.safe_draw_shader, (context,), 'WINDOW', 'POST_PIXEL')\r\n\r\n context.window_manager.modal_handler_add(self)\r\n #infobar.initiate(self)\r\n return {\"RUNNING_MODAL\"}\r\n\r\n\r\n def modal(self, context, event):\r\n\r\n # Base Systems\r\n self.master.receive_event(event=event)\r\n self.base_controls.update(context, event)\r\n mouse_warp(context, event)\r\n \r\n if self.base_controls.pass_through:\r\n return {'PASS_THROUGH'}\r\n\r\n elif event.type == 'Z' and (event.shift or event.alt):\r\n return {'PASS_THROUGH'}\r\n\r\n elif self.base_controls.confirm:\r\n if self.overlay_orig:\r\n context.space_data.overlay.show_overlays = True\r\n self.remove_shader()\r\n collapse_3D_view_panels(self.original_tool_shelf, self.original_n_panel)\r\n self.master.run_fade()\r\n self.clean_mats()\r\n self.report({'INFO'}, \"Finished\")\r\n if self.edit_mode_init:\r\n bpy.ops.object.mode_set(mode = 'EDIT')\r\n return {'FINISHED'}\r\n\r\n elif self.base_controls.cancel:\r\n self.restore_slots()\r\n if self.overlay_orig:\r\n context.space_data.overlay.show_overlays = True\r\n self.remove_shader()\r\n collapse_3D_view_panels(self.original_tool_shelf, self.original_n_panel)\r\n self.master.run_fade()\r\n self.clean_mats()\r\n self.report({'INFO'}, \"Cancelled\")\r\n if self.edit_mode_init:\r\n bpy.ops.object.mode_set(mode = 'EDIT')\r\n return {'CANCELLED'}\r\n\r\n elif self.base_controls.scroll or (event.type in {'Z', 'X'} and event.value == 'PRESS'):\r\n direction = -1 if (event.type in {'X'} or self.base_controls.scroll == -1) else 1\r\n\r\n if event.shift:\r\n if event.type in {'NUMPAD_PLUS', 'EQUAL','NUMPAD_MINUS', 'MINUS'}:\r\n direction*=-1\r\n for o in self.objects:\r\n max_index = len (o.material_slots) -1\r\n index = o.active_material_index\r\n index -= direction\r\n index = 0 if index > max_index else index\r\n index = max_index if index < 0 else index\r\n o.active_material_index = index\r\n else:\r\n \r\n self.material_scroll( direction = direction)\r\n\r\n elif event.type == 'T' and event.value == 'PRESS': #and self.blank_material_scroll:\r\n if self.scroll_mode == 'NORMAL':\r\n self.scroll_mode = 'BLANK'\r\n #types = ['PRINCIPLED', 'EMISSION', 'GLASS', 'CARPAINT']\r\n types = self.material_types\r\n self.material_type = types[(types.index(self.material_type) + 1) % len(types)]\r\n self.report({'INFO'}, f\"Material Type: {self.material_type.capitalize()}\")\r\n\r\n elif event.type == 'B' and event.value == 'PRESS':\r\n self.scroll_mode = 'BLANK' if self.scroll_mode != 'BLANK' else 'NORMAL' \r\n self.report({'INFO'}, f\"Scroll Mode: {self.scroll_mode}\")\r\n\r\n elif event.type == 'D' and event.value == 'PRESS':\r\n self.scroll_mode = 'DESTRUCTIVE' if self.scroll_mode != 'DESTRUCTIVE' else 'NORMAL'\r\n self.report({'INFO'}, f\"Scroll Mode: {self.scroll_mode}\")\r\n\r\n elif event.type == 'R' and event.value == 'PRESS':\r\n if self.scroll_mode != 'DESTRUCTIVE':\r\n 
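# pick a random existing material for every affected slot\r\n                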
self.randomize_mats()\r\n self.report({'INFO'}, \"Randmized ALL\")\r\n\r\n elif event.type == 'A' and event.value == 'PRESS':\r\n self.slot_index = self.slot_index +1 if self.slot_index<2 else 0\r\n self.slot_mode = self.slot_modes[self.slot_index]\r\n self.report({'INFO'}, F\"Affected slots: {self.slot_mode}\")\r\n \r\n elif self.scroll_mode != 'NORMAL' and event.type == 'C' and event.value == 'PRESS':\r\n self.enable_color = not self.enable_color\r\n msg = \"ON\" if self.enable_color else \"OFF\"\r\n self.report({'INFO'}, F\"COLOR: {msg}\")\r\n\r\n elif self.base_controls.tilde and event.shift == True:\r\n context.space_data.overlay.show_overlays = not context.space_data.overlay.show_overlays\r\n\r\n elif event.type == 'ONE' and event.value == 'PRESS':\r\n if 0 not in self.force_enable :\r\n self.force_enable.add(0)\r\n else :\r\n self.force_enable.discard(0)\r\n elif event.type == 'TWO' and event.value == 'PRESS':\r\n if 1 not in self.force_enable :\r\n self.force_enable.add(1)\r\n else :\r\n self.force_enable.discard(1)\r\n\r\n elif event.type == 'THREE' and event.value == 'PRESS':\r\n if 2 not in self.force_enable :\r\n self.force_enable.add(2)\r\n else :\r\n self.force_enable.discard(2)\r\n\r\n elif event.type == 'FOUR' and event.value == 'PRESS':\r\n if 3 not in self.force_enable :\r\n self.force_enable.add(3)\r\n else :\r\n self.force_enable.discard(3)\r\n\r\n elif event.type == 'V' and event.value == 'PRESS':\r\n self.clean_mats()\r\n context.area.header_text_set(text=None)\r\n self.remove_shader()\r\n collapse_3D_view_panels(self.original_tool_shelf, self.original_n_panel)\r\n self.master.run_fade()\r\n bpy.ops.hops.adjust_viewport('INVOKE_DEFAULT')\r\n return {'FINISHED'}\r\n\r\n self.draw_master(context=context)\r\n context.area.tag_redraw()\r\n return {\"RUNNING_MODAL\"}\r\n\r\n ####################################################\r\n # CURSOR WARP\r\n ####################################################\r\n\r\n def safe_draw_shader(self, context):\r\n method_handler(self.draw_shader,\r\n arguments = (context,),\r\n identifier = 'UI Framework',\r\n exit_method = self.remove_shader)\r\n\r\n\r\n def remove_shader(self):\r\n '''Remove shader handle.'''\r\n\r\n if self.draw_handle:\r\n self.draw_handle = bpy.types.SpaceView3D.draw_handler_remove(self.draw_handle, \"WINDOW\")\r\n\r\n\r\n def draw_shader(self, context):\r\n '''Draw shader handle.'''\r\n\r\n draw_modal_frame(context)\r\n\r\n\r\n # def remove_slot(self):\r\n # if self.active.mode == 'EDIT':\r\n # bpy.ops.object.editmode_toggle()\r\n # bpy.ops.object.material_slot_remove()\r\n # bpy.ops.object.editmode_toggle()\r\n \r\n def material_scroll(self, direction =0):\r\n max_index = len(self.materials) -1\r\n \r\n if max_index < 0:\r\n if self.scroll_mode == 'BLANK' and direction > 0:\r\n max_index = 0\r\n else:\r\n return\r\n\r\n dup_filter =set()\r\n for obj, slot_data in self.obj_slots.items():\r\n \r\n allowed = set( range( len(obj.material_slots) ) )\r\n if self.slot_mode == 'ACTIVE':\r\n allowed = {obj.active_material_index}\r\n allowed.update(self.force_enable)\r\n elif self.slot_mode == \"NOT ACTIVE\" :\r\n allowed.discard (obj.active_material_index)\r\n\r\n for index , slot in enumerate(obj.material_slots):\r\n \r\n if index not in allowed:\r\n continue\r\n if self.scroll_mode == 'DESTRUCTIVE':\r\n if slot.material:\r\n if slot.material.name not in dup_filter:\r\n dup_filter.add (slot.material.name)\r\n else:\r\n continue\r\n self.random_mat(slot.material)\r\n else:\r\n self.random_mat()\r\n slot_data[index] = 
len(self.materials) -1\r\n slot.material = bpy.data.materials[self.materials[-1]]\r\n continue\r\n\r\n if self.scroll_mode == 'BLANK' and (slot_data[index]+direction>max_index):# direction ==1:\r\n self.random_mat()\r\n slot_data[index] = len(self.materials) -1\r\n slot.material = bpy.data.materials[self.materials[-1]]\r\n continue\r\n\r\n scroll = slot_data[index] +direction\r\n scroll = 0 if scroll > max_index else scroll\r\n scroll = max_index if scroll<0 else scroll\r\n slot_data[index] = scroll\r\n slot.material = bpy.data.materials[self.materials[scroll]]\r\n\r\n\r\n def randomize_mats(self):\r\n max_index = len(self.materials) -1\r\n for obj, slot_data in self.obj_slots.items():\r\n\r\n allowed = set( range( len(obj.material_slots) ) )\r\n if self.slot_mode == 'ACTIVE':\r\n allowed = {obj.active_material_index}\r\n allowed.update(self.force_enable)\r\n elif self.slot_mode == \"NOT ACTIVE\" :\r\n allowed.discard (obj.active_material_index)\r\n\r\n for index , slot in enumerate(obj.material_slots):\r\n if index not in allowed:\r\n continue\r\n mat_index = random.randint(0, max_index)\r\n slot_data[index] = mat_index\r\n slot.material = bpy.data.materials[ self.materials[mat_index] ]\r\n \r\n\r\n def clean_mats(self):\r\n\r\n for mat_name in self.created_mats:\r\n material = bpy.data.materials[mat_name]\r\n if not material.users:\r\n bpy.data.materials.remove(material )\r\n \r\n del material\r\n del self.slot_backup \r\n del self.created_mats \r\n del self.created_slots \r\n del self.materials\r\n del self.data_index_back\r\n del self.objects\r\n\r\n\r\n def restore_slots(self):\r\n \r\n for obj in self.objects:\r\n back_list = self.slot_backup [obj]\r\n for slot, mat_name in zip(obj.material_slots , back_list ):\r\n slot.material = bpy.data.materials[mat_name] if mat_name else None\r\n\r\n if self.data_index_back:\r\n for obj, index_list in self.data_index_back.items():\r\n for poly , index in zip (obj.data.polygons , index_list):\r\n poly.material_index = index\r\n \r\n for obj in self.created_slots:\r\n obj.data.materials.pop()\r\n\r\n\r\n def draw_master(self, context):\r\n\r\n # Start\r\n self.master.setup()\r\n\r\n ########################\r\n # Fast UI\r\n ########################\r\n\r\n if self.master.should_build_fast_ui():\r\n \r\n mods_list = list(reversed([(i+1, mat.name) for i, mat in enumerate(self.active.material_slots)]))\r\n active_mod = self.active.active_material_index +1\r\n active_slot = F\"{active_mod}/{len(self.active.material_slots)}\"\r\n force_enabld = ''\r\n if not len (self.force_enable):\r\n force_enabld = None \r\n else:\r\n force_enabld = sorted([e+1 for e in self.force_enable])\r\n \r\n #Main\r\n win_list = []\r\n if get_preferences().ui.Hops_modal_fast_ui_loc_options != 1: #Fast Floating\r\n win_list.append(len(self.materials))\r\n if self.scroll_mode != 'NORMAL':\r\n win_list.append(self.material_type)\r\n else:\r\n win_list.append(self.scroll_mode)\r\n win_list.append(F'{self.slot_mode} : {active_slot}')\r\n win_list.append(force_enabld)\r\n else:\r\n win_list.append(F'Materials: {len(self.materials)}')\r\n win_list.append(self.scroll_mode)\r\n if self.scroll_mode != 'NORMAL':\r\n win_list.append(F\"Type : {self.material_type} \")\r\n if self.material_type == 'PRINCIPLED':\r\n msg = \"ON\" if self.enable_color else \"OFF\"\r\n win_list.append(F\"Color :{msg} \")\r\n\r\n win_list.append(F\"Slots: {self.slot_mode}\")\r\n win_list.append(F\"Active Slot: {active_slot}\")\r\n if self.slot_mode != 'ALL': \r\n win_list.append(F\"Force Enabled: 
{force_enabld}\" )\r\n\r\n #win_list.append(f\"{self.index}\")\r\n # win_list.append(f\"{self.materials[self.index].name}\")\r\n\r\n # Help\r\n help_items = {\"GLOBAL\" : [], \"STANDARD\" : []}\r\n\r\n help_items[\"GLOBAL\"] = [\r\n (\"M\", \"Toggle mods list\"),\r\n (\"H\", \"Toggle help\"),\r\n (\"~\", \"Toggle UI Display Type\"),\r\n (\"O\", \"Toggle viewport rendering\")]\r\n\r\n help_items[\"STANDARD\"] = [\r\n (\"Shift+Scroll / Z / X\", \"Increment Active Slot\"),\r\n (\"Scroll\", \"Increment Material\"),\r\n (\"Z / X\", \"Increment Material\"),\r\n (\"V\", \"Viewport Scroll Exit\"),\r\n (\"A\", \"Toggle affected slots\"),\r\n (\"R\", \"Randomize ALL\"),\r\n (\"C\", \"Toggle Color\"),\r\n (\"D\", \"Toggle Non-Destructive Blank Scroll\"),\r\n (\"B\", \"Toggle Blank Material Scroll\"),\r\n (\"T\", \"Cycle Blank Material Type\")]\r\n\r\n if self.slot_mode == 'ACTIVE': \r\n help_items[\"STANDARD\"].append([\"1-4\", \"Force Enable Slots\"])\r\n\r\n # Mods\r\n # active_mod = \"\"\r\n # filepathprefs = bpy.context.preferences.filepaths\r\n # for material in [mat for mat in bpy.data.materials if not mat.name.startswith('.')] if filepathprefs.show_hidden_files_datablocks else bpy.data.materials:\r\n # if self.materials[self.index] == material:\r\n # active_mod = self.materials[self.index].name\r\n # mods_list.append([material.name_full, \"\"])\r\n\r\n self.master.receive_fast_ui(\r\n win_list=win_list,\r\n help_list=help_items,\r\n image=\"InteractiveBoolean\",\r\n mods_list=mods_list,\r\n active_mod_name=active_mod,\r\n mods_label_text=\"Materials\",\r\n number_mods=False)\r\n\r\n # Finished\r\n self.master.finished()\r\n\r\n\r\n def random_mat(self , material = None):\r\n color = 1 if self.enable_color else 0\r\n if self.material_type == 'PRINCIPLED':\r\n new_mat = random_principled(material=material, color_prob=color, copy_view = self.copy_view)\r\n elif self.material_type == 'CARPAINT':\r\n new_mat = random_carpaint(material=material, copy_view = self.copy_view)\r\n elif self.material_type == 'EMISSION':\r\n new_mat = random_emit(material=material, copy_view = self.copy_view)\r\n #new_mat.node_tree.nodes.update()\r\n if self.scroll_mode != 'DESTRUCTIVE':\r\n self.created_mats.add(new_mat.name)\r\n self.materials.append(new_mat.name)\r\n\r\n\r\ndef random_principled (material = None, name = 'Material', metal_prob = 0.8, rough_min = 0.01, \r\n rough_max = 0.4, color_prob = 0.2, clear_prob = 0.2, \r\n val_min = 0.1, val_max =0.8, copy_view = False ):\r\n\r\n material, principled = add_pricipled(material=material)\r\n \r\n metal = 1 if roll(metal_prob) else 0\r\n clearcoat = 1 if roll(clear_prob) else 0\r\n clearcoat_rough = random.uniform(rough_min, rough_max) if clearcoat else 0\r\n roughness = random.uniform(rough_min, rough_max)\r\n roll_color = roll (color_prob)\r\n \r\n if roll_color:\r\n color = random_color (col_min = 0.1, col_max = 0.7)\r\n \r\n else:\r\n color = random_color (color= False, grey_min = 0.1, grey_max= 0.4 ) \r\n\r\n #viewport\r\n if copy_view:\r\n material.diffuse_color = color\r\n material.metallic = metal\r\n material.roughness = roughness\r\n else:\r\n material.diffuse_color = random_color()\r\n\r\n principled.inputs['Base Color'].default_value = color\r\n principled.inputs['Metallic'].default_value = metal\r\n principled.inputs['Roughness'].default_value = roughness\r\n principled.inputs['Clearcoat'].default_value = clearcoat\r\n principled.inputs['Clearcoat Roughness'].default_value = clearcoat_rough\r\n\r\n return material\r\n\r\n\r\ndef random_carpaint(material 
= None, name = 'Carpaint', metal_prob = 0.8, rough_min = 0.01, \r\n rough_max = 0.4, color_prob = 1, clear_prob = 0.2, copy_view = False):\r\n\r\n material, carpaint_shader = carpaint_material(material = material)\r\n\r\n metal = 1 if roll(metal_prob) else 0\r\n clearcoat = 1 if roll(clear_prob) else 0\r\n clearcoat_rough = random.uniform(rough_min, rough_max) if clearcoat else 0\r\n roughness = random.uniform(rough_min, rough_max)\r\n color = random_color ()\r\n\r\n #viewport\r\n if copy_view:\r\n material.diffuse_color = color\r\n material.metallic = metal\r\n material.roughness = roughness\r\n else:\r\n material.diffuse_color = random_color()\r\n\r\n \r\n carpaint_shader.inputs[\"Hue Variation\"].default_value = random.uniform(0,1)\r\n carpaint_shader.inputs[\"Hue Shift Base Value\"].default_value = 0.5\r\n carpaint_shader.inputs[\"Saturation Variation\"].default_value = random.uniform(0,1)\r\n carpaint_shader.inputs[\"Saturation Base Value\"].default_value = 1.0\r\n carpaint_shader.inputs[\"Brightness Variation\"].default_value = random.uniform(0,1)\r\n carpaint_shader.inputs[\"Brightness Value\"].default_value = random.uniform(0,1)\r\n carpaint_shader.inputs[\"Metallic\"].default_value = random.randint(0,1)\r\n carpaint_shader.inputs[\"Flake Roughness Minimum\"].default_value = 0.23999999463558197\r\n carpaint_shader.inputs[\"Flake Roughness Maximum\"].default_value = 0.8799999952316284\r\n carpaint_shader.inputs[\"Flake Scale\"].default_value = 4000.0\r\n carpaint_shader.inputs[\"Clearcoat\"].default_value = clearcoat\r\n carpaint_shader.inputs[\"Clearcoat Roughness\"].default_value = clearcoat_rough\r\n carpaint_shader.inputs[\"Randomness\"].default_value = 0.0\r\n return material\r\n\r\n\r\ndef random_emit(material = None, name = 'Emission', pulse = True ,\r\n cycle_count = 4, copy_view = False):\r\n material, emission_glow = emission_glow_material(material=material)\r\n\r\n cycle_count = cycle_count if pulse else 0\r\n \r\n col = Color()\r\n h = random.uniform(0,1)\r\n s = random.uniform(0,1)\r\n v = 1\r\n col.hsv = (h,s,v)\r\n color1 =[col.r, col.g, col.b, 1]\r\n emit_multi = random.uniform(1, 10)\r\n\r\n #viewport\r\n if copy_view:\r\n material.diffuse_color = color1\r\n material.metallic = 0\r\n material.roughness = 0\r\n else:\r\n material.diffuse_color = random_color()\r\n\r\n emission_glow.inputs[\"Cycle Count\"].default_value = cycle_count\r\n emission_glow.inputs[\"Transition Sharp\"].default_value = 1.0\r\n emission_glow.inputs[\"Emit Multiplier\"].default_value = emit_multi\r\n emission_glow.inputs[\"Color1\"].default_value = color1\r\n emission_glow.inputs[\"Color2\"].default_value = [0.0, 0.0, 0.0, 1.0]\r\n emission_glow.inputs[\"Func Offset (deg)\"].default_value = 0.0\r\n emission_glow.inputs[\"Emit Offset\"].default_value = 0.0\r\n emission_glow.inputs[\"Color Blend\"].default_value = 0.0\r\n \r\n return material\r\n\r\n\r\ndef add_pricipled (material = None, name = \"Material\"):\r\n if not material:\r\n material = bpy.data.materials.new (name)\r\n material.use_nodes = True\r\n material_nodes = material.node_tree.nodes\r\n material_nodes.clear()\r\n output = material_nodes.new(type=\"ShaderNodeOutputMaterial\")\r\n principled = material_nodes.new(\"ShaderNodeBsdfPrincipled\")\r\n material.node_tree.links.new(principled.outputs[0], output.inputs[0])\r\n principled.location = [-300, output.location[1]] \r\n return (material, principled)\r\n\r\n\r\ndef roll (chance):\r\n return random.uniform(0,1) <=chance\r\n\r\n\r\ndef random_color(color = True, col_min = 0.1, 
col_max = 0.8, grey_min = 0.1 , grey_max = 0.4):\r\n    if color :\r\n        r = random.uniform(col_min, col_max)\r\n        g = random.uniform(col_min, col_max)\r\n        b = random.uniform(col_min, col_max)\r\n\r\n    else:\r\n        r = g = b = random.uniform (grey_min, grey_max)\r\n    \r\n    return [r , g, b, 1]","sub_path":"operators/modals/material_scroll.py","file_name":"material_scroll.py","file_ext":"py","file_size_in_byte":25505,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"503256084","text":"from sqlite3.dbapi2 import SQLITE_UPDATE, connect\nfrom flask import Flask, json, render_template, flash, request, redirect,send_file, url_for, session\nfrom flask.helpers import total_seconds\nfrom werkzeug.utils import secure_filename\nfrom flask import jsonify\nfrom wtforms.validators import ValidationError\nfrom thongke_each import get_each_info\nimport os\nimport sqlite3\nimport re\nimport time\nimport librosa\n\n\napp = Flask(__name__)\napp.config['SECRET_KEY'] = '1234567891011121'\n\napp.config['UPLOAD_FOLDER'] = os.getcwd()+ '/static/audios'\n\nconn = sqlite3.connect('transcripts.db')\nc= conn.cursor()\nc.execute(\"SELECT * FROM transcripts\")\ndict_sentences = c.fetchall()\nall_sens_num = len(dict_sentences)\nconn.commit()\nconn.close()\n\n@app.route(\"/\")\n@app.route(\"/home\")\ndef home():\n    if 'id' in session:\n        id = session['id']\n        with sqlite3.connect(\"transcripts.db\") as conn:\n            try:\n                c = conn.cursor()\n                c.execute(\"select fullname from users where id ={0}\".format(id))\n                fullname = c.fetchone()[0]\n                c.execute(\"select role from users where id ={0}\".format(id))\n                role = c.fetchone()[0]\n            except:\n                conn.rollback()\n            conn.commit()\n        # return redirect(url_for(\"record\", id = id, fullname =fullname))\n        if (role == 1):\n            return redirect(\"/admin\")\n        return redirect(\"/record\")\n    return render_template(\"home.html\")\n\n@app.route(\"/admin\")\ndef admin():\n    \n    path_to_folders = os.path.join(str(app.config['UPLOAD_FOLDER']))\n    folders_arr = os.listdir(path_to_folders)\n    # lấy ra số tin nhắn chưa được phản hồi\n    id = session['id']\n    cur_id = session['cur_id']\n    with sqlite3.connect(\"transcripts.db\") as conn:\n        c = conn.cursor()\n        c.execute(\"select * from messages where isCheck = 0\")\n        msg_arr = c.fetchall()\n        c.execute(\"select fullname from users where id ={0}\".format(id))\n        fullname = c.fetchone()[0]\n\n        c.execute(\"select save_dir_url from relationships\")\n        arr = c.fetchall()\n        conn.commit()\n    recorded_sen =[] #những câu đã thu rồi\n    for item in arr:\n        temp1=item[0].split(\"_\")\n        temp2= temp1[3].split(\".\")\n        id = int(temp2[0])\n        recorded_sen.append(id)\n    max_sen = max(recorded_sen) # câu lớn nhất thu được\n    print(recorded_sen)\n    skipped_sen=[] # những câu bị bỏ qua (thống kê tổng quát, k kể người dùng)\n    for i in range(0,max_sen+1):\n        if i not in recorded_sen:\n            skipped_sen.append(i)\n    \n    sentences_info=[]\n\n    c.execute(\"select * from transcripts \")\n    arr = c.fetchall()\n    for item in arr:\n        # nếu tồn tại save_folder của câu này ( đã được lưu rồi)\n        if item[2]: # sentence_01 - tên của folder lưu các file ghi âm của từng câu\n            folder_save = os.path.join(str(app.config['UPLOAD_FOLDER']),item[2])\n            if os.path.exists(folder_save):\n                files_arr=os.listdir(folder_save) # mảng lưu tên file của các files ghi âm từng câu ['namcute_11_55.wav', 'namcute_2_55.wav']\n                # nếu câu này có người thu rồi\n                if len(files_arr):\n                    sentence=[] \n                    sentence.append(item[0]) #id của câu\n                    sentence.append(item[1].capitalize()) # nội dung của câu\n                    
sentence.append(len(files_arr)) # số người dùng đã thu\n sentences_info.append(sentence)\n # câu còn lại\n remain_per = all_sens_num - len(skipped_sen)- len(sentences_info)\n \n \n return render_template(\"admin.html\", fullname = fullname, sentences_info = sentences_info, complete_per= len(sentences_info), skip_per = len(skipped_sen), remain_per= remain_per, all_sens_num = all_sens_num, createdFolders =len(folders_arr))\n\n\n@app.route(\"/admin_post\", methods=['POST','GET']) \ndef admin_post():\n if request.method == 'POST':\n num = int(request.form['folders_num'])\n print(\"Da thuc hien\")\n print(num)\n path_to_folders = os.path.join(str(app.config['UPLOAD_FOLDER']))\n folders_arr = os.listdir(path_to_folders)\n for i in range(len(folders_arr), len(folders_arr)+num ):\n folder_name = \"sentence_\"+str(i)\n print(folder_name)\n folder_upload = os.path.join(str(app.config['UPLOAD_FOLDER']),folder_name)\n print(folder_upload)\n os.mkdir(folder_upload)\n return redirect(\"/admin\")\n \n@app.route(\"/login\", methods=['POST','GET'])\ndef login():\n # if form.validate_on_submit():\n if request.method == 'POST':\n username = request.form['username']\n password = request.form['password']\n # kiểm tra xem đã tồn tại tài khoản này chưa (kiểm tra ở bảng users)\n with sqlite3.connect(\"transcripts.db\") as conn:\n c = conn.cursor()\n c.execute(\"select id from users where username = '{0}' and password = '{1}'\".format(username, password))\n id_arr1 = c.fetchall()\n c.execute(\"select id from users where username='{0}' or password='{1}'\".format(username, password))\n id_arr2= c.fetchall()\n if (len(id_arr2)!=0):\n # trường hợp người dùng chỉ nhập đúng mật khẩu hoặc tên đăng nhập\n if (len(id_arr1)==0):\n flash('Tên đăng nhập hoặc mật khẩu không đúng!', 'error')\n return redirect(\"/\")\n else:\n id = id_arr1[0][0]\n # nếu đã tồn tại tài khoản => lưu id vào session, đến trang record\n session['id']=id\n session['username'] = username\n # print(session['id'])\n c.execute(\"select fullname from users where id = {0}\".format(id))\n fullname = c.fetchone()[0]\n c.execute(\"select cur_id from users where id = {0}\".format(id))\n cur_id = c.fetchone()[0]\n session['cur_id'] = cur_id\n print(fullname)\n c.execute(\"select role from users where id ={0}\".format(id))\n role = c.fetchone()[0]\n print(role)\n conn.commit()\n # return redirect(url_for(\"record\", id = id, fullname =fullname))\n if (role ==1):\n return redirect(\"/admin\")\n return redirect(\"/record\")\n \n else: \n # trường hợp chưa có tài khoản => đến trang đăng ký\n flash('Tài khoản chưa được đăng ký!','error')\n return render_template(\"register.html\",title = \"Đăng ký\")\n # return render_template('login.html',title = \"Đăng nhập\")\n return render_template(\"login.html\")\n\n@app.route(\"/register\", methods=['POST','GET'])\ndef register():\n if request.method == 'POST':\n fullname = request.form['fullname']\n username = request.form['username'] \n password = request.form['password']\n confirm_password = request.form['confirm_password']\n if (confirm_password!=password):\n flash('Mật khẩu không khớp!', 'error')\n return redirect(\"/register\")\n with sqlite3.connect(\"transcripts.db\") as conn:\n c = conn.cursor()\n c.execute(\"select * from users where username='{0}' and password='{1}'\".format(username, password))\n if (len(c.fetchall())!=0):\n flash('Tài khoản này đã tồn tại, vui lòng đăng ký một tài khoản khác!', 'error')\n return redirect(\"/register\")\n\n with sqlite3.connect(\"transcripts.db\") as conn:\n try:\n c = 
conn.cursor()\n c.execute(\"insert into users(username, password, fullname,cur_id, role, skip_sens) values(?,?,?,?,?,?)\",(username,password,fullname,0,0,''))\n c.execute(\"select id from users where username = '{0}' and password='{1}'\".format(username, password))\n id = c.fetchone()[0]\n c.execute(\"select cur_id from users where id = {0}\".format(id))\n cur_id = c.fetchone()[0]\n session['id']=id\n session['username'] = username\n session['cur_id'] = cur_id\n conn.commit()\n except:\n conn.rollback()\n \n return redirect(\"/record\")\n\n return render_template('register.html')\n\n@app.route(\"/contact\")\ndef contact():\n return render_template(\"contact.html\")\n\n@app.route(\"/upload_msg\", methods=['POST'])\ndef upload_msg():\n if request.method == 'POST':\n email = request.form['email']\n msg = request.form['message'] \n with sqlite3.connect(\"transcripts.db\") as conn:\n c = conn.cursor()\n try:\n c.execute(\"insert into messages(email,msg) values(?,?)\",(email,msg))\n except:\n conn.rollback()\n return redirect(\"/\")\n\n@app.route(\"/record\", methods=['POST', 'GET'] )\ndef record():\n # lấy id của user hiện tại để kiểm tra \n id = session.get('id')\n cur_id = session['cur_id']\n with sqlite3.connect(\"transcripts.db\") as conn:\n c = conn.cursor()\n # lấy thông tin của user hiện tại\n c.execute(\"select * from users where id={0}\".format(id))\n user = c.fetchone()\n fullname = user[3]\n username = user[1]\n cur_id_next = cur_id+4\n sen_id = [] # mảng chứa id của các câu được show lên\n for i in range(cur_id, cur_id_next+1):\n sen_id.append(i)\n folder_name = \"sentence_\"+ str(i)\n # kiểm tra trong bảng transcripts câu này đã có save_folder chưa\n c.execute(\"select save_folder from transcripts where sen_id ={0}\".format((i)))\n save_folder = c.fetchone()[0]\n # nếu chưa có save_folder: update bảng transcripts\n if not save_folder:\n try:\n c.execute(\"update transcripts set save_folder ='{0}' where sen_id={1} \".format(folder_name,i))\n except:\n conn.rollback()\n # folder_upload = os.path.join(str(app.config['UPLOAD_FOLDER']),folder_name)\n # if not os.path.exists(folder_upload):\n # os.mkdir(folder_upload)\n # lấy mảng các câu để show lên record\n c.execute(\"select count(save_dir_url) from relationships where user_id ={0}\".format(id))\n complete_sens = c.fetchone()[0] # số câu đã thu\n c.execute(\"select * from transcripts where sen_id >= {0} and sen_id <={1}\".format(cur_id, cur_id_next))\n sens_arr_temp = c.fetchall()\n sens_arr = []\n for sen in sens_arr_temp:\n sen[1].capitalize()\n item = [sen[0],sen[1].capitalize(),sen[2]]\n sens_arr.append(item)\n\n dur_item, files_path, sens_id_arr, skip_sens_id, skip_sens_content = get_each_info(id)\n left_sens = all_sens_num -complete_sens - len(skip_sens_id) #số câu còn lại \n return render_template('record.html',skip_sens = len(skip_sens_id),skip_sens_id = skip_sens_id, id = id,sens_arr= sens_arr,sens_arr_number=len(sens_arr), complete_sens = complete_sens, left_sens = left_sens,fullname=fullname, sen_id= sen_id, username=username, total_duration = dur_item)\n\n@app.route(\"/save_audios\", methods=['POST'])\ndef save_audios():\n if request.method =='POST':\n username = session['username']\n user_id = session['id']\n files = request.files.getlist('audio_data')\n if files.count ==0:\n flash(f'No file selected for uploading!')\n return redirect(request.url)\n else:\n for file in files:\n if file:\n filename= secure_filename(file.filename)\n file_name_temp1=filename\n file_name_temp2 = \".\".join(file_name_temp1.split(\".\")[:-1])\n 
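# the uploaded filename encodes username, user id and sentence id (e.g. namcute_11_55.wav)\n                    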
sen_id = int(file_name_temp2.split(\"_\")[-1]) # lấy được sen_id => tìm folder \n                    folder_name = \"sentence_\"+ str(sen_id)\n                    folder_upload = os.path.join(str(app.config['UPLOAD_FOLDER']), folder_name)\n                    # lưu file vào folder_upload\n                    file.save(os.path.join(folder_upload,filename))\n\n                    # lưu đường dẫn tới file vào relationships\n                    path_to_file = os.path.join(folder_name,filename)\n                    with sqlite3.connect(\"transcripts.db\") as conn:\n                        c = conn.cursor()\n                        try:\n                            c.execute(\"insert into relationships(user_id, sen_id, save_dir_url) values(?,?,?)\",(user_id,sen_id,path_to_file))\n                        except:\n                            conn.rollback()\n                        finally:\n                            conn.commit()\n\n        return jsonify({\"upload\": \"Success\"})\n\n@app.route('/nghethu_each/<int:id>', methods = ['GET', 'POST'])\ndef nghethu_each(id):\n    if not id:\n        id = session['id']\n    with sqlite3.connect(\"transcripts.db\") as conn:\n        c = conn.cursor()\n        c.execute(\"select fullname from users where id = {0}\".format(id))\n        fullname = c.fetchone()[0]\n    global dur_item, files_path, sens_id_arr, skip_sens_id, skip_sens_content \n    dur_item, files_path, sens_id_arr, skip_sens_id, skip_sens_content = get_each_info(id)\n    print(skip_sens_id)\n    sens_id_arr, files_path = zip(*sorted(zip(sens_id_arr, files_path)))\n    global dict_sentences\n    transcripts = []\n    for i in sens_id_arr:\n        transcripts.append(dict_sentences[i][1].capitalize())\n    left_sens = all_sens_num - len(sens_id_arr) - len(skip_sens_id)\n    return render_template(\"nghethu_each.html\", id = id,fullname = fullname, complete_sens = len(sens_id_arr), total_duration = dur_item, sens_id_arr = sens_id_arr, transcripts = transcripts, files_path = files_path, skip_sens= len(skip_sens_id), left_sens = left_sens)\n\n\n@app.route(\"/listen_each_sentence/<int:id>\")\ndef listen_each_sentence(id): # id là id của câu\n    # tạo ra path tới folder lưu câu này\n    admin_id = session['id']\n    with sqlite3.connect(\"transcripts.db\") as conn:\n        c = conn.cursor()\n        c.execute(\"select save_folder from transcripts where sen_id = {0}\".format(id))\n        save_folder = c.fetchone()[0]\n        c.execute(\"select fullname from users where id = {0}\".format(admin_id))\n        admin_fullname = c.fetchone()[0]\n        conn.commit()\n    path = os.path.join(str(app.config['UPLOAD_FOLDER']), save_folder)\n    files_arr = os.listdir(path)\n    users_arr = []\n    duration = 0\n    for file in files_arr:\n        #lấy id của người dùng\n        user=[]\n        temp = re.split(\"_\",file) #['namcute', '11', '3.wav']\n        user_id = int(temp[1])\n        user.append(user_id) # lấy id của người dùng\n        user.append(temp[0]) # lấy username\n        with sqlite3.connect(\"transcripts.db\") as conn:\n            c = conn.cursor()\n            # lấy fullname của user\n            c.execute(\"select fullname from users where id = {0}\".format(user_id))\n            fullname = c.fetchone()[0]\n            # lấy nội dung của câu\n            c.execute(\"select sen_content from transcripts where sen_id = {0}\".format(id))\n            sen_content = c.fetchone()[0]\n            conn.commit()\n        user.append(fullname)\n        files_path_temp=os.path.join(\"static/audios\",save_folder)\n\n        file_path = os.path.join(str(files_path_temp),file)\n        duration = duration + librosa.get_duration(filename = file_path)\n        user.append(file_path)\n        users_arr.append(user)\n    duration = float(\"{:.2f}\".format(duration/3600))\n    return render_template('listen_each_sentence.html', fullname = admin_fullname,users_arr=users_arr, id=admin_id,sen_id = id,sen_content = sen_content, num_files = len(users_arr), duration = duration )\n@app.route(\"/skip_show/<int:id>\")\ndef skip_show(id):\n    id = session['id']\n    username = session['username']\n    cur_id = session['cur_id']\n    
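# look up the current user's full name for the template\n    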
with sqlite3.connect(\"transcripts.db\") as conn:\n c = conn.cursor()\n c.execute(\"select fullname from users where id = {0}\".format(id))\n fullname = c.fetchone()[0]\n dur_item, files_path, sens_id_arr, skip_sens_id, skip_sens_content = get_each_info(id)\n left_sens = all_sens_num -len(sens_id_arr) - len(skip_sens_id) #số câu còn lại \n return render_template('skip_show.html', username = username, id = id, fullname = fullname, skip_sens_id= skip_sens_id, complete_sens = len(sens_id_arr), skip_sens = len(skip_sens_id), left_sens= left_sens, skip_sens_content = skip_sens_content, total_duration = dur_item)\n\n@app.route(\"/upload_skip\")\ndef upload_skip():\n id = session['id']\n username = session['username']\n cur_id = session['cur_id']\n with sqlite3.connect(\"transcripts.db\") as conn:\n c = conn.cursor()\n c.execute(\"select fullname from users where id = {0}\".format(id))\n fullname = c.fetchone()[0]\n global dur_item, files_path, sens_id_arr, skip_sens_id, skip_sens_content\n dur_item, files_path, sens_id_arr, skip_sens_id, skip_sens_content = get_each_info(id)\n print(skip_sens_id)\n left_sens = all_sens_num -len(skip_sens_id) - len(sens_id_arr) # số câu còn lại phải thu\n return render_template('skip_show.html', fullname=fullname,id=id,total_duration = dur_item, complete_sens = len(sens_id_arr), skip_sens = len(skip_sens_id), left_sens= left_sens, skip_sens_id = skip_sens_id, skip_sens_content = skip_sens_content)\n\n\n\n@app.route(\"/thongke_admin\")\ndef thongke_admin():\n id = session['id']\n cur_id = session['cur_id']\n with sqlite3.connect(\"transcripts.db\") as conn:\n c = conn.cursor()\n c.execute(\"select fullname from users where id ={0}\".format(id))\n fullname = c.fetchone()[0]\n conn.commit()\n\n # lấy thông tin của tất cả các câu:\n # global dict_sentences\n # dict_sentences_show=[]\n # with sqlite3.connect(\"transcripts.db\") as conn:\n # c = conn.cursor()\n # c.execute(\"select save_folder from transcripts\")\n # save_dir_arr = c.fetchall()\n # conn.commit()\n \n # for i in range(0,len(save_dir_arr)):\n # item=[]\n # item.append(dict_sentences[0])\n # item.append(dict_sentences[1])\n # temp = \"static/audios/\"+str(save_dir_arr)\n # item.append(temp)\n # dict_sentences_show.append(item)\n # lấy thông tin thống kê theo câu\n with sqlite3.connect(\"transcripts.db\") as conn:\n c = conn.cursor()\n c.execute(\"select save_dir_url from relationships\")\n arr = c.fetchall()\n conn.commit()\n recorded_sen =[] #những câu đã thu rồi\n for item in arr:\n temp1=item[0].split(\"_\")\n temp2= temp1[3].split(\".\")\n id = int(temp2[0])\n recorded_sen.append(id)\n max_sen = max(recorded_sen) # câu lớn nhất thu được\n print(recorded_sen)\n skipped_sen=[] # những câu bị bỏ qua (thống kê tổng quát, k kể người dùng)\n for i in range(0,max_sen+1):\n if i not in recorded_sen:\n skipped_sen.append(i)\n \n sentences_info=[]\n with sqlite3.connect(\"transcripts.db\") as conn:\n c = conn.cursor()\n c.execute(\"select * from transcripts \")\n arr = c.fetchall()\n for item in arr:\n # nếu tồn tại save_folder của câu này ( đã được lưu rồi)\n if item[2]: # sentence_01 - tên của folder lưu các file ghi âm của từng câu\n folder_save = os.path.join(str(app.config['UPLOAD_FOLDER']),item[2])\n if os.path.exists(folder_save):\n files_arr=os.listdir(folder_save) # mảng lưu tên file của các files ghi âm từng câu ['namcute_11_55.wav', 'namcute_2_55.wav']\n # nếu câu này có người thu rồi\n if len(files_arr):\n sentence=[] \n sentence.append(item[0]) #id của câu\n sentence.append(item[1].capitalize()) # 
nội dung của câu\n sentence.append(len(files_arr)) # số người dùng đã thu\n sentences_info.append(sentence)\n # câu còn lại\n remain_per = all_sens_num - len(skipped_sen)- len(sentences_info)\n return render_template(\"thongke_admin.html\", fullname = fullname, sentences_info = sentences_info, complete_per= len(sentences_info), skip_per = len(skipped_sen), remain_per= remain_per, all_sens_num = all_sens_num)\n@app.route(\"/export_xls_user/<int:id>\")\ndef export_xls_user(id):\n if not id:\n id = session['id']\n with sqlite3.connect(\"transcripts.db\") as conn:\n c = conn.cursor()\n c.execute(\"select fullname from users where id = {0}\".format(id))\n fullname = c.fetchone()[0]\n global dur_item, files_path, sens_id_arr, skip_sens_id, skip_sens_content \n dur_item, files_path, sens_id_arr, skip_sens_id, skip_sens_content = get_each_info(id)\n upload_file = request.files.getlist('files_path')\n sens_id_arr, files_path = zip(*sorted(zip(sens_id_arr, files_path)))\n global dict_sentences\n transcripts = []\n duration = []\n for i in sens_id_arr:\n transcripts.append(dict_sentences[i][1])\n for file_path in files_path:\n duration.append(round(librosa.get_duration(filename=file_path),2))\n\n file_download = \"static/collect_data_\"+str(id) +\".xls\"\n with open(file_download,\"w+\",encoding=\"utf-8\" ) as f_write:\n f_write.write(\"ID\\tDuration\\tPath to file\\tContent\\n\")\n for i in range(0,len(sens_id_arr)):\n f_write.write(str(sens_id_arr[i])+\"\\t\"+str(duration[i])+\"\\t\"+files_path[i]+\"\\t\"+transcripts[i].capitalize())\n return send_file(file_download, as_attachment=True)\n\n@app.route(\"/export_xls_sen/<int:id>\")\ndef export_xls_sen(id): # id là id của câu\n with sqlite3.connect(\"transcripts.db\") as conn:\n c = conn.cursor()\n c.execute(\"select save_folder from transcripts where sen_id = {0}\".format(id))\n save_folder = c.fetchone()[0]\n conn.commit()\n path = os.path.join(str(app.config['UPLOAD_FOLDER']), save_folder)\n files_arr = os.listdir(path)\n users_arr = [] # lấy user_name và user_id\n duration = []\n files_path=[] # mảng lưu đường dẫn tới các file của các câu\n path_to_file =[]\n for file in files_arr:\n #lấy id của người dùng\n user=[]\n temp = re.split(\"_\",file) #['namcute', '11', '3.wav']\n user_id = int(temp[1])\n user.append(user_id) # lấy id của người dùng\n user.append(temp[0]) # lấy username\n users_arr.append(user)\n file_path = os.path.join(str(path),file)\n files_path.append(file_path)\n duration.append(round(librosa.get_duration(filename=file_path),2))\n file = os.path.join(\"static/audios\",file)\n path_to_file.append(file)\n file_download = \"static/collect_sen_\"+str(id)+\".xls\"\n with open(file_download,\"w+\",encoding=\"utf-8\" ) as f_write:\n f_write.write(\"stt\\tuser_id\\tusername\\tduration\\tpath_to_file:\\n\")\n for i in range(0,len(files_arr)):\n f_write.write(str(i)+\"\\t\"+str(users_arr[i][0])+\"\\t\"+users_arr[i][1]+\"\\t\"+str(duration[i])+\"\\t\"+path_to_file[i]+\"\\n\")\n return send_file(file_download, as_attachment=True)\n\n@app.route(\"/download_users\")\ndef download_users():\n users_info=[]\n with sqlite3.connect(\"transcripts.db\") as conn:\n c = conn.cursor()\n c.execute(\"select id from users where role=0\")\n for i in c.fetchall():\n id=i[0]\n user=[]\n user.append(id) #id\n global dur_item, files_path, sens_id_arr, skip_sens_id, skip_sens_content\n dur_item, files_path, sens_id_arr, skip_sens_id, skip_sens_content = get_each_info(id)\n c.execute(\"select fullname from users where id = {0}\".format(id))\n 
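# collect fullname, recorded-sentence count and total recording time for each user\n            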
user.append(c.fetchone()[0])\n user.append(len(sens_id_arr))\n user.append(dur_item)\n users_info.append(user)\n\n file_download = \"static/users.xls\"\n with open(file_download,\"w+\",encoding=\"utf-8\" ) as f_write:\n f_write.write(\"ID\\tTên đầy đủ\\tSố câu đã thu\\tTổng thời gian\\n\")\n for user in users_info:\n f_write.write(str(user[0])+\"\\t\"+str(user[1])+\"\\t\"+str(user[2])+\"\\t\"+str(user[3])+\"\\n\")\n\n return send_file(file_download, as_attachment=True)\n\n@app.route(\"/skip\")\ndef skip():\n id = session['id']\n cur_id = session['cur_id']\n cur_id = cur_id+5\n session['cur_id'] = cur_id\n with sqlite3.connect(\"transcripts.db\") as conn:\n try:\n c = conn.cursor()\n c.execute(\"select skip_sens from users where id = {0}\".format(id))\n skip_str = c.fetchone()[0]\n for i in range(cur_id, cur_id+5):\n skip_str = skip_str + str(i) +','\n c.execute(\"update users set skip_sens = '{0}' where id = {1}\".format(skip_str,id))\n c.execute(\"update users set cur_id = '{0}' where id = {1}\".format(cur_id,id))\n c.execute(\"select skip_sens from users where id ={0}\".format(id))\n skip_sens_str = c.fetchone()[0]\n conn.commit()\n except:\n conn.rollback()\n \n skip_sens_str_arr =re.split(\",\",skip_sens_str) # string chứa id của những câu bị bỏ qua, lưu dưới dạng chuỗi, mỗi id cách nhau bởi dấu phẩy\n skip_sens_str_arr.pop() # xóa phần tử rỗng bị thừa ở cuối mảng ['1','2','']\n skip_sens_arr=[]\n for item in skip_sens_str_arr:\n skip_sens_arr.append(int(item))\n skip_sens = len(skip_sens_arr) # số câu đã bỏ qua\n complete_sens = cur_id-skip_sens # số câu đã thu \n left_sens = all_sens_num -cur_id # số câu còn lại phải thu\n \n return redirect(url_for('record',left_sens = left_sens, complete_sens = complete_sens, skip_sens = skip_sens))\n \n\n@app.route(\"/upload_continue\")\ndef upload_continue():\n id = session['id']\n cur_id = session['cur_id']\n session['cur_id'] = cur_id +5\n with sqlite3.connect(\"transcripts.db\") as conn:\n c = conn.cursor()\n try:\n c.execute(\"update users set cur_id = '{0}' where id = {1}\".format(cur_id +5,id))\n conn.commit()\n except:\n conn.rollback()\n skip_sens_str = \"\"\n skip_sens_arr=[]\n complete_sens_arr=[] # mảng chứa các câu đã được thu rồi\n c.execute(\"select save_dir_url from relationships where user_id = {0}\".format(id))\n for item in c.fetchall():\n item_temp = \".\".join(item[0].split(\".\")[:-1])\n sen_id = int(item_temp.split(\"_\")[-1])\n complete_sens_arr.append(sen_id)\n for i in range(0, cur_id+5):\n if i not in complete_sens_arr:\n skip_sens_arr.append(i)\n skip_sens_str = skip_sens_str + str(i)+\",\"\n with sqlite3.connect(\"transcripts.db\") as conn:\n try:\n c = conn.cursor()\n c.execute(\"update users set skip_sens = '{0}'\".format(skip_sens_str))\n conn.commit()\n except:\n conn.rollback()\n skip_sens = len(skip_sens_arr) # số câu đã bỏ qua\n complete_sens = len(complete_sens_arr) # số câu đã thu \n\n left_sens = all_sens_num -complete_sens - skip_sens\n return redirect(url_for('record',left_sens = left_sens, complete_sens = complete_sens, skip_sens = skip_sens))\n\n@app.route(\"/user_collections\")\ndef user_collections():\n id = session['id']\n cur_id = session['cur_id']\n with sqlite3.connect(\"transcripts.db\") as conn:\n c = conn.cursor()\n c.execute(\"select fullname from users where id ={0}\".format(id))\n fullname = c.fetchone()[0]\n conn.commit()\n # lấy thông tin của người dùng\n users_info = []\n with sqlite3.connect(\"transcripts.db\") as conn:\n c = conn.cursor()\n c.execute(\"select * from users\")\n users_arr= 
c.fetchall()\n for item in users_arr:\n if (item[5]!=1):\n user=[]\n user.append(item[0]) # id\n user.append(item[1]) #username\n user.append(item[3]) #fullname\n \n dur_item, files_path, sens_id_arr, skip_sens_id, skip_sens_content = get_each_info(item[0])\n user.append(len(sens_id_arr)) # số câu đã thu\n user.append(len(skip_sens_id)) # số câu bị bỏ qua\n user.append(dur_item) # thời gian\n users_info.append(user)\n return render_template(\"user_collections.html\", fullname = fullname, users_info = users_info, users_num = len(users_info))\n\n@app.route(\"/sentences_collections\")\ndef sentences_collections():\n id = session['id']\n cur_id = session['cur_id']\n with sqlite3.connect(\"transcripts.db\") as conn:\n c = conn.cursor()\n c.execute(\"select fullname from users where id ={0}\".format(id))\n fullname = c.fetchone()[0]\n conn.commit()\n sentences_info=[]\n with sqlite3.connect(\"transcripts.db\") as conn:\n c = conn.cursor()\n c.execute(\"select * from transcripts \")\n arr = c.fetchall()\n for item in arr:\n # nếu tồn tại save_folder của câu này ( đã được lưu rồi)\n if item[2]: # sentence_01 - tên của folder lưu các file ghi âm của từng câu\n folder_save = os.path.join(str(app.config['UPLOAD_FOLDER']),item[2])\n if os.path.exists(folder_save):\n files_arr=os.listdir(folder_save) # mảng lưu tên file của các files ghi âm từng câu ['namcute_11_55.wav', 'namcute_2_55.wav']\n # nếu câu này có người thu rồi\n if len(files_arr):\n sentence=[] \n sentence.append(item[0]) #id của câu\n sentence.append(item[1].capitalize()) # nội dung của câu\n sentence.append(len(files_arr)) # số người dùng đã thu\n sentences_info.append(sentence)\n return render_template(\"sentences_collections.html\", fullname= fullname, sentences_info= sentences_info, sens_num = len(sentences_info))\n\n@app.route(\"/logout\")\ndef logout():\n # xóa id, username khỏi session\n session.pop('id')\n session.pop('username')\n # return render_template('login.html',title=\"Đăng nhập\")\n return redirect(\"/\")\nif (__name__ == \"__main__\"):\n app.run(debug=True) \n ","sub_path":"MyVenv/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":31438,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"325466147","text":"# -*- coding: utf-8 -*-\n#! 
usr/bin/env python3\n\"\"\"Smoothly interpolating integer container.\n\"\"\"\nfrom ..typecasting import get_float\nfrom .utility import clamp\nfrom .common import *\n\nclass Changer:\n REPR = 'Changer<{}> :' \\\n ' old: {} /' \\\n ' new: {} /' \\\n ' cur: {} /' \\\n ' min: {} /' \\\n ' max: {} /' \\\n ' relax: {} /' \\\n ' loop: {}'\n\n def __init__(self,\n value=0,\n default=0,\n lower=0,\n upper=MAX,\n relax=False,\n loop=False,\n keep_count=False,\n tag='',\n ):\n \"\"\"Changer object.\n \"\"\"\n self.cycles = 0 # Number of elapsed cycles\n self.default = default # Failsafe value\n self.keep_count = keep_count # Track elapsed cycles?\n self.is_changing = False # Is value changing?\n self.tag = tag # Label/descriptor\n self.lower = lower\n self.upper = upper\n self.relax = relax\n self.loop = loop\n self.value = {'old': value, 'cur': value, 'new': value}\n self.new = self.value['new']\n self.cur = self.value['cur']\n self.old = self.value['old']\n self.int_cur = int(round(self.cur))\n\n def __eq__(self, other):\n \"\"\"Implements self == other.\n \"\"\"\n return self.int_cur == other\n\n def __ge__(self, other):\n \"\"\"Implements self >= other.\n \"\"\"\n return self.cur >= other\n\n def __le__(self, other):\n \"\"\"Implements self <= other.\n \"\"\"\n return self.cur <= other\n\n def __gt__(self, other):\n \"\"\"Implements self > other.\n \"\"\"\n return self.cur > other\n\n def __lt__(self, other):\n \"\"\"Implements self < other.\n \"\"\"\n return self.cur < other\n\n def __ne__(self, other):\n \"\"\"Implements self != other.\n \"\"\"\n return self.cur != other\n\n def __repr__(self):\n \"\"\"Implements repr(self).\n \"\"\"\n return Changer.REPR.format(\n self.tag,\n self.old,\n self.new,\n self.cur,\n self.lower,\n self.upper,\n self.relax,\n self.loop,\n )\n\n def __str__(self):\n \"\"\"Implements str(self).\n \"\"\"\n return self.tag\n\n def __getitem__(self, item):\n \"\"\"Implements self[item].\n \"\"\"\n assert(item in self.value)\n return self.value[item]\n\n def __setitem__(self, key, value):\n \"\"\"Implements self[key] = value.\n \"\"\"\n assert(key in self.value)\n self.value[key] = value\n\n def reset(self,\n value=0,\n default=0,\n lower=0,\n upper=MAX,\n relax=False,\n loop=False,\n keep_count=False,\n tag='',\n ):\n \"\"\"Resets this Changer's attributes.\n \"\"\"\n self.cycles = 0 # Number of elapsed cycles\n self.default = default # Failsafe value\n self.keep_count = keep_count # Track elapsed cycles?\n self.is_changing = False # Is value changing?\n self.tag = tag # Label/descriptor\n self.lower = lower\n self.upper = upper\n self.relax = relax\n self.loop = loop\n self.value['old'] = value\n self.value['new'] = value\n self.value['cur'] = value\n self.new = self.value['new']\n self.cur = self.value['cur']\n self.old = self.value['old']\n self.int_cur = int(round(self.cur))\n\n def set_all(self, value):\n \"\"\"Manually sets all values.\n \"\"\"\n self.value['cur'] = self.cur = value\n self.value['old'] = self.old = value\n self.value['new'] = self.new = value\n self.is_changing = False\n self.int_cur = int(round(self.cur))\n\n def set_cur(self, value):\n \"\"\"Manually sets current value.\n \"\"\"\n self.value['cur'] = self.cur = value\n self.is_changing = self.new != value\n self.int_cur = int(round(self.cur))\n\n def set_new(self, value):\n \"\"\"Manually sets new value.\n \"\"\"\n self.value['new'] = self.new = value\n self.is_changing = self.cur != value\n\n def set_old(self, value):\n \"\"\"Manually sets old value.\n \"\"\"\n self.value['old'] = self.old = 
value\n\n def update(self, dt, delta, cl=clamp, gf=get_float):\n \"\"\"Updates current value with respect to time step.\n \"\"\"\n cur = self.value['cur']\n new = self.value['new']\n old = self.value['old']\n\n # Get interpolated value to step by\n d = 0\n try:\n d = dt * delta\n except TypeError:\n if isinstance(delta, (Changer, dict)):\n d = gf(delta['cur']) * dt\n elif isinstance(delta, list):\n d = gf(delta) * dt\n\n # Interpolate up or down to target value\n if cur < new:\n cur = cl(cur + d, lower=self.lower, upper=new)\n elif cur > new:\n cur = cl(cur - d, lower=new, upper=self.upper)\n\n # Handling upon reaching target value\n if int(round(cur)) == new:\n if self.relax:\n # Return to original value if needed\n new = old\n elif self.loop:\n # Wrap around if needed\n cur = old\n if self.keep_count:\n # Keep track of cycles if needed\n self.cycles += 1\n\n # Cache all current values\n self.value['cur'] = self.cur = cur\n self.value['old'] = self.old = old\n self.value['new'] = self.new = new\n self.is_changing = cur != new\n self.int_cur = int(round(self.cur))\n","sub_path":"engine/math/changer.py","file_name":"changer.py","file_ext":"py","file_size_in_byte":6027,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"32390432","text":"from bottle import route, run, template, request, abort, redirect, static_file\nfrom model import Model\n\nmodel = Model('questions.json', 'faculties.json')\n\n\n@route('/res/<filename:path>')\ndef resource(filename):\n \"\"\"This route resolves and returns static images.\"\"\"\n return static_file(filename, root='res/')\n\n\n@route('/')\ndef index():\n return template('templates/index.tpl')\n\n\n@route('/question/<id>')\ndef question(id):\n id = int(id)\n\n if not model.has_question(id):\n abort(404, 'No such question.')\n\n question = model.get_question(id)\n\n return template('templates/question.tpl', text=question['text'], id=id)\n\n\n@route('/submit', method='POST')\ndef submit():\n id = int(request.forms.get('question'))\n answer = int(request.forms.get('answer'))\n\n user = model.current_user()\n model.answer(user, id, answer)\n\n # Redirect the user to the next question or the results page.\n if model.has_question(id + 1):\n redirect('/question/{}'.format(id + 1))\n else:\n redirect('/result')\n\n\n@route('/result')\ndef result():\n return template('templates/result-cover.tpl')\n\n\n@route('/showresult')\ndef show_result():\n user = model.current_user()\n model.drop_user(user) # The user data is no longer needed in cache at this point.\n\n result = model.get_results(user)\n return template('templates/results.tpl', result=result)\n\n\nif __name__ == '__main__':\n run()\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1416,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"107536731","text":"import datetime\n\nimport adafruit_bme280\nimport board\nimport busio\nimport digitalio\n\nfrom googleapiclient.discovery import build\nfrom google.oauth2 import service_account\n\n# prepare config\nimport config\n\n# spiでbme280を操作する\nspi = busio.SPI(board.SCK, MOSI=board.MOSI, MISO=board.MISO)\n# D5は任意のGPIOピン\ncs = digitalio.DigitalInOut(board.D5)\nbme280 = adafruit_bme280.Adafruit_BME280_SPI(spi, cs)\n\n# google authの認証をしてAPIサービスの生成\nSCOPES = [\"https://www.googleapis.com/auth/spreadsheets\"]\nSAMPLE_RANGE_NAME = \"A:D\"\nVALUE_INPUT_OPTION = \"RAW\"\nINSERT_DATA_OPTION = \"INSERT_ROWS\"\n\ncredentials = 
service_account.Credentials.from_service_account_file(\n    config.SERVICE_ACCOUNT_FILE, scopes=SCOPES\n)\nservice = build(\"sheets\", \"v4\", credentials=credentials)\n\n\n# check whether we can write to the specific sheet\nsheet = service.spreadsheets()\n\nprint(\"time:{:%Y-%m-%d: %H:%M:%S}\".format(datetime.datetime.now()))\nprint(\"Temperature: %0.1f C\" % bme280.temperature)\nprint(\"Humidity: %0.1f %%\" % bme280.humidity)\nprint(\"Pressure: %0.1f hPa\\n\" % bme280.pressure)\n\nbody = {\n    \"values\": [\n        [\n            \"{:%Y-%m-%d %H:%M:%S}\".format(datetime.datetime.now()),\n            \"{:0.1f}\".format(bme280.temperature),\n            \"{:0.1f}\".format(bme280.pressure),\n            \"{:0.1f}\".format(bme280.humidity),\n        ]\n    ],\n}\nresult = (\n    service.spreadsheets()\n    .values()\n    .append(\n        spreadsheetId=config.SAMPLE_SPREADSHEET_ID,\n        range=SAMPLE_RANGE_NAME,\n        valueInputOption=VALUE_INPUT_OPTION,\n        insertDataOption=INSERT_DATA_OPTION,\n        body=body,\n    )\n    .execute()\n)\n","sub_path":"recode_bme280_to_gsheet.py","file_name":"recode_bme280_to_gsheet.py","file_ext":"py","file_size_in_byte":1646,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"623620152","text":"#!/usr/bin/env python3\n\nimport sqlite3\nimport wptools # WikiPage Tools lib\nimport re\nfrom bs4 import BeautifulSoup\n\n\nclass CrawlWikipedia:\n    def __init__(self, db_file):\n        \"\"\" \n        Initialize the crawler class for getting data from Wikipedia.\n        Setup a small SQLite DB in file.\n\n        Wikipedia pages are \"members\" of \"categories\"\n        \"\"\" \n        self.categories = []\n        # Create DB\n        print('Use DB file {}'.format(db_file))\n        self.conn = sqlite3.connect(db_file)\n        cursor = self.conn.cursor()\n        # Create Table for Pages\n        cursor.execute('CREATE TABLE IF NOT EXISTS content (pageid text, category text, url text, content text)') \n        self.conn.commit()\n        self.cursor = self.conn.cursor()\n\n\n    def save_page_content(self, category, pageid, url, content):\n        self.cursor.execute('INSERT INTO content VALUES (?, ?, ?, ?)', (pageid, category, url, content))\n        self.conn.commit()\n\n    def get_page_urls(self):\n        # the cursor yields 1-tuples, so unpack the url column\n        return [row[0] for row in self.cursor.execute('SELECT url FROM content')]\n\n    def get_page_ids(self):\n        # the cursor yields 1-tuples, so unpack the pageid column\n        return [row[0] for row in self.cursor.execute('SELECT pageid FROM content')]\n\n    def get_categories_and_members(self, category, depth):\n        \"\"\"\n        Start with the defined category and download Wikipedia content\n        up to a set depth of categories.\n        \"\"\"\n        print(u'Checking for subcategories of {} at depth {}'.format(category, depth))\n\n        if depth:\n            # Get Details and Members of this Category\n            cat = wptools.category(category) \n            cat_members = cat.get_members()\n\n            print(u'cat_members: {}'.format(cat_members.data))\n            print(u'cat_members_keys: {}'.format(cat_members.data.keys()))\n\n            # 1 - save any pages (members) for this category\n            if 'members' in cat_members.data.keys():\n                for cat_member in cat_members.data['members']:\n                    print(u'member: {}'.format(cat_member))\n                    # IF NOT already saved...\n                    if cat_member['pageid'] not in self.get_page_ids():\n                        # Get the page Content\n                        page = wptools.page(pageid=cat_member['pageid']).get_parse()\n                        \n                        url = page.get_query().data['url']\n                        # CLEAN: remove HTML syntax and <ref>\n                        text = BeautifulSoup(page.data['wikitext'], 'html.parser').get_text()\n                        # CLEAN: remove markup such as [[...]] and {{...}}\n                        clean_text = re.sub(r'\\\s*\\\{\\\{.*?\\\}\\\}\\\s*|\\\s*\\\[\\\[.*?\\\]\\\]\\\s*', '', text)\n\n                        # Save/store the page\n                        print('Saving pageid {} / url {}'.format(cat_member['pageid'], url))\n                        self.save_page_content(category, cat_member['pageid'], url, 
clean_text)\n\n            # 2 - iterate through any subcategories\n            if 'subcategories' in cat_members.data.keys():\n                subcats = cat_members.data['subcategories']\n\n                for subcat in subcats:\n                    self.categories.append(subcat)\n\n                    # RECURSE: Until depth reached on pages\n                    self.get_categories_and_members(subcat['title'], depth - 1)\n\n        # INFO\n        for cat in self.categories:\n            print(u'Category: {}'.format(cat))","sub_path":"crawler/get_wikipedia_content.py","file_name":"get_wikipedia_content.py","file_ext":"py","file_size_in_byte":3416,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"221706756","text":"from flask import Flask\nfrom redis import Redis\n\napp = Flask(__name__)\napp.config.from_pyfile('config.cfg')\n\nredis = Redis(host=app.config['DB'], port=6379)\n\n@app.route('/')\ndef hello():\n    count = redis.incr('hits')\n    return 'Hello World from flask and {0}! I have been seen {1} times.\\n'.format(app.config['DB'], count)\n\nif __name__ == \"__main__\":\n    app.run(host=\"0.0.0.0\", debug=True)\n","sub_path":"11-docker/32-apps/11-python/02-redis-python-config/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":393,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"268821791","text":"\"\"\"Main\"\"\"\n# Flask\nfrom flask import Flask, render_template,request, redirect, url_for\n\nfrom flask_sqlalchemy import SQLAlchemy\n\napp = Flask(__name__)\napp.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///database/tasks.db' # Connection to a database.\ndb = SQLAlchemy(app) # Create a SQLAlchemy instance to handle the ORM\n# ROUTERS\nclass Task(db.Model):\n    \"\"\"Task model.\"\"\"\n    id=db.Column(db.Integer,primary_key=True)\n    content=db.Column(db.String(200))\n    done= db.Column(db.Boolean)\n\n@app.route('/')\n# Root route\ndef home():\n    tasks = Task.query.all()\n    return render_template('index.html', tasks = tasks) # Pass the html file name in single quotes\n    # By default Flask looks for templates in the folder named templates at the same level as the controller file (app.py)\n\n@app.route('/create-task',methods=['POST'])\ndef create():\n    \"\"\"Create tasks.\"\"\"\n    task = Task(content=request.form['tarea'], done=False)\n    db.session.add(task) # Add a new record to the database.\n    db.session.commit() # Tell the DB that we are done making changes\n    return redirect(url_for('home')) # Redirect to the root route using url_for; redirect('/') also works\n\n@app.route('/done/<id>')\ndef done(id):\n    task = Task.query.filter_by(id=int(id)).first()\n    task.done = not(task.done) # Toggle the value on each call.\n    db.session.commit()\n    return redirect(url_for('home'))\n@app.route('/delete/<id>') # The id in angle brackets means it must be sent as a parameter\ndef delete(id):\n    task = Task.query.filter_by(id=int(id)).delete() # Find the task and delete it.\n    db.session.commit()\n    return redirect(url_for('home'))\n\nif __name__ == '__main__':\n    app.run(debug=True) # Set debug=True so our server restarts every time we save changes\n    ","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1898,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"486988908","text":"import turtle as t\r\nt_=t.Turtle()\r\nt_.penup()\r\nt_.goto(-250,250)\r\nt_.pendown()\r\nt_.color(\"white\")\r\nt_.write(\"RULES\\n 1.RIGHT CLICK FOR 
CROSS\\n 2.LEFT CLICK FOR ZERO\\n 3.WINNER HAS TO BE CHOSEN MANUALLY\\n\" )\r\nt_.hideturtle()\r\n#defining function for mouse click\r\ndef right_click1(x,y):\r\n    t1.pensize(10)\r\n    t1.circle(20)\r\n    t1.hideturtle()\r\ndef right_click2(x,y):\r\n    t2.pensize(10)\r\n    t2.circle(20)\r\n    t2.hideturtle()\r\ndef right_click3(x,y):\r\n    t3.pensize(10)\r\n    t3.circle(20)\r\n    t3.hideturtle()\r\ndef right_click4(x,y):\r\n    t4.pensize(10)\r\n    t4.circle(20)\r\n    t4.hideturtle()\r\ndef right_click5(x,y):\r\n    t5.pensize(10)\r\n    t5.circle(20)\r\n    t5.hideturtle()\r\ndef right_click6(x,y):\r\n    t6.pensize(10)\r\n    t6.circle(20)\r\n    t6.hideturtle()\r\ndef right_click7(x,y):\r\n    t7.pensize(10)\r\n    t7.circle(20)\r\n    t7.hideturtle()\r\ndef right_click8(x,y):\r\n    t8.pensize(10)\r\n    t8.circle(20)\r\n    t8.hideturtle()\r\ndef right_click9(x,y):\r\n    t9.pensize(10)\r\n    t9.circle(20)\r\n    t9.hideturtle()\r\n#def left click function\r\ndef left_click1(x,y):\r\n    t1.pensize(10)\r\n    t1.right(45)\r\n    t1.fd(30)\r\n    t1.bk(60)\r\n    t1.penup()\r\n    t1.right(45)\r\n    t1.fd(45)\r\n    t1.pendown()\r\n    t1.left(135)\r\n    t1.fd(60)\r\n    t1.hideturtle()\r\ndef left_click2(x,y):\r\n    t2.pensize(10)\r\n    t2.right(45)\r\n    t2.fd(30)\r\n    t2.bk(60)\r\n    t2.penup()\r\n    t2.right(45)\r\n    t2.fd(45)\r\n    t2.pendown()\r\n    t2.left(135)\r\n    t2.fd(60)\r\n    t2.hideturtle()\r\ndef left_click3(x,y):\r\n    t3.pensize(10)\r\n    t3.right(45)\r\n    t3.fd(30)\r\n    t3.bk(60)\r\n    t3.penup()\r\n    t3.right(45)\r\n    t3.fd(45)\r\n    t3.pendown()\r\n    t3.left(135)\r\n    t3.fd(60)\r\n    t3.hideturtle()\r\ndef left_click4(x,y):\r\n    t4.pensize(10)\r\n    t4.right(45)\r\n    t4.fd(30)\r\n    t4.bk(60)\r\n    t4.penup()\r\n    t4.right(45)\r\n    t4.fd(45)\r\n    t4.pendown()\r\n    t4.left(135)\r\n    t4.fd(60)\r\n    t4.hideturtle()\r\ndef left_click5(x,y):\r\n    t5.pensize(10)\r\n    t5.right(45)\r\n    t5.fd(30)\r\n    t5.bk(60)\r\n    t5.penup()\r\n    t5.right(45)\r\n    t5.fd(45)\r\n    t5.pendown()\r\n    t5.left(135)\r\n    t5.fd(60)\r\n    t5.hideturtle()\r\ndef left_click6(x,y):\r\n    t6.pensize(10)\r\n    t6.right(45)\r\n    t6.fd(30)\r\n    t6.bk(60)\r\n    t6.penup()\r\n    t6.right(45)\r\n    t6.fd(45)\r\n    t6.pendown()\r\n    t6.left(135)\r\n    t6.fd(60)\r\n    t6.hideturtle()\r\ndef left_click7(x,y):\r\n    t7.pensize(10)\r\n    t7.right(45)\r\n    t7.fd(30)\r\n    t7.bk(60)\r\n    t7.penup()\r\n    t7.right(45)\r\n    t7.fd(45)\r\n    t7.pendown()\r\n    t7.left(135)\r\n    t7.fd(60)\r\n    t7.hideturtle()\r\ndef left_click8(x,y):\r\n    t8.pensize(10)\r\n    t8.right(45)\r\n    t8.fd(30)\r\n    t8.bk(60)\r\n    t8.penup()\r\n    t8.right(45)\r\n    t8.fd(45)\r\n    t8.pendown()\r\n    t8.left(135)\r\n    t8.fd(60)\r\n    t8.hideturtle()\r\ndef left_click9(x,y):\r\n    t9.pensize(10)\r\n    t9.right(45)\r\n    t9.fd(30)\r\n    t9.bk(60)\r\n    t9.penup()\r\n    t9.right(45)\r\n    t9.fd(45)\r\n    t9.pendown()\r\n    t9.left(135)\r\n    t9.fd(60)\r\n    t9.hideturtle()\r\n\r\ntu=t.Turtle()\r\ntu.speed(0)\r\ntu.pensize(10)\r\nt.bgcolor(\"black\")\r\ntu.penup()\r\ntu.color(\"red\")\r\ntu.goto(-150,-150)\r\ntu.left(90)\r\ntu.pendown()\r\n#making square\r\nfor i in range(4):\r\n    tu.forward(300)\r\n    tu.right(90)\r\n#making boxes\r\ntu.penup()\r\ntu.right(90)\r\ntu.forward(100)\r\ntu.pendown()\r\ntu.left(90)\r\ntu.forward(300)\r\ntu.penup()\r\ntu.right(90)\r\ntu.forward(100)\r\ntu.pendown()\r\ntu.right(90)\r\ntu.forward(300)\r\ntu.penup()\r\ntu.left(90)\r\ntu.forward(100)\r\ntu.left(90)\r\ntu.forward(100)\r\ntu.pendown()\r\ntu.left(90)\r\ntu.forward(300)\r\ntu.right(90)\r\ntu.penup()\r\ntu.forward(100)\r\ntu.pendown()\r\ntu.right(90)\r\ntu.forward(300)\r\n\r\n#placing turtles\r\n#first turtle\r\nt1=t.Turtle()\r\nt1.shape(\"turtle\")\r\nt1.color(\"white\")\r\n\r\n#second 
turtle\r\nt2=t.Turtle()\r\nt2.shape(\"turtle\")\r\nt2.color(\"white\")\r\nt2.penup()\r\nt2.goto(90,90)\r\nt2.pendown()\r\n#third turtle\r\nt3=t.Turtle()\r\nt3.shape(\"turtle\")\r\nt3.color(\"white\")\r\nt3.penup()\r\nt3.goto(0,100)\r\nt3.pendown()\r\n#forth turtle\r\nt4=t.Turtle()\r\nt4.shape(\"turtle\")\r\nt4.color(\"white\")\r\nt4.penup()\r\nt4.goto(90,0)\r\nt4.pendown()\r\n#fifth turtle\r\nt5=t.Turtle()\r\nt5.shape(\"turtle\")\r\nt5.color(\"white\")\r\nt5.penup()\r\nt5.goto(-90,0)\r\nt5.pendown()\r\n#sixth turtle\r\nt6=t.Turtle()\r\nt6.shape(\"turtle\")\r\nt6.color(\"white\")\r\nt6.penup()\r\nt6.goto(0,-90)\r\nt6.pendown()\r\n#seventh turtle\r\nt7=t.Turtle()\r\nt7.shape(\"turtle\")\r\nt7.color(\"white\")\r\nt7.penup()\r\nt7.goto(90,-90)\r\nt7.pendown()\r\n#eigth turtle\r\nt8=t.Turtle()\r\nt8.shape(\"turtle\")\r\nt8.color(\"white\")\r\nt8.penup()\r\nt8.goto(-90,-90)\r\nt8.pendown()\r\n#nineth turtle\r\nt9=t.Turtle()\r\nt9.shape(\"turtle\")\r\nt9.color(\"white\")\r\nt9.penup()\r\nt9.goto(-90,90)\r\nt9.pendown()\r\n\r\nt.listen()\r\nt1.onclick(right_click1,1)\r\nt2.onclick(right_click2,1)\r\nt3.onclick(right_click3,1)\r\nt4.onclick(right_click4,1)\r\nt5.onclick(right_click5,1)\r\nt6.onclick(right_click6,1)\r\nt7.onclick(right_click7,1)\r\nt8.onclick(right_click8,1)\r\nt9.onclick(right_click9,1)\r\n\r\n#right click\r\n\r\nt1.onclick(left_click1,3)\r\nt2.onclick(left_click2,3)\r\nt3.onclick(left_click3,3)\r\nt4.onclick(left_click4,3)\r\nt5.onclick(left_click5,3)\r\nt6.onclick(left_click6,3)\r\nt7.onclick(left_click7,3)\r\nt8.onclick(left_click8,3)\r\nt9.onclick(left_click9,3)\r\n\r\nt.done()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"tictaktoe.py","file_name":"tictaktoe.py","file_ext":"py","file_size_in_byte":5142,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"418859294","text":"print(\"Character aware!\")\n\n# Character-aware version of the `Tabula Rasa' language model\n\nimport sys\n\nimport argparse\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--language\", dest=\"language\", type=str, default=\"english\")\nparser.add_argument(\"--load-from\", dest=\"load_from\", type=str)\n\nimport random\n\nparser.add_argument(\"--batchSize\", type=int, default=random.choice([1]))\nparser.add_argument(\"--word_embedding_size\", type=int, default=random.choice([512]))\nparser.add_argument(\"--hidden_dim\", type=int, default=random.choice([1024]))\nparser.add_argument(\"--layer_num\", type=int, default=random.choice([2]))\nparser.add_argument(\"--weight_dropout_in\", type=float, default=random.choice([0.05]))\nparser.add_argument(\"--weight_dropout_out\", type=float, default=random.choice([0.05]))\nparser.add_argument(\"--char_dropout_prob\", type=float, default=random.choice([0.01]))\n#parser.add_argument(\"--char_noise_prob\", type = float, default=random.choice([0.0]))\nparser.add_argument(\"--learning_rate\", type = float, default= random.choice([1.0]))\nparser.add_argument(\"--myID\", type=int, default=random.randint(0,1000000000))\nparser.add_argument(\"--sequence_length\", type=int, default=random.choice([50]))\nparser.add_argument(\"--verbose\", type=bool, default=False)\nparser.add_argument(\"--lr_decay\", type=float, default=random.choice([1.0]))\nparser.add_argument(\"--char_emb_dim\", type=int, default=128)\nparser.add_argument(\"--char_enc_hidden_dim\", type=int, default=64)\nparser.add_argument(\"--char_dec_hidden_dim\", type=int, 
default=128)\n\nmodel = \"REAL_REAL\"\n\nimport math\n\nargs=parser.parse_args()\n\n#if \"MYID\" in args.save_to:\n# args.save_to = args.save_to.replace(\"MYID\", str(args.myID))\n\n#assert \"word\" in args.save_to, args.save_to\n\nprint(args)\n\n\n\nimport corpusIterator_Grodner2002\n\n\ndef plus(it1, it2):\n for x in it1:\n yield x\n for x in it2:\n yield x\n\nchar_vocab_path = {\"german\" : \"vocabularies/german-wiki-word-vocab-50000.txt\", \"italian\" : \"vocabularies/italian-wiki-word-vocab-50000.txt\", \"english\" : \"vocabularies/english-wiki-word-vocab-50000.txt\"}[args.language]\n\nwith open(char_vocab_path, \"r\") as inFile:\n itos = [x.split(\"\\t\")[0] for x in inFile.read().strip().split(\"\\n\")[:50000]]\nstoi = dict([(itos[i],i) for i in range(len(itos))])\n\nitos_complete = [\"SOS\", \"EOS\", \"OOV\"] + itos\n\n\nwith open(\"vocabularies/char-vocab-wiki-\"+args.language, \"r\") as inFile:\n itos_chars = [x for x in inFile.read().strip().split(\"\\n\")]\nstoi_chars = dict([(itos_chars[i],i) for i in range(len(itos_chars))])\n\n\nitos_chars_total = [\"SOS\", \"EOS\", \"OOV\"] + itos_chars\n\n\nimport random\n\n\nimport torch\n\nprint(torch.__version__)\n\n#from weight_drop import WeightDrop\n\n\nrnn = torch.nn.LSTM(2*args.word_embedding_size, args.hidden_dim, args.layer_num).cuda()\n\nrnn_parameter_names = [name for name, _ in rnn.named_parameters()]\nprint(rnn_parameter_names)\n#quit()\n\n\nrnn_drop = rnn #WeightDrop(rnn, layer_names=[(name, args.weight_dropout_in) for name, _ in rnn.named_parameters() if name.startswith(\"weight_ih_\")] + [ (name, args.weight_dropout_hidden) for name, _ in rnn.named_parameters() if name.startswith(\"weight_hh_\")])\n\noutput = torch.nn.Linear(args.hidden_dim, len(itos)+3).cuda()\n\nword_embeddings = torch.nn.Embedding(num_embeddings=len(itos)+3, embedding_dim=args.word_embedding_size).cuda()\n\nlogsoftmax = torch.nn.LogSoftmax(dim=2)\n\ntrain_loss = torch.nn.NLLLoss(ignore_index=0)\nprint_loss = torch.nn.NLLLoss(size_average=False, reduce=False, ignore_index=0)\nchar_dropout = torch.nn.Dropout2d(p=args.char_dropout_prob)\n\n\ntrain_loss_chars = torch.nn.NLLLoss(ignore_index=0, reduction='sum')\n\nmodules = [rnn, output, word_embeddings]\n\n\ncharacter_embeddings = torch.nn.Embedding(num_embeddings = len(itos_chars_total)+3, embedding_dim=args.char_emb_dim).cuda()\n\nchar_composition = torch.nn.LSTM(args.char_emb_dim, args.char_enc_hidden_dim, 1, bidirectional=True).cuda()\nchar_composition_output = torch.nn.Linear(2*args.char_enc_hidden_dim, args.word_embedding_size).cuda()\n\nchar_decoder_rnn = torch.nn.LSTM(args.char_emb_dim + args.hidden_dim, args.char_dec_hidden_dim, 1).cuda()\nchar_decoder_output = torch.nn.Linear(args.char_dec_hidden_dim, len(itos_chars_total))\n\n\nmodules += [character_embeddings, char_composition, char_composition_output, char_decoder_rnn, char_decoder_output]\ndef parameters():\n for module in modules:\n for param in module.parameters():\n yield param\n\nparameters_cached = [x for x in parameters()]\n\n\nlearning_rate = args.learning_rate\n\noptim = torch.optim.SGD(parameters(), lr=learning_rate, momentum=0.0) # 0.02, 0.9\n\n#named_modules = {\"rnn\" : rnn, \"output\" : output, \"word_embeddings\" : word_embeddings, \"optim\" : optim}\n\n\n# state = {\"arguments\" : str(args), \"words\" : itos, \"components\" : [c.state_dict() for c in modules]}\n# torch.save(state, \"/u/scr/mhahn/CODEBOOKS/\"+args.language+\"_\"+__file__+\"_code_\"+str(args.myID)+\".txt\")\n\n\n\nif args.load_from is not None:\n checkpoint = 
torch.load(\"/u/scr/mhahn/CODEBOOKS/\"+args.language+\"_\"+__file__.replace(\"RUN_Grodner2002_\", \"\")+\"_code_\"+str(args.load_from)+\".txt\")\n for i in range(len(checkpoint[\"components\"])):\n modules[i].load_state_dict(checkpoint[\"components\"][i])\nelse:\n assert False\n\nfrom torch.autograd import Variable\n\n\n# ([0] + [stoi[training_data[x]]+1 for x in range(b, b+sequence_length) if x < len(training_data)]) \n\n#from embed_regularize import embedded_dropout\n\npositionHere = 0\n\ndef prepareDatasetChunks(data, train=True):\n numeric = [0]\n count = 0\n print(\"Prepare chunks\")\n numerified = []\n numerified_chars = []\n line_numbers = []\n for chunk, chunk_line_numbers in data:\n for char, linenum in zip(chunk, chunk_line_numbers):\n count += 1\n numerified.append((stoi[char]+3 if char in stoi else 2))\n numerified_chars.append([0] + [stoi_chars[x]+3 if x in stoi_chars else 2 for x in char])\n line_numbers.append(linenum)\n\n if len(numerified) > (args.batchSize*args.sequence_length):\n sequenceLengthHere = args.sequence_length\n\n cutoff = int(len(numerified)/(args.batchSize*sequenceLengthHere)) * (args.batchSize*sequenceLengthHere)\n numerifiedCurrent = numerified[:cutoff]\n numerifiedCurrent_chars = numerified_chars[:cutoff]\n\n for i in range(len(numerifiedCurrent_chars)):\n numerifiedCurrent_chars[i] = numerifiedCurrent_chars[i][:15] + [1]\n numerifiedCurrent_chars[i] = numerifiedCurrent_chars[i] + ([0]*(16-len(numerifiedCurrent_chars[i])))\n\n\n numerified = numerified[cutoff:]\n numerified_chars = numerified_chars[cutoff:]\n \n line_numbersCurrent = line_numbers[:cutoff]\n line_numbers = line_numbers[cutoff:]\n \n numerifiedCurrent = torch.LongTensor(numerifiedCurrent).view(args.batchSize, -1, sequenceLengthHere).transpose(0,1).transpose(1,2).cuda()\n numerifiedCurrent_chars = torch.LongTensor(numerifiedCurrent_chars).view(args.batchSize, -1, sequenceLengthHere, 16).transpose(0,1).transpose(1,2).cuda()\n\n line_numbersCurrent = torch.LongTensor(line_numbersCurrent).view(args.batchSize, -1, sequenceLengthHere).transpose(0,1).transpose(1,2).cuda()\n numberOfSequences = numerifiedCurrent.size()[0]\n for i in range(numberOfSequences):\n yield numerifiedCurrent[i], numerifiedCurrent_chars[i], line_numbersCurrent[i]\n hidden = None\n else:\n print(\"Skipping\")\n\n\ncompleteData = []\n\n\n\nhidden = None\n\nzeroBeginning = torch.LongTensor([0 for _ in range(args.batchSize)]).cuda().view(1,args.batchSize)\nbeginning = None\n\nzeroBeginning_chars = torch.zeros(1, args.batchSize, 16).long().cuda()\n\n\nzeroHidden = torch.zeros((args.layer_num, args.batchSize, args.hidden_dim)).cuda()\n\nbernoulli = torch.distributions.bernoulli.Bernoulli(torch.tensor([0.1 for _ in range(args.batchSize)]).cuda())\n\nbernoulli_input = torch.distributions.bernoulli.Bernoulli(torch.tensor([1-args.weight_dropout_in for _ in range(args.batchSize * 2 * args.word_embedding_size)]).cuda())\nbernoulli_output = torch.distributions.bernoulli.Bernoulli(torch.tensor([1-args.weight_dropout_out for _ in range(args.batchSize * args.hidden_dim)]).cuda())\n\n\n\n\ndef forward(numericAndLineNumbers, train=True, printHere=False):\n global hidden\n global beginning\n global beginning_chars\n if hidden is None:\n hidden = None\n beginning = zeroBeginning\n beginning_chars = zeroBeginning_chars\n elif hidden is not None:\n hidden = tuple([Variable(x.data).detach() for x in hidden])\n\n numeric, numeric_chars, lineNumbers = numericAndLineNumbers\n\n\n numeric = torch.cat([beginning, numeric], dim=0)\n\n numeric_chars = 
torch.cat([beginning_chars, numeric_chars], dim=0)\n\n beginning = numeric[numeric.size()[0]-1].view(1, args.batchSize)\n beginning_chars = numeric_chars[numeric_chars.size()[0]-1].view(1, args.batchSize, 16)\n\n\n input_tensor = Variable(numeric[:-1], requires_grad=False)\n target_tensor = Variable(numeric[1:], requires_grad=False)\n\n input_tensor_chars = Variable(numeric_chars[:-1], requires_grad=False)\n target_tensor_chars = Variable(numeric_chars[:-1], requires_grad=False)\n\n embedded_chars = input_tensor_chars.transpose(0,2).transpose(2,1)\n embedded_chars = embedded_chars.contiguous().view(16, -1)\n _, embedded_chars = char_composition(character_embeddings(embedded_chars), None)\n embedded_chars = embedded_chars[0].view(2, args.sequence_length, args.batchSize, args.char_enc_hidden_dim)\n #print(embedded_chars.size())\n\n embedded_chars = char_composition_output(torch.cat([embedded_chars[0], embedded_chars[1]], dim=2))\n #print(embedded_chars.size())\n\n # print(word_embeddings)\n #if train and (embedding_full_dropout_prob is not None):\n # embedded = embedded_dropout(word_embeddings, input_tensor, dropout=embedding_full_dropout_prob, scale=None) #word_embeddings(input_tensor)\n #else:\n embedded = word_embeddings(input_tensor)\n #print(embedded.size())\n# print(\"=========\")\n# print(numeric[:,5])\n# print(embedded[:,5,:].mean(dim=1)[numeric[:-1,5] == 3])\n# print(embedded_chars[:,5,:].mean(dim=1)[numeric[:-1,5] == 3])\n embedded = torch.cat([embedded, embedded_chars], dim=2)\n #print(embedded.size())\n\n out, hidden = rnn_drop(embedded, hidden)\n# if train:\n# out = dropout(out)\n\n logits = output(out) \n log_probs = logsoftmax(logits)\n # print(logits)\n # print(log_probs)\n # print(target_tensor)\n\n \n lossTensor = print_loss(log_probs.view(-1, len(itos)+3), target_tensor.view(-1)).view(-1, args.batchSize)\n losses = lossTensor.data.cpu().numpy()\n\n\n\n\n for i in range(0,args.sequence_length): #range(1,maxLength+1): # don't include i==0\n j = 0\n numericCPU = numeric.cpu().data.numpy()\n lineNum = int(lineNumbers[i][j])\n\n print (i, itos_complete[numericCPU[i+1][j]], losses[i][j], lineNum)\n\n while lineNum >= len(completeData):\n completeData.append([[], 0])\n completeData[lineNum][0].append(itos_complete[numericCPU[i+1][j]])\n completeData[lineNum][1] += losses[i][j]\n\n\n return None, target_tensor.view(-1).size()[0]\n\n\n\n\nimport time\n\ntestLosses = []\n\nif True:\n rnn_drop.train(False)\n\n\n test_data = corpusIterator_Grodner2002.load(args.language, tokenize=True)\n print(\"Got data\")\n test_chars = prepareDatasetChunks(test_data, train=False)\n\n\n \n test_loss = 0\n test_char_count = 0\n counter = 0\n hidden, beginning = None, None\n while True:\n counter += 1\n try:\n numeric = next(test_chars)\n except StopIteration:\n break\n printHere = (counter % 50 == 0)\n loss, numberOfCharacters = forward(numeric, printHere=printHere, train=False)\n test_char_count += numberOfCharacters\n testLosses.append(test_loss/test_char_count)\n print(testLosses)\n\n\nwith open(\"output/Grodner2002_\"+args.language+\"_\"+args.load_from, \"w\") as outFile:\n print(\"\\t\".join([\"LineNumber\", \"RegionLSTM\", \"Surprisal\"]), file=outFile)\n for num, entry in enumerate(completeData):\n print(\"\\t\".join([str(x) for x in [num, \"\".join(entry[0]), entry[1]]]), 
file=outFile)\n\n\n\n","sub_path":"RUN_Grodner2002_char-lm-ud-stationary-vocab-wiki-nospaces-bptt-2-words_NoNewWeightDrop.py","file_name":"RUN_Grodner2002_char-lm-ud-stationary-vocab-wiki-nospaces-bptt-2-words_NoNewWeightDrop.py","file_ext":"py","file_size_in_byte":12288,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"204413213","text":"import time\nimport heapq\n\nimport selectors\n\nimport logging\n\nfrom .control import Control\n\nlogger = logging.getLogger(__name__)\n\n\nclass Mainloop:\n    \"\"\"\n    Mainloop class that handles IO and timer events.\n\n    By registering a filedescriptor using the register() method, an\n    IOProxy object is obtained. This object is used to let the mainloop\n    know in which events we're interested and what functions should be\n    called on a certain event.\n\n    By creating a timer using the timer() method, a new timer is\n    obtained. This object is used to let the mainloop know what function\n    to call when the timer expires, and to set the timer timeout.\n\n    All this is executed by running the mainloop. This should be done by\n    calling the run_once() or run_forever() methods.\n\n    The run_once method will run the loop for only a few events or when\n    the timeout expires, after which it returns.\n\n    The run_forever method will run the loop forever until the\n    shutdown() method is called. The mainloop will then run for at most\n    one more cycle.\n    \"\"\"\n\n    SIG_WAKEUP = 0\n    SIG_SHUTDOWN = 1\n\n    def __init__(self):\n\n        self.selector = selectors.DefaultSelector()\n\n        self.fd_events = dict()\n        self.fd_read_handlers = dict()\n        self.fd_write_handlers = dict()\n\n        self.timer_deadlines = list()\n        self.timer_handlers = dict()\n\n        self.control = Control(self)\n\n        self.shutdown_flag = False\n\n    def now(self):\n        \"\"\" Return the current monotonic timestamp\n        \"\"\"\n\n        return time.monotonic()\n\n    def shutdown(self):\n        self.control.signal(Mainloop.SIG_SHUTDOWN)\n\n    def run_forever(self):\n\n        while not self.shutdown_flag:\n            self.run_once(None)\n\n    def run_once(self, max_timeout=None):\n        \"\"\"\n        Run for one cycle.\n        \"\"\"\n\n        # stats\n        nr_timers = 0\n        nr_writes = 0\n        nr_reads = 0\n        nr_signals = 0\n\n        # retrieve the remaining time for the first timer to expire\n        timer_timeout = self._get_next_timer_deadline()\n\n        # calculate the timeout used for the select() call\n        timeout = None\n\n        if max_timeout and timer_timeout:\n            timeout = min(max_timeout, timer_timeout)\n\n        elif max_timeout:\n            timeout = max_timeout\n\n        elif timer_timeout:\n            timeout = timer_timeout\n\n        # wait for events\n        events = self.selector.select(timeout)\n\n        # process expired timers\n        expired_timers = self._get_expired_timers()\n\n        for key in expired_timers:\n\n            nr_timers += 1\n\n            handler = self.timer_handlers.pop(key, None)\n\n            # if handler is None, it means the timer is canceled.\n\n            if handler:\n                handler()\n\n        # process IO events\n        for key, mask in events:\n\n            if mask & selectors.EVENT_WRITE:\n\n                nr_writes += 1\n\n                handler = self.fd_write_handlers.get(key.fd, None)\n                if handler:\n                    handler()\n\n            if mask & selectors.EVENT_READ:\n\n                nr_reads += 1\n\n                handler = self.fd_read_handlers.get(key.fd, None)\n                if handler:\n                    handler()\n\n        # process control signals\n        for signal in self.control.signals():\n\n            nr_signals += 1\n\n            if signal == Mainloop.SIG_SHUTDOWN:\n                self.shutdown_flag = True\n\n        return nr_timers + nr_writes + nr_reads + nr_signals\n\n    def register(self, fd):\n        \"\"\"\n        Register a filedescriptor on the mainloop, returning an IOProxy\n        object.\n        \"\"\"\n\n        proxy 
= IOProxy(self, fd)\n        return proxy\n\n    def timer(self):\n        \"\"\"\n        Create a new timer on the mainloop, returning a Timer object.\n        \"\"\"\n\n        timer = Timer(self)\n        return timer\n\n    def _update_interest(self, fd, read=None, write=None):\n        \"\"\"\n        Modify the events that the selector should select.\n        \"\"\"\n\n        new_events = self.fd_events.get(fd, 0)\n        old_events = new_events\n\n        # calculate bitmask\n        if read is True:\n            new_events |= selectors.EVENT_READ\n\n        elif read is False:\n            new_events &= ~selectors.EVENT_READ\n\n        if write is True:\n            new_events |= selectors.EVENT_WRITE\n\n        elif write is False:\n            new_events &= ~selectors.EVENT_WRITE\n\n        # register, modify or unregister\n        if new_events and not old_events:\n            self.selector.register(fd, new_events)\n\n        elif new_events:\n            self.selector.modify(fd, new_events)\n\n        else:\n            self.selector.unregister(fd)\n\n        self.fd_events[fd] = new_events\n\n    def _update_read_handler(self, fd, func):\n        \"\"\"\n        Set the handler that will be called on read events.\n        \"\"\"\n\n        self.fd_read_handlers[fd] = func\n\n    def _update_write_handler(self, fd, func):\n        \"\"\"\n        Set the handler that will be called on write events.\n        \"\"\"\n\n        self.fd_write_handlers[fd] = func\n\n    def _unregister(self, fd):\n        if fd in self.fd_events:\n            self._update_interest(fd, read=False, write=False)\n            del self.fd_events[fd]\n        self.fd_read_handlers.pop(fd, None)\n        self.fd_write_handlers.pop(fd, None)\n\n    def _update_timer_timeout(self, timer_id, timeout):\n        \"\"\"\n        Set the timeout after which the timer will expire\n        \"\"\"\n\n        now = self.now()\n        deadline = now + timeout\n\n        heapq.heappush(self.timer_deadlines, (deadline, timer_id))\n\n        self.control.signal(Mainloop.SIG_WAKEUP)\n\n    def _update_timer_handler(self, timer_id, func):\n        \"\"\"\n        Set the handler that will be called when a timer expires.\n        \"\"\"\n\n        if callable(func):\n            self.timer_handlers[timer_id] = func\n\n        else:\n            if timer_id in self.timer_handlers:\n                del self.timer_handlers[timer_id]\n\n    def _get_next_timer_deadline(self):\n        \"\"\"\n        Calculate the time that's left until the next timer will expire.\n        Returns None if there are no timers to expire.\n        \"\"\"\n\n        timeout = None\n\n        while self.timer_deadlines and not timeout:\n\n            now = self.now()\n\n            deadline, timer_id = self.timer_deadlines[0]\n\n            # skip timers that have no handler attached\n            if timer_id not in self.timer_handlers:\n                heapq.heappop(self.timer_deadlines)\n                continue\n\n            timeout = deadline - now\n\n        return timeout\n\n    def _get_expired_timers(self):\n        \"\"\"\n        Yields the timers that have expired\n        \"\"\"\n\n        now = self.now()\n\n        while self.timer_deadlines:\n\n            deadline, timer_id = self.timer_deadlines[0]\n\n            if deadline > now:\n                break\n\n            heapq.heappop(self.timer_deadlines)\n\n            yield timer_id\n\n\nclass IOProxy:\n    \"\"\"\n    The IOProxy class is used to interact with the mainloop about a\n    single filedescriptor. 
The object should not be created directly,\n    but always be obtained by calling the register() method of the\n    Mainloop class.\n\n    After an object is obtained, the set_interest() method can be used\n    to let the mainloop know in what events we're interested.\n\n    The set_read_handler() and set_write_handler() methods are used to\n    specify which functions should be called when the corresponding event is\n    raised.\n    \"\"\"\n\n    def __init__(self, mainloop, fd):\n        # if we are given a file or socket, call its fileno() function\n        # in order to get the filedescriptor\n        fileno_func = getattr(fd, 'fileno', None)\n        if fileno_func and callable(fileno_func):\n            fd = fileno_func()\n\n        self.mainloop = mainloop\n        self.fd = fd\n        self.opened = True\n\n    def set_interest(self, read=None, write=None):\n        \"\"\"\n        Let the mainloop know in which events we're interested. This is\n        done by setting either read, write or both to True or False.\n\n        When set to None, the interest will not change.\n        \"\"\"\n\n        self.mainloop._update_interest(self.fd, read, write)\n\n    def start_writing(self):\n        \"\"\"\n        Alias for set_interest(write=True), allows for writing better\n        readable code.\n        \"\"\"\n\n        self.set_interest(write=True)\n\n    def stop_writing(self):\n        \"\"\"\n        Alias for set_interest(write=False), allows for writing better\n        readable code.\n        \"\"\"\n\n        self.set_interest(write=False)\n\n    def set_read_handler(self, handler=None):\n        \"\"\"\n        Set the function that will be called when a read event is\n        raised. When handler is None, no function will be called.\n        \"\"\"\n\n        self.mainloop._update_read_handler(self.fd, handler)\n\n    def set_write_handler(self, handler=None):\n        \"\"\"\n        Set the function that will be called when a write event is\n        raised. When handler is None, no function will be called.\n        \"\"\"\n\n        self.mainloop._update_write_handler(self.fd, handler)\n\n    def unregister(self):\n        \"\"\"\n        Disconnect the proxy from the mainloop. Deleting all references.\n        \"\"\"\n\n        self.mainloop._unregister(self.fd)\n        self.opened = False\n\n    def is_open(self):\n        \"\"\"\n        Returns whether or not this proxy is still active, and thus\n        we could expect to receive or be able to write data.\n        \"\"\"\n\n        return self.opened\n\n    def __repr__(self):\n        return \"<IOProxy fd={fd}>\".format(\n            fd=self.fd\n        )\n\n\nclass Timer:\n    \"\"\"\n    The Timer class is used to interact with the mainloop about timers.\n    The object should not be created directly but always be obtained by\n    calling the timer() method of the Mainloop class.\n\n    After a Timer object is obtained, the set_handler method can be used\n    to specify which function should be called when the timer expires.\n\n    The set() method is used to set the timer. 
By calling the cancel()\n method, the current timer will be canceled.\n \"\"\"\n\n next_timer_key = 1\n next_timer_id = 1\n\n def __init__(self, mainloop):\n self.mainloop = mainloop\n\n self.key = Timer.next_timer_key\n Timer.next_timer_key += 1\n\n self.timer_id = None\n self.handler = None\n self.handler_args = None\n self.handler_kwargs = None\n\n self.expired = False\n\n def set_handler(self, handler=None, *args, **kwargs):\n \"\"\"\n Set the function that will be called when the timer expires.\n \"\"\"\n\n self.handler = handler\n self.handler_args = args\n self.handler_kwargs = kwargs\n\n def set(self, timeout):\n \"\"\"\n Sets the timer, this will cause the handler to be called after\n the given timeout expires.\n \"\"\"\n\n self.timer_id = Timer.next_timer_id\n Timer.next_timer_id += 1\n\n self.mainloop._update_timer_handler(self.timer_id, self.__handler)\n self.mainloop._update_timer_timeout(self.timer_id, timeout)\n\n self.expired = False\n\n def cancel(self):\n \"\"\"\n Cancel a set timer.\n \"\"\"\n\n if self.timer_id:\n self.mainloop._update_timer_handler(self.timer_id, None)\n self.timer_id = None\n\n def has_expired(self):\n return self.expired\n\n def __handler(self):\n \"\"\"\n Called by the mainloop when the timer expires.\n \"\"\"\n\n self.expired = True\n\n if self.handler:\n self.handler(*self.handler_args, **self.handler_kwargs)\n","sub_path":"nervix/mainloop/mainloop.py","file_name":"mainloop.py","file_ext":"py","file_size_in_byte":11614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"217679977","text":"from selenium import webdriver\nfrom selenium.webdriver.common.by import By\n\ntry:\n chrome = webdriver.Chrome(executable_path='E:\\\\chromedriver.exe')\n chrome.get(url='http://inst.eecs.berkeley.edu/~cs61a/fa18/')\n print(chrome.title)\n list = chrome.find_elements(By.XPATH, '//li/a[text()=\"8pp\"]/@href') # Invalid Xpath\n print(len(list))\n# chrome.execute_script('window.open(\"https://www.google.co.in\", \"_blank\")')\n\nexcept Exception as e:\n print(e)\n\nfinally:\n chrome.quit()\n","sub_path":"learn_codes/Selenium/SeleniumRough.py","file_name":"SeleniumRough.py","file_ext":"py","file_size_in_byte":495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"503336327","text":"from PyQt5 import QtCore, QtGui, QtWidgets\r\nimport views.main_ui\r\nimport views.shutdown_ui\r\nimport views.about_ui\r\nimport views.activate_ui\r\nimport json\r\nimport os\r\nfrom helpers import validateJSON\r\nfrom pchome_cookie import PchomePanic\r\nimport threading\r\nfrom get_serial_number import getMachine_addr\r\nfrom windows.utils import critical\r\nimport requests\r\n\r\n\r\nclass Application():\r\n def __init__(self):\r\n \r\n self.serial_code = getMachine_addr()\r\n\r\n import sys\r\n self.app = QtWidgets.QApplication(sys.argv)\r\n self.MainWindow = QtWidgets.QMainWindow()\r\n self.main_ui = views.main_ui.Ui_MainWindow()\r\n self.main_ui.setupUi(self.MainWindow)\r\n self.ShotdownWindow = QtWidgets.QMainWindow()\r\n self.shutdown_ui = views.shutdown_ui.Ui_MainWindow()\r\n self.shutdown_ui.setupUi(self.ShotdownWindow)\r\n self.DialogAbout = QtWidgets.QDialog()\r\n self.about_ui = views.about_ui.Ui_Dialog()\r\n self.about_ui.setupUi(self.DialogAbout)\r\n self.ActivateWindow = QtWidgets.QMainWindow()\r\n self.activate_ui = views.activate_ui.Ui_MainWindow()\r\n self.activate_ui.setupUi(self.ActivateWindow)\r\n if self.is_activate():\r\n self.MainWindow.show()\r\n else: \r\n 
self.activate_ui.serial_code.setText(getMachine_addr())\r\n self.ActivateWindow.show()\r\n self.clear_form()\r\n self.setup_app()\r\n self.bind_events()\r\n self.pchome = PchomePanic()\r\n sys.exit(self.app.exec_())\r\n\r\n def load_activate_code(self):\r\n if os.path.exists('activate_key'):\r\n f = open('activate_key')\r\n return f.read()\r\n return None\r\n\r\n def is_activate(self):\r\n activate_code = self.load_activate_code()\r\n if activate_code == None:\r\n return False\r\n else:\r\n response = requests.post('https://dev.kevins.fun/v1.0/activate/verify-code', json={'activate_code': activate_code, 'serial_code': getMachine_addr()})\r\n if response.json()['returnCode'] == '000000':\r\n return True\r\n \r\n def setup_app(self):\r\n if os.path.exists('record.json'):\r\n f = open('record.json')\r\n json_data = f.read()\r\n isValid = validateJSON(json_data)\r\n if isValid:\r\n form_data = json.loads(json_data)\r\n self.main_ui.email.setText(form_data['email'])\r\n self.main_ui.password.setText(form_data['password'])\r\n self.main_ui.target_url.setText(form_data['target_url'])\r\n self.main_ui.browser_qty.setValue(form_data['browser_qty'])\r\n self.main_ui.record.setChecked(form_data['record'])\r\n\r\n def about_show(self):\r\n self.serial_code = getMachine_addr()\r\n self.about_ui.serial_code.setText(self.serial_code)\r\n self.about_ui.activate_code.setText(self.load_activate_code())\r\n self.DialogAbout.show()\r\n\r\n def about_close(self):\r\n self.DialogAbout.close()\r\n\r\n def submit_btn_handler(self):\r\n form_data = {\r\n 'email': self.main_ui.email.text(),\r\n 'password': self.main_ui.password.text(),\r\n 'target_url': self.main_ui.target_url.text(),\r\n 'browser_qty': self.main_ui.browser_qty.value(),\r\n 'record': self.main_ui.record.isChecked()\r\n }\r\n if self.main_ui.record.isChecked():\r\n json_form_data = json.dumps(form_data)\r\n with open('record.json', 'w') as f:\r\n f.write(json_form_data)\r\n self.MainWindow.close()\r\n self.ShotdownWindow.show()\r\n \r\n t = threading.Thread(name='auto start', target=self.pchome.run, args=[form_data])\r\n t.setDaemon(True)\r\n t.start()\r\n\r\n \r\n\r\n def close_btn_handler(self):\r\n self.pchome.stop()\r\n import sys\r\n sys.exit(0)\r\n \r\n def clear_record_handler(self):\r\n if os.path.exists('record.json'):\r\n os.remove(\"record.json\")\r\n self.clear_form()\r\n\r\n def activate_submit_handler(self):\r\n response = requests.post('https://dev.kevins.fun/v1.0/activate/verify-code', json={'activate_code': self.activate_ui.activate_code.text(), 'serial_code': getMachine_addr()})\r\n if response.json()['returnCode'] == '000000':\r\n with open('activate_key', 'w') as f:\r\n f.write(self.activate_ui.activate_code.text())\r\n self.ActivateWindow.close()\r\n self.MainWindow.show()\r\n else:\r\n critical(content=f'{response.json()[\"returnMessage\"]}({response.json()[\"returnCode\"]})')\r\n\r\n def bind_events(self):\r\n self.main_ui.submit_btn.clicked.connect(self.submit_btn_handler)\r\n self.main_ui.close_btn.clicked.connect(self.close_btn_handler)\r\n self.main_ui.clear_record.triggered.connect(self.clear_record_handler)\r\n self.main_ui.about.triggered.connect(self.about_show)\r\n self.shutdown_ui.stop_app.clicked.connect(self.close_btn_handler)\r\n self.activate_ui.submit_btn.clicked.connect(self.activate_submit_handler)\r\n self.about_ui.ok_btn.clicked.connect(self.about_close)\r\n\r\n\r\n def clear_form(self):\r\n self.main_ui.email.clear()\r\n self.main_ui.password.clear()\r\n self.main_ui.target_url.clear()\r\n 
self.main_ui.browser_qty.setValue(1)\r\n self.main_ui.record.setChecked(False)\r\n \r\n\r\nif __name__ == \"__main__\":\r\n qt_app = Application()\r\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":5496,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"392338598","text":"from tkinter import *\nfrom tkinter.ttk import Notebook\n\n\n#********main menu***********\ndef doNothing():\n print(\"Do nothting\")\n\n#basic blank window creation\nroot=Tk()\nroot.title(\"!ABC\")\n#adding menu into blank window\nmenu=Menu(root)\nroot.config(menu=menu)\n\nsubMenu=Menu(menu)\nmenu.add_cascade(label=\"File\",menu=subMenu)\nsubMenu.add_command(label=\"New...\",command=doNothing)\nsubMenu.add_command(label=\"Save...\",command=doNothing)\nsubMenu.add_command(label=\"Save as...\",command=doNothing)\nsubMenu.add_command(label=\"Close...\",command=doNothing)\nsubMenu.add_separator()\nsubMenu.add_command(label=\"Exit...\",command=root.quit)\n\n\neditMenu=Menu(menu)\nmenu.add_cascade(label=\"Edit\",menu=editMenu)\neditMenu.add_command(label=\"Redo\",command=doNothing)\n\ndrawMenu=Menu(menu)\nmenu.add_cascade(label=\"Draw\",menu=drawMenu)\ndrawMenu.add_command(label=\"Line\",command=doNothing)\ndrawMenu.add_command(label=\"Circle\",command=doNothing)\ndrawMenu.add_command(label=\"Rectangle\",command=doNothing)\n\n\n\n#**********tool bar\ntoolbar=Frame(root,bg=\"blue\")\ninsertButt=Button(toolbar,text=\"insert image\",command=doNothing)\ninsertButt.pack(side=LEFT,padx=2, pady=2)\nprintButt=Button(toolbar,text=\"print image\",command=doNothing)\nprintButt.pack(side=LEFT,padx=2, pady=2)\ntoolbar.pack(side=TOP,fill=X)\n\nn=Notebook(root)\nf1=Frame(n,width=200,height=200)\nf2=Frame(n,width=200,height=200)\nn.add(f1,text=\"first\")\nn.add(f2,text=\"second\")\nn.pack(fill=X)\n\n\nleft=Frame(root,bg=\"grey\",bd=2,relief=GROOVE)\n\n\ntopLeft=Frame(left, bg=\"red\", bd=5, relief=SUNKEN,height=800)\n\ninsertTopLeftButt1=Button(topLeft,text=\"insert image\",command=doNothing)\ninsertTopLeftButt1.pack(side=TOP,padx=5, pady=5)\n\ninsertTopLeftButt2=Button(topLeft,text=\"insert image\",command=doNothing)\ninsertTopLeftButt2.pack(side=TOP,padx=5, pady=5)\n\ntopLeft.pack(side=TOP,fill=X,padx=2,pady=2)\n\nseparator = Frame(left, height=2, bd=1, relief=SUNKEN)\nseparator.pack(fill=X,padx=2,pady=2)\n\n\nbottomLeft=Frame(left, bg=\"red\",bd=5, relief=SUNKEN,height=400)\n\ninsertbottomLeftButt1=Button(bottomLeft,text=\"insert image\",command=doNothing)\ninsertbottomLeftButt1.pack(side=TOP,padx=5, pady=5)\n\ninsertbottomLeftButt2=Button(bottomLeft,text=\"insert image\",command=doNothing)\ninsertbottomLeftButt2.pack(side=TOP,padx=5, pady=5)\n\nbottomLeft.pack(side=TOP,fill=X,padx=2,pady=2)\n\nleft.pack(side=LEFT,fill=Y)\n\ncanvas=Canvas(root,width=400,height=100,bg=\"blue\",bd=3)\ncanvas.pack()\nblackline=canvas.create_line(0,0,10,10)\nredline=canvas.create_line(0,100,200,50,fill=\"red\")\ngreenBox=canvas.create_rectangle(0,100,200,50, fill=\"green\")\n\nbottom=Frame(root,bg=\"grey\", height=25)\nbottom.pack(side=BOTTOM, fill=X)\n\nroot.mainloop()\n","sub_path":"p.py","file_name":"p.py","file_ext":"py","file_size_in_byte":2634,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"446377901","text":"from envs.deep_cure_env import DeepCure, random_base_infect_rate, random_lifetime, ForeignCountry\nfrom plotting import plot\nimport gym\nimport numpy as np\nimport pandas as pd\nimport itertools\nimport 
matplotlib.pyplot as plt\n\ndef relu(x):\n x_and_zeros = np.array([x, np.zeros(x.shape)])\n return np.max(x_and_zeros, axis=0)\n\ndef sigmoid(x):\n return 1/(1+np.exp(-x))\n\nclass NeuralNetworkPolicy:\n\n def __init__(self, env, h_size=16, one_layer=False): # h_size = number of neurons on the hidden layer\n\n if one_layer:\n self.activation_functions = (sigmoid,)\n weights = (np.zeros([env.observation_space.shape[0] + 1, env.action_space.shape[0]]),)\n else:\n self.activation_functions = (relu, sigmoid)\n # Make a neural network with 1 hidden layer of `h_size` units\n weights = (np.zeros([env.observation_space.shape[0] + 1, h_size]),\n np.zeros([h_size + 1, env.action_space.shape[0]]))\n\n self.shape_list = weights_shape(weights)\n self.num_params = len(flatten_weights(weights))\n\n\n def __call__(self, state, theta):\n weights = unflatten_weights(theta, self.shape_list)\n\n return feed_forward(inputs=state,\n weights=weights,\n activation_functions=self.activation_functions)\n\n\ndef feed_forward(inputs, weights, activation_functions, verbose=False):\n x = inputs.copy()\n for layer_weights, layer_activation_fn in zip(weights, activation_functions):\n y = np.dot(x, layer_weights[1:])\n y += layer_weights[0]\n layer_output = layer_activation_fn(y)\n x = layer_output\n return layer_output\n\n\ndef weights_shape(weights):\n return [weights_array.shape for weights_array in weights]\n\n\ndef flatten_weights(weights):\n \"\"\"Convert weight parameters to a 1 dimension array (more convenient for optimization algorithms)\"\"\"\n nested_list = [weights_2d_array.flatten().tolist() for weights_2d_array in weights]\n flat_list = list(itertools.chain(*nested_list))\n return flat_list\n\n\ndef unflatten_weights(flat_list, shape_list):\n \"\"\"The reverse function of `flatten_weights`\"\"\"\n length_list = [shape[0] * shape[1] for shape in shape_list]\n\n nested_list = []\n start_index = 0\n\n for length, shape in zip(length_list, shape_list):\n nested_list.append(np.array(flat_list[start_index:start_index+length]).reshape(shape))\n start_index += length\n\n return nested_list\n\nclass ObjectiveFunction:\n\n def __init__(self, env, policy, num_episodes=1, max_time_steps=float('inf'), minimization_solver=True):\n self.ndim = policy.num_params # Number of dimensions of the parameter (weights) space\n self.env = env\n self.policy = policy\n self.num_episodes = num_episodes\n self.max_time_steps = max_time_steps\n self.minimization_solver = minimization_solver\n\n self.num_evals = 0\n\n\n def eval(self, policy_params, num_episodes=None):\n \"\"\"Evaluate a policy\"\"\"\n\n self.num_evals += 1\n\n if num_episodes is None:\n num_episodes = self.num_episodes\n\n average_total_rewards = 0\n\n for i_episode in range(num_episodes):\n\n total_rewards = 0.\n state = self.env.reset()\n done = False\n while not done:\n action = self.policy(state, policy_params)\n action = action >= 0.5\n state, reward, done, info = self.env.step(action)\n total_rewards += reward\n\n average_total_rewards += float(total_rewards) / num_episodes\n\n if self.minimization_solver:\n average_total_rewards *= -1.\n\n return average_total_rewards # Optimizers do minimization by default...\n\n\n def __call__(self, policy_params, num_episodes=None):\n return self.eval(policy_params, num_episodes)\n\n\ndef saes(objective_function,\n x_array,\n sigma_array,\n max_iterations=500,\n tau=None,\n hist_dict=None):\n \"\"\"\n x_array : shape (n,)\n sigma_array: shape (n,)\n \"\"\"\n\n if tau is None:\n # Self-adaptation learning rate\n tau = 1./(2.* 
len(x_array))\n\n    fx = objective_function(x_array)\n    for i in range(max_iterations):\n        sigma_array_ = sigma_array * np.exp(tau*np.random.normal(0,1,size=sigma_array.shape))\n        x_array_ = x_array + sigma_array_ * np.random.normal(0,1,size=x_array.shape)\n        fx_ = objective_function(x_array_)\n        if fx_ < fx:\n            fx = fx_\n            x_array = x_array_\n            sigma_array = sigma_array_\n        if hist_dict is not None:\n            hist_dict[i] = [fx] + x_array.tolist() + sigma_array.tolist()\n\n    return x_array\n\nif __name__ == \"__main__\":\n    SEED = 42\n\n    np.random.seed(SEED)\n\n    env = DeepCure(foreign_countries = [ForeignCountry(0.1,100,100_000, save_history=True)], save_history=True, seed=SEED)\n    env.reset()\n\n    nn_policy = NeuralNetworkPolicy(env, one_layer=True)\n    objective_function = ObjectiveFunction(env=env, policy=nn_policy, num_episodes=25)\n\n    hist_dict = {}\n\n    initial_solution_array = np.random.random(nn_policy.num_params)\n    initial_sigma_array = np.ones(nn_policy.num_params) * 1.\n\n    theta = saes(objective_function=objective_function,\n                 x_array=initial_solution_array,\n                 sigma_array=initial_sigma_array,\n                 max_iterations=1000,\n                 hist_dict=hist_dict)\n\n    np.save('saes-theta.npy', theta)\n    print(theta)\n\n    np.random.seed(SEED)\n\n    nn_policy2 = NeuralNetworkPolicy(env, h_size=10, one_layer=False)\n    objective_function2 = ObjectiveFunction(env=env, policy=nn_policy2, num_episodes=25)\n\n    hist_dict2 = {}\n\n    initial_solution_array = np.random.random(nn_policy2.num_params)\n    initial_sigma_array = np.ones(nn_policy2.num_params) * 1.\n\n    theta2 = saes(objective_function=objective_function2,\n                  x_array=initial_solution_array,\n                  sigma_array=initial_sigma_array,\n                  max_iterations=1000,\n                  hist_dict=hist_dict2)\n\n    np.save('saes-theta2.npy', theta2)\n    print(theta2)\n\n    rewards = pd.DataFrame.from_dict(hist_dict, orient='index').iloc[:,0].to_numpy()\n    rewards2 = pd.DataFrame.from_dict(hist_dict2, orient='index').iloc[:,0].to_numpy()\n\n    plt.figure()\n    plt.plot(range(len(rewards)), rewards, label='1 layer')\n    plt.plot(range(len(rewards2)), rewards2, label='2 layer')\n    plt.xlabel(\"Training Steps\")\n    plt.ylabel(\"Reward\")\n    plt.legend()\n    plt.show()\n","sub_path":"deep_cure_learning/saes_agent.py","file_name":"saes_agent.py","file_ext":"py","file_size_in_byte":6623,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"523155334","text":"\"\"\"\nAuthor: Victor Negîrneac\nLast update: 2020-02-15\n\nMinimization toolbox for 1D domain functions.\nDeveloped based on the `adaptive.Learner1D` from adaptive v0.10.0:\nhttps://github.com/python-adaptive/adaptive/releases/tag/v0.10.0\n\nI hope it survives any changes that the `adaptive` package might suffer\n\"\"\"\n\nfrom adaptive.learner import Learner1D\nimport numpy as np\nfrom functools import partial\nimport logging\nimport operator\nimport random\nfrom pycqed.utilities.general import get_module_name\n\nlog = logging.getLogger(__name__)\n\n# ######################################################################\n# Learner1D wrappings to be able to access all learner data\n# ######################################################################\n\n\nclass Learner1D_Minimizer(Learner1D):\n    \"\"\"\n    Does everything that the Learner1D does plus wraps it such that\n    `mk_optimize_res_loss_func` can be used\n\n    It also accepts using loss functions made by\n    `mk_non_uniform_res_loss_func` and `mk_res_loss_func`\n    including providing one of the loss functions from\n    adaptive.learner.learnerND\n\n    The resolution loss functions in this doc are built 
such that some\n    other loss function is used when the resolution boundaries are respected\n    \"\"\"\n\n    def __init__(self, func, bounds, loss_per_interval=None):\n        # Sanity check that can save hours of debugging...\n        assert bounds[1] > bounds[0]\n\n        super().__init__(func, bounds, loss_per_interval)\n        # Keep the original learner behaviour but pass extra arguments to\n        # the provided input loss function\n        if hasattr(self.loss_per_interval, \"needs_learner_access\"):\n            # Save the loss function that requires the learner instance\n            input_loss_per_interval = self.loss_per_interval\n            self.loss_per_interval = partial(input_loss_per_interval, learner=self)\n\n            if hasattr(input_loss_per_interval, \"threshold\"):\n                self.threshold = input_loss_per_interval.threshold\n            else:\n                self.threshold = None\n\n            self.compare_op = None\n            if hasattr(input_loss_per_interval, \"converge_below\"):\n                self.converge_below = input_loss_per_interval.converge_below\n            else:\n                self.converge_below = None\n\n            self.moving_threshold = np.inf\n            self.no_improve_count = 0\n\n            if hasattr(input_loss_per_interval, \"max_no_improve_in_local\"):\n                self.max_no_improve_in_local = (\n                    input_loss_per_interval.max_no_improve_in_local\n                )\n                assert self.max_no_improve_in_local >= 2\n            else:\n                self.max_no_improve_in_local = 4\n\n            if hasattr(input_loss_per_interval, \"update_losses_after_no_improv\"):\n                self.update_losses_after_no_improv = (\n                    input_loss_per_interval.update_losses_after_no_improv\n                )\n            else:\n                self.update_losses_after_no_improv = True\n\n            self.last_min = np.inf\n\n            # State variable local vs \"global search\"\n            # Note that all the segments that were considered interesting at\n            # some point will still have very high priority when this\n            # variable is set back to False\n            self.sampling_local_minima = False\n\n        # Recompute all losses if the function scale changes i.e. a new best\n        # min or max appeared\n        # This happens in `adaptive.Learner1D.tell`\n        self._recompute_losses_factor = 1\n\n    def _recompute_all_losses(self):\n        \"\"\"\n        This is the equivalent function that exists in LearnerND for this\n        purpose.\n\n        It is just a copy paste of a few lines from the `Learner1D.tell`\n\n        It is used to recompute losses when the `Learner1D_Minimizer` is \"done\"\n        with sampling a local minimum.\n        \"\"\"\n\n        # NB: We are not updating the scale here as the `tell` method does\n        # because we assume this method will be called only after sampling\n        # `max_no_improve_in_local` points in the local minimum\n\n        for interval in reversed(self.losses):\n            self._update_interpolated_loss_in_interval(*interval)\n\n\n# ######################################################################\n# Utilities for adaptive.learner.learner1D\n# ######################################################################\n\n\ndef mk_res_loss_func(\n    default_loss_func, min_distance=0.0, max_distance=1.0, dist_is_norm=False\n):\n    min_distance_orig = min_distance\n    max_distance_orig = max_distance\n\n    # Wrappers to make it work with the default loss of `adaptive` package\n    if get_module_name(default_loss_func, level=0) == \"adaptive\":\n        def _default_loss_func(xs, values, *args, **kw):\n            return default_loss_func(xs, values)\n    else:\n        def _default_loss_func(xs, values, *args, **kw):\n            return default_loss_func(xs, values, *args, **kw)\n\n    def func(xs, values, *args, **kw):\n        if dist_is_norm:\n            min_distance_used = min_distance_orig\n            max_distance_used = max_distance_orig\n        else:\n            min_distance_used = min_distance_orig / kw[\"learner\"]._scale[0]\n            max_distance_used = max_distance_orig / kw[\"learner\"]._scale[0]\n\n        # `dist` is normalised 0 <= dist <= 1 because xs are scaled\n        dist = abs(xs[1] - xs[0])\n        if dist < min_distance_used:\n            loss = 0.0  # don't keep splitting sufficiently small intervals\n        elif dist > max_distance_used:\n            # maximally prioritize intervals that are too large\n            # the learner will compare all the segments that have inf loss based\n            # on the distance between them\n            loss = np.inf\n        else:\n            loss = _default_loss_func(xs, values, *args, **kw)\n        return loss\n\n    if not dist_is_norm:\n        func.needs_learner_access = True\n\n    # Preserve loss function attribute in case a loss function from\n    # adaptive.learner.learnerND is given\n    if hasattr(default_loss_func, \"nth_neighbors\"):\n        func.nth_neighbors = default_loss_func.nth_neighbors\n    return func\n\n\ndef mk_non_uniform_res_loss_func(\n    default_loss_func, npoints: int = 49, res_bounds=(0.5, 3.0)\n):\n    \"\"\"\n    This function is intended to allow for specifying the min and max\n    interval size in a more user friendly and not precise way.\n    For a more precise way use the mk_res_loss_func to specify the\n    interval size limits directly\n    \"\"\"\n    # Learner1D normalizes the parameter space to unity\n    normalized_domain_size = 1.0\n    assert res_bounds[1] > res_bounds[0]\n    uniform_resolution = normalized_domain_size / npoints\n    min_distance = uniform_resolution * res_bounds[0]\n    max_distance = uniform_resolution * res_bounds[1]\n    func = mk_res_loss_func(\n        default_loss_func,\n        min_distance=min_distance,\n        max_distance=max_distance,\n        dist_is_norm=True,\n    )\n\n    # Preserve loss function attribute in case a loss function from\n    # adaptive.learner.learnerND is given\n    if hasattr(default_loss_func, \"nth_neighbors\"):\n        func.nth_neighbors = default_loss_func.nth_neighbors\n    return func\n\n\n# 
a new best\n # min or max appeared\n # This happens in `adaptive.Learner1D.tell`\n self._recompute_losses_factor = 1\n\n def _recompute_all_losses(self):\n \"\"\"\n This is the equivalent fucntion that exists in LearnernND for this\n purpuse.\n\n It is just a copy paste of a few lines from the `Learner1D.tell`\n\n It is used to recompute losses when the `Learner1D_Minimizer` is \"done\"\n with sampling a local minimum.\n \"\"\"\n\n # NB: We are not updating the scale here as the `tell` method does\n # because we assume this method will be called only after sampling\n # `max_no_improve_in_local` points in the local minimum\n\n for interval in reversed(self.losses):\n self._update_interpolated_loss_in_interval(*interval)\n\n\n# ######################################################################\n# Utilities for adaptive.learner.learner1D\n# ######################################################################\n\n\ndef mk_res_loss_func(\n default_loss_func, min_distance=0.0, max_distance=1.0, dist_is_norm=False\n):\n min_distance_orig = min_distance\n max_distance_orig = max_distance\n\n # Wrappers to make it work with the default loss of `adaptive` package\n if get_module_name(default_loss_func, level=0) == \"adaptive\":\n def _default_loss_func(xs, values, *args, **kw):\n return default_loss_func(xs, values)\n else:\n def _default_loss_func(xs, values, *args, **kw):\n return default_loss_func(xs, values, *args, **kw)\n\n def func(xs, values, *args, **kw):\n if dist_is_norm:\n min_distance_used = min_distance_orig\n max_distance_used = max_distance_orig\n else:\n min_distance_used = min_distance_orig / kw[\"learner\"]._scale[0]\n max_distance_used = max_distance_orig / kw[\"learner\"]._scale[0]\n\n # `dist` is normalised 0 <= dist <= 1 because xs are scaled\n dist = abs(xs[1] - xs[0])\n if dist < min_distance_used:\n loss = 0.0 # don't keep splitting sufficiently small intervals\n elif dist > max_distance_used:\n # maximally prioritize intervals that are too large\n # the learner will compare all the segments that have inf loss based\n # on the distance between them\n loss = np.inf\n else:\n loss = _default_loss_func(xs, values, *args, **kw)\n return loss\n\n if not dist_is_norm:\n func.needs_learner_access = True\n\n # Preserve loss function atribute in case a loss function from\n # adaptive.learner.learnerND is given\n if hasattr(default_loss_func, \"nth_neighbors\"):\n func.nth_neighbors = default_loss_func.nth_neighbors\n return func\n\n\ndef mk_non_uniform_res_loss_func(\n default_loss_func, npoints: int = 49, res_bounds=(0.5, 3.0)\n):\n \"\"\"\n This function is intended to allow for specifying the min and max\n interval size in a more user friendly and not precise way.\n For a more precise way use the mk_res_loss_func to specify the\n interval size limits directly\n \"\"\"\n # Learner1D normalizes the parameter space to unity\n normalized_domain_size = 1.0\n assert res_bounds[1] > res_bounds[0]\n uniform_resolution = normalized_domain_size / npoints\n min_distance = uniform_resolution * res_bounds[0]\n max_distance = uniform_resolution * res_bounds[1]\n func = mk_res_loss_func(\n default_loss_func,\n min_distance=min_distance,\n max_distance=max_distance,\n dist_is_norm=True,\n )\n\n # Preserve loss function atribute in case a loss function from\n # adaptive.learner.learnerND is given\n if hasattr(default_loss_func, \"nth_neighbors\"):\n func.nth_neighbors = default_loss_func.nth_neighbors\n return func\n\n\n# 
######################################################################\n# Loss and goal functions to be used with the Learner1D_Minimizer\n# ######################################################################\n\n\ndef mk_minimization_loss(\n threshold: float = None,\n converge_at_local: bool = False,\n randomize_global_search: bool = False,\n interval_weight: float = 5.0,\n):\n assert interval_weight >= 0.0 and interval_weight <= 1000.0\n compare_op_start = operator.le if converge_at_local else operator.lt\n\n # `w` controls how \"square\" is the resulting function\n # more \"square\" => x needs to be lower in order for the interval_factor\n # to be lower\n w = interval_weight / 1000.0\n with np.errstate(divide=\"ignore\"):\n A = np.divide(1.0, np.arctan(np.divide(1.0, w)))\n\n def interval_factor(vol):\n with np.errstate(divide=\"ignore\"):\n out = A * np.arctan(np.divide(vol, w))\n return out\n\n w_not = 1.0 - w\n with np.errstate(divide=\"ignore\"):\n A_not = np.divide(1.0, np.arctan(np.divide(1.0, w_not)))\n\n def close_to_optimal_factor(scale, dist):\n with np.errstate(divide=\"ignore\"):\n out = A_not * np.arctan(np.divide(dist, scale * w_not))\n return out\n\n def func(xs, values, learner, *args, **kw):\n threshold_is_None = threshold is None\n comp_threshold = learner.moving_threshold if threshold_is_None else threshold\n compare_op = (\n compare_op_start if learner.compare_op is None else learner.compare_op\n )\n\n # `dist` is normalised 0 <= dist <= 1 because xs are scaled\n dist = np.abs(xs[0] - xs[1])\n\n # learner._scale[1] makes sure it is the biggest loss and is a\n # finite value such that `dist` can be added\n\n # `dist_best_val_in_interval` is the distance (>0) of the best\n # pnt (minimum) in the ineterval with respect to the maximum\n # seen ao far, in units of sampling function\n dist_best_val_in_interval = (\n learner._bbox[1][1] - np.min(values) * learner._scale[1]\n )\n\n if dist_best_val_in_interval == 0.0:\n # In case the function landscape is constant so far\n return dist\n\n values = np.array(values)\n scaled_threshold = comp_threshold / learner._scale[1]\n if np.any(compare_op(values, scaled_threshold)):\n # This interval is the most interesting because we are beyond the\n # threshold, set its loss to maximum\n\n if threshold_is_None:\n # We treat a moving threshold for a global minimization in a\n # different way than a fixed threshold\n\n # The `dist` is added to ensure that both sides of the best\n # point are sampled when the threshold is not moving, avoiding the\n # sampling to get stuck at one side of the best seen point\n loss = dist_best_val_in_interval + dist\n else:\n # This makes sure the sampling around the minimum beyond the\n # threshold is uniform\n\n # `scaled_threshold - np.min(values)` is added to ensure that,\n # from intervals with same length with a point that has a\n # function value beyond the fixed threshold, the points closer\n # to the best value are sampled first\n\n # `scaled_threshold - np.min(values)` is normalized\n # 0 <= scaled_threshold - np.min(values) <= 1\n side_weight = dist * (1.0 + scaled_threshold - np.min(values))\n loss = (learner._bbox[1][1] - comp_threshold) + side_weight\n else:\n # This interval is not interesting, but we bias our search towards\n # lower function values and make sure to not oversample by\n # taking into account the interval distance\n\n # Big loss => interesting point => difference from maximum function\n # value gives high loss\n loss = close_to_optimal_factor(learner._scale[1], 
dist_best_val_in_interval) * interval_factor(dist)\n\n        if randomize_global_search:\n            # In case the learner is not working well some biased random\n            # sampling might help\n            # [2020-02-14] Not tested much\n            loss = random.uniform(0.0, loss)\n\n        return loss\n\n    return func\n\n\ndef mk_minimization_loss_func(\n    threshold=None,\n    converge_below=None,\n    min_distance=0.0,\n    max_distance=np.inf,\n    dist_is_norm=False,\n    converge_at_local=False,\n    randomize_global_search=False,\n    max_no_improve_in_local=4,\n    update_losses_after_no_improv=True,\n    interval_weight=50.,\n):\n    \"\"\"\n    If you don't specify the threshold you must make use of\n    mk_minimization_goal_func!!!\n    Otherwise the global optimization does not work!\n    If you specify the threshold you must use mk_threshold_goal_func\n\n    This tool is intended to be used for sampling continuous (possibly\n    noisy) functions.\n    \"\"\"\n    threshold_loss_func = mk_minimization_loss(\n        threshold=threshold,\n        converge_at_local=converge_at_local,\n        randomize_global_search=randomize_global_search,\n        interval_weight=interval_weight\n    )\n\n    func = mk_res_loss_func(\n        threshold_loss_func,\n        min_distance=min_distance,\n        max_distance=max_distance,\n        dist_is_norm=dist_is_norm,\n    )\n\n    func.needs_learner_access = True\n\n    # These are intended to be accessed by the learner\n    # Just to make life easier for the user\n    func.threshold = threshold\n    func.converge_at_local = converge_at_local\n    func.max_no_improve_in_local = max_no_improve_in_local\n    func.converge_below = converge_below\n    func.update_losses_after_no_improv = update_losses_after_no_improv\n    return func\n\n\ndef mk_minimization_goal_func():\n    \"\"\"\n    The generated function always returns False such that it can be chained with\n    the user's stop condition e.g. `goal=lambda l: goal(l) or l.npoints > 100`,\n    but it is required for the mk_minimization_loss_func to work!!!\n    This is required because it updates important variables for the loss\n    function to work properly\n    \"\"\"\n\n    def goal(learner):\n        # No action if no points\n        if len(learner.data):\n            if len(learner.data) < 2:\n                # First point, just take it as the threshold\n                # Do it here to make sure calculations with the\n                # `moving_threshold` don't run into numerical issues with inf\n                learner.moving_threshold = learner._bbox[1][0]\n            else:\n                # Update second best minimum\n                found_new_min = learner._bbox[1][0] < learner.last_min\n                if found_new_min:\n                    learner.moving_threshold = learner.last_min\n                    # learner.second_min = learner.last_min\n                    learner.no_improve_count = 1\n                    learner.sampling_local_minima = True\n\n                if learner.sampling_local_minima:\n                    if learner.no_improve_count >= learner.max_no_improve_in_local:\n                        # We decide to \"get out of the local minimum\"\n                        learner.sampling_local_minima = False\n                        # Reset count to minimum\n                        learner.no_improve_count = 0\n                        if learner.update_losses_after_no_improv:\n                            # Update the threshold so that _recompute_all_losses\n                            # has the desired effect\n                            learner.moving_threshold = learner._bbox[1][0]\n\n                            # Force update all losses such that the learner stops\n                            # sampling points in the local minimum\n\n                            # This has some computation overhead but should not\n                            # happen too often as finding a new minimum is not\n                            # expected to happen many times\n\n                            # NB: this method does not exist in the original\n                            # `Learner1D`\n                            learner._recompute_all_losses()\n                    else:\n                        learner.no_improve_count += 1\n                else:\n                    # We are back in global search\n                    # Now we can move the `moving_threshold` to the latest minimum\n                    learner.moving_threshold = learner._bbox[1][0]\n                    if (\n                        
learner.converge_below is not None\n                        and learner.converge_below > learner._bbox[1][0]\n                    ):\n                        learner.compare_op = operator.le\n\n            # Keep track of the last iteration's best minimum to be used in the\n            # next iteration\n            learner.last_min = learner._bbox[1][0]\n        return False\n\n    return goal\n\n\ndef mk_min_threshold_goal_func(max_pnts_beyond_threshold: int):\n    compare_op = operator.lt\n    minimization_goal = mk_minimization_goal_func()\n\n    def goal(learner):\n        threshold = learner.threshold\n        if threshold is None:\n            raise ValueError(\n                \"You must specify a threshold argument in `mk_minimization_loss_func`!\"\n            )\n        # This needs to be a func to avoid evaluating it if there is no data yet\n        num_pnts = lambda: np.sum(\n            compare_op(np.array(list(learner.data.items())).T[1], threshold)\n        )\n        return len(learner.data) and num_pnts() >= max_pnts_beyond_threshold\n\n    return lambda l: minimization_goal(l) or goal(l)\n","sub_path":"pycqed/utilities/learner1D_minimizer.py","file_name":"learner1D_minimizer.py","file_ext":"py","file_size_in_byte":16806,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"168049373","text":"# libraries importing\nimport time\nimport random\nimport json\nfrom paho.mqtt import client as mqtt_client\nfrom w1thermsensor import W1ThermSensor\n\n# Sensor initialization\nsensor = W1ThermSensor()\n\n# broker configuration \nbroker = 'mripta.online'\nport = 1883\ntopic = \"/sensor\"\nclient_id = f'python-mqtt-{random.randint(0, 1000)}'\nusername = 'rasp'\npassword = 'raspberry123'\n\ndef connect_mqtt():\n    '''\n\tThis function helps to connect to the broker\n    '''\n\n    def on_connect(client, userdata, flags, rc):\n        if rc == 0:\n            print(\"Connected to MQTT Broker!\")\n        else:\n            print(\"Failed to connect, return code %d\\n\", rc)\n\n    client = mqtt_client.Client(client_id)\n    client.username_pw_set(username, password)\n    client.on_connect = on_connect\n    client.connect(broker, port)\n    return client\n\ndef publish(client):\n    while True:\n        time.sleep(10)\n        temperature = sensor.get_temperature()\n        # msg = f'{\"ref\":\"therm\", \"temp\":\"{temperature}\"}'\n        msg = json.dumps({\"ref\":\"therm\", \"temp\":temperature})\n        result = client.publish(topic, msg)\n        # result: [0, 1]\n        status = result[0]\n        if status == 0:\n            print(f\"Send `{msg}` to topic `{topic}`\")\n        else:\n            print(f\"Failed to send message to topic {topic}\")\n\ndef run():\n    client = connect_mqtt()\n    client.loop_start()\n    publish(client)\n\nif __name__ == '__main__':\n    run()\n","sub_path":"code/IOT_sensor.py","file_name":"IOT_sensor.py","file_ext":"py","file_size_in_byte":1424,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"55748513","text":"# -*- coding: utf-8 -*-\n\"\"\"\n    deerconsole\n    ~~~~\n\n    :copyright: (c) 2015 by Matteo Assinnata\n    :license: MIT, see LICENSE for more details.\n\"\"\"\n \nfrom pybitcointools import *\nfrom hashlib import sha256 \n \ndef getKeys(seed):\n\tupcat = seed\n\tpkey = sha256(sha256(upcat).digest()).digest()\n\tpk = pkey #encode_privkey(pkey, \"wif\", 65)\n\tpbk = privtopub(pk)\n\taddr = pubkey_to_address(pbk)\n\treturn pk, pbk, addr","sub_path":"deerconsole/getKeys.py","file_name":"getKeys.py","file_ext":"py","file_size_in_byte":412,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"478050731","text":"# Adapted from https://github.com/gkhayes/mlrose\n\nimport csv\n\nimport mlrose\nimport numpy as np\n\nfrom timeit import default_timer as timer\n\nfrom 
sklearn.datasets import load_breast_cancer\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import MinMaxScaler, OneHotEncoder\nfrom sklearn.metrics import accuracy_score\n\n# Initialize fitness function object using pre-defined class\nfitness = mlrose.Queens()\n# Define optimization problem object\nproblem = mlrose.DiscreteOpt(\n length=100, fitness_fn=fitness, maximize=True, max_val=2)\n\n# Randomized Hill Climb\nprint('=============================')\nprint('====Randomized Hill Climb====')\nprint('=============================')\nstart = timer()\nbest_state, best_fitness, curve = mlrose.random_hill_climb(\n problem, max_attempts=10, max_iters=200, restarts=200, curve=True, random_state=0)\nend = timer()\nwith open('QUEENS-RHC.csv', 'w') as f:\n f.write('iteration,fitness\\n')\nprint('Best State: ', best_state)\nprint('Best Fitness: ', best_fitness)\nprint('Curve: ', len(curve))\nprint('Elapsed Time: ', end - start)\nwith open('QUEENS-RHC.csv', 'a') as f:\n for i in range(len(curve)):\n f.write('{},{}\\n'.format(i, curve[i]))\n\n# Simulated Annealing\nprint('=============================')\nprint('=====Simulated Annealing=====')\nprint('=============================')\nwith open('QUEENS-SA.csv', 'w') as f:\n f.write('iterations,fitness\\n')\n# for D in [0.95, 0.75, 0.55, 0.35, 0.15]:\n # for T in [1.0, 0.8, 0.6, 0.4, 0.2]:\nstart = timer()\n# Define decay schedule\nschedule = mlrose.GeomDecay(init_temp=1E12, decay=0.95)\n\nbest_state, best_fitness, curve = mlrose.simulated_annealing(\n problem, schedule=schedule, max_attempts=1000, max_iters=1000, random_state=0, curve=True)\nend = timer()\nprint('Best State: ', best_state)\nprint('Best Fitness: ', best_fitness)\nprint('Curve: ', len(curve))\nprint('Elapsed Time: ', end - start)\nwith open('QUEENS-SA.csv', 'a') as f:\n for i in range(len(curve)):\n f.write('{},{}\\n'.format(i, curve[i]))\n\n# Genetic Algorithm\nprint('=============================')\nprint('======Genetic Algorithm======')\nprint('=============================')\n\nwith open('QUEENS-GA.csv', 'w') as f:\n f.write('iterations,fitness\\n')\nstart = timer()\nbest_state, best_fitness, curve = mlrose.genetic_alg(\n problem, pop_size=200, mutation_prob=0.2, max_attempts=500, max_iters=1000, curve=True, random_state=0)\nend = timer()\nprint('Best State: ', best_state)\nprint('Best Fitness: ', best_fitness)\nprint('Curve: ', len(curve))\nprint('Elapsed Time: ', end - start)\nwith open('QUEENS-GA.csv', 'a') as f:\n for i in range(len(curve)):\n f.write('{},{}\\n'.format(i, curve[i]))\n\n# MIMIC\nprint('=============================')\nprint('===========MIMIC=============')\nprint('=============================')\n\nwith open('QUEENS-MIMIC.csv', 'w') as f:\n f.write('iterations,fitness\\n')\nstart = timer()\nbest_state, best_fitness, curve = mlrose.mimic(\n problem, pop_size=200, keep_pct=0.2, max_attempts=500, max_iters=1000, curve=True, random_state=0)\nend = timer()\nprint('Best State: ', best_state)\nprint('Best Fitness: ', best_fitness)\nprint('Curve: ', len(curve))\nprint('Elapsed Time: ', end - start)\nwith open('QUEENS-MIMIC.csv', 'a') as f:\n for i in range(len(curve)):\n f.write('{},{}\\n'.format(i, curve[i]))\n","sub_path":"assignment2/n-queens.py","file_name":"n-queens.py","file_ext":"py","file_size_in_byte":3336,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"317859807","text":"s=input()\r\nt=input()\r\nif (len(s)!=len(t)):\r\n print(-1)\r\n exit()\r\nfor i in range(len(s)+1):\r\n 
if (s==t):\r\n        print(i)\r\n        exit()\r\n    s=s[-1]+s[:-1]\r\nprint(-1)","sub_path":"submissions/indeednow-qualb/indeednow_2015_qualb_2/AC_14886756.py","file_name":"AC_14886756.py","file_ext":"py","file_size_in_byte":161,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"232768546","text":"# -*- coding: utf-8 -*-\n'''\nTask 5.2a\n\nEverything is as in task 5.2, but if the user entered a host address rather than\na network address, the host address has to be converted into the network address,\nand the network address and mask printed as in task 5.2.\n\nExample of a network address (all bits of the host part are equal to zero):\n* 10.0.1.0/24\n* 190.1.0.0/16\n\nExample of a host address:\n* 10.0.1.1/24 - a host from the network 10.0.1.0/24\n* 10.0.5.1/30 - a host from the network 10.0.5.0/30\n\nIf the user entered the address 10.0.1.1/24,\nthe output should look like this:\n\nNetwork:\n10 0 1 0\n00001010 00000000 00000001 00000000\n\nMask:\n/24\n255 255 255 0\n11111111 11111111 11111111 00000000\n\nCheck how the script works with different network/mask combinations.\n\nRestriction: all tasks must be done using only the topics covered so far.\n\n\nBy specifying the indices of the values that are passed to the format method,\nthe duplication can be avoided:\n\nIn [21]: ip_template = \n ...: IP address:\n ...: {0:<8} {1:<8} {2:<8} {3:<8}\n ...: {0:08b} {1:08b} {2:08b} {3:08b}\n ...: \n\nIn [22]: print(ip_template.format(192, 100, 1, 1))\n\nIP address:\n192 100 1 1\n11000000 01100100 00000001 00000001\n\n'''\n\nip_template = '''{0:<8} {1:<8} {2:<8} {3:<8}\n{0:08b} {1:08b} {2:08b} {3:08b}\n'''\n\nip = input('Enter an IP network in the format 10.1.1.0/24: ')\nnet = ip.split('/')[0]\nmask = int(ip.split('/')[1])\noctet = net.split('.')\nbin_mask = '1'*mask+'0'*(32-mask)\n#ip address:\nna = int(octet[0])\nnb = int(octet[1])\nnc = int(octet[2])\nnd = int(octet[3])\n#mask:\nma = int(bin_mask[0:8],2)\nmb = int(bin_mask[8:16],2)\nmc = int(bin_mask[16:24],2)\nmd = int(bin_mask[24:32],2)\n\nprint('Network:')\nprint(ip_template.format(na & ma, nb & mb, nc & mc, nd & md))\nprint('Mask:')\nprint('/'+str(mask))\nprint(ip_template.format( ma, mb, mc, md))\n\n","sub_path":"exercises/05_basic_scripts/task_5_2a.py","file_name":"task_5_2a.py","file_ext":"py","file_size_in_byte":2218,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"4850476","text":"class Employee:\n    num_of_emps = 0\n    raise_amount = 1.04\n\n    def __init__(self, first, last, pay):\n        self.first = first\n        self.last = last\n        self.pay = pay\n        self.email = first + '.' 
+ last + '@company.com'\n\n Employee.num_of_emps += 1\n\n def fullname(self):\n return '\\n{} {} {}'.format(self.first, self.last, self.email)\n\n\n\nemp_1 = Employee('Jørgen', 'Rosager', 1000000)\nemp_2 = Employee('Herman', 'Nygaard', 100)\nprint(emp_1.__dict__)\nprint(Employee.fullname(emp_1))\nprint(emp_1.fullname())\n# print(emp_2.fullname())\n","sub_path":"Classes/Classes 01.py","file_name":"Classes 01.py","file_ext":"py","file_size_in_byte":563,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"447906896","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom builtins import range\n\nimport os\nfrom os.path import join\nimport argparse\nimport pickle\nimport random\nimport json\nimport shutil\nimport time\n\nimport tensorflow as tf\nimport numpy as np\nimport scipy.misc\nimport h5py\n\nimport model as model\nfrom utils import image_processing\nfrom data_loader import get_caption_vectors\n\ndef main():\n parser = argparse.ArgumentParser()\n\n parser.add_argument('--z_dim', type=int, default=100,\\\n help='Noise dimension')\n parser.add_argument('--t_dim', type=int, default=256,\\\n help='Text feature dimension')\n parser.add_argument('--image_size', type=int, default=64,\\\n help='Image Size a, a x a')\n parser.add_argument('--gf_dim', type=int, default=64,\\\n help='Number of conv in the first layer gen.')\n parser.add_argument('--df_dim', type=int, default=64,\\\n help='Number of conv in the first layer discr.')\n parser.add_argument('--gfc_dim', type=int, default=1024,\\\n help='Dimension of gen untis for for fully connected layer.')\n parser.add_argument('--caption_vector_length', type=int, default=4800,\\\n help='Caption Vector Length')\n parser.add_argument('--model', type=str, default=None,\\\n help='Path to the trained model')\n parser.add_argument('--caption_file', type=str, default=None,\\\n help='Path to the testing caption file')\n parser.add_argument('--n_images', type=int, default=5,\\\n help='Number of sampling image for per caption')\n args = parser.parse_args()\n\n model_options = {\n 'z_dim': args.z_dim,\n 't_dim': args.t_dim,\n 'batch_size': args.n_images,\n 'image_size': args.image_size,\n 'gf_dim': args.gf_dim,\n 'df_dim': args.df_dim,\n 'gfc_dim': args.gfc_dim,\n 'caption_vector_length': args.caption_vector_length\n }\n\n gan = model.GAN(model_options)\n _, _, _, _, _ = gan.build_model()\n\n sess = tf.InteractiveSession()\n saver = tf.train.Saver()\n saver.restore(sess, args.model)\n\n input_tensors, outputs = gan.build_generator()\n\n ids, captions_list = get_caption_vectors(args.caption_file)\n captions = np.array(captions_list)\n\n caption_image_dict = {}\n\n if os.path.exists('samples'):\n shutil.rmtree('samples')\n os.makedirs('samples')\n\n print('Sampling data')\n for cn, caption_vector in enumerate(captions):\n caption_images = []\n z_noise = np.random.uniform(-1, 1, [args.n_images, args.z_dim])\n #z_noise = np.ones([args.n_images, args.z_dim])\n print(z_noise)\n caption = [caption_vector[:args.caption_vector_length]] * args.n_images\n\n [gen_image] = sess.run([outputs['generator']],\\\n feed_dict = {\n input_tensors['t_real_caption']: caption,\n input_tensors['t_z']: z_noise\n }\n )\n for i in range(0, args.n_images):\n fake_image_255 = gen_image[i, :, :, :]\n scipy.misc.imsave('samples/sample_{}_{}.jpg'.format(ids[cn], i + 1), fake_image_255)\n\n\nif __name__ == '__main__':\n 
main()\n","sub_path":"hw4/generate.py","file_name":"generate.py","file_ext":"py","file_size_in_byte":3265,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"501046258","text":"import os\r\n\r\nimport sys\r\ndef GetVideoName(dir):\r\n listName = []\r\n for fileName in os.listdir(dir):\r\n if os.path.splitext(fileName)[1] == '.cif':\r\n fileName = os.path.splitext(fileName)[0]\r\n listName.append(fileName)\r\n return listName\r\n\r\nfile_list = GetVideoName(\"./CIF\")\r\nprint(file_list)\r\nfor j in file_list:\r\n os.chdir(\"%s\" %j)\r\n os.system(\"mkdir 1 2 3 4\")\r\n # rootpath = os.path.dirname(sys.path[0])\r\n # os.chdir(rootpath)","sub_path":"Zeolite_Ds_Analyze/cd_second_file.py","file_name":"cd_second_file.py","file_ext":"py","file_size_in_byte":475,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"419129387","text":"# -*- coding: utf-8 -*-\n\"\"\"This file is a Job910 spider created on top of the ATSSpider\nscrapy crawl job910 -a url=\"http://www.job910.com/search.html\" -a mining_job_id=999 -a iteration=1 -a extract=1\nsample url:\n http://www.job910.com/search.html\n\"\"\"\nfrom urlparse import urljoin\nfrom re import compile\n\nfrom scrapy.selector import Selector\nfrom scrapy.http import Request\n\nfrom brightcorp.base.atsspiders import ATSSpider\nfrom brightcorp.items import BrightcorpItemLoader\nfrom brightcorp.processors import Prefix, Replace, ConvertDateString\n\n\nclass Job910(ATSSpider):\n\n name = \"job910\"\n download_delay = 0.5\n ref_re = compile(\"_(\\d+)\\.\")\n\n def parse(self, response):\n sel = Selector(response)\n jobs = sel.xpath(\"//div[@class='mainbody']/ul/li\")\n for job in jobs:\n job_link = job.xpath(\n \"./span[@class='titleA']/a/@href\"\n ).extract()\n if job_link:\n job_url = urljoin(response.url, job_link[0])\n meta = {\n \"title\": job.xpath(\n \"./span[@class='titleA']/a/text()\"\n ).extract(),\n \"company\": job.xpath(\n \"./span[@class='titleB']/a/text()\"\n ).extract(),\n \"location\": job.xpath(\n \"./span[@class='titleC']/text()\"\n ).extract(),\n \"experiencerequirements\": job.xpath(\n \"./span[@class='titleD']/text()\"\n ).extract(),\n \"baseSalary\": job.xpath(\n \"./span[@class='titleE']/text()\"\n ).extract(),\n \"date\": job.xpath(\n \"./span[@class='titleF']/text()\"\n ).extract(),\n }\n yield Request(\n job_url, meta=meta, callback=self.parse_job_callback()\n )\n\n next_page = sel.xpath(\"//a[text()='>>']/@href\").extract()\n if next_page:\n next_url = urljoin(response.url, next_page[0])\n yield Request(next_url, callback=self.parse)\n\n def parse_job(self, response):\n loader = BrightcorpItemLoader(response=response)\n loader.add_value('url', response.url)\n loader.add_value('title', response.meta['title'])\n loader.add_value('company', response.meta['company'])\n loader.add_value('location', response.meta['location'])\n loader.add_value(\n 'experiencerequirements',\n response.meta['experiencerequirements']\n )\n loader.add_value('baseSalary', response.meta['baseSalary'])\n loader.add_value(\n 'date', response.meta['date'],\n ConvertDateString(\"%Y/%m/%d\")\n )\n loader.add_value(\n 'referencenumber', response.url, Prefix(\"%s-\" % self.name),\n re=self.ref_re\n )\n loader.add_xpath(\n \"description\", \"//span[@id='JobDuties']/node()\"\n )\n loader.add_xpath(\n \"jobtype\",\n \"//li[starts-with(text(),'%s')]/text()\"\n % unicode(\"工作性质:\", 'utf-8'),\n Replace(unicode(\"工作性质:\", 'utf-8'), \"\")\n )\n yield 
loader.load_item()\n","sub_path":"brightcorp/brightcorp/spiders/job910.py","file_name":"job910.py","file_ext":"py","file_size_in_byte":3353,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"530165840","text":"import numpy as np\nfrom PIL import Image\nfrom numba import jit\n\n@jit\ndef make_mandelbrot(width, height, max_iterations):\n result = np.zeros((height, width))\n\n # for each pixel at (ix, iy)\n for iy in np.arange(height):\n for ix in np.arange(width):\n \n # start iteration at x0 in [-2, 1] and y0 in [-1, 1]\n x0 = ix*3.0/width - 2.0\n y0 = iy*2.0/height - 1.0\n \n x = 0.0\n y = 0.0\n for iteration in range(max_iterations):\n x_new = x*x - y*y + x0\n y = 2*x*y + y0\n x = x_new\n \n if x*x + y*y > 4.0:\n # color using pretty linear gradient\n color = 1.0 - 0.01*(iteration - np.log2(np.log2(x*x + y*y)))\n break\n else:\n # failed, color in black\n color = 0.0\n \n result[iy, ix] = color\n\n return result\n\nmandelbrot = make_mandelbrot(3000, 2000, 255)\n\n# convert from float in [0, 1] to to uint8 in [0, 255] for PIL\nmandelbrot = np.clip(mandelbrot*255, 0, 255).astype(np.uint8)\nmandelbrot = Image.fromarray(mandelbrot)\nmandelbrot.save(\"mandelbrot.png\")\nmandelbrot.show()\n","sub_path":"py/mandelbrot.py","file_name":"mandelbrot.py","file_ext":"py","file_size_in_byte":1252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"98814684","text":"import retrieve_file\r\nfrom datetime import datetime, date, time, timedelta\r\nimport re\r\nimport os\r\n\r\n# Opens file to be analyzed\r\nopen_file = open(\"local_copy.log\")\r\n\r\n# Create counters\r\ntotal_requests = 0\r\nerror_count = 0\r\nerrors = []\r\nmonth_count = {1:0, 2:0, 3:0, 4:0, 5:0, 6:0, 7:0, 8:0, 9:0, 10:0, 11:0, 12:0}\r\nday_count = {1:0, 2:0, 3:0, 4:0, 5:0, 6:0, 7:0}\r\nfail_count = 0\r\nredirected_count = 0\r\nsuccess_count = 0\r\nfile_count = {\"index.html\":0}\r\n\r\n# Define month file names\r\nmonthfile = {1:\"January.txt\", 2:\"February.txt\", 3:\"March.txt\", 4:\"April.txt\", 5:\"May.txt\", 6:\"June.txt\", 7:\"July.txt\", 8:\"August.txt\", 9:\"September.txt\",10:\"October.txt\", 11:\"November.txt\", 12:\"December.txt\"}\r\n\r\n# Reads the file line by line and adds to the total_requests counter\r\nfor line in open_file:\r\n total_requests += 1\r\n \r\n # Split lines into necessary elements\r\n line_elements = re.split(\"([0-9]{2}/[A-Za-z]{3}/[0-9]{4}):([0-9]{2}:[0-9]{2}:[0-9]{2}).*\\\"([A-Z]+) (.+?) 
([HTTP].+)\\\" ([0-9]{3})\", line)\r\n    \r\n    # Check if regex worked for each line\r\n    if len(line_elements) >= 7:\r\n    \r\n        # Add to the day and month counters\r\n        date = datetime.strptime(line_elements[1], \"%d/%b/%Y\")\r\n        day_count[date.isoweekday()] += 1\r\n        month_count[date.month] += 1\r\n    \r\n        # Check for the specific month file; if one has been created, append the line to it, otherwise create the file and write the line to it\r\n        if not os.path.exists(monthfile[date.month]):\r\n            file = open(monthfile[date.month], \"w\")\r\n            file.write(line)\r\n            file.close()\r\n        else:\r\n            file = open(monthfile[date.month], \"a\")\r\n            file.write(line)\r\n            file.close()\r\n    \r\n        # Add to the HTTP status code counters\r\n        if line_elements[6] =='200':\r\n            success_count += 1\r\n        elif line_elements[6] == '302' or line_elements[6] == '304' or line_elements[6] =='306':\r\n            redirected_count += 1\r\n        else:\r\n            fail_count += 1\r\n    \r\n        # Add to the most requested file counter\r\n        if line_elements[4] in file_count:\r\n            file_count[line_elements[4]] += 1\r\n        else:\r\n            file_count[line_elements[4]] = 1\r\n    \r\n    # If regex did not work (this `else` pairs with the `if` above, not the loop)\r\n    else:\r\n        error_count += 1\r\n        errors.append(line)\r\n    \r\n# Determine the most requested file\r\nmost_requested = \"index.html\"\r\nmost_count = file_count[\"index.html\"]\r\nfor filer, count in file_count.items():\r\n    if count > most_count:\r\n        most_requested = filer\r\n        most_count = file_count[filer]\r\n    \r\n# Determine the least requested file\r\nleast_requested = \"index.html\"\r\nleast_count = file_count[\"index.html\"]\r\nfor filer, count in file_count.items():\r\n    if count < least_count:\r\n        least_requested = filer\r\n        least_count = file_count[filer]\r\n\r\n\r\n\r\n\r\n\r\nprint(\"There were\", total_requests, \"total requests in the time period represented in the log.\")\r\n\r\nfor d in day_count:\r\n    print(\"There were\", day_count[d],\" requests during weekday \", d, \" during the time period represented in the log.\")\r\n    \r\nfor m in month_count:\r\n    print(\"There were\", month_count[m],\" requests during month \", m, \" during the time period represented in the log.\")\r\n    \r\nprint(round(((fail_count/total_requests)*100),2), \" percent of all requests were not successful.\")\r\n    \r\nprint(round(((redirected_count/total_requests)*100),2), \" percent of all requests were redirected somewhere else.\")\r\n\r\nprint(\"The most requested file was\", most_requested, \"with a total of\", most_count, \"requests.\")\r\n\r\nprint(\"The least requested file was\", least_requested, \"with a total of\", least_count, \"requests.\")\r\n\r\nopen_file.close() \r\n    \r\n    \r\n    \r\n    \r\n    \r\n    \r\n    \r\n    \r\n","sub_path":"final_code.py","file_name":"final_code.py","file_ext":"py","file_size_in_byte":3809,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"91943848","text":"#!/usr/bin/env python\nfrom unittest import TestCase\nimport unittest\nimport tempfile\nimport os\n\nfrom proxylist import ProxyList\nfrom test_server import TestServer\n\nDEFAULT_PROXY_LIST_DATA = '''\n'1.1.1.1:8080\n'1.1.1.2:8080\n'''\n\n\nclass ProxyListTestCase(TestCase):\n    @classmethod\n    def setUpClass(cls):\n        cls.server = TestServer()\n        cls.server.start()\n\n    @classmethod\n    def tearDownClass(cls):\n        cls.server.stop()\n\n    def setUp(self):\n        self.server.reset()\n\n    def generate_plist_file(self, data):\n        df, path = tempfile.mkstemp()\n        with open(path, 'w') as out:\n            out.write(data)\n        return path\n\n    def test_basic(self):\n        pl = ProxyList()\n        self.assertEqual(0, 
pl.size())\n\n\n def test_file_proxy_source(self):\n pl = ProxyList()\n path = self.generate_plist_file(DEFAULT_PROXY_LIST_DATA)\n pl.load_file(path)\n self.assertEqual(2, pl.size())\n os.unlink(path)\n\n def test_web_proxy_source(self):\n pl = ProxyList()\n self.server.response['data'] = DEFAULT_PROXY_LIST_DATA\n pl.load_url(self.server.get_url())\n self.assertEqual(2, pl.size())\n\n def test_get_next_proxy(self):\n pl = ProxyList()\n path = self.generate_plist_file('foo:1\\nbar:1')\n pl.load_file(path)\n self.assertEqual(pl.get_next_proxy().host, 'foo')\n self.assertEqual(pl.get_next_proxy().host, 'bar')\n self.assertEqual(pl.get_next_proxy().host, 'foo')\n pl.load_file(path)\n self.assertEqual(pl.get_next_proxy().host, 'foo')\n os.unlink(path)\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1647,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"558892010","text":"import json\nimport os\n\nfrom algoliasearch import algoliasearch\n\n## Algolia Credentials\n\nclient = algoliasearch.Client(\"7EK9KHJW8M\", os.environ['ALGOLIA_API_KEY'])\nindex = client.init_index('schema')\n\n## Load plotschema.json\n# Note _data/plotschema.json is updated upon each deploy\n\np = json.load(open('_data/plotschema.json'))\nschema = []\n\n## Data Level 1: Traces\n# Add dictionaries to schema array.\n# The trace dictionary include name: trace name, permalink: reference/#trace-name, and description if applicable.\n\nfor i in p['schema']['traces']:\n trace = {}\n trace ['name'] = i\n trace ['permalink'] = 'reference/#'+i\n if p['schema']['traces'][i]['meta']:\n trace ['description'] = (p['schema']['traces'][i]['meta']['description']).replace('*', '\"')\n else: pass\n schema.append(trace)\n\ndef next_level(previous_level,chain_dict):\n for sub_attr in previous_level:\n try:\n if isinstance(previous_level[sub_attr],dict):\n if not any(value in sub_attr for value in (\"src\", \"_deprecated\", \"impliedEdits\", \"uid\", \"editType\")):\n try:\n attribute = {}\n attribute ['name'] = chain_dict['name']+' > '+sub_attr\n attribute ['permalink'] = chain_dict['permalink']+'-'+sub_attr\n attribute ['description'] = (previous_level[sub_attr]['description']).replace('*', '\"')\n schema.append(attribute)\n next_level(previous_level[sub_attr],{'name':attribute['name'], 'permalink':attribute['permalink']})\n except:\n attribute = {}\n attribute ['name'] = chain_dict['name']+' > '+sub_attr\n attribute ['permalink'] = chain_dict['permalink']+'-'+sub_attr\n attribute ['description'] = 'Properties for '+sub_attr\n schema.append(attribute)\n next_level(previous_level[sub_attr],{'name':attribute['name'], 'permalink':attribute['permalink']})\n except:\n pass\n\nlayout_chain_dict = {'name':'Layout', 'permalink':'reference/#layout'}\n\n# recursively add trace attributes to schema\nfor i in p['schema']['traces']:\n chain_dict = {'name':i, 'permalink':'reference/#'+i }\n next_level(p['schema']['traces'][i]['attributes'], chain_dict)\n\n # if there are layoutAttributes in the trace add them too.\n if p['schema']['traces'][i].get('layoutAttributes'):\n next_level(p['schema']['traces'][i]['layoutAttributes'], layout_chain_dict)\n\n# recursively add layout attributes to schema\nnext_level(p['schema']['layout']['layoutAttributes'], layout_chain_dict)\n\n## Send to 
Algolia\n\nindex.clear_index()\nindex.add_objects(schema)\n","sub_path":"update_ref_search.py","file_name":"update_ref_search.py","file_ext":"py","file_size_in_byte":2777,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"538874936","text":"#!/usr/bin/env python3\r\n\r\nfrom Service import Service, ServiceTest\r\nfrom threading import Thread\r\nfrom binascii import b2a_hex\r\nfrom time import sleep\r\nimport os\r\n\r\ndebug = True\r\nround_time_in_seconds = 10\r\nexit_file = 'exit_server.txt'\r\nip = '0.0.0.0'\r\nfile_directory = os.getcwd()\r\n\r\n#ServiceClass, individual port, service name\r\nservice_list = [\r\n (ServiceTest, 1, \"Service Test\")#, (NewService, 1338, \"New Service\")]\r\n]\r\n\r\n#name, directory, auth_string, base_port\r\nteams = [\r\n (\"R00t Reg\", 'team1', 'teamPass1', 40000),\r\n (\"Sh3ll Squad\", 'team2', 'teamPass2', 41000),\r\n (\"Pwn Plat00n\", 'team3', 'teamPass3', 42000)\r\n]\r\n\r\nflags = []\r\nflag_files = []\r\n\r\ndef add_flag(flag_file):\r\n with open(flag_file, 'wt') as f:\r\n flag = b2a_hex(os.urandom(16)).decode('utf-8')\r\n flags.append(flag)\r\n f.write(flag)\r\n\r\ndef create_flag(team_dir, service_name):\r\n flag_file = os.path.join(team_dir, service_name + '.flag')\r\n flag_files.append(flag_file)\r\n add_flag(flag_file)\r\n return flag_file\r\n\r\ndef update_flags():\r\n print(\"Updating Flags\")\r\n for flag_file in flag_files:\r\n with open(flag_file, 'rt') as f:\r\n old_flag = f.read().strip()\r\n add_flag(flag_file)\r\n flags.remove(old_flag)\r\n\r\n\r\ndef launch_service(Service, ip, port, flag_location, name, debug, auth_string):\r\n new_service = Service(ip, port, flag_location, name, debug, auth_string)\r\n new_service.run_server()\r\n\r\nif __name__ == \"__main__\":\r\n print(\"[*] Starting Services\")\r\n with open(exit_file, 'wt') as f:\r\n f.write('\\n')\r\n #create team directories if they do not exist\r\n for team_name, team_dir, auth_string, port_base in teams:\r\n real_team_dir = os.path.join(file_directory, team_dir)\r\n if not os.path.exists(real_team_dir):\r\n os.makedirs(real_team_dir)\r\n print(\"Making \" + team_dir)\r\n #launch the threaded exploits\r\n for Service, port, name in service_list:\r\n flag_location=create_flag(real_team_dir, name)\r\n t = Thread(\r\n name=\"Port \"+str(port),\r\n target=launch_service,\r\n args=(Service, ip, port_base + port, flag_location, name, debug, auth_string))\r\n t.start()\r\n while True:\r\n with open(exit_file, 'rt') as f:\r\n if f.read().strip() != \"\":\r\n os._exit(1)\r\n sleep(round_time_in_seconds)\r\n update_flags()\r\n","sub_path":"test_server/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":2449,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"540196097","text":"from matplotlib import pyplot as plt\nfrom string import ascii_lowercase\nimport math\nimport itertools\nimport sys\nimport threading\nimport time\nimport json\nimport sqlite3\n\n\ncords = [(3, 12), (2, 10), (1, 9), (3, 6), (3, 4), (4, 2), (7, 7), (8, 8)]\ncords = [(3, 12), (2, 10), (1, 9), (3, 6), (3, 4), (4, 2), (7, 7), (8, 8), (9, 9), (10, 10)]\nl = len(cords)\nletters = ascii_lowercase[:l]\ncordsA = dict(zip(letters, cords))\ncordsB = dict(zip(cords, letters))\ncordsLenght = {}\ncordsCompute = []\n\nfor (x1, y1) in cords:\n for (x2, y2) in cords:\n if (x1, y1) != (x2, y2):\n key = cordsB[(x1, y1)] + cordsB[(x2, y2)]\n sortedKey = \"\".join(sorted(key))\n value = 
round(math.sqrt(pow(x1 - x2, 2) + pow(y1 - y2, 2)), 2)\n if sortedKey not in cordsLenght:\n cordsCompute.append(value)\n cordsLenght[key] = value\n\nprom = round(sum(cordsCompute) / len(cordsCompute), 2)\n#print(prom, \"\\n\")\n\n\nclass Progress(threading.Thread):\n\n def __init__(self, total):\n super().__init__()\n self.total = total\n self.value = 0\n self.before = None\n self.stop = False\n\n def run(self):\n self.start = time.time()\n while not self.stop:\n self.printer()\n time.sleep(.2)\n\n def update(self, value):\n \n self.timer = time.time()\n if self.value:\n self.before = self.value\n self.value = value \n\n def printer(self):\n if self.value == self.total:\n self.stop = True\n self.timer = self.start\n estimated = self.estimator()\n sys.stdout.write(\"\\r[\" + \"#\" * 30 + \"]\" + \" Completed...\" + f\" in {estimated}\" + \" \" * 30)\n sys.stdout.write(\"\\n\")\n else:\n ratio = self.value / self.total\n loaded = int(30 * ratio)\n estimated = self.estimator()\n sys.stdout.write(\"\\r[\" + \"#\" * loaded + \".\" * (30 - loaded) + \"]\" + f\" Progress: {int(100 * ratio):>3}% Estimated time: {estimated:<12}\") \n sys.stdout.flush()\n\n def estimator(self):\n if self.before:\n passes = round(time.time() - self.timer, 2)\n change = self.value - self.before\n left = self.total - self.value\n seconds = left // (change / passes) # Make it to seconds\n minutes, seconds = divmod(seconds, 60)\n if minutes:\n hours, minutes = divmod(minutes, 60)\n if hours:\n return f\"{hours + (minutes % 60) / 10} hours\"\n else:\n return f\"{minutes + (seconds % 60) / 10} minutes\"\n else:\n return f\"{seconds} seconds\"\n else:\n return \"\"\n\n\ndef tester(x):\n timer = Progress(x)\n timer.start()\n for i in range(1, x + 1):\n time.sleep(.1)\n timer.update(i)\n timer.join()\n\n\ndef getComplete(nodes, values):\n allProbs = {}\n perms = itertools.permutations(nodes, len(nodes))\n for perm in perms:\n weight = 0\n for c in range(len(nodes) - 1):\n weight += values[perm[c] + perm[c + 1]]\n weight += values[perm[0] + perm[c + 1]]\n weight = round(weight, 3)\n allProbs[\"\".join(perm)] = weight\n return allProbs\n\n\ndef getABComplete(start, finish, nodes, values):\n allProbs = {}\n nodeList = [char for char in nodes if char not in start + finish]\n perms = itertools.permutations(nodeList)\n for perm in perms:\n weight = values[start + perm[0]]\n for c in range(len(nodeList) - 1):\n weight += values[perm[c] + perm[c + 1]]\n weight += values[perm[c + 1] + finish]\n weight = round(weight, 3)\n allProbs[start + \"\".join(perm) + finish] = weight\n return allProbs\n\nprint(\"computing...\")\na = getComplete(letters, cordsLenght)\n\n#a = getABComplete(\"a\", \"b\", letters, cordsLenght)\n\nprint(\"sorting\")\nallvv = sorted(a, key=a.get)\nvv = allvv[:10]\nids = 0\ng = []\nfor item in allvv:\n g.append((item, a[item]))\n\n\nprint(\"Saving\")\n#with open(\"all.json\", \"w\") as data:\n# json.dump(g, data, indent=4)\n\nconn = sqlite3.connect(f\"combinations.db\")\nc = conn.cursor()\ntry:\n c.execute(\"\"\"CREATE TABLE combinations (\n groupP integer,\n amount integer,\n combination text,\n value real\n )\"\"\")\nexcept Exception as e:\n print(e)\nfinally:\n try:\n print(\"inserting\")\n for item, value in g:\n data = (1, 10, item, value)\n sql = '''INSERT INTO combinations (groupP, amount, combination, value) VALUES (?, ?, ?, ?)'''\n c.execute(sql, data)\n except Exception as e:\n print(e)\n conn.commit()\n conn.close()\n\nprint(vv)\n[print(a[v], end=\" \") for v in vv]\n\n\n#getBrute(list(cordsA.keys()))\n 
\n\ndef graph(current, val):\n plt.figure()\n plt.xlim(0, 13)\n plt.ylim(0, 13)\n plt.gca().set_aspect('equal', adjustable='box') \n plt.title(f\"{current}: {val}\")\n\n notR = []\n R = []\n for cord in cords:\n cordName = cordsB[cord]\n if cord == val:\n plt.scatter(*cord, color=\"gold\")\n elif cordsLenght[current + cordName] < prom:\n plt.scatter(*cord, color=\"darkorange\")\n else:\n plt.scatter(*cord, color=\"dodgerblue\")\n\n#for k, v in cordsA.items():\n# graph(k, v)\n\n\n#plt.show()\n\n\n","sub_path":"Investigacion/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":5370,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"312575924","text":"import logging\n\nimport pandas as pd\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\nfrom funfolding import binning, model, solution\nfrom funfolding.visualization.visualize_classic_binning import plot_binning\nfrom funfolding.visualization.visualize_classic_binning import mark_bin\n\nimport corner\n\ndef read_in(filename='Gamma_clas_sep.hdf5'):\n df = pd.read_hdf(filename)\n df_cutted = df[df.confidence_true_ >= 0.9]\n\n df_cutted.MCorsikaEvtHeader_fTotalEnergy = np.log10(\n df_cutted.MCorsikaEvtHeader_fTotalEnergy)\n df_cutted.E_RF = np.log10(df_cutted.E_RF)\n df_cutted.ConcCore = np.log10(df_cutted.ConcCore)\n df_cutted.Size = np.log10(df_cutted.Size)\n df_cutted.Length = np.log10(df_cutted.Length)\n df_cutted.numPixelInShower = np.log10(\n df_cutted.numPixelInShower)\n\n df_cutted = df_cutted[df_cutted.MCorsikaEvtHeader_fTotalEnergy <= 4.2]\n df_cutted = df_cutted[df_cutted.MCorsikaEvtHeader_fTotalEnergy >= 2.4]\n\n df_cutted = df_cutted[df_cutted.ZdTracking <= 31.0]\n df_cutted = df_cutted[df_cutted.ZdTracking >= 5]\n\n return df_cutted\n\n\n\n\nif __name__ == '__main__':\n logging.captureWarnings(True)\n logging.basicConfig(\n format='%(processName)-10s %(name)s %(levelname)-8s %(message)s',\n level=logging.INFO)\n df = read_in()\n df_A = df.iloc[5000:]\n df_test = df.iloc[:5000]\n\n X = df_A.get(['ConcCore', 'E_RF']).values\n X_test = df_test.get(['ConcCore', 'E_RF']).values\n\n binning_E = np.linspace(2.4, 4.2, 10)\n binned_E = np.digitize(df_A.MCorsikaEvtHeader_fTotalEnergy,\n binning_E)\n binned_E_test = np.digitize(df_test.MCorsikaEvtHeader_fTotalEnergy,\n binning_E)\n classic_binning = binning.ClassicBinning(\n bins = [15, 25])\n classic_binning.fit(X)\n\n fig, ax = plt.subplots()\n plot_binning(ax,\n classic_binning,\n X,\n log_c=False,\n cmap='viridis')\n fig.savefig('05_fact_example_original_binning.png')\n\n closest = classic_binning.merge(X_test,\n min_samples=10,\n max_bins=None,\n mode='closest')\n fig, ax = plt.subplots()\n plot_binning(ax,\n closest,\n X,\n log_c=False,\n cmap='viridis')\n fig.savefig('05_fact_example_original_binning_closest.png')\n\n unmerged_model = model.BasicLinearModel()\n binned_g = classic_binning.digitize(X)\n unmerged_model.initialize(g=binned_g,\n f=binned_E)\n\n\n merged_model = model.BasicLinearModel()\n binned_g = closest.digitize(X)\n merged_model.initialize(g=binned_g,\n f=binned_E)\n\n single_obs_model = model.BasicLinearModel()\n max_e = np.max(X[:, 1]) + 1e-3\n min_e = np.min(X[:, 1]) - 1e-3\n binning = np.linspace(min_e, max_e, 11)\n binned_g = np.digitize(X[:, 1], binning)\n single_obs_model.initialize(g=binned_g,\n f=binned_E)\n\n\n n_bins = len(closest.i_to_t)\n single_obs_model_more_bins = model.BasicLinearModel()\n max_e = np.max(X[:, 1]) + 1e-3\n min_e = np.min(X[:, 1]) - 1e-3\n binning = 
np.linspace(min_e, max_e, n_bins + 1)\n binned_g = np.digitize(X[:, 1], binning)\n single_obs_model_more_bins.initialize(g=binned_g,\n f=binned_E)\n\n vec_g, vec_f = merged_model.generate_vectors(binned_g, binned_E)\n\n\n tree_obs = [\"Size\",\n \"Width\",\n \"Length\",\n \"M3Trans\",\n \"M3Long\",\n \"ConcCore\",\n \"m3l\",\n \"m3t\",\n \"Concentration_onePixel\",\n \"Concentration_twoPixel\",\n \"Leakage\",\n \"Leakage2\",\n \"concCOG\",\n \"numIslands\",\n \"numPixelInShower\",\n \"phChargeShower_mean\",\n \"phChargeShower_variance\",\n \"phChargeShower_max\"]\n\n X_tree = df_A.get(tree_obs).values\n X_tree_test = df_test.get(tree_obs).values\n\n tree_binning = binning.TreeBinningSklearn(\n regression=False,\n max_features=None,\n min_samples_split=2,\n max_depth=None,\n min_samples_leaf=100,\n max_leaf_nodes=100,\n random_state=1337)\n\n tree_binning.fit(X_tree,\n binned_E,\n uniform=True)\n\n\n binned_g = tree_binning.digitize(X_tree)\n\n tree_model = model.BasicLinearModel()\n tree_model.initialize(g=binned_g,\n f=binned_E)\n\n ax_condition = unmerged_model.evaluate_condition(\n label='2 Observables (Unmerged; {} Bins)'.format(\n classic_binning.n_bins))\n merged_model.evaluate_condition(\n ax=ax_condition,\n label='2 Observables (Merged; {} Bins)'.format(closest.n_bins))\n single_obs_model.evaluate_condition(\n ax=ax_condition,\n label='Single Observable (10 Bins)')\n single_obs_model_more_bins.evaluate_condition(\n ax=ax_condition,\n label='Single Observable ({} Bins)'.format(closest.n_bins))\n tree_model.evaluate_condition(\n ax=ax_condition,\n label='Tree Based ({} Bins)'.format(tree_binning.n_bins))\n\n plt.legend(loc='lower left')\n ax_condition.set_yscale(\"log\", nonposy='clip')\n plt.savefig('05_condition.png')\n\n binned_g_test = tree_binning.digitize(X_tree_test)\n vec_g, vec_f = tree_model.generate_vectors(binned_g_test,\n binned_E_test)\n print('\\nMCMC Solution: (constrained: sum(vec_f) == sum(vec_g)) :')\n llh_mcmc = solution.LLHSolutionMCMC(n_used_steps=2000,\n random_state=1337)\n llh_mcmc.initialize(vec_g=vec_g, model=tree_model)\n vec_f_est_mcmc, sample, probs = llh_mcmc.run(tau=0)\n std = np.std(sample, axis=0)\n str_0 = 'unregularized:'\n str_1 = ''\n for f_i_est, f_i in zip(vec_f_est_mcmc, vec_f):\n str_1 += '{0:.2f}\\t'.format(f_i_est / f_i)\n print('{}\\t{}'.format(str_0, str_1))\n\n # corner.corner(sample, truths=vec_f_est_mcmc, truth_color='r')\n # plt.savefig('05_corner_fact.png')\n\n fig, ax = plt.subplots(1, 1, figsize=(6, 4))\n bin_mids = (binning_E[1:] + binning_E[:-1]) / 2.\n bin_width = (binning_E[1:] - binning_E[:-1]) / 2.\n _, vec_f_truth = tree_model.generate_vectors(binned_g,\n binned_E)\n vec_f_truth = vec_f_truth * sum(vec_f) / sum(vec_f_truth)\n plt.hist(bin_mids, bins=binning_E, weights=vec_f_truth, histtype='step')\n ax.errorbar(bin_mids,\n vec_f_est_mcmc,\n yerr=std,\n xerr=bin_width,\n ls=\"\",\n color=\"r\", label=\"Unfolding\")\n ax.set_yscale(\"log\", nonposy='clip')\n fig.savefig('05_unfolding_mcmc.png')\n plt.close(fig)\n\n import cPickle\n\n with open('probs.dat', 'wb') as f:\n cPickle.dump(probs, f)\n\n fig, ax = plt.subplots(1, 1, figsize=(6, 4.5))\n ax.hist(2*(np.max(probs) - probs),\n bins=50,\n weights=np.ones_like(probs) * 1./len(probs),\n histtype='step', lw=2)\n ax.set_yscale(\"log\", nonposy='clip')\n ax.set_xlabel(r'$-2\\cdot\\ln\\left(\\frac{\\mathdefault{LLH}}{\\mathdefault{LLH}_{\\mathdefault{Best Fit}}}\\right)$')\n ax.set_ylabel(r'$\\frac{\\mathdefault{Bin}_i}{\\sum_i \\mathdefault{Bin}_i}$')\n plt.tight_layout()\n 
plt.savefig('05_hist_probs.png')\n","sub_path":"examples/05_fact_example.py","file_name":"05_fact_example.py","file_ext":"py","file_size_in_byte":7545,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"260478427","text":"from PyQt5.QtWidgets import QWidget, QAction, QMenuBar, QTableWidget, QTableWidgetItem, QLabel, QVBoxLayout\nfrom PyQt5.QtCore import Qt\nfrom Component.Materials import Checkbox\nfrom Component.Materials import CheckboxItem\nfrom Connector.FireArea import ReturnArea\n\n__author__ = \"Wonyeong Choe <choewy@stdte.co.kr>\"\n\n\nclass AreaTable(QWidget):\n def __init__(self, db_name, tab_project):\n QWidget.__init__(self)\n self.tab_project = tab_project\n self.__return_area__ = ReturnArea(db_name)\n self.db_name = db_name\n self.__variables__()\n self.__component__()\n\n def __variables__(self):\n self.columns = ['선택']+self.__return_area__.Columns()\n self.dataframe = self.__return_area__.Dataframe()\n self.checked = []\n\n def __component__(self):\n self.__menubar__()\n self.__table__()\n self.__layout__()\n\n def __menubar__(self):\n self.action_all = QAction('전체 선택', self)\n self.action_all.triggered.connect(self.All)\n self.menubar = QMenuBar(self)\n self.menubar.addAction(self.action_all)\n\n def All(self):\n if self.action_all.text() == '전체 선택':\n self.__select_all__()\n elif self.action_all.text() == '선택 해제':\n self.__unselect_all__()\n\n def __select_all__(self):\n row_count = self.table.rowCount()\n self.checked.clear()\n for row in range(row_count):\n self.table.cellWidget(row, 0).setChecked(True)\n if row != -1:\n self.checked.append(row)\n self.action_all.setText('선택 해제')\n\n def __unselect_all__(self):\n row_count = self.table.rowCount()\n for row in range(row_count):\n self.table.cellWidget(row, 0).setChecked(False)\n self.checked.clear()\n self.action_all.setText('전체 선택')\n\n def __checkbox__(self, table, row, col):\n checkboxitem = CheckboxItem()\n checkbox = Checkbox(checkboxitem)\n checkbox.clicked.connect(self.Checked)\n table.setItem(row, col, checkboxitem)\n table.setCellWidget(row, col, checkbox)\n\n def Checked(self, check):\n row = self.table.currentRow()\n if check:\n self.checked.append(row)\n self.action_all.setText('선택 해제')\n elif not check:\n self.checked.remove(row)\n if not self.checked:\n self.action_all.setText('전체 선택')\n\n def __table__(self):\n self.table = QTableWidget()\n self.table.setRowCount(0)\n self.table.setColumnCount(len(self.columns))\n self.table.setHorizontalHeaderLabels(self.columns)\n self.table.setAlternatingRowColors(True)\n style = \"QHeaderView::section {font-weight: bold; border: 2px black;}\"\n self.table.setStyleSheet(style)\n for row, lst in enumerate(self.dataframe.values):\n self.table.insertRow(row)\n self.table.setRowHeight(row, 50)\n self.__checkbox__(self.table, row, 0)\n for col, data in enumerate(lst):\n if col in [3, 4]:\n item = QTableWidgetItem(str(data))\n item.setFlags(Qt.ItemIsEditable)\n else:\n item = QTableWidgetItem(str(data))\n item.setFlags(Qt.ItemIsEditable)\n item.setTextAlignment(Qt.AlignCenter)\n self.table.setItem(row, col+1, item)\n self.table.hideColumn(1)\n self.table.resizeColumnsToContents()\n self.table.verticalHeader().setVisible(False)\n self.table.horizontalHeader().setStretchLastSection(True)\n\n def __layout__(self):\n layout = QVBoxLayout()\n layout.addWidget(QLabel('※ 방화지역 정보\\n'))\n layout.addWidget(self.menubar)\n layout.addWidget(self.table)\n 
self.setLayout(layout)","sub_path":"Component/Projects/Analysis/AreaTable.py","file_name":"AreaTable.py","file_ext":"py","file_size_in_byte":3879,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"284288975","text":"# p.049 TCP Server and Client That can Deadlock\n# TCP client and server that leave too much data waiting\n\nimport socket, sys\n\nserverIP = \"192.168.4.3\" # input(\"Please enter server IP you would like to connect to: \")\nserverPort = int(input(\"Please enter application port number: \"))\nbytecount = 1073741824 # int(input(\"Please enter up to how many bytes you will send to the server: \"))\nclientSock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nbytecount = (bytecount + 15) // 16 * 16 # round up to a multiple of 16\nmessage = b\"capitalize this!\" # 16-byte message to repeat over and over\n\nprint(\"Sending\", bytecount, \"bytes of data, in chunks of 16 bytes\")\nclientSock.connect((serverIP, serverPort))\nsent = 0\nwhile sent < bytecount:\n clientSock.sendall(message)\n sent += len(message)\n print(\"\\r %d bytes sent\" % (sent,), end=\" \")\n sys.stdout.flush()\n\nprint()\nclientSock.shutdown(socket.SHUT_WR)\n\nprint(\"Receiving all the data the server sends back\")\n\nreceived = 0\nwhile True:\n data = clientSock.recv(42)\n if not received:\n print(\" The first data received says\", repr(data))\n if not data:\n break\n received += len(data)\n print(\"\\r %d bytes received\" % (received,), end=\" \")\n\nprint()\nclientSock.close()\n","sub_path":"overloadingZ1_client.py","file_name":"overloadingZ1_client.py","file_ext":"py","file_size_in_byte":1262,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"528108506","text":"\"\"\"\nAdvances API\n\"\"\"\nfrom typing import Dict\n\nfrom ..api_base import ApiBase\n\n\nclass Advances(ApiBase):\n \"\"\"Class for Advances APIs.\"\"\"\n\n GET_ADVANCES = '/v3/advances'\n\n def get(self, limit: int = None, offset: int = None, **kwargs) -> Dict:\n \"\"\"\n Get Advances\n :param limit: No. 
of advances to be fetched\n        :param offset: Pagination offset\n        :return: List of Advance Objects\n        \"\"\"\n        return self._get_request(params={\n            'limit': limit,\n            'offset': offset,\n            **kwargs\n        }, api_url=self.GET_ADVANCES)\n","sub_path":"fylesdk/apis/fyle_v3/advances.py","file_name":"advances.py","file_ext":"py","file_size_in_byte":589,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"142427311","text":"# Exercises 1: Write a while loop that starts at the last character in the string and works its way backwards\n# to the first character in the string, printing each letter on a separate line, except backwards\n'''\nindex = 1\nword = 'hello'\nfor char in word:\n    print(word[len(word)-index])\n    index = index + 1\n'''\n\n# solution 2\n'''\ndef backwards(word):\n    index = len(word) - 1  # adjust for 0th index\n    while index >= 0:\n        print(word[index])\n        index -= 1\n    \nword = \"python\"\nbackwards(word)\n\n\n#String slices\ns = 'Monty Python'\nprint(s[0:5]) # Monty\nprint(s[:3]) # Mon \nnew_s = 'P' + s[:3]\nprint(new_s) #PMon\n\n# String is immutable \n\n\n#Looping and counting\n\nword = 'banana'\ncount = 0\nfor letter in word:\n    count = count + 1\nprint(count)\n\n# The in operator \nprint('a' in 'banana') # True\n\n'''\n\n\n# Exercise 4 : Write an invocation that counts the number of times the letter a occurs in \"banana\"\n\nword = 'banana'\ncount = 0\nfor letter in word:\n    if letter == 'a':\n        count = count + 1\nprint(count)\n\n# Parsing strings\ndata = 'From stephen.marquard@uct.ac.za Sat Jan 5 09:14:16 2008'\natpos = data.find('uct')\nprint(atpos)\n\n# Format operator\ncamels = 42 \nprint('%d' % camels) # % is the format operator; it allows us to construct strings, replacing parts of strings with data in variables\n# %d means that the second operand\n# should be formatted as an integer (\"d\" stands for \"decimal\")\n# the result is the string '42', not the integer value 42\nprint('I have spotted %d camels.' 
% camels)\n\n# Debugging\n'''\nwhile True:\n    line = input('> ')\n    if len(line) > 0 and line[0] == '#':\n        continue\n    if line == 'done':\n        break\n    print(line)\nprint('Done!') \n'''\n\n# Exercise 5 \nstr = 'X-DSPAM-Confidence:0.8475'\natpos = str.find(':')\nfinal = str[atpos+1 :]\nffinal = float(final)\nprint('This is a floating point number %g' %ffinal) \n\n# Stripping\ngreet = ' Hello Bob '\nprint(greet.lstrip())\nprint(greet.strip())","sub_path":"Strings.py","file_name":"Strings.py","file_ext":"py","file_size_in_byte":1959,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"267472406","text":"import functools, threading\n\n \n_db_convert_default = '?'\n_db_convert = None\n\n\n# Database engine object:\nclass _Engine(object):\n    def __init__(self, connect):\n        self._connect = connect\n    def connect(self):\n        return self._connect()\n\nengine = None\n\ndef create_engine(**db_args):\n    import mysql.connector\n    global engine\n    engine = _Engine(lambda: mysql.connector.connect(**db_args))\n\n\n# Context object that holds the database connection:\nclass _DbCtx(threading.local):\n    def __init__(self):\n        self.connection = None\n        self.transactions = 0\n\n    def is_init(self):\n        return not self.connection is None\n\n    def init(self):\n        self.connection = _LazyConnection()\n        self.transactions = 0\n\n    def cleanup(self):\n        self.connection.cleanup()\n        self.connection = None\n\n    def cursor(self):\n        return self.connection.cursor()\n\n_db_ctx = _DbCtx()\n\nclass _LazyConnection():\n    def __init__(self):\n        self.connection = None\n\n    def cursor(self):\n        if self.connection is None:\n            self.connection = engine.connect()\n        return self.connection.cursor()\n\n    def cleanup(self):\n        if self.connection :\n            connection = self.connection\n            self.connection = None\n            connection.close()\n    \n    def commit(self):\n        self.connection.commit()\n    \n    def rollback(self):\n        self.connection.rollback()\n\n\nclass _ConnectionCtx(object):\n    def __enter__(self):\n        global _db_ctx\n        self.should_cleanup = False\n        if not _db_ctx.is_init():\n            _db_ctx.init()\n            self.should_cleanup = True\n        return self\n\n    def __exit__(self, exctype, excvalue, traceback):\n        global _db_ctx\n        if self.should_cleanup:\n            _db_ctx.cleanup()\n\ndef connection():\n    return _ConnectionCtx()\n\ndef with_connection(func):\n    @functools.wraps(func)\n    def wrapper(*args, **kw):\n        with connection():\n            return func(*args, **kw)\n    return wrapper\n\n@with_connection\ndef select(sql,*arg):\n    global _db_convert_default, _db_convert, _db_ctx\n    if _db_convert != _db_convert_default:\n        sql = sql.replace(_db_convert_default,_db_convert)\n    \n    cursor = _db_ctx.cursor()\n    try:\n        values = []\n        cursor.execute(sql,arg)\n        columns = cursor.column_names\n        for row in cursor:\n            current_row = {}\n            for c,r in zip(columns,row):\n                current_row[c] = r\n            values.append(current_row)\n        return values\n    finally:\n        if cursor:\n            cursor.close()\n\n@with_connection\ndef update(sql, *args):\n    global _db_convert_default, _db_convert, _db_ctx\n    if _db_convert != _db_convert_default:\n        sql = sql.replace(_db_convert_default,_db_convert)\n\n    cursor = _db_ctx.cursor()\n    try:\n        cursor.execute(sql,args)  # was `arg`, which is undefined in this function\n        r = cursor.rowcount\n        if _db_ctx.transactions == 0:\n            _db_ctx.connection.commit()  # commit lives on the lazy connection, not on _DbCtx\n        return r\n    finally:\n        if cursor:\n            cursor.close()\n\n\nif __name__ == '__main__':\n    create_engine(user='oneadmin', password='onepass', database='oneblog', host='127.0.0.1', port=3306)\n    _db_convert = '%s'\n    output = select('select * from users where id = ?', '1')\n    
print(output)\n","sub_path":"www/transwarp/db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":3259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"407635762","text":"from math import sqrt, floor\nfrom time import time\n\nstart = time()\n\ndef is_triangular(n):\n\tx = (sqrt(8*n+1)-1)/2\n\tif x - floor(x) == 0:\n\t\treturn True\n\telse:\n\t\treturn False\n\ndef is_pentagonal(n):\n\tx = (sqrt(24*n+1)+1)/6\n\tif x - floor(x) == 0:\n\t\treturn True\n\telse:\n\t\treturn False\n\ndef is_hexagonal(n):\n\tx = (sqrt(8*n+1)+1)/4\n\tif x - floor(x) == 0:\n\t\treturn True\n\telse:\n\t\treturn False\n\nnumbers = [40755]\nn = 40756\n\nwhile len(numbers) < 2:\n\tif is_triangular(n) and is_pentagonal(n) and is_hexagonal(n):\n\t\tnumbers.append(n)\n\tn+=1\n\tif n % 10**6 == 0:\n\t\tprint(n)\n\nprint(numbers[1])\nend = time()\nprint(end - start)\n","sub_path":"Python/045_tripenhex.py","file_name":"045_tripenhex.py","file_ext":"py","file_size_in_byte":607,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"103708334","text":"\"\"\"\n    @Author:DarknessFor9\n    @DateTime:6/29/19 2:31 PM\n\"\"\"\nfile = open(\"sleepA.py\", \"r\", encoding=\"utf-8\")\ncontent = \"\"\nfor line in file:\n    content += line\nprint(content)\nfile.close()\n\nfile_new = open(\"123\", \"w\", encoding=\"utf-8\")\nfile_new.write(content)\nfile_new.close()\n","sub_path":"code/built-in_function/file_open.py","file_name":"file_open.py","file_ext":"py","file_size_in_byte":272,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"339202387","text":"\"\"\"\nmain:\n    train: handles training. Get the model -> get the data -> choose the loss function and the parameter-update optimizer\n    -> choose the model evaluation method -> iterate over the dataset for training (forward pass, loss computation, backward pass, parameter updates, metric updates and visualization)\n    -> save the model -> validate on the validation set -> update the hyperparameters (mainly the learning rate)\n\n\"\"\"\n\nimport argparse\nimport datetime\nimport os\nimport os.path as osp\nimport shlex\nimport subprocess\nimport numpy as np\nimport torch\nimport torchvision\nimport fcn\nfrom torch.optim import lr_scheduler\nimport yaml\nimport sys\nfrom torch.utils.data import DataLoader\nimport data\nimport models\nimport torch.nn.functional as F\nfrom distutils.version import LooseVersion\nimport utils\nimport scipy.misc\n\n\n\n\n\ndef ModelStatics(modelPth,valImagLoader,outImg,cuda=True):\n    # Load the model\n    model = models.segnet(n_classes=10)\n    utils.ModelLoad(loadRoot=modelPth, model=model)\n    # import data\n    val_dataset = data.UAVDataClassSeg(\n        '/home/mlxuan/project/DeepLearning/data/image_Segmentation/dataAug/train/trainFull.txt', train=False)\n    valloader = DataLoader(val_dataset, batch_size=1, shuffle=False)\n\n\n    model.eval()\n    # Read the input images, then predict with the model\n    # Wrap ValImgLoader into a DataLoader\n    histAdd = np.zeros((10,10))\n    histAdd = histAdd.astype(np.uint64)\n    # Get the original image from data and the label from target\n    for batch_idx, (datas, target) in enumerate(valloader):\n        if cuda:  # whether to use the GPU\n            datas = datas.cuda()\n            model.cuda()\n            # target = target.cuda()\n        with torch.no_grad():\n            score = model(datas)  # run the model on the input data to get the result\n        imgs = datas.data.cpu()\n        lbl_pred = score.data.max(1)[1].cpu().numpy()[:, :, :]\n        lbl_true = target.data.cpu()\n\n        img, lt = valloader.dataset.untransform(imgs[0], lbl_true[0])\n        hist = fcn.utils._fast_hist(label_true=lt, label_pred=lbl_pred[0], n_class=10)\n        # np.savetxt('hist.txt',hist,fmt='%10.0f', header='a', comments=str(1))\n        histAdd = histAdd+hist\n        # _ = fcn.utils.label2rgb(lbl=lbl_pred, img=img)\n        # scipy.misc.imsave('./t5.jpg', _[0])\n        # lbl_true = target.datas.cpu()\n        # Save the prediction as an output image\n    np.savetxt('histAddTrain.txt', histAdd, 
 fmt='%20.0f')\n    \"\"\"\n    How to analyse the resulting confusion matrix:\n    static = np.loadtxt('./histAdd.txt',dtype = np.uint64)  # read the confusion matrix as a numpy array\n    static.max()  # get the maximum value\n    t1 = [[static[i][j] if i!=j else 0 for i in range(10)]for j in range(10)]\n    t2 =np.array(t1)  # set the diagonal elements of the confusion matrix to 0\n    t3 = np.where(t2 == np.max(t2))  # get the row and column of the maximum value\n    t4 = np.sort(t2,axis=None)  # sort the confusion matrix\n    \"\"\"\n# The testDataloader code needs rewriting. Steps: load the model, prepare the input data, iterate over the inputs (model(input), process the model output)\ndef valModel(modelPth,valImagLoader,outImg,cuda=True):\n    \"\"\"\n\n    :param modelPth: path of the model to predict with\n    :param valImagLoader: input images used for prediction\n    :param outImg: output image after prediction\n    :return:\n    \"\"\"\n    # Load the model\n    model = models.segnet(n_classes=10)\n    utils.ModelLoad(loadRoot=modelPth,model=model)\n    # import data\n    testDataset = data.UAVDataClassSeg('/home/mlxuan/project/DeepLearning/data/image_Segmentation/js-segment-annotator-master/data/images/Resample200*1500/1.txt',\n                                       train=False,test = True)\n    testLoader = DataLoader(testDataset, batch_size=1, shuffle=False)\n\n\n    model.eval()\n    # Read the input images, then predict with the model\n    # Wrap ValImgLoader into a DataLoader\n\n\n    for batch_idx, (datas, target) in enumerate(testLoader):\n        if cuda:  # whether to use the GPU\n            datas = datas.cuda()\n            model.cuda()\n            # target = target.cuda()\n        with torch.no_grad():\n            score = model(datas)  # run the model on the input data to get the result\n        imgs = datas.data.cpu()\n        img, lt = testLoader.dataset.untransform(imgs[0])\n        lbl_pred = score.data.max(1)[1].cpu().numpy()[:, :, :]\n        _ = fcn.utils.label2rgb(lbl=lbl_pred[0],img = img,label_names=['b','R','T','G','A','S','w','W','B','H'])\n        scipy.misc.imsave(os.path.join('/home/mlxuan/project/DeepLearning/FCN/fcn_mlx/output_segnet/al',str(batch_idx)+'.jpg'),_)\n        # lbl_true = target.datas.cpu()\n    # Save the prediction as an output image\n\n\n\n\ndef train():\n    # Setup Dataloader: training and validation datasets, which determine things like the class set\n    train_dataset = data.UAVDataClassSeg(\n        txt_path='/home/mlxuan/project/DeepLearning/FCN/fcn_mlx/data/data/train.txt')\n    trainloader = DataLoader(train_dataset, batch_size=12, shuffle=True, drop_last=True,num_workers=24,pin_memory=True)\n    val_dataset = data.UAVDataClassSeg(\n        '/home/mlxuan/project/DeepLearning/FCN/fcn_mlx/data/data/valid/valid.txt', train=False)\n    valloader = DataLoader(val_dataset, batch_size=1, shuffle=False)\n\n    # Setup device\n    device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n\n\n    #Setup model\n    model = models.segnet(n_classes=len(val_dataset.class_names))\n    # Initialize the FCN32s parameters with a pretrained VGG16 network\n    model.init_vgg16_params(torchvision.models.vgg16(pretrained=True))\n\n\n    # Setup optimizer, lr_scheduler and loss function\n\n    def cross_entropy2d(input, target, weight=None, size_average=True):\n        # input: (n, c, h, w), target: (n, h, w)\n        n, c, h, w = input.size()\n        # log_p: (n, c, h, w)\n        if LooseVersion(torch.__version__) < LooseVersion('0.3'):  # simple version comparison; torch.__version__ is passed in, so this compares the torch version\n            # ==0.2.X\n            log_p = F.log_softmax(input)\n        else:\n            # >=0.3\n            log_p = F.log_softmax(input, dim=1)\n        # log_p: (n*h*w, c). log_p is the result of log_softmax over the input and gives the per-class probabilities. tensor.transpose swaps tensor dimensions, e.g. rows become columns and columns become rows\n        log_p = log_p.transpose(1, 2).transpose(2, 3).contiguous()\n        log_p = log_p[target.view(n, h, w, 1).repeat(1, 1, 1, c) >= 0]\n        log_p = log_p.view(-1, c)\n        # target: (n*h*w,)\n        mask = target >= 0\n        target = target[mask]\n        loss = F.nll_loss(log_p, target, weight=weight)\n        if size_average:\n            loss /= mask.data.sum()\n        return loss\n\n\n    lossFun = cross_entropy2d\n\n\n    optim = torch.optim.Adam(\n        params=model.parameters(),\n        lr=1.0e-5,\n        weight_decay=0.0005)\n    # Define the learning-rate schedule\n    scheduler = lr_scheduler.ReduceLROnPlateau(optim, mode='min', patience=1,min_lr=10e-10,eps=10e-9)  # 
'min' means the monitored metric should decrease; patience is the number of steps tolerated without improvement\n\n    # utils.ModelLoad('/home/mlxuan/project/DeepLearning/FCN/fcn_mlx/output_segnet/bestModel/1.4000*3000_trainModel.tar',\n    #                 model)\n    now = datetime.datetime.now()\n    logFile = utils.Log(osp.join('/home/mlxuan/project/DeepLearning/FCN/fcn_mlx/output_segnet/visualization_viz/',now.strftime('%Y%m%d_%H%M%S.%f')+'log.csv'),\n                        ['iteration','train/loss','train/mean_iu','valid/loss','valid/mean_iu','lr'])\n    trainer = models.Trainer(\n        cuda =True,\n        model=model,\n        optimizer=optim,\n        loss_fcn=lossFun,\n        train_loader=trainloader,\n        val_loader=valloader,\n        out='./output_segnet/',\n        max_iter=100000,\n        scheduler = scheduler,\n        interval_validate=800,\n        logFile=logFile\n    )\n    trainer.train()  # start training\n\n\n\n\nif __name__ == '__main__':\n    # train()\n    # ModelStatics('/home/mlxuan/project/DeepLearning/FCN/fcn_mlx/output_segnet/20190111_124455.109984model_best.pth.tar','','',cuda=True)\n    valModel('/home/mlxuan/project/DeepLearning/FCN/fcn_mlx/output_segnet/20190117_235208.898474model_best.pth.tar','','',cuda=True)\n","sub_path":"mainSegnet.py","file_name":"mainSegnet.py","file_ext":"py","file_size_in_byte":7959,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"398641681","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport time\nimport json\nimport datetime\nimport os, sys\n\nparentdir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\nsys.path.insert(0, parentdir)\nfrom ekt_lib import ekt_net, ekt_cfg\nfrom ekt_lib.ekt_sfu import Ektsfu\nfrom pathlib2 import Path\nfrom ekt_lib.ekt_stb_tester import stb_tester_execute_testcase\nfrom ekt_lib.threshold_algorithm_SFU import mosaic_algorithm, iterate_to_find_threshold_noise_cn_step_by_step\nfrom ekt_lib.ekt_utils import write_test_result, find_level_offset_by_frequency, write_json_file, read_json_file, \\\n    dvbt2_68_performance_in_SFN_echo_json_to_csv\n\nFREQUENCY_666 = 666\nLEVEL_OFFSET_666 = find_level_offset_by_frequency(\"DVBT_T2_FREQUENCY_LEVEL_OFFSET\", 666.0)\nLEVEL_50_666 = str(\"%.2f\" % (-50 - LEVEL_OFFSET_666))\n\nFREQUENCY_199 = 199\nLEVEL_OFFSET_198_5 = find_level_offset_by_frequency(\"DVBT_T2_FREQUENCY_LEVEL_OFFSET\", 198.5)\nLEVEL_50_199 = str(\"%.2f\" % (-50 - LEVEL_OFFSET_198_5))\n\nMODULATION_QPSK = \"T4\"\nMODULATION_16QAM = \"T16\"\nMODULATION_64QAM = \"T64\"\nMODULATION_256QAM = \"T256\"\n\nCODE_RATE_1_2 = \"R1_2\"\nCODE_RATE_3_5 = \"R3_5\"\nCODE_RATE_2_3 = \"R2_3\"\nCODE_RATE_3_4 = \"R3_4\"\nCODE_RATE_4_5 = \"R4_5\"\nCODE_RATE_5_6 = \"R5_6\"\n\nGUARD_G1_128 = \"G1128\"\nGUARD_G1_32 = \"G1_32\"\nGUARD_G1_16 = \"G1_16\"\nGUARD_G19_256 = \"G19256\"\nGUARD_G1_8 = \"G1_8\"\nGUARD_G19_128 = \"G19128\"\nGUARD_G1_4 = \"G1_4\"\n\nKE32 = \"M32E\"\nKN32 = \"M32K\"\n\nPARAMETER_LIST = [\n    [FREQUENCY_666, LEVEL_OFFSET_666, LEVEL_50_666, 8, KE32, MODULATION_256QAM, \"PP4\", CODE_RATE_2_3, GUARD_G1_16,\n     24.6, [\n        [0, 100.1, 0, 0, 0, 200.1, None],\n        [0, 100.1, 3, 0, 3, 200.1, None],\n        [0, 100.1, 6, 0, 6, 200.1, None],\n        [0, 100.1, 9, 0, 9, 200.1, None],\n        [0, 100.1, 12, 0, 12, 200.1, None],\n        [0, 100.1, 15, 0, 15, 200.1, None],\n        [0, 100.1, 18, 0, 18, 200.1, None],\n        [0, 100.1, 21, 0, 21, 200.1, None],\n        [0, 100.1, 15, 0, 0, 200.1, None],\n        [0, 100.1, 15, 0, 3, 200.1, None],\n        [0, 100.1, 15, 0, 6, 200.1, None],\n        [0, 100.1, 15, 0, 9, 200.1, None],\n        [0, 100.1, 15, 0, 12, 200.1, None],\n        [0, 100.1, 15, 0, 18, 200.1, None],\n        [0, 100.1, 15, 0, 21, 200.1, None],\n        [0, 100.1, 0, 0, 15, 200.1, None],\n        [0, 100.1, 3, 0, 15, 200.1, 
None],\n        [0, 100.1, 6, 0, 15, 200.1, None],\n        [0, 100.1, 9, 0, 15, 200.1, None],\n        [0, 100.1, 12, 0, 15, 200.1, None],\n        [0, 100.1, 18, 0, 15, 200.1, None],\n        [0, 100.1, 21, 0, 15, 200.1, None]]],\n    [FREQUENCY_666, LEVEL_OFFSET_666, LEVEL_50_666, 8, KE32, MODULATION_256QAM, \"PP4\", CODE_RATE_3_5, GUARD_G19_256,\n     22.6, [\n        [0, 120.1, 0, 0, 0, 240.1, None],\n        [0, 120.1, 3, 0, 3, 240.1, None],\n        [0, 120.1, 6, 0, 6, 240.1, None],\n        [0, 120.1, 9, 0, 9, 240.1, None],\n        [0, 120.1, 12, 0, 12, 240.1, None],\n        [0, 120.1, 15, 0, 15, 240.1, None],\n        [0, 120.1, 18, 0, 18, 240.1, None],\n        [0, 120.1, 21, 0, 21, 240.1, None],\n        [0, 120.1, 15, 0, 0, 240.1, None],\n        [0, 120.1, 15, 0, 3, 240.1, None],\n        [0, 120.1, 15, 0, 6, 240.1, None],\n        [0, 120.1, 15, 0, 9, 240.1, None],\n        [0, 120.1, 15, 0, 12, 240.1, None],\n        [0, 120.1, 15, 0, 18, 240.1, None],\n        [0, 120.1, 15, 0, 21, 240.1, None],\n        [0, 120.1, 0, 0, 15, 240.1, None],\n        [0, 120.1, 3, 0, 15, 240.1, None],\n        [0, 120.1, 6, 0, 15, 240.1, None],\n        [0, 120.1, 9, 0, 15, 240.1, None],\n        [0, 120.1, 12, 0, 15, 240.1, None],\n        [0, 120.1, 18, 0, 15, 240.1, None],\n        [0, 120.1, 21, 0, 15, 240.1, None]]],\n    [FREQUENCY_199, LEVEL_OFFSET_198_5, LEVEL_50_199, 7, KN32, MODULATION_256QAM, \"PP2\", CODE_RATE_2_3, GUARD_G1_8,\n     25.1, [\n        [0, 243.1, 0, 0, 0, 486.1, None],\n        [0, 243.1, 3, 0, 3, 486.1, None],\n        [0, 243.1, 6, 0, 6, 486.1, None],\n        [0, 243.1, 9, 0, 9, 486.1, None],\n        [0, 243.1, 12, 0, 12, 486.1, None],\n        [0, 243.1, 15, 0, 15, 486.1, None],\n        [0, 243.1, 18, 0, 18, 486.1, None],\n        [0, 243.1, 21, 0, 21, 486.1, None],\n        [0, 243.1, 15, 0, 0, 486.1, None],\n        [0, 243.1, 15, 0, 3, 486.1, None],\n        [0, 243.1, 15, 0, 6, 486.1, None],\n        [0, 243.1, 15, 0, 9, 486.1, None],\n        [0, 243.1, 15, 0, 12, 486.1, None],\n        [0, 243.1, 15, 0, 18, 486.1, None],\n        [0, 243.1, 15, 0, 21, 486.1, None],\n        [0, 243.1, 0, 0, 15, 486.1, None],\n        [0, 243.1, 3, 0, 15, 486.1, None],\n        [0, 243.1, 6, 0, 15, 486.1, None],\n        [0, 243.1, 9, 0, 15, 486.1, None],\n        [0, 243.1, 12, 0, 15, 486.1, None],\n        [0, 243.1, 18, 0, 15, 486.1, None],\n        [0, 243.1, 21, 0, 15, 486.1, None]]]\n]\n\nmy_file = Path(\"../../ekt_json/dvbt2_68_performance_in_SFN_echo.json\")\nif my_file.exists():\n    pass\nelse:\n    dict_test_parame_result = {}\n    dict_test_parame_result[\"test_parame_result\"] = PARAMETER_LIST\n    write_json_file(\"../../ekt_json/dvbt2_68_performance_in_SFN_echo.json\",\n                    dict_test_parame_result)\n\nif __name__ == '__main__':\n    \"\"\"\n    Test procedure:\n    1. Reset the instrument\n    2. Select TSPLAYER\n    3. Play the stream file\n    4. Set code_rate, modulation, bandwidth, guard, frequency, input_signal_level\n    5. Tune the channel in the STB application and confirm the lock succeeds (send a POST request to stb-tester to run the testcase)\n    6. Modify the variable parameters one by one, check whether the STB picture shows mosaic artifacts, and record the results\n    \"\"\"\n    load_dict = read_json_file(\"../../ekt_json/dvbt2_68_performance_in_SFN_echo.json\")\n    sfu_ip = ekt_cfg.SFU_IP\n    specan = Ektsfu(sfu_ip)\n    specan.preset_instrument()\n    specan = Ektsfu(sfu_ip)\n    specan.set_modulation_modulation_source(\"DTV\")\n    specan = Ektsfu(sfu_ip)\n    specan.set_modulation_modulation_standard_dvt(\"T2DVb\")\n    specan = Ektsfu(sfu_ip)\n    specan.set_player_timing_openfile(r\"E:\\333\\DIVER.GTS\")\n    specan = Ektsfu(sfu_ip)\n    specan.set_digitaltv_input_source_dvbt2(\"TSPLayer\")\n    specan = Ektsfu(sfu_ip)\n    specan.set_level_level_rf(\"ON\")\n    specan = Ektsfu(sfu_ip)\n    specan.set_noise_noise_noise(\"ADD\")\n    specan = Ektsfu(sfu_ip)\n    specan.set_noise_noise_awgn(\"ON\")\n    specan = Ektsfu(sfu_ip)\n    specan.set_noise_settings_bandwith(\"ON\")\n    specan = Ektsfu(sfu_ip)\n    specan.set_fading_fading_state(\"ON\")\n    specan = Ektsfu(sfu_ip)\n    
specan.set_impairments_modulator(\"OFF\")\n    specan = Ektsfu(sfu_ip)\n    specan.set_impairments_baseband(\"OFF\")\n    specan = Ektsfu(sfu_ip)\n    specan.set_digitaltv_system_papr_dvbt2(\"TR\")\n    specan = Ektsfu(sfu_ip)\n    specan.set_digitaltv_framing_ldata_dvbt2(\"61\")\n\n    specan = Ektsfu(sfu_ip)\n    specan.set_fading_profile_state(\"2\", \"1\", \"ON\")\n    specan = Ektsfu(sfu_ip)\n    specan.set_fading_profile_state(\"3\", \"1\", \"ON\")\n    specan = Ektsfu(sfu_ip)\n    specan.set_fading_profile_state(\"4\", \"1\", \"ON\")\n\n    specan = Ektsfu(sfu_ip)\n    specan.set_fading_profile_profile(\"2\", \"1\", \"SPATh\")\n    specan = Ektsfu(sfu_ip)\n    specan.set_fading_profile_profile(\"3\", \"1\", \"SPATh\")\n    specan = Ektsfu(sfu_ip)\n    specan.set_fading_profile_profile(\"4\", \"1\", \"SPATh\")\n\n    LIST_PARAMETER_DATA = load_dict.get(\"test_parame_result\")\n    for PARAMETER_FIXED in LIST_PARAMETER_DATA:\n        loop_lock_mark = False\n        for FADING in PARAMETER_FIXED[10]:\n            if FADING[6] == None:\n                loop_lock_mark = True\n                break\n        if loop_lock_mark == True:\n            pass\n        else:\n            continue\n        if PARAMETER_FIXED[0] == 199:\n            specan = Ektsfu(sfu_ip)\n            specan.set_digitaltv_framing_ldata_dvbt2(\"45\")\n        specan = Ektsfu(sfu_ip)\n        specan.set_frequency_frequency_frequency(str(int(PARAMETER_FIXED[0])) + \"MHz\")\n        specan = Ektsfu(sfu_ip)\n        specan.set_level_level_offset(str(PARAMETER_FIXED[1]))\n        specan = Ektsfu(sfu_ip)\n        specan.set_level_level_level(\"dBm\", PARAMETER_FIXED[2])\n        specan = Ektsfu(sfu_ip)\n        specan.set_digitaltv_framing_channelbandwidth_dvbt2(\"BW_{}\".format(str(PARAMETER_FIXED[3])))\n\n        specan = Ektsfu(sfu_ip)\n        specan.set_digitaltv_framing_fftsize_dvbt2(PARAMETER_FIXED[4])\n        specan = Ektsfu(sfu_ip)\n        specan.set_digitaltv_bicm_constellation_dvbt2(PARAMETER_FIXED[5])\n        specan = Ektsfu(sfu_ip)\n        specan.set_digitaltv_framing_pilot_dvbt2(PARAMETER_FIXED[6])\n        specan = Ektsfu(sfu_ip)\n        specan.set_digitaltv_bicm_coderate_dvbt2(PARAMETER_FIXED[7])\n        specan = Ektsfu(sfu_ip)\n        specan.set_digitaltv_framing_guard_dvbt2(PARAMETER_FIXED[8])\n        if PARAMETER_FIXED[0] == 199:\n            specan = Ektsfu(sfu_ip)\n            specan.set_digitaltv_system_papr_dvbt2(\"TR\")\n            specan = Ektsfu(sfu_ip)\n            specan.set_digitaltv_framing_ldata_dvbt2(\"45\")\n\n        # specan = Ektsfu(sfu_ip)\n        # specan.set_fading_profile_additdelay(\"1\", \"2\", \"1.95E-6\")\n\n        net = ekt_net.EktNetClient(ekt_cfg.FRONT_END_SERVER_IP, ekt_cfg.FRONT_END_SERVER_PORT)\n        net.send_data(\n            json.dumps({\"cmd\": \"set_frequency_data\", \"frequency\": str(int(PARAMETER_FIXED[0]))}))\n        time.sleep(1)\n        del net\n        net = ekt_net.EktNetClient(ekt_cfg.FRONT_END_SERVER_IP, ekt_cfg.FRONT_END_SERVER_PORT)\n        net.send_data(json.dumps({\"cmd\": \"set_bandwidth_data\", \"bandwidth\": str(PARAMETER_FIXED[3])}))\n        time.sleep(1)\n        del net\n        \"\"\"\n        Trigger stb-tester to set the frequency and symbol rate\n        \"\"\"\n        stb_tester_execute_testcase(ekt_cfg.STB_TESTER_URL, ekt_cfg.BANCH_ID,\n                                    ekt_cfg.DVB_T2_LOCK_FUNCTION, ekt_cfg.DVB_T2_CATEGORY, ekt_cfg.DVB_T2_REMOTE)\n        net = ekt_net.EktNetClient(ekt_cfg.FRONT_END_SERVER_IP, ekt_cfg.FRONT_END_SERVER_PORT)\n        lock_state = net.send_rec(json.dumps({\"cmd\": \"get_lock_state\"}))\n        if lock_state == \"1\":\n            pass\n        elif lock_state == \"0\":\n            write_test_result(\"../../ekt_log/test_result_sfu.txt\",\n                              (\n                                  \"dvbt2_68_performance_in_SFN_echo: current_time:{}, frequency:{} MHz,bandwidth:{} Ksym/s, {}\".format(\n                                      datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),\n                                      str(PARAMETER_FIXED[0]), str(PARAMETER_FIXED[3]),\n                                      \"Lock fail\") + \"\\n\"))\n            continue\n        else:\n            write_test_result(\"../../ekt_log/test_result_sfu.txt\", 
(\"Lock state err\" + \"\\n\"))\n continue\n\n for FADING in PARAMETER_FIXED[10]:\n if FADING[6] == None:\n pass\n else:\n continue\n\n specan = Ektsfu(sfu_ip)\n specan.set_fading_profile_pathloss(\"2\", \"1\", \"{} dB\".format(str(FADING[0])))\n time.sleep(1)\n specan = Ektsfu(sfu_ip)\n specan.set_fading_profile_basicdelay(\"2\", \"{}E-6\".format(FADING[1]))\n time.sleep(1)\n specan = Ektsfu(sfu_ip)\n specan.set_fading_profile_pathloss(\"3\", \"1\", \"{} dB\".format(str(FADING[2])))\n time.sleep(1)\n specan = Ektsfu(sfu_ip)\n specan.set_fading_profile_basicdelay(\"3\", \"{}E-6\".format(FADING[3]))\n time.sleep(1)\n specan = Ektsfu(sfu_ip)\n specan.set_fading_profile_pathloss(\"4\", \"1\", \"{} dB\".format(str(FADING[4])))\n time.sleep(1)\n specan = Ektsfu(sfu_ip)\n specan.set_fading_profile_basicdelay(\"4\", \"{}E-6\".format(FADING[5]))\n time.sleep(1)\n\n # specan = Ektsfu(sfu_ip)\n # specan.set_fading_profile_basicdelay(\"2\", \"{}E-6\".format(str(PARAMETER[7])))\n\n res, test_result = iterate_to_find_threshold_noise_cn_step_by_step(sfu_ip, PARAMETER_FIXED[9])\n print(\n \"dvbt2_68_performance_in_SFN_echo: current_time:{}, modulation: {} coderate:{}, frequency:{} MHz,bandwidth:{} MHZ,{}\".format(\n datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'), PARAMETER_FIXED[5],\n PARAMETER_FIXED[7], str(PARAMETER_FIXED[0]), str(PARAMETER_FIXED[3]), res))\n write_test_result(\"../../ekt_log/test_result_sfu.txt\",\n \"dvbt2_68_performance_in_SFN_echo: current_time:{}, modulation: {} coderate:{}, frequency:{} MHz,bandwidth:{} MHZ,{}\".format(\n datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'), PARAMETER_FIXED[5],\n PARAMETER_FIXED[7], str(PARAMETER_FIXED[0]), str(PARAMETER_FIXED[3]), res) + \"\\n\")\n\n FADING[6] = test_result\n write_json_file(\"../../ekt_json/dvbt2_68_performance_in_SFN_echo.json\", load_dict)\n dvbt2_68_performance_in_SFN_echo_json_to_csv(\n \"../../ekt_json/dvbt2_68_performance_in_SFN_echo.json\",\n \"../../ekt_test_report/dvbt2_68_performance_in_SFN_echo.csv\")\n","sub_path":"ekt_testcases/dvbt2/dvbt2_68_performance_in_SFN_echo.py","file_name":"dvbt2_68_performance_in_SFN_echo.py","file_ext":"py","file_size_in_byte":12982,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"247618936","text":"# Definition for singly-linked list.\n# class ListNode(object):\n# def __init__(self, val=0, next=None):\n# self.val = val\n# self.next = next\nclass Solution(object):\n\n def sort_insertion(self, nums):\n \"\"\"\n :type nums: List\n :rtype: List\n \"\"\"\n '''\n i ← 1\n while i < length(A)\n j ← i\n while j > 0 and A[j-1] > A[j]\n swap A[j] and A[j-1]\n j ← j - 1\n end while\n i ← i + 1\n end while\n '''\n\n # [3,2,1,0]\n # [2,3,1,0]\n nums_init = nums\n for i in range(1, len(nums)): # 1, 2, 3\n for j in range(i-1, -1, -1):\n if nums[j] > nums[j+1]:\n nums[j], nums[j+1] = nums[j+1], nums[j]\n\n # print(nums, nums_init)\n return nums\n\n # learned:\n # - walk thru multiple loops\n # - use specific examples and walk thru\n # - use range to control looping thru arrays\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"sort_insertion/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":1027,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"252235954","text":"from django.conf.urls import patterns, url\nimport views\n#import admin\n\nurlpatterns = patterns('',\n url(r'menu/$', views.menu, name='menu'),\n url(r'posts/$', views.listPosts, name='listPosts'),\n 
url(r'menu/(?P<category>\\w+)/$', views.menuCategory, name='menuCategory'),\n url(r'blog/(?P<postUrl>\\w+)/$', views.getBlogPost, name='getBlogPost'),\n url(r'menu/(?P<category>\\w+)/(?P<tp>\\w+)/(?P<post>\\w+)/$', views.menuCategoryPost, name='menuCategoryPost'),\n\n #url(r'admin/$', views.indexPage, name='indexPage' ),\n)","sub_path":"webface/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":527,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"572717660","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.7 (3394)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /Users/sage/fatedoc/FATE/federatedml/param/cross_validation_param.py\n# Compiled at: 2020-04-28 09:16:53\n# Size of source mod 2**32: 2419 bytes\nimport copy\nfrom federatedml.param.base_param import BaseParam\nfrom federatedml.util import consts\n\nclass CrossValidationParam(BaseParam):\n __doc__ = \"\\n Define cross validation params\\n\\n Parameters\\n ----------\\n n_splits: int, default: 5\\n Specify how many splits used in KFold\\n\\n mode: str, default: 'Hetero'\\n Indicate what mode is current task\\n\\n role: str, default: 'Guest'\\n Indicate what role is current party\\n\\n shuffle: bool, default: True\\n Define whether do shuffle before KFold or not.\\n\\n random_seed: int, default: 1\\n Specify the random seed for numpy shuffle\\n\\n need_cv: bool, default True\\n Indicate if this module needed to be run\\n\\n \"\n\n def __init__(self, n_splits=5, mode=consts.HETERO, role=consts.GUEST, shuffle=True, random_seed=1, need_cv=False):\n super(CrossValidationParam, self).__init__()\n self.n_splits = n_splits\n self.mode = mode\n self.role = role\n self.shuffle = shuffle\n self.random_seed = random_seed\n self.need_cv = need_cv\n\n def check(self):\n model_param_descr = \"cross validation param's \"\n self.check_positive_integer(self.n_splits, model_param_descr)\n self.check_valid_value((self.mode), model_param_descr, valid_values=[consts.HOMO, consts.HETERO])\n self.check_valid_value((self.role), model_param_descr, valid_values=[consts.HOST, consts.GUEST, consts.ARBITER])\n self.check_boolean(self.shuffle, model_param_descr)\n if self.random_seed is not None:\n self.check_positive_integer(self.random_seed, model_param_descr)","sub_path":"pycfiles/ETAF-1.4.0rc0-py3-none-any/cross_validation_param.cpython-37.py","file_name":"cross_validation_param.cpython-37.py","file_ext":"py","file_size_in_byte":1946,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"68024828","text":"# -*- coding: utf-8 *-*\n#\n# Copyright (c) 2012 Mikhail Babich <bma@bma.su>\n# All rights reserved.\n#\n\nfrom time import time\nfrom math import floor, sqrt, pi, atan\nfrom bisect import bisect_left\nfrom datetime import datetime\nimport adsb_logging as logging\n\n\nCPR_CONST = (10.4704713, 14.82817437, 18.18626357, 21.02939493, 23.54504487,\n 25.82924707, 27.9389871, 29.91135686, 31.77209708, 33.53993436,\n 35.22899598, 36.85025108, 38.41241892, 39.92256684, 41.38651832,\n 42.80914012, 44.19454951, 45.54626723, 46.86733252, 48.16039128,\n 49.42776439, 50.67150166, 51.89342469, 53.09516153, 54.27817472,\n 55.44378444, 56.59318756, 57.72747354, 58.84763776, 59.95459277,\n 61.04917774, 62.13216659, 63.20427479, 64.26616523, 65.3184531,\n 66.36171008, 67.39646774, 68.42322022, 69.44242631, 70.45451075,\n 71.45986473, 72.45884545, 73.45177442, 74.43893416, 75.42056257,\n 
76.39684391, 77.36789461, 78.33374083, 79.29428225, 80.24923213,\n    81.19801349, 82.13956981, 83.07199445, 83.99173563, 84.89166191,\n    85.75541621, 86.53536998, 87.0)\n\nCS_CONST = ('@', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K',\n    'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y',\n    'Z', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ',\n    ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', '0', '1', '2', '3', '4', '5',\n    '6', '7', '8', '9', ' ', ' ', ' ', ' ', ' ', ' ')\n\n\nMSG_INUSE = [x for x in xrange(1, 9)]  # 1..8\n\n\ndef get_datetime(current_time):\n    dt = datetime.utcfromtimestamp(current_time)\n    d_str = dt.strftime(\"%Y/%m/%d,%H:%M:%S.{ms},%Y/%m/%d,%H:%M:%S.{ms}\")\n    return d_str.format(ms=dt.strftime(\"%f\")[:3])\n\n\ndef bin2int(buf):\n    data = 0\n    for x in buf:\n        data = (data << 8) | x\n    return data\n\n\ndef crc112(buf):\n    data = bin2int(buf[:4])\n    data1 = bin2int(buf[4:8])\n    data2 = bin2int(buf[8:12]) & 0xffffff00\n    poly = 0xfffa0480\n    for n in xrange(0, 88):\n        if data & 0x80000000 != 0:\n            data ^= poly\n        data = (data << 1) & 0xffffffff\n        if data1 & 0x80000000 != 0:\n            data |= 1\n        data1 = (data1 << 1) & 0xffffffff\n        if data2 & 0x80000000 != 0:\n            data1 |= 1\n        data2 = (data2 << 1) & 0xffffffff\n    return data\n\n\ndef crc56(buf):\n    data = bin2int(buf[:4])\n    poly = 0xfffa0480\n    for n in xrange(0, 32):\n        if data & 0x80000000 != 0:\n            data ^= poly\n        data = (data << 1) & 0xffffffff\n    return data\n\ndef decode_icao(data):\n    icao = (crc56(data) >> 8) ^ (data[4] << 16 | data[5] << 8 | data[6])\n    return \"{:06X}\".format(icao)\n\n\ndef decode_icao112(data):\n    icao = (crc112(data) >> 8) ^ (data[11] << 16 | data[12] << 8 | data[13])\n    return \"{:06X}\".format(icao)\n\n\ndef NL(x):\n    return 59 - bisect_left(CPR_CONST, abs(x))\n\n\ndef decode_fs(fs):\n    if fs == 0:\n        return \"0,0,0,0\"\n    elif fs == 1:\n        return \"0,0,0,1\"\n    elif fs == 2:\n        return \"1,0,0,0\"\n    elif fs == 3:\n        return \"1,0,0,1\"\n    elif fs == 4:\n        return \"1,0,0,\"\n    elif fs == 5:\n        return \"0,0,0,\"\n    else:\n        return \",,,\"\n\n\n\nclass CPRDB(object):\n    def __init__(self, expire_time=10, clean_time=1):\n        self.cpr_db = {}\n        self.expire_time = expire_time\n        self.clean_time = clean_time\n        self.last_clean = None\n\n    def clean(self, current_time):\n        # Check whether it is time to clean the database\n        if self.last_clean is not None and \\\n                current_time - self.last_clean < self.clean_time:\n            return\n\n        self.last_clean = current_time\n        for key in self.cpr_db.keys():\n            if current_time - self.cpr_db[key]['time'] > self.expire_time:\n                del self.cpr_db[key]\n\n    def add_cpr(self, icao, f, lat, lon, current_time):\n        self.clean(current_time)\n        if icao not in self.cpr_db:\n            item = dict(icao=icao, lat0=None, lat1=None,\n                        lon0=None, lon1=None, time=current_time)\n            self.cpr_db[icao] = item\n        else:\n            item = self.cpr_db[icao]\n            item['time'] = current_time\n        if f > 0:\n            item['lat1'] = lat\n            item['lon1'] = lon\n        else:\n            item['lat0'] = lat\n            item['lon0'] = lon\n        return item\n\n\ndef modulo(x, y):\n    return x - y * floor(x / y)\n\n\ndef decode_cpr(db, icao, lat, lon, cpr_format, current_time, ground=False):\n    \"\"\"Decode CPR coordinates\"\"\"\n    nbits_const = 131072.0\n    cpr_const = 360.0\n    dlat0 = 6.0\n    dlat1 = 6.101694915254237288\n    if ground:\n        cpr_const = 90.0\n        dlat0 = 1.5\n        dlat1 = 1.5254237288135593\n\n    pair = db.add_cpr(icao, cpr_format, lat, lon, current_time)\n    if pair['lat0'] is None or pair['lat1'] is None:\n        return None, None\n\n    j = floor((59.0 * pair['lat0'] - 60.0 * pair['lat1']) / nbits_const + 0.5)\n    rlat0 = dlat0 * 
(modulo(j, 60.0) + pair['lat0'] / nbits_const)\n    rlat1 = dlat1 * (modulo(j, 59.0) + pair['lat1'] / nbits_const)\n\n    if rlat0 >= 270:\n        rlat0 -= 360.0\n    if rlat1 >= 270:\n        rlat1 -= 360.0\n\n    NL0 = NL(rlat0)\n    NL1 = NL(rlat1)\n\n    if NL0 != NL1:\n        return None, None\n\n    m = floor((pair['lon0'] * (NL0 - 1) - \\\n        pair['lon1'] * NL1) / nbits_const + 0.5)\n\n    if cpr_format == 0:\n        ni = NL0\n        if NL0 <= 1.0:\n            ni = 1.0\n        dlon = cpr_const / ni\n        rlat = rlat0\n        rlon = dlon * (modulo(m, ni) + pair['lon0'] / nbits_const)\n    else:\n        ni = 1.0\n        if NL1 > 2.0:\n            ni = NL1 - 1.0\n        dlon = cpr_const / ni\n        rlat = rlat1\n        rlon = dlon * (modulo(m, ni) + pair['lon1'] / nbits_const)\n    return rlat, rlon\n\n\nSUB2GSUB0 = {1: 0, 3: 1, 2: 2, 6: 3, 4: 4}\nSUB2GSUB1 = {4: 0, 6: 1, 2: 2, 3: 3, 1: 4}\n\ndef decode_alt(data):\n    \"\"\"Decode altitude for DF0\"\"\"\n    mbit = data & 0x0040\n    qbit = data & 0x0010\n\n    C1 = (data >> 12) & 1\n    A1 = (data >> 11) & 1\n    C2 = (data >> 10) & 1\n    A2 = (data >> 9) & 1\n    C4 = (data >> 8) & 1\n    A4 = (data >> 7) & 1\n    B1 = (data >> 5) & 1\n    D1 = (data >> 4) & 1\n    B2 = (data >> 3) & 1\n    D2 = (data >> 2) & 1\n    B4 = (data >> 1) & 1\n    D4 = data & 1\n\n    if qbit:\n        alt = (C1*1024 + A1*512 + C2*256 + A2*128 + C4*64 + A4*32 + \\\n            B1*16 + B2*8 + D2*4 + B4*2 + D4) * 25 - 1000\n        if mbit:\n            alt = round((alt + 1000.0) / 25.0) * 10 - 300\n            alt = round(alt * 3.281)\n        return int(alt)\n\n    alt_code = D2*1024 + D4*512 + A1*256 + A2*128 + C4*64 + B1*32 + \\\n        B2*16 + B4*8 + C1*4 + C2*2 + C4\n    alt_grey = alt_code >> 3\n    subcode = alt_code & 0x7\n\n    copybit = 0\n    mask = 0x80\n    grey_res = 0\n    while mask > 0:\n        if alt_grey & mask:\n            copybit ^= 1\n        if copybit:\n            grey_res |= mask\n        mask >>= 1\n    ags = 0\n    if grey_res % 2 == 0:\n        if subcode in SUB2GSUB0:\n            ags = SUB2GSUB0[subcode]\n    else:\n        if subcode in SUB2GSUB1:\n            ags = SUB2GSUB1[subcode]\n    return grey_res * 500 + ags * 100 - 1200\n\n\ndef decode_df17_pos(db, data, icao, current_time, stat):\n    \"\"\"Airborne position\"\"\"\n    alt = (((data[5] << 3) & 0b11111110000) | (data[6] >> 4)) * 25 - 1000\n    f = (data[6] >> 2) & 1\n    lat = (data[6] & 0b11) << 15 | data[7] << 7 | data[8] >> 1\n    lon = (data[8] & 1) << 16 | data[9] << 8 | data[10]\n    rlat, rlon = decode_cpr(db, icao, lat, lon, f, current_time)\n    if rlat is None or rlon is None:\n        return None\n    return \"MSG,3,0,0,{icao},0,{date},,{alt},,,{lat},{lon},,,0,0,0,0\\r\\n\" \\\n        \"\".format(icao=icao, alt=alt, lat=rlat, lon=rlon,\n        date=get_datetime(current_time))\n\n\ndef decode_df17_speed(db, data, icao, current_time, stat):\n    \"\"\"Velocity\"\"\"\n    subtype = data[4] & 0b111\n    if subtype != 1:\n        return None\n    ew_speed = float(((data[5] & 0b11) << 8) | data[6])\n    ns_speed = float((data[7] & 0b1111111) << 3 | data[8] >> 5)\n    if ew_speed == 0 and ns_speed == 0:\n        return None\n    ew_direction = (data[5] >> 2) & 1\n    ns_direction = (data[7] >> 7) & 1\n    ground = floor(sqrt(ew_speed * ew_speed + ns_speed * ns_speed))\n    if ew_direction == 0 and ns_direction == 0:\n        if ew_speed == 0:\n            track = 0\n        else:\n            track = 90 - 180.0 / pi * atan(ns_speed / ew_speed)\n    elif ew_direction == 0 and ns_direction == 1:\n        if ew_speed == 0:\n            track = 180\n        else:\n            track = 90 + 180.0 / pi * atan(ns_speed / ew_speed)\n    elif ew_direction == 1 and ns_direction == 1:\n        if ew_speed == 0:\n            track = 180\n        else:\n            track = 270 - 180.0 / pi * atan(ns_speed / ew_speed)\n    else:\n        if ew_speed == 0:\n            track = 0\n        else:\n            track = 270 + 180.0 / pi * atan(ns_speed / ew_speed)\n\n    vertical = (((data[8] & 0b111) << 6 | data[9] >> 2) - 1) * 64\n    if data[8] & 0b1000 
!= 0:\n        vertical = - vertical\n    return \"MSG,4,0,0,{icao},0,{date},,,{ground:1.1f},{track:1.1f}\" \\\n        \",,,{vertical:1.0f},,0,0,0,0\\r\\n\".format(icao=icao, track=track,\n        ground=ground, vertical=vertical, date=get_datetime(current_time))\n\n\ndef decode_df17_ident(db, data, icao, current_time, stat):\n    \"\"\"Decode DF17 Type Code 1-4\"\"\"\n    cs = []\n    long_cs = data[5] << 16 | data[6] << 8 | data[7]\n    cs.append(CS_CONST[(long_cs >> 18) & 0x3f])\n    cs.append(CS_CONST[(long_cs >> 12) & 0x3f])\n    cs.append(CS_CONST[(long_cs >> 6) & 0x3f])\n    cs.append(CS_CONST[(long_cs) & 0x3f])\n\n    long_cs = data[8] << 16 | data[9] << 8 | data[10]\n    cs.append(CS_CONST[(long_cs >> 18) & 0x3f])\n    cs.append(CS_CONST[(long_cs >> 12) & 0x3f])\n    cs.append(CS_CONST[(long_cs >> 6) & 0x3f])\n    cs.append(CS_CONST[(long_cs) & 0x3f])\n    callsign = ''.join(cs).strip()\n    return \"MSG,1,0,0,{icao},0,{date},{callsign},,,,,,,,0,0,0,0\\r\\n\" \\\n        \"\".format(icao=icao, callsign=callsign,\n        date=get_datetime(current_time))\n\n\ndef decode_df17_gpos(db, data, icao, current_time, stat):\n    \"\"\"Surface position\"\"\"\n    alt = 0\n    f = (data[6] >> 2) & 1\n    lat = (data[6] & 0b11) << 15 | data[7] << 7 | data[8] >> 1\n    lon = (data[8] & 1) << 16 | data[9] << 8 | data[10]\n    rlat, rlon = decode_cpr(db, icao, lat, lon, f, current_time, ground=True)\n    if rlat is None or rlon is None:\n        return None\n    return \"MSG,2,0,0,{icao},0,{date},,{alt},,,{lat},{lon},,,0,0,0,0\\r\\n\" \\\n        \"\".format(icao=icao, alt=alt, lat=rlat, lon=rlon,\n        date=get_datetime(current_time))\n\n\ndef decode_df17(db, data, current_time, stat, icao_filter):\n    \"\"\"Decode an ADS-B packet\"\"\"\n    if len(data) != 14:\n        logging.debug(\"DF17 pkt_len != 14\", len(data))\n        return None\n\n    icao = \"{:02X}{:02X}{:02X}\".format(data[1], data[2], data[3])\n    type_code = (data[4] >> 3) & 0b11111  # 5 bits in byte 5\n    if type_code == 19:\n        result = decode_df17_speed(db, data, icao, current_time, stat)\n        if result is not None:\n            icao_filter.check_icao(icao, current_time)\n        return result\n    elif type_code >= 9 and type_code <= 22:\n        result = decode_df17_pos(db, data, icao, current_time, stat)\n        if result is not None:\n            icao_filter.check_icao(icao, current_time)\n        return result\n    elif type_code >= 1 and type_code <= 4:\n        result = decode_df17_ident(db, data, icao, current_time, stat)\n        if result is not None:\n            icao_filter.check_icao(icao, current_time)\n        return result\n    elif type_code >= 5 and type_code <= 8:\n        result = decode_df17_gpos(db, data, icao, current_time, stat)\n        if result is not None:\n            icao_filter.check_icao(icao, current_time)\n        return result\n    logging.debug(\"Unknown DF17 Type Code\", type_code)\n    return None\n\n\ndef decode_df0(db, data, current_time, stat, icao_filter):\n    \"\"\"Decode DF0\"\"\"\n    if len(data) != 7:\n        logging.debug(\"DF0 pkt_len != 7\", len(data))\n        return None\n    icao = decode_icao(data)\n    if not icao_filter.check_icao(icao, current_time):\n        stat.add_stat('icao_filtered', 1)\n        return None\n    data_alt = ((data[2] & 0b00011111) << 8) + data[3]\n    alt = decode_alt(data_alt)\n    vs = (data[0] >> 5) & 1\n    return \"MSG,7,0,0,{icao},0,{date},,{alt},,,,,,,,,,{vs}\" \\\n        \"\\r\\n\".format(icao=icao, alt=alt, vs=vs,\n        date=get_datetime(current_time))\n\n\ndef decode_df4(db, data, current_time, stat, icao_filter):\n    \"\"\"Decode DF4\"\"\"\n    if len(data) != 7:\n        logging.debug(\"DF4 pkt_len != 7\", len(data))\n        return None\n    icao = decode_icao(data)\n    if not icao_filter.check_icao(icao, current_time):\n        stat.add_stat('icao_filtered', 1)\n        return None\n    
data_alt = ((data[2] & 0b00011111) << 8) + data[3]\n    alt = decode_alt(data_alt)\n    fs = (data[0] >> 5) & 0b111\n    return \"MSG,5,0,0,{icao},0,{date},,{alt},,,,,,,{fs}\" \\\n        \"\\r\\n\".format(icao=icao, alt=alt, fs=decode_fs(fs),\n        date=get_datetime(current_time))\n\n\ndef decode_df5(db, data, current_time, stat, icao_filter):\n    \"\"\"Decode DF5\"\"\"\n    if len(data) != 7:\n        logging.debug(\"DF5 pkt_len != 7\", len(data))\n        return None\n    icao = decode_icao(data)\n    if not icao_filter.check_icao(icao, current_time):\n        stat.add_stat('icao_filtered', 1)\n        return None\n    fs = (data[0] >> 5) & 0b111\n    return \"MSG,6,0,0,{icao},0,{date},,,,,,,,,{fs}\\r\\n\" \\\n        \"\".format(icao=icao, fs=decode_fs(fs),\n        date=get_datetime(current_time))\n\n\ndef decode_df11(db, data, current_time, stat, icao_filter):\n    \"\"\"Decode DF11\"\"\"\n    if len(data) != 7:\n        logging.debug(\"DF11 pkt_len != 7\", len(data))\n        return None\n#    ca = (data[0] >> 5) & 0b111\n    icao = \"{:02X}{:02X}{:02X}\".format(data[1], data[2], data[3])\n    if not icao_filter.check_icao(icao, current_time):\n        stat.add_stat('icao_filtered', 1)\n        return None\n    return \"MSG,8,0,0,{icao},0,{date},,,,,,,,,,,,\\r\\n\" \\\n        \"\".format(icao=icao, date=get_datetime(current_time))\n\n\ndef decode_df20(db, data, current_time, stat, icao_filter):\n    \"\"\"Decode DF20\"\"\"\n    if len(data) != 14:\n        logging.debug(\"DF20 pkt_len != 14\", len(data))\n        return None\n    icao = decode_icao112(data)\n    if not icao_filter.check_icao(icao, current_time):\n        stat.add_stat('icao_filtered', 1)\n        return None\n    data_alt = ((data[2] & 0b00011111) << 8) + data[3]\n    alt = decode_alt(data_alt)\n    fs = data[0] & 0b111\n    return \"MSG,5,0,0,{icao},0,{date},,{alt},,,,,,,{fs}\" \\\n        \"\\r\\n\".format(icao=icao, alt=alt, fs=decode_fs(fs),\n        date=get_datetime(current_time))\n\n\ndef decode_df21(db, data, current_time, stat, icao_filter):\n    \"\"\"Decode DF21\"\"\"\n    if len(data) != 14:\n        logging.debug(\"DF21 pkt_len != 14\", len(data))\n        return None\n    icao = decode_icao112(data)\n    if not icao_filter.check_icao(icao, current_time):\n        stat.add_stat('icao_filtered', 1)\n        return None\n    fs = data[0] & 0b111\n    return \"MSG,6,0,0,{icao},0,{date},,,,,,,,,{fs}\\r\\n\" \\\n        \"\".format(icao=icao, fs=decode_fs(fs),\n        date=get_datetime(current_time))\n\n\ndef decode_df16(db, data, current_time, stat, icao_filter):\n    \"\"\"Decode DF16\"\"\"\n    if len(data) != 14:\n        logging.debug(\"DF16 pkt_len != 14\", len(data))\n        return None\n    icao = decode_icao112(data)\n    if not icao_filter.check_icao(icao, current_time):\n        stat.add_stat('icao_filtered', 1)\n        return None\n    data_alt = ((data[2] & 0b00011111) << 8) + data[3]\n    alt = decode_alt(data_alt)\n    vs = (data[0] >> 2) & 1\n    return \"MSG,7,0,0,{icao},0,{date},,{alt},,,,,,,,,,{vs}\" \\\n        \"\\r\\n\".format(icao=icao, alt=alt, vs=vs,\n        date=get_datetime(current_time))\n\n\nDF_DECODERS = {\n    0: decode_df0,\n    4: decode_df4,\n    5: decode_df5,\n    11: decode_df11,\n    16: decode_df16,\n    17: decode_df17,\n    20: decode_df20,\n    21: decode_df21\n}\n\n\ndef avr2msg(db, data, stat, icao_filter):\n    \"\"\"Decode AVR into MSG\"\"\"\n    if len(data) == 0:\n        return None\n    current_time = time()\n    df = (data[0] >> 3) & 0b11111\n    if df in DF_DECODERS:\n        return DF_DECODERS[df](db, data, current_time, stat, icao_filter)\n    stat.add_stat('unknown_df', 1)\n    logging.debug(\"Unknown DF code:\", df)\n    return None\n\n\ndef hex_to_data(hex_data):\n    \"\"\"Convert a HEX string to binary data\"\"\"\n    data = []\n    hex_len = len(hex_data)\n    idx = 0\n    while idx + 1 < hex_len:\n        
data.append(int(hex_data[idx:idx + 2], 16))\n idx += 2\n return data\n","sub_path":"adsb-sender/avr2msg.py","file_name":"avr2msg.py","file_ext":"py","file_size_in_byte":16892,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"125881130","text":"import os\nimport json\nimport yaml\nimport csv\nimport pandas as pd\nimport argparse\nfrom time import time\nimport io\nimport sqlite3\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"file_name\", type=str)\nargs = parser.parse_args()\n\nfile_name = args.file_name\n\nroot_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\ninput_file_path = root_path + \"/cohana/%s/data.csv\" % file_name\nprint(input_file_path)\nprint(os.listdir(root_path+\"/cohana/%s\"%file_name))\noutput_path = root_path + \"/cohana/%s\" % file_name\n\nraw_output = output_path + \"/raw.csv\"\nyaml_input = output_path + \"/table.yaml\"\nyaml_input2 = output_path + \"/cube.yaml\"\ndim_output = output_path + \"/dim.csv\"\n\n\ndef simpleRead():\n\trawdata = pd.read_csv(input_file_path)\n\trawdata.fillna(\"null\", inplace=True)\n\trawdata.to_csv(raw_output, header=False, index=False)\n\tprint(\"raw save finished\")\n\tspec = {}\n\twith open(yaml_input, 'r') as stream:\n\t\ttry:\n\t\t\tspec = yaml.load(stream)\n\t\texcept yaml.YAMLError as exc:\n\t\t\tprint(exc)\n\n\twith open(dim_output, 'w') as csvfile:\n\t\tdimwriter = csv.writer(csvfile)\n\t\tfor field in spec['fields']:\n\t\t\tif field['dataType'] == 'String':\n\t\t\t\tfor key in rawdata[field['name']].astype('str').unique():\n\t\t\t\t\ttry:\n\t\t\t\t\t\tdimwriter.writerow([field['name'], key])\n\t\t\t\t\texcept Exception:\n\t\t\t\t\t\tpass\n\n\t\t\telif field['fieldType'] == 'ActionTime':\n\t\t\t\tdimwriter.writerow([field['name'], str(rawdata[field['name']].min())+'|'+str(rawdata[field['name']].max())])\n\t\t\telif field['dataType'] == 'Int32':\n\t\t\t\tdimwriter.writerow([field['name'], str(int(rawdata[field['name']].min()))+'|'+str(int(rawdata[field['name']].max()))])\n\t\t\telse:\n\t\t\t\tpass\n\ndef create_dim():\n table = file_name\n conn = None\n print('creating dim...')\n try:\n conn = sqlite3.connect('dim.db')\n c = conn.cursor()\n\n sql = 'select name from sqlite_master where type = \"table\" and name = \"%s\";' % table\n if all(t[0] != table for t in c.execute(sql)):\n sql = 'create table \"%s\" (col VARCHAR(200), value VARCHAR(200));' % table\n c.execute(sql)\n print('table \"%s\" created' % table)\n else:\n sql = 'delete from \"%s\"' % table\n c.execute(sql)\n print('table \"%s\" deleted' % table)\n\n insert_sql = 'INSERT INTO \"%s\" (col, value) VALUES ' % table\n sql = insert_sql\n i = 0\n with io.open(dim_output) as ifile:\n while ifile.readable():\n line = ifile.readline().strip('\\n').split(',')\n if len(line) < 2:\n break\n sql += '(\"%s\", \"%s\"),' % (line[0], line[1])\n i += 1\n if i == 200:\n c.execute(sql.rstrip(',')+';')\n sql = insert_sql\n i = 0\n c.execute(sql.rstrip(',')+';')\n print('value inserted')\n\n conn.commit()\n conn.close()\n\n except Exception as e:\n conn.close()\n raise e\n\nt0 = time()\nprint(\"Preprocessing Started\")\nsimpleRead()\ncreate_dim()\nprint(\"Preprocessing Finished in \"+str(time()-t0))\n\nt0 = time()\nprint(\"Start Loading Engine\")\nos.system(\"mkdir cohana/\"+file_name+\"/000000\")\nos.system(\"java -jar utils/LocalLoader.jar '\"+yaml_input+\"' '\"+dim_output+\"' '\"+raw_output+\"' 'cohana/\"+file_name+\"/000000' 65536\")\nprint(\"java -jar utils/LocalLoader.jar '\"+yaml_input+\"' 
'\"+dim_output+\"' '\"+raw_output+\"' 'cohana/\"+file_name+\"/000000' 65536\")\nprint(\"Loading Finished in \"+str(time()-t0))\n","sub_path":"utils/preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":3472,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"606624423","text":"import pandas as pd\nimport numpy as np\nimport random\nimport tensorflow as tf\n\nclass Main():\n    def Menu(self):\n        while True:\n            loop = True\n            print(u'Enter num units')\n            self.num_units = int(input())\n            print(u'Enter the number of timesteps')\n            self.timesteps = int(input())\n            print(u'Enter the learning rate')\n            self.learning_rate = float(input())\n            print(u'Enter the number of initial iterations')\n            self.initial_iteration = int(input())\n            print(u'Enter the number of step iterations')\n            self.step_iteration = int(input())\n            self.dataFrame = pd.DataFrame(data=None, columns=['rock', 'paper', 'scissors', 'rockn', 'papern',\n                                                              'scissorsn'])\n            for i in range(self.timesteps + 1):\n                response = input()\n                if response == '1':\n                    random_pick = self.prepare(1)\n                elif response == '2':\n                    random_pick = self.prepare(2)\n                elif response == '3':\n                    random_pick = self.prepare(3)\n                elif response == 'restart':\n                    loop = False\n                    break\n                print(random_pick)\n            if loop == True:\n                self.train()\n            while loop == True:\n                response = input()\n                if response == 'o':\n                    self.train()\n                elif response == '1':\n                    self.turn(1)\n                elif response == '2':\n                    self.turn(2)\n                elif response == '3':\n                    self.turn(3)\n                elif response == 'restart':\n                    loop = False\n                else:\n                    print('Invalid command')\n\n    def prepare(self, choice):\n        random_pick = random.randint(1, 3)\n        n_random_choice = self.pick_same(random_pick)\n        h_choice = self.pick_same(choice)\n        newline = pd.DataFrame([np.concatenate((h_choice, n_random_choice), axis=0)],\n                               columns=['rock', 'paper', 'scissors', 'rockn', 'papern', 'scissorsn'])\n        self.dataFrame = self.dataFrame.append(newline, ignore_index=True)\n        return random_pick\n\n    def create_model(self):\n        input_dim = 6\n        output_dim = 3\n        # Network input\n        self.inputs = tf.placeholder(dtype=tf.float32, shape=[None, self.timesteps, input_dim], name='inputs')\n\n        # Network output\n        self.outputs = tf.placeholder(dtype=tf.float32, shape=[None, 3], name='outputs')\n\n        # Turn the first (time) dimension into a list\n        prepare = tf.unstack(self.inputs, self.timesteps, 1)\n\n        # Declare the LSTM cell\n        rnn_cell = tf.contrib.rnn.LSTMCell(num_units=self.num_units, state_is_tuple=True,\n                                           activation=tf.nn.sigmoid)\n\n        # Build the network\n        rnn_outputs, _ = tf.contrib.rnn.static_rnn(cell=rnn_cell, inputs=prepare, dtype=tf.float32)\n\n        # Weights of the output neuron\n        weights = tf.Variable(tf.random_uniform([self.num_units, output_dim]))\n\n        # Biases of the output neuron\n        biases = tf.Variable(tf.random_uniform([output_dim]))\n\n        # Compute the output value (the last value of the LSTM output sequence is used as the signal)\n        mul = tf.matmul(rnn_outputs[-1], weights)\n\n        self.result = tf.nn.softmax(tf.add(mul, biases, name=\"result\"))\n\n        # Loss function of the network\n        # train_loss = tf.reduce_mean(tf.squared_difference(result, outputs), name=\"train_loss\")\n        entropy_loss = tf.nn.softmax_cross_entropy_with_logits_v2(logits=self.result, labels=self.outputs)\n        self.train_loss = tf.reduce_mean(entropy_loss)\n        # Set up the optimizer\n        optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate)\n\n        # Set up the training op\n        self.train_op = optimizer.minimize(self.train_loss)\n\n        self.neur_choice = [1, 0, 0]\n\n        
return\n\n    # Prepare the training data\n\n    def prepare_data(self):\n        X_cols = ['rock', 'paper', 'scissors', 'rockn', 'papern', 'scissorsn']\n        Y_cols = ['rock', 'paper', 'scissors']\n        Y_original = self.dataFrame[Y_cols].values.reshape(-1, len(Y_cols))\n        self.Y = Y_original[self.timesteps:]\n        X_original = self.dataFrame[X_cols].values.reshape(-1, 1, len(X_cols))\n        self.X = np.roll(X_original, self.timesteps, axis=0)\n        # Seed the matrix of input vectors\n        self.X = X_original[:-self.timesteps]\n        # Append the time-shifted vectors\n        for i in range(1, self.timesteps):\n            self.X = np.concatenate((self.X, np.roll(X_original, -i, axis=0)[:-self.timesteps]), axis=1)\n        return self.X, self.Y\n\n    # Train the model\n\n    def train(self):\n        self.prepare_data()\n        self.last_neur_choice = self.X[-1, -2, -3:]\n        self.n_pick = self.X[-1, -1, -3:]\n        self.last_h_choice = self.X[-1, -2, :3]\n        self.h_buffer = self.X[-1, -1, :3]\n        # Build the network graph\n        # self.inputs, self.outputs, self.train_loss, self.train_op = self.create_model()\n        self.create_model()\n        self.init = tf.global_variables_initializer()\n        self.sess = tf.Session()\n        self.sess.run(self.init)\n        last_valid = 0\n        self.train_loss_vector = []\n        for i in range(self.initial_iteration):\n            last_valid, _ = self.sess.run([self.train_loss, self.train_op],\n                                          feed_dict={self.inputs: self.X, self.outputs: self.Y})\n            # if (i + 1) % 5 == 0:\n            #     print(\"Epoch = %d, loss = %0.7f\" % (i + 1, last_valid))\n            self.train_loss_vector.append(last_valid)\n        # print('Training finished')\n\n    def pick_opposite(self, h_choice):\n        if h_choice == 1:\n            return [0, 1, 0]\n        elif h_choice == 2:\n            return [0, 0, 1]\n        elif h_choice == 3:\n            return [1, 0, 0]\n\n    def pick_same(self, h_choice):\n        if h_choice == 1:\n            return [1, 0, 0]\n        elif h_choice == 2:\n            return [0, 1, 0]\n        elif h_choice == 3:\n            return [0, 0, 1]\n\n    def pick_opposite_not_vector(self, h_choice):\n        if h_choice == 1:\n            return 2\n        elif h_choice == 2:\n            return 3\n        elif h_choice == 3:\n            return 1\n\n    def prediction(self):\n        res = self.sess.run(self.result, feed_dict={self.inputs: self.X})\n        # res as chances\n        res = res[-1, :]\n        # res as number\n        res = np.argmax(res) + 1\n        self.n_pick = self.pick_opposite(res)\n        print(self.pick_opposite_not_vector(res))\n\n    def train_step(self):\n        newline = np.concatenate((self.prelast_h_choice, self.prelast_neur_choice), axis=0)\n        newline = newline.reshape(-1, 1, 6)\n        last_line = self.X[-1, 1:, :]\n        last_line = last_line.reshape(1, -1, 6)\n        new_batch = np.concatenate((last_line, newline), axis=1)\n        self.X = np.concatenate((self.X, new_batch), axis=0)\n        y_current = np.expand_dims(self.last_h_choice, axis=0)\n        self.Y = np.concatenate((self.Y, y_current), axis=0)\n        # for i in range(self.current_epoch):\n        #     sss, _ = self.sess.run([self.train_loss, self.train_op],\n        #                            feed_dict={self.inputs: new_batch, self.outputs: y_current})\n        for i in range(self.step_iteration):\n            last_valid, _ = self.sess.run([self.train_loss, self.train_op],\n                                          feed_dict={self.inputs: self.X, self.outputs: self.Y})\n\n    def turn(self, human_pick):\n        human_pick = self.pick_same(human_pick)\n        self.prelast_neur_choice = self.last_neur_choice\n        self.last_neur_choice = self.n_pick\n        self.prelast_h_choice = self.last_h_choice\n        self.last_h_choice = self.h_buffer\n        self.h_buffer = human_pick\n        self.train_step()\n        self.prediction()\n\n\nmain = 
Main()\nmain.Menu()\n","sub_path":"target/RPS.py","file_name":"RPS.py","file_ext":"py","file_size_in_byte":8656,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"519243690","text":"#\n# @lc app=leetcode.cn id=37 lang=python3\n#\n# [37] Sudoku Solver\n#\n\n# @lc code=start\nclass Solution:\n\n    def solveSudoku(self, board: List[List[str]]) -> None:\n        \"\"\"\n        Do not return anything, modify board in-place instead.\n        \"\"\"\n        self.pos = -1\n        self.board = board\n        self.backtrack()\n    \n    def next_pos(self):\n        self.pos += 1\n        if self.pos >= 81:\n            return None\n        return self.pos // 9, self.pos % 9\n    \n    def backtrack(self):\n        pos = self.next_pos()\n        if not pos:\n            return True\n        \n        x, y = pos\n        if self.board[x][y] != \".\":\n            return self.backtrack()\n        \n        for i in range(1, 10):\n            i = str(i)\n            if i in self.collect_cur_row(x, y):\n                continue\n            if i in self.collect_cur_col(x, y):\n                continue\n            if i in self.collect_cur_arround(x, y):\n                continue\n            \n            self.board[x][y] = i\n            if not self.backtrack():\n                self.board[x][y] = \".\"\n                self.pos = x * 9 + y\n            else:\n                return True\n        return False\n    \n    def collect_cur_row(self, i, _) -> list:\n        return self.board[i]\n    \n    def collect_cur_col(self, _, j) -> list:\n        return [self.board[i][j] for i in range(9)]\n    \n    def collect_cur_arround(self, i, j) -> list:\n        x, y = i - i%3, j - j%3\n        return [self.board[x+ii][y+jj] for ii in range(3) for jj in range(3)]\n# @lc code=end","sub_path":"LeetCode/0037/回溯法.py","file_name":"回溯法.py","file_ext":"py","file_size_in_byte":1538,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"266182558","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom setuptools import setup\nfrom setuptools.command.test import test as TestCommand\nimport os\nimport re\nimport sys\n\n\nname = 'betty-cropper'\npackage = 'betty'\ndescription = \"A django-powered image server\"\nurl = \"https://github.com/theonion/betty-cropper\"\nauthor = \"Chris Sinchok\"\nauthor_email = 'csinchok@theonion.com'\nlicense = 'MIT'\n\nsetup_requires = []\n\n\ndef read_requirements(name):\n    return open(os.path.join('requirements', name + '.txt')).readlines()\n\n\nimgmin_requires = read_requirements('imgmin')\n\ndev_requires = read_requirements('dev') + imgmin_requires\n\ninstall_requires = read_requirements('common')\n\n# Optional S3 storage, included for convenience\ns3_requires = read_requirements('s3')\n\n\nif 'test' in sys.argv:\n    setup_requires.extend(dev_requires)\n\n\ndef get_version(package):\n    \"\"\"\n    Return package version as listed in `__version__` in `init.py`.\n    \"\"\"\n    init_py = open(os.path.join(package, \"__init__.py\")).read()\n    return re.search(\"^__version__ = ['\\\"]([^'\\\"]+)['\\\"]\", init_py, re.MULTILINE).group(1)\n\n\ndef get_packages(package):\n    \"\"\"\n    Return root package and all sub-packages.\n    \"\"\"\n    return [dirpath\n            for dirpath, dirnames, filenames in os.walk(package)\n            if os.path.exists(os.path.join(dirpath, '__init__.py'))]\n\n\ndef get_package_data(package):\n    \"\"\"\n    Return all files under the root package, that are not in a\n    package themselves.\n    \"\"\"\n    walk = [(dirpath.replace(package + os.sep, '', 1), filenames)\n            for dirpath, dirnames, filenames in os.walk(package)\n            if not os.path.exists(os.path.join(dirpath, '__init__.py'))]\n\n    filepaths = []\n    for base, filenames in walk:\n        filepaths.extend([os.path.join(base, filename)\n                          for filename in filenames])\n    return {package: filepaths}\n\n\nclass PyTest(TestCommand):\n    def 
finalize_options(self):\n TestCommand.finalize_options(self)\n self.test_args = ['tests']\n self.test_suite = True\n\n def run_tests(self):\n # import here, cause outside the eggs aren't loaded\n import pytest\n errno = pytest.main(self.test_args)\n sys.exit(errno)\n\n\nsetup(\n name=name,\n version=get_version(package),\n url=url,\n license=license,\n description=description,\n author=author,\n author_email=author_email,\n packages=get_packages(package),\n package_data={\n \"betty\": [\"cropper/templates/image.js\", \"cropper/font/OpenSans-Semibold.ttf\"]\n },\n install_requires=install_requires,\n tests_require=dev_requires,\n extras_require={\n 'dev': dev_requires,\n 'imgmin': imgmin_requires,\n 's3': s3_requires,\n },\n entry_points={\n \"console_scripts\": [\n \"betty-cropper = betty.cropper.utils.runner:main\",\n ],\n },\n cmdclass={'test': PyTest}\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2876,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"345018057","text":"import streamlit as st\nimport pandas as pd\nfrom st_aggrid import GridOptionsBuilder, AgGrid, GridUpdateMode, DataReturnMode, JsCode\n\n\ndef editable_table(entry_list, key, head, openentrys=100, height=150, width=1):\n response = AgGrid(\n pd.DataFrame({head: entry_list + [''] * openentrys}), # input_dataframe,\n height=height,\n editable=True,\n sortable=False,\n filter=False,\n resizable=True,\n defaultWidth=width,\n fit_columns_on_grid_load=True,\n key=key)\n st.info('Edit the table by double-click in it and press Enter after changing a cell.')\n returnlist = []\n if 'data' in response:\n all_list = list(response['data'].to_dict()[head].values())\n for element in all_list:\n if element != '' and element is not None:\n returnlist.append(element)\n return returnlist\n","sub_path":"Utils/components.py","file_name":"components.py","file_ext":"py","file_size_in_byte":876,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"595166411","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\n\n\ndef genTab(x_0, x_p, n):\n pom = (x_p - x_0) / (n - 1)\n macierzJeden = np.array([1, 0 * pom + x_0])\n\n for i in range(1, n, 1):\n macierzJeden = np.block([\n [macierzJeden],\n [i + 1, i * pom + x_0],\n ])\n\n macierzDwa = np.array([1, 1, 2])\n\n for i in range(1, n-1, 1):\n macierzDwa = np.block([\n [macierzDwa],\n [i + 1, i + 1, i + 2],\n ])\n return macierzJeden, macierzDwa\n\ndef alokacjaPamieci(n):\n A = np.zeros((n, n))\n b = np.zeros((n, 1))\n return A, b\n\nwezly = np.array([[1, 0],\n [2, 1],\n [3, 0.5],\n [4, 0.75]])\n\nelementy = np.array([[1, 1, 3],\n [2, 4, 2],\n [3, 3, 4]])\n\ntwb_L = 'D'\ntwb_R = 'D'\n\nwwb_L = 0\nwwb_R = 1\n\n\n[wezel, element] = genTab(1, 2, 5)\nwezel = np.array([[1, 0],\n [2, 1],\n [3, 0.5],\n [4, 0.75]])\n\nelement = np.array([[1, 1, 3],\n [2, 4, 2],\n [3, 3, 4]])\ndef rysowanieWykresu(mac):\n wezel = mac[0];\n element = mac[1]\n plt.plot(wezel[:, 1], np.zeros((np.shape(wezel)[0], 1)))\n plt.plot(wezel[:, 1], np.zeros((np.shape(wezel)[0], 1)), 'ro')\n for i in range(np.shape(wezel)[0]):\n plt.text(wezel[i, 1] - 0.02, -0.01, \"x\" + str(int(wezel[i, 0])))\n plt.text(wezel[i, 1] - 0.01, -0.02, str(int(wezel[i, 0])), color=\"green\", fontsize=16)\n for i in range(np.shape(element)[0]):\n pom1 = element[i, 1]\n pom2 = element[i, 2]\n plt.text(wezel[pom1 - 1, 1] + (max(wezel[:,1]))/70, +0.001, \"1\", color=\"red\", fontsize=16)\n plt.text(wezel[pom2 - 
1, 1] - (max(wezel[:,1]))/35, +0.001, \"2\", color=\"red\", fontsize=16)\n        plt.text((wezel[pom1 - 1, 1]+wezel[pom2 - 1,1])/2, +0.02, str(element[i,0]), color=\"blue\", fontsize=16)\n    plt.grid()\n    plt.show()\n\nmacierz = genTab(0,1000,5)\nrysowanieWykresu(macierz)\nprint(wezel)\nprint(element)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2024,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"60402170","text":"import matplotlib.pyplot as plt\n\nplt.figure(num=None, figsize=(8, 6), dpi=90, facecolor='w', edgecolor='k')\narchives = [\"radixsort\",\"mergesort\",\"quicksort\",\"heapsort\"]\nalgorithms = ['RadixSort','MergeSort','QuickSort','HeapSort']\nformats = ['','','','']\nkf = open(\"../Code/data.md\")\nmaxkf = int(kf.readline())\nfor k in range(maxkf):\n    for i in range(4):\n        file = open(archives[i]+'/'+archives[i]+str(k)+\".md\")\n        y = []\n        x = []\n        line = \"\"\n        line2 = \"\"\n        while True:\n            line = file.readline()\n            line2 = file.readline()\n            if(line==''):break\n            x.append(int(line.split()[2]))\n            y.append(float(line2.split()[2])) \n            file.readline()\n        file.close()\n        plt.plot(x,y,formats[i],label = algorithms[i])\n    plt.legend(loc='upper left', shadow=True, fontsize='x-large').get_frame().set_facecolor('#FFFFFF')\n    plt.ylabel(\"q ( n )\")\n    plt.xlabel(\"n\")\n    plt.title(\"O(n) vs O(nlog(n))\")\n    plt.grid()\n    plt.savefig(\"0 - Graphs/fig\"+str(k)+\".png\")\n    plt.clf()\n#plt.show()\n    \n","sub_path":"T1 - Ordenations/O(n)-vs-O(nlog(n))/a-code.py","file_name":"a-code.py","file_ext":"py","file_size_in_byte":1080,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"560318828","text":"#!/usr/bin/env python\r\n\r\n#This program reads incoming bytes on serial port ttyS*,\r\n#decodes the data and writes it to a file with a timestamp.\r\n\r\nfrom time import gmtime, strftime\r\nfrom time import sleep\r\nimport Adafruit_BBIO.GPIO as GPIO #import GPIO Library\r\nimport serial\r\nimport time\r\nf = open('debug-node2.txt', 'wb')\r\nser = serial.Serial(\"/dev/ttyS2\", baudrate=115200, stopbits=1, parity=\"N\", timeout=1)\r\n\r\n\r\nwhile True:\r\n    tid = strftime(\"%Y-%m-%d %H:%M:%S\")\r\n    in_data = ser.readline()\r\n    if (ser.inWaiting()>0):\r\n        #print ('Node2:\\n')\r\n        #print(tid)\r\n        #print(in_data)\r\n        #f.write('Node2:\\n')\r\n        f.write(tid)\r\n        f.write('\\n')\r\n        f.write(in_data)\r\n        f.write('\\n')\r\n\r\nf.close()\r\nser.close()\r\nser.flush()\r\n","sub_path":"debug-node2.py","file_name":"debug-node2.py","file_ext":"py","file_size_in_byte":776,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"412571123","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom io import open\nimport os\nimport sys\n\ntry:\n    from setuptools import setup\nexcept ImportError:\n    from distutils.core import setup\n\nimport dragonmapper\n\nif sys.argv[-1] == 'publish':\n    os.system('python setup.py sdist upload')\n    sys.exit()\n\n\ndef open_file(filename):\n    \"\"\"Open and read the file *filename*.\"\"\"\n    with open(filename, encoding='utf-8') as f:\n        return f.read()\n\nreadme = open_file('README.rst')\nhistory = open_file('CHANGES.rst').replace('.. 
:changelog:', '')\n\nsetup(\n name='dragonmapper',\n version=dragonmapper.__version__,\n author='Thomas Roten',\n author_email='thomas@roten.us',\n url='https://github.com/tsroten/dragonmapper',\n description=('Identification and conversion functions for Chinese '\n 'text processing'),\n long_description=readme + '\\n\\n' + history,\n platforms='any',\n classifiers=[\n 'Programming Language :: Python',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: OS Independent',\n 'Intended Audience :: Developers',\n 'Natural Language :: English',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Topic :: Text Processing :: Linguistic',\n ],\n keywords=['chinese', 'mandarin', 'transcription', 'pinyin', 'zhuyin',\n 'ipa', 'convert', 'bopomofo', 'hanzi', 'characters', 'readings',\n 'html'],\n packages=['dragonmapper', 'dragonmapper.data'],\n package_data={'dragonmapper': ['data/*.tsv', 'data/*.csv']},\n test_suite='dragonmapper.tests',\n install_requires=['zhon>=1.1.3', 'hanzidentifier>=1.0.2'],\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1910,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"315491964","text":"import argparse\nfrom typing import List\nimport math\nfrom datetime import datetime\nimport requests\nimport yfinance as yf\nimport mplfinance as mpf\nimport pandas as pd\nfrom gamestonk_terminal import config_terminal as cfg\nfrom gamestonk_terminal.helper_funcs import (\n parse_known_args_and_warn,\n plot_autoscale,\n)\n\n\ndef get_pattern_recognition(ticker: str, resolution: str) -> pd.DataFrame:\n \"\"\"Get pattern recognition data\n\n Parameters\n ----------\n ticker : str\n Ticker to get pattern recognition data\n resolution : str\n Resolution of data to get pattern recognition from\n\n Returns\n -------\n pd.DataFrame\n Get datapoints corresponding to pattern signal data\n \"\"\"\n response = requests.get(\n f\"https://finnhub.io/api/v1/scan/pattern?symbol={ticker}&resolution={resolution}&token={cfg.API_FINNHUB_KEY}\"\n )\n if response.status_code == 200:\n d_data = response.json()\n if \"points\" in d_data:\n return pd.DataFrame(d_data[\"points\"]).T\n\n return pd.DataFrame()\n\n\ndef plot_pattern_recognition(ticker: str, pattern: pd.DataFrame):\n \"\"\"Plot pattern recognition signal\n\n Parameters\n ----------\n ticker : str\n Ticker to display pattern recognition on top of the data\n pattern : pd.DataFrame\n Pattern recognition signal data\n \"\"\"\n\n l_segments = list()\n for i in pattern:\n a_part = (\"\", \"\")\n if \"aprice\" in pattern[i]:\n if pattern[i][\"aprice\"] != 0 and not math.isnan(pattern[i][\"aprice\"]):\n a_part = (\n datetime.utcfromtimestamp(pattern[i][\"atime\"]).strftime(\"%Y-%m-%d\"),\n pattern[i][\"aprice\"],\n )\n\n b_part = (\"\", \"\")\n if \"bprice\" in pattern[i]:\n if pattern[i][\"bprice\"] != 0 and not math.isnan(pattern[i][\"bprice\"]):\n b_part = (\n datetime.utcfromtimestamp(pattern[i][\"btime\"]).strftime(\"%Y-%m-%d\"),\n pattern[i][\"bprice\"],\n )\n\n c_part = (\"\", \"\")\n if \"cprice\" in pattern[i]:\n if pattern[i][\"cprice\"] != 0 and not math.isnan(pattern[i][\"cprice\"]):\n c_part = (\n datetime.utcfromtimestamp(pattern[i][\"ctime\"]).strftime(\"%Y-%m-%d\"),\n pattern[i][\"cprice\"],\n )\n\n d_part = 
(\"\", \"\")\n if \"dprice\" in pattern[i]:\n if pattern[i][\"dprice\"] != 0 and not math.isnan(pattern[i][\"dprice\"]):\n d_part = (\n datetime.utcfromtimestamp(pattern[i][\"dtime\"]).strftime(\"%Y-%m-%d\"),\n pattern[i][\"dprice\"],\n )\n\n segment = (a_part, b_part, c_part, d_part)\n\n l_segment = list(segment)\n while (\"\", \"\") in l_segment:\n l_segment.remove((\"\", \"\"))\n segm = tuple(l_segment)\n\n l_segments.append(segm)\n\n start_time = 999999999999\n for i in pattern:\n if pattern[i][\"atime\"] < start_time:\n start_time = pattern[i][\"atime\"]\n\n df_stock = yf.download(\n ticker,\n start=datetime.utcfromtimestamp(start_time).strftime(\"%Y-%m-%d\"),\n progress=False,\n )\n\n df_stock[\"date_id\"] = (df_stock.index.date - df_stock.index.date.min()).astype(\n \"timedelta64[D]\"\n )\n df_stock[\"date_id\"] = df_stock[\"date_id\"].dt.days + 1\n\n df_stock[\"OC_High\"] = df_stock[[\"Open\", \"Close\"]].max(axis=1)\n df_stock[\"OC_Low\"] = df_stock[[\"Open\", \"Close\"]].min(axis=1)\n\n mc = mpf.make_marketcolors(\n up=\"green\", down=\"red\", edge=\"black\", wick=\"black\", volume=\"in\", ohlc=\"i\"\n )\n\n s = mpf.make_mpf_style(marketcolors=mc, gridstyle=\":\", y_on_right=False)\n\n mpf.plot(\n df_stock,\n type=\"candle\",\n volume=False,\n title=f\"\\n{ticker}\",\n alines=l_segments,\n xrotation=10,\n style=s,\n figratio=(10, 7),\n figscale=1.10,\n figsize=plot_autoscale(),\n update_width_config=dict(\n candle_linewidth=1.0, candle_width=0.8, volume_linewidth=1.0\n ),\n )\n\n for ix in range(len(pattern.columns)):\n print(f\"From {l_segments[ix][0][0]} to {l_segments[ix][-1][0]}\")\n print(f\"Pattern: {pattern[0]['patternname']} ({pattern[0]['patterntype']})\")\n print(\"\")\n\n\ndef pattern_recognition_view(other_args: List[str], ticker: str):\n \"\"\"Display pattern recognition signals on the data\n\n Parameters\n ----------\n other_args : List[str]\n Command line arguments to be processed with argparse\n ticker : str\n Ticker to display pattern recognition on top of the data\n \"\"\"\n parser = argparse.ArgumentParser(\n add_help=False,\n prog=\"pr\",\n description=\"\"\"\n Display pattern recognition signals on the data. 
[Source: https://finnhub.io]\n \"\"\",\n )\n parser.add_argument(\n \"-r\",\n \"--resolution\",\n action=\"store\",\n dest=\"resolution\",\n type=str,\n default=\"D\",\n choices=[\"1\", \"5\", \"15\", \"30\", \"60\", \"D\", \"W\", \"M\"],\n help=\"Plot resolution to look for pattern signals\",\n )\n\n try:\n ns_parser = parse_known_args_and_warn(parser, other_args)\n if not ns_parser:\n return\n\n df_pattern = get_pattern_recognition(ticker, ns_parser.resolution)\n\n if df_pattern.empty:\n print(\"No pattern identified in this data\", \"\\n\")\n return\n\n plot_pattern_recognition(ticker, df_pattern)\n\n except Exception as e:\n print(e, \"\\n\")\n","sub_path":"gamestonk_terminal/technical_analysis/finnhub_view.py","file_name":"finnhub_view.py","file_ext":"py","file_size_in_byte":5537,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"184156133","text":"import fetchurls\nimport sys\nfrom urllib.parse import urlparse\n\nif len(sys.argv) < 2:\n\tsys.exit(0)\nprint('fetching_base_url')\nurl=urlparse(sys.argv[1])\n\n\"\"\"if url.netloc == \"\":\n\tbase=url.path\nelse:\n\tbase=url.netloc\"\"\"\n\nbase=a=url.scheme+\"://\"+url.netloc\ni=fetchurls.fetching(base,sys.argv[1])\n\nj=[]\nfor a in i:\n\tj=j+fetchurls.fetching(base,a)\n\n\nprint('fetching_done')\nj=i+j\nj=set(j)\nj=list(j)\n\n#p=[]\n#for a in j:\n#\tp=p+fetchurls.fetching(a)\n\n#p=i+j+p\n#p=set(p)\n#p=list(p)\nif len(sys.argv) < 3:\t\n\tf=open('crawlerdata.txt','a')\t\nelse:\n\tf=open(sys.argv[2],'a')\n\nfor k in j:\n\tf.write(str(k)+'\\n')\nf.close()\n#j=j+i\n#j=set(j)\n#j=list(j)\n","sub_path":"crawler.py","file_name":"crawler.py","file_ext":"py","file_size_in_byte":630,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"241361847","text":"import re\nimport sys\nimport requests\nimport time\nsys.path.append('../')\nfrom webRequest.webRequest import WebRequest\nfrom utilFunction import getHtmlTree\nclass GetFreeProxy(object):\n def __init__(self):\n pass\n\n @staticmethod\n def getProxyMethods():\n # 获取代理IP的函数名称及其IP来源\n return {\n # 'sixProxy': 'http://www.66ip.cn',\n # 'gbjProxy': 'http://www.goubanjia.com/',\n # 'fastProxy': ' https://www.kuaidaili.com',\n # 'cloudProxy': 'http://www.ip3366.net/free/',\n # 'seaProxy': 'http://www.iphai.com/free/ng',\n 'moguProxy': 'http://mogumiao.com'\n }\n\n @staticmethod\n def sixProxy(area=33, page=1):\n # 高匿\n # 抓取代理页数,page=1北京代理页,page=2上海代理页......\n if area > 33:\n area = 33\n for area_index in range(1, area + 1):\n for i in range(1, page + 1):\n url = \"http://www.66ip.cn/areaindex_{}/{}.html\".format(area_index, i)\n html_tree = getHtmlTree(url)\n tr_list = html_tree.xpath(\"//*[@id='footer']/div/table/tr[position()>1]\")\n if len(tr_list) == 0:\n continue\n for tr in tr_list:\n yield tr.xpath(\"./td[1]/text()\")[0] + \":\" + tr.xpath(\"./td[2]/text()\")[0]\n break\n\n @staticmethod\n def seaProxy():\n # IP海代理 http://www.iphai.com/free/ng\n # 解析网页代理方法\n urls = [\n 'http://www.iphai.com/free/ng',\n 'http://www.iphai.com/free/wg',\n ]\n request = WebRequest()\n for url in urls:\n r = request.get(url)\n proxies = re.findall(r'<td>\\s*?(\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3})\\s*?</td>[\\s\\S]*?<td>\\s*?(\\d+)\\s*?</td>',\n r.text)\n for proxy in proxies:\n yield \":\".join(proxy)\n\n @staticmethod\n def moguProxy():\n # a = {\n # \"code\": \"0\",\n # \"msg\":\n # [\n # {\"port\": \"37554\", \"ip\": \"218.73.128.104\"},\n # {\"port\": \"35591\", \"ip\": \"115.215.48.233\"},\n # {\"port\": 
\"20051\", \"ip\": \"218.66.146.184\"},\n # {\"port\": \"30917\", \"ip\": \"115.215.56.150\"},\n # {\"port\": \"28380\", \"ip\": \"117.69.97.134\"}]\n # }\n url = 'http://piping.mogumiao.com/proxy/api/get_ip_bs?appKey=744676010f8148a3a2890a184b0f013e&count=10&expiryDate=0&format=1&newLine=2'\n data = requests.get(url=url).json()\n while True:\n if(data['code']=='3001'):\n time.sleep(1)\n data = requests.get(url=url).json()\n break\n data = data['msg']\n for item in data:\n proxy = item['ip']+':'+item['port']\n yield proxy\n\n @staticmethod\n def wuyouProxy(page=10):\n \"\"\"\n 无忧代理 http://www.data5u.com/\n 几乎没有能用的\n :param page: 页数\n :return:\n \"\"\"\n url_list = [\n 'http://www.data5u.com/free/gwgn/index.shtml',\n 'http://www.data5u.com/free/gngn/index.shtml',\n ]\n for url in url_list:\n html_tree = getHtmlTree(url)\n ul_list = html_tree.xpath('//ul[@class=\"l2\"]')\n for ul in ul_list:\n try:\n yield ':'.join(ul.xpath('.//li/text()')[0:2])\n except Exception as e:\n print(e)\n # ok\n\n\n # em\n @staticmethod\n def manongProxy():\n \"\"\"\n 部分透明\n\n 码农代理 https://proxy.coderbusy.com/\n :return:\n \"\"\"\n urls = ['https://proxy.coderbusy.com/classical/country/cn.aspx?page=1']\n request = WebRequest()\n for url in urls:\n r = request.get(url)\n proxies = re.findall('data-ip=\"(\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3})\".+?>(\\d+)</td>', r.text)\n for proxy in proxies:\n # print(proxy)\n yield ':'.join(proxy)\n\n # no\n @staticmethod\n def xiciProxy(page_count=2):\n \"\"\"\n 透明的,不想用\n\n 西刺代理 http://www.xicidaili.com\n :return:\n \"\"\"\n url_list = [\n 'http://www.xicidaili.com/nn/', # 高匿\n 'http://www.xicidaili.com/nt/', # 透明\n ]\n for each_url in url_list:\n for i in range(1, page_count + 1):\n page_url = each_url + str(i)\n tree = getHtmlTree(page_url)\n proxy_list = tree.xpath('.//table[@id=\"ip_list\"]//tr[position()>1]')\n for proxy in proxy_list:\n try:\n yield ':'.join(proxy.xpath('./td/text()')[0:2])\n except Exception as e:\n pass\n\n # ok\n @staticmethod\n def gbjProxy():\n \"\"\"\n guobanjia http://www.goubanjia.com/\n :return:\n \"\"\"\n url = \"http://www.goubanjia.com/\"\n tree = getHtmlTree(url)\n proxy_list = tree.xpath('//td[@class=\"ip\"]')\n # 此网站有隐藏的数字干扰,或抓取到多余的数字或.符号\n # 需要过滤掉<p style=\"display:none;\">的内容\n xpath_str = \"\"\".//*[not(contains(@style, 'display: none'))\n and not(contains(@style, 'display:none'))\n and not(contains(@class, 'port'))\n ]/text()\n \"\"\"\n for each_proxy in proxy_list:\n try:\n # :符号裸放在td下,其他放在div span p中,先分割找出ip,再找port\n ip_addr = ''.join(each_proxy.xpath(xpath_str))\n port = each_proxy.xpath(\".//span[contains(@class, 'port')]/text()\")[0]\n yield '{}:{}'.format(ip_addr, port)\n except Exception as e:\n pass\n\n # ok\n @staticmethod\n def fastProxy():\n \"\"\"\n 快代理 https://www.kuaidaili.com\n \"\"\"\n url_list = [\n 'https://www.kuaidaili.com/free/inha/{page}/',\n 'https://www.kuaidaili.com/free/intr/{page}/'\n ]\n for url in url_list:\n for page in range(1, 5):\n page_url = url.format(page=page)\n tree = getHtmlTree(page_url)\n proxy_list = tree.xpath('.//table//tr')\n for tr in proxy_list[1:]:\n yield ':'.join(tr.xpath('./td/text()')[0:2])\n\n # no\n @staticmethod\n def mimiProxy():\n \"\"\"\n 秘密代理 http://www.mimiip.com\n 估计没啥可用的,暂时不用了\n \"\"\"\n url_gngao = ['http://www.mimiip.com/gngao/%s' % n for n in range(1, 10)] # 国内高匿\n # url_gnpu = ['http://www.mimiip.com/gnpu/%s' % n for n in range(1, 10)] # 国内普匿\n # url_gntou = ['http://www.mimiip.com/gntou/%s' % n for n in range(1, 10)] # 国内透明\n url_gw = ['http://www.mimiip.com/hw/%s' % n 
for n in range(1, 10)] #国外\n url_list = url_gngao + url_gw\n\n request = WebRequest()\n for url in url_list:\n r = request.get(url, use_proxy=True)\n proxies = re.findall(r'<td>(\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3})</td>[\\w\\W].*<td>(\\d+)</td>', r.text)\n for proxy in proxies:\n yield ':'.join(proxy)\n\n # ok\n @staticmethod\n def cloudProxy():\n \"\"\"\n 云代理\n 高匿\n :return:\n \"\"\"\n urls = ['http://www.ip3366.net/free/']\n request = WebRequest()\n for url in urls:\n r = request.get(url)\n proxies = re.findall(r'<td>(\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3})</td>[\\s\\S]*?<td>(\\d+)</td>', r.text)\n for proxy in proxies:\n yield \":\".join(proxy)\n\n # ok\n\n # em\n @staticmethod\n def fwallProxy():\n \"\"\"\n 墙外网站 cn-proxy\n 不确定是否匿名\n :return:\n \"\"\"\n urls = ['http://cn-proxy.com/', 'http://cn-proxy.com/archives/218']\n request = WebRequest()\n for url in urls:\n r = request.get(url)\n proxies = re.findall(r'<td>(\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3})</td>[\\w\\W]<td>(\\d+)</td>', r.text)\n for proxy in proxies:\n yield ':'.join(proxy)\n\n # em\n @staticmethod\n def swallProxy():\n \"\"\"\n 部分匿名\n https://proxy-list.org/english/index.php\n :return:\n \"\"\"\n urls = ['https://proxy-list.org/english/index.php?p=%s' % n for n in range(1, 10)]\n request = WebRequest()\n import base64\n for url in urls:\n r = request.get(url)\n proxies = re.findall(r\"Proxy\\('(.*?)'\\)\", r.text)\n for proxy in proxies:\n yield base64.b64decode(proxy).decode()\n\n # em\n @staticmethod\n def twallProxy():\n urls = ['https://list.proxylistplus.com/Fresh-HTTP-Proxy-List-1']\n request = WebRequest()\n for url in urls:\n r = request.get(url)\n proxies = re.findall(r'<td>(\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3})</td>[\\s\\S]*?<td>(\\d+)</td>', r.text)\n for proxy in proxies:\n yield ':'.join(proxy)\n\n\nif __name__ == '__main__':\n getfree = GetFreeProxy()\n getfree.moguProxy()\n # url = 'https://indienova.com/steam/mustbuy'\n # headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:34.0) Gecko/20100101 Firefox/34.0'}\n # pp = GetFreeProxy()\n # for i in pp.wuyouProxy():\n # proxies = {\n # 'http':i\n # }\n # print(i)\n # print(requests.get(url=url, headers=headers, proxies=proxies).status_code)\n\n\n","sub_path":"new_project/FreeProxy/getFreeProxy.py","file_name":"getFreeProxy.py","file_ext":"py","file_size_in_byte":9847,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"195138205","text":"# -*- coding: utf-8 -*-\n\nfrom collective.archetypes.select2.select2widget import Select2Widget as CollectiveSelect2Widget\nfrom collective.archetypes.select2.select2widget import MultiSelect2Widget as CollectiveMultiSelect2Widget\nfrom Products.urban.UrbanVocabularyTerm import UrbanVocabulary\nfrom zope.component import getUtility\nfrom zope.schema.interfaces import IVocabularyFactory\n\nimport logging\nimport six\n\nlogger = logging.getLogger(\"urban debug\")\n\n\ndef resolve_vocabulary(context, field, values):\n if type(field.vocabulary) == UrbanVocabulary:\n result = [\n field.vocabulary.getAllVocTerms(context)[value].title\n for value in values\n if value\n ]\n elif type(field.vocabulary) == str:\n display_list = getattr(context, field.vocabulary)()\n result = [display_list.getValue(value) for value in values if value]\n elif type(field.vocabulary) == tuple and getattr(\n field, \"vocabulary_factory\", False\n ):\n vocabulary_factory = field.vocabulary_factory\n factory = getUtility(IVocabularyFactory, vocabulary_factory)\n vocabulary = 
factory(context)\n result = [vocabulary.by_token[value].title for value in values if value]\n if len(result) != len(filter(None, result)):\n logger.info(\n \"{0}: Unknown value for field '{1}' in '{2}'\".format(\n context.absolute_url(), field.__name__, values\n )\n )\n return \", \".join(filter(None, result))\n\n\nclass Select2Widget(CollectiveSelect2Widget):\n def view(self, context, field, request):\n values = super(Select2Widget, self).view(context, field, request)\n return resolve_vocabulary(context, field, values)\n\n\nclass MultiSelect2Widget(CollectiveMultiSelect2Widget):\n def view(self, context, field, request):\n values = super(MultiSelect2Widget, self).view(context, field, request)\n if values != getattr(context, field.__name__):\n # inexpected stored value\n logger.info(\n \"{0}: Inexpected value for field '{1}'\".format(\n context.absolute_url(),\n field.__name__,\n )\n )\n value = getattr(context, field.__name__)\n if isinstance(value, six.string_types):\n values = (value,)\n return resolve_vocabulary(context, field, values)\n","sub_path":"src/Products/urban/widget/select2widget.py","file_name":"select2widget.py","file_ext":"py","file_size_in_byte":2389,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"220244872","text":"import os\nimport cv2\nimport numpy as np\nfrom PIL import Image\nimport pickle\n\n#looking for image and adding them into a some sort of list\n\nBASE_DIR = os.path.dirname(os.path.abspath(__file__))\n\nimage_dir = os.path.join(BASE_DIR,\"images\")\n\n#import cascade file\nface_cascade = cv2.CascadeClassifier('cascades/data/haarcascade_frontalface_alt2.xml')\n\n#get the recognizer \nrecognizer = cv2.face.LBPHFaceRecognizer_create()\n\ncurrent_id = 0\nlabel_ids = {} #created a dictionary\ny_labels = []\nx_train = [] \n\n#see those image in there\nfor root, dirs, files in os.walk(image_dir):\n for file in files:\n if file.endswith(\"png\") or file.endswith(\"jpg\"):\n path = os.path.join(root,file)\n #label the directory name\n label = os.path.basename(root).replace(\" \",\"-\").lower()\n #print(label,path)\n if not label in label_ids: \n label_ids[label] = current_id\n current_id += 1\n \n id_ = label_ids[label]\n #print(label_ids)\n #y_labels.append(label) #some number \n #x_train.append(path) #verify this image,turn into a NUMPY array, GRAY\n\n #train images \n\n #every pixel covert into numbers and save it in a numpy arrray\n pil_image = Image.open(path).convert(\"L\")#greyscale\n\n #resize images for training\n size = (550,550)\n final_image = pil_image.resize(size,Image.ANTIALIAS)\n\n image_array = np.array(final_image,\"uint8\")\n #print(image_array)\n\n faces = face_cascade.detectMultiScale(image_array, scaleFactor=1.5, minNeighbors=5)\n\n for(x,y,w,h) in faces:\n roi = image_array[y:y+h,x:x+w]\n x_train.append(roi)\n y_labels.append(id_)\n\n#print(y_labels)\n#print(x_train)\n\nwith open(\"labels.pickle\",'wb') as f:\n pickle.dump(label_ids,f)\n\n#train the face recognizer\n\nrecognizer.train(x_train, np.array(y_labels))\nrecognizer.save(\"trainner.yml\")\n\n\n\n","sub_path":"src/faces-train.py","file_name":"faces-train.py","file_ext":"py","file_size_in_byte":2047,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"401842939","text":"import asyncio\nfrom time import sleep\nfrom bleak import discover\n\nasync def scan(mac_addrs):\n while True:\n #print('Start scanning')\n tstart = loop.time()\n devices = await discover()\n #print('Found %d 
devices'%(len(devices)))\n #for i in range(len(devices)):\n # print(\"[%d] : %s\" % (i, devices[i]))\n for dev in devices:\n dev_mac = str(dev).split(': ')[0]\n if dev_mac in mac_addrs:\n print(dev_mac, 'detected at', dev.rssi, 'dBm')\n #print(dev.rssi)\n telapsed = loop.time() - tstart\n #print('Elapsed time: %.1f'%(telapsed))\n #await asyncio.sleep(6 - telapsed)\n\nif __name__ == '__main__':\n mac_addrs = (\"D6:07:04:2B:F7:B1\") # surachai\n #mac_addrs = (\"E0:75:58:38:05:A4\") # inn\n \n loop = asyncio.get_event_loop()\n loop.create_task(scan(mac_addrs))\n try:\n loop.run_forever()\n except KeyboardInterrupt:\n loop.close()\n print('Program stopped')\n","sub_path":"python-scan-mac/scanDevice.py","file_name":"scanDevice.py","file_ext":"py","file_size_in_byte":1004,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"530878412","text":"import os\nimport sys\nimport logging\nimport shutil\nimport six\n\nCURR_DIR = os.path.dirname(os.path.realpath(__file__))\nsys.path.append(os.path.join(os.path.abspath(os.path.join(CURR_DIR, os.pardir)), 'vnc-windows'))\nimport gym_windows\n\nlogger = logging.getLogger()\n\nUNIVERSE_BACKUP_NAME = 'universe-windows-envs'\nUNIVERSE_WINDOWS_ENVS_DIR = os.environ['UNIVERSE_WINDOWS_ENVS_DIR']\nBACKUP_DIR = os.path.expanduser('~\\\\Documents\\\\Universe\\\\envs-update-backup')\n\n\ndef backup_update_executables():\n gym_windows.run_win_cmd('rmdir %s /s /q' % BACKUP_DIR)\n backed_up = False\n for dirpath, dnames, fnames in os.walk(UNIVERSE_WINDOWS_ENVS_DIR):\n for fname in fnames:\n if fname.endswith('.py'):\n old_path = os.path.join(dirpath, fname)\n rel_path = old_path[len(UNIVERSE_WINDOWS_ENVS_DIR):]\n new_path = BACKUP_DIR + rel_path\n new_dir = os.path.dirname(new_path)\n if not os.path.exists(new_dir):\n os.makedirs(new_dir)\n shutil.copy2(old_path, new_path)\n backed_up = True\n\n if not backed_up:\n raise Exception('No files found in %s pointed to by the %s environment variable' % (\n UNIVERSE_WINDOWS_ENVS_DIR, 'UNIVERSE_WINDOWS_ENVS_DIR'))\n\n\ndef try_updating(update_fn, restore_fn):\n try:\n update_fn()\n except:\n exc_info = sys.exc_info()\n try:\n restore_fn()\n except:\n # If this happens, it clobbers exc_info, which is why we had\n # to save it above\n import traceback\n logger.error('Error updating. Please reinstall universe-windows-envs')\n traceback.print_exc()\n six.reraise(*exc_info)\n logger.error('Error updating, please retry. (You may need to close programs with universe-windows-envs files'\n ' open). 
See below for more details.')\n six.reraise(*exc_info)\n\n\ndef update():\n logger.info('Updating windows environments, download SIZE is ~2GB')\n gym_windows.download_folder('https://www.dropbox.com/s/ljx7uiodptxr0f3/universe-windows-envs.zip?dl=1',\n os.path.dirname(os.environ['UNIVERSE_WINDOWS_ENVS_DIR']), warn_existing=False)\n\n\ndef restore():\n shutil.copytree(BACKUP_DIR, UNIVERSE_WINDOWS_ENVS_DIR)\n\n\ndef main():\n backup_update_executables() # In case the update fails, we can restore these and try again\n gym_windows.run_win_cmd('rmdir %s /s /q' % UNIVERSE_WINDOWS_ENVS_DIR)\n try_updating(update, restore_fn=restore)\n logger.info('Update complete')\n\n\nif __name__ == '__main__':\n logging.basicConfig()\n logger.setLevel(logging.INFO)\n sys.exit(main())\n","sub_path":"vnc-gtav/update.py","file_name":"update.py","file_ext":"py","file_size_in_byte":2716,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"71138591","text":"# coding: utf-8\n\n# # Batch Normalization\n# One way to make deep networks easier to train is to use more sophisticated optimization procedures such as SGD+momentum, RMSProp, or Adam.\n# Another strategy is to change the architecture of the network to make it easier to train. One idea along these lines is\n# batch normalization which was recently proposed by [3].\n#\n# The idea is relatively straightforward. Machine learning methods tend to work better when their input data consists of uncorrelated\n# features with zero mean and unit variance. When training a neural network, we can preprocess the data before feeding it to the network\n# to explicitly decorrelate its features; this will ensure that the first layer of the network sees data that follows a nice distribution.\n# However even if we preprocess the input data, the activations at deeper layers of the network will likely no longer be decorrelated and\n# will no longer have zero mean or unit variance since they are output from earlier layers in the network. Even worse, during the training\n# process the distribution of features at each layer of the network will shift as the weights of each layer are updated.\n#\n# The authors of [3] hypothesize that the shifting distribution of features inside deep neural networks may make training deep networks\n# more difficult. To overcome this problem, [3] proposes to insert batch normalization layers into the network. At training time, a batch\n# normalization layer uses a minibatch of data to estimate the mean and standard deviation of each feature. These estimated means and\n# standard deviations are then used to center and normalize the features of the minibatch. A running average of these means and standard\n# deviations is kept during training, and at test time these running averages are used to center and normalize features.\n#\n# It is possible that this normalization strategy could reduce the representational power of the network, since it may sometimes be\n# optimal for certain layers to have features that are not zero-mean or unit variance. 
To this end, the batch normalization layer\n# includes learnable shift and scale parameters for each feature dimension.\n#\n# [3] Sergey Ioffe and Christian Szegedy, \"Batch Normalization: Accelerating Deep Network Training by Reducing\n# Internal Covariate Shift\", ICML 2015.\n\nimport time\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom random import randrange\nfrom pythonML.notebooks.Pytorch.sandbox.TwoLayerAffineNet import *\nfrom pythonML.notebooks.Pytorch.sandbox.TwoLayerFCNetUtils import *\n\n\ndef test_batch_norm():\n # Check the training-time forward pass by checking means and variances\n # of features both before and after batch normalization\n\n # Simulate the forward pass for a two-layer network\n np.random.seed(231)\n N, D1, D2, D3 = 200, 50, 60, 3\n X = np.random.randn(N, D1)\n W1 = np.random.randn(D1, D2)\n W2 = np.random.randn(D2, D3)\n a = np.maximum(0, X.dot(W1)).dot(W2)\n\n print('Before batch normalization:')\n print(' means: ', a.mean(axis=0))\n print(' stds: ', a.std(axis=0))\n\n # Means should be close to zero and stds close to one\n print('After batch normalization (gamma=1, beta=0)')\n a_norm, _ = batchnorm_forward(a, np.ones(D3), np.zeros(D3), {'mode': 'train'})\n print(' mean: ', a_norm.mean(axis=0))\n print(' std: ', a_norm.std(axis=0))\n\n # Now means should be close to beta and stds close to gamma\n gamma = np.asarray([1.0, 2.0, 3.0])\n beta = np.asarray([11.0, 12.0, 13.0])\n a_norm, _ = batchnorm_forward(a, gamma, beta, {'mode': 'train'})\n print('After batch normalization (nontrivial gamma, beta)')\n print(' means: ', a_norm.mean(axis=0))\n print(' stds: ', a_norm.std(axis=0))\n\n\ndef test_time_batch_norm():\n # Check the test-time forward pass by running the training-time\n # forward pass many times to warm up the running averages, and then\n # checking the means and variances of activations after a test-time\n # forward pass.\n np.random.seed(231)\n N, D1, D2, D3 = 200, 50, 60, 3\n W1 = np.random.randn(D1, D2)\n W2 = np.random.randn(D2, D3)\n\n bn_param = {'mode': 'train'}\n gamma = np.ones(D3)\n beta = np.zeros(D3)\n for t in range(50):\n X = np.random.randn(N, D1)\n a = np.maximum(0, X.dot(W1)).dot(W2)\n batchnorm_forward(a, gamma, beta, bn_param)\n bn_param['mode'] = 'test'\n X = np.random.randn(N, D1)\n a = np.maximum(0, X.dot(W1)).dot(W2)\n a_norm, _ = batchnorm_forward(a, gamma, beta, bn_param)\n\n print('Means should be close to zero and stds close to one, but will be')\n print('noisier than training-time forward passes.')\n print('After batch normalization (test-time):')\n print(' means: ', a_norm.mean(axis=0))\n print(' stds: ', a_norm.std(axis=0))\n\n\ndef test_backward_batch_norm():\n # Gradient check batchnorm backward pass\n # Now implement the backward pass for batch normalization in the function batchnorm_backward.\n # To derive the backward pass you should write out the computation graph for batch normalization and backprop through each\n # of the intermediate nodes. 
Some intermediates may have multiple outgoing branches; make sure to sum gradients across\n # these branches in the backward pass.\n # Once you have finished, run the following to numerically check your backward pass.\n np.random.seed(231)\n N, D = 4, 5\n x = 5 * np.random.randn(N, D) + 12\n gamma = np.random.randn(D)\n beta = np.random.randn(D)\n dout = np.random.randn(N, D)\n\n bn_param = {'mode': 'train'}\n fx = lambda x: batchnorm_forward(x, gamma, beta, bn_param)[0]\n fg = lambda a: batchnorm_forward(x, a, beta, bn_param)[0]\n fb = lambda b: batchnorm_forward(x, gamma, b, bn_param)[0]\n\n dx_num = eval_numerical_gradient_array(fx, x, dout)\n da_num = eval_numerical_gradient_array(fg, gamma.copy(), dout)\n db_num = eval_numerical_gradient_array(fb, beta.copy(), dout)\n\n _, cache = batchnorm_forward(x, gamma, beta, bn_param)\n dx, dgamma, dbeta = batchnorm_backward(dout, cache)\n print('dx error: ', rel_error(dx_num, dx))\n print('dgamma error: ', rel_error(da_num, dgamma))\n print('dbeta error: ', rel_error(db_num, dbeta))\n\n\ndef test_fcnet_with_batch_norm():\n np.random.seed(231)\n N, D, H1, H2, C = 2, 15, 20, 30, 10\n X = np.random.randn(N, D)\n y = np.random.randint(C, size=(N,))\n\n for reg in [0, 3.14]:\n print('Running check with reg = ', reg)\n model = FullyConnectedAffineNet([H1, H2], input_dim=D, num_classes=C,\n reg=reg, weight_scale=5e-2, dtype=np.float64,\n use_batchnorm=True)\n\n loss, grads = model.loss(X, y)\n print('Initial loss: ', loss)\n\n for name in sorted(grads):\n f = lambda _: model.loss(X, y)[0]\n grad_num = eval_numerical_gradient(f, model.params[name], verbose=False, h=1e-5)\n print('%s relative error: %.2e' % (name, rel_error(grad_num, grads[name])))\n if reg == 0: print()\n\n\ndef six_layer_net_with_batch_norm(data):\n np.random.seed(231)\n # Try training a very deep net with batchnorm\n hidden_dims = [100, 100, 100, 100, 100]\n\n num_train = 1000\n small_data = {\n 'X_train': data['X_train'][:num_train],\n 'y_train': data['y_train'][:num_train],\n 'X_val': data['X_val'],\n 'y_val': data['y_val'],\n }\n\n weight_scale = 1e-1\n bn_model = FullyConnectedAffineNet(hidden_dims, weight_scale=weight_scale, use_batchnorm=True)\n weight_scale = 2e-2\n model = FullyConnectedAffineNet(hidden_dims, weight_scale=weight_scale, use_batchnorm=False)\n\n bn_solver = Solver(bn_model, small_data,\n num_epochs=10, batch_size=50,\n update_rule='adam',\n optim_config={\n 'learning_rate': 1e-2,\n },\n verbose=True, print_every=200)\n bn_solver.train()\n\n solver = Solver(model, small_data,\n num_epochs=10, batch_size=50,\n update_rule='adam',\n optim_config={\n 'learning_rate': 1e-3,\n },\n verbose=True, print_every=200)\n solver.train()\n\n\nif __name__ == '__main__':\n data = get_CIFAR10_preproc_data()\n for k, v in data.items():\n print('%s: ' % k, v.shape)\n\n # test_batch_norm()\n # test_time_batch_norm()\n # test_backward_batch_norm()\n # test_fcnet_with_batch_norm()\n six_layer_net_with_batch_norm(data)\n","sub_path":"pythonML/notebooks/Pytorch/sandbox/BatchNormAffine.py","file_name":"BatchNormAffine.py","file_ext":"py","file_size_in_byte":8555,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"532538663","text":"from flask import Flask, render_template, request\nimport h2o\nimport pandas as pd\n\napp = Flask(__name__, static_url_path='/static')\nh2o.init()\n\nsaved_model = h2o.load_model(\"StackedEnsemble_AllModels_AutoML_20181212_231632\")\n\n\n@app.route('/', methods=['post', 'get'])\ndef index():\n if 
request.method == 'POST':\n cap_surface = request.form['cap_surface']\n cap_color = request.form['cap_color']\n gill_spacing = request.form['gill_spacing']\n gill_size = request.form['gill_size']\n veil_color = request.form['veil_color']\n ring_number = request.form['ring_number']\n stock_sur_below = request.form['stock_sur_below']\n stock_color_below = request.form['stock_color_below']\n stock_root = request.form['stock_root']\n cap_shape = request.form['cap_shape']\n gill_attachment = request.form['gill_attachment']\n gill_color = request.form['gill_color']\n veil_type = request.form['veil_type']\n stock_sur_above = request.form['stock_sur_above']\n stock_color_above = request.form['stock_color_above']\n ring_type = request.form['ring_type']\n stock_shape = request.form['stock_shape']\n habitat = request.form['habitat']\n bruises = request.form['bruises']\n odor = request.form['odor']\n spore_print_col = request.form['spore_print_col']\n population = request.form['population']\n\n # inputs = [cap_shape, cap_surface, cap_color, bruises ,odor, gill_attachment, gill_spacing, gill_size, gill_color,\n # stock_shape, stock_root, stock_sur_above, stock_sur_below, stock_color_above, stock_color_below,\n # veil_type, veil_color, ring_number, ring_type, spore_print_col, population, habitat]\n\n inputs = {'cap-shape': [cap_shape],\n 'cap-surface': [cap_surface]\n , 'cap-color': [cap_color]\n , 'bruises': [bruises]\n , 'gill-attachment': [gill_attachment]\n , 'odor': [odor]\n , 'gill-spacing': [gill_spacing]\n , 'gill-size': [gill_size]\n , 'gill-color': [gill_color]\n , 'stock-shape': [stock_shape]\n , 'stock-root': [stock_root]\n , 'stock-sur-above': [stock_sur_above]\n , 'stock-sur-below': [stock_sur_below]\n , 'stock-color-above': [stock_color_above]\n , 'stock-color-below': [stock_color_below]\n , 'veil-type': [veil_type]\n , 'veil-color': [veil_color]\n , 'ring-number': [ring_number]\n , 'ring-type': [ring_type]\n , 'spore-print-col': [spore_print_col]\n , 'population': [population]\n , 'habitat': [habitat]}\n df = pd.DataFrame(inputs)\n\n input = h2o.H2OFrame(df)\n result_pd = saved_model.predict(input).as_data_frame()\n cutoff = 0.20\n print(result_pd)\n print(input)\n print(result_pd.iloc[0]['p1'])\n probPois = result_pd.iloc[0]['p1']\n if probPois > cutoff :\n return render_template('poison.html')\n else :\n return render_template('notPoison.html')\n\n\n\n\n return render_template('index.html')\n\n\nif __name__ == '__name__':\n app.run()\n\napp.run(port=5000)\n","sub_path":"flask_app.py","file_name":"flask_app.py","file_ext":"py","file_size_in_byte":3250,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"334215615","text":"import simplejson\nimport sys \nreload(sys) \nsys.setdefaultencoding('utf-8')\n\nfor el in range(1,5):\n\tf = open('json2/'+ str(el) + '.json', 'r')\n\t# data = f.read()\n\tdata = simplejson.loads(f.read())\n\tf.close()\n\n\tposts = data['posts']\n\n\tfor post in posts:\n\t\tjson = simplejson.dumps(post)\n\t\tf = open('json3/'+ str(post['ID']) + '.json', 'w' )\n\t\tf.write(json)\n\t\tf.close()","sub_path":"5.py","file_name":"5.py","file_ext":"py","file_size_in_byte":367,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"396504264","text":"import time, os, picamera\n\n##with picamera.PiCamera() as camera:\n## camera.rotation = 0\n## camera.start_preview()\n## time.sleep(5)\n## camera.capture('foo2.jpg')\n## camera.stop_preview()\n\nfilename 
=\"pi2cam_002.jpg\"\npicam_options = \"-p -e -rot 180\" #\"-f\"\n###picam_options = \"-ss 800000 -ex sports -ev 25\"\n###picam_options = \"-ex backlight\" -ex night\n###picam_options = \"-ex night\"\n##\nos.system(\"raspistill \"+picam_options+\" -t 5000 -o /home/pi/picam_imgs/\"+filename)\n###os.system(\"raspistill \"+picam_options+\" -tl 2000 -rot 180 -t 6000 -o /home/pi/pythoncode/foo_%d.jpg\")\n##\n\n##from subprocess import call\n##photofile = \"sudo /home/pi/Dropbox-Uploader/dropbox_uploader.sh upload /home/pi/picam_imgs/\"+filename+\" /Apps/ColemanPiUploader/\"\n##call ([photofile], shell=True)\n","sub_path":"picameraPreview_.py","file_name":"picameraPreview_.py","file_ext":"py","file_size_in_byte":784,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"35458057","text":"import FWCore.ParameterSet.Config as cms\n\nprocess = cms.Process(\"PAT\")\n\n# initialize MessageLogger and output report\nprocess.load(\"FWCore.MessageLogger.MessageLogger_cfi\")\nprocess.MessageLogger.cerr.threshold = 'INFO'\nprocess.MessageLogger.categories.append('PATSummaryTables')\nprocess.MessageLogger.cerr.INFO = cms.untracked.PSet(\n default = cms.untracked.PSet( limit = cms.untracked.int32(0) ),\n PATSummaryTables = cms.untracked.PSet( limit = cms.untracked.int32(-1) )\n)\nprocess.options = cms.untracked.PSet( wantSummary = cms.untracked.bool(True) )\n\n# source\nprocess.source = cms.Source(\"PoolSource\", \n fileNames = cms.untracked.vstring('/store/user/ndefilip/H150_ZZ_4l_10TeV_GEN_HLT/CMSSW_2_2_6-2e2mu_10TeV_Skim10_glb_RECOSIM_IDEAL/3673d1d03efb301851df3f185750fd23/hTozzTo4leptons_Skim_1.root')\n)\nprocess.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(10) )\n\nprocess.load(\"Configuration.StandardSequences.Geometry_cff\")\nprocess.load(\"Configuration.StandardSequences.FrontierConditions_GlobalTag_cff\")\nprocess.GlobalTag.globaltag = cms.string('IDEAL_V12::All')\nprocess.load(\"Configuration.StandardSequences.MagneticField_cff\")\n\n\n\n# PAT Layer 0+1\nprocess.load(\"PhysicsTools.PatAlgos.patSequences_cff\")\n#process.content = cms.EDAnalyzer(\"EventContentAnalyzer\")\n# Switch off old trigger matching\nfrom PhysicsTools.PatAlgos.tools.trigTools import switchOffTriggerMatchingOld\nswitchOffTriggerMatchingOld( process )\n\nprocess.p = cms.Path(\n process.patDefaultSequence \n)\n\n# Output module configuration\nfrom PhysicsTools.PatAlgos.patEventContent_cff import patEventContent\n\nprocess.out = cms.OutputModule(\"PoolOutputModule\",\n fileName = cms.untracked.string('PATLayer1_Output.fromAOD_full.root'),\n # save only events passing the full path\n SelectEvents = cms.untracked.PSet( SelectEvents = cms.vstring('p') ),\n # save PAT Layer 1 output\n outputCommands = cms.untracked.vstring('drop *', *patEventContent ) # you need a '*' to unpack the list of commands 'patEventContent'\n #outputCommands = cms.untracked.vstring('keep *')\n)\nprocess.outpath = cms.EndPath(process.out)\n\n","sub_path":"test/patLayer1_fromAOD_full.cfg.py","file_name":"patLayer1_fromAOD_full.cfg.py","file_ext":"py","file_size_in_byte":2122,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"66333713","text":"import os\n\nimport click\n\nfrom pypgatk.commands.utils import print_help\nfrom pypgatk.ensembl.ensembl import EnsemblDataService\n\nthis_dir, this_filename = os.path.split(__file__)\n\n\n@click.command('vcf-to-proteindb', short_help=\"Generate peptides based on DNA variants VCF 
files\")\n@click.option('-c', '--config_file', help='Configuration to perform conversion between ENSEMBL Files',\n default=this_dir + '/../config/ensembl_config.yaml')\n@click.option('-f', '--input_fasta', help='Path to the transcript sequence')\n@click.option('-v', '--vcf', help='Path to the VCF file')\n@click.option('-g', '--gene_annotations_gtf', help='Path to the gene annotations file')\n@click.option('-t', '--translation_table', default=1, type=int, help=\"Translation table (Default 1) \")\n@click.option('-m', '--mito_translation_table', default=2, type=int, help='Mito_trans_table (default 2)')\n@click.option('-p', '--var_prefix', default=\"var\", help=\"String to add before the variant peptides\")\n@click.option('--report_ref_seq', help='In addition to var peps, also report all ref peps', is_flag=True)\n@click.option('-o', '--output_proteindb', default=\"peptide-database.fa\",\n help=\"Output file name, exits if already exists\")\n@click.option('--annotation_field_name', default=\"CSQ\",\n help='''Annotation field name found in the INFO column,\n e.g CSQ or vep; if empty it will identify overlapping transcripts\n from the given GTF file and no aa consequence will be considered''')\n@click.option('--af_field', default=\"\",\n help=\"field name in the VCF INFO column to use for filtering on AF, (Default None)\")\n@click.option('--af_threshold', default=0.01, help='Minium AF threshold for considering common variants')\n@click.option('--transcript_index', default=3, type=int,\n help='Index of transcript ID in the annotated columns (separated by |)')\n@click.option('--consequence_index', default=1, type=int,\n help='Index of consequence in the annotated columns (separated by |)')\n@click.option('--exclude_consequences',\n default='downstream_gene_variant, upstream_gene_variant, intergenic_variant, intron_variant, synonymous_variant',\n help=\"Excluded Consequences\", show_default=True)\n@click.option('-s', '--skip_including_all_cds',\n help=\"by default any transcript that has a defined CDS will be used, this option disables this features instead\",\n is_flag=True)\n@click.option('--include_consequences', default='all', help=\"included_consequences, default all\")\n@click.option('--ignore_filters',\n help=\"enabling this option causes or variants to be parsed. By default only variants that have not failed any filters will be processed (FILTER column is PASS, None, .) or if the filters are subset of the accepted filters. 
(default is False)\",\n is_flag=True)\n@click.option('--accepted_filters', default='', help=\"Accepted filters for variant parsing\")\n@click.pass_context\ndef vcf_to_proteindb(ctx, config_file, input_fasta, vcf, gene_annotations_gtf, translation_table,\n mito_translation_table,\n var_prefix, report_ref_seq, output_proteindb, annotation_field_name,\n af_field, af_threshold, transcript_index, consequence_index,\n exclude_consequences, skip_including_all_cds, include_consequences,\n ignore_filters, accepted_filters):\n if input_fasta is None or vcf is None or gene_annotations_gtf is None:\n print_help()\n\n pipeline_arguments = {EnsemblDataService.MITO_TRANSLATION_TABLE: mito_translation_table,\n EnsemblDataService.TRANSLATION_TABLE: translation_table,\n EnsemblDataService.HEADER_VAR_PREFIX: var_prefix,\n EnsemblDataService.REPORT_REFERENCE_SEQ: report_ref_seq,\n EnsemblDataService.PROTEIN_DB_OUTPUT: output_proteindb,\n EnsemblDataService.ANNOTATION_FIELD_NAME: annotation_field_name,\n EnsemblDataService.AF_FIELD: af_field, EnsemblDataService.AF_THRESHOLD: af_threshold,\n EnsemblDataService.TRANSCRIPT_INDEX: transcript_index,\n EnsemblDataService.CONSEQUENCE_INDEX: consequence_index,\n EnsemblDataService.EXCLUDE_CONSEQUENCES: exclude_consequences,\n EnsemblDataService.SKIP_INCLUDING_ALL_CDS: skip_including_all_cds,\n EnsemblDataService.INCLUDE_CONSEQUENCES: include_consequences,\n EnsemblDataService.IGNORE_FILTERS: ignore_filters,\n EnsemblDataService.ACCEPTED_FILTERS: accepted_filters}\n\n ensembl_data_service = EnsemblDataService(config_file, pipeline_arguments)\n ensembl_data_service.vcf_to_proteindb(vcf, input_fasta, gene_annotations_gtf)\n","sub_path":"pypgatk/commands/vcf_to_proteindb.py","file_name":"vcf_to_proteindb.py","file_ext":"py","file_size_in_byte":4905,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"70846715","text":"# BSD 3-Clause License\n#\n# Copyright (c) 2020, IPASC\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# 1. Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# 2. Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# 3. Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nimport numpy as np\nfrom ipasc_tool import MetadataDeviceTags, MetadataAcquisitionTags\n\n\ndef create_complete_acquisition_meta_data_dictionary():\n\n dictionary = dict()\n dictionary[MetadataAcquisitionTags.UUID.tag] = create_random_testing_parameters()['test_string']\n dictionary[MetadataAcquisitionTags.ENCODING.tag] = create_random_testing_parameters()['test_string']\n dictionary[MetadataAcquisitionTags.COMPRESSION.tag] = create_random_testing_parameters()['test_string']\n dictionary[MetadataAcquisitionTags.PHOTOACOUSTIC_IMAGING_DEVICE.tag] = create_random_testing_parameters()['test_string']\n dictionary[MetadataAcquisitionTags.DATA_TYPE.tag] = create_random_testing_parameters()['test_string']\n dictionary[MetadataAcquisitionTags.DIMENSIONALITY.tag] = \"time\"\n dictionary[MetadataAcquisitionTags.REGION_OF_INTEREST.tag] = np.asarray([0, 0.001, 0, 0.001, 0, 0.001])\n dictionary[MetadataAcquisitionTags.SIZES.tag] = np.asarray([4, 200])\n dictionary[MetadataAcquisitionTags.PULSE_LASER_ENERGY.tag] = np.asarray([2])\n dictionary[MetadataAcquisitionTags.FRAME_ACQUISITION_TIMESTAMPS.tag] = np.asarray([2])\n dictionary[MetadataAcquisitionTags.ACQUISITION_OPTICAL_WAVELENGTHS.tag] = np.asarray([2])\n dictionary[MetadataAcquisitionTags.TIME_GAIN_COMPENSATION.tag] = create_random_testing_parameters()['test_array']\n dictionary[MetadataAcquisitionTags.OVERALL_GAIN.tag] = 2.2\n dictionary[MetadataAcquisitionTags.ELEMENT_DEPENDENT_GAIN.tag] = np.ones(100)\n dictionary[MetadataAcquisitionTags.TEMPERATURE_CONTROL.tag] = np.ones(100) * 290.3\n dictionary[MetadataAcquisitionTags.ACOUSTIC_COUPLING_AGENT.tag] = create_random_testing_parameters()['test_string']\n dictionary[MetadataAcquisitionTags.SCANNING_METHOD.tag] = create_random_testing_parameters()['test_string']\n dictionary[MetadataAcquisitionTags.AD_SAMPLING_RATE.tag] = 1.2234\n dictionary[MetadataAcquisitionTags.FREQUENCY_DOMAIN_FILTER.tag] = create_random_testing_parameters()['test_array']\n dictionary[MetadataAcquisitionTags.ASSUMED_GLOBAL_SPEED_OF_SOUND.tag] = 1540.0\n return dictionary\n\n\ndef create_random_testing_parameters():\n\n test_float = np.random.random()\n test_string = str(np.random.random())\n test_dict = dict()\n test_array = np.random.random((1000, 2))\n return {'test_float': test_float,\n 'test_string': test_string,\n 'test_dict': test_dict,\n 'test_array': test_array}\n\n\ndef create_random_illumination_element(dim_x=None, dim_y=None, dim_z=None):\n\n illuminator_dict = dict()\n illuminator_dict[MetadataDeviceTags.ILLUMINATOR_POSITION.tag] = np.asarray([\n np.random.random() * 2 * dim_x - dim_x,\n np.random.random() * 2 * dim_y - dim_y,\n -np.random.random() * dim_z / 2])\n illuminator_dict[MetadataDeviceTags.ILLUMINATOR_ORIENTATION.tag] = np.asarray([\n np.random.random()-0.5,\n np.random.random()-0.5,\n np.random.random()-0.5])\n illuminator_dict[MetadataDeviceTags.ILLUMINATOR_GEOMETRY.tag] = np.asarray([0.0001,\n 0.0001,\n 0.0001])\n 
illuminator_dict[MetadataDeviceTags.ILLUMINATOR_GEOMETRY_TYPE.tag] = \"CUBOID\"\n min_wavelength = np.random.random() * 200 + 600\n illuminator_dict[MetadataDeviceTags.WAVELENGTH_RANGE.tag] = np.asarray([min_wavelength,\n min_wavelength +\n np.random.random() * 200,\n 1.0])\n illuminator_dict[MetadataDeviceTags.LASER_ENERGY_PROFILE.tag] = np.asarray([np.random.random(size=200),\n np.random.random(size=200)])\n illuminator_dict[MetadataDeviceTags.LASER_STABILITY_PROFILE.tag] = np.asarray([np.random.random(size=200),\n np.random.random(size=200)])\n illuminator_dict[MetadataDeviceTags.PULSE_WIDTH.tag] = 0.00000012\n illuminator_dict[MetadataDeviceTags.BEAM_INTENSITY_PROFILE.tag] = np.random.random(size=(200, 4))\n illuminator_dict[MetadataDeviceTags.BEAM_INTENSITY_PROFILE_DISTANCE.tag] = 1337.4217\n illuminator_dict[MetadataDeviceTags.BEAM_DIVERGENCE_ANGLES.tag] = np.deg2rad(np.random.random() * 40)\n return illuminator_dict\n\n\ndef create_random_detection_element(dim_x=None, dim_y=None, dim_z=None):\n\n detector_dict = dict()\n detector_dict[MetadataDeviceTags.DETECTOR_POSITION.tag] = np.asarray([np.random.random() * dim_x,\n np.random.random() * dim_y,\n -np.random.random() * dim_z])\n detector_dict[MetadataDeviceTags.DETECTOR_ORIENTATION.tag] = np.asarray(\n [np.random.random() * dim_x - dim_x / 2,\n np.random.random() * dim_y - dim_y / 2,\n np.random.random() * dim_z - dim_z / 2])\n\n detector_dict[MetadataDeviceTags.DETECTOR_GEOMETRY.tag] = np.asarray([0.0001, 0.0001, 0.0001])\n detector_dict[MetadataDeviceTags.DETECTOR_GEOMETRY_TYPE.tag] = \"CUBOID\"\n\n detector_dict[MetadataDeviceTags.FREQUENCY_RESPONSE.tag] = np.asarray([np.random.random(size=200),\n np.random.random(size=200)])\n detector_dict[MetadataDeviceTags.ANGULAR_RESPONSE.tag] = np.asarray([np.random.random(size=200),\n np.random.random(size=200)])\n return detector_dict\n\n\ndef create_complete_device_metadata_dictionary(dim_x=None, dim_y=None, dim_z=None):\n\n if dim_x is None:\n dim_x = 0.001\n if dim_y is None:\n dim_y = 0.03\n if dim_z is None:\n dim_z = 0.03\n\n dictionary = {\n MetadataDeviceTags.GENERAL.tag: {\n MetadataDeviceTags.UUID.tag: \"a2fd-48nbsh-sfiush7-chjs\",\n MetadataDeviceTags.FIELD_OF_VIEW.tag: np.asarray([0, dim_x, 0, dim_y, 0, dim_z]),\n MetadataDeviceTags.NUMBER_OF_ILLUMINATION_ELEMENTS.tag: 2,\n MetadataDeviceTags.NUMBER_OF_DETECTION_ELEMENTS.tag: 4\n },\n MetadataDeviceTags.ILLUMINATORS.tag: {\n MetadataDeviceTags.ILLUMINATION_ELEMENT.tag + \"_0\":\n create_random_illumination_element(dim_x, dim_y, dim_z),\n MetadataDeviceTags.ILLUMINATION_ELEMENT.tag + \"_1\":\n create_random_illumination_element(dim_x, dim_y, dim_z)\n },\n MetadataDeviceTags.DETECTORS.tag: {\n MetadataDeviceTags.DETECTION_ELEMENT.tag + \"_0\":\n create_random_detection_element(dim_x, dim_y, dim_z),\n MetadataDeviceTags.DETECTION_ELEMENT.tag + \"_1\":\n create_random_detection_element(dim_x, dim_y, dim_z),\n MetadataDeviceTags.DETECTION_ELEMENT.tag + \"_2\":\n create_random_detection_element(dim_x, dim_y, dim_z),\n MetadataDeviceTags.DETECTION_ELEMENT.tag + \"_3\":\n create_random_detection_element(dim_x, dim_y, dim_z)\n }\n }\n\n return dictionary\n","sub_path":"ipasc_test/tests/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":8895,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"313336585","text":"import matplotlib\nmatplotlib.use('Agg')\nfrom dataObj.image import cifarObj\nfrom tf.supervised_control import Supervised\nimport numpy as np\nimport 
pdb\n\n#Paths to list of filenames\ntrainList = \"/home/slundquist/mountData/datasets/cifar/images/train.txt\"\ntestList = \"/home/slundquist/mountData/datasets/cifar/images/test.txt\"\n\n#Get object from which tensorflow will pull data from\ntrainDataObj = cifarObj(trainList, resizeMethod=\"crop\", shuffle=True, skip=1, seed=None, getGT=True)\ntestDataObj = cifarObj(testList, resizeMethod=\"crop\", shuffle=True, skip=1, seed=None, getGT=True)\n\nparams = {\n #Base output directory\n 'outDir': \"/home/slundquist/mountData/tfLCA/\",\n #Inner run directory\n 'runDir': \"/cifar_sup_128/\",\n 'tfDir': \"/tfout\",\n #Save parameters\n 'ckptDir': \"/checkpoints/\",\n 'saveFile': \"/save-model\",\n 'savePeriod': 100, #In terms of displayPeriod\n #output plots directory\n 'plotDir': \"plots/\",\n 'plotPeriod': 200, #With respect to displayPeriod\n #Progress step\n 'progress': 10,\n #Controls how often to write out to tensorboard\n 'writeStep': 100, #300,\n #Flag for loading weights from checkpoint\n 'load': False,\n 'loadFile': \"/home/slundquist/mountData/DeepGAP/saved/cifar.ckpt\",\n #Device to run on\n 'device': '/gpu:0',\n #Num iterations\n 'outerSteps': 500, #1000000,\n 'innerSteps': 100, #300,\n #Batch size\n 'batchSize': 128,\n #Learning rate for optimizer\n 'learningRate': 1e-4,\n 'numClasses': 10,\n\n 'epsilon': 1e-8,\n\n 'regularizer': 'none',\n 'regWeight': .3,\n\n #####ISTA PARAMS######\n 'VStrideY': 2,\n 'VStrideX': 2,\n 'patchSizeX': 12,\n 'patchSizeY': 12,\n 'numV': 128,\n #####New encode parapms#####\n 'maxPool': True, #Controls max or avg pool\n}\n\n#Allocate tensorflow object\n#This will build the graph\ntfObj = Supervised(params, trainDataObj.inputShape)\n\nprint(\"Done init\")\ntfObj.runModel(trainDataObj, testDataObj = testDataObj)\nprint(\"Done run\")\n\ntfObj.closeSess()\n\n","sub_path":"runs/classifier/cifar_supervised_train.py","file_name":"cifar_supervised_train.py","file_ext":"py","file_size_in_byte":2164,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"388745239","text":"from django.urls import re_path\nfrom rest_framework.routers import DefaultRouter\n\nfrom manager.views import HostModelViewSet, GroupModelViewSet\n\nrouter = DefaultRouter()\nrouter.register(r'manager/host', HostModelViewSet)\n# router.register(r'manager/host', HostModelViewSet)\nrouter.register(r'manager/group', GroupModelViewSet)\n\nurlpatterns = [\n # 导出项目\n re_path('manager/export/', HostModelViewSet.as_view({'get': 'export', })),\n # 项目导入模板下载及导入\n # re_path('project/importTemplate/',\n # ProjectModelViewSet.as_view({'get': 'importTemplate', 'post': 'importTemplate'})),\n]\n\nurlpatterns += router.urls\n","sub_path":"apps/manager/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":650,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"475241670","text":"# Imports =========================================================================================================\n# Flask imports.\nfrom flask import Flask, render_template, request, jsonify, make_response, Response, flash, redirect, url_for\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_table import Table, Col\n\n# Standard Python library imports.\nimport os\nimport json\nimport random \n\n# Project module imports.\nfrom src import main\n\n\n# Application configurations. 
=========================================================================================================\napp = Flask(__name__)\napp.secret_key = 'cs205'\n\n\n# Databse connection with SQLAlchemy API\napp.config['SQLALCHEMY_DATABASE_URI'] = 'postgres://ngdchkkaxlfwmc:a68705eae84eea6a8ab76b7c9539903386e8157d0d2ceb41e2b81b38c53018bd@ec2-174-129-253-175.compute-1.amazonaws.com:5432/d6olev9figq30p'#keys.database_key\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\ndb = SQLAlchemy(app)\n\n\n# Endpoints =========================================================================================================\n# Server homepage.\n@app.route('/')\ndef index():\n \"\"\"\n An endpoint for the home/index navigation page. \n \"\"\"\n return render_template('index.html')\n\n\n# Genre page \n@app.route('/genre_page/', methods=['GET'])\ndef genre_page():\n \"\"\"\n An endpoint for the genre query page. \n \"\"\"\n # Get inputs from website\n number = request.args.get('over')\n genre = request.args.get('genre')\n searchType = request.args.get('searchType')\n\n if None not in [number, genre, searchType]:\n print(f\" DEBUG: got: {number}, {genre}, {searchType}\")\n # Make Genre all lower Case\n genre = genre.lower().strip()\n\n # Validating input.\n # If number and genre are empty\n if number == '' and genre == '':\n flash('Please enter a number and genre')\n return render_template('genre_page.html')\n \n # If number is empty\n elif number == '':\n flash('Please enter a number')\n return render_template('genre_page.html')\n\n # If genre is empty\n elif genre == '':\n flash('Please enter a genre')\n return render_template('genre_page.html')\n\n # If searchType is None\n elif searchType == 'None':\n flash('Please select a searchType')\n return render_template('genre_page.html')\n \n # Number is less than 0.\n elif (int(number) <= 0) or (int(number) > 25):\n flash('Number must be between 0 and 25')\n return render_template('genre_page.html')\n \n else: # Otherwise the entry is valid, so we make a query.\n if searchType == 'Artist': # Top Artist by Genre\n # Create query Text\n query_text = \"select artist, listeners from artists join associations ON (artists.id = associations.artist_id) join genres on \\\n (associations.genre_id=genres.id) where genre ='{genre}' order by listeners desc limit \".format(genre=genre)+ str(number) + \";\"\n\n # Top Region by Genre\n if searchType == 'Region':\n query_text = \"select region, sum(listeners) from artists join associations on \" \\\n \"(artists.id=associations.artist_id) join genres on (associations.genre_id = genres.id) where genre= 'rock' \" \\\n \"group by artists.region order by sum desc limit \" + str(number)+ \";\"\n\n # Commiting the query.\n result = main.commitQuery(query_text, db)\n \n # Packaging the query.\n packaged_data = json.dumps([dict(zip([\"over\", \"listeners\"], row)) for row in result])\n \n if len(packaged_data) == 2:\n flash('No result found for specified query input.')\n return render_template('genre_page.html', plot_data=None)\n\n else: # The query was successful.\n plot_title = \"Top {number} {searchType}s Over the Genre '{genre}'\".format(number=number, searchType=searchType, genre=genre)\n return render_template('genre_page.html', plot_data=packaged_data, plot_title=plot_title)\n \n else:\n print(\"DEBUG: got nothing.\")\n\n return render_template('genre_page.html')\n\n\n# Artist page \n@app.route('/artist_page/', methods=['GET'])\ndef artist_page():\n \"\"\"\n An endpoint for the artist query page. 
\n \"\"\"\n # Get inputs from website\n artist = request.args.get('artist')\n searchType = request.args.get('searchType')\n\n if None not in [artist, searchType]:\n print(f\" DEBUG: got: {artist}, {searchType}\")\n # Make Genre all lower Case\n artist = artist.lower().strip()\n\n # Validating input.\n # If artist is empty\n if artist == '':\n flash('Please enter a artist')\n return render_template('artist_page.html')\n\n # If searchType is None\n elif searchType == 'None':\n flash('Please select a searchType')\n return render_template('artist_page.html')\n \n else: # Otherwise the entry is valid, so we make a query.\n # Declaring a table.\n class ArtistTable(Table):\n artist = Col('artist')\n region = Col('region')\n listeners = Col('listeners')\n\n if searchType == 'Artist': # Searching for an artist.\n # Creating and commiting a query.\n query_text = \"select * from artists where artist='{artist}';\".format(artist=artist)\n result = main.commitQuery(query_text, db)\n\n # Packaging the query.\n packaged_data = [dict(zip([\"artist\", \"region\", \"listeners\"], [row[1], row[2], row[3]])) for row in result]\n\n # Creating the table.\n table = ArtistTable(packaged_data)\n \n # Top by Genre\n if searchType == 'Related Artist':\n # Getting the genre ids of the query artist.\n originalGenres_query = \"select genre_id from associations join artists on (artists.id=associations.artist_id) where artist='{artist}';\".format(artist=artist)\n originalGenres = main.commitQuery(originalGenres_query, db)\n originalGenres_packaged = [tupley[0] for tupley in originalGenres]\n \n # Getting the artist ids of every artist to compare. \n compareIDs_query = \"select id from artists\";\n compareIDs = main.commitQuery(compareIDs_query, db)\n compareIDs_packaged = [artist_id[0] for artist_id in compareIDs]\n \n comparison_ids_sample = random.sample(compareIDs_packaged, 100)\n relatedArtists = {}\n\n completion = 0\n percentage_update = False\n for index in comparison_ids_sample:\n completion += 1\n\n # A user update (without re-rendering the page during this funciton call).\n percentage = round(completion/len(comparison_ids_sample), 2)\n print(\"DEBUG: percent complete: \", percentage)\n\n compareGenres_query = \"SELECT genre_id FROM associations WHERE artist_id = '{index}';\".format(index=index)\n compareGenres = main.commitQuery(compareGenres_query, db)\n compareGenres_packaged = [tupley[0] for tupley in compareGenres]\n \n relatedArtists = main.RelatedArtists(originalGenres_packaged, compareGenres_packaged, index, relatedArtists)\n\n related_artists = main.pickTopArtists(relatedArtists)\n\n # If nothing is found.\n if related_artists == None:\n flash(\"Could not find a related artist for this selection (not enough genres associated to complete a search).\")\n return render_template('artist_page.html')\n\n # Now to query for the artist information. 
\n packaged_table_data = []\n for artist_id in related_artists:\n # Creating and commiting a query.\n query_text = \"select * from artists where id='{artist_id}';\".format(artist_id=artist_id)\n result = main.commitQuery(query_text, db)\n\n # Packaging the query.\n packaged_table_data.extend([dict(zip([\"artist\", \"region\", \"listeners\"], [row[1], row[2], row[3]])) for row in result])\n\n # Creating the table.\n table = ArtistTable(packaged_table_data)\n \n table.border = True\n return render_template('artist_page.html', artist_table=table)\n \n else:\n print(\"DEBUG: got nothing.\")\n\n return render_template('artist_page.html')\n \n\n# Region page \n@app.route('/region_page/', methods=['GET'])\ndef region_page():\n \"\"\"\n An endpoint for the region query page. \n \"\"\"\n # First get all the inputs from the website\n number = request.args.get('over')\n region = request.args.get('region')\n searchType = request.args.get('searchType')\n \n print(f\" DEBUG: got: {number}, {region}, {searchType}\")\n\n if None not in [number, region, searchType]:\n # Make Genre all lower Case\n region = region.lower().strip()\n\n # Validating input\n if number is not None:\n # If number and region are empty\n if number == '' and region == '':\n flash('Please enter an number and region')\n return render_template('region_page.html')\n # If number is empty\n elif number == '':\n flash('Please enter an number')\n return render_template('region_page.html')\n # If region is empty\n elif region == '':\n flash('Please enter a region')\n return render_template('region_page.html')\n # If searchType is not selected\n elif searchType == 'None':\n flash('Please select a searchType')\n return render_template('region_page.html')\n # Number is greater than 0\n elif (int(number) <= 0) or (int(number) > 25):\n flash('Number must be greater than 0 and no greater than 25.')\n return render_template('region_page.html')\n \n else: # Otherwise the entry is valid, so we make a query.\n if searchType == 'Artist': # Top Artist by Region\n # Create query Text\n query_text = \"select artist, listeners from artists where region='{region}' order by listeners desc limit {number};\".format(region=region, number=number)\n # Top genre by region\n if searchType == 'Genre':\n query_text = \"select genre, sum(listeners) from genres join associations on (genres.id=associations.genre_id)\" \\\n \" join artists on (associations.artist_id = artists.id) where region ='{region}' group by genre order\" \\\n \" by sum desc limit {number};\".format(region=region, number=number)\n \n # Commiting the query.\n result = main.commitQuery(query_text, db)\n \n # Packaging the query.\n packaged_data = json.dumps([dict(zip([\"over\", \"listeners\"], row)) for row in result])\n \n if len(packaged_data) == 2:\n flash('No result found for specified query input.')\n return render_template('region_page.html', plot_data=None)\n\n else: # The query was successful.\n plot_title = \"Top {number} {searchType}s Over the Region '{region}'\".format(number=number, searchType=searchType, region=region)\n return render_template('region_page.html', plot_data=packaged_data, plot_title=plot_title)\n\n else:\n print(\"DEBUG: got nothing.\")\n\n return render_template('region_page.html')\n\n\n# Help Page\n@app.route('/help/')\ndef helper():\n return render_template('helper.html')\n\n\n# Creator Page\n@app.route('/creators/')\ndef creators():\n return render_template('creators.html')\n\n\n# Incase the URL is not found.\n@app.errorhandler(404)\ndef not_found(error):\n return 
make_response(jsonify({'error': 'Not found'}), 404)\n\n\n# Mainline Logic =========================================================================================================\nif __name__ == '__main__':\n # Threaded option to enable multiple instances for multiple user access support\n app.run(debug=True)","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":12750,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"235557440","text":"import mysql.connector\nimport time\nfrom mysql.connector import Error\n\nclass Database:\n #this parameters needs to be strings\n def __init__(self, host, user, passwd, database):\n self.host = host\n self.user = user\n self.passwd = passwd\n self.database = database\n \n self.db = mysql.connector.connect(\n host= self.host,\n user= self.user,\n passwd= self.passwd,\n database= self.database\n )\n\n self.cursor = self.db.cursor(prepared=True)\n\n \n def insertTemperetureAndHumidity(self, dht11Instance):\n sql = \"INSERT INTO temperature values(0, CURRENT_DATE(), NOW(), %s, %s)\"\n\n while True:\n result = dht11Instance.read()\n while not result.is_valid(): # read until valid values\n result = dht11Instance.read()\n \n print(\"Temperature: %-3.1f C\" % result.temperature)\n print(\"Humidity: %-3.1f %%\" % result.humidity)\n \n temperature = float(result.temperature)\n humidity = float(result.humidity)\n \n try:\n self.cursor.execute(sql, (temperature,humidity,), True)\n self.db.commit()\n print (\"Data committed\")\n \n except mysql.connector.Error as error:\n print(\"parameterized query failed {}\".format(error))\n self.db.rollback() \n \n time.sleep(15)\n\n #days needs to be a string \n def cleanUp(self, days):\n sql = \"DELETE FROM temperature WHERE day < NOW()- INTERVAL %s DAY;\"\n self.days = str(days)\n\n while True:\n try:\n self.cursor.execute(sql, days)\n self.db.commit()\n print (\"Data deleted\")\n \n except mysql.connector.Error as error:\n print(\"parameterized query failed {}\".format(error))\n self.db.rollback() \n time.sleep(2500000)\n\n \n def getAllowdRFIDS(self):\n data = []\n sql = \"SELECT rfid,name,securityLevel FROM rfid WHERE securityLevel = 1 OR securityLevel = 2\"\n try:\n self.cursor.execute(sql)\n \n \n result = self.cursor.fetchall()\n\n for row in result:\n row = list(row)\n data.append(row)\n\n print (\"data recieved\")\n return data\n\n except mysql.connector.Error as error:\n print(\"parameterized query failed {}\".format(error))\n self.db.rollback() \n\n\n def addNewRFID(self, data):\n self.data = str(data)\n sql = \"INSERT INTO `rfid` (`ID`, `name`, `securityLevel`, `rfid`) VALUES (NULL, %s, %s, %s)\"\n name = input(\"Name: \")\n securityLevel = 0\n\n while not (securityLevel == \"1\" or securityLevel == \"2\"):\n securityLevel = str(input(\"2 für Leitende Mitarbeiter, 1 für Angestellte : \"))\n \n try:\n self.cursor.execute(sql, (name, securityLevel, self.data,))\n self.db.commit()\n print('Daten wurden gesetzt')\n\n except mysql.connector.Error as error:\n print(\"parameterized query failed {}\".format(error))\n self.db.rollback() \n\n\n def logEntry(self, name , rfid, access):\n self.name = name\n self.rfid = str(rfid)\n self.access = access\n sql = \"INSERT INTO `entrylog` (`ID`, `time`, `name`, `rfid`, `access`) VALUES (NULL, current_timestamp(), %s, %s, %s)\"\n\n try:\n self.cursor.execute(sql, (self.name, self.rfid, self.access,))\n self.db.commit()\n \n\n except mysql.connector.Error as error:\n print(\"parameterized query failed 
{}\".format(error))\n self.db.rollback()\n \n\n\n def close(self):\n self.cursor.close()\n self.db.close()\n\n ","sub_path":"Database.py","file_name":"Database.py","file_ext":"py","file_size_in_byte":3935,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"520698994","text":"import re\nimport time\nfile = open('JEOPARDY_CSV.csv', 'r', encoding='utf8')\nlines = file.readlines()\ndataset = dict()\nfor line in lines[1:]:\n splcomma = [i for i in line.split(',')]\n spl = [i.strip('\\\"').strip() for i in ','.join(splcomma[3:]).split('\\\",\\\"')]\n number, date, rnd = splcomma[:3]\n category, value, quest, answ = spl\n price = re.search(r'[0-9]+', value)\n if price:\n price = int(price.group())\n else:\n price = 0\n localDict = dict()\n if category in dataset:\n dataset[category][quest] = {'Show Number': number,\n 'Air Date': date,\n 'Round': rnd,\n 'Value': price,\n 'Answer': answ}\n else:\n dataset[category] = {quest: {'Show Number': number,\n 'Air Date': date,\n 'Round': rnd,\n 'Value': price,\n 'Answer': answ}\n }\nfile.close()","sub_path":"km_83/Sin_Gleb/workshop5/homework/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":1102,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"317762447","text":"from readData.ShowData import ShowData\n\n\"\"\"\n{'license': 3,\n 'file_name': '000000391895.jpg',\n 'coco_url': 'http://images.cocodataset.org/train2017/000000391895.jpg',\n 'height': 360,\n 'width': 640,\n 'date_captured': '2013-11-14 11:18:45',\n 'flickr_url': 'http://farm9.staticflickr.com/8186/8119368305_4e622c8349_z.jpg',\n 'id': 391895}\n\n {'segmentation':[],\n 'area': 2765.1486500000005,\n 'iscrowd': 0,\n 'image_id': 558840,\n 'bbox': [199.84, 200.46, 77.71, 70.88],\n 'category_id': 58,\n 'id': 156}\n\"\"\"\n\n\"\"\"\n这里发现annotation可能存在多对1的关系,这里应当解析出image-annotation\n可能有些image不存在annotation的情况,此类一共一共1021个样本\n\"\"\"\n\nfrom readData.ReadCoCoData import ReadCoCoData\nimport cv2 as cv\n\n\nclass ShowBBox(ShowData):\n def __init__(self):\n self.path = \"../../data\"\n self.train_image_id_map = None\n self.test_image_id_map = None\n self.train_annotation_map = None\n self.test_annotation_map = None\n self.category_map = None\n pass\n\n def showByAnnotation(self, annotation):\n # 获取图片 id、图片的长宽、图片的文件地址\n # 先从 train 取如果没有再从 test 取\n bbox = annotation['bbox']\n image_id = annotation['image_id']\n # 如果是第一次查找,则将 map 缓存起来供下次使用\n if self.train_image_id_map is None:\n self.build_train_image_id_map()\n image = self.train_image_id_map.get(image_id,None)\n path = self.get_path(image, 'train')\n\n if image is None:\n if self.test_image_id_map is None:\n self.build_test_image_id_map()\n image = self.test_image_id_map.get(image_id,None)\n path = self.get_path(image, 'test')\n\n # 绘图\n self.draw(image, annotation, path)\n\n def showByImage(self, image):\n # 解析图片对应的annotation\n annotations ,path= self.getAnnotationsByImage(image)\n # 推断所有的annotation物体及边缘\n # show\n self.draw(image,annotations,path)\n\n\n def getImageByAnnotation(self, annotation):\n pass\n\n def getAnnotationsByImage(self,image):\n \"\"\"\n 根据image获取所有的annotation\n :param image:\n :return:\n \"\"\"\n image_id = image['id']\n if self.train_annotation_map is None:\n self.build_train_annotation_map()\n annotations = self.train_annotation_map.get(image_id,None)\n path = self.get_path(image, 'train')\n\n if annotations is None:\n if self.test_annotation_map is None:\n self.build_test_annotation_map()\n\n annotations = 
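# A brief usage sketch, with assumed placeholder credentials, for the pooled connection
# pattern in the Database.py record above: borrow a connection per operation and close
# it afterwards, which returns it to the pool rather than discarding it.
import mysql.connector.pooling

pool = mysql.connector.pooling.MySQLConnectionPool(
    pool_name="sensor_pool", pool_size=5,
    host="localhost", user="pi", password="secret", database="sensors",  # assumed values
)

conn = pool.get_connection()
try:
    cur = conn.cursor(prepared=True)
    cur.execute("INSERT INTO temperature VALUES (0, CURRENT_DATE(), NOW(), %s, %s)", (21.5, 48.0))
    conn.commit()
finally:
    conn.close()  # for a pooled connection this hands it back to the pool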
self.test_annotation_map.get(image_id,None)\n path = self.get_path(image, 'test')\n return annotations,path\n\n\n def build_annotation_map(self,type = \"train\"):\n readData = ReadCoCoData()\n images, annotations, categories = readData.read_data(type)\n d = dict()\n for annotation in annotations:\n l = d.get(annotation['image_id'],[])\n l.append(annotation)\n d[annotation['image_id']] = l\n\n return d\n\n def build_train_annotation_map(self):\n self.train_annotation_map = self.build_annotation_map()\n\n def build_test_annotation_map(self):\n self.test_annotation_map = self.build_annotation_map(\"test\")\n\n def getAnnotationsByImageId(self, imageId):\n pass\n\n def getAnnotationsByImageName(self, imageName):\n pass\n\n def build_test_image_id_map(self):\n \"\"\"\n 创建 test 的 map 关系\n \"\"\"\n self.test_image_id_map = self.build_map('test')\n\n def build_train_image_id_map(self):\n \"\"\"\n 创建 train 的 map 关系\n \"\"\"\n self.train_image_id_map = self.build_map()\n\n def draw(self, image, annotations, path):\n if type(annotations) == dict:\n annotations = [annotations]\n\n cateName = \"\"\n img = cv.imread(path, cv.IMREAD_COLOR)\n for annotation in annotations:\n left_top = (int(annotation['bbox'][0]), int(annotation['bbox'][1]))\n right_bottom = (\n int(annotation['bbox'][0] + annotation['bbox'][2]), int(annotation['bbox'][1] + annotation['bbox'][3]))\n img = cv.rectangle(img, left_top, right_bottom, (0, 255, 0), 2)\n cateName += (self.get_category(annotation) + \"-\")\n\n cv.imshow('img' + str(image['id']) + \"-\" + cateName, img)\n cv.waitKey(10000)\n cv.destroyAllWindows()\n\n def build_map(self, type=\"train\"):\n \"\"\"\n :return map\n \"\"\"\n readData = ReadCoCoData()\n images, annotations, categories = readData.read_data(type)\n d = dict()\n for image in images:\n d[image['id']] = image\n return d\n\n def get_path(self, image, type='train'):\n # 根据类型获取图片路径\n l = len('000000391895')\n path = self.path + \"/\" + type + \"2017/\" + '0' * (l - len(str(image['id']))) + str(image['id']) + \".jpg\"\n return path\n\n def get_category(self, annotation):\n # 获取类目\n category_id = annotation['category_id']\n if self.category_map is None:\n self.build_category_map()\n category_name = self.category_map[category_id]['name']\n return category_name\n\n def build_category_map(self,type = \"train\"):\n readData = ReadCoCoData()\n images, annotations, categories = readData.read_data(type)\n d = dict()\n for cate in categories:\n d[cate['id']] = cate\n\n self.category_map = d\n return d\n\nif __name__ == \"__main__\":\n annotation = {'segmentation':[],\n 'area': 2765.1486500000005,\n 'iscrowd': 0,\n 'image_id': 558840,\n 'bbox': [199.84, 200.46, 77.71, 70.88],\n 'category_id': 58,\n 'id': 156}\n\n image = {'license': 3,\n 'file_name': '000000391895.jpg',\n 'coco_url': 'http://images.cocodataset.org/train2017/000000391895.jpg',\n 'height': 360,\n 'width': 640,\n 'date_captured': '2013-11-14 11:18:45',\n 'flickr_url': 'http://farm9.staticflickr.com/8186/8119368305_4e622c8349_z.jpg',\n 'id': 391895}\n show = ShowBBox()\n show.showByAnnotation(annotation)\n show.showByImage(image)","sub_path":"readData/ShowBBox.py","file_name":"ShowBBox.py","file_ext":"py","file_size_in_byte":6428,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"588117339","text":"# -*- coding: utf-8 -*-\n# @Time : 2017/6/26 下午5:43\n# @Author : fj\n# @Site : \n# @File : 01_test_titanic.py\n# @Software: PyCharm\n\nimport pandas as pd\nimport tensorflow as tf\nimport numpy as 
np\n\n#将模型用于预测\ntest_data = pd.read_csv('./data/test.csv')\ntest_data.fillna(0)\ntest_data['Sex'] = test_data['Sex'].apply(lambda s: 1 if s == 'male' else 0)\nx_test = test_data[['Sex', 'Age', 'Pclass', 'SibSp', 'Parch', 'Fare']]\n\nx = tf.placeholder(tf.float32, shape=[None, 6], name='x')\nW = tf.Variable(tf.random_normal([6, 2]), name='weights')\nb = tf.Variable(tf.zeros([2]), name='bias')\n\ny_pred = tf.nn.softmax(tf.matmul(x, W) + b)\n\nwith tf.Session() as sess:\n saver = tf.train.Saver()\n saver.restore(sess, 'model.ckpt')\n predictions = np.argmax(sess.run(y_pred, feed_dict={x: x_test}), 1)\n print(predictions)\n submission = pd.DataFrame({\n \"PassengerId\": test_data[\"PassengerId\"],\n \"Survived\": predictions\n })\n\n submission.to_csv(\"titanic-submission.csv\", index=False)","sub_path":"2. titanic/01_test_titanic.py","file_name":"01_test_titanic.py","file_ext":"py","file_size_in_byte":1017,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"173122133","text":"import collections\nfrom piecrust import (\n DEFAULT_FORMAT, DEFAULT_TEMPLATE_ENGINE, DEFAULT_POSTS_FS,\n DEFAULT_DATE_FORMAT, DEFAULT_THEME_SOURCE)\nfrom piecrust.configuration import (\n get_dict_values, try_get_dict_values)\nfrom piecrust.sources.base import REALM_THEME\n\n\ndefault_configuration = collections.OrderedDict({\n 'site': collections.OrderedDict({\n 'title': \"Untitled PieCrust website\",\n 'root': '/',\n 'default_format': DEFAULT_FORMAT,\n 'default_template_engine': DEFAULT_TEMPLATE_ENGINE,\n 'enable_gzip': True,\n 'pretty_urls': False,\n 'trailing_slash': False,\n 'date_format': DEFAULT_DATE_FORMAT,\n 'auto_formats': collections.OrderedDict([\n ('html', ''),\n ('md', 'markdown'),\n ('textile', 'textile')]),\n 'default_auto_format': 'md',\n 'default_pagination_source': None,\n 'pagination_suffix': '/%num%',\n 'slugify_mode': 'encode',\n 'themes_sources': [DEFAULT_THEME_SOURCE],\n 'cache_time': 28800,\n 'enable_debug_info': True,\n 'show_debug_info': False,\n 'use_default_content': True,\n 'use_default_theme_content': True,\n 'theme_site': False\n }),\n 'baker': collections.OrderedDict({\n 'no_bake_setting': 'draft',\n 'workers': None,\n 'batch_size': None\n })\n})\n\n\ndefault_theme_content_model_base = collections.OrderedDict({\n 'site': collections.OrderedDict({\n 'sources': collections.OrderedDict({\n 'theme_pages': {\n 'type': 'default',\n 'ignore_missing_dir': True,\n 'fs_endpoint': 'pages',\n 'data_endpoint': 'site.pages',\n 'default_layout': 'default',\n 'item_name': 'page',\n 'realm': REALM_THEME\n }\n }),\n 'routes': [\n {\n 'url': '/%slug%',\n 'source': 'theme_pages',\n 'func': 'pcurl'\n }\n ],\n 'theme_tag_page': 'theme_pages:_tag.%ext%',\n 'theme_category_page': 'theme_pages:_category.%ext%',\n 'theme_month_page': 'theme_pages:_month.%ext%',\n 'theme_year_page': 'theme_pages:_year.%ext%'\n })\n})\n\n\ndefault_content_model_base = collections.OrderedDict({\n 'site': collections.OrderedDict({\n 'posts_fs': DEFAULT_POSTS_FS,\n 'default_page_layout': 'default',\n 'default_post_layout': 'post',\n 'post_url': '/%year%/%month%/%day%/%slug%',\n 'year_url': '/archives/%year%',\n 'tag_url': '/tag/%tag%',\n 'category_url': '/%category%',\n 'posts_per_page': 5\n })\n})\n\n\ndef get_default_content_model(site_values, values):\n default_layout = get_dict_values(\n (site_values, 'site/default_page_layout'),\n (values, 'site/default_page_layout'))\n return collections.OrderedDict({\n 'site': collections.OrderedDict({\n 'sources': collections.OrderedDict({\n 
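# The 01_test_titanic.py record above only restores 'model.ckpt' (and note its bare
# test_data.fillna(0) returns a copy, so NaNs survive unless the result is assigned).
# A minimal assumed sketch of the matching TF1 train-and-save step with the same
# 6-feature, 2-class softmax shape, trained on random stand-in data:
import tensorflow as tf
import numpy as np

x = tf.placeholder(tf.float32, shape=[None, 6], name='x')
y_true = tf.placeholder(tf.float32, shape=[None, 2], name='y_true')
W = tf.Variable(tf.random_normal([6, 2]), name='weights')
b = tf.Variable(tf.zeros([2]), name='bias')
y_pred = tf.nn.softmax(tf.matmul(x, W) + b)

cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_true * tf.log(y_pred + 1e-10), axis=1))
train_step = tf.train.GradientDescentOptimizer(0.01).minimize(cross_entropy)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    xs = np.random.rand(32, 6).astype(np.float32)                   # stand-in features
    ys = np.eye(2)[np.random.randint(0, 2, 32)].astype(np.float32)  # stand-in one-hot labels
    for _ in range(100):
        sess.run(train_step, feed_dict={x: xs, y_true: ys})
    tf.train.Saver().save(sess, 'model.ckpt')  # the checkpoint the restore step expects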
'pages': {\n 'type': 'default',\n 'ignore_missing_dir': True,\n 'data_endpoint': 'site.pages',\n 'default_layout': default_layout,\n 'item_name': 'page'\n }\n }),\n 'routes': [\n {\n 'url': '/%slug%',\n 'source': 'pages',\n 'func': 'pcurl'\n }\n ],\n 'taxonomies': collections.OrderedDict([\n ('tags', {\n 'multiple': True,\n 'term': 'tag'\n }),\n ('categories', {\n 'term': 'category',\n 'func_name': 'pccaturl'\n })\n ])\n })\n })\n\n\ndef get_default_content_model_for_blog(blog_name, is_only_blog,\n site_values, values,\n theme_site=False):\n # Get the global (default) values for various things we're interested in.\n defs = {}\n names = ['posts_fs', 'posts_per_page', 'date_format',\n 'default_post_layout', 'post_url', 'year_url']\n for n in names:\n defs[n] = get_dict_values(\n (site_values, 'site/%s' % n),\n (values, 'site/%s' % n))\n\n # More stuff we need.\n if is_only_blog:\n url_prefix = ''\n page_prefix = ''\n fs_endpoint = 'posts'\n data_endpoint = 'blog'\n item_name = 'post'\n tpl_func_prefix = 'pc'\n\n if theme_site:\n # If this is a theme site, show posts from a `sample` directory\n # so it's clearer that those won't show up when the theme is\n # actually applied to a normal site.\n fs_endpoint = 'sample/posts'\n else:\n url_prefix = blog_name + '/'\n page_prefix = blog_name + '/'\n data_endpoint = blog_name\n fs_endpoint = 'posts/%s' % blog_name\n item_name = try_get_dict_values(\n (site_values, '%s/item_name' % blog_name),\n (values, '%s/item_name' % blog_name),\n default=('%spost' % blog_name))\n tpl_func_prefix = try_get_dict_values(\n (site_values, '%s/func_prefix' % blog_name),\n (values, '%s/func_prefix' % blog_name),\n default=('pc%s' % blog_name))\n\n # Figure out the settings values for this blog, specifically.\n # The value could be set on the blog config itself, globally, or left at\n # its default. We already handle the \"globally vs. 
default\" with the\n # `defs` map that we computed above.\n blog_cfg = values.get(blog_name, {})\n blog_values = {}\n for n in names:\n blog_values[n] = blog_cfg.get(n, defs[n])\n\n posts_fs = blog_values['posts_fs']\n posts_per_page = blog_values['posts_per_page']\n date_format = blog_values['date_format']\n default_layout = blog_values['default_post_layout']\n post_url = '/' + url_prefix + blog_values['post_url'].lstrip('/')\n year_url = '/' + url_prefix + blog_values['year_url'].lstrip('/')\n\n year_archive = 'pages:%s_year.%%ext%%' % page_prefix\n if not theme_site:\n theme_year_page = try_get_dict_values(\n (site_values, 'site/theme_year_page'),\n (values, 'site/theme_year_page'))\n if theme_year_page:\n year_archive += ';' + theme_year_page\n\n cfg = collections.OrderedDict({\n 'site': collections.OrderedDict({\n 'sources': collections.OrderedDict({\n blog_name: collections.OrderedDict({\n 'type': 'posts/%s' % posts_fs,\n 'fs_endpoint': fs_endpoint,\n 'data_endpoint': data_endpoint,\n 'item_name': item_name,\n 'ignore_missing_dir': True,\n 'data_type': 'blog',\n 'items_per_page': posts_per_page,\n 'date_format': date_format,\n 'default_layout': default_layout\n })\n }),\n 'generators': collections.OrderedDict({\n ('%s_archives' % blog_name): collections.OrderedDict({\n 'type': 'blog_archives',\n 'source': blog_name,\n 'page': year_archive\n })\n }),\n 'routes': [\n {\n 'url': post_url,\n 'source': blog_name,\n 'func': ('%sposturl' % tpl_func_prefix)\n },\n {\n 'url': year_url,\n 'generator': ('%s_archives' % blog_name),\n 'func': ('%syearurl' % tpl_func_prefix)\n }\n ]\n })\n })\n\n # Add a generator and a route for each taxonomy.\n taxonomies_cfg = try_get_dict_values(\n (site_values, 'site/taxonomies'),\n (values, 'site/taxonomies'),\n default={}).copy()\n for tax_name, tax_cfg in taxonomies_cfg.items():\n term = tax_cfg.get('term', tax_name)\n\n # Generator.\n page_ref = 'pages:%s_%s.%%ext%%' % (page_prefix, term)\n if not theme_site:\n theme_page_ref = try_get_dict_values(\n (site_values, 'site/theme_%s_page' % term),\n (values, 'site/theme_%s_page' % term))\n if theme_page_ref:\n page_ref += ';' + theme_page_ref\n tax_gen_name = '%s_%s' % (blog_name, tax_name)\n tax_gen = collections.OrderedDict({\n 'type': 'taxonomy',\n 'source': blog_name,\n 'taxonomy': tax_name,\n 'page': page_ref\n })\n cfg['site']['generators'][tax_gen_name] = tax_gen\n\n # Route.\n tax_url_cfg_name = '%s_url' % term\n tax_url = try_get_dict_values(\n (blog_cfg, tax_url_cfg_name),\n (site_values, 'site/%s' % tax_url_cfg_name),\n (values, 'site/%s' % tax_url_cfg_name),\n default=('%s/%%%s%%' % (term, term)))\n tax_url = '/' + url_prefix + tax_url.lstrip('/')\n tax_func_name = try_get_dict_values(\n (site_values, 'site/taxonomies/%s/func_name' % tax_name),\n (values, 'site/taxonomies/%s/func_name' % tax_name),\n default=('%s%surl' % (tpl_func_prefix, term)))\n tax_route = collections.OrderedDict({\n 'url': tax_url,\n 'generator': tax_gen_name,\n 'taxonomy': tax_name,\n 'func': tax_func_name\n })\n cfg['site']['routes'].append(tax_route)\n\n return cfg\n\n","sub_path":"piecrust/appconfigdefaults.py","file_name":"appconfigdefaults.py","file_ext":"py","file_size_in_byte":9568,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"37330622","text":"from pyramid.view import (\n view_config,\n view_defaults,\n)\n\nfrom ....models import DBSession\nfrom ....models.trusted import Trusted\n\n\n@view_defaults(\n route_name='admin_trusted_list',\n permission='admin',\n 
layout='admin',\n)\nclass AdminTrustedListView(object):\n def __init__(self, request):\n self.request = request\n\n @view_config(\n renderer='admin/trusted/list.html',\n request_method='GET'\n )\n def get(self):\n trusted = DBSession.query(Trusted).all()\n\n return {\n 'trusted': trusted,\n }\n","sub_path":"mts/views/admin/trusted/list.py","file_name":"list.py","file_ext":"py","file_size_in_byte":565,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"185744990","text":"from MultiDark import *\nbox = MultiDarkSimulation(Lbox=2500.0 * uu.Mpc,wdir=\"/data2/DATA/eBOSS/Multidark-lightcones\", boxDir = \"MD_2.5Gpc\",snl = n.array(glob.glob(join(\"/data2/DATA/eBOSS/Multidark-lightcones\" , \"MD_2.5Gpc\", \"snapshots\" , \"hlist_?.?????.list\"))) ,zsl = None,zArray = n.arange(0.2,2.4,1e-1),Hbox = 67.77 * uu.km / (uu.s * uu.Mpc),Melement = 23593750000.0)\n\nfor ii in n.arange(len(box.snl))[45:]:\n\tbox.computeSingleDistributionFunction(ii,'Vpeak', n.arange(0,3.5,0.01))\n\tbox.combinesSingleDistributionFunction(ii,'Vpeak', n.arange(0,3.5,0.01),type = \"Central\")\n\tbox.combinesSingleDistributionFunction(ii,'Vpeak', n.arange(0,3.5,0.01),type = \"Satellite\")\n\n\nimport sys\nsys.exit()\n\n\nnames=n.array([\"M200b\",\"M200c\",\"M2500c\",\"M500c\",\"Macc\",\"mvir\"])\nfor qty in names :\n for ii in n.arange(len(box.snl)):\n box.combinesSingleDistributionFunction(ii, qty, 10**n.arange(8,16,0.01),type = \"Central\")\n box.combinesSingleDistributionFunction(ii, qty, 10**n.arange(8,16,0.01),type = \"Satellite\")\n\nnames=n.array([\"Vacc\",\"vmax\"])\nfor qty in names :\n for ii in n.arange(len(box.snl)):\n box.combinesSingleDistributionFunction(ii, qty, 10**n.arange(0,4.5,0.01),type = \"Central\")\n box.combinesSingleDistributionFunction(ii, qty, 10**n.arange(0,4.5,0.01),type = \"Satellite\")\n","sub_path":"bin/bin_MD/v0/analyze25box-vpeak.py","file_name":"analyze25box-vpeak.py","file_ext":"py","file_size_in_byte":1303,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"633928950","text":"# Archivo creado para almanecar todas las operaciones\r\n#matematicas usadas en el documento gl.py\r\n\r\n#Variables de prueba\r\n\r\nimport numpy as np\r\nimport struct\r\nfrom math import cos, sin, pi, tan\r\nfrom collections import namedtuple\r\nV4 = namedtuple('Point4', ['x', 'y', 'z', 'w'])\r\nV3 = namedtuple('Point3', ['x', 'y', 'z'])\r\n\r\na = [1,2,3]\r\nb = [4,5,6]\r\nmatrix = [1,2,3,4,5,6,7,8,9,10]\r\nmatrix1 = [2,3,4,5,6,7,8,9,10,11]\r\n\r\n\r\ndef dotProduct(a,b):\r\n dotProduct = 0\r\n for x,y in zip(a,b):\r\n dotProduct = dotProduct + (x*y)\r\n return dotProduct\r\n\r\n #print(dotProduct)\r\n\r\n#dotProduct(a,b)\r\n\r\n#Funcion para sumar vectores de 3 elementos\r\n#Notas: Los vectores con una mayor cantidad de elementos, ejemplo:\r\n# a = [1,2,3,4]\r\n# El elemento 4 sera ignorado\r\n\r\ndef sumVectors(a,b):\r\n sub1 = a[0] + b[0]\r\n sub2 = a[1] + b[1]\r\n sub3 = a[2] + b[2]\r\n sub = V3(sub1,sub2,sub3)\r\n\r\n return sub\r\n\r\n # print(i)\r\n\r\n#sumVectors(a,b)\r\n\r\n\r\ndef subVectors(a,b):\r\n sub1 = a[0] - b[0]\r\n sub2 = a[1] - b[1]\r\n sub3 = a[2] - b[2]\r\n sub = V3(sub1,sub2,sub3)\r\n\r\n return sub\r\n #print(sub)\r\n\r\n#subVectors(a,b)\r\n\r\ndef crossProduct(a,b):\r\n #A × B = (bz – cy)i + (cx – az)j + (ay – bx)k\r\n\r\n crossProduct1 = ((a[1])*(b[2])) - ((a[2])*(b[1]))\r\n crossProduct2 = ((a[2])*(b[0])) - ((a[0])*(b[2]))\r\n crossProduct3 = ((a[0])*(b[1])) - ((a[1])*(b[0]))\r\n\r\n 
crossProduct = V3(crossProduct1 , crossProduct2 , crossProduct3)\r\n\r\n return crossProduct\r\n\r\n #print(np.array(crossProduct))\r\n\r\n#crossProduct(a,b)\r\n\r\n#def normalize(a):\r\n# max_valueA = max(a)\r\n# min_valueA = min(a)\r\n# for i in range(0, len(a)):\r\n# a[i] = (a[i] - min_valueA) / (max_valueA - min_valueA)\r\n#\r\n# return a\r\n\r\n#def normalize(a):\r\n# for i in range(0, len(a)):\r\n# sqrt = (a[i])**2\r\n# result = (sqrt)**0.5\r\n# return result#, print(result)\r\n\r\n\r\n#norm(a)\r\n#b = np.linalg.norm(a)\r\n#print(b)\r\n#normalize_list(a)\r\n\r\ndef createMatrix(row, col, listOfLists, multi = 1):\r\n matrix = []\r\n for i in range(row):\r\n\r\n rowList = []\r\n for j in range(col):\r\n\r\n # you need to increment through dataList here, like this:\r\n rowList.append((listOfLists[row * i + j]) * multi)\r\n\r\n matrix.append(rowList)\r\n\r\n return matrix\r\n\r\n#def createMatrix(row, col, listOfLists):\r\n# matrix = []\r\n# for i in range(row):\r\n#\r\n# rowList = []\r\n# for j in range(col):\r\n#\r\n# # you need to increment through dataList here, like this:\r\n# rowList.append(listOfLists[row * i + j])\r\n#\r\n# matrix.append(rowList)\r\n#\r\n#\r\n#\r\n# #for i in range(len(matrix)):\r\n# # for j in range(len(matrix[0])):\r\n# # print('%3d'%matrix[i][j],end='')\r\n# # print()\r\n# return matrix#,print(matrix)\r\n\r\n\r\n#createMatrix(3,4,matrix)\r\n\r\n\r\ndef deg2rads(degNum):\r\n radNum = (degNum * 3.1415926535897932384626433)/180\r\n #print(radNum)\r\n return radNum\r\n\r\n\r\ndef vecMatrix(vector, matrix):\r\n matrix1Row = len(matrix)\r\n matrixColumns = len(matrix[0])\r\n newVector = []\r\n for y in range(matrix1Row):\r\n newNumber = 0\r\n vectorCol = 0\r\n for x in range(matrixColumns):\r\n #print(Matrix[y][x], Vector[vectorCol])\r\n newNumber = (matrix[y][x] * vector[vectorCol]) + newNumber\r\n vectorCol += 1\r\n newVector.append(newNumber)\r\n return newVector#, print(newVector)\r\n\r\n#hola = [[1,0,3,4],\r\n# [3,1,2,1],\r\n# [2,3,1,5],\r\n# [6,0,3,1]]\r\n#hola5 = V4(7,9,11,2)\r\n\r\n#createMatrix(3,4,matrix)\r\n#multiVecMatrix(hola5,hola)\r\n\r\n\r\ndef matrixMultiplication (Matrix, Matrix2):\r\n matrix1Row = len(Matrix)\r\n matrix2RowLimit = len(Matrix2[0])\r\n newMatrix = []\r\n for y in range(matrix1Row):\r\n newRow = []\r\n matrix2Row = 0\r\n matrix2Col = len(Matrix2)\r\n column1 = 0\r\n for x in range(matrix1Row):\r\n for i in range(matrix2Col):\r\n #print(Matrix[y][(x+i) % matrix2Col], Matrix2[(x+i) % matrix2Col][matrix2Row])\r\n column1 = (Matrix[y][(x+i) % matrix2Col] * Matrix2[(x+i) % matrix2Col][matrix2Row]) + column1\r\n #print(column1)\r\n if matrix2RowLimit == 1:\r\n newMatrix.append(column1)\r\n break\r\n matrix2Row += 1\r\n newRow.append(column1)\r\n column1 = 0\r\n if matrix2RowLimit != 1:\r\n newMatrix.append(newRow)\r\n #print(newMatrix)\r\n return newMatrix\r\n\r\n#\r\n#def multyMatrix4X4 (Matrix, Matrix2):\r\n# matrix1Row = len(Matrix)\r\n# matrix1Col = len(Matrix[0])\r\n# newMatrix = []\r\n# for y in range(matrix1Col):\r\n# newRow = []\r\n# matrix2Row = 0\r\n# column1 = 0\r\n# column2 = 0\r\n# column3 = 0\r\n# column4 = 0\r\n# for x in range(matrix1Row):\r\n# column1 = (Matrix[y][x] * Matrix2[x][matrix2Row]) + column1\r\n# column2 = (Matrix[y][x] * Matrix2[x][matrix2Row + 1]) + column2\r\n# column3 = (Matrix[y][x] * Matrix2[x][matrix2Row + 2]) + column3\r\n# column4 = (Matrix[y][x] * Matrix2[x][matrix2Row + 3]) + column4\r\n# newRow.extend([column1, column2, column3, column4])\r\n# newMatrix.append(newRow)\r\n# return newMatrix, 
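# A small self-check, assuming plain tuples as vectors, validating the hand-rolled
# dot and cross product helpers from the mathLibraries.py record above against numpy.
import numpy as np

def cross_product(a, b):
    return (a[1]*b[2] - a[2]*b[1], a[2]*b[0] - a[0]*b[2], a[0]*b[1] - a[1]*b[0])

a, b = (1, 2, 3), (4, 5, 6)
assert cross_product(a, b) == tuple(np.cross(a, b))            # (-3, 6, -3)
assert sum(x * y for x, y in zip(a, b)) == int(np.dot(a, b))   # 32
print("helpers agree with numpy")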
print(newMatrix)\r\n#\r\n#hola2 = [[1,3,0,0],\r\n# [0,1,6,7],\r\n# [0,3,1,0],\r\n# [3,0,4,1]]\r\n#\r\n#\r\n##multyMatrix4X4(hola2,hola2)\r\n\r\n\r\ndef transposeMatrix(m):\r\n return map(list,zip(*m))\r\n\r\ndef getMatrixMinor(m,i,j):\r\n return [row[:j] + row[j+1:] for row in (m[:i]+m[i+1:])]\r\n\r\ndef getMatrixDeternminant(m):\r\n #base case for 2x2 matrix\r\n if len(m) == 2:\r\n return m[0][0]*m[1][1]-m[0][1]*m[1][0]\r\n\r\n determinant = 0\r\n for c in range(len(m)):\r\n determinant += ((-1)**c)*m[0][c]*getMatrixDeternminant(getMatrixMinor(m,0,c))\r\n return determinant\r\n\r\ndef getMatrixInverse(m):\r\n determinant = getMatrixDeternminant(m)\r\n #special case for 2x2 matrix:\r\n if len(m) == 2:\r\n return [[m[1][1]/determinant, -1*m[0][1]/determinant],\r\n [-1*m[1][0]/determinant, m[0][0]/determinant]]\r\n\r\n #find matrix of cofactors\r\n cofactors = []\r\n for r in range(len(m)):\r\n cofactorRow = []\r\n for c in range(len(m)):\r\n minor = getMatrixMinor(m,r,c)\r\n cofactorRow.append(((-1)**(r+c)) * getMatrixDeternminant(minor))\r\n cofactors.append(cofactorRow)\r\n cofactors = transposeMatrix(cofactors)\r\n for r in range(len(cofactors)):\r\n for c in range(len(cofactors)):\r\n cofactors[r][c] = cofactors[r][c]/determinant\r\n # cofactors[r][c] = cofactors[r][c]/determinant\r\n return cofactors\r\n\r\n#def pi():\r\n# pi = 3.1415926535897932384626433\r\n# return pi\r\n\r\n#pi()\r\n\r\ndef transpose(matrix):\r\n rows = len(matrix)\r\n columns = len(matrix[0])\r\n\r\n matrix_T = []\r\n for j in range(columns):\r\n row = []\r\n for i in range(rows):\r\n row.append(matrix[i][j])\r\n matrix_T.append(row)\r\n\r\n return matrix_T\r\n\r\n#Obtiene la determinate de una matriz 3X3\r\ndef det(matrix):\r\n rows = len(matrix)\r\n columns = len(matrix[0])\r\n newMatrix = []\r\n for y in range(rows):\r\n newRow = []\r\n for x in range(columns):\r\n if x == 2:\r\n newRow.extend([matrix[y][x], matrix[y][(x + 1) % columns], matrix[y][(x + 2) % columns]])\r\n break\r\n newRow.append(matrix[y][x])\r\n newMatrix.append(newRow)\r\n diagonal1 = 0\r\n diagonal2 = 0\r\n for x in range(columns):\r\n diagonal1 = (newMatrix[0][x] * newMatrix[1][x+1] * newMatrix[2][x+2]) + diagonal1\r\n diagonal2 = -(newMatrix[0][x+2] * newMatrix[1][x+1] * newMatrix[2][x]) + diagonal2\r\n determinante = diagonal1 + diagonal2\r\n return determinante\r\n\r\ndef inverse(Matrix):\r\n newMatrix = transpose(Matrix)\r\n row = len(Matrix[0])\r\n column = len(Matrix)\r\n determinant = 0\r\n cofactorList = []\r\n for y in range(row):\r\n exponent1 = y + 1\r\n for x in range(column):\r\n exponent2 = x + 1\r\n exponentT = exponent2 + exponent1\r\n cofactorM = []\r\n if y == 0:\r\n detM = []\r\n verificador = False\r\n for i in range(row):\r\n if y == 0:\r\n rowDe = [] \r\n rowCo = []\r\n for k in range(column):\r\n if i != y and x != k:\r\n verificador = True\r\n rowCo.append(newMatrix[i][k])\r\n if y == 0:\r\n rowDe.append(Matrix[i][k])\r\n if verificador:\r\n if y == 0:\r\n detM.append(rowDe)\r\n cofactorM.append(rowCo)\r\n verificador = False\r\n deter = ((-1) ** exponentT) * det(cofactorM)\r\n cofactorList.append(deter)\r\n if y == 0: \r\n deter2 = ((-1) ** exponentT) * det(detM)\r\n determinant = (Matrix[y][x] * deter2) + determinant\r\n Inverse = createMatrix(4, 4, cofactorList, (1/determinant))\r\n return Inverse\r\n\r\n\r\ndef length(v0):\r\n return (v0.x**2 + v0.y**2 + v0.z**2)**0.5\r\n\r\ndef norm(v0):\r\n v0length = length(v0)\r\n if not v0length:\r\n return V3(0, 0, 0)\r\n\r\n return V3(v0.x/v0length, v0.y/v0length, 
v0.z/v0length)\r\n\r\ndef Kmul(v0, k):\r\n \r\n return V3(v0.x * k, v0.y * k, v0.z *k)\r\n\r\ndef firstItemFunction(a):\r\n firstInternalList = a[0]\r\n firstItem = firstInternalList[2]\r\n\r\n\r\n return firstItem\r\n\r\ndef secondItemFunction(a):\r\n secondInternalList = a[1]\r\n\r\n secondtItem = secondInternalList[2]\r\n\r\n return secondtItem\r\n\r\ndef thirdItemFunction(a):\r\n thirdInternalList = a[2]\r\n\r\n thirdItem = thirdInternalList[2]\r\n\r\n return thirdItem\r\n\r\ndef fourthItemFunction(a):\r\n fourthInternalLists = a[3]\r\n\r\n fourthItem = fourthInternalLists[2]\r\n\r\n return fourthItem\r\n\r\n\r\n#def arrayCreation(v0):\r\n# #counter = 3\r\n# #length = length(v0)\r\n#\r\n# firstElement = v0(0)\r\n# secondElement = v0(1)\r\n# thirdElement = v0(2)\r\n#\r\n# listOfElements = [firstElement, secondElement, thirdElement]\r\n#\r\n# return listOfElements, print(listOfElements)\r\n#\r\n#arrayd = V3(0,1,0)\r\n#\r\n#\r\n#\r\n#arrayCreation(arrayd)\r\n","sub_path":"Engine3D-main/mathLibraries.py","file_name":"mathLibraries.py","file_ext":"py","file_size_in_byte":10278,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"453569836","text":"\r\n\r\n# https://leetcode.com/problems/height-checker/\r\n\r\nclass Solution:\r\n def heightChecker(self, heights):\r\n new_order = list(heights)\r\n heights.sort()\r\n moves = 0\r\n for i in range(0,len(heights)):\r\n if heights[i] != new_order[i]:\r\n moves += 1\r\n return moves\r\n\r\nprint(Solution().heightChecker([5,1,2,3,4]))","sub_path":"Leet_code/Easy/heightChecker.py","file_name":"heightChecker.py","file_ext":"py","file_size_in_byte":370,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"509097093","text":"# Save the input in this variable\nticket = int(input())\n\n# Add up the digits for each half\nfstHalf = ticket // 1000\nhalf1 = fstHalf % 10\nfstHalf //= 10\nhalf1 += fstHalf % 10\nfstHalf //= 10\nhalf1 += fstHalf % 10\n\nsndHalf = ticket % 1000\nhalf2 = sndHalf % 10\nsndHalf //= 10\nhalf2 += sndHalf % 10\nsndHalf //= 10\nhalf2 += sndHalf % 10\n\n# Thanks to you, this code will work\nif half1 == half2:\n print(\"Lucky\")\nelse:\n print(\"Ordinary\")\n","sub_path":"Topics/Indexes/Lucky ticket/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":435,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"291894245","text":"def calculate_indirect_orbits(active_object, orbits):\n object_above = \"\"\n counter = -1\n while object_above != \"COM\":\n\n for idx, item in enumerate(orbits):\n if active_object == item[1]:\n counter += 1\n object_above = item[0]\n active_object = item[0]\n return counter\n\n\nobjects = []\nwith open(\"../input\", \"r\") as input_file:\n input_file = input_file.read()\n input_file = input_file.strip().split(\"\\n\")\n for idx, item in enumerate(input_file):\n input_file[idx] = item.split(\")\")\n objects.append(input_file[idx][1])\n\ndirect_orbits = idx + 1\nindirect_orbits = 0\n\nfor item in objects:\n indirect_orbits += calculate_indirect_orbits(item, input_file)\n\nprint(indirect_orbits + direct_orbits)\n\n","sub_path":"Day_06/Day_06.py","file_name":"Day_06.py","file_ext":"py","file_size_in_byte":783,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"126696470","text":"import numpy as np\nimport ray\nimport pystk\nfrom typing import Set\n\nclass Rollout(object):\n def __init__(self, config: 
pystk.GraphicsConfig = None):\n if config is None:\n config = pystk.GraphicsConfig.ld()\n config.screen_width = 128\n config.screen_height = 96\n pystk.init(config)\n self.config = None\n self.race = None\n self.track = None\n self.map = None\n \n def __del__(self):\n if self.race is not None:\n self.race.stop()\n del self.race\n pystk.clean()\n\n def start(self, config: pystk.RaceConfig = None, track_name='lighthouse'):\n \n if config is None:\n config = pystk.RaceConfig()\n config.players[0].controller = pystk.PlayerConfig.Controller.PLAYER_CONTROL\n config.track = track_name\n config.step_size = 0.1\n\n if self.race is not None:\n self.race.stop()\n del self.race\n\n self.config = config\n\n self.race = pystk.Race(config)\n self.race.start()\n\n self.track = pystk.Track()\n self.track.update()\n\n\n def stop(self):\n if self.race is not None:\n self.race.stop()\n del self.race\n\n self.config = None\n self.race = None\n self.track = None\n\n def rollout(self, drive, reward_func, max_step: float = 1200, restart: bool = True):\n \"\"\"\n :param return_data: what data should we return? 'action', 'image', 'next_image', 'done', 'reward'\n :return:\n \"\"\"\n \n \n import collections\n Data = collections.namedtuple('Data', 'action image reward done next_image')\n assert self.race is not None, \"You need to start the case before the rollout\"\n\n if restart:\n self.race.restart()\n \n self.race.step()\n\n action = pystk.Action()\n result = []\n \n state = pystk.WorldState()\n state.update()\n\n i = 1*np.asarray(self.race.render_data[0].image)\n\n total_reward = 0.\n \n for it in range(max_step):\n \n state = pystk.WorldState()\n state.update()\n \n a = drive(i)\n \n self.race.step(a)\n \n next_i = 1*np.asarray(self.race.render_data[0].image)\n\n d = (it+1 == max_step) # Feel free to experiment with this\n r = reward_func(state)\n total_reward += r\n \n result.append(Data(image=i, action=a, reward=r, next_image=next_i, done=d))\n \n i = next_i\n\n if d:\n break\n \n return result, total_reward\n\n\n@ray.remote\nclass RayRollout(Rollout):\n pass\n\ndef get_rollouts(num_workers, drive, reward_func, iterations=10, track_name='lighthouse'):\n \n rollouts = []\n starts = []\n \n for i in range(num_workers):\n rollout = RayRollout.remote()\n starts.append(rollout.start.remote(track_name=track_name))\n rollouts.append(rollout.rollout.remote(\n drive, reward_func, max_step=1000,\n ))\n\n ray.get(starts)\n\n trajectories = []\n rewards = []\n \n for _ in range(iterations):\n ro = ray.get(rollouts)\n \n for traj, reward in ro:\n trajectories.extend(traj)\n rewards.append(reward)\n\n return trajectories, rewards\n\n\nif __name__ == '__main__':\n reward_func = lambda s: np.linalg.norm(s.karts[0].velocity)\n agent = lambda i: pystk.Action(steer=np.random.uniform(low=-1,high=1), acceleration=1)\n \n ray.init(logging_level=40)\n \n trajs, rewards = get_rollouts(4, agent, reward_func)\n \n print ('Average episode reward: %.2f ± %.2f'%(np.mean(rewards), np.std(rewards)))\n","sub_path":"project/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3807,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"142399633","text":"from abc import abstractmethod, ABCMeta\n\nimport csv\nfrom datetime import datetime\n\nimport funcy as fy\n\nfrom OnePy.barbase import Current_bar, Bar\nfrom OnePy.event import events, MarketEvent\n\n\nclass FeedMetabase(metaclass=ABCMeta):\n dtformat = \"%Y-%m-%d %H:%M:%S\"\n tmformat = \"%H:%M:%S\"\n timeindex = None\n\n def 
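# A tiny illustration (assumed, independent of pystk) of the @ray.remote actor pattern
# the utils.py rollout code above relies on: .remote() schedules work and returns an
# object ref, and ray.get() blocks until the result is ready.
import ray

@ray.remote
class Counter:
    def __init__(self):
        self.n = 0

    def add(self, k):
        self.n += k
        return self.n

ray.init(logging_level=40)
c = Counter.remote()             # starts the actor process
print(ray.get(c.add.remote(3)))  # 3
ray.shutdown()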
__init__(self, instrument, fromdate, todate):\n self.instrument = instrument\n self.fromdate = fromdate\n self.todate = todate\n\n self.cur_bar = Current_bar()\n # self.bar_dict = {self.instrument: []}\n self.bar = Bar(instrument)\n self.preload_bar_list = []\n self.continue_backtest = True\n\n # 以下变量会被初始化\n self._per_comm = None\n self._commtype = None\n self._mult = None\n self._per_margin = None\n self._executemode = None\n self._trailingstop_executemode = None\n\n self._iteral_buffer = None\n self._buffer_days = None\n self._iteral_data = None\n\n def set_per_comm(self, value):\n self._per_comm = value\n\n def set_commtype(self, value):\n self._commtype = value\n\n def set_mult(self, value):\n self._mult = value\n\n def set_per_margin(self, value):\n self._per_margin = value\n\n def set_executemode(self, value):\n self._executemode = value\n\n def set_trailingstop_executemode(self, value):\n self._trailingstop_executemode = value\n\n def set_iteral_buffer(self, value):\n self._iteral_buffer = value\n\n def set_buffer_days(self, value):\n self._buffer_days = value\n\n @property\n def per_comm(self):\n return self._per_comm\n\n @property\n def commtype(self):\n return self._commtype\n\n @property\n def mult(self):\n return self._mult\n\n @property\n def per_margin(self):\n return self._per_margin\n\n @property\n def executemode(self):\n return self._executemode\n\n @property\n def trailingstop_executemode(self):\n return self._trailingstop_executemode\n\n @property\n def iteral_buffer(self):\n return self._iteral_buffer\n\n @property\n def buffer_days(self):\n return self._buffer_days\n\n @abstractmethod\n def load_data(self):\n \"\"\"读取数据\"\"\"\n raise NotImplementedError(\"load_data shold be overrided\")\n\n @abstractmethod\n def get_new_bar(self):\n \"\"\"获得新行情\"\"\"\n raise NotImplementedError(\"get_new_bar shold be overrided\")\n\n @abstractmethod\n def preload(self):\n \"\"\"为indicator缓存数据\"\"\"\n raise NotImplementedError(\"preload shold be overrided\")\n\n def run_once(self):\n \"\"\"先load一次,以便cur_bar能够缓存两条数据\"\"\"\n self._iteral_data = self.load_data()\n self.get_new_bar()\n self.preload() # preload for indicator\n\n def __update_bar(self):\n \"\"\"更新行情\"\"\"\n self.bar.set_instrument(self.instrument)\n self.bar.add_new_bar(self.cur_bar.cur_data)\n\n def start(self):\n pass\n\n def prenext(self):\n self.get_new_bar()\n\n def next(self):\n self.__update_bar()\n events.put(MarketEvent(self))\n\n\nclass CSVFeedBase(FeedMetabase):\n \"\"\"自动识别CSV数据中有open,high,low,close,volume数据,但要说明日期格式\"\"\"\n dtformat = \"%Y-%m-%d %H:%M:%S\"\n tmformat = \"%H:%M:%S\"\n timeindex = None\n\n def __init__(self, datapath, instrument, fromdate=None, todate=None):\n super(CSVFeedBase, self).__init__(instrument, fromdate, todate)\n\n self.datapath = datapath\n self.__set_date()\n\n def __set_date(self):\n \"\"\"将日期转化为datetime对象\"\"\"\n if self.fromdate:\n self.fromdate = datetime.strptime(self.fromdate, \"%Y-%m-%d\")\n if self.todate:\n self.todate = datetime.strptime(self.todate, \"%Y-%m-%d\")\n\n def __set_dtformat(self, bar):\n \"\"\"识别日期\"\"\"\n date = bar[\"date\"]\n dt = \"%Y-%m-%d %H:%M:%S\"\n if self.timeindex:\n date = datetime.strptime(str(date), self.dtformat).strftime(\"%Y-%m-%d\")\n return date + \" \" + bar[self.timeindex.lower()]\n else:\n return datetime.strptime(str(date), self.dtformat).strftime(dt)\n\n def get_new_bar(self):\n def __update():\n new_bar = next(self._iteral_data)\n new_bar = fy.walk_keys(lambda x: x.lower(), new_bar)\n new_bar[\"date\"] = self.__set_dtformat(new_bar)\n\n 
for i in new_bar:\n try:\n new_bar[i] = float(new_bar[i]) # 将数值转化为float\n except ValueError:\n pass\n return new_bar\n\n try:\n new_bar = __update()\n # 日期范围判断\n dt = \"%Y-%m-%d %H:%M:%S\"\n if self.fromdate:\n while datetime.strptime(new_bar[\"date\"], dt) < self.fromdate:\n new_bar = __update()\n if self.todate:\n while datetime.strptime(new_bar[\"date\"], dt) > self.todate:\n raise StopIteration\n\n self.cur_bar.add_new_bar(new_bar)\n\n except StopIteration:\n self.continue_backtest = False # stop backtest\n\n def load_data(self):\n return csv.DictReader(open(self.datapath))\n\n def preload(self):\n \"\"\"\n 只需运行一次,先将fromdate前的数据都load到preload_bar_list\n 若没有fromdate,则不用load\n \"\"\"\n self.set_iteral_buffer(self.load_data()) # for indicator\n\n def _update():\n bar = next(self.iteral_buffer)\n bar = fy.walk_keys(lambda x: x.lower(), bar)\n bar[\"date\"] = self.__set_dtformat(bar)\n\n for i in bar:\n try:\n bar[i] = float(bar[i]) # 将数值转化为float\n except ValueError:\n pass\n return bar\n\n try:\n bar = _update()\n # 日期范围判断\n dt = \"%Y-%m-%d %H:%M:%S\"\n if self.fromdate:\n while datetime.strptime(bar[\"date\"], dt) < self.fromdate:\n bar = _update()\n self.preload_bar_list.append(bar)\n else:\n self.preload_bar_list.pop(-1) # 经过验证bug检查的,最后删除掉一个重复\n\n elif self.fromdate is None:\n pass\n else:\n raise SyntaxError(\"Catch a Bug!\")\n\n except IndexError:\n pass\n\n except StopIteration:\n print(\"???\")\n\n self.preload_bar_list.reverse()\n","sub_path":"OnePy/feeds/feedbase.py","file_name":"feedbase.py","file_ext":"py","file_size_in_byte":6627,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"182291761","text":"# -*- coding: utf-8 -*-\r\n# @Time : 2021/6/29 9:51\r\n# @Author : Jinwen Wang\r\n# @Email : jw4013@columbia.edu\r\n# @File : HV_Percentile.py\r\n# @Software: PyCharm\r\n\r\nimport numpy as np\r\nimport pandas as pd\r\nfrom zltsql import SQLConn\r\n\r\n#%%\r\ndef historical_percentile(data, code):\r\n data = data[data['Code'] == code].reset_index(drop=True)\r\n\r\n def get_percentile_data(values):\r\n # if len(values) <= 2:\r\n # return values.min(), np.percentile(values, 25), np.percentile(values, 50), np.percentile(values, 75), values.max()\r\n last_5 = np.percentile(values.dropna(), 5)\r\n top_5 = np.percentile(values.dropna(), 95)\r\n #exclude top5 and last5\r\n values1 = values[(values > last_5) & (values < top_5)]\r\n if len(values1) == 0:\r\n return values.min(), np.percentile(values, 10), np.percentile(values, 25), np.percentile(values, 50), np.percentile(values, 75), values.max()\r\n else: \r\n values = values1[(values1 > last_5) & (values1 < top_5)]\r\n _min = values1.min()\r\n _max = values1.max()\r\n _25 = np.percentile(values1, 25)\r\n _50 = np.percentile(values1, 50)\r\n _75 = np.percentile(values1, 75)\r\n return _min, _25, _50, _75, _max\r\n\r\n res = pd.DataFrame(data[['Date', 'Code']])\r\n hv_types = [x for x in data.columns if x not in ['Date', 'Code']]\r\n for i in hv_types:\r\n for idx in range(len(res)):\r\n _values = data[i][0:idx+1].copy()\r\n percentile_data = get_percentile_data(_values)\r\n res.at[idx, '%s_min' % i] = percentile_data[0]\r\n res.at[idx, '%s_25' % i] = percentile_data[1]\r\n res.at[idx, '%s_50' % i] = percentile_data[2]\r\n res.at[idx, '%s_75' % i] = percentile_data[3]\r\n res.at[idx, '%s_max' % i] = percentile_data[4]\r\n return res\r\n\r\ndef get_percentile_data(values, percentiles, eliminate_extremum=True):\r\n \"\"\"\r\n 获取values中的指定分位数,可选择是否剔除大于95%和小于5%的极值\r\n :param values: array or 
series\r\n :param percentiles: list, array or series\r\n :param eliminate_extremum: whether eliminate extreme values (top and low 5% values)\r\n :return: list that contains percentile values\r\n \"\"\"\r\n\r\n if len(values) == 0:\r\n return [np.nan]\r\n\r\n if eliminate_extremum:\r\n last_5 = np.percentile(values, 5)\r\n top_5 = np.percentile(values, 95)\r\n values1 = values[(values > last_5) & (values < top_5)]\r\n if len(values1) != 0:\r\n values = values1\r\n \r\n result = []\r\n for percentile in percentiles:\r\n result.append(np.percentile(values, percentile))\r\n return result\r\n\r\n#%%\r\nif __name__ == '__main__':\r\n '''\r\n #锦文原代码\r\n SQ = SQLConn()\r\n #df_hv = SQ.GetData('HV').dropna().reset_index(drop=True)\r\n df_hv = SQ.GetData('HV_percentile').dropna().reset_index(drop=True)\r\n SQ.CloseSql()\r\n \r\n percentile_50 = historical_percentile(df_hv, '510050.SH')\r\n percentile_300 = historical_percentile(df_hv, '510300.SH')\r\n df_percentile = pd.concat([percentile_50, percentile_300]).sort_values('Date').reset_index(drop=True)\r\n '''\r\n #锦文基础上跑了所有的分位数\r\n import datetime\r\n SQ = SQLConn()\r\n df_iv = SQ.GetData('df_vol_50etf').sort_values(by='日期').reset_index(drop=True)\r\n df_hv = SQ.GetData('HV_percentile').sort_values(by='Date').reset_index(drop=True)\r\n SQ.CloseSql()\r\n df_iv['Date'] = df_iv['日期'].dt.strftime('%Y%m%d')\r\n df_hv['Date'] = df_hv['Date'].dt.strftime('%Y%m%d')\r\n df_iv_insert = pd.read_excel('D:/Harper/实习文件整理_张依依/HV_percentile/iv_insert_50etf_0728.xlsx')\r\n df_iv_insert['Date'] = df_iv_insert['Date'].astype('str')\r\n df_result = pd.DataFrame({'Date':df_iv['Date']})\r\n percentile_lst = np.arange(0,110,10).tolist()\r\n for i in range(len(df_result)):\r\n date_today = df_result.at[i, 'Date']\r\n print(date_today)\r\n for kind in ['iv_insert','iv','HV5','HV10','HV20','HV40','HV60']:\r\n if kind == 'iv_insert':\r\n df = df_iv_insert\r\n col = kind\r\n elif kind == 'iv':\r\n df = df_iv\r\n col = 'iVIX'\r\n else:\r\n df = df_hv.loc[df_hv['Code']=='510050.SH']\r\n col = kind\r\n for p in percentile_lst: \r\n df_result.at[i, '%s_%s'%(kind,p)] = get_percentile_data(df.loc[(df['Date'] <= date_today), col].copy().dropna(), [p])[0]\r\n \r\n df_result.to_excel(\"D:/Harper/实习文件整理_张依依/HV_percentile/all_percentile_50etf_0728.xlsx\",index=False) \r\n \r\n\r\n ","sub_path":"王锦文/percentile/daily_gamma_signal/HV_Percentile.py","file_name":"HV_Percentile.py","file_ext":"py","file_size_in_byte":4705,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"113816231","text":"\"\"\"\nImplement a basic cnn with data augmentation on the data with cross validation\nand softmax activation layer\nAlso implement the evaluation stage at the end of each model training step\n\"\"\"\n\n# Necessary to make the run as consistent as possible\nfrom numpy.random import seed\n\nseed(1)\nfrom tensorflow import set_random_seed\n\nset_random_seed(2)\n\nimport logging\nimport sqlite3\nimport re\nfrom pathlib import Path\nimport os\nimport sys\nfrom datetime import datetime\nimport yaml\n\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout\nfrom keras.models import Sequential\nimport keras\nimport pandas\n\nfrom topaz3.training_models.plot_history import history_to_csv\nfrom topaz3.training_models.k_fold_boundaries import k_fold_boundaries\nfrom topaz3.evaluate_model import evaluate\n\n\nIMG_DIM = (201, 201)\n\n\ndef create_model(input_shape):\n \"\"\"Create and return 
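# A standalone usage sketch of get_percentile_data from the HV_Percentile.py record
# above (restated here, with assumed equivalent semantics, so the snippet runs on its
# own): the top and bottom 5% are trimmed before the requested percentiles are taken.
import numpy as np

def get_percentile_data(values, percentiles, eliminate_extremum=True):
    values = np.asarray(values, dtype=float)
    if len(values) == 0:
        return [np.nan]
    if eliminate_extremum:
        lo, hi = np.percentile(values, 5), np.percentile(values, 95)
        trimmed = values[(values > lo) & (values < hi)]
        if len(trimmed):
            values = trimmed
    return [np.percentile(values, p) for p in percentiles]

print(get_percentile_data(np.arange(1.0, 101.0), [25, 50, 75]))  # trimmed quartiles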
a new cnn, assuring that the weights have been reinitialised\"\"\"\n model = Sequential()\n\n model.add(\n Conv2D(16, kernel_size=(3, 3), activation=\"relu\", input_shape=input_shape)\n )\n model.add(MaxPooling2D(pool_size=(2, 2)))\n\n model.add(Conv2D(64, kernel_size=(3, 3), activation=\"relu\"))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n\n model.add(Conv2D(128, kernel_size=(3, 3), activation=\"relu\"))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n\n model.add(Flatten())\n model.add(Dense(512, activation=\"relu\"))\n model.add(Dropout(0.3))\n model.add(Dense(512, activation=\"relu\"))\n model.add(Dropout(0.3))\n model.add(Dense(2, activation=\"softmax\"))\n\n model.compile(\n loss=\"categorical_crossentropy\",\n optimizer=keras.optimizers.adam(lr=1e-5),\n metrics=[\"accuracy\"],\n )\n\n return model\n\n\ndef train(training_dir: str, database_file: str, test_dir: str, output_dir: str):\n # Load files\n training_dir_path = Path(training_dir)\n assert (\n training_dir_path.exists()\n ), f\"Could not find directory at {training_dir_path}\"\n train_files = [str(file) for file in training_dir_path.iterdir()]\n assert len(train_files) > 0, f\"Found no files in {training_dir_path}\"\n logging.info(f\"Found {len(train_files)} files for training\")\n\n # Initiate connection to the database\n try:\n conn = sqlite3.connect(database_file)\n except Exception:\n logging.error(f\"Could not connect to database at {database_file}\")\n raise\n\n # Read table into pandas dataframe\n data = pandas.read_sql(f\"SELECT * FROM ai_labels\", conn)\n data_indexed = data.set_index(\"Name\")\n\n # Strip the image number from the filename\n names = [re.findall(\"(.*)(?=_[0-9]+)\", Path(file).stem)[0] for file in train_files]\n train_labels = [data_indexed.at[name, \"Label\"] for name in names]\n\n # Prepare data generators to get data out\n train_datagen = ImageDataGenerator(\n rescale=1.0 / 255,\n horizontal_flip=True,\n vertical_flip=True,\n shear_range=0.3,\n height_shift_range=0.2,\n width_shift_range=0.2,\n fill_mode=\"wrap\",\n )\n validation_datagen = ImageDataGenerator(rescale=1.0 / 255)\n\n # Build model\n input_shape = (201, 201, 1)\n\n # Create training dataframe\n training_dataframe = pandas.DataFrame(\n {\"Files\": train_files, \"Labels\": [str(label) for label in train_labels]}\n )\n training_dataframe.set_index(\"Files\")\n training_data_shuffled = training_dataframe.sample(frac=1)\n\n # Create an output directory if it doesn't exist\n output_dir_path = Path(output_dir + \"_\" + datetime.now().strftime(\"%Y%m%d_%H%M\"))\n histories_path = output_dir_path / \"histories\"\n models_path = output_dir_path / \"models\"\n evaluations_path = output_dir_path / \"evaluations\"\n if not output_dir_path.exists():\n # Make one\n try:\n # Make directories\n os.mkdir(output_dir_path)\n os.mkdir(histories_path)\n os.mkdir(models_path)\n os.mkdir(evaluations_path)\n logging.info(f\"Created output directories at {output_dir_path}\")\n except Exception as e:\n logging.error(\n f\"Could not create directory at {output_dir_path}.\\n\"\n f\"Please check permissions and location.\"\n )\n logging.error(e)\n raise\n\n # Train the model k-fold number of times on different folds and record the output\n # Model run parameters\n k_folds = 5\n runs = 5\n epochs = 100\n batch_size = 50\n\n fold_boundaries = k_fold_boundaries(train_files, k_folds)\n for k in range(runs):\n logging.info(f\"Running cross validation set {k+1}\")\n\n # New model\n model = create_model(input_shape)\n\n # Separate the active training and validations set 
based on the fold boundaries\n active_training_set = pandas.concat(\n [\n training_data_shuffled[: fold_boundaries[k][0]],\n training_data_shuffled[fold_boundaries[k][1] :],\n ]\n )\n active_validation_set = training_data_shuffled[\n fold_boundaries[k][0] : fold_boundaries[k][1]\n ]\n\n logging.info(f\"Active training set of {len(active_training_set['Files'])}\")\n logging.info(f\"Active validation set of {len(active_validation_set['Files'])}\")\n\n # Create generators\n train_generator = train_datagen.flow_from_dataframe(\n active_training_set,\n x_col=\"Files\",\n y_col=\"Labels\",\n target_size=IMG_DIM,\n color_mode=\"grayscale\",\n shuffle=True,\n batch_size=batch_size,\n class_mode=\"categorical\",\n )\n\n val_generator = validation_datagen.flow_from_dataframe(\n active_validation_set,\n x_col=\"Files\",\n y_col=\"Labels\",\n target_size=IMG_DIM,\n color_mode=\"grayscale\",\n shuffle=True,\n batch_size=batch_size,\n class_mode=\"categorical\",\n )\n\n history = model.fit_generator(\n train_generator,\n steps_per_epoch=int((len(active_training_set[\"Files\"]) / batch_size)),\n epochs=epochs,\n validation_data=val_generator,\n validation_steps=(len(active_validation_set[\"Files\"]) / batch_size),\n use_multiprocessing=True,\n workers=8,\n )\n\n # Send history to csv\n history_to_csv(history, histories_path / f\"history_{k}.csv\")\n # Save model as h5\n model.save(str(models_path / f\"model_{k}.h5\"))\n\n # Make evaluation folder\n evaluation_dir_path = str(evaluations_path / f\"evaluation_{k}\")\n if not Path(evaluation_dir_path).exists():\n os.mkdir(evaluation_dir_path)\n evaluate(\n str(models_path / f\"model_{k}.h5\"),\n test_dir,\n database_file,\n evaluation_dir_path,\n )\n\n # Log the key information about the model and run\n key_info = {\n \"Epochs\": epochs,\n \"Folds\": k_folds,\n \"Runs\": runs,\n \"Training files (Total)\": len(train_files),\n \"Model\": model.get_config(),\n }\n with open(output_dir_path / \"info.yaml\", \"w\") as f:\n yaml.dump(key_info, f)\n\n\nif __name__ == \"__main__\":\n logging.basicConfig(level=logging.INFO)\n print(f\"{__file__}\")\n train(sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4])\n","sub_path":"topaz3/training_models/basic_cnn_aug_7.py","file_name":"basic_cnn_aug_7.py","file_ext":"py","file_size_in_byte":7407,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"546502539","text":"import mysql.connector\nfrom mysql.connector import errorcode\n\n\nclass DatabaseHandler(object):\n\n\tdef __init__(self, configuration):\n\t\tself.connection_pool = mysql.connector.pooling.MySQLConnectionPool(pool_name=\"pool\", pool_size=10, **configuration)\n\n\tdef insert_location(self, location):\n\t\ttry:\n\t\t\tconnection = self.connection_pool.get_connection()\n\t\texcept mysql.connector.Error as err:\n\t\t\tif err.errno == errorcode.ER_ACCESS_DENIED_ERROR:\n\t\t\t\tprint('Something is wrong with your user name or password')\n\t\t\telif err.errno == errorcode.ER_BAD_DB_ERROR:\n\t\t\t\tprint('Database does not exist')\n\t\t\telse:\n\t\t\t\tprint(err)\n\n\t\tcursor = connection.cursor()\n\n\t\tquery = ('INSERT INTO Location (city, country) VALUES (%s, %s)')\n\n\t\tquery_parameters = (location.city, location.country)\n\n\t\tcursor.execute(query, 
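# The basic_cnn record above imports k_fold_boundaries from topaz3; its source is not
# shown here, so this is only an assumed equivalent inferred from how the result is
# sliced in the record: k (start, end) index pairs marking contiguous validation folds.
def k_fold_boundaries(items, k):
    n = len(items)
    fold = n // k
    return [(i * fold, (i + 1) * fold if i < k - 1 else n) for i in range(k)]

files = [f"image_{i}.png" for i in range(10)]  # hypothetical filenames
print(k_fold_boundaries(files, 5))             # [(0, 2), (2, 4), (4, 6), (6, 8), (8, 10)]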
query_parameters)\n\n\t\tconnection.commit()\n\n\t\tcursor.close()\n\t\tconnection.close()\n","sub_path":"local-tourist-location-scraper/src/main/database_handler.py","file_name":"database_handler.py","file_ext":"py","file_size_in_byte":875,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"226171962","text":"#!/usr/bin/env python\n\nimport sys\nimport re\nimport json\n\n\nclass Mapper:\n\n    def __init__(self, stdin=sys.stdin, stdout=sys.stdout):\n        \"\"\"Init allows to redirect input and output, i.e. for testing purposes.\"\"\"\n        sys.stdin = stdin\n        sys.stdout = stdout\n\n    def run(self):\n        predicates = ['type.object.name','type.object.type','common.topic.alias']\n        re_ns = re.compile(r'ns/([\\w.]+)') # Regexp to match mid\n        re_str_ns = re.compile(r'\"(.+)\"@(.*?)$|ns/([\\w._]+)') # Regexp to match string value with lang id or reference\n\n        for line in sys.stdin:\n            triple = line.split(\"\\t\") # split triples\n            try:\n                k = re.search(re_ns, triple[0]).group(1) # get id\n                if k[:2] == \"g.\":\n                    # omit g. objects\n                    continue\n            except:\n                # if error occurs, skip line\n                continue\n            v = [None, None]\n            v[0] = re.search(re_ns, triple[1]) # get predicate\n            if not v[0]:\n                # on error, skip line\n                continue\n            v[0] = v[0].group(1)\n            if not v[0] in predicates:\n                # skip not desired predicates\n                continue\n            v[1] = re.search(re_str_ns, triple[2]) # get value or reference\n            if not v[1]:\n                # on error, skip line\n                continue\n            if not v[1].group(3) and v[1].group(2) != 'en':\n                # skip non-@en values\n                continue\n            else:\n                v[1] = v[1].group(1) or v[1].group(3)\n            if k[:2] == \"m.\":\n                # for \"m.\" objects output mid and value\n                print(\"%s\\t%s\" % (k, json.dumps(v)), file=sys.stdout, flush=True)\n            else:\n                if v[0] == \"type.object.name\":\n                    # for type objects output type id and name\n                    print(\"%s\\t%s\" % (k, json.dumps(v[1])), file=sys.stdout, flush=True)\n\nif __name__ == \"__main__\":\n    m = Mapper()\n    m.run()\n","sub_path":"python/src/extractor_json/mapper1.py","file_name":"mapper1.py","file_ext":"py","file_size_in_byte":2046,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"594364672","text":"import scrapy\nimport requests\nfrom replays.items import ReplayItem\n\n\nclass SpawningToolSpider(scrapy.Spider):\n    name = 'spawning-tool-spider'\n    start_urls = [\n        'http://lotv.spawningtool.com/replays/?p=&query=&after_time=&'\n        'before_time=&after_played_on=&before_played_on=&patch=&order_by='\n    ]\n\n    def parse(self, response):\n        for row in response.css('table tr'):\n            if row.css('td:last-child ::attr(href)').extract_first() is None:\n                continue\n            else:\n                url = 'http://lotv.spawningtool.com' \\\n                    + row.css('td:last-child ::attr(href)').extract_first()\n\n                request_response = requests.head(url)\n                if request_response.status_code == 302:\n                    url = request_response.headers[\"Location\"]\n\n                url = url.split('?')\n\n                yield ReplayItem(file_urls=[url[0]])\n\n        next_page = response.css('a.pull-right ::attr(href)').extract_first()\n        if next_page:\n            yield scrapy.Request(\n                response.urljoin(next_page), callback=self.parse\n            )\n","sub_path":"replays/replays/spiders/spawning-tool-spider.py","file_name":"spawning-tool-spider.py","file_ext":"py","file_size_in_byte":1124,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"62372164","text":"from pages.base_page import BasePage\nfrom .locators import MainPageLocators, CataloguePageLocators\nfrom selenium.common.exceptions import NoSuchElementException\n\n\nclass CataloguePage(BasePage):\n    CATALOGUE_PAGE_LINK = \"http://selenium1py.pythonanywhere.com/catalogue/\"\n\n    def __init__(self, browser):\n        # Set timeout to 0 since unavailable items are searched for on this page\n        # and exceptions are thrown \n        BasePage.__init__(self, browser, self.CATALOGUE_PAGE_LINK, 0)\n\n    def go_to_main_page(self):\n        self.browser.find_element(*CataloguePageLocators.BACK_TO_MAIN_PAGE_LINK_NAV).click()\n\n    def switch_to_books_category(self):\n        self.browser.find_element(*MainPageLocators.CATALOGUE_BOOKS_BUTTON).click()\n        assert \"books_2\" in self.browser.current_url, \\\n            \"Should be books category in browser url\"\n\n    def switch_to_clothing_category(self):\n        self.browser.find_element(*MainPageLocators.CATALOGUE_CLOTHING_BUTTON).click()\n        assert \"clothing_1\" in self.browser.current_url, \\\n            \"Should be clothing category in browser url\"\n\n    def get_items_availability(self):\n        all_items_on_page = self.browser.find_elements(*CataloguePageLocators.PRODUCT_POD_ARTICLE)\n        available_items, unavailable_items = [], []\n        for item in all_items_on_page:\n            try:\n                self.find_in_element(item, CataloguePageLocators.AVAILABLE_ITEM_TAG)\n            except NoSuchElementException:\n                unavailable_items.append(item)\n            else:\n                available_items.append(item)\n        return available_items, unavailable_items\n\n    def get_item_title_as_text(self, parent):\n        item_title = self.find_in_element(parent, CataloguePageLocators.ITEM_TITLE)\n        return item_title.text\n\n    def get_item_price_as_text(self, parent):\n        item_price = self.find_in_element(parent, CataloguePageLocators.ITEM_PRICE)\n        return item_price.text\n\n    def add_item_to_cart(self, parent):\n        self.find_in_element(parent, CataloguePageLocators.ADD_TO_CART_BUTTON).click()\n\n    def verify_item_added_to_cart(self, item_title, item_price):\n        item_in_basket_title_notif = self.browser.find_element(*CataloguePageLocators.ADDED_ITEM_TITLE)\n        item_in_basket_price_notif = self.browser.find_element(*CataloguePageLocators.ADDED_ITEM_PRICE)\n        assert item_title == item_in_basket_title_notif.text, \\\n            \"Item title should be correctly displayed in the basket\"\n        assert item_price in item_in_basket_price_notif.text, \\\n            \"Item price should be correctly displayed in the basket\"\n\n    def go_to_next_page(self, page_number: int):\n        for page in range(1, page_number):\n            self.browser.find_element(*CataloguePageLocators.NEXT_PAGE_BUTTON).click()\n            page += 1\n            assert f\"page={page}\" in self.browser.current_url, \\\n                \"Page number should be correct\"\n\n    def go_to_previous_page(self, page_number: int):\n        for page in range(page_number, 1, -1):\n            self.browser.find_element(*CataloguePageLocators.PREVIOUS_PAGE_BUTTON).click()\n            page -= 1\n            assert f\"page={page}\" in self.browser.current_url, \\\n                \"Page number should be correct\"\n","sub_path":"pages/catalogue_page.py","file_name":"catalogue_page.py","file_ext":"py","file_size_in_byte":3268,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"584764031","text":"from flask import Blueprint, abort, request, jsonify\nfrom flask_restful import Resource, Api, reqparse\n\nimport requests\n\nSERVICEID = '181'\nAPI_PATH = 'http://charette15.ing.puc.cl/api'\n\n'''\nclass MessagesCollection(Resource):\n    API_PATH_MC = API_PATH + '{}'.format('/messages')\n\n    def get(self):\n        args = request.args.get('access_token','')\n        resp = requests.get(self.API_PATH_MC, params={'access_token': args})\n        if resp.status_code == 200:\n            return jsonify(resp.json())\n        else:\n            
abort(resp.status_code)\n'''\n\nclass Message(Resource):\n    API_PATH_M = API_PATH + '{}'.format('/messages/{}')\n\n    def get(self, id_):\n        args = request.args.get('access_token','')\n        resp = requests.get(self.API_PATH_M.format(id_), params={'access_token': args})\n        if resp.status_code == 200:\n            return jsonify(resp.json())\n        else:\n            abort(resp.status_code)\n\n    def delete(self, id_):\n        args = request.args.get('access_token','')\n        resp = requests.delete(self.API_PATH_M.format( id_), params={'access_token': args})\n        if resp.status_code == 200:\n            return jsonify(resp.json())\n        else:\n            abort(resp.status_code)\n\n'''\nclass MessageCreate(Resource):\n    API_PATH_M_CREATE = API_PATH + '{}'.format('/posts/{}/messages')\n\n    def __init__(self):\n        self.reqparse= reqparse.RequestParser()\n        self.reqparse.add_argument(\n            'description',\n            required=True,\n            help= 'No description provided',\n            location=['form', 'json',]\n        )\n        super().__init__()\n\n    def post(self, postId):\n        args = self.reqparse.parse_args()\n        params = request.args.get('access_token','')\n        resp = requests.post(self.API_PATH_M_CREATE.format(postId), data=args, params={'access_token': params})\n        if resp.status_code == 200:\n            return jsonify(resp.json())\n        else:\n            abort(resp.status_code)\n'''\n\nclass MessagesResponsesCollection(Resource):\n    API_PATH_MRC = API_PATH + '{}'.format('/messages/{}/responses')\n\n    def __init__(self):\n        self.reqparse= reqparse.RequestParser()\n        self.reqparse.add_argument(\n            'description',\n            required=True,\n            help= 'No description provided',\n            location=['form', 'json',]\n        )\n        super().__init__()\n\n    def get(self, id_):\n        params = request.args.get('access_token','')\n        resp = requests.get(self.API_PATH_MRC.format(id_), params={'access_token': params})\n        if resp.status_code == 200:\n            return jsonify(resp.json())\n        else:\n            abort(resp.status_code)\n\n    def post(self, id_):\n        args = self.reqparse.parse_args()\n        token = request.args.get('access_token','')\n        resp = requests.post(self.API_PATH_MRC.format(id_), data=args, params={'access_token': token})\n        if resp.status_code == 200:\n            return jsonify(resp.json())\n        else:\n            abort(resp.status_code)\n\nclass MessagesFilter(Resource):\n    API_PATH_MHC = API_PATH + '{}'.format('/services/{}/filterMessages/{}')\n\n    def get(self, string_):\n        params = request.args.get('access_token','')\n        resp = requests.get(self.API_PATH_MHC.format(SERVICEID, string_), params={'access_token': params})\n        if resp.status_code == 200:\n            return jsonify(resp.json())\n        else:\n            abort(resp.status_code)\n\n\nmessages_api = Blueprint('resources.messages', __name__)\n\napi = Api(messages_api)\n#api.add_resource(MessagesCollection, '/messages')\napi.add_resource(MessagesFilter, '/messages/filter/<string:string_>')\napi.add_resource(Message, '/messages/<int:id_>')\n#api.add_resource(MessageCreate, '/posts/<int:postId>/messages', endpoint='messagecreate')\napi.add_resource(MessagesResponsesCollection, '/messages/<int:id_>/responses')\n#api.add_resource(MessagesHashtagCollection, '/filterMessages/filterString')\n","sub_path":"grupo5_backend/resources/messages.py","file_name":"messages.py","file_ext":"py","file_size_in_byte":3950,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"216521842","text":"import pickle \ndestandaard = pickle.load(open(\"app_sentiment/bdw/input_data/1995Januari1-2015Mei24_Bart De Wever_De Standaard.p\", \"rb\"))\ndemorgen = pickle.load(open(\"app_sentiment/bdw/input_data/1995Januari1-2015Mei24_Bart De Wever_De Morgen.p\", \"rb\"))\ndetijd = pickle.load(open(\"app_sentiment/bdw/input_data/1995Januari1-2015Mei24_Bart De Wever_De Tijd.p\", \"rb\"))\nhetlaatstenieuws = pickle.load(open(\"app_sentiment/bdw/input_data/1995Januari1-2015Mei24_Bart De Wever_Het Laatste Nieuws.p\", \"rb\"))\nhumo = pickle.load(open(\"app_sentiment/bdw/input_data/1995Januari1-2015Mei24_Bart De Wever_Humo.p\", \"rb\"))\nlesoir = pickle.load(open(\"app_sentiment/bdw/input_data/1995Januari1-2015Mei24_Bart De Wever_Le Soir.p\", \"rb\"))\n\nartikels = destandaard+demorgen+detijd+hetlaatstenieuws+humo+lesoir\n\n# Data cleaning\n# Datum string\nimport datetime\nfor artikel in artikels:\n    if artikel[\"datum\"].split(\" \")[2] == \"Jan.\" or artikel[\"datum\"].split(\" \")[2] == \"Jan\":\n        maand = \"01\"\n    elif artikel[\"datum\"].split(\" \")[2] == \"Feb.\" or artikel[\"datum\"].split(\" \")[2] == \"Feb\":\n        maand = \"02\"\n    elif artikel[\"datum\"].split(\" \")[2] == \"Maa.\" or artikel[\"datum\"].split(\" \")[2] == \"Maa\" or artikel[\"datum\"].split(\" \")[2] == \"Mar.\":\n        maand = \"03\"\n    elif artikel[\"datum\"].split(\" \")[2] == \"Apr.\" or artikel[\"datum\"].split(\" \")[2] == \"Apr\":\n        maand = \"04\"\n    elif artikel[\"datum\"].split(\" \")[2] == \"Mei.\" or artikel[\"datum\"].split(\" \")[2] == \"Mei\":\n        maand = \"05\"\n    elif artikel[\"datum\"].split(\" \")[2] == \"Jun.\" or artikel[\"datum\"].split(\" \")[2] == \"Jun\":\n        maand = \"06\"\n    elif artikel[\"datum\"].split(\" \")[2] == \"Jul.\" or artikel[\"datum\"].split(\" \")[2] == \"Jul\":\n        maand = \"07\"\n    elif artikel[\"datum\"].split(\" \")[2] == \"Aug.\" or artikel[\"datum\"].split(\" \")[2] == \"Aug\":\n        maand = \"08\"\n    elif artikel[\"datum\"].split(\" \")[2] == \"Sep.\" or artikel[\"datum\"].split(\" \")[2] == \"Sep\":\n        maand = \"09\"\n    elif artikel[\"datum\"].split(\" \")[2] == \"Okt.\" or artikel[\"datum\"].split(\" \")[2] == \"Okt\":\n        maand = \"10\"\n    elif artikel[\"datum\"].split(\" \")[2] == \"Nov.\" or artikel[\"datum\"].split(\" \")[2] == \"Nov\":\n        maand = \"11\"\n    elif artikel[\"datum\"].split(\" \")[2] == \"Dec.\" or artikel[\"datum\"].split(\" \")[2] == \"Dec\":\n        maand = \"12\"\n    \n    artikel[\"datum\"] = artikel[\"datum\"].split(\" \")[1] + \"/\" + maand + \"/\" + artikel[\"datum\"].split(\" \")[3]\n    artikel[\"datum\"] = datetime.datetime.strptime(artikel[\"datum\"], \"%d/%m/%Y\")\n\n# Publicatie\nfor artikel in artikels:\n    if \"De Standaard\" in artikel[\"publicatie\"]:\n        artikel[\"publicatie\"] = \"De Standaard\"\n    elif \"De Morgen\" in artikel[\"publicatie\"]:\n        artikel[\"publicatie\"] = \"De Morgen\"\n    elif \"De Tijd\" in artikel[\"publicatie\"]:\n        artikel[\"publicatie\"] = \"De Tijd\"\n    elif \"Het Laatste Nieuws\" in artikel[\"publicatie\"]:\n        artikel[\"publicatie\"] = \"Het Laatste Nieuws\"\n    elif \"Humo\" in artikel[\"publicatie\"]:\n        artikel[\"publicatie\"] = \"Humo\"\n    elif \"Le Soir\" in artikel[\"publicatie\"]:\n        artikel[\"publicatie\"] = \"Le Soir\"\n    \nimport pandas as pd\npanda = pd.DataFrame(artikels)\n\n# Set datum column as index\npanda.datum = pd.to_datetime(panda.datum)\npanda = panda.set_index(\"datum\")\npanda = panda.sort_index()\n\n# Histogram (per week groeperen)\ndef perdelta(start, end, delta):\n    l = []\n    curr = start-delta\n    while curr < end:\n        l.append(curr)\n        curr += delta\n    l.append(curr)\n    \n    return l\ndate_interval = perdelta(panda.index[0], panda.index[-1], datetime.timedelta(days=7))\n\nvolume = dict()\nvolume[\"alles\"] = []\nvolume[\"destandaard\"] = []\nvolume[\"demorgen\"] = []\nvolume[\"detijd\"] = []\nvolume[\"hetlaatstenieuws\"] = 
[]\nvolume[\"humo\"] = []\nvolume[\"lesoir\"] = []\nvolume[\"datum\"] = []\nfor i in range(len(date_interval)-1):\n volume[\"datum\"].append(date_interval[i+1].value/1000000)\n volume[\"alles\"].append(len(panda[(panda.index > date_interval[i]) & (panda.index <= date_interval[i+1])]))\n volume[\"destandaard\"].append(len(panda[(panda.index > date_interval[i]) & (panda.index <= date_interval[i+1]) & (panda.publicatie == \"De Standaard\")]))\n volume[\"demorgen\"].append(len(panda[(panda.index > date_interval[i]) & (panda.index <= date_interval[i+1]) & (panda.publicatie == \"De Morgen\")]))\n volume[\"detijd\"].append(len(panda[(panda.index > date_interval[i]) & (panda.index <= date_interval[i+1]) & (panda.publicatie == \"De Tijd\")]))\n volume[\"hetlaatstenieuws\"].append(len(panda[(panda.index > date_interval[i]) & (panda.index <= date_interval[i+1]) & (panda.publicatie == \"Het Laatste Nieuws\")]))\n volume[\"humo\"].append(len(panda[(panda.index > date_interval[i]) & (panda.index <= date_interval[i+1]) & (panda.publicatie == \"Humo\")]))\n volume[\"lesoir\"].append(len(panda[(panda.index > date_interval[i]) & (panda.index <= date_interval[i+1]) & (panda.publicatie == \"Le Soir\")]))\n\n# Save\npickle.dump(volume,open(\"app_sentiment/bdw/results/histogram.p\", \"wb\"))","sub_path":"app_sentiment/bdw/volume.py","file_name":"volume.py","file_ext":"py","file_size_in_byte":4959,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"509693309","text":"import data_processing as dp\nimport sys\nimport random\nimport pickle\n\nprint('Number of arguments:', len(sys.argv), 'arguments.')\n\nprint('Argument List:', str(sys.argv))\n\nif __name__ == \"__main__\":\n\n path = sys.argv[1]\n\n dims = sys.argv[2]\n\n labels_file = open(\"labels.pkl\",\"rb\")\n\n labels = pickle.load(labels_file)\n\n labels_file.close()\n\n tokens = dp.get_tokens(\"raw_data\")\n\n print(\"Random samples: \" + str(random.sample(tokens, 2)))\n\n word2vector, word2idx = dp.get_glove_dicts(path, dims, True)\n\n weights_matrix, word2idx = dp.get_weight_matrix(tokens, word2vector, dims, True)\n\n sentence_matrices, labels_matrices = dp.process_dataset(path, word2idx, labels)\n\n print(\"Program executed succesfully ...\")\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":738,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"645445782","text":"N_,K = map(int,input().split())\nA = sorted(list(map(int,input().split())))\nmod = int(1e9+7)\nif N_ == 1 or N_ == 2:\n print(0)\n exit()\n\n\"\"\"\nhttps://www.planeta.tokyo/entry/5195/\n\"\"\"\ndef cmb(n, r, p):\n if (r < 0) or (n < r):\n return 0\n r = min(r, n - r)\n return fact[n] * factinv[r] * factinv[n-r] % p\n\np = 10**9+7\nN = 10**5+1 # N は必要分だけ用意する\nfact = [1, 1] # fact[n] = (n! 
mod p)\nfactinv = [1, 1] # factinv[n] = ((n!)^(-1) mod p)\ninv = [0, 1] # factinv 計算用\n \nfor i in range(2, N + 1):\n fact.append((fact[-1] * i) % p)\n inv.append((-inv[p % i] * (p // i)) % p)\n factinv.append((factinv[-1] * inv[-1]) % p)\n\nans = 0\nfor i in range(N_):\n ans += cmb(i,K-1,mod)*A[i]\n\nfor j in range(N_):\n ans -= cmb(N_-j-1,K-1,mod)*A[j]\n\nprint(ans%mod)","sub_path":"Python_codes/p02804/s739004953.py","file_name":"s739004953.py","file_ext":"py","file_size_in_byte":794,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"512865400","text":"# Вводится карта проходимых в обе стороны тоннелей подземлья в виде строк,\n# содержащих разделённые пробелом названия двух пещер, которые соединяет соответствующий тоннель.\n# Две последние строки не содержат пробелов — это название входа в подземелье и название выхода.\n# Вывести \"YES\", если из входа можно попасть в выход, и \"NO\" в противном случае.\n# Пары могут повторяться или содержать одинаковые слова.\n\n\ndungeons = {}\nenter = input()\nwhile \" \" in enter:\n enter, exit = enter.split()\n dungeons.setdefault(enter, set()).add(exit)\n dungeons.setdefault(exit, set()).add(enter)\n enter = input()\nexit = input()\nresult, ways = set(), {enter}\nwhile ways:\n new = set()\n for way in ways:\n new |= dungeons[way]\n result |= ways\n ways = new - result\nprint(\"YES\" if exit in result else \"NO\")\n","sub_path":"dungeons.py","file_name":"dungeons.py","file_ext":"py","file_size_in_byte":1140,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"385032997","text":"#!usr/bin/python\n\nfrom graphviz import Source\nfrom Parser import KnockSoup\n\n\ndef get_with_idx(tag):\n return \"{}:{}\".format(tag[\"idx\"], tag.text)\n\n\ndef extract_dependencies(xml_name, type=\"collapsed-dependencies\"):\n soup = KnockSoup(xml_name)\n tag1, tag2 = \"dependencies\", \"dep\"\n\n deps_list = list()\n for dependencies in soup.find_all(tag1, type=type):\n deps = list()\n for dep in dependencies.find_all(tag2):\n deps.append((dep[\"type\"], get_with_idx(dep.governor),\n get_with_idx(dep.dependent)))\n deps_list.append(deps)\n\n return deps_list\n\n\ndef make_dot_lang(deps, graph_name=\"dep_graph\", layout=\"dot\"):\n script = \"digraph {} {{\\n graph [ layout = {} ];\\n\"\n script = script.format(graph_name, layout)\n for dep in deps:\n script += ' \"{1}\" -> \"{2}\" [ label = \"{0}\" ];\\n'.format(*dep)\n script += \"}\"\n return script\n\n\nif __name__ == \"__main__\":\n xml_name = \"head.txt.xml\"\n deps_list = extract_dependencies(xml_name)\n for deps in deps_list:\n script = make_dot_lang(deps)\n src = Source(script)\n src.render(view=True)\n input(\"Do you want to display the next? 
[Enter / Ctrl-C]\")\n","sub_path":"kodaira/6set/knock57.py","file_name":"knock57.py","file_ext":"py","file_size_in_byte":1132,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"633086404","text":"###############################################################\n#\n# Job options file to test SCT encoding\n#\n#==============================================================\n\nif not \"doPrint\" in dir():\n doPrint = False\nif not \"EvtMax\" in dir():\n EvtMax = 10\n\nmsg = None\nif doPrint:\n from AthenaCommon.Logging import logging\n msg = logging.getLogger(\"testSCTEncode\")\n msg.setLevel(logging.INFO)\n\n#--------------------------------------------------------------\n# Standard includes\n#--------------------------------------------------------------\nimport AthenaCommon.AtlasUnixStandardJob\n\n#--------------------------------------------------------------\n# Thread-specific setup\n#--------------------------------------------------------------\nfrom AthenaCommon.ConcurrencyFlags import jobproperties\nnumThreads = jobproperties.ConcurrencyFlags.NumThreads()\nif numThreads > 0:\n from AthenaCommon.AlgScheduler import AlgScheduler\n AlgScheduler.CheckDependencies( True )\n AlgScheduler.ShowControlFlow( True )\n AlgScheduler.ShowDataDependencies( True )\n\n#--------------------------------------------------------------\n# use auditors\n#--------------------------------------------------------------\nfrom AthenaCommon.AppMgr import ServiceMgr\nfrom GaudiSvc.GaudiSvcConf import AuditorSvc\nServiceMgr += AuditorSvc()\ntheAuditorSvc = ServiceMgr.AuditorSvc\ntheAuditorSvc.Auditors += [ \"ChronoAuditor\"]\ntheAuditorSvc.Auditors += [ \"MemStatAuditor\" ]\ntheApp.AuditAlgorithms=True\n\n#--------------------------------------------------------------\n# Load Geometry\n#--------------------------------------------------------------\nfrom AthenaCommon.GlobalFlags import globalflags\nglobalflags.DetDescrVersion=\"ATLAS-R2-2015-03-01-00\"\nglobalflags.DetGeo=\"atlas\"\nglobalflags.InputFormat=\"pool\"\nglobalflags.DataSource=\"geant4\"\nif doPrint:\n msg.info(globalflags)\n\n#--------------------------------------------------------------\n# Set Detector setup\n#--------------------------------------------------------------\n# --- switch on InnerDetector\nfrom AthenaCommon.DetFlags import DetFlags\nDetFlags.detdescr.SCT_setOn()\nDetFlags.ID_setOff()\nDetFlags.Calo_setOff()\nDetFlags.Muon_setOff()\nDetFlags.Truth_setOff()\nDetFlags.LVL1_setOff()\nDetFlags.SCT_setOn()\n\n# ---- switch parts of ID off/on as follows\n#switch off tasks\nDetFlags.pileup.all_setOff()\nDetFlags.simulate.all_setOff()\nDetFlags.makeRIO.all_setOff()\nDetFlags.writeBS.all_setOff()\nDetFlags.writeBS.SCT_setOn()\nDetFlags.readRDOBS.all_setOff()\nDetFlags.readRIOBS.all_setOff()\nDetFlags.readRIOPool.all_setOff()\nDetFlags.readRDOPool.all_setOff()\nDetFlags.readRDOPool.SCT_setOn()\nDetFlags.writeRIOPool.all_setOff()\n\nif doPrint:\n DetFlags.Print()\n\nimport AtlasGeoModel.SetGeometryVersion\nimport AtlasGeoModel.GeoModelInit\n# import MagFieldServices.SetupField\n\n#--------------------------------------------------------------\n# Load IOVDbSvc\n#--------------------------------------------------------------\nIOVDbSvc = Service(\"IOVDbSvc\")\nfrom IOVDbSvc.CondDB import conddb\nconddb.dbdata=\"OFLP200\"\nIOVDbSvc.GlobalTag=\"OFLCOND-RUN12-SDR-31\"\nIOVDbSvc.OutputLevel = WARNING\n\nfrom AthenaCommon.AlgSequence import AthSequencer\ncondSeq = 
AthSequencer(\"AthCondSeq\")\ncondAlgName = \"SCT_CablingCondAlgFromCoraCool\"\nif not hasattr(condSeq, condAlgName):\n from AthenaCommon.CfgGetter import getAlgorithm\n SCT_CablingCondAlgFromCoraCool = getAlgorithm(condAlgName)\n condSeq += SCT_CablingCondAlgFromCoraCool\n\n# Set input byte stream file (from RecExCommon/myTopOptions.py)\ninputRDOFiles = [\"/afs/cern.ch/atlas/project/rig/referencefiles/RTTinputFiles/MC15_13TeV/valid1.110401.PowhegPythia_P2012_ttbar_nonallhad.recon.RDO.e3099_s2578_r6699_10evt.pool.root\"]\n\n# Set up POOL file reading\nimport AthenaPoolCnvSvc.ReadAthenaPool\nServiceMgr.EventSelector.InputCollections = inputRDOFiles\n\n# Set up event info cnv alg\nfrom AthenaCommon.AlgSequence import AlgSequence\ntopSequence = AlgSequence()\nfrom xAODEventInfoCnv.xAODEventInfoCnvConf import xAODMaker__EventInfoCnvAlg\ntopSequence += xAODMaker__EventInfoCnvAlg()\n\n# Set up byte stream writing\ninclude(\"ByteStreamCnvSvc/RDP_ByteStream_jobOptions.py\")\n\nfrom ByteStreamCnvSvc import WriteByteStream\nStreamBSFileOutput = WriteByteStream.getStream(\"EventStorage\",\"StreamBSFileOutput\")\nStreamBSFileOutput.ItemList += [ \"SCT_RDO_Container#SCT_RDOs\" ]\n\n# Print algorithms\nif doPrint:\n msg.info(topSequence)\n\n# Set the number of events to be processed\ntheApp.EvtMax = EvtMax\n\n#--------------------------------------------------------------\n# Set output lvl (VERBOSE, DEBUG, INFO, WARNING, ERROR, FATAL)\n#--------------------------------------------------------------\nServiceMgr.MessageSvc.OutputLevel = DEBUG\nServiceMgr.MessageSvc.Format = \"% F%40W%S%7W%R%T %0W%M\"\n\nif numThreads >= 2:\n from SCT_ConditionsAlgorithms.SCTCondAlgCardinality import sctCondAlgCardinality\n sctCondAlgCardinality.set(numThreads)\n","sub_path":"InnerDetector/InDetEventCnv/SCT_RawDataByteStreamCnv/share/testSCTEncode.py","file_name":"testSCTEncode.py","file_ext":"py","file_size_in_byte":4894,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"357093658","text":"import docker\nimport unittest\n\nclient = docker.from_env()\n\n# Set variables for tests\ncontainer_name = 'minion'\n\ndef set_container_id():\n container_id = ''\n for container in client.containers.list():\n if container.name == container_name:\n container_id = container.id\n assert container_id != ''\n return container_id\n\n# Check for container name\ndef test_minion_container_exists():\n found_master_container = False\n for container in client.containers.list():\n if container.name == container_name:\n found_master_container = True\n\n assert found_master_container == True\n\n# Check container is running\ndef test_minion_container_is_running():\n cid = set_container_id()\n container = client.containers.get(cid)\n assert container.status == 'running'\n\n","sub_path":"tests/test_minion.py","file_name":"test_minion.py","file_ext":"py","file_size_in_byte":804,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"293288329","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jan 22 23:14:42 2021\n\n@author: Lesson-24 Lambda\n\"\"\"\n\nimport math\n# def nom(agrument):\n# return ifoda \n\n# labmda argument:ifoda \n\n# labmda argument1, argument2:ifoda=argument1+argument2 \n\n# uzunlik = lambda pi, r : 2*pi*r\n# print(uzunlik(math.pi,10))\n\n# kvadrat = lambda x, y : x**y \n# print(kvadrat(3, 2))\n\n# def daraja(n):\n# return lambda x : x**n\n# kvadrat = daraja(2)\n# kub = daraja(3)\n# print(f\"3-ning kvadrati {kvadrat(3)} ga, 
\"\n# f\"kubi {kub(3)} ga teng\")\n\n# from math import sqrt # sqrt kdavrat ildiz hisoblaydigan funksiya\n# sonlar = list(range(11))\n# ildizlar = list(map(sqrt,sonlar))\n# print(ildizlar)\n\n# def daraja2(x):\n# \"\"\"Berilgan sonni kvadratini qaytaruvchi funksiya\"\"\"\n# return x*x\n# print(list(map(daraja2,sonlar)))\n\n# kvadratlar = list(map(lambda x:x*x,sonlar))\n# # print(kvadratlar)\n\n# a = [4, 5, 6]\n# b = [7, 8, 9]\n# a_plus_b = list(map(lambda x,y:x+y,a,b))\n# print(a_plus_b)\n\n\nimport random as r\n# sonlar = r.sample(range(100),10)\n# print(sonlar)\n# def juftmi(x):\n# \"\"\"x Juft bulsa True, aks holda False qaytariladi\"\"\"\n# return x%2==0\n\n# # juft_sonlar = list(filter(juftmi,sonlar))\n# juft_sonlar = list(filter(lambda x: x%2==0,sonlar))\n# print(juft_sonlar)\n\nmevalar = ['olma','anor','anjir','shaftoli','orik','tarvuz','qovin','banan']\nharf = 'o'\nmevalar_b = list(filter(lambda meva:meva.startswith(harf),mevalar))\n# print(mevalar_b)\n\nmevalar2 = list(filter(lambda meva:len(meva)<=5,mevalar))\nprint(mevalar2)\n\n\nlist(filter(lambda meva:(meva.startswith('a') and meva.endswith('r')), mevalar))\n\n\n","sub_path":"Lesson-24.py","file_name":"Lesson-24.py","file_ext":"py","file_size_in_byte":1588,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"499917218","text":"def solution(n):\n l = 1\n while n - 3**l > 0 :\n n -= 3**l \n l += 1\n\n n = n-1\n l = l-1\n answer = []\n while l > 0:\n d, n = divmod(n,3**l) \n if d==2:\n answer.append(d+2)\n else:\n answer.append(d+1)\n l -= 1\n if n==2:\n answer.append(4)\n else:\n answer.append(n+1)\n answer = [str(i) for i in answer]\n return \"\".join(answer)","sub_path":"Programmers/2단계_124_나라의_숫자.py","file_name":"2단계_124_나라의_숫자.py","file_ext":"py","file_size_in_byte":422,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"550594946","text":"# not so good implementation of singleton class\r\nclass Borg:\r\n \"\"\"Borg pattern making the class attributes global\"\"\"\r\n _shared_data = {} # Attribute dictionary\r\n\r\n def __init__(self):\r\n self.__dict__ = self._shared_data # Make it an attribute dictionary\r\n\r\n \r\nclass Singleton(Borg): #Inherits from the Borg class\r\n \"\"\"This class now shares all its attributes among its various instances\"\"\"\r\n #This essenstially makes the singleton objects an object-oriented global variable\r\n\r\n def __init__(self, **kwargs):\r\n Borg.__init__(self)\r\n self._shared_data.update(kwargs) # Update the attribute dictionary by inserting a new key-value pair \r\n\r\n \r\n#Let's create a singleton object and add our first acronym\r\nx = Singleton(HTTP=\"Hyper Text Transfer Protocol\")\r\n# Print the object\r\nprint(x) \r\nprint(x._shared_data)\r\n\r\n#Let's create another singleton object and if it refers to the same attribute dictionary by adding another acronym.\r\ny = Singleton(SNMP=\"Simple Network Management Protocol\")\r\n# Print the object\r\nprint(y)\r\nprint(y._shared_data)\r\n\r\n\r\n# Output\r\n# (base) D:\\>python dp7_singleton2.py\r\n# <__main__.Singleton object at 0x000001B7506E5240>\r\n# {'HTTP': 'Hyper Text Transfer Protocol'}\r\n# <__main__.Singleton object at 0x000001B75054D898>\r\n# {'HTTP': 'Hyper Text Transfer Protocol', 'SNMP': 'Simple Network Management 
Protocol'}","sub_path":"dp7_singleton2.py","file_name":"dp7_singleton2.py","file_ext":"py","file_size_in_byte":1376,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"356386696","text":"import numpy as np\nfrom matplotlib import pyplot as plt\nimport pandas as pd\n\nfiles = [\"Original/pc.txt\", \"Pools/pc.txt\", \"Original/android.txt\", \"Pools/android.txt\"]\n\ndef parseData(filename):\n file = open(filename)\n data = [x.split(\":\")[1] for x in file.readlines() if x.split(\":\")[0] == \"Frame Samples\"]\n data = [x[:-1].split(\" \")[:-1] for x in data]\n data = [item for sublist in data for item in sublist]\n data = [int(x) for x in data]\n mean = np.mean(data)\n std = np.std(data)\n topten = np.mean(sorted(data)[140:150])\n botten = np.mean(sorted(data)[:10])\n max = np.max(data)\n min = np.min(data)\n return {\"Data\": data, \"Mean\" : mean, \"Std\" : std, \"Top 10%\" : topten, \"Bottom 10%\" : botten, \"Min\" : min, \"Max\" : max}\n\n\n\nresults = [parseData(x) for x in files]\n\nprint(\"PC Mean %f / %f\" % (results[0][\"Mean\"], results[1][\"Mean\"]))\nprint(\"PC Std %f / %f\" % (results[0][\"Std\"], results[1][\"Std\"]))\nprint(\"PC Top 10 Mean %f / %f\" % (results[0][\"Top 10%\"], results[1][\"Top 10%\"]))\nprint(\"PC Bottom 10 Mean %f / %f\" % (results[0][\"Bottom 10%\"], results[1][\"Bottom 10%\"]))\nprint(\"PC Min value %d / %d\" % (results[0][\"Min\"], results[1][\"Min\"]))\nprint(\"PC Max value %d / %d\" % (results[0][\"Max\"], results[1][\"Max\"]))\nprint(\"Android Mean %f / %f\" % (results[2][\"Mean\"], results[3][\"Mean\"]))\nprint(\"Android Std %f / %f\" % (results[2][\"Std\"], results[3][\"Std\"]))\nprint(\"Android Top Mean %f / %f\" % (results[2][\"Top 10%\"], results[3][\"Top 10%\"]))\nprint(\"Android Bottom Mean %f / %f\" % (results[2][\"Bottom 10%\"], results[3][\"Bottom 10%\"]))\nprint(\"Android Min value %d / %d\" % (results[2][\"Min\"], results[3][\"Min\"]))\nprint(\"Android Max value %d / %d\" % (results[2][\"Max\"], results[3][\"Max\"]))\n\nplt.plot(range(150), results[0][\"Data\"], \"#0000ff\")\nplt.plot(range(150), results[1][\"Data\"], \"#008888\")\nplt.plot(range(150), results[2][\"Data\"], \"#00ff00\")\nplt.plot(range(150), results[3][\"Data\"], \"#888800\")\nplt.legend([\"PC unoptimized\", \"PC optimized\", \"Android unoptimized\", \"Android optimized\"])\nplt.axis([0, 150, 0, 250])\nplt.xlabel(\"Sample index\")\nplt.ylabel(\"Framerate\")\nplt.show()","sub_path":"Mobile/Paper/CollectedData/gatherData.py","file_name":"gatherData.py","file_ext":"py","file_size_in_byte":2103,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"101271355","text":"\"\"\"\n FusionPBX\n Version: MPL 1.1\n\n The contents of this file are subject to the Mozilla Public License Version\n 1.1 (the \"License\"); you may not use this file except in compliance with\n the License. You may obtain a copy of the License at\n http://www.mozilla.org/MPL/\n\n Software distributed under the License is distributed on an \"AS IS\" basis,\n WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License\n for the specific language governing rights and limitations under the\n License.\n\n The Initial Developer of the Original Code is\n Jim Millard <jmillard459@gmail.com>\n Portions created by the Initial Developer are Copyright (C) 2008-2016\n the Initial Developer. All Rights Reserved.\n\n Contributor(s):\n Mark J. 
Crane <markjcrane@fusionpbx.com>\n\"\"\"\n\n\nimport subprocess\nimport sys\nimport os\nimport FPBXParms\n\ndef ipostgresql():\n INSTALL_ROOT = os.getcwd()\n if os.path.isfile(\"%s/resources/install.json\" % (INSTALL_ROOT)):\n FPBXParms.PARMS = FPBXParms.load_parms(FPBXParms.PARMS)\n else:\n print(\"Error no install parameters\")\n sys.exit(1)\n \n if FPBXParms.PARMS[\"DatabaseType\"][0] == \"P\":\n print(\"Installing Postgresql\")\n pgidbg = open(\"pginstall.out\", 'w')\n pgierr = open(\"pginstall.err\", 'w')\n ret = subprocess.call(\"apt-get -y install postgresql-9.4 postgresql-client-9.4 postgresql-client-common postgresql-common ssl-cert\", stdout=pgidbg, stderr=pgierr, shell=True)\n FPBXParms.check_ret(ret, \"installing postgresql\")\n pgidbg.close()\n pgierr.close()\n return\n","sub_path":"Install_postgresql.py","file_name":"Install_postgresql.py","file_ext":"py","file_size_in_byte":1677,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"576641535","text":"import json\n\nimport datetime\nimport mongoengine\n\n\nclass CashTagUser(mongoengine.Document):\n username = mongoengine.StringField()\n password = mongoengine.StringField()\n avatar_img_url = mongoengine.StringField()\n\n cashtags_contributed_to = mongoengine.ListField()\n cashtags_watching = mongoengine.ListField()\n cashtags_created_active = mongoengine.ListField()\n # cashtags_created_past = mongoengine.ListField()\n\n friends = mongoengine.ListField()\n contacts = mongoengine.ListField()\n\n money_able_to_send = mongoengine.DecimalField(default=0)\n money_pending = mongoengine.DecimalField(default=0)\n\n def to_api_json(self, json_dumps=False):\n from cashtag.models import CashTag\n\n friends = [CashTagUser.objects.get(pk=pk).username for pk in self.friends]\n contacts = [CashTagUser.objects.get(pk=pk).username for pk in self.contacts]\n data = {\n 'username': self.username,\n 'avatar_img_url': self.avatar_img_url,\n 'cashtags_contributed_to': self.cashtags_contributed_to,\n 'cashtags_watching': self.cashtags_watching,\n 'cashtags_created_active': [CashTag.objects.get(pk=pk).to_api_json() for pk in self.cashtags_created_active],\n 'friends': friends,\n 'contacts': contacts,\n 'money_able_to_send': self.money_able_to_send,\n 'money_pending': self.money_pending,\n }\n if json_dumps:\n return json.dumps(data)\n return data\n\n @staticmethod\n def get_user_contributions_total(username, cashtag_pk):\n cashtag_pk = str(cashtag_pk)\n from cashtag.models import Contribution\n user_contributions = Contribution.objects(username=username, cashtag_pk=cashtag_pk)\n return sum(contrib.amount for contrib in user_contributions)\n\n def contribute_to(self, cashtag_pk, amount, notes=None):\n from cashtag.models import CashTag, Contribution\n assert amount > 0\n assert amount <= self.money_able_to_send\n\n cash_tag = CashTag.objects.get(pk=cashtag_pk)\n assert amount > cash_tag.min_price\n\n contribution = Contribution(\n username=self.username,\n amount=amount,\n cashtag_pk=cashtag_pk,\n notes=notes,\n timestamp=datetime.datetime.now(),\n )\n contribution.save()\n\n self.money_able_to_send -= amount\n if cashtag_pk not in self.cashtags_watching:\n # Don't re-follow if we've donated in the apst and explicitly stopped watching\n if cashtag_pk not in self.cashtags_contributed_to:\n self.cashtags_watching.append(cashtag_pk)\n if cashtag_pk not in self.cashtags_contributed_to:\n self.cashtags_contributed_to.append(cashtag_pk)\n self.save()\n\n cash_tag.money_collected += amount\n if 
self.username not in cash_tag.supporters:\n cash_tag.supporters.append(self.username)\n cash_tag.save()\n\n def stop_watching(self, cashtag_pk):\n if cashtag_pk in self.cashtags_watching:\n self.cashtags_watching.remove(cashtag_pk)\n self.save()\n\n\n\n\n","sub_path":"back/users/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3157,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"79091193","text":"import sys, os, shutil\n\nimport inv_check\nimport insert_jaif\nimport ontology_to_daikon\nimport pa2checker\n\nimport backend\nimport common\nimport dot\nimport argparse\nfrom simprog import Similarity\nimport json\n\ndef get_daikon_patterns():\n ordering_operator = \"<=\"\n\n ontology_invariant_file = \"TODO_from_Howie.txt\"\n with open(ontology_invariant_file, 'w') as f:\n f.write(ordering_operator)\n\n invariant_name = \"TODO_sorted_sequence\"\n\n daikon_pattern_java_file = ontology_to_daikon.create_daikon_invariant(ontology_invariant_file, invariant_name)\n\n pattern_class_dir = os.path.join(common.WORKING_DIR, \"invClass\")\n if os.path.isdir(pattern_class_dir):\n shutil.rmtree(pattern_class_dir)\n os.mkdir(pattern_class_dir)\n\n cmd = [\"javac\", \"-g\", \"-classpath\", common.get_jar('daikon.jar'),\n daikon_pattern_java_file, \"-d\", pattern_class_dir]\n common.run_cmd(cmd)\n\n return pattern_class_dir\n\n\ndef compute_daikon_invariants(project_list, pattern_class_dir=None):\n\n list_of_methods = []\n for project in project_list:\n\n dljc_dir = common.get_dljc_dir_for_project(project)\n if (not dljc_dir) or (not os.path.isdir(dljc_dir)):\n print (\"Project {0} was not built\".format(project))\n continue\n i=0\n while True:\n i+=1\n dtrace_dir = os.path.join(dljc_dir, \"test-classes{}\".format(i))\n dtrace_file = os.path.join(dtrace_dir, 'RegressionTestDriver.dtrace.gz')\n if not os.path.isfile(dtrace_file):\n print (\"No dtrace file found at {0}\".format(dtrace_file))\n break\n \n ppt_names = inv_check.find_ppts_that_establish_inv(dtrace_file, pattern_class_dir, \"TODO_sorted_sequence\")\n methods = set()\n for ppt in ppt_names:\n print (\"BINGO !!!!!!!!!!! 
{0}\".format(ppt))\n method_name = ppt[:ppt.find(':::EXIT')]\n methods.add(method_name)\n list_of_methods +=[(project, methods)]\n\n print (\"\\n ************\")\n print (\"The following corpus methods return a sequence sorted by <=\")\n for project, methods in list_of_methods:\n if len(methods)>0:\n print (project)\n for m in methods:\n print(\"\\t{}\".format(m))\n print (\"\\n ************\")\n\n if pattern_class_dir:\n shutil.rmtree(pattern_class_dir)\n\ndef get_method_map(project_list):\n dot_to_method_map = {}\n for project in project_list:\n for output_dir in dot.dot_dirs(project):\n #output_dir = dot.dot_dirs(project)[0] # first folder only for now\n method_file = dot.get_method_path(project, output_dir)\n if not os.path.isfile(method_file):\n print (\"Cannot find method file for project {0} at {1}\".format(project, method_file))\n sys.exit(0)\n\n with open(method_file, \"r\") as mf:\n content = mf.readlines()\n for line in content:\n line = line.rstrip()\n items = line.split('\\t')\n method_name = items[0]\n method_dot = items[1]\n method_dot_path = dot.get_dot_path(project, output_dir, method_dot) \n dot_to_method_map[method_dot_path] = method_name\n return dot_to_method_map\n\ndef check_similarity(project, result_file, kernel_file, corpus_dot_to_method_map, output_json_file, cluster_json=None, top_k=5):\n \"\"\" SUMMARY: use case of the user-driven functionality of PASCALI.\n \"\"\"\n\n # fetch various method information from each project in the list\n # output_dir = dot.dot_dirs(project)[0]\n # method_file = dot.get_method_path(project, output_dir)\n\n # check similarity\n json_result = {}\n sim = Similarity()\n sim.read_graph_kernels(kernel_file)\n iter_num = 3 # number of iteration of the WL-Kernel method\n this_method_map = get_method_map([project])\n with open(result_file, \"w\") as fo: \n for dot_file in this_method_map.keys():\n dot_method = corpus_dot_to_method_map[dot_file]\n json_result[dot_method] = []\n result_program_list_with_score = sim.find_top_k_similar_graphs(dot_file, dot_file, top_k, iter_num, cluster_json)\n line = dot_file+\":\\n\"\n for (dt, score) in result_program_list_with_score:\n line += \"{} , {}\\n\".format(dt, score)\n if dt not in corpus_dot_to_method_map:\n print(\"{0} does not exist.\".format(dt))\n sys.exit(0)\n tmp_dict = {}\n tmp_dict[corpus_dot_to_method_map[dt]] = score\n json_result[dot_method].append(tmp_dict)\n line += \"\\n\"\n fo.write(line)\n with open(output_json_file, \"w\") as jo:\n jo.write(json.dumps(json_result, indent=4))\n\ndef run(project_list, args, kernel_dir):\n dot_method_map = get_method_map(project_list)\n for project in project_list:\n print(\"Computing similar programs for {0}...\".format(project))\n result_file = os.path.join(common.WORKING_DIR, args.dir, project+\"_result.txt\")\n kernel_file = os.path.join(common.WORKING_DIR, kernel_dir, project+\"_kernel.txt\")\n json_file = os.path.join(common.WORKING_DIR, args.dir, project+\"_result.json\") \n check_similarity(project, result_file, kernel_file, dot_method_map, json_file, args.cluster, min(5,len(project_list)))\n\n #compute_daikon_invariants(project_list, get_daikon_patterns())\n #compute_daikon_invariants(project_list)\n\n","sub_path":"frontend.py","file_name":"frontend.py","file_ext":"py","file_size_in_byte":5092,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"304803623","text":"\"\"\"\nAbstract description of a Cluster Job\n\nTo see status messages, set\n\n import logging\n 
logging.basicConfig(level=logging.DEBUG)\n\"\"\"\nfrom __future__ import print_function, division, absolute_import, \\\n unicode_literals\n\n__version__ = \"1.1.3\"\n\nimport os\nimport subprocess as sp\nimport tempfile\ntry:\n import cPickle as pickle\nexcept ImportError:\n import pickle\nfrom glob import glob\nfrom .utils import set_executable\nfrom textwrap import dedent\nimport logging\nimport importlib\n\nclass Job(object):\n \"\"\"\n Class Attributes\n ----------------\n\n default_opts: dict\n Default values of the `options` attribute for a newly created Job\n instance.\n\n backends: dict\n Available backends. Maps backend name to a dictionary of backend\n options. See the documentation of `clusterjob.backends.slurm`, for\n details. User-defined backends may be added with the `register_backend`\n class method\n\n default_backend: str\n The default backend name to be used.\n\n default_shell: str\n The shell to be used (shebang) for the job script. Also the shebang for\n the prologue and epilogue scripts, if no shebang is present in those\n scripts\n\n default_remote: str\n The remote to be used when submitting the job script\n\n default_rootdir: str\n The default root for all working directories.\n\n default_sleep_interval: int or None\n The default value of the `sleep_interval` attribute.\n\n cache_folder: str\n Local folder in which to cache the AsyncResult instances resulting from\n job submission\n\n cache_prefix: str\n prefix for cache filenames\n\n cache_counter: int\n Internal counter to be used when no cache_id is specified during\n submission\n\n debug_cmds: boolean\n If set to True, write debug information about all external commands\n (`utils.run_cmd` calls) to stdout.\n\n Attributes\n ----------\n\n jobscript: str\n Multiline job script. Should not contain a shebang or backend-specific\n submission headers. It will be rendered for the given backend by\n * adding a shebang (based on the `shell` attribute)\n * adding job submission headers (based on the options attribute)\n * applying the mappings defined in the `job_vars` entry of the backend\n * formatting the script with the attributes of the job attributes (e.g.\n {rootdir} will be replaced by the value of the `rootdir` attribute.\n\n backend: str\n name of backend, must be a key in the backends class dictionary\n\n shell: str\n shell that is used to execute runscript\n\n remote: str or None\n remote server on which to execute submit commands\n\n rootdir: str\n root directory for workdir\n\n workdir: str\n work directory (local or remote) in which the job script file will be\n placed, and from which the submission command will be called. Relative\n to `rootdir`.\n\n filename: str\n Name of file to which the job script will be written (inside\n rootdir/workdir). If not set explicitly set, the filename will be set\n from the job name (`options['jobname']` attribute) together with a\n backend-specific file extension\n\n prologue: str\n multiline shell script that will be executed *locally* in the current\n working directory before submitting the job. If the script does not\n contain a shebang, the shell specified in the `shell` attribute will be\n used. The body of the script will be formatted with the Job attributes\n (at submission time); e.g., '{remote}' will be replaced by the value of\n the corresponding attribute. In addition, '{fulldir}' will be replaced\n by\n\n os.path.join(rootdir, workdir)\n\n The main purpose of the prologue script is to move data to a remote\n cluster, e.g. 
via the commands\n\n            ssh {remote} 'mkdir -p {fulldir}'\n            rsync -av {workdir}/ {remote}:{fulldir}\n\n    epilogue: str\n        multiline shell script that will be executed *locally* in the current\n        working directory the first time that the job is known to have\n        finished. It will be formatted in the same way as the prologue script\n        (at submission time). It's execution will be handled by the AsyncResult\n        object resulting from the job submission. The main purpose of the\n        epilogue script is to move data from a remote cluster upon completion\n        of the job.\n\n    sleep_interval: int or None\n        Value for the `sleep_interval` attribute of the AsyncResult instance\n        that is created upon submission. If None, the value for that attribute\n        will be automatically determined between 10 and 1800 seconds, depending\n        on the projected runtime of the job.\n\n    options: dict\n        Dictionary of submission options describing resource requirements. Will\n        be translated according to the backend and passed to the submission\n        command\n\n    Examples\n    --------\n\n    >>> script = r'''\n    ... echo \"####################################################\"\n    ... echo \"Job id: $XXX_JOB_ID\"\n    ... echo \"Job name: $XXX_WORKDIR\"\n    ... echo \"Job started on\" `hostname` `date`\n    ... echo \"Current directory:\" `pwd`\n    ... echo \"####################################################\"\n    ...\n    ... echo \"####################################################\"\n    ... echo \"Full Environment:\"\n    ... printenv\n    ... echo \"####################################################\"\n    ...\n    ... sleep 90\n    ...\n    ... echo \"Job Finished: \" `date`\n    ... exit 0\n    ... '''\n    >>> job = Job(script, backend='slurm', jobname='printenv', queue='test',\n    ...           time='00:05:00', nodes=1, threads=1, mem=100,\n    ...           stdout='printenv.out', stderr='printenv.err')\n    >>> print(job)\n    #!/bin/bash\n    #SBATCH --output=printenv.out\n    #SBATCH --mem=100\n    #SBATCH --job-name=printenv\n    #SBATCH --partition=test\n    #SBATCH --cpus-per-task=1\n    #SBATCH --error=printenv.err\n    #SBATCH --time=00:05:00\n    #SBATCH --nodes=1\n    <BLANKLINE>\n    echo \"####################################################\"\n    echo \"Job id: $SLURM_JOB_ID\"\n    echo \"Job name: $SLURM_SUBMIT_DIR\"\n    echo \"Job started on\" `hostname` `date`\n    echo \"Current directory:\" `pwd`\n    echo \"####################################################\"\n    <BLANKLINE>\n    echo \"####################################################\"\n    echo \"Full Environment:\"\n    printenv\n    echo \"####################################################\"\n    <BLANKLINE>\n    sleep 90\n    <BLANKLINE>\n    echo \"Job Finished: \" `date`\n    exit 0\n    <BLANKLINE>\n\n    Python's ability to add arbitrary attributes to an existing object together\n    with the formatting step in rendering the job script allow for a a powerful\n    (but hacky) way to use arbitrary template variables in the job script:\n\n    >>> script = r'''\n    ... echo {myvar}\n    ... '''\n    >>> job = Job(script, jobname='myvar_test')\n    >>> job.myvar = 'Hello'\n    >>> print(job)\n    #!/bin/bash\n    #SBATCH --nodes=1\n    #SBATCH --cpus-per-task=1\n    #SBATCH --job-name=myvar_test\n    <BLANKLINE>\n    echo Hello\n    <BLANKLINE>\n    \"\"\"\n\n    default_opts = {}\n    backends = {}\n    default_backend = 'slurm'\n    default_shell = None\n    default_remote = None\n    default_rootdir = ''\n    default_sleep_interval = None\n    cache_folder = None\n    cache_prefix = 'clusterjob'\n    cache_counter = 0\n    debug_cmds = False\n\n    @classmethod\n    def register_backend(cls, backend):\n        \"\"\"\n        Register a new backend\n\n        `backend` must be a dictionary that follows the same structure as\n        `clusterjob.backends.slurm.backend`. If the dictionary is found to have\n        the wrong structure, an AssertionError will be raised.\n        \"\"\"\n        logger = logging.getLogger(__name__)\n        from . backends import check_backend\n        try:\n            if check_backend(backend):\n                cls.backends[backend['name']] = backend\n        except AssertionError as e:\n            import pprint\n            pp = pprint.PrettyPrinter(indent=4)\n            logger.error(\"Invalid backend:\\n%s\\n\\n%s\", pp.pformat(backend), e)\n\n    @classmethod\n    def clear_cache_folder(cls):\n        \"\"\"Remove all files in the cache_folder\"\"\"\n        if cls.cache_folder is not None:\n            for file in glob(os.path.join(cls.cache_folder, '*')):\n                os.unlink(file)\n\n    def __init__(self, jobscript, jobname, **kwargs):\n        \"\"\"\n        Arguments\n        ---------\n\n        jobscript: str\n            Body (template) for the jobscript as multiline string\n\n        jobname: str\n            Name of the job\n\n\n        Keyword Arguments\n        -----------------\n\n        The backend, shell, remote, rootdir, workdir, filename, prologue,\n        epilogue, and sleep_interval arguments specify the value of the\n        corresponding attributes.\n\n        All other keyword arguments are stored in the `options` dict attribute,\n        to be used as options for the job submission command (e.g. sbatch for\n        slurm or qsub for PBS). At a minimum, the following arguments are\n        supported:\n\n        queue: str\n            Name of queue/partition to which to submit the job\n\n        time: time\n            Maximum runtime\n\n        nodes: int\n            Required number of nodes\n\n        threads: int\n            Required number of threads (cores)\n\n        mem: int\n            Required memory\n\n        stdout: str\n            name of file to which to write the jobs stdout\n\n        stderr: str\n            name of file to which to write the jobs stderr\n\n        Custom backends may define further options, or even support arbitrary\n        additional options. For example, in the default SLURM backend,\n        unknown options are passed directly as arguments to `sbatch`, where\n        single-letter argument names are prepended with '-', multi-letter\n        argument names with '--'. 
An argument with boolean values is passed\n        without any value iff the value is True:\n\n            contiguous=True            ->  --contiguous\n            dependency='after:12454'   ->  --dependency=after:12454\n            F='nodefile.txt'           ->  -F nodefile.txt\n\n        All backends are encouraged to implement a similar behavior.\n        \"\"\"\n        self.options = {'jobname': jobname}\n\n        self.jobscript = jobscript\n\n        if len(self.backends) == 0:\n            # register all available backends\n            import pkgutil\n            import clusterjob.backends\n            for __, module_name, __ \\\n            in pkgutil.walk_packages(clusterjob.backends.__path__):\n                mod = importlib.import_module(\n                        'clusterjob.backends.%s' % module_name)\n                self.register_backend(mod.backend)\n\n        for kw in ['backend', 'shell', 'remote', 'rootdir', 'workdir',\n        'filename', 'prologue', 'epilogue', 'sleep_interval']:\n            self.__dict__[kw] = None\n            if kw in kwargs:\n                self.__dict__[kw] = kwargs[kw]\n                del kwargs[kw]\n            else:\n                default_key = 'default_%s' % kw\n                if default_key in self.__class__.__dict__:\n                    self.__dict__[kw] = self.__class__.__dict__[default_key]\n        if self.shell is None:\n            self.shell = '/bin/bash'\n        if self.rootdir is None:\n            self.rootdir = ''\n        if self.workdir is None:\n            self.workdir = ''\n        if self.filename is None:\n            self._default_filename()\n\n        self.options.update(self.backends[self.backend]['default_opts'])\n        self.options.update(self.default_opts)\n        self.options.update(kwargs)\n\n    def _default_filename(self):\n        \"\"\"If self.filename is None, attempt to set it from the jobname\"\"\"\n        if self.filename is None:\n            if 'jobname' in self.options:\n                self.filename = \"%s.%s\" \\\n                                % (self.options['jobname'],\n                                   self.backends[self.backend]['extension'])\n\n    def __str__(self):\n        \"\"\"Return the string representation of the job, i.e. the fully rendered\n        jobscript\"\"\"\n        opt_translator = self.backends[self.backend]['translate_options']\n        opt_array = opt_translator(self.options)\n        prefix = self.backends[self.backend]['prefix']\n        jobscript = self.jobscript\n        var_replacements = self.backends[self.backend]['job_vars']\n        for var in var_replacements:\n            jobscript = jobscript.replace(var, var_replacements[var])\n        jobscript_lines = []\n        jobscript_lines.append(\"#!%s\" % self.shell)\n        for option in opt_array:\n            jobscript_lines.append(\"%s %s\" % (prefix, option))\n        for line in jobscript.split(\"\\n\"):\n            if not line.startswith(\"#!\"):\n                jobscript_lines.append(line)\n        jobscript = \"\\n\".join(jobscript_lines)\n        return jobscript.format(**self.__dict__)\n\n    def write(self, filename=None):\n        \"\"\"\n        Write out the fully rendered jobscript to file. If filename is not\n        None, write to the given *local* file. Otherwise, write to the local or\n        remote file specified in the filename attribute, in the folder\n        specified by the rootdir and workdir attributes. The folder will be\n        created if it does not exist already.\n        \"\"\"\n        from . utils import run_cmd\n        remote = self.remote\n        if filename is None:\n            self._default_filename()\n            filename = self.filename\n            filename = os.path.join(self.rootdir, self.workdir, filename)\n        else:\n            remote = None\n\n        if filename is None:\n            raise ValueError(\"filename not given\")\n        filepath = os.path.split(filename)[0]\n        run_cmd(['mkdir', '-p', filepath], remote, ignore_exit_code=False)\n\n        # Write / Upload\n        if remote is None:\n            with open(filename, 'w') as run_fh:\n                run_fh.write(str(self))\n            set_executable(filename)\n        else:\n            with tempfile.NamedTemporaryFile('w', delete=False) as run_fh:\n                run_fh.write(str(self))\n                tempfilename = run_fh.name\n            set_executable(tempfilename)\n            try:\n                sp.check_output(\n                    ['scp', tempfilename, remote+':'+filename],\n                    stderr=sp.STDOUT)\n            finally:\n                os.unlink(tempfilename)\n\n    def _run_prologue(self):\n        \"\"\"Render and run the prologue script\"\"\"\n        if self.prologue is not None:\n            prologue = self.prologue.format(\n                       fulldir=os.path.join(self.rootdir, self.workdir),\n                       **self.__dict__)\n            if not prologue.startswith(\"#!\"):\n                prologue = \"#!\" + self.shell + \"\\n\" + prologue\n            with tempfile.NamedTemporaryFile('w', delete=False) as prologue_fh:\n                prologue_fh.write(prologue)\n                tempfilename = prologue_fh.name\n            set_executable(tempfilename)\n            try:\n                sp.check_output( [tempfilename, ], stderr=sp.STDOUT)\n            except sp.CalledProcessError as e:\n                logger = logging.getLogger(__name__)\n                logger.error(r'''\n                Prologue script did not exit cleanly.\n                CWD: {cwd}\n                prologue: ---\n                {prologue}\n                ---\n                response: ---\n                {response}\n                ---\n                '''.format(cwd=os.getcwd(), prologue=self.prologue,\n                           response=e.output))\n                raise\n            finally:\n                os.unlink(tempfilename)\n\n    def submit(self, block=False, cache_id=None, force=False, retry=True):\n        \"\"\"\n        Submit the job.\n\n        Parameters\n        ----------\n\n        block: boolean, optional\n            If `block` is True, wait until the job is finished, and return the\n            exit status code. Otherwise, return an AsyncResult object.\n\n        cache_id: str or None, optional\n            An ID uniquely defining the submission, used as identifier for the\n            cached AsyncResult object. If not given, the cache_id is determined\n            internally. If an AsyncResult with a matching cache_id is present\n            in the cache_folder, nothing is submitted to the cluster, and the\n            cached AsyncResult object is returned\n\n        force: boolean, optional\n            If True, discard any existing cached AsyncResult object, ensuring\n            that the job is sent to the cluster.\n\n        retry: boolean, optional\n            If True, and the existing cached AsyncResult indicates that the job\n            finished with an error (CANCELLED/FAILED), resubmit the job,\n            discard the cache and return a fresh AsyncResult object\n        \"\"\"\n        logger = logging.getLogger(__name__)\n        from . status import FAILED, CANCELLED, PENDING, str_status\n        from . 
utils import mkdir, run_cmd, time_to_seconds\n assert self.filename is not None, 'jobscript must have a filename'\n if self.remote is None:\n logger.info(\"Submitting job %s locally\",\n self.options['jobname'])\n else:\n logger.info(\"Submitting job %s on %s\",\n self.options['jobname'], self.remote)\n\n submitted = False\n if cache_id is None:\n Job.cache_counter += 1\n cache_id = str(Job.cache_counter)\n else:\n cache_id = str(cache_id)\n cache_file = None\n\n ar = AsyncResult(backend=self.backends[self.backend])\n ar.debug_cmds = self.debug_cmds\n\n if self.cache_folder is not None:\n mkdir(self.cache_folder)\n cache_file = os.path.join(self.cache_folder,\n \"%s.%s.cache\" % (self.cache_prefix, cache_id))\n if os.path.isfile(cache_file):\n if force:\n try:\n os.unlink(cache_file)\n except OSError:\n pass\n else:\n logger.debug(\"Reloading AsyncResult from %s\", cache_file)\n ar.load(cache_file)\n submitted = True\n if ar._status >= CANCELLED:\n if retry:\n logger.debug(\"Cached run %s, resubmitting\",\n str_status[ar._status])\n os.unlink(cache_file)\n ar = \\\n AsyncResult(backend=self.backends[self.backend])\n ar.debug_cmds = self.debug_cmds\n submitted = False\n\n if not submitted:\n self._run_prologue()\n cmd_submit, id_reader = self.backends[self.backend]['cmd_submit']\n self.write()\n job_id = None\n try:\n cmd = cmd_submit(self.filename)\n job_id = id_reader(\n run_cmd(cmd, self.remote, self.rootdir,\n self.workdir, ignore_exit_code=True))\n if job_id is None:\n logger.error(\"Failed to submit job\")\n status = FAILED\n else:\n logger.info(\"Job ID: %s\", job_id)\n status = PENDING\n except sp.CalledProcessError as e:\n logger.error(\"Failed to submit job: %s\", e)\n status = FAILED\n\n ar.remote = self.remote\n ar.options = self.options.copy()\n ar.cache_file = cache_file\n ar.backend = self.backends[self.backend]\n if self.sleep_interval is not None:\n ar.sleep_interval = self.sleep_interval\n else:\n try:\n ar.sleep_interval \\\n = int(time_to_seconds(self.options['time']) / 10)\n if ar.sleep_interval < 10:\n ar.sleep_interval = 10\n if ar.sleep_interval > 1800:\n ar.sleep_interval = 1800\n except KeyError:\n ar.sleep_interval = 60\n ar._status = status\n ar.job_id = job_id\n if self.epilogue is not None:\n epilogue = self.epilogue.format(\n fulldir=os.path.join(self.rootdir, self.workdir),\n **self.__dict__)\n if not epilogue.startswith(\"#!\"):\n epilogue = \"#!\" + self.shell + \"\\n\" + epilogue\n ar.epilogue = epilogue\n\n if block:\n result = ar.get()\n else:\n result = ar\n\n ar.dump()\n\n return result\n\n\nclass AsyncResult(object):\n \"\"\"\n Result of submitting a cluster job\n\n Attributes\n ----------\n\n remote: str or None\n The remote host on which the job is running. Passwordless ssh must be\n set up to reach the remote. A value of None indicates that the job is\n running locally\n\n options: dict\n copy of the `options` attribute of the Job() instance that created the\n AsyncResult object\n\n cache_file: str or None\n The full path and name of the file to be used to cache the AsyncResult\n object. 
The cache file will be written automatically anytime a change\n in status is detected\n\n backend: dict\n A reference to the backend options dictionary for the backend under\n which the job is running\n\n sleep_interval: int\n Numer of seconds to sleep between polls to the cluster scheduling\n systems when waiting for the Job to finish\n\n job_id: str\n The Job ID assigned by the cluster scheduler\n\n epilogue: str\n Multiline script to be run once when the status changes from \"running\"\n (pending/running) to \"not running\" (completed, canceled, failed).\n The contents of this variable will be written to a temporary file as\n is, and executed as a script in the current working directory.\n \"\"\"\n\n debug_cmds = False\n\n def __init__(self, backend):\n \"\"\"Create a new AsyncResult instance\"\"\"\n from . status import CANCELLED\n self.remote = None\n self.options = {}\n self.cache_file = None\n self.backend = backend\n self.sleep_interval = 10\n self.job_id = ''\n self._status = CANCELLED\n self.epilogue = None\n\n @property\n def status(self):\n \"\"\"Return the job status as one of the codes defined in the\n `clusterjob.status` module.\n finished, communicate with the cluster to determine the job's status.\n \"\"\"\n from . status import COMPLETED, STATUS_CODES\n if self._status >= COMPLETED:\n return self._status\n else:\n from . utils import run_cmd\n cmd_status, status_reader = self.backend['cmd_status_running']\n cmd = cmd_status(self.job_id)\n response = run_cmd(cmd, self.remote, ignore_exit_code=True)\n status = status_reader(response)\n if status is None:\n cmd_status, status_reader = self.backend['cmd_status_finished']\n cmd = cmd_status(self.job_id)\n response = run_cmd(cmd, self.remote, ignore_exit_code=True)\n status = status_reader(response)\n prev_status = self._status\n self._status = status\n if not self._status in STATUS_CODES:\n raise ValueError(\"Invalid status code %s\", self._status)\n if prev_status != self._status:\n if self._status >= COMPLETED:\n self.run_epilogue()\n self.dump()\n return self._status\n\n def get(self, timeout=None):\n \"\"\"Return status\"\"\"\n from . status import COMPLETED\n status = self.status\n if status >= COMPLETED:\n return status\n else:\n self.wait(timeout)\n return self.status\n\n def dump(self, cache_file=None):\n \"\"\"Write dump out to file\"\"\"\n if cache_file is None:\n cache_file = self.cache_file\n if cache_file is not None:\n self.cache_file = cache_file\n with open(cache_file, 'wb') as pickle_fh:\n pickle.dump((self.remote, self.options, self.sleep_interval,\n self.job_id, self._status, self.epilogue),\n pickle_fh)\n\n def load(self, cache_file):\n \"\"\"Read dump from file\"\"\"\n self.cache_file = cache_file\n with open(cache_file, 'rb') as pickle_fh:\n self.remote, self.options, self.sleep_interval, self.job_id, \\\n self._status, self.epilogue = pickle.load(pickle_fh)\n\n\n def wait(self, timeout=None):\n \"\"\"Wait until the result is available or until roughly timeout seconds\n pass.\"\"\"\n from . status import COMPLETED\n import time\n spent_time = 0\n sleep_seconds = int(self.sleep_interval)\n while self.status < COMPLETED:\n time.sleep(sleep_seconds)\n spent_time += sleep_seconds\n if timeout is not None:\n if spent_time > timeout:\n return\n\n def ready(self):\n \"\"\"Return whether the job has completed.\"\"\"\n from . 
status import COMPLETED\n return (self.status >= COMPLETED)\n\n def successful(self):\n \"\"\"Return True if the job finished with a COMPLETED status, False if it\n finished with a CANCELLED or FAILED status. Raise an AssertionError if\n the job has not completed\"\"\"\n from . status import COMPLETED\n status = self.status\n assert status >= COMPLETED, \"status is %s\" % status\n return (self.status == COMPLETED)\n\n def cancel(self):\n \"\"\"Instruct the cluster to cancel the running job. Has no effect if\n job is not running\"\"\"\n from . status import CANCELLED, COMPLETED\n from . utils import run_cmd\n if self.status > COMPLETED:\n return\n cmd_cancel = self.backend['cmd_cancel']\n cmd = cmd_cancel(self.job_id)\n run_cmd(cmd, self.remote, ignore_exit_code=True)\n self._status = CANCELLED\n self.dump()\n\n def run_epilogue(self):\n \"\"\"\n Run the epilogue script in the current working directory.\n\n Raise sp.CalledProcessError if the script does not finish with\n exit code zero.\n \"\"\"\n logger = logging.getLogger(__name__)\n if self.epilogue is not None:\n with tempfile.NamedTemporaryFile('w', delete=False) as epilogue_fh:\n epilogue_fh.write(self.epilogue)\n tempfilename = epilogue_fh.name\n set_executable(tempfilename)\n try:\n sp.check_output( [tempfilename, ], stderr=sp.STDOUT)\n except sp.CalledProcessError as e:\n logger.error(dedent(r'''\n Epilogue script did not exit cleanly.\n CWD: {cwd}\n epilogue: ---\n {epilogue}\n ---\n response: ---\n {response}\n ---\n ''').format(cwd=os.getcwd(), epilogue=self.epilogue,\n response=e.output))\n raise\n finally:\n os.unlink(tempfilename)\n\n","sub_path":"clusterjob/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":27640,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"481482148","text":"def gcd(a,b):\n\tif b==0:\n\t\treturn a;\n\tif a==0:\n\t\treturn b;\n\treturn gcd(b,a%b)\n\nn=int(input())\nfor i in range (0,n):\n\tx=input(\"\").split();\n#print(x)\n\ta=int(x[0])\n\tb=int(x[1])\n\tt=int(x[2])\n\tif (b < a):\n\t\ta,b=b,a\n\t\n\tif a+b<=t:\n\t\tprint(\"1/1\")\n\telse: \n\t\tden=2 * a * b;\n\t\tif t <=a:\n#\t\t\tprint(\"First\")\n\t\t\tnum=t*t;\n\t\telif t<=b:\n\t\t\tnum=2*a*(t-a) + a*a;\n#\t\t\tprint (type(2*a*(t-a)))\n\t\telse:\n\t\t\tnum=2*a*b-(a+b-t)*(a+b-t)\n#\t\t\tprint (type(2*a*b))\n#\t\t\tprint (type((a+b-t)*(a+b-t)))\n#\t\t\tprint (2*a*b)\n#\t\t\tprint ((a+b-t)*(a+b-t))\n#\t\tprint(num);\n#\t\tprint(den);\n#\t\tprint (type(num))\n\t\tgc=gcd(int(num),int(den))\n\t\tprint('%d/%d'%(num/gc,den/gc))\t\n","sub_path":"hackerrank/math/random.py","file_name":"random.py","file_ext":"py","file_size_in_byte":625,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"69243309","text":"\"\"\"\nA bone-simple fake client used to test the hass integration\n\"\"\"\nimport http.client\nimport time\nimport urllib.parse\nfrom datetime import datetime\nfrom random import randrange\n\nMY_PASSKEY = \"34271334ED1FADA6D8B988B14267E55D\"\n# MY_PASSKEY = '35271334ED1FADA7D8B988B22222E22D'\n\nparamset_a = {\n \"PASSKEY\": MY_PASSKEY,\n \"stationtype\": \"EasyWeatherV1.4.9\",\n \"dateutc\": \"2020-11-13+17:10:24\",\n \"tempinc\": 20.0,\n \"tempinf\": 68.0,\n \"humidityin\": 40,\n \"baromrelin\": 28.760,\n \"baromabsin\": 28.760,\n \"tempc\": 20.0,\n \"tempf\": 68.0,\n \"humidity\": 64,\n \"winddir\": 319,\n \"windspeedmph\": 0.9,\n \"windgustmph\": 1.1,\n \"rainratein\": 0.000,\n \"eventrainin\": 0.000,\n \"dailyrainin\": 
0.000,\n \"weeklyrainin\": 0.024,\n \"monthlyrainin\": 0.028,\n \"yearlyrainin\": 0.843,\n \"solarradiation\": 375.53,\n \"uv\": 3,\n \"pm25_ch1\": 8.0,\n \"pm25_avg_24h_ch1\": 5.2,\n \"freq\": \"915M\",\n \"model\": \"HP3500_V1.6.2\",\n \"leak_ch1\": 0,\n \"leakbatt1\": 5,\n}\n\nparamset_b = {\n \"PASSKEY\": MY_PASSKEY,\n \"stationtype\": \"EasyWeatherV1.5.4\",\n \"dateutc\": \"2020-11-16+15:30:24\",\n \"tempinc\": 20.7,\n \"tempinf\": 69.26,\n \"humidityin\": 52,\n \"baromrelin\": 29.785,\n \"baromabsin\": 29.785,\n \"tempc\": 20.4,\n \"tempf\": 68.72,\n \"humidity\": 94,\n \"winddir\": 260,\n \"winddir_avg10m\": 260,\n \"windspeedmph\": 0.0,\n \"windspdmph_avg10m\": 0.0,\n \"windgustmph\": 0.0,\n \"maxdailygust\": 6.9,\n \"rainratein\": 0.000,\n \"eventrainin\": 0.118,\n \"hourlyrainin\": 0.000,\n \"dailyrainin\": 0.118,\n \"weeklyrainin\": 0.118,\n \"monthlyrainin\": 0.378,\n \"yearlyrainin\": 6.268,\n \"solarradiation\": 0.00,\n \"uv\": 0,\n \"soilmoisture1\": 0,\n \"wh65batt\": 1,\n \"wh25batt\": 0,\n \"soilbatt1\": 1.5,\n \"leak_ch1\": 0,\n \"leakbatt1\": 5,\n \"leak_ch2\": 1,\n \"leakbatt2\": 3,\n \"tf_co2\": 56.7,\n \"humi_co2\": 72,\n \"pm25_co2\": 24.7,\n \"pm25_24h_co2\": 29.4,\n \"pm10_co2\": 24.7,\n \"pm10_24h_co2\": 29.9,\n \"co2\": 455,\n \"co2_24h\": 464,\n \"co2_batt\": 6,\n \"freq\": \"868M\",\n \"model\": \"HP1000SE-PRO_Pro_V1.6.0\",\n}\n\nhost = \"localhost\"\nport = \"4199\"\n\nwhile True:\n try:\n print(\"Connecting to host {0} on port {1}\".format(host, port))\n conn = http.client.HTTPConnection(host, port)\n headers = {\"Content-type\": \"application/x-www-form-urlencoded\"}\n\n usedParam = paramset_a\n\n usedParam[\"dateutc\"] = datetime.now().strftime(\"%Y-%m-%d+%H:%M:%S\")\n usedParam[\"tempinc\"] = 20 + randrange(10) / 10.0\n\n params = urllib.parse.urlencode(usedParam)\n print(params)\n conn.request(\"POST\", \"\", params, headers)\n response = conn.getresponse()\n print(response.status, response.reason)\n conn.close()\n except Exception as err:\n print(err)\n finally:\n time.sleep(5)\n","sub_path":"tests/fake_client.py","file_name":"fake_client.py","file_ext":"py","file_size_in_byte":2807,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"306242869","text":"\n\n#Create App Dictionary: Main function\nimport requests\nrequests.packages.urllib3.disable_warnings()\nfrom biokbase.catalog.Client import Catalog\nfrom biokbase.narrative_method_store.client import NarrativeMethodStore\ncatalog = Catalog(url = \"https://kbase.us/services/catalog\")\nnms = NarrativeMethodStore(url = \"https://kbase.us/services/narrative_method_store/rpc\")\nfrom data_configure import data_configure\n\nimport pandas as pd\n\ndef create_app_dictionary():\n apps = nms.list_methods({\"tag\": \"release\"})\n apps_datastruc = pd.DataFrame.from_dict(apps)\n ModDfApps = data_configure(apps_datastruc)\n ModDfApps.drop(['app_type', 'authors', 'git_commit_hash', 'icon', 'input_types', 'module_name', 'name', 'namespace',\n 'output_types', 'subtitle', 'tooltip', 'ver'], axis=1, inplace=True)\n keys = list(set([item for sublist in list(ModDfApps.categories) for item in sublist]))\n app_dict = {k: [] for k in keys}\n\n for i in ModDfApps.index.values:\n\n app_category_lst = ModDfApps[\"categories\"][i]\n for category in app_category_lst:\n if category in app_dict.keys():\n app_dict[category].append(ModDfApps[\"id\"][i])\n app_dict[category] = list(set(app_dict[category]))\n else:\n raise KeyError(\"{} not a KBase app 
category\".format(category))\n\n return app_dict\n","sub_path":"source/category_to_app_dict.py","file_name":"category_to_app_dict.py","file_ext":"py","file_size_in_byte":1369,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"405133789","text":"'''\nImports\n'''\nfrom random import shuffle\nfrom os import system\n\nsuits = ('Spade','Club','Heart','Diamond')\nranks = ('Two', 'Three', 'Four', 'Five', 'Six', 'Seven', 'Eight', 'Nine', 'Ten', 'Jack', 'Queen', 'King', 'Ace')\nvalues = {'Two':2, 'Three':3, 'Four':4, 'Five':5, 'Six':6, 'Seven':7, 'Eight':8, 'Nine':9, 'Ten':10, 'Jack':10, 'Queen':10, 'King':10, 'Ace':11}\n\nplaying = True\n\nclass InsufficientBalanceError(Exception):\n\t'''\n\tCustome exception when user enters bet amount higher than available balanve\n\t'''\n\tpass\n\nclass Card:\n\t'''\n\tThis class is used to create a Card object which will have Suite and Rank as it's attributes\n\t'''\n\tdef __init__(self,suit,rank):\n\t\t'''\n\t\tInitialize the Card object\n\t\t'''\n\t\tself.suit = suit\n\t\tself.rank = rank\n\n\tdef __str__(self):\n\t\t'''\n\t\tThis method returns the string in 'Rank of Suit' formt when called as print(acrd_object)\n\t\t'''\n\t\treturn self.rank+\" of \"+self.suit\n\nclass Deck:\n\t'''\n\tThis class creates an object of deck which will inturn creates an object of class for all the 52 cards in the deck\n\t'''\n\tdef __init__(self):\n\t\t'''\n\t\tInitialize the deck\n\t\t'''\n\t\tself.deck = []\t\t#This list will store the card objects. \n\t\tfor suit in suits:\n\t\t\tfor rank in ranks:\n\t\t\t\tself.deck.append(Card(suit,rank))\n\n\tdef __str__(self):\n\t\t'''\n\t\tThis method will print the Deck in string format\n\t\t'''\n\t\tdeck_string = 'Content of the deck:'\n\t\tfor card in self.deck:\n\t\t\tdeck_string += '\\n'+card.__str__()\n\t\treturn deck_string\n\n\tdef shuffle(self):\n\t\t'''\n\t\tThis method shuffles the current deck\n\t\t'''\n\t\tshuffle(self.deck)\n\n\tdef deal(self):\n\t\t'''\n\t\tThis method will take the returns the last card in the deck\n\t\t'''\n\t\treturn self.deck.pop()\n\nclass Hand:\n\t'''\n\tThis class represents a player and also computer dealer\n\t'''\n\tdef __init__(self):\n\t\t'''\n\t\tInitialize method for Hand class\n\t\t'''\n\t\tself.cards = []\n\t\tself.value = 0\n\t\tself.aces = 0\t\t#This is to keep track and manage aces which are special cards holding dual values\n\n\tdef add_card(self,card):\n\t\t'''\n\t\tThis method will add a new card to a player. 
This will be used to assign two cards in the beginning and whenever a player calls Hit\n\t\t'''\n\t\tself.cards.append(card)\n\t\tself.value += values[card.rank]\n\t\tif card.rank == 'Ace':\n\t\t\tself.aces += 1\n\n\tdef adjust_for_ace(self):\n\t\t'''\n\t\tThis method adjust the value of aces if the user has when the total value crosses 21\n\t\t'''\n\t\twhile self.value > 21 and self.aces > 0:\n\t\t\tself.value -= 10\n\t\t\tself.aces -= 1\n\nclass Chips:\n\t'''\n\tThis class is used to manage players Chips (money)\n\t'''\n\tdef __init__(self,total=100):\n\t\t'''\n\t\tInitialize method for Chip class\n\t\t'''\n\t\tself.total = total\n\t\tself.bet = 0\n\n\tdef win_bet(self):\n\t\t'''\n\t\tThis method will be called when a player wins the bet\n\t\t'''\n\t\tself.total += self.bet\n\n\tdef lose_bet(self):\n\t\t'''\n\t\tThis method will be called when a player loses bet\n\t\t'''\n\t\tself.total -= self.bet\n\ndef take_bet(chips):\n\t'''\n\tThis function is used to take the bet from player\n\t'''\n\twhile True:\n\t\ttry:\n\t\t\tchips.bet = int(input('Whats your bet for this round? '))\n\t\t\tif chips.bet > chips.total:\n\t\t\t\traise InsufficientBalanceError\n\t\t\telif chips.bet < 1:\n\t\t\t\traise ValueError\t\t# Custom exception\n\t\texcept ValueError:\n\t\t\tprint('\\nPlease enter a right bet amount')\n\t\texcept InsufficientBalanceError:\n\t\t\tprint('\\nInsufficient Fund')\n\t\telse:\n\t\t\tbreak\n\ndef hit(deck,hand):\n\t'''\n\tFunction to take hit untill they bust. They are called anytime in the game play when the user request for 'Hit'\n\tInput: Objects of Deck and Hand class\n\t'''\n\thand.add_card(deck.deal())\n\thand.adjust_for_ace()\n\ndef hit_or_stand(deck,hand):\n\t'''\n\tThis function should accept the deck and the player's hand as arguments, and assign playing as a global variable.\n\tIf the Player Hits, employ the hit() function above. If the Player Stands, set the playing variable to False - this will control the behavior of a while loop later on in our code.\n\t'''\n\tglobal playing\n\n\twhile True:\n\t\tx = input(\"\\nWould you like to Hit or Stand? Enter 'h' or 's' \")\n\n\t\tif x[0].lower() == 'h':\n\t\t\thit(deck,hand)\n\t\telif x[0].lower() == 's':\n\t\t\tprint('Player stands. Dealer is playing.')\n\t\t\tplaying = False\n\t\telse:\n\t\t\tprint('Sorry, please try again')\n\t\t\tcontinue\n\t\tbreak\n\ndef show_some(player,dealer):\n\t'''\n\tWhen the game starts, and after each time Player takes a card, the dealer's first card is hidden and all of Player's cards are visible.\n\t'''\n\tprint(\"\\nDealer's Hand:\\n--------------\")\n\tprint(\" <card hidden>\")\n\tprint('',dealer.cards[1])\n\tprint(\"\\nPlayers Hand:\\n--------------\", *player.cards, sep='\\n')\n\ndef show_all(player,dealer):\n\t'''\n\tAt the end of the hand all cards are shown\n\t'''\n\tprint(\"\\nDealer's Hand:\\n--------------\", *dealer.cards, sep='\\n ')\n\tprint(\"Dealer's Hand =\",dealer.value)\n\tprint(\"\\nPlayer's Hand:\\n--------------\", *player.cards, sep='\\n ')\n\tprint(\"Player's Hand =\",player.value)\n\ndef player_busts(chips):\n\t'''\n\tScenario: when user's value cros 21\n\t'''\n\tprint('\\n<< Player bust! >>')\n\tchips.lose_bet()\n\ndef player_wins(chips):\n\t'''\n\tScenario: when user's value is close to 21 than dealer's\n\t'''\n\tprint('\\n<< Player wins! 
>>')\n\tchips.win_bet()\n\ndef dealer_busts(chips):\n\t'''\n\tScenario: when dealer's value cros 21\n\t'''\n\tprint('\\n<< Dealer bust >>!')\n\tchips.win_bet()\n\ndef dealer_wins(chips):\n\t'''\n\tScenario: when dealer's value is closer to 21 than user's\n\t'''\n\tprint('\\n<< Dealer wins! >>')\n\tchips.lose_bet()\n\ndef push():\n\t'''\n\tScenario: When dealer and player tie\n\t'''\n\tprint(\"Dealer and Player tie! It's a Push !\")\n\n\n'''\nGame play starts here\n'''\n\nsystem('cls')\n\n# Print an opening statement\nprint('Welcome to BlackJack! Get close to 21 as you can without going over!\\n\\\n\tDealer hits until she reaches 17.\\n\\\n\tAces count as 1 or 11.')\n\n# Setup a players chip - deafault value is 100\nplayer_chips = Chips()\n\nwhile player_chips.total > 0:\n\n\t# Create a Deck and shuffle\n\tdeck = Deck()\n\tdeck.shuffle()\n\n\t# Create a hand object for player and deal two cards to player\n\tplayer_hand = Hand()\n\tplayer_hand.add_card(deck.deal())\n\tplayer_hand.add_card(deck.deal())\n\n\t# Create a hand object for dealer and deal two cards to dealer\n\tdealer_hand = Hand()\n\tdealer_hand.add_card(deck.deal())\n\tdealer_hand.add_card(deck.deal())\n\n\t# Take player bet\n\ttake_bet(player_chips)\n\n\t# Show the original cards. Hide first card of dealer\n\tshow_some(player_hand,dealer_hand)\n\n\twhile playing:\n\n\t\t# Prompt the user for hit or stand\n\t\thit_or_stand(deck,player_hand)\n\n\t\t# Show all of players cards after each round. Hide dealers first card\n\t\tshow_some(player_hand,dealer_hand)\n\n\t\t# If player's hand exceeds 21, run scenarion player_busts()\n\t\tif player_hand.value > 21:\n\t\t\tplayer_busts(player_chips)\n\t\t\tbreak\n\n\t# If player didn't bust, play dealers hand until dealer reaches 17\n\tif player_hand.value <= 21:\n\n\t\twhile dealer_hand.value < 17:\n\t\t\thit(deck,dealer_hand)\n\n\t\t# Show all the cards\n\t\tshow_all(player_hand,dealer_hand)\n\n\t\t# test for different scenarios\n\t\tif dealer_hand.value > 21:\n\t\t\tdealer_busts(player_chips)\n\n\t\telif dealer_hand.value > player_hand.value:\n\t\t\tdealer_wins(player_chips)\n\n\t\telif dealer_hand.value < player_hand.value:\n\t\t\tplayer_wins(player_chips)\n\n\t\telse:\n\t\t\tpush()\n\n\t# Show players chips after this round\n\tprint(\"\\nPlayers total chips stands at: \",player_chips.total)\n\n\t# Ask to play again\n\tnew_game = input(\"\\nWould you like to play another game? 
Enter 'y' or 'n' \")\n\n\tif new_game[0].lower() == 'y':\n\t\tplaying = True\n\t\tcontinue\n\telse:\n\t\tprint('Thanks for playing!')\n\t\tbreak\n","sub_path":"BlackJack.py","file_name":"BlackJack.py","file_ext":"py","file_size_in_byte":7384,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"208947358","text":"# -*- coding: utf-8 -*-\nfrom django.http.response import HttpResponse\nfrom django.shortcuts import render, redirect\nfrom geral.models import Evento, Estado, Cidade\nfrom integrante.models import Integrante\nfrom django.views.generic.base import View\nfrom django.core.serializers import serialize\nimport os\nimport json\n#import Cookie\n\nclass EventoView(View):\n def get(self,request, *args, **kwargs):\n context = {}\n context['listaestados'] = Estado.objects.all()\n return render(request, 'geral/evento_novo.html', context)\n\n def post(self,request, *args, **kwargs):\n\n oEvento=Evento()\n valido = request.POST.get(\"valido\")\n oficial = oficial=request.POST.get(\"oficial\")\n\n if(valido):\n oEvento.valido=True\n else:\n oEvento.valido=False\n if(oficial):\n oEvento.oficial=True\n else:\n oEvento.oficial=False\n oEvento.titulo=request.POST.get(\"titulo\")\n oEvento.motoclube=request.POST.get(\"motoclube\")\n oEvento.data_inicio=request.POST.get('data_inicio')\n oEvento.data_fim=request.POST.get('data_fim')\n oEvento.estado=Estado.objects.get(id = request.POST.get('estado'))\n oEvento.cidade=Cidade.objects.get(id = request.POST.get('cidade'))\n oEvento.endereco=request.POST.get(\"endereco\")\n\n oEvento.save()\n\n return redirect('/eventos/')\n\nclass EventoEdicaoView(View):\n def get(self,request, *args, **kwargs):\n context = {}\n\n id = request.GET.get(\"id\")\n evento = Evento.objects.get(id=id)\n\n context['evento'] = evento\n context['listaestados'] = Estado.objects.all()\n return render(request, 'geral/evento_integra.html', context)\n\n def post(self,request, *args, **kwargs):\n id = request.GET.get(\"id\")\n\n oEvento.valido=request.POST.get(\"valido\")\n oEvento.oficial=request.POST.get(\"oficial\")\n oEvento=Evento.objects.get(id=id)\n oEvento.titulo=request.POST.get(\"titulo\")\n oEvento.motoclube=request.POST.get(\"motoclube\")\n oEvento.data_inicio=request.POST.get('data_inicio')\n oEvento.data_fim=request.POST.get('data_fim')\n oEvento.estado=Estado.objects.get(id = request.POST.get('estado'))\n oEvento.cidade=Cidade.objects.get(id = request.POST.get('cidade'))\n oEvento.endereco=request.POST.get(\"endereco\")\n\n oEvento.save()\n\n return redirect('/eventos/')\n\ndef eventos(request):\n context = {}\n\n u_logado = 0\n\n if 'logado' in request.COOKIES:\n u_logado = request.COOKIES['logado']\n else:\n return redirect('/')\n\n oIntegrante = Integrante.objects.get(id=u_logado)\n\n listaeventos = Evento.objects.all()\n\n FiltroMC = request.GET.get('ipFiltroMotoclube')\n FiltroEvento = request.GET.get('ipFiltroEvento')\n FiltroMes = request.GET.get('ipFiltroMes')\n FiltroAno =request.GET.get('ipFiltroAno')\n FiltroOficial = request.GET.get('ckFiltroOficial')\n\n if(FiltroMC):\n listaeventos = listaeventos.filter(motoclube__icontains = FiltroMC)\n if(FiltroEvento):\n listaeventos = listaeventos.filter(titulo__icontains = FiltroEvento)\n if(FiltroMes != None):\n if(int(FiltroMes) > 0):\n listaeventos = listaeventos.filter(data_inicio__month = FiltroMes)\n if(FiltroAno):\n listaeventos = listaeventos.filter(data_inicio__year = FiltroAno)\n if(FiltroOficial):\n listaeventos = listaeventos.filter(valido = True)\n\n # c = 
Cookie.SimpleCookie()\n oIntegrante = Integrante.objects.get(id=u_logado)\n\n context['integrante']=oIntegrante\n context['listaeventos']=list(listaeventos)\n return render(request, 'geral/eventos_geral.html', context)\n\ndef eventosoficiais(request):\n Integrantes = Integrante.objects.all()\n\n context = {}\n context['integrantes'] = Integrantes\n\n return render(request, 'geral/eventos_oficiais.html', context)\n\ndef eventoeufui(request, evento_id):\n\n u_logado = 0\n\n if 'logado' in request.COOKIES:\n u_logado = request.COOKIES['logado']\n else:\n return redirect('/')\n\n oEvento = Evento.objects.get(id = evento_id)\n oIntegrante = Integrante.objects.get(id=u_logado)\n\n oIntegrante.eventos_eufui.add(oEvento)\n oIntegrante.save()\n\n return redirect('/eventos/')\n\ndef eventonaoestava(request, evento_id):\n u_logado = 0\n\n if 'logado' in request.COOKIES:\n u_logado = request.COOKIES['logado']\n else:\n return redirect('/')\n oEvento = Evento.objects.get(id = evento_id)\n oIntegrante = Integrante.objects.get(id=u_logado)\n\n oIntegrante.eventos_eufui.remove(oEvento)\n oIntegrante.save()\n\n return redirect('/eventos/integra/' + evento_id)\n\ndef novoevento(request):\n listaestados = Estado.objects.all()\n return render(request, 'geral/evento_novo.html', context)\n\n\ndef eventointegra(request, evento_id):\n # evento_id = request.GET.get('id_evento')\n\n context = {}\n context['evento'] = Evento.objects.get(id = evento_id)\n context['estados'] = list(Estado.objects.all())\n\n return render (request, 'geral/evento_integra_pb.html', context)\n\ndef eventoedicao(request, evento_id):\n # evento_id = request.GET.get('id_evento')\n\n context = {}\n context['evento'] = Evento.objects.get(id = evento_id)\n context['listaestados'] = list(Estado.objects.all())\n\n return render (request, 'geral/evento_integra.html', context)\n\ndef eventoeuvou(request, evento_id):\n u_logado = 0\n\n if 'logado' in request.COOKIES:\n u_logado = request.COOKIES['logado']\n else:\n return redirect('/')\n\n context = {}\n oEvento = Evento.objects.get(id=evento_id)\n oIntegrante = Integrante.objects.get(id=u_logado)\n\n oIntegrante.Eventos.add(oEvento)\n oIntegrante.save()\n\n return redirect('/eventos/')\n\ndef eventoexcluir(request, evento_id):\n\n oEvento = Evento.objects.get(id=evento_id)\n oEvento.delete()\n\n return redirect('/eventos/')\n\ndef eventoquemfoi(request, evento_id):\n u_logado = 0\n\n if 'logado' in request.COOKIES:\n u_logado = request.COOKIES['logado']\n else:\n return redirect('/')\n\n context = {}\n oEvento = Evento.objects.get(id=evento_id)\n\n # Retorna um QuerySet, permitindo que eu filtre isso como preferir\n context['participantes'] = oEvento.participantes.all()\n context['evento'] = Evento.objects.get(id = evento_id)\n context['integrantelogado'] = Integrante.objects.get(id=u_logado)\n\n\n return render(request, 'geral/evento_integra_pb.html', context)\n\ndef getCidadeByEstado(request):\n estado_id = request.GET.get('estado')\n lista = serialize('json', Cidade.objects.filter(estado__id = estado_id))\n return HttpResponse(lista, content_type='application/json')\n","sub_path":"geral/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6734,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"526056526","text":"import numpy as np\nimport random\nimport sys\nimport time\nimport os\nimport configparser\n\n\nDTYPE = np.float32\n\nconfig = configparser.ConfigParser()\nconfig.read('input.ini')\n\ndef run(): \n Nx = 
int(config['PRS']['Lattice_size'])\n Nt_int = int(config['PRS']['points_per_part'])\n parts = int(config['PRS']['sPRS_parts'])+int(config['PRS']['mPRS_parts'])\n name = config['Parameters']['name']\n \n #parts = 150\n #Nt_int = 10\n \n Nt=parts*Nt_int\n #Nx = 2**6\n \n c=0\n \n # Deletes old memory-map and creates new one\n try:\n os.remove(str(name)+'_phi.mymemmap')\n except FileNotFoundError:\n pass\n phi = np.memmap(str(name)+'_phi.mymemmap', dtype='float32', mode='w+', shape=(Nt,Nx,Nx))\n \n for p in range(1, parts+1):\n # Load the data files\n phi_fname = str(name) + '_phi_data'+str(p)+'.npy'\n phi_r = np.load(phi_fname)\n \n #print (phi.shape[0])\n try:\n if (p==1):\n for i in range (0, phi_r.shape[0]):\n # print (c,i)\n phi[c,:,:]=phi_r[i,:,:]\n c = c+1\n \n else:\n for i in range (0, phi_r.shape[0]-1):\n # print(c,i+1)\n phi[c,:,:]=phi_r[i+1,:,:]\n c=c+1\n except:\n print(\"Error in the loop\")\n\n \n\n","sub_path":"merge_phi.py","file_name":"merge_phi.py","file_ext":"py","file_size_in_byte":1394,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"623639043","text":"#! /usr/bin/env python\n# _*_ coding: utf-8 _*_\n\n# module: fnmatch\n# fnmatch(str, pattern)\n# fnmatchcase(str, pattern)\n\nfrom fnmatch import fnmatch, fnmatchcase\n\nif __name__ == '__main__':\n print(fnmatch('AA.txt', '*.txt'))\n print(fnmatchcase('AA.TXT', '*.TXT'))\n\n addresses = ['5421 N CLARK ST',\n '1060 W ADDISON ST',\n '1039 W GRANVILLE AVE',\n '2122 N CLARK ST',\n '4802 N BROADWAY',\n ]\n print([addr for addr in addresses if fnmatchcase(addr, '* N *')])\n print([addr for addr in addresses if fnmatchcase(addr, '[0-9]0[0-9][0-9] *')])\n","sub_path":"ch2/ch2_3.py","file_name":"ch2_3.py","file_ext":"py","file_size_in_byte":622,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"389585259","text":"# -*- coding: utf-8 -*-\nimport chardet\nfrom PIL import Image, ImageDraw, ImageFont, ImageFilter\nimport random\nimport psutil\n\n'''\nPIL:Python Imaging Library,已经是Python平台事实上的图像处理标准库了。PIL功能非常强大,但API却非常简单易用。\n下面是操作图像的一个例子\n'''\n# 打开一个jpg图像文件,注意是当前路径:\nim = Image.open('/home/black/Pictures/a.jpg')\n# 获得图像尺寸:\nw, h = im.size\nprint('Original image size: %sx%s' % (w, h))\n# 缩放到50%:\nim.thumbnail((w // 2, h // 2))\nprint('Resize image to: %sx%s' % (w // 2, h // 2))\n# 把缩放后的图像用jpeg格式保存:\nim.save('thumbnail.jpg', 'jpeg')\n\n# 其他功能如切片、旋转、滤镜、输出文字、调色板等一应俱全。\n# 打开一个jpg图像文件,注意是当前路径:\nim = Image.open('/home/black/Pictures/a.jpg')\n# 应用模糊滤镜:\nim2 = im.filter(ImageFilter.BLUR)\nim2.save('blur.jpg', 'jpeg')\n\n'''\nPIL的ImageDraw提供了一系列绘图方法,让我们可以直接绘图。比如要生成字母验证码图片:\n'''\n\n\n# 随机字母:\ndef rndChar():\n return chr(random.randint(65, 90))\n\n\n# 随机颜色1:\ndef rndColor():\n return (random.randint(64, 255), random.randint(64, 255), random.randint(64, 255))\n\n\n# 随机颜色2:\ndef rndColor2():\n return (random.randint(32, 127), random.randint(32, 127), random.randint(32, 127))\n\n\n# 240 x 60:\nwidth = 60 * 4\nheight = 60\nimage = Image.new('RGB', (width, height), (255, 255, 255))\n# 创建Font对象,这里要是报找不到,需要传字体的绝对路径\nfont = ImageFont.truetype('AppleGaramond.ttf', 36)\n# 创建Draw对象:\ndraw = ImageDraw.Draw(image)\n# 填充每个像素:\nfor x in range(width):\n for y in range(height):\n draw.point((x, y), fill=rndColor())\n# 输出文字:\nfor t in range(4):\n draw.text((60 * t + 10, 10), rndChar(), font=font, fill=rndColor2())\n# 模糊:\nimage = image.filter(ImageFilter.BLUR)\nimage.save('code.jpg', 'jpeg')\n\n# 生成验证码结束\n\n\n'''\nchardet 使用\n'''\n# 
当我们拿到一个bytes时,就可以对其检测编码。用chardet检测编码,只需要一行代码:\nprint(chardet.detect(b'Hello, world!'))\ndata = '离离原上草,一岁一枯荣'.encode('gbk')\nprint(chardet.detect(data))\n\ndata = '离离原上草,一岁一枯荣'.encode('utf-8')\nprint(chardet.detect(data))\n\n'''\npsutil\n用Python来编写脚本简化日常的运维工作是Python的一个重要用途。在Linux下,有许多系统命令可以让我们时刻监控系统运行的状态,如ps,top,free等等。\n要获取这些系统信息,Python可以通过subprocess模块调用并获取结果。但这样做显得很麻烦,尤其是要写很多解析代码。\n\n在Python中获取系统信息的另一个好办法是使用psutil这个第三方模块。顾名思义,psutil = process and system utilities,它不仅可以通过一两行代码实现系统监控,\n还可以跨平台使用,支持Linux/UNIX/OSX/Windows等,是系统管理员和运维小伙伴不可或缺的必备模块。\n'''\n# CPU逻辑数量\nprint(psutil.cpu_count())\n\n# CPU物理核心\nprint(psutil.cpu_count(logical=False))\n\n# 统计CPU的用户/系统/空闲时间:\nprint(psutil.cpu_times())\n\n#实现类似top命令的做法\nfor x in range(10):\n psutil.cpu_percent(interval=1, percpu=True)\n\n#使用psutil获取物理内存和交换内存信息,分别使用:\nprint(psutil.virtual_memory())\nprint(psutil.swap_memory())\n\n\n#获取磁盘信息\n# 磁盘分区信息\nprint(psutil.disk_partitions())\n# 磁盘使用情况\nprint(psutil.disk_usage('/'))\n# 磁盘IO\nprint( psutil.disk_io_counters())\n\n#获取网络信息 等等,省略获取进程信息示例代码\nprint(psutil.net_if_addrs())","sub_path":"third/demo1.py","file_name":"demo1.py","file_ext":"py","file_size_in_byte":3729,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"407067036","text":"class BankAccount:\n bank_name = 'First National Bank'\n all_accounts = []\n\n def __init__(self, account_name, int_rate, balance=0): #default balance = 0\n self.account_name = account_name\n self.int_rate = float(int_rate) #float (decimal) 0.01\n self.balance = balance\n BankAccount.all_accounts.append(self)\n\n def deposit(self, amount):\n self.balance += amount\n return self\n\n def withdraw(self, amount):\n if BankAccount.can_withdraw(self.balance, amount):\n self.balance -= amount\n else:\n print('Insufficient funds')\n return self\n\n def display_account_info(self):\n print(\n f\"Bank {self.bank_name}\\nBalance: {self.balance}\\nInterest Rate: {self.int_rate}\\n\")\n return self\n\n def yield_interest(self):\n if self.balance > 0:\n self.balance += self.balance * self.int_rate\n else:\n print('Unable to accrue interest due to insufficient funds')\n return self\n\nclass User:\n def __init__(self, name, email_address):\n self.account_list = []\n self.name = name\n self.email_address = email_address\n\n def create_account(self, name, int_rate, balance):\n self.account = BankAccount(name, int_rate, balance)\n self.account_info = {\n 'bank_name': self.account.bank_name,\n 'account_name': self.account.account_name,\n 'interest_rate': self.account.int_rate,\n 'balance': self.account.balance\n }\n self.account_list.append(self.account_info)\n return self\n\n def make_deposit(self, name, amount):\n for i in range(len(self.account_list)):\n if self.account_list[i]['account_name'] == name:\n self.account.deposit(amount)\n print(self.account.display_account_info())\n self.account_list[i]['balance'] = self.account.balance\n return self\n else:\n print('No account with that name found')\n return self\n\n def make_withdrawl(self, name, amount):\n for i in range(len(self.account_list)):\n if self.account_list[i]['account_name'] == name:\n self.account.withdraw(amount)\n print(self.account.display_account_info())\n self.account_list[i]['balance'] = self.account.balance\n break\n else:\n print('No account with that name found')\n return self\n\n#Classmethods to print all instances of a Bank Account's info\n\n @classmethod\n def change_bank_name(cls, name):\n cls.bank_name = name\n\n @classmethod\n def all_balances(cls):\n sum = 0\n for 
account in cls.all_accounts:\n sum += account.balance\n return sum\n\n @classmethod\n def all_account_info(cls):\n for i, v in enumerate(cls.all_accounts, 0): # for i in range(len(cls.all_accounts)):\n print((i, v.display_account_info())) # cls.all_accounts[i].display_account_info()\n\nryan = User('Ryan', 'ryanpc@dev.com')\nryan.create_account('Checking', 0.01, 500)\nryan.create_account('Savings', 0.05, 1000)\nryan.make_deposit('Checking', 1000).make_deposit('Checking',1500).make_deposit('Checking',1200).make_withdrawl('Checking', 700)\nprint(ryan.account_list)\nprint(ryan.account_info())\n\nsophia = User('Sophia', 'sophia@dev.com')\nsophia.create_account('Checking',0.01,1000)\nsophia.create_account('Savings',0.05,5000)\nsophia.make_deposit('Checking',7000).make_deposit('Savings',10000).make_withdrawl('Checking',500).make_withdrawl('Checking',250).make_withdrawl('Checking',1500).make_withdrawl('Checking',700)\nprint(sophia.account_list)\nprint(sophia.account_info())\n","sub_path":"python/fundamentals/oop/bank_account.py","file_name":"bank_account.py","file_ext":"py","file_size_in_byte":3733,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"287596757","text":"#\n# @lc app=leetcode.cn id=29 lang=python3\n#\n# [29] 两数相除\n#\n# https://leetcode-cn.com/problems/divide-two-integers/description/\n#\n# algorithms\n# Medium (17.13%)\n# Total Accepted: 7.9K\n# Total Submissions: 45.7K\n# Testcase Example: '10\\n3'\n#\n# 给定两个整数,被除数 dividend 和除数 divisor。将两数相除,要求不使用乘法、除法和 mod 运算符。\n# \n# 返回被除数 dividend 除以除数 divisor 得到的商。\n# \n# 示例 1:\n# \n# 输入: dividend = 10, divisor = 3\n# 输出: 3\n# \n# 示例 2:\n# \n# 输入: dividend = 7, divisor = -3\n# 输出: -2\n# \n# 说明:\n# \n# \n# 被除数和除数均为 32 位有符号整数。\n# 除数不为 0。\n# 假设我们的环境只能存储 32 位有符号整数,其数值范围是 [−231,  231 − 1]。本题中,如果除法结果溢出,则返回 231 − 1。\n# \n# \n#\nclass Solution:\n def divide(self, dividend, divisor):\n \"\"\"\n :type dividend: int\n :type divisor: int\n :rtype: int\n \"\"\"\n\n mark = (dividend < 0) is (divisor < 0)\n dividend, divisor = abs(dividend), abs(divisor)\n \n time = 0\n while dividend >= divisor:\n temp, i = divisor, 1\n while dividend >= temp:\n dividend -= temp\n time += i\n i <<= 1\n temp <<= 1\n \n time = time if mark else -time\n return min(max(-2147483648, time), 2147483647)\n\n \n","sub_path":"leetcode/029_divide_two_integers.py","file_name":"029_divide_two_integers.py","file_ext":"py","file_size_in_byte":1474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"121909178","text":"\"\"\"\r\n██████╗██╗██████╗ ██╗ ██╗███████╗██╗ ██╗\r\n██╔════╝██║██╔══██╗██║ ██║██╔════╝╚██╗ ██╔╝\r\n██║ ██║██████╔╝███████║█████╗ ╚████╔╝ \r\n██║ ██║██╔═══╝ ██╔══██║██╔══╝ ╚██╔╝ \r\n╚██████╗██║██║ ██║ ██║███████╗ ██║ \r\n© Brandon Skerritt\r\nGithub: brandonskerritt\r\n\r\nClass to provide helper functions for mathematics\r\n(oh, not entirely mathematics either. Some NLP stuff and sorting dicts. 
It's just a helper class\r\n)\r\n\"\"\"\r\n\r\nfrom collections import OrderedDict\r\nfrom string import punctuation\r\nfrom loguru import logger\r\n\r\n\r\nclass mathsHelper:\r\n \"\"\"Class to provide helper functions for mathematics and other small things\"\"\"\r\n\r\n def __init__(self):\r\n # ETAOIN is the most popular letters in order\r\n self.ETAOIN = \"ETAOINSHRDLCUMWFGYPBVKJXQZ\"\r\n self.LETTERS = \"ABCDEFGHIJKLMNOPQRSTUVWXYZ\"\r\n\r\n @staticmethod\r\n def gcd(a, b) -> int:\r\n \"\"\"Greatest common divisor.\r\n\r\n The Greatest Common Divisor of a and b using Euclid's Algorithm.\r\n\r\n Args:\r\n a -> num 1\r\n b -> num 2\r\n\r\n Returns:\r\n Returns GCD(a, b)\r\n\r\n \"\"\"\r\n # Return\r\n while a != 0:\r\n a, b = b % a, a\r\n return b\r\n\r\n @staticmethod\r\n def find_mod_inverse(a: int, m: int) -> int:\r\n \"\"\"Return the modular inverse of a % m.\r\n\r\n Which is the number x such that a*x % m = 1. Calculated using the Extended Euclidean Algorithm.\r\n\r\n Args:\r\n a -> num 1\r\n m -> num 2\r\n\r\n Returns:\r\n Returns modular inverse(u1, m)\r\n\r\n \"\"\"\r\n # Return the modular inverse of a % m, which is\r\n # the number x such that a*x % m = 1\r\n\r\n if gcd(a, m) != 1:\r\n return None # No mod inverse exists if a & m aren't relatively prime.\r\n\r\n # Calculate using the Extended Euclidean Algorithm:\r\n u1, u2, u3 = 1, 0, a\r\n v1, v2, v3 = 0, 1, m\r\n while v3 != 0:\r\n q = u3 // v3 # Note that // is the integer division operator\r\n v1, v2, v3, u1, u2, u3 = (\r\n (u1 - q * v1),\r\n (u2 - q * v2),\r\n (u3 - q * v3),\r\n v1,\r\n v2,\r\n v3,\r\n )\r\n return u1 % m\r\n\r\n @staticmethod\r\n def percentage(part: float, whole: float) -> float:\r\n \"\"\"Returns percentage.\r\n\r\n Just a normal algorithm to return the percent.\r\n\r\n Args:\r\n part -> part of the whole number\r\n whole -> the whole number\r\n\r\n Returns:\r\n Returns the percentage of part to whole.\r\n\r\n \"\"\"\r\n if part <= 0 or whole <= 0:\r\n return 0\r\n # works with percentages\r\n return 100 * float(part) / float(whole)\r\n\r\n @staticmethod\r\n def sort_dictionary(dictionary: dict) -> dict:\r\n \"\"\"Sorts a dictionary.\r\n\r\n Uses OrderedDict to sort a dictionary.\r\n\r\n Args:\r\n dictionary -> the dictionary to sort.\r\n\r\n Returns:\r\n Returns the dictionary, but sorted.\r\n\r\n \"\"\"\r\n ret = dict(OrderedDict(sorted(dictionary.items())))\r\n logger.debug(\r\n f\"The old dictionary was {dictionary} and I am sorting it to {ret}\"\r\n )\r\n return ret\r\n\r\n def sort_prob_table(self, prob_table: dict) -> dict:\r\n \"\"\"Sorts the probability table.\r\n\r\n Sorts a dictionary of dictionaries (and all the sub-dictionaries).\r\n\r\n Args:\r\n prob_table -> The probability table returned by the neural network to sort.\r\n\r\n Returns:\r\n Returns the prob_table, but sorted.\r\n\r\n \"\"\"\r\n # for each object: prob table in dictionary\r\n max_overall: int = 0\r\n max_dict_pair: dict = {}\r\n highest_key = None\r\n empty_dict: dict = {}\r\n # sorts the prob table before we find max, and converts it to order dicts\r\n for key, value in prob_table.items():\r\n prob_table[key] = self.new_sort(value)\r\n prob_table[key] = dict(prob_table[key])\r\n\r\n # gets maximum key then sets it to the front\r\n counter_max: int = 0\r\n counter_prob: int = len(prob_table)\r\n while counter_max < counter_prob:\r\n max_overall = 0\r\n highest_key = None\r\n logger.debug(\r\n f\"Running while loop in sort_prob_table, counterMax is {counter_max}\"\r\n )\r\n for key, value in prob_table.items():\r\n 
logger.debug(f\"Sorting {key}\")\r\n maxLocal = 0\r\n # for each item in that table\r\n for key2, value2 in value.items():\r\n logger.debug(\r\n f\"Running key2 {key2}, value2 {value2} for loop for {value.items()}\"\r\n )\r\n maxLocal = maxLocal + value2\r\n logger.debug(\r\n f\"MaxLocal is {maxLocal} and maxOverall is {max_overall}\"\r\n )\r\n if maxLocal > max_overall:\r\n logger.debug(f\"New max local found {maxLocal}\")\r\n # because the dict doesnt reset\r\n max_dict_pair = {}\r\n max_overall = maxLocal\r\n # so eventually, we get the maximum dict pairing?\r\n max_dict_pair[key] = value\r\n highest_key = key\r\n logger.debug(f\"Highest key is {highest_key}\")\r\n # removes the highest key from the prob table\r\n logger.debug(f\"Prob table is {prob_table} and highest key is {highest_key}\")\r\n logger.debug(f\"Removing {prob_table[highest_key]}\")\r\n del prob_table[highest_key]\r\n logger.debug(f\"Prob table after deletion is {prob_table}\")\r\n counter_max += 1\r\n empty_dict = {**empty_dict, **max_dict_pair}\r\n\r\n # returns the max dict (at the start) with the prob table\r\n # this way, it should always work on most likely first.\r\n logger.debug(\r\n f\"The prob table is {prob_table} and the maxDictPair is {max_dict_pair}\"\r\n )\r\n logger.debug(f\"The new sorted prob table is {empty_dict}\")\r\n return empty_dict\r\n\r\n @staticmethod\r\n def new_sort(new_dict: dict) -> dict:\r\n \"\"\"Uses OrderedDict to sort a dictionary.\r\n\r\n I think it's faster than my implementation.\r\n\r\n Args:\r\n new_dict -> the dictionary to sort\r\n\r\n Returns:\r\n Returns the dict, but sorted.\r\n\r\n \"\"\"\r\n # (f\"d is {d}\")\r\n logger.debug(f\"The old dictionary before new_sort() is {new_dict}\")\r\n sorted_i = OrderedDict(\r\n sorted(new_dict.items(), key=lambda x: x[1], reverse=True)\r\n )\r\n logger.debug(f\"The dictionary after new_sort() is {sorted_i}\")\r\n # sortedI = sort_dictionary(x)\r\n return sorted_i\r\n\r\n @staticmethod\r\n def is_ascii(s: str) -> bool:\r\n \"\"\"Returns the boolean value if is_ascii is an ascii char.\r\n\r\n Does what it says on the tree. 
Stolen from\r\n https://stackoverflow.com/questions/196345/how-to-check-if-a-string-in-python-is-in-ascii\r\n\r\n Args:\r\n s -> the char to check.\r\n\r\n Returns:\r\n Returns the boolean of the char.\r\n\r\n \"\"\"\r\n\r\n return bool(lambda s: len(s) == len(s.encode()))\r\n\r\n @staticmethod\r\n def check_equal(a) -> bool:\r\n \"\"\"checks if all items in an iterable are the same.\r\n\r\n https://stackoverflow.com/questions/3844801/check-if-all-elements-in-a-list-are-identical\r\n\r\n Args:\r\n a -> an iterable\r\n\r\n Returns:\r\n Returns boolean.\r\n\r\n \"\"\"\r\n return a.count(a[0]) == len(a)\r\n\r\n @staticmethod\r\n def strip_puncuation(text: str) -> str:\r\n \"\"\"Strips punctuation from a given string.\r\n\r\n Uses string.puncuation.\r\n\r\n Args:\r\n text -> the text to strip puncuation from.\r\n\r\n Returns:\r\n Returns string without puncuation.\r\n\r\n \"\"\"\r\n text: str = str(text).translate(str.maketrans(\"\", \"\", punctuation))\r\n return text\r\n\r\n def get_all_letters(self, text: str) -> dict:\r\n \"\"\"Gets letter frequency of text.\r\n\r\n Uses a for loop to do it.\r\n\r\n Args:\r\n text -> the text to get letter frequency of.\r\n\r\n Returns:\r\n Returns dict of letter frequency.\r\n\r\n \"\"\"\r\n # This part creates a letter frequency of the text\r\n letterFreq: dict = {\r\n \"a\": 0,\r\n \"b\": 0,\r\n \"c\": 0,\r\n \"d\": 0,\r\n \"e\": 0,\r\n \"f\": 0,\r\n \"g\": 0,\r\n \"h\": 0,\r\n \"i\": 0,\r\n \"j\": 0,\r\n \"k\": 0,\r\n \"l\": 0,\r\n \"m\": 0,\r\n \"n\": 0,\r\n \"o\": 0,\r\n \"p\": 0,\r\n \"q\": 0,\r\n \"r\": 0,\r\n \"s\": 0,\r\n \"t\": 0,\r\n \"u\": 0,\r\n \"v\": 0,\r\n \"w\": 0,\r\n \"x\": 0,\r\n \"y\": 0,\r\n \"z\": 0,\r\n }\r\n\r\n for letter in text.lower():\r\n if letter in letterFreq:\r\n letterFreq[letter] += 1\r\n else:\r\n # if letter is not puncuation, but it is still ascii\r\n # it's probably a different language so add it to the dict\r\n if letter not in punctuation and self.mh.is_ascii(letter):\r\n letterFreq[letter] = 1\r\n return letterFreq\r\n\r\n def get_letter_count(self, message: str) -> dict:\r\n \"\"\"Gets letter count.\r\n\r\n Returns a dictionary with keys of single letters and values of the\r\n count of how many times they appear in the message parameter:\r\n\r\n Args:\r\n message -> message to get letter count of.\r\n\r\n Returns:\r\n Returns dict of letter count.\r\n\r\n \"\"\"\r\n\r\n letterCount = {\r\n \"A\": 0,\r\n \"B\": 0,\r\n \"C\": 0,\r\n \"D\": 0,\r\n \"E\": 0,\r\n \"F\": 0,\r\n \"G\": 0,\r\n \"H\": 0,\r\n \"I\": 0,\r\n \"J\": 0,\r\n \"K\": 0,\r\n \"L\": 0,\r\n \"M\": 0,\r\n \"N\": 0,\r\n \"O\": 0,\r\n \"P\": 0,\r\n \"Q\": 0,\r\n \"R\": 0,\r\n \"S\": 0,\r\n \"T\": 0,\r\n \"U\": 0,\r\n \"V\": 0,\r\n \"W\": 0,\r\n \"X\": 0,\r\n \"Y\": 0,\r\n \"Z\": 0,\r\n }\r\n\r\n for letter in message.upper():\r\n if letter in self.LETTERS:\r\n letterCount[letter] += 1\r\n\r\n return letterCount\r\n\r\n @staticmethod\r\n def get_item_at_index_zero(items):\r\n \"\"\"Gets the item at index 0 from an iterable\"\"\"\r\n return items[0]\r\n\r\n def get_frequency_order(self, message: str) -> str:\r\n \"\"\"Returns frequency order.\r\n\r\n Returns a string of the alphabet letters arranged in order of most\r\n frequently occurring in the message parameter.\r\n\r\n Args:\r\n message -> message to get freq of.\r\n\r\n Returns:\r\n str of the alphabet letters in most frequently occuring order.\r\n\r\n \"\"\"\r\n\r\n # First, get a dictionary of each letter and its frequency count:\r\n letterToFreq = self.get_letter_count(message)\r\n\r\n # 
Second, make a dictionary of each frequency count to each letter(s)\r\n # with that frequency:\r\n freqToLetter = {}\r\n for letter in self.LETTERS:\r\n if letterToFreq[letter] not in freqToLetter:\r\n freqToLetter[letterToFreq[letter]] = [letter]\r\n else:\r\n freqToLetter[letterToFreq[letter]].append(letter)\r\n\r\n # Third, put each list of letters in reverse \"self.self.ETAOIN\" order, and then\r\n # convert it to a string:\r\n for freq in freqToLetter:\r\n freqToLetter[freq].sort(key=self.ETAOIN.find, reverse=True)\r\n freqToLetter[freq] = \"\".join(freqToLetter[freq])\r\n\r\n # Fourth, convert the freqToLetter dictionary to a list of\r\n # tuple pairs (key, value), then sort them:\r\n freqPairs = list(freqToLetter.items())\r\n freqPairs.sort(key=self.get_item_at_index_zero, reverse=True)\r\n\r\n # Fifth, now that the letters are ordered by frequency, extract all\r\n # the letters for the final string:\r\n freqOrder = []\r\n for freqPair in freqPairs:\r\n freqOrder.append(freqPair[1])\r\n\r\n return \"\".join(freqOrder)\r\n\r\n def english_freq_match_score(self, message: str) -> int:\r\n \"\"\"Return number of mathces in the string\r\n\r\n Return the number of matches that the string in the message\r\n parameter has when its letter frequency is compared to English\r\n letter frequency. A \"match\" is how many of its six most frequent\r\n and six least frequent letters is among the six most frequent and\r\n six least frequent letters for English.\r\n\r\n Args:\r\n message -> message to get freq match of\r\n\r\n Returns:\r\n int, how many matches for the most common letters / least common letters.\r\n\r\n \"\"\"\r\n\r\n freqOrder = self.get_frequency_order(message)\r\n\r\n matchScore = 0\r\n # Find how many matches for the six most common letters there are:\r\n for commonLetter in self.ETAOIN[:6]:\r\n if commonLetter in freqOrder[:6]:\r\n matchScore += 1\r\n # Find how many matches for the six least common letters there are:\r\n for uncommonLetter in self.ETAOIN[-6:]:\r\n if uncommonLetter in freqOrder[-6:]:\r\n matchScore += 1\r\n\r\n return matchScore\r\n","sub_path":"ciphey/mathsHelper.py","file_name":"mathsHelper.py","file_ext":"py","file_size_in_byte":14054,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"296761289","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\nTests for the config module.\n\nSPDX-FileCopyrightText: 2016-2021 Uwe Krien <krien@uni-bremen.de>\n\nSPDX-License-Identifier: MIT\n\"\"\"\n__copyright__ = \"Uwe Krien <krien@uni-bremen.de>\"\n__license__ = \"MIT\"\n\nfrom nose.tools import eq_, ok_, assert_raises_regexp\nfrom configparser import NoOptionError, NoSectionError\nimport os\nfrom reegis import config\n\n\ndef test_ini_filenames_basic():\n files = config.get_ini_filenames(use_importer=False, local=False)\n fn = sorted([f.split(os.sep)[-1] for f in files])\n eq_(\n fn,\n [\n \"dictionary.ini\",\n \"mobility.ini\",\n \"reegis.ini\",\n \"solar.ini\",\n \"wind.ini\",\n ],\n )\n\n\ndef test_ini_filenames_local_path():\n local_path = os.path.join(os.path.expanduser(\"~\"), \".reegis\")\n os.makedirs(local_path, exist_ok=True)\n new_file = os.path.join(local_path, \"test_ini_file.ini\")\n f = open(new_file, \"w+\")\n f.close()\n files = config.get_ini_filenames()\n fn = sorted([f.split(os.sep)[-1] for f in files])\n ok_(\"test_ini_file.ini\" in fn)\n os.remove(new_file)\n\n\ndef test_ini_filenames_additional_path():\n additional_path = [os.path.join(os.path.dirname(__file__), \"data\")]\n files = 
config.get_ini_filenames(\n use_importer=False, local=False, additional_paths=additional_path\n )\n fn = sorted([f.split(os.sep)[-1] for f in files])\n eq_(\n fn,\n [\n \"config_test.ini\",\n \"dictionary.ini\",\n \"mobility.ini\",\n \"reegis.ini\",\n \"solar.ini\",\n \"wind.ini\",\n ],\n )\n\n\ndef test_init_basic():\n config.init()\n fn = sorted([f.split(os.sep)[-1] for f in config.FILES])\n eq_(\n fn,\n [\n \"dictionary.ini\",\n \"mobility.ini\",\n \"reegis.ini\",\n \"solar.ini\",\n \"wind.ini\",\n ],\n )\n\n\ndef test_init_additional_path():\n additional_path = [os.path.join(os.path.dirname(__file__), \"data\")]\n config.init(paths=additional_path)\n fn = sorted([f.split(os.sep)[-1] for f in config.FILES])\n eq_(\n fn,\n [\n \"config_test.ini\",\n \"dictionary.ini\",\n \"mobility.ini\",\n \"reegis.ini\",\n \"solar.ini\",\n \"wind.ini\",\n ],\n )\n\n\ndef test_init_own_file_list():\n files = [\n os.path.join(os.path.dirname(__file__), \"data\", \"config_test.ini\")\n ]\n config.init(files=files)\n fn = sorted([f.split(os.sep)[-1] for f in config.FILES])\n eq_(fn, [\"config_test.ini\"])\n eq_(config.get(\"tester\", \"my_test\"), \"my_value\")\n\n\ndef test_check_functions():\n files = [\n os.path.join(os.path.dirname(__file__), \"data\", \"config_test.ini\")\n ]\n config.init(files=files)\n ok_(config.has_section(\"tester\"))\n ok_(not (config.has_section(\"teste\")))\n ok_(config.has_option(\"tester\", \"my_test\"))\n\n\ndef test_get_function():\n \"\"\"Read config file.\"\"\"\n files = [\n os.path.join(os.path.dirname(__file__), \"data\", \"config_test.ini\")\n ]\n config.init(files=files)\n ok_(config.get(\"type_tester\", \"my_bool\"))\n ok_(isinstance(config.get(\"type_tester\", \"my_int\"), int))\n ok_(isinstance(config.get(\"type_tester\", \"my_float\"), float))\n ok_(isinstance(config.get(\"type_tester\", \"my_string\"), str))\n ok_(isinstance(config.get(\"type_tester\", \"my_None\"), type(None)))\n ok_(isinstance(config.get(\"type_tester\", \"my_list\"), str))\n eq_(int(config.get_list(\"type_tester\", \"my_list\")[2]), 7)\n\n\ndef test_missing_value():\n files = [\n os.path.join(os.path.dirname(__file__), \"data\", \"config_test.ini\")\n ]\n config.init(files=files)\n with assert_raises_regexp(\n NoOptionError, \"No option 'blubb' in section: 'type_tester'\"\n ):\n config.get(\"type_tester\", \"blubb\")\n with assert_raises_regexp(NoSectionError, \"No section: 'typetester'\"):\n config.get(\"typetester\", \"blubb\")\n\n\ndef test_dicts():\n \"\"\"Test dictionaries in config file.\"\"\"\n files = [\n os.path.join(os.path.dirname(__file__), \"data\", \"config_test.ini\")\n ]\n config.init(files=files)\n d = config.get_dict(\"type_tester\")\n eq_(d[\"my_list\"], \"4,6,7,9\")\n d = config.get_dict_list(\"type_tester\")\n eq_(d[\"my_list\"][1], \"6\")\n eq_(d[\"my_None\"][0], None)\n eq_(d[\"my_int\"][0], 5)\n d = config.get_dict_list(\"type_tester\", string=True)\n eq_(d[\"my_list\"][1], \"6\")\n eq_(d[\"my_None\"][0], \"None\")\n eq_(d[\"my_int\"][0], \"5\")\n\n\ndef test_set_temp_value():\n files = [\n os.path.join(os.path.dirname(__file__), \"data\", \"config_test.ini\")\n ]\n config.init(files=files)\n with assert_raises_regexp(\n NoOptionError, \"No option 'blubb' in section: 'type_tester'\"\n ):\n config.get(\"type_tester\", \"blubb\")\n config.tmp_set(\"type_tester\", \"blubb\", \"None\")\n eq_(config.get(\"type_tester\", \"blubb\"), None)\n config.tmp_set(\"type_tester\", \"blubb\", \"5.5\")\n eq_(config.get(\"type_tester\", \"blubb\"), 5.5)\n\n\ndef 
test_set_temp_without_init():\n config.tmp_set(\"type_tester\", \"blubb\", \"None\")\n","sub_path":"tests/config_tests.py","file_name":"config_tests.py","file_ext":"py","file_size_in_byte":5072,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"279030213","text":"__author__ = 'sean'\nimport ConfigParser\nimport os\n\n\nclass Config:\n conf_path = os.path.abspath(os.path.curdir) + os.sep + 'server.conf'\n config = None\n\n @staticmethod\n def getConfig(section, key):\n if not Config.config:\n Config.config = ConfigParser.RawConfigParser()\n path = os.path.realpath(Config.conf_path)\n Config.config.read(path)\n return Config.config.get(section, key)\n\n @staticmethod\n def getServerConfig(key):\n \"\"\"\n :param key:\n :return:\n \"\"\"\n return Config.getConfig('vps', key)\n\n @staticmethod\n def getMysqlConfig(key):\n \"\"\"\n :param key:\n :return:\n \"\"\"\n return Config.getConfig('mysql', key)\n\n @staticmethod\n def getPathConfig(key):\n \"\"\"\n :param key:\n :return:\n \"\"\"\n return Config.getConfig('path', key)\n","sub_path":"Config.py","file_name":"Config.py","file_ext":"py","file_size_in_byte":898,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"28360562","text":"import matplotlib.pyplot as plt\n\n\ndef plot_data_over_time(x, y=None, title=None, xlabel=\"Date\", ylabel=None, figsize=None):\n fig, ax = plt.subplots(figsize=figsize)\n plt.title(title)\n fig.autofmt_xdate()\n plt.xlabel(xlabel)\n plt.ylabel(ylabel)\n\n if y is None:\n plt.plot(x)\n else:\n plt.plot(x, y)\n\n plt.show()\n\ndef plot_fundamental_diagram(flow, occupancy, title=None):\n plt.title(title)\n plt.xlabel(\"Occupancy (%)\")\n plt.ylabel(\"Flow (vph)\")\n\n plt.scatter(occupancy, flow)\n plt.xlim(0, 100)\n plt.ylim(bottom=0)\n plt.show()\n","sub_path":"lib/visualization.py","file_name":"visualization.py","file_ext":"py","file_size_in_byte":582,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"77740800","text":"#\n# (c) 2012 Commonwealth of Australia\n# Australian Bureau of Meteorology, COSPPac COMP\n# All Rights Reserved\n#\n# Author: Sheng Guo <s.guo@bom.gov.au>\n# Danielle Madeley <d.madeley@bom.gov.au>\n\n\"\"\"\nStore the server specific configurations\n\nDon't import config directly, use ocean.config.get_server_config()\n\"\"\"\n\nfrom ocean.config import BaseConfig\n\nclass default(BaseConfig):\n \"\"\"\n Default server config. 
Inherit this class to set per-server config.\n \"\"\"\n\n # path on web server\n baseURL = '/portal/'\n\n # relative path to rasters\n rasterURL = 'comp/raster/'\n\n # path on disk to output rasters/caches\n outputDir = '/opt/data/comp/raster/'\n\n # relative path to caches (relative to rasterURL) (obsolete?)\n cacheDir = {\n 'reynolds': 'cache/reynolds/',\n 'ersst': 'cache/ersst/',\n }\n\n dataDir = {}\n\n mapservPath = '/usr/lib/cgi-bin/mapserv'\n debug = True\n profile = False\n\nclass localhost(default):\n debug = True\n mapservPath = '/usr/lib/cgi-bin/mapserv'\n dataDir = {\n 'bran': '/opt/data/blue_link/data/',\n 'ersst': '/opt/data/ersst/',\n 'reynolds': '/opt/data/reynolds/',\n 'sealevel': '/opt/data/sea_level/',\n 'msla': '/opt/data/sea_level/',\n 'ww3': '/opt/data/wavewatch3/',\n 'coral':'/opt/data/coral/',\n 'poamasla':'/opt/data/poama/',\n 'poamassta':'/opt/data/poama/',\n 'oceanmaps':'/opt/data/oceanmaps/',\n 'chloro':'/opt/data/chloro/',\n 'currents':'/opt/data/currents/',\n 'ww3forecast':'/opt/data/wavewatch3/forecast/',\n 'mur':'/opt/data/mur/'\n }\n\n\n\nclass oceanportal(default):\n debug = True\n mapservPath = '/usr/lib/cgi-bin/mapserv'\n dataDir = {\n 'bran': '/opt/data/blue_link/data/BRAN3p5/',\n 'ersst': '/opt/data/ersst/',\n 'reynolds': '/opt/data/reynolds/',\n 'sealevel': '/opt/data/sea_level/',\n 'msla': '/opt/data/sea_level/',\n 'ww3': '/opt/data/wavewatch3/',\n 'coral':'/opt/data/coral/',\n 'coral_ol':'/opt/data/coral/',\n 'poamasla':'/opt/data/poama/',\n 'poamassta':'/opt/data/poama/',\n 'oceanmaps':'/opt/data/oceanmaps/',\n 'chloro':'/opt/data/chloro/',\n 'currents':'/opt/data/currents/',\n 'ww3forecast':'/opt/data/wavewatch3/forecast/',\n 'mur':'/opt/data/mur/'\n }\n\nclass tunceli(default):\n debug = True\n\n # shared data directories from ITB (mounted rw)\n dataDir = {\n 'bran': '/www4/data/cosppac/bran/',\n 'ersst': '/www4/data/cosppac/ersst/',\n 'reynolds': '/www4/data/cosppac/reynolds/',\n 'sealevel': '/www4/data/cosppac/sea_level/',\n 'msla': '/www4/data/cosppac/sea_level/',\n 'ww3': '/www4/data/cosppac/wavewatch3/',\n 'coral':'/www4/data/cosppac/coral/',\n 'coral_ol':'/www4/data/cosppac/coral/',\n 'poamasla':'/www4/data/cosppac/poama/',\n 'poamassta':'/www4/data/cosppac/poama/',\n 'oceanmaps':'/www4/data/cosppac/oceanmaps/',\n 'chloro':'/www4/data/cosppac/chloro/',\n 'currents':'/www4/data/cosppac/currents/',\n 'ww3forecast':'/www4/data/cosppac/wavewatch3/forecast/',\n\t'mur':'/www4/data/cosppac/mur/'\n }\n\nclass www4(default):\n debug = True\n baseURL = '/cosppac/apps/portal/'\n outputDir = '/web/cosppac/raster/'\n\n dataDir = {\n 'bran': '/web/data/cosppac/bran/',\n 'ersst': '/web/data/cosppac/ersst/',\n 'reynolds': '/web/data/cosppac/reynolds/',\n 'sealevel': '/web/data/cosppac/sea_level/',\n 'msla': '/web/data/cosppac/sea_level/',\n 'ww3': '/web/data/cosppac/wavewatch3/',\n 'coral': '/web/data/cosppac/coral/',\n 'coral_ol': '/web/data/cosppac/coral/',\n 'poamasla':'/web/data/cosppac/poama/',\n 'poamassta':'/web/data/cosppac/poama/',\n 'oceanmaps': '/web/data/cosppac/oceanmaps/',\n 'chloro':'/web/data/cosppac/chloro/',\n 'currents':'/web/data/cosppac/currents/',\n 'ww3forecast':'/web/data/cosppac/wavewatch3/forecast/',\n\t'mur':'/web/data/cosppac/mur'\n }\n\nclass hoapp2(www4):\n debug = False\n\n__version__ = ''\n","sub_path":"ocean/config/serverConfig.py","file_name":"serverConfig.py","file_ext":"py","file_size_in_byte":4130,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} 
+{"seq_id":"442309630","text":"import os\r\nimport json\r\nimport torch\r\nfrom torch import nn\r\nfrom torch import Tensor\r\nfrom .resnet34 import ResNet34\r\n\r\nDEFAULT_MODEL_PATH = os.path.join(\r\n os.path.split(os.path.realpath(__file__))[0],\r\n 'models', 'resnet34.pt')\r\n\r\nwith open(os.path.join(os.path.split(os.path.realpath(__file__))[0], 'models', 'class_indices.json'), 'r') as f:\r\n CLASS_INDICES = list(dict(json.load(f)).values())\r\n\r\n\r\nclass OriginalModel():\r\n \"\"\"ResNet-34 Model Encapsulation.\r\n\r\n Attributes:\r\n device: A string to indicating model inference device, such as cpu, cuda or cuda:0.\r\n num_class: An integer count of class.\r\n model: A torch.nn.Module.\r\n\r\n Example:\r\n >>> model = OriginalModel(\\\r\n model_weight_path='<your_model_weight_path>',\\\r\n device=DEVICE)\r\n \"\"\"\r\n\r\n def __init__(self,\r\n model_weight_path=DEFAULT_MODEL_PATH,\r\n device='cpu' # default device is cpu\r\n ):\r\n self.device = device\r\n self.num_class = 10\r\n\r\n # Load model\r\n self.model = ResNet34()\r\n self.model.linear = nn.Linear(\r\n self.model.linear.in_features, self.num_class)\r\n\r\n self.model.load_state_dict(\r\n torch.load(model_weight_path, map_location=torch.device(device)))\r\n self.model.to(self.device)\r\n\r\n def inference(self, x: Tensor) -> Tensor:\r\n \"\"\"\r\n - x: Tensor [1,3,224,224]\r\n \"\"\"\r\n if x.dim() == 3:\r\n x = x.unsqueeze(0)\r\n x = x.to(self.device)\r\n self.model.eval()\r\n with torch.no_grad():\r\n x = self.model(x)\r\n x = torch.squeeze(x)\r\n x = torch.softmax(x, dim=0)\r\n return x\r\n\r\n def top_k(self, x: Tensor, k: int = 5):\r\n \"\"\"\r\n - x: Tensor [1,3,224,224]\r\n - k: sort tensor and select top-k\r\n \"\"\"\r\n values, indices = torch.topk(self.inference(x), k, dim=0)\r\n class_name = []\r\n for i in indices.tolist():\r\n class_name.append(CLASS_INDICES[i])\r\n return values.tolist(), indices.tolist(), class_name\r\n","sub_path":"api/api_originalModel/originalModel.py","file_name":"originalModel.py","file_ext":"py","file_size_in_byte":2123,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"541986905","text":"import pytesseract\nfrom pytesseract import pytesseract\nimport cv2\nimport matplotlib.pyplot as plt\n\n\ndef barlabelandheightratio(img,yaxisminimum=0):\n \"\"\"\n This function returns the ratio of the range,\n the x axis labels and the postiton of the bars\n \"\"\"\n\n \n y=img.shape[0]-yaxisminimum\n\n\n custom_config=r'--oem 3 --psm 6'\n data=pytesseract.image_to_boxes(img,config=custom_config)\n l=data.split(\"\\n\")\n matrix=[]\n\n for i in l:\n matrix.append(i.split(\" \"))\n # matrix[i][2]=(int(matrix[i][2])-img.shape[0])\n # matrix[i][4]=(int(matrix[i][4])-img.shape[0])\n\n numberlist=[]\n\n\n for i in matrix:\n\n ###############\n if i[0].isdigit() and int(i[2])>y-10:\n # print(i)\n numberlist.append((int(i[2])+int(i[4]))/2)\n\n\n unit_data=[]\n\n for i in range(1,len(numberlist)):\n d=abs(numberlist[i]-numberlist[i-1])\n if d>7:\n unit_data.append(d)\n\n\n unit_data.sort()\n k=len(unit_data)\n \n # print(numberlist)\n\n height_ratio=unit_data[k//2]\n # print(data)\n\n\n bartitleposition=[]\n bartitle=[]\n\n labelbegin=0\n\n# return the beginning coordinates of the labels \n for i in range(len(matrix)):\n if int(matrix[i][4])<y:\n labelbegin=i \n break \n\n\n s=\"\" \n flagnext=0\n j=0\n\n for i in range(labelbegin,len(matrix)-1):\n yaverage=(int(matrix[i][2])+int(matrix[i][4]))/2\n 
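# x- and y-midpoints of the character boxes that pytesseract's image_to_boxes reported\n 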
xaverage=(int(matrix[i][1])+int(matrix[i][3]))/2\n\n yaverage2=(int(matrix[i+1][2])+int(matrix[i+1][4]))/2\n xaverage2=(int(matrix[i+1][1])+int(matrix[i+1][3]))/2\n \n if abs(yaverage-yaverage2)<=10 and abs(xaverage-xaverage2)<=30:\n j=xaverage\n break \n ############\n maxlength=img.shape[1]\n maxheight=img.shape[0]\n matrix.append(['_',str(maxlength),str(maxheight),str(maxlength),str(maxheight)])\n #code that returns the bar positions\n for i in range(labelbegin,len(matrix)-1):\n yaverage=(int(matrix[i][2])+int(matrix[i][4]))/2\n xaverage=(int(matrix[i][1])+int(matrix[i][3]))/2\n\n yaverage2=(int(matrix[i+1][2])+int(matrix[i+1][4]))/2\n xaverage2=(int(matrix[i+1][1])+int(matrix[i+1][3]))/2\n \n if abs(yaverage-yaverage2)<=10:\n if abs(xaverage-xaverage2)<=30:\n s=s+matrix[i][0]\n if flagnext==1:\n j=xaverage\n flagnext=0\n else: \n s=s+matrix[i][0]\n bartitle.append(s)\n if len(s)==1:\n bartitleposition.append(xaverage)\n else:\n bartitleposition.append(abs(xaverage+j)/2)\n flagnext=1\n s=\"\"\n\n else:\n s=s+matrix[i][0]\n diff=((int(matrix[i][1])+int(matrix[i][3]))/2)\n #if the label is one letter then directly add the coordinate\n if len(s)==1:\n bartitleposition.append(diff)\n else:\n bartitleposition.append((diff+j)/2)\n bartitle.append(s)\n break \n\n # print(unit_data)\n # print(height_ratio)\n # print(bartitle)\n # print(bartitleposition)\n\n return [height_ratio,bartitle,bartitleposition]\n \n\nif __name__==\"__main__\":\n img_path = 'image2.png'\n img=cv2.imread(img_path)\n yaxisminimum=530\n d=barlabelandheightratio(img,yaxisminimum)\n print(d)\n plt.imshow(img)\n plt.show()","sub_path":"maincode/textposition.py","file_name":"textposition.py","file_ext":"py","file_size_in_byte":3472,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"383960217","text":"# Create a function that takes a filename and a string as parameters,\n# and writes the string passed as the second parameter into the file 10 times.\n# If the writing succeeds, the function should return True.\n# If any problem arises with the file output, the function should not break, but return False.\n# Example: when called with the two parameters \"tree.txt\" and \"apple\",\n# the function should write \"apple\" repeated 10 times to the file \"tree.txt\", and return True.\n\ndef write_string_ten_times(file_name, string):\n if not isinstance(string, str):\n return False\n # any file-output failure (bad path, permissions, ...) must yield False, not raise\n try:\n with open(file_name, \"w\") as myfile:\n myfile.write(string*10)\n return True\n except OSError:\n return False\n\nwrite_string_ten_times(\"text_for_second.txt\", \"apple\")\n","sub_path":"second.py","file_name":"second.py","file_ext":"py","file_size_in_byte":725,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"290221525","text":"from django.contrib.auth import get_user_model\nfrom rest_framework import serializers\nfrom system.models import Menu, Meta\n\nclass UserSerializer(serializers.ModelSerializer):\n class Meta:\n model = get_user_model()\n fields = ('id', 'username', 'first_name', 'last_name', 'email', 'phone', 'is_staff', 'is_active', 'date_joined', 'last_login')\n\nclass MetaSerializer(serializers.ModelSerializer):\n\n class Meta:\n model = Meta\n fields = ('id', 'title', 'icon', 'iconSvg', 'aside', 'cache', 'description')\n\nclass MenuTreeSerializer(serializers.ModelSerializer):\n children = serializers.SerializerMethodField()\n meta = MetaSerializer()\n\n class Meta:\n model = Menu\n fields = ('id', 'name', 'path', 'hidden', 'meta', 'parent', 'children')\n\n def 
get_children(self, obj):\n children = MenuTreeSerializer(obj.get_children(), many=True).data\n if any(children):\n return children\n return\n","sub_path":"api/serializers/login.py","file_name":"login.py","file_ext":"py","file_size_in_byte":964,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"372615989","text":"\"\"\"\nG2_RIGHTS.\n\nAn L3 switch based on static routing.\n\nThis module creates a POX controller which reads static routing configuration from a file.\nAccordingly, each switch that connects to this controller will receive both IP and ARP flows table entries.\nTherefore, no routing request comes to the controller for known paths.\nIf a flow needs to be transmitted on an unknown path, requests will come to the controller only to get ignored and hence those requests would not succeed.\n\n\"\"\"\n\nfrom pox.core import core\nimport pox.openflow.libopenflow_01 as of\nfrom pox.lib.packet.ethernet import ethernet\nfrom pox.lib.packet.ipv4 import ipv4\nfrom pox.lib.packet.arp import arp\nfrom pox.lib.addresses import IPAddr, EthAddr\nfrom pox.lib.revent import *\n\nimport ConfigParser\nfrom collections import defaultdict\nimport json\n\nlog = core.getLogger()\n\nclass TopoStructure():\n \"\"\"Topology structure related constants.\n\n Args:\n topoFile (str): Path to file that contains topology information.\n\n Attributes:\n hostAddrDict (dict): Mapping from host ID to IP address and MAC address.\n\n Examples:\n hostAddrDict['h1']['IP'] = 10.0.1.10\n hostAddrDict['h1']['MAC'] = 000000000001\n\n \"\"\"\n\n def __init__(self, topoFile):\n self.hostAddrDict = {}\n with open(topoFile, \"r\") as read_file:\n self.hostAddrDict = json.load(read_file)\n read_file.close()\n\nclass StaticRouter():\n \"\"\"Definition of a router that reads flow rules from a config file and prepares data required to create flow rules for switches.\n\n Args:\n config_file (str): Path of file that contains routing configuration.\n\n Attributes:\n config (str): Path of file that contains routing configuration.\n\n \"\"\"\n\n def __init__(self, config_file):\n self.config = config_file\n\n def getRoutes(self):\n \"\"\"Create a dictionary of flow rules.\n\n Returns:\n dict: With (key, value) = (switch dpid, list of flow rules)\n\n Example:\n rulesDict['1'] = [(h1,h2,3,2)] can be interpreted as follows:\n On switch s1, a flow rule should be inserted to forward any packets to port 2 which match source host h1, source port 3,\n and destination host h2\n\n \"\"\"\n\n rulesDict = defaultdict(list)\n Config = ConfigParser.ConfigParser()\n if Config.read(self.config):\n switches = Config.sections() # ['s1', 's2', 's3', ...]\n if switches:\n for switch in switches:\n options = Config.options(switch)\n for pair in options:\n ks = pair.split('-')\n sh, dh = ks[0], ks[1] # sh: source host, dh: destination host\n vs = Config.get(switch, pair).split('-')\n sp, dp = vs[0], vs[1] # sp: source port, dp: destination port\n rulesDict[int(switch[1:])].append((sh,dh,sp,dp)) # dict key is just int dpid\n else:\n log.debug(\"no switches found in routing conf. 
No rules will be inserted.\")\n return rulesDict\n\nclass G2Switch (EventMixin):\n \"\"\"An L3 switch class.\n\n Args:\n topoFile (str): Path to file that contains topology information.\n routingFile (str): Path to file that contains routing configuration.\n\n\n Attributes:\n routingConfig (str): Path of file that contains routing configuration.\n topoStruct (TopoStructure): Instance of TopoStructure class that contains topology-related constants.\n\n\n \"\"\"\n\n def __init__ (self, topoFile, routingFile):\n self.topoStruct = TopoStructure(topoFile)\n self.routingConfig = routingFile\n core.addListeners(self)\n\n def _handle_GoingUpEvent (self, event):\n core.openflow.addListeners(self)\n log.debug(\"Up...\")\n\n def _handle_ConnectionUp (self, event):\n dpid = event.connection.dpid\n log.debug(\"switch %i has come up.\", dpid)\n router = StaticRouter(self.routingConfig)\n flowRules = router.getRoutes()\n if flowRules:\n rules = flowRules[dpid] # list of tuples\n for rule in rules:\n sh, dh, inp, outp = rule\n\n # IP\n fm = of.ofp_flow_mod()\n fm.match.in_port = None\n fm.priority = 42\n fm.match.dl_type = 0x0800\n\n fullIP = self.topoStruct.hostAddrDict[sh][\"IP\"]\n splits = fullIP.split('/')\n (addr, netmask) = (splits[0].strip(), int(splits[1].strip()))\n fm.match.nw_src = (IPAddr(addr), netmask)\n\n fullIP = self.topoStruct.hostAddrDict[dh][\"IP\"]\n splits = fullIP.split('/')\n (addr, netmask) = (splits[0].strip(), int(splits[1].strip()))\n fm.match.nw_dst = (IPAddr(addr), netmask)\n\n fm.actions.append(of.ofp_action_output(port = int(outp)))\n event.connection.send(fm)\n\n # ARP\n fm = of.ofp_flow_mod()\n fm.match.in_port = None\n fm.priority = 42\n fm.match.dl_type = 0x0806\n fm.match.dl_src = EthAddr(self.topoStruct.hostAddrDict[sh][\"MAC\"])\n\n fullIP = self.topoStruct.hostAddrDict[dh][\"IP\"]\n splits = fullIP.split('/')\n (addr, netmask) = (splits[0].strip(), int(splits[1].strip()))\n fm.match.nw_dst = (IPAddr(addr), netmask)\n\n fm.actions.append(of.ofp_action_output(port = int(outp)))\n event.connection.send(fm)\n log.debug(\"inserted flow rules in switch %i.\", dpid)\n else:\n log.debug(\"routing conf was not found. No rules added to switch %i.\", dpid)\n\n def _handle_PacketIn (self, event):\n dpid = event.connection.dpid\n inport = event.port\n packet = event.parsed\n\n if not packet.parsed:\n log.warning(\"switch %i port %i ignoring unparsed packet\", dpid, inport)\n return\n\n if packet.type == ethernet.LLDP_TYPE:\n # Ignore LLDP packets\n return\n\n if isinstance(packet.next, ipv4):\n log.debug(\"IPv4 packet\")\n log.debug(\"switch %i port %i IP %s => %s\", dpid,inport,\n packet.next.srcip,packet.next.dstip)\n log.debug(\"ignoring packet\")\n # Do nothing\n return\n elif isinstance(packet.next, arp):\n log.debug(\"ARP packet\")\n a = packet.next\n log.debug(\"switch %i port %i ARP %s srcIP %s => dstIP %s\", dpid, inport,\n {arp.REQUEST:\"request\",arp.REPLY:\"reply\"}.get(a.opcode,\n 'op:%i' % (a.opcode,)), str(a.protosrc), str(a.protodst))\n\n if a.prototype == arp.PROTO_TYPE_IP:\n if a.hwtype == arp.HW_TYPE_ETHERNET:\n if a.protosrc != 0:\n log.debug(\"ignoring packet\")\n # Do nothing\n return\n\n # Todo: Future work- (1) handle other protocol types\n # (2) suppress warnings: ipv6 packet data incomplete and dns incomplete name.\n\n\ndef launch (topo, routing):\n \"\"\"POX controller's launch() function. 
The function that POX calls to tell the component to initialize itself.\n\n Args:\n topo (str): Path to JSON file that contains topology information.\n routing (str): Path to file that contains routing configuration.\n\n Example:\n The command line arguments are passed as follows:\n ./pox.py --verbose openflow.of_01 --port=6633 g2_static --topo='path/to/topo.json --routing='path/to/routing.conf '\n\n \"\"\"\n\n # POX core will handle the case when 'topo' and 'routing' were not specified.\n core.registerNew(G2Switch, topo, routing)\n","sub_path":"pox/g2_static.py","file_name":"g2_static.py","file_ext":"py","file_size_in_byte":7811,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"494451587","text":"LLENAR = 107\nEMPTY = bytes((0x80, 0, 0, 0))\nCOLOR = bytes((0x81, 0xff, 0x00, 0xff))\nOTHER_COLOR = bytes((0x81, 0xff, 0xff, 0xff))\n\nNUM_LEDS=107\nb=bytearray(12+NUM_LEDS * 4)\n\nfor n in range(NUM_LEDS):\n start = 4 + n*4\n b[start:start+4] = EMPTY\n\nfor n in range(LLENAR):\n start = 4 + n*4\n b[start:start+4] = COLOR\n\nb[4:4+4] = OTHER_COLOR\n\nprint(b)\n\n#import machine\n#spi = machine.SPI(machine.SPI.HSPI, sck=14, mosi=13, miso=15)\nspi.write(b)\n","sub_path":"tests/strip_comparison/demo.py","file_name":"demo.py","file_ext":"py","file_size_in_byte":450,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"435417511","text":"import os\nimport unittest\nfrom capmetrics_etl import quality\n\n\nclass TestQualityAssurance(unittest.TestCase):\n\n def setUp(self):\n self.worksheet_names = [\n \"Ridership by Route Weekday\",\n \"Ridership by Route Saturday\",\n \"Ridership by Route Sunday\",\n \"Riders per Hour Weekday\",\n \"Riders Hour Saturday\",\n \"Riders per Hour Sunday\"\n ]\n tests_path = os.path.dirname(__file__)\n self.test_excel = os.path.join(tests_path, 'data/test_cmta_data.xls')\n\n def test_worksheet_completeness_check(self):\n has_worksheets, missing = quality.check_worksheet_completeness(self.test_excel,\n self.worksheet_names)\n self.assertTrue(has_worksheets, msg=missing)\n\n def test_check_route_info(self):\n self.assertTrue(quality.check_route_info(self.test_excel, 'Ridership by Route Weekday'))\n self.assertTrue(quality.check_route_info(self.test_excel, 'Ridership by Route Saturday'))\n self.assertTrue(quality.check_route_info(self.test_excel, 'Ridership by Route Sunday'))\n\n def test_check_ridership_columns(self):\n self.assertTrue(quality.check_for_ridership_columns(self.test_excel,\n self.worksheet_names))\n","sub_path":"tests/test_quality.py","file_name":"test_quality.py","file_ext":"py","file_size_in_byte":1356,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"328834296","text":"# -*-coding:utf-8-*-\n__author__ = 'NeoXie'\n\nimport time\n\nstime = 0\n\ndef G():\n global stime\n if stime == 0:\n stime = time.time()\n else:\n print(time.time() - stime)\n\n\nif __name__ == \"__main__\":\n G()\n time.sleep(2)\n G()","sub_path":"client/v1/data/g.py","file_name":"g.py","file_ext":"py","file_size_in_byte":248,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"219036369","text":"#!/usr/bin/python3\n\"\"\"\nClass for chunk of filters\n\"\"\"\n\nfrom classes.Filter import Filter\n\nclass FiltersChunk(object):\n \"\"\"\n Class for chunk of filters\n \"\"\"\n filters = None\n db = None\n def __init__(self, db, concrete_id=None):\n \"\"\"\n Build chunk\n :param db 
classes.Database:\n \"\"\"\n self.filters = []\n self.db = db\n filters_rows = db.fetch_all(\n \"SELECT id, name, target, type, content FROM filters\" +\n ((\" WHERE id = {0}\".format(concrete_id)) if concrete_id is not None else \"\")\n )\n for filter_row in filters_rows:\n self.filters.append(\n Filter(\n filter_row['id'],\n filter_row['name'],\n filter_row['target'],\n filter_row['type'],\n filter_row['content']\n )\n )\n\n def run(self, letter):\n \"\"\"\n Run filters chunk\n :param letter classes.Letter:\n :return:\n \"\"\"\n return [_filter.get_id() for _filter in self.filters if _filter.process(letter)]\n","sub_path":"classes/FiltersChunk.py","file_name":"FiltersChunk.py","file_ext":"py","file_size_in_byte":1128,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"607685485","text":"# encoding: utf-8\n\nfrom ... import player\nfrom .react import React\nfrom .espread import ESpread\n\nclass Spot(React):\n def __init__(self, id):\n super(Spot, self).__init__(1)\n self._id = id\n\n def __repr__(self):\n return 'Spot(%r)' % self._id\n\n def do(self, event, trace):\n spot = event['__spots__'].get(self._id)\n if spot:\n if not isinstance(spot[1], int) or 0 > spot[1]:\n raise ESpread(spot[1])\n trace.append('Fire%r' % (spot,))\n if isinstance(spot[0], player.Rect):\n if spot[1]:\n point = spot[0].random(spot[1])\n else:\n point = spot[0].center\n elif spot[1]:\n point = spot[0].spread(spot[1])\n else:\n point = spot[0]\n trace.append(['= %r' % point])\n event.click(point)\n else:\n trace.append('None')\n event['__fatal__'] = True\n","sub_path":"pybot/bot/react/spot.py","file_name":"spot.py","file_ext":"py","file_size_in_byte":985,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"140681044","text":"# ======================================================================== #\n# \n# Copyright (c) 2017 - 2018 scVAE authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# \n# ======================================================================== #\n\nimport tensorflow as tf\nfrom numpy import inf\n\nfrom tensorflow_probability import distributions as tensorflow_distributions\n\nfrom tensorflow.python.ops.nn import relu, softmax, softplus\nfrom tensorflow import sigmoid, identity\n\nfrom distributions.zero_inflated import ZeroInflated\nfrom distributions.categorized import Categorized\nfrom distributions.lomax import Lomax\nfrom distributions.pareto import Pareto\nfrom distributions.generalised_pareto import GeneralisedPareto\nfrom distributions.multinomial_non_permuted import NonPermutedMultinomial\n\ndistributions = {\n \"gaussian\": {\n \"parameters\": {\n \"mu\": {\n \"support\": [-inf, inf],\n \"activation function\": identity,\n \"initial value\": tf.zeros\n },\n \"log_sigma\": {\n \"support\": [-3, 3],\n \"activation function\": identity,\n \"initial value\": tf.zeros\n }\n },\n \"class\": lambda theta: tensorflow_distributions.Normal(\n loc = theta[\"mu\"], \n scale = 
tf.exp(theta[\"log_sigma\"])\n )\n },\n\n \"modified gaussian\": {\n \"parameters\": {\n \"mean\": {\n \"support\": [-inf, inf],\n \"activation function\": identity,\n \"initial value\": tf.zeros\n },\n \"variance\": {\n \"support\": [-3, 3],\n \"activation function\": softplus,\n \"initial value\": tf.ones\n }\n },\n \"class\": lambda theta: tensorflow_distributions.Normal(\n loc = theta[\"mean\"], \n scale = tf.sqrt(theta[\"variance\"])\n )\n },\n\n \"gaussian mixture\": {\n \"parameters\": {\n \"logits\": {\n \"support\": [-inf, inf],\n \"activation function\": identity,\n \"initial value\": tf.ones\n },\n \"mus\": {\n \"support\": [-inf, inf],\n \"activation function\": identity,\n \"initial value\": lambda x: tf.random_normal(x, stddev = 1)\n },\n \"log_sigmas\": {\n \"support\": [-3, 3],\n \"activation function\": identity,\n \"initial value\": tf.zeros\n }\n },\n \"class\": lambda theta: tensorflow_distributions.Mixture(\n cat = tensorflow_distributions.Categorical(logits = theta[\"logits\"]), \n components = [tensorflow_distributions.MultivariateNormalDiag(\n loc = m, scale_diag = tf.exp(s)) for m, s in \n zip(theta[\"mus\"], theta[\"log_sigmas\"])]\n )\n },\n\n \"categorical\": {\n \"parameters\": {\n \"logits\": {\n \"support\": [-inf, inf],\n \"activation function\": identity\n }\n },\n \"class\": lambda theta: \n tensorflow_distributions.Categorical(logits = theta[\"logits\"]), \n },\n\n \"bernoulli\": {\n \"parameters\": {\n \"logits\": {\n \"support\": [-inf, inf],\n \"activation function\": identity\n }\n },\n \"class\": lambda theta: tensorflow_distributions.Bernoulli(\n logits = theta[\"logits\"]\n )\n },\n \n \"poisson\": {\n \"parameters\": {\n \"log_lambda\": {\n \"support\": [-10, 10],\n \"activation function\": identity\n }\n },\n \"class\": lambda theta: tensorflow_distributions.Poisson(\n rate = tf.exp(theta[\"log_lambda\"])\n )\n },\n\n \"constrained poisson\": {\n \"parameters\": {\n \"lambda\": {\n \"support\": [0, 1],\n \"activation function\": softmax\n }\n },\n \"class\": lambda theta, N: tensorflow_distributions.Poisson(\n rate = theta[\"lambda\"] * N\n )\n },\n\n \"lomax\": {\n \"parameters\": {\n \"log_concentration\": {\n \"support\": [-10, 10],\n \"activation function\": identity\n },\n \"log_scale\": {\n \"support\": [-10, 10],\n \"activation function\": identity\n }\n },\n \"class\": lambda theta: Lomax(\n concentration = tf.exp(theta[\"log_concentration\"]),\n scale = tf.exp(theta[\"log_scale\"])\n )\n },\n\n \"pareto\": {\n \"parameters\": {\n \"log_alpha\": {\n \"support\": [-10, 10],\n \"activation function\": identity\n }\n },\n \"class\": lambda theta: Pareto(\n alpha = tf.exp(theta[\"log_alpha\"])\n )\n },\n\n \"generalised pareto\": {\n \"parameters\": {\n \"xi\": {\n \"support\": [-1e4, 1e4],\n \"activation function\": identity\n },\n \"log_sigma\": {\n \"support\": [-3, 3],\n \"activation function\": identity\n }\n },\n \"class\": lambda theta: GeneralisedPareto(\n xi = theta[\"xi\"],\n sigma = tf.exp(theta[\"log_sigma\"])\n , validate_args=True)\n },\n\n \"multinomial\": {\n \"parameters\": {\n \"p\": {\n \"support\": [0, 1],\n \"activation function\": softmax\n }\n },\n \"class\": lambda theta, N: NonPermutedMultinomial(\n n = N,\n p = theta[\"p\"])\n }, \n\n \"zero-inflated poisson\": {\n \"parameters\": {\n \"pi\": {\n \"support\": [0, 1],\n \"activation function\": sigmoid\n },\n \"log_lambda\": {\n \"support\": [-10, 10],\n \"activation function\": identity\n }\n },\n \"class\": lambda theta: ZeroInflated(\n 
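# base Poisson count model; ZeroInflated mixes it with a point mass at zero weighted by pi\n 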
tensorflow_distributions.Poisson(\n rate = tf.exp(theta[\"log_lambda\"])\n ),\n pi = theta[\"pi\"]\n )\n },\n \n \"negative binomial\": {\n \"parameters\": {\n \"p\": {\n \"support\": [0, 1],\n \"activation function\": sigmoid\n },\n \"log_r\": {\n \"support\": [-10, 10],\n \"activation function\": identity\n }\n },\n \"class\": lambda theta: tensorflow_distributions.NegativeBinomial(\n total_count = tf.exp(theta[\"log_r\"]),\n probs = theta[\"p\"]\n )\n },\n \n \"zero-inflated negative binomial\": {\n \"parameters\": {\n \"pi\": {\n \"support\": [0, 1],\n \"activation function\": sigmoid\n },\n \"p\": {\n \"support\": [0, 1],\n \"activation function\": sigmoid\n },\n \"log_r\": {\n \"support\": [-10, 10],\n \"activation function\": identity\n }\n },\n \"class\": lambda theta: ZeroInflated(\n tensorflow_distributions.NegativeBinomial(\n total_count = tf.exp(theta[\"log_r\"]),\n probs = theta[\"p\"]\n ),\n pi = theta[\"pi\"]\n )\n }\n}\n\nlatent_distributions = {\n \"gaussian\": {\n \"prior\": {\n \"name\": \"gaussian\",\n \"parameters\": {\n \"mu\": 0.0,\n \"log_sigma\": 0.0\n }\n },\n \"posterior\": {\n \"name\": \"gaussian\",\n \"parameters\": {}\n }\n },\n \"unit-variance gaussian\": {\n \"prior\": {\n \"name\": \"gaussian\",\n \"parameters\": {\n \"mu\": 0.0,\n \"log_sigma\": 0.0\n }\n },\n \"posterior\": {\n \"name\": \"gaussian\",\n \"parameters\": {\n \"log_sigma\": 0.0\n }\n }\n },\n \"gaussian mixture\": {\n \"prior\": {\n \"name\": \"gaussian mixture\",\n \"parameters\": {}\n },\n \"posterior\": {\n \"name\": \"gaussian mixture\", \n \"parameters\": {}\n }\n },\n \"half gaussian mixture\": {\n \"prior\": {\n \"name\": \"gaussian mixture\",\n \"parameters\": {}\n },\n \"posterior\": {\n \"name\": \"gaussian\", \n \"parameters\": {}\n }\n },\n \"fixed gaussian mixture\": {\n \"prior\": {\n \"name\": \"gaussian mixture\",\n \"parameters\": {}\n },\n \"posterior\": {\n \"name\": \"gaussian\", \n \"parameters\": {}\n }\n }\n}\n\nmodel_inference_graph = {\n \"explicit gaussian mixture\": {\n \"posteriors\": {\n \"q_z_given_x_y\": {\n \"name\": \"gaussian\", \n \"parameters\": {},\n \"conditioning\": [\"encoder\", \"q_y_given_x\"]\n },\n \"q_y_given_x\": {\n \"name\": \"categorical\",\n \"parameters\": {},\n \"conditioning\": [\"encoder\"]\n }\n },\n \"priors\": {\n \"p_z_given_y\": {\n \"name\": \"gaussian\",\n \"parameters\": {},\n \"conditioning\": [\"decoder\"]\n }\n }\n },\n}\n","sub_path":"distributions/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":9802,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"379240456","text":"#default_headers\n\nfrom requests.packages.urllib3.util import Retry\nfrom requests import Session\nimport functools\nimport re\ndefault_headers={'Host':'www.zhihu.com',\n 'Origin':'https://www.zhihu.com',\n 'Referer':'https://www.zhihu.com/question/50266549',\n 'User-Agent':'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.116 Safari/537.36',\n 'X-Requested-With':'XMLHttpRequest'}\n\n\n# zhihu_urls\nzhihu_url ='https://www.zhihu.com'\nlogin_url=zhihu_url+'/login/email'\ncaptcha_url=zhihu_url+'/captcha.gif'\ntopic_url=zhihu_url+'/topic'\nquestion_url=zhihu_url+'/question'\nanswer_url='zhihu_url'+'/answer/'\n#vote_people_url=zhihu_url+'/answer'+str(data_id)+'/voters_profile?total='+str(total)+'&offset={0}&follows='\n\n\n\nre_question_url = re.compile(\n 
r'^https?://www\\.zhihu\\.com/question/\\d+(\\?sort=created|/?)$')\n\n\n\n#common_deacorator\n\n\n# def common_init(re_url):\n# \tdef decorator(func):\n# \t\tdef wrapper(self,url,*args,**kwargs):\n# \t\t\tif url is None:\n# \t\t\t\traise ValueError('Invalid url'+url)\n# \t\t\tif url is not None:\n# \t\t\t\tif re_url.match(url) is None:\n# \t\t\t\t\traise ValueError('Invalid url'+url)\n\n# \t\t\tif 'session' not in kwargs.keys() or kwargs['session'] is None:\n# \t\t\t\tkwargs['session'] = Session()\n# \t\t\t\t# kwargs['session'].mount('https://', Retry(5))\n# \t\t\t\t# kwargs['session'].mount('http://', Retry(5))\n# \t\t\treturn func(self,url,*args,**kwargs)\n# \t\treturn wrapper\n# \treturn decorator\n\n\ndef class_common_init(url_re, allowed_none=True, trailing_slash=True):\n def real(func):\n @functools.wraps(func)\n def wrapper(self, url, *args, **kwargs):\n if url is None and not allowed_none:\n raise ValueError('Invalid Url: ' + url)\n if url is not None:\n if url_re.match(url) is None:\n raise ValueError('Invalid URL: ' + url)\n if not url.endswith('/') and trailing_slash:\n url += '/'\n\n if 'session' not in kwargs.keys() or kwargs['session'] is None:\n kwargs['session'] = Session()\n # kwargs['session'].mount('https://', Retry(5))\n # kwargs['session'].mount('http://', Retry(5))\n # self.soup = None\n return func(self, url, *args, **kwargs)\n\n return wrapper\n\n return real\t\t","sub_path":"urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2369,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"21140851","text":"# -*- coding: utf-8 -*-\nfrom repository import SocialDataRepository\nfrom flask import Flask, request\nimport json\n\n\napp = Flask(__name__)\napp.config['DEBUG'] = True\n\n@app.route(\"/\")\ndef index():\n return \"Welcome to SocialDataRepository!\"\n\n@app.route(\"/saveQuery\", methods=['POST'])\ndef saveQuery():\n SocialDataRepository.saveQuery(json.loads(request.get_data()))\n return ('', 204)\n\n@app.route(\"/twitter/saveTweet\", methods=['POST'])\ndef saveTweet():\n SocialDataRepository.saveTweet(json.loads(request.get_data()))\n return ('', 204)\n\n@app.route(\"/twitter/read\", methods=['GET'])\ndef readTweet():\n TwitterRepository.readTweet()\n return ('', 204)\n\nif __name__ == \"__main__\":\n app.run(host='0.0.0.0')\n","sub_path":"index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":720,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"119875192","text":"'''\n 공백이면 그대로 +=\n 유니코드 바꾸고 밀어서, 범위 벗어나면 -=26\n \n'''\ndef process(code, n,start_code, end_code):\n if start_code<=code+n and code+n<=end_code:\n return chr(code+n)\n else:\n return chr(code+n-26)\n\n\ndef solution(s, n):\n code_A=ord('A'); code_Z=ord('Z'); code_a=ord('a'); code_z=ord('z')\n answer = ''\n \n for alph in s:\n if alph==' ':\n answer+=alph; continue\n \n code = ord(alph)\n if code_A<=code and code<=code_Z:\n answer+=process(code, n, code_A, code_Z)\n else:\n answer+=process(code, n, code_a, code_z)\n \n return answer","sub_path":"day1/강준우/10.py","file_name":"10.py","file_ext":"py","file_size_in_byte":672,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"264237186","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport argparse\nimport sys\n\nimport numpy as np\nimport tensorflow as tf\nfrom PIL import Image\nfrom 
tensorflow.python.platform import gfile\n\nimport captcha_model as captcha\nimport config\n\nIMAGE_WIDTH = config.IMAGE_WIDTH\nIMAGE_HEIGHT = config.IMAGE_HEIGHT\n\nCHAR_SETS = config.CHAR_SETS\nCLASSES_NUM = config.CLASSES_NUM\nCHARS_NUM = config.CHARS_NUM\n\nFLAGS = None\n\n\ndef one_hot_to_texts(recog_result):\n texts = []\n for i in range(recog_result.shape[0]):\n index = recog_result[i]\n texts.append(''.join([CHAR_SETS[i] for i in index]))\n return texts\n\n\ndef input_data(image_path):\n if not gfile.Exists(image_path):\n print(\">> Image '\" + image_path + \"' not found.\")\n return None\n images = np.zeros([1, IMAGE_HEIGHT * IMAGE_WIDTH], dtype='float32')\n image = Image.open(image_path)\n image_gray = image.convert('L')\n image_resize = image_gray.resize(size=(IMAGE_WIDTH, IMAGE_HEIGHT))\n image.close()\n input_img = np.array(image_resize, dtype='float32')\n images[0, :] = input_img.flatten()\n return images\n\n\ndef run_predict():\n with tf.Graph().as_default(), tf.device('/cpu:0'):\n input_filename = FLAGS.file_path\n input_images = input_data(input_filename)\n # scale raw pixel values from [0, 255] down to [0, 1], then centre them around zero\n images = tf.math.multiply(input_images, 1. / 255)\n images = tf.math.subtract(images, 0.5)\n logits = captcha.inference(images, keep_prob=1)\n result = captcha.output(logits)\n saver = tf.compat.v1.train.Saver()\n sess = tf.compat.v1.Session()\n saver.restore(sess, tf.train.latest_checkpoint(FLAGS.checkpoint_dir))\n print(tf.train.latest_checkpoint(FLAGS.checkpoint_dir))\n recog_result = sess.run(result)\n sess.close()\n text = one_hot_to_texts(recog_result)\n print('image ' + input_filename + \" recognize ----> '\" + text[0] + \"'\")\n\n\ndef main(_):\n run_predict()\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument(\n '--checkpoint_dir',\n type=str,\n default='./captcha_train',\n help='Directory where to restore checkpoint.'\n )\n parser.add_argument(\n '--file_path',\n type=str,\n help='Absolute path to the captcha image.'\n )\n FLAGS, unparsed = parser.parse_known_args()\n tf.compat.v1.app.run(main=main, argv=[sys.argv[0]] + unparsed)\n","sub_path":"captcha_recognize_single.py","file_name":"captcha_recognize_single.py","file_ext":"py","file_size_in_byte":2447,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"556421020","text":"import math\n\nNUM = 1000\n\n# sieve table: prime[i] == 1 means i is still considered prime\nprime = [1] * (NUM + 1)\n\nLimit = int(math.sqrt(NUM))\nprint(\"Limit={}\".format(Limit))\n\n# the range must include Limit itself: 31 * 31 = 961 <= 1000\nfor i in range(2, Limit + 1):\n if prime[i] == 1:\n for j in range(2 * i, NUM + 1):\n if j % i == 0:\n prime[j] = 0\n\nprint(\"Primes found\")\nfor i in range(2, NUM + 1):\n if prime[i] == 1:\n print('{:>5}'.format(i), end=\"\")","sub_path":"エラトステネスのふるい_2~Nの中から素数を求める/program.py","file_name":"program.py","file_ext":"py","file_size_in_byte":604,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"178033505","text":"# -*- coding: utf-8 -*-\r\nimport logging,time\r\nimport os\r\nimport shutil\r\nfrom functions import fileDir\r\nfrom functions import other\r\nfrom functions.enviConf import enviConf\r\n\r\nclass projectConf(object):\r\n \r\n def __init__(self,params,cid,stdLogger):\r\n self.stdLogger=stdLogger\r\n self.cid=cid\r\n self.params=eval(params)\r\n self.customerDir='../customerDir/'+self.params['customerId']\r\n 
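# every project gets its own working directory underneath the customer's directory\r\n 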
self.projectDir=self.customerDir+'/project/'+self.params['project']\r\n with open('conf/template.conf','r') as myfile:\r\n self.formats=eval(myfile.read())\r\n \r\n def run(self):\r\n if os.path.exists('../customerDir/'+self.params['customerId']):\r\n if self.params['action']=='add':\r\n self.Add()\r\n elif self.params['action']=='delete':\r\n self.delete()\r\n else:\r\n self.sendMess('1','Action '+self.params['action']+' of the unknown') \r\n else:\r\n self.sendMess('1','Customer '+self.params['customerId']+' does not exist')\r\n\r\n def Add(self):\r\n if os.path.exists(self.projectDir):\r\n self.sendMess('1','Project '+self.params['project']+' already exists')\r\n else:\r\n if not os.path.exists(self.customerDir+'/project'):\r\n os.mkdir(self.customerDir+'/project')\r\n os.mkdir(self.projectDir)\r\n self.sendMess('0','Project '+self.params['project']+' has been created')\r\n \r\n def delete(self):\r\n if os.path.exists(self.projectDir): \r\n if os.path.exists(self.projectDir+'/envi'):\r\n dirs=fileDir.listFileName('dir',self.projectDir+'/envi','','short') \r\n for env in dirs:\r\n envMess=str(self.toEnvConfStr(env))\r\n myenv=enviConf(envMess,self.cid,self.stdLogger)\r\n myenv.run() \r\n os.rmdir(self.projectDir+'/envi')\r\n while 1: \r\n if fileDir.checkAllQueue(self.params['customerId'],'project',self.params['project']):\r\n break\r\n time.sleep(1)\r\n #if os.path.exists(self.projectDir+'/app'):\r\n # shutil.rmtree(self.projectDir+'/app')\r\n shutil.rmtree(self.projectDir) \r\n self.sendMess('1','Delete project '+self.params['project']+' is completed') \r\n else:\r\n self.sendMess('0','Project '+self.params['project']+' does not exist') \r\n \r\n def toEnvConfStr(self,env):\r\n for format in self.formats:\r\n if format['task']=='enviConf':\r\n break\r\n format['ID']=self.params['ID']\r\n format['task']='enviConf'\r\n format['customerId']=self.params['customerId']\r\n format['action']=self.params['action']\r\n format['project']=self.params['project']\r\n format['environment']=env\r\n return(format)\r\n \r\n def sendMess(self,status,mess):\r\n #print(other.toSendMessageStr(self.params['ID'],self.cid,self.params['customerId'],status,mess))\r\n fileDir.appendFileLine('messages/'+self.params['customerId']+'.receiveMessage.queue',other.toSendMessageStr(self.params['ID'],self.cid,self.params['customerId'],status,mess))","sub_path":"functions/projectConf.py","file_name":"projectConf.py","file_ext":"py","file_size_in_byte":3269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"439313264","text":"import requests\n\n#get by InstanceIdentifier\n#url = \"https://esbdev.imsone.rxcorp.com/client/process/instance/info/49466907\"\n\n\n#getall\nurl = \"http://spark-jobserver-mesos-devl.dev.spark-group.marathon.mesos:8090/jobs/\"\n\nheaders = {\n 'authorization': \"Basic YnI5ZHVzcjpCcjlkZXZsMQ==\",\n 'cache-control': \"no-cache\",\n #'postman-token': \"67968b4d-5d68-0495-2a40-3c7df71a8293\"\n }\n\nresponse = requests.request(\"GET\", url, headers=headers)\n\nprint(response.text)\n\n\n#parse Json\n\n","sub_path":"Projects/YNZ/aaaaa.py","file_name":"aaaaa.py","file_ext":"py","file_size_in_byte":481,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"326793241","text":"import logging\n\nfrom django.conf import settings\nfrom django.core.mail import EmailMultiAlternatives\n\nLOGGER = logging.getLogger('console_logger')\n\n\ndef send_mail(subject, content, sender, recipients, attachment): # 
coverage:ignore=\n \"\"\"\n 要配置settings.py的邮件服务器才能够发送\n :param subject: 邮件主题\n :param content: 邮件内容\n :param sender: 邮件发送者\n :param recipients: 邮件接收者list\n :param attachment: 邮件附件list\n :return:\n \"\"\"\n msg = EmailMultiAlternatives(subject, content, sender, recipients)\n msg.content_subtype = \"html\"\n # 添加附件(可选)\n for attach in attachment:\n msg.attach_file(attach)\n # 发送\n msg.send()\n\n\ndef send_warning_mail(msg, title='第三方服务异常'):\n \"\"\"\n 发邮件函数\n :param msg: 邮件内容\n :param title: 邮件标题\n :return:\n \"\"\"\n # 邮件提醒\n if settings.ENVIRONMENT == \"development\": # coverage:ignore=\n return\n\n try:\n send_mail('【{}】{}'.format(settings.ENVIRONMENT, title), msg, settings.EMAIL_HOST_USER,\n settings.RECEIVER_EMAIL_GROUP, [])\n except Exception as error:\n LOGGER.warning('email send error:%s', error)\n","sub_path":"applications/email.py","file_name":"email.py","file_ext":"py","file_size_in_byte":1247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"250483845","text":"from pyspark.sql import SparkSession\nfrom pyspark.sql import functions as F\nfrom pyspark.sql.types import StructType, StructField, StringType, BooleanType, TimestampType, IntegerType\nimport logging\nimport time\nimport json\n\nimport sys\nsys.path.append(\"/usr/local/spark/resources/x/\")\nimport config as c\n\n# Start timer to record script running time\nstar_time = time.time()\n\n# Logging setup\nclass HandlerFilter():\n '''Class to filter handler based on message levels'''\n def __init__(self, level):\n '''\n Initialize HandleFilter object.\n \n Args:\n level: Level to filter handler with\n '''\n self.__level = level\n\n def filter(self, log_record):\n '''\n Filter log record based on level.\n \n Args:\n log_record: Log to filter\n '''\n\n return log_record.levelno == self.__level\n\n# Logger setup (emit log records)\nlogger = logging.getLogger(\"initial_data_cleaning\")\nlogger.setLevel(logging.INFO)\n\n# Handler setup (send the log records to the appropriate destination)\nconsole_handler = logging.StreamHandler()\nlogger.addHandler(console_handler)\n\nfile_handler = logging.FileHandler(\"/usr/local/spark/resources/pipeline.log\")\nlogger.addHandler(file_handler)\n\n# Filter setup (based on the message level)\nconsole_handler.addFilter(HandlerFilter(logging.INFO))\n# file_handler.addFilter(HandlerFilter(logging.WARNING))\n\n# Formatter setup (specify the layout of log records in the final output)\nformatter = logging.Formatter('%(asctime)s:%(levelname)s:%(name)s: %(message)s')\nfile_handler.setFormatter(formatter)\n\n\ndef read_parquet(file):\n\n df = cleaning_session.read.parquet(f\"/usr/local/spark/resources/output/Extracted_MySQL_Tables/initial_extraction/{file}\")\n rows = df.count()\n logger.info(f\"Parquet file '{file}' was successfully loaded into DataFrame. {rows} rows loaded\")\n\n return df\n\n\ndef filter_status_out(df_name, df, statuses):\n '''Filter out from DataFrame, orders that have status not required based on status id number.\n \n Args:\n df_name (str): Name of DataFrame\n df (DataFrame): DataFrame to filter\n statuses (list): List of status id's to be filtered out from DataFrame\n \n Returns:\n filtered_df (DataFrame)\n \n Raises:\n Exception:\n '''\n\n filtered_df = df.filter(~df.statusId.isin(statuses))\n rows = filtered_df.count()\n logger.info(f\"Transformation on DataFrame '{df_name}' completed: Transactions with status {statuses} were filtered out. 
Row count is {rows}\")\n \n return filtered_df\n\n\ndef categorize_transactions(df_name, df):\n '''Categorize transactions based on defined parameters.\n \n Args:\n df_name (str): Name of DataFrame\n df (DataFrame): DataFrame to categorize \n \n Returns:\n categorized_df (DataFrame)\n \n Raises:\n Exception:\n '''\n\n categorized_df = df.select(df['*'], \n F.when((df.num.like(\"%#SS%\")) | (df.num.like(\"%#CS%\")) | (df.num.like(\"%#MS%\"))\\\n | (df.num.like(\"%Samples%\")) | (df.num.like(\"%SAMPLES%\")) | (df.num.like(\"%samples%\")), \"Samples\")\n .when(df.num.like(\"%RMA%\"), \"RMA\")\n .when(df.customerId == c.id_15, \"C\")\n .when(df.customerId == c.id_14, \"G\")\n .when(df.customerId == c.id_13, \"H\")\n .when(df.customerId == c.id_12, \"J\")\n .when(df.customerId == c.id_11, \"N\")\n .when(df.customerId == c.id_10, \"A\")\n .when(df.customerId == c.id_9, \"B\")\n .when(df.customerId == c.id_8, \"E\")\n .when(df.customerId == c.id_7, \"F\")\n .when(df.customerId == c.id_6, \"I\")\n .when(df.customerId == c.id_5, \"M\")\n .when(df.customerId == c.id_4, \"Q\")\n .when(df.customerId == c.id_3, \"R\")\n .when((df.customerId == c.id_2) | (df.customerId == c.id_1), \"Closed Channel\")\n .when((df.qbClassId == 19) & (df.customerId != c.id_15) & (df.customerId != c.id_11), \"K\")\n .when((df.qbClassId == 9) & (df.customerId != c.id_7) & (df.customerId != c.id_5) & (df.customerId != c.id_12), \"O\")\n .when((df.qbClassId == 12) | (df.qbClassId == 17), \"P\") \n .otherwise(\"Uncategorized\").alias(\"sales_channel\"))\n \n rows = categorized_df.count()\n logger.info(f\"Transformation on DataFrame '{df_name}' completed: Sales transactions were categorized. Row count is {rows}\")\n \n return categorized_df\n\n\ndef retrieve_not_req_trans(df, categories):\n '''Filter categories not required for Fact_Sales table.\n \n Args:\n df (DataFrame): DataFrame to filter and save as CSV file\n categories (list): List of categories to be filtered \n \n Returns:\n not_required_trans (DataFrame)\n \n Raises:\n Exception:\n '''\n \n not_required_trans = df.filter(df.sales_channel.isin(categories))\n rows = not_required_trans.count()\n logger.info(f\"Transactions not required in Fact_Sales table were loaded into DataFrame 'not_required'. Row count is {rows}\")\n \n return not_required_trans\n\n\ndef filter_categories_out(df_name, df, categories):\n '''Filter out categories not required for Fact_Sales table.\n \n Args:\n df_name (str): Name of DataFrame\n df (DataFrame): DataFrame to filter\n categories (list): List of categories to be filtered out \n \n Returns:\n filtered_df (DataFrame)\n \n Raises:\n Exception:\n '''\n \n filtered_df = df.select(\"id\", \"currencyId\", \"customerId\", \"dateCompleted\", \"dateCreated\", \"locationGroupId\", \"qbClassId\", \"statusId\", \"sales_channel\")\\\n .filter(~df.sales_channel.isin(categories))\n rows = filtered_df.count()\n logger.info(f\"Transformation on DataFrame '{df_name}' completed: Sales transactions with categories: {categories} were filtered out. 
Row count is {rows}\")\n \n return filtered_df\n\n\ndef filter_item_type(df_name, df, item_type):\n '''Filter rows that match the required item type.\n \n Args:\n df_name (str): Name of DataFrame\n df (DataFrame): DataFrame to filter\n item_type (int): Item type to be filtered\n \n Returns:\n filtered_df (DataFrame)\n \n Raises:\n Exception:\n '''\n \n filtered_df = df.filter(df.typeId == item_type)\n rows = filtered_df.count()\n logger.info(f\"Transformation on DataFrame '{df_name}' completed: Items with typeId {item_type} were filtered. Row count is {rows}\")\n \n return filtered_df\n\n\ndef round_qty_ordered(df_name, df):\n '''Round quantity ordered to zero digits.\n \n Args:\n df_name (str): Name of DataFrame\n df (DataFrame): DataFrame to round ordered quantity from \n \n Returns:\n rounded_df (DataFrame)\n \n Raises:\n Exception:\n '''\n \n rounded_df = df.withColumn(\"qtyOrdered_r\", F.round(df.qtyOrdered, 0)).drop(df.qtyOrdered)\n rows = rounded_df.count()\n logger.info(f\"Transformation on DataFrame '{df_name}' completed: Column qtyOrdered was rounded to zero digits. Row count is {rows}\")\n \n return rounded_df\n\n\ndef calculate_part_volume(df):\n '''Calculate part volume from its dimensions, rounded to two digits.\n \n Args: \n df (DataFrame): DataFrame to calculate part volume from \n \n Returns:\n updated_df (DataFrame)\n \n Raises:\n Exception:\n '''\n \n \n updated_df = df.withColumn(\"volume\", F.round(df.len * df.width * df.height, 2)).drop(*[\"len\", \"width\", \"height\"])\n rows = updated_df.count()\n logger.info(f\"Transformation on DataFrame 'part' completed: part volume calculated. Row count is {rows}\")\n \n return updated_df\n\n\n@F.udf(returnType=StringType())\ndef parse_json(custom_field, key):\n\n '''Read json string from part.customFields to get specific attribute.\n \n Args:\n custom_field (str): Includes all part custom field data\n key (str): Dictionary key of field to add as a column\n\n \n Returns:\n String\n '''\n str_to_dict = json.loads(custom_field)\n # the requested key may be missing from customFields; guard before reading \"value\"\n attr_dict = str_to_dict.get(key)\n if attr_dict is None:\n return None\n attribute = attr_dict.get(\"value\", None)\n\n return attribute\n\n\ndef create_fact_sales(soitem, so, product, part):\n '''Create Fact Sales table by joining multiple DataFrames.\n \n Args:\n soitem (DataFrame): soitem DataFrame\n so (DataFrame): so DataFrame\n product (DataFrame): product DataFrame\n part (DataFrame): part DataFrame\n \n Returns:\n Fact_Sales (DataFrame)\n \n Raises:\n Exception:\n '''\n \n # Join DataFrame 'soitem' with DataFrame 'so'\n categorized_items = soitem.join(so, soitem.soId == so.id) \n rows = categorized_items.count()\n logger.info(f\"Join completed. DataFrame 'soitem' joined with DataFrame 'so'. Row count is {rows}\")\n\n # Join DataFrame 'categorized_items' with DataFrame 'product'\n categorized_items = categorized_items.join(product, categorized_items.productId == product.id)\n rows = categorized_items.count()\n logger.info(f\"Join completed. DataFrame 'categorized_items' joined with DataFrame 'product'. Row count is {rows}\")\n\n # Join DataFrame 'categorized_items' with DataFrame 'part'\n categorized_items = categorized_items.join(part, categorized_items.partId == part.id)\n rows = categorized_items.count()\n logger.info(f\"Join completed. DataFrame 'categorized_items' joined with DataFrame 'part'. 
Row count is {rows}\")\n\n # Fact Sales table\n Fact_Sales = categorized_items.select(categorized_items.dateCreated.alias(\"Date_Id\"), categorized_items.sales_channel.alias(\"Sales_Channel_Id\"), \\\n categorized_items.masked_num.alias(\"Product_Id\"), categorized_items.qtyOrdered_r.alias(\"Units_Sold\"))\n rows = Fact_Sales.count() \n logger.info(f\"Fact Sales table was created. Row count is {rows}\")\n \n return Fact_Sales\n\n\ndef create_dim_products(part):\n '''Create Dim Products table.\n \n Args: \n part (DataFrame): part DataFrame\n \n Returns:\n Dim_Products (DataFrame)\n \n Raises:\n Exception:\n '''\n \n Dim_Products = part.select(part.masked_num.alias(\"sku\"), part.design_year, part.season, part.material, part.volume)\n rows = Dim_Products.count() \n logger.info(f\"'Dim Products' table was created. Row count is {rows}\") \n \n return Dim_Products\n\n\n# Start SparkSession (entry point to Spark)\ncleaning_session = SparkSession.builder.master(\"spark://spark:7077\").appName('initial_data_cleaning').getOrCreate()\n\n\n# Read Parquet files into DataFrames\nso = read_parquet(\"r_so\")\nsoitem = read_parquet(\"r_soitem\")\nproduct = read_parquet(\"r_product\")\npart = read_parquet(\"m_part\")\n\n\n\n## 'so' DataFrame transformations\n\n# Filter out orders that have order status not required (80: Voided, 85: Cancelled, 90: Expired, 95: Historical)\nstatus_not_required = [80, 85, 90, 95]\nso = filter_status_out(\"so\", so, status_not_required)\n\n# Categorize transactions by Sales Channel\nso = categorize_transactions(\"so\", so)\n\n# Save transactions not required (Samples, RMA, Closed Channel, Uncategorized) in CSV file as backup\ncategories_not_req = [\"Samples\", \"RMA\", \"Closed Channel\", \"Uncategorized\"]\nnot_required_trans = retrieve_not_req_trans(so, categories_not_req)\nnot_required_trans.write.mode('overwrite').csv(\"/usr/local/spark/resources/output/Extracted_MySQL_Tables/initial_extraction/transactions_not_loaded.csv\")\nlogger.info(f\"DataFrame 'not_required' was saved as CSV file\")\n\n# Remove transactions not required from \"so\" DataFrame (Samples, RMA, Closed Channel, Uncategorized)\nso = filter_categories_out(\"so\", so, categories_not_req)\n\n\n## 'soitem' DataFrame transformations\n\n# Filter only items with typeId = 10 (Sale Items)\nsoitem = filter_item_type(\"soitem\", soitem, 10)\n\n\n# Round qtyOrdered field to zero digits\nsoitem = round_qty_ordered(\"soitem\", soitem)\n\n\n## 'part' DataFrame transformations\n\n# Extract attributes from 'customFields' into new columns\npart = part.withColumn(\"design_year\", parse_json(part.customFields, F.lit(\"64\")))\npart = part.withColumn(\"season\", parse_json(part.customFields, F.lit(\"65\")))\npart = part.withColumn(\"material\", parse_json(part.customFields, F.lit(\"63\")))\n\n\n# Calculate part volume\npart = calculate_part_volume(part)\n\n\n# Create and save 'Fact_Sales' table as Parquet file\nFact_Sales = create_fact_sales(soitem, so, product, part)\nFact_Sales.write.mode('overwrite').parquet(\"/usr/local/spark/resources/output/Star_Schema_Tables/Fact_Sales\")\nlogger.info(f\"Table 'Fact_Sales' was successfully saved as Parquet file\")\n\n\n# Create and save 'Dim_Products' table as Parquet file\nDim_Products = create_dim_products(part)\nDim_Products.write.mode('overwrite').parquet(\"/usr/local/spark/resources/output/Star_Schema_Tables/Dim_Products\")\nlogger.info(f\"Table 'Dim_Products' was successfully saved as Parquet file\")\n\n\n# Create and save 'Dim_Sales_Channels' table as Parquet 
file\nsales_cat_schema = StructType([\n\tStructField(\"sales_channel\", StringType(), False),\n StructField(\"platform_type\", StringType(), False),\n StructField(\"business_to\", StringType(), False),\n StructField(\"drop_shipping\", BooleanType(), False),\n \n])\n\nDim_Sales_Channels = cleaning_session.read.option(\"header\", True).schema(sales_cat_schema).csv(\"/usr/local/spark/resources/x/sales_cat.csv\")\nDim_Sales_Channels.printSchema()\nDim_Sales_Channels.write.mode('overwrite').parquet(\"/usr/local/spark/resources/output/Star_Schema_Tables/Dim_Sales_Channels\")\nlogger.info(f\"Table 'Dim_Sales_Channels' was successfully saved as Parquet file\")\n\n\n# Create 'Dim_Dates' table\ndate_schema = StructType([\n\tStructField(\"date_ts\", TimestampType(), False),\n StructField(\"year\", IntegerType(), False),\n StructField(\"month\", IntegerType(), False),\n StructField(\"day\", IntegerType(), False), \n])\n\nDim_Dates = cleaning_session.read.option(\"header\", True).schema(date_schema).csv(\"/usr/local/spark/resources/x/date.csv\")\nDim_Dates.write.mode('overwrite').parquet(\"/usr/local/spark/resources/output/Star_Schema_Tables/Dim_Dates\")\nlogger.info(f\"Table 'Dim_Dates' was successfully saved as Parquet file\")\n\n# Record script running time\nscript_time = round(time.time() - star_time, 2)\nlogger.info(f\"'initial_data_cleaning' script was successfully executed. Runnig time was {script_time} secs\")","sub_path":"spark/app/initial_data_cleaning.py","file_name":"initial_data_cleaning.py","file_ext":"py","file_size_in_byte":15258,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"123017694","text":"import pygame\nimport Battle\nimport DialogueScene\nimport TitleScreen\nimport LoadMenu\nimport SaveMenu\nimport Options\nimport utils\nfrom pygame.locals import *\n\n\ndef init():\n pygame.init()\n screen = pygame.display.set_mode((600, 600))\n return screen\n\ndef FireEmblem(screen):\n inGame = True\n nbSave = 0\n state = [\"Title\", \"Scene1\", 1]\n characters = []\n while inGame:\n if state[0] == \"Dialogue\":\n state = DialogueScene.Dialogue(screen, state[1], state[2])\n elif state[0] == \"Battle\":\n infosBattle = utils.loadBattle(state[1], nbSave)\n endBattle = Battle.battle(screen, infosBattle[0], infosBattle[1], infosBattle[2], infosBattle[3])\n state = endBattle[0]\n characters = endBattle[1]\n if state[0] not in [\"Quit\", \"Title\"]:\n utils.save(\"auto\", state[0], state[1], state[2], pygame.Surface((600, 600)), characters)\n elif state[0] == \"Title\":\n state = TitleScreen.TitleScreen(screen)\n elif state[0] == \"Load\":\n state = LoadMenu.loadMenu(screen)\n elif state[0] == \"Save\":\n state = SaveMenu.SaveMenu(screen, state[1], state[2], state[3], state[4], characters)\n elif state[0] == \"Options\":\n state = Options.Options(screen)\n elif state[0] == \"Quit\":\n inGame = False\n\n\nif __name__ == '__main__':\n FireEmblem(init())\n pygame.quit()\n","sub_path":"SingularityA.py","file_name":"SingularityA.py","file_ext":"py","file_size_in_byte":1430,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"601994649","text":"# -*- coding: utf-8 -*-\nimport sys\nimport os\nfrom datetime import datetime, timedelta\nimport time\nimport torch\nfrom torch.utils import data\nimport numpy as np\nimport gzip\nimport random\nimport subprocess\nimport itertools\n#logging.getLogger(\"transformers.tokenization_utils\").setLevel(logging.ERROR)\n\n\nclass DataWIKI_MLM(data.Dataset):\n 
'Characterizes a dataset for PyTorch'\n def __init__(self, pathWIKI, shuffle=False):\n 'Initialization'\n\n fname = pathWIKI+\".nlines.txt\"\n # read buffer with number of lines from disk if exists\n if os.path.isfile(fname):\n with open(fname, 'r', encoding='utf8') as fh:\n tot = fh.readlines()\n print(\"tot: \", tot)\n assert(len(tot)==1)\n self.tot = int(tot[0].strip())\n\n else:\n # compute buffer with number of lines and write to disk\n self.tot = int(subprocess.check_output(\"zcat \" + pathWIKI + \" | wc -l\", shell=True).split()[0])\n # write buffer with number of lines to disk\n with open(fname, 'w', encoding='utf8') as fh:\n fh.write(str(self.tot))\n\n # shuffle file before opening\n if shuffle:\n print(\"Shuffling dataset\", end=\"...\")\n start_t = time.time()\n self.shuffle(pathWIKI)\n end_t = time.time()\n print(\" took %s.\"%(str(timedelta(seconds=end_t - start_t))))\n\n #print(\"TOT\")\n print(\"num samples: \" + str(self.tot))\n fileBIG = gzip.open(pathWIKI, 'rt')\n self.itera = itertools.cycle(enumerate(fileBIG))\n self.n_reads = 0\n\n\n def shuffle(self, pathWIKI):\n \"\"\" Decompress gzipped file, shuffle it, and recompress it \"\"\"\n assert(pathWIKI.endswith(\"gz\")), \"Incorrect file format: %s\"%pathWIKI\n\n temp_fname = pathWIKI + \".temp\"\n try:\n subprocess.check_output(\"zcat \" + pathWIKI + \" | shuf | gzip > \" + temp_fname, shell=True)\n subprocess.check_output(\"mv \" + temp_fname + \" \" + pathWIKI, shell=True)\n except:\n raise Exception(\"An error happened while decompressing/shuffling/recompressing file %s, %s\"%(\n pathWIKI, temp_fname))\n assert( os.path.isfile(pathWIKI) ), \"Could not find file: %s\"%pathWIKI\n\n\n def __len__(self):\n 'Denotes the total number of samples'\n return self.tot\n\n\n def preprocess(self, sample):\n idxs = sample[1].strip().split(\"\\t\")\n assert(len(idxs)==4)\n # input_ids_labels, attention_mask, (useless segment idx), input_ids_mlm\n #label idx_tokens idx_masks idx_segs idx_idxs\n #procsample = (np.fromstring(idxs[1],dtype=int,sep=' '), np.fromstring(idxs[2],dtype=int,sep=' '), np.fromstring(idxs[3],dtype=int,sep=' '), np.fromstring(idxs[4],dtype=int,sep=' '), np.fromstring(idxs[0],dtype=int,sep=' '))\n #procsample = (np.fromstring(idxs[0],dtype=int,sep=' '), np.fromstring(idxs[2],dtype=int,sep=' '), np.fromstring(idxs[3],dtype=int,sep=' '), np.fromstring(idxs[4],dtype=int,sep=' '), np.fromstring(idxs[0],dtype=int,sep=' '))\n\n # labels/targets , attention masks, segment ids (unused), input ids (unmasked)\n procsample = (np.fromstring(idxs[0],dtype=int,sep=' '), np.fromstring(idxs[1],dtype=int,sep=' '), np.fromstring(idxs[2],dtype=int,sep=' '), np.fromstring(idxs[3],dtype=int,sep=' '))\n\n # fix the ignore index to -100\n targets_mask = procsample[3] == procsample[0]\n procsample[0][targets_mask] = -100\n\n return procsample\n\n def __getitem__(self, index):\n 'Generates one sample of data'\n # Select sample\n sample = next(self.itera)\n #print(\" sample, before: \", sample)\n sample = self.preprocess(sample)\n #print(\" sample: \", sample)\n #print(\"DataWiki_MLM.py\")\n #sys.exit(1)\n self.n_reads += 1\n return sample\n","sub_path":"wiki_crosslingual/DataWIKI_MLM.py","file_name":"DataWIKI_MLM.py","file_ext":"py","file_size_in_byte":3895,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"368187467","text":"#!/usr/bin/python\n# -*- coding: UTF-8 -*-\nimport torch\nfrom torch.utils.data import (DataLoader, RandomSampler, TensorDataset)\nfrom 
pytorch_pretrained_bert.modeling import BertModel\nfrom pytorch_pretrained_bert.optimization import BertAdam\nfrom torch import nn\nimport torch.optim as optim\nfrom tqdm import tqdm\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nimport numpy as np\nimport time\nimport os\nimport pickle as pkl\nfrom model import sequence_correct_label_model,sequence_label_model\n\nclass InputFeatures(object):\n    \"\"\"A single set of features of data.\"\"\"\n\n    def __init__(self, input_ids, input_mask, segment_ids, label_id,predict):\n        self.input_ids = input_ids\n        self.input_mask = input_mask\n        self.segment_ids = segment_ids\n        self.label_id = label_id\n        self.predict=predict\n\nif __name__ == '__main__':\n    # Pick one dataset/model configuration; CDWA is enabled here, CDWC is kept below as the alternative.\n    distant_pkl='my_data_new/distant_CDWA_predict.pkl'\n    distant_final_pkl='my_data_new/distant_CDCA.pkl'\n    model_bert_predict_path='model_CDWA_correct/model_bert.pth'\n    model_sequence_predict_label='model_CDWA_correct/model_sequence_label.pth'\n\n    # distant_pkl='my_data_new/distant_CDWC_predict.pkl'\n    # distant_final_pkl='my_data_new/distant_CDCC.pkl'\n    # model_bert_predict_path='model_CDWC_correct/model_bert.pth'\n    # model_sequence_predict_label='model_CDWC_correct/model_sequence_label.pth'\n\n    hidden_dim=768\n    tagset_size=3\n    max_seq_length=512\n    project_dim=20\n    bert_file='/biobert_v1.1_pubmed/'\n    #############distant#################\n    with open(distant_pkl, \"rb\") as f:\n        # distant_data,_,_=pkl.load(f)\n        distant_data=pkl.load(f)\n    print(f'distant data len {len(distant_data)}') \n    ############## get the labels #####################################\n    if not os.path.exists(distant_final_pkl):\n        model_bert = BertModel.from_pretrained(bert_file)\n        model_bert.load_state_dict(torch.load(model_bert_predict_path)) \n        model_bert.cuda()\n        model_sequence_label=sequence_correct_label_model(hidden_dim,tagset_size,project_dim)\n        # model_sequence_label=sequence_label_model(hidden_dim,tagset_size)\n        model_sequence_label.load_state_dict(torch.load(model_sequence_predict_label)) \n        model_sequence_label.cuda()\n        distant_dataloader = DataLoader(distant_data, batch_size=1)\n        predict=[]\n        for batch in tqdm(distant_dataloader):\n            batch = tuple(t.cuda() for t in batch)\n            input_ids, input_mask, segment_ids, label_ids,predict_label = batch\n            # input_ids, input_mask, segment_ids,predict_label, label_ids = batch \n            # input_ids, input_mask, segment_ids, label_ids = batch \n            all_encoder_layers, _ = model_bert(input_ids, segment_ids, input_mask, output_all_encoded_layers=False)\n            out=model_sequence_label(all_encoder_layers,predict_label,False)\n            # out=model_sequence_label(all_encoder_layers,label_ids,False)\n            \n            # out=model_sequence_label(all_encoder_layers,False)\n            decoded=np.argmax(out.squeeze(0).data.cpu().numpy(),axis=1)\n            predict.append(decoded)\n        # with open(distant_predict_pkl, \"rb\") as f:\n        #     predict=pkl.load(f)\n        all_input_ids = torch.tensor([f[0][0].data.cpu().numpy() for f in distant_dataloader], dtype=torch.long).cuda()\n        all_input_mask = torch.tensor([f[1][0].data.cpu().numpy() for f in distant_dataloader], dtype=torch.long).cuda()\n        all_segment_ids = torch.tensor([f[2][0].data.cpu().numpy() for f in distant_dataloader], dtype=torch.long).cuda()\n        all_label_ids = torch.tensor(predict, dtype=torch.long).cuda()\n        # all_gold_labels = torch.tensor([f[3][0].data.cpu().numpy() for f in distant_dataloader], dtype=torch.long).cuda() \n        distant_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)\n        # distant_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, 
all_label_ids,all_gold_labels)\n \n with open(distant_final_pkl, \"wb\") as f:\n pkl.dump((distant_data), f, -1)\n else:\n with open(distant_final_pkl, \"rb\") as f:\n distant_data=pkl.load(f)\n\n #############获得整合标签#################\n \n \n write_root='my_data_new/'\n\n distant_final_pkl1=write_root+'distant_CDCA.pkl'\n distant_final_pkl2=write_root+'distant_CDCC.pkl'\n intergrate_pkl='my_data_new/diatant_intergrate2.pkl'\n\n if not os.path.exists(intergrate_pkl):\n ############distant#################\n with open(distant_final_pkl1, \"rb\") as f:\n # distant_data1,_,_=pkl.load(f)\n distant_data1=pkl.load(f)\n \n with open(distant_final_pkl2, \"rb\") as f:\n # distant_data2,_,_=pkl.load(f)\n distant_data2=pkl.load(f)\n \n \n distant_dataloader1 = DataLoader(distant_data1, batch_size=1)\n distant_dataloader2 = DataLoader(distant_data2, batch_size=1)\n # for f1,f2 in zip(distant_dataloader1,distant_dataloader2):\n # print((f1[3][0]==f2[3][0]).float().data.cpu().numpy())\n # ppp\n all_input_ids = torch.tensor([f[0][0].data.cpu().numpy() for f in distant_dataloader1], dtype=torch.long).cuda()\n print(all_input_ids.size())\n all_input_mask = torch.tensor([f[1][0].data.cpu().numpy() for f in distant_dataloader1], dtype=torch.long).cuda()\n print(all_input_mask.size()) \n all_segment_ids = torch.tensor([f[2][0].data.cpu().numpy() for f in distant_dataloader1], dtype=torch.long).cuda()\n print(all_segment_ids.size()) \n all_label_ids1 = torch.tensor([f[3][0].data.cpu().numpy() for f in distant_dataloader1], dtype=torch.long).cuda()\n print(all_label_ids1.size()) \n all_label_ids2 = torch.tensor([f[3][0].data.cpu().numpy() for f in distant_dataloader2], dtype=torch.long).cuda()\n print(all_label_ids2.size()) \n all_label_sims = torch.tensor([(f1[3][0]==f2[3][0]).float().data.cpu().numpy() for f1,f2 in zip(distant_dataloader1,distant_dataloader2)], dtype=torch.float).cuda()\n print(all_label_sims.size()) \n distant_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids1,all_label_ids2,all_label_sims)\n with open(intergrate_pkl, \"wb\") as f:\n pkl.dump((distant_data), f, -1)\n else:\n with open(intergrate_pkl, \"rb\") as f:\n distant_data=pkl.load(f)\n\n ","sub_path":"bibm2020/TBNER-main/get_label.py","file_name":"get_label.py","file_ext":"py","file_size_in_byte":6541,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"530569008","text":"\"\"\"\r\nLibrary implements parsing BDD feature files into TM4J testcases\r\n\"\"\"\r\n\r\nimport argparse\r\nfrom classes.BddParser import BddParser\r\nfrom libs.files import get_list_of_files, get_full_path, try_file_exists, find_file_in_list_by_name\r\nfrom libs.test_log_parser import parse_log, remove_duplicates\r\nfrom libs.config import read_config\r\nfrom libs.tm_log import get_logger\r\n\r\nlogger = None\r\n\r\n\r\ndef get_list_of_feature_files_to_proceed(config, diff: str = None) -> list:\r\n \"\"\"\r\n function returns list of feature files to proceed basing on config settings\r\n :param config:\r\n :param diff: abs path to alternate diff file\r\n :return: list of feature file paths either all or modified\r\n \"\"\"\r\n logger.info(f'Getting list of feature files to proceed')\r\n bdd_config = config['BDD']\r\n updated_files_list = []\r\n use_relative_path = bool(config['GENERAL']['useRelativePath'])\r\n features_folder = get_full_path(bdd_config['localRepoRoot'], use_relative_path, bdd_config['featuresFolderInLocalRepository'])\r\n files_list = 
get_list_of_files(features_folder, '.feature', use_relative_path)\r\n if bdd_config['diffTestsUpdate'] == 'True':\r\n diff_file = diff if diff else get_full_path(bdd_config['localRepoRoot'], use_relative_path, bdd_config['diffFilePath'])\r\n diff_file = try_file_exists(diff_file, '', logger, use_relative_path, True)\r\n modified_files = parse_log(diff_file)\r\n modified_feature_files = list(filter(lambda x: '.feature' in x, modified_files))\r\n for modified_file in modified_feature_files:\r\n try:\r\n updated_files_list.append(find_file_in_list_by_name(modified_file, files_list, bdd_config, True))\r\n except FileNotFoundError:\r\n logger.error(f'File {modified_file} was not found locally, skipping...')\r\n logger.info(f'Diff_file {diff_file}. Diff feature files: {modified_feature_files}'\r\n f'. Updated_files_list: {updated_files_list}')\r\n return remove_duplicates(updated_files_list)\r\n logger.info(f'Files_list: {files_list}.')\r\n return files_list\r\n\r\n\r\ndef main():\r\n global logger\r\n parser = argparse.ArgumentParser()\r\n parser.add_argument(\"-c\", \"--config\", help=\"Path to alternative config file\")\r\n parser.add_argument(\"-d\", \"--diff\", help=\"Path to diff file to proceed\")\r\n args = parser.parse_args()\r\n config_path = args.config if args.config else None\r\n diff_path = args.diff if args.diff else None\r\n config = read_config(config_path)\r\n logger = get_logger(__name__, config)\r\n try:\r\n files_list = get_list_of_feature_files_to_proceed(config=config, diff=diff_path)\r\n bdd_parser = BddParser(config_path)\r\n bdd_parser.read_files(files_list)\r\n bdd_parser.do_export_results()\r\n except Exception as e:\r\n logger.exception(e)\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","sub_path":"tm4j_adapter/bdd_parser.py","file_name":"bdd_parser.py","file_ext":"py","file_size_in_byte":2897,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"506180059","text":"# 647, 回文子串\n# https://leetcode-cn.com/problems/palindromic-substrings/\nclass Solution:\n def countSubstrings(self, s: str) -> int:\n n = len(s)\n dp = [[0]*n for _ in range(n)]\n results = 0\n for i in range(n):\n dp[i][i] = 1\n results += 1\n \n for i in range(n-2, -1, -1):\n for j in range(i+1,n):\n if s[i]==s[j]:\n if j-i==1:\n dp[i][j] = 1\n else:\n dp[i][j] = dp[i+1][j-1]\n if dp[i][j]:\n results +=1\n return results\n","sub_path":"Week_06/huiwen.py","file_name":"huiwen.py","file_ext":"py","file_size_in_byte":634,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"323974444","text":"from room import Room\nfrom player import Player\nfrom world import World\n\nimport random\nfrom ast import literal_eval\n\n# Load world\nworld = World()\n\n\n# You may uncomment the smaller graphs for development and testing purposes.\n# map_file = \"maps/test_line.txt\"\n# map_file = \"maps/test_cross.txt\"\n# map_file = \"maps/test_loop.txt\"\n# map_file = \"maps/test_loop_fork.txt\"\nmap_file = \"maps/main_maze.txt\"\n\n# Loads the map into a dictionary\nroom_graph=literal_eval(open(map_file, \"r\").read())\nworld.load_graph(room_graph)\n\n# Print an ASCII map\nworld.print_rooms()\n\n# Player starts in Room 0\nplayer = Player(world.starting_room)\n\n# List will fill with directions to walk\ntraversal_path = []\n\n# Inverse directions N/S/E/W\ninverse = {\"n\": \"s\", \"s\": \"n\", \"e\": \"w\", \"w\": \"e\"}\nprev_room = []\n\n# Keep track of visited rooms \nvisited = set()\n# Keep track of exits explored 
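\n# A minimal sketch of the backtracking bookkeeping used below (the moves are\n# hypothetical): every step pushes its opposite so the walker can retrace.\n#   crumbs = []\n#   for step in ('n', 'e'):\n#       crumbs.append(inverse[step])   # crumbs == ['s', 'w']\n#   crumbs.pop()  # -> 'w': undo the eastward move first, then 's'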
\nexits = {} # player.current_room.get_exits()\n\n\n# While all rooms have not been visited\nwhile len(visited) < len(room_graph):\n # Current room\n room = player.current_room\n # Get room exits\n if room.id not in exits:\n # add possible exits path\n exits[room.id] = room.get_exits()\n # mark current room as visited \n visited.add(room.id)\n\n # If there isn't an exit to traverse\n if len(exits[room.id]) <= 0:\n # go back to last/prev direction\n prev = prev_room.pop()\n player.travel(prev)\n # add direction to traversal path\n traversal_path.append(prev)\n else:\n # travel in next possible direction\n next_exit = exits[room.id].pop()\n player.travel(next_exit)\n # add direction to traversal path\n traversal_path.append(next_exit)\n # add invesre to prev_room\n prev_room.append(inverse[next_exit])\n\nprint(traversal_path)\n\n\n\n# TRAVERSAL TEST\nvisited_rooms = set()\nplayer.current_room = world.starting_room\nvisited_rooms.add(player.current_room)\n\nfor move in traversal_path:\n player.travel(move)\n visited_rooms.add(player.current_room)\n\nif len(visited_rooms) == len(room_graph):\n print(f\"TESTS PASSED: {len(traversal_path)} moves, {len(visited_rooms)} rooms visited\")\nelse:\n print(\"TESTS FAILED: INCOMPLETE TRAVERSAL\")\n print(f\"{len(room_graph) - len(visited_rooms)} unvisited rooms\")\n\n\n\n# #######\n# # UNCOMMENT TO WALK AROUND\n# #######\n# player.current_room.print_room_description(player)\n# while True:\n# cmds = input(\"-> \").lower().split(\" \")\n# if cmds[0] in [\"n\", \"s\", \"e\", \"w\"]:\n# player.travel(cmds[0], True)\n# elif cmds[0] == \"q\":\n# break\n# else:\n# print(\"I did not understand that command.\")\n","sub_path":"projects/adventure/adv.py","file_name":"adv.py","file_ext":"py","file_size_in_byte":2627,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"227434818","text":"# -*- coding: utf-8 -*-\n# libolm python bindings\n# Copyright © 2015-2017 OpenMarket Ltd\n# Copyright © 2018 Damir Jelić <poljar@termina.org.uk>\n\"\"\"libolm Utility module.\n\nThis module contains utilities for olm.\nIt only contains the ed25519_verify function for signature verification.\n\nExamples:\n >>> alice = Account()\n\n >>> message = \"Test\"\n >>> signature = alice.sign(message)\n >>> signing_key = alice.identity_keys[\"ed25519\"]\n\n >>> ed25519_verify(signing_key, message, signature)\n\n\"\"\"\n\n# pylint: disable=redefined-builtin,unused-import\nfrom typing import AnyStr, Type\n\n# pylint: disable=no-name-in-module\nfrom _libolm import ffi, lib # type: ignore\n\nfrom ._compat import to_bytes\nfrom ._finalize import track_for_finalization\n\n\ndef _clear_utility(utility): # pragma: no cover\n # type: (ffi.cdata) -> None\n lib.olm_clear_utility(utility)\n\n\nclass OlmVerifyError(Exception):\n \"\"\"libolm signature verification exception.\"\"\"\n\n\nclass _Utility(object):\n # pylint: disable=too-few-public-methods\n \"\"\"libolm Utility class.\"\"\"\n\n _buf = None\n _utility = None\n\n @classmethod\n def _allocate(cls):\n # type: (Type[_Utility]) -> None\n cls._buf = ffi.new(\"char[]\", lib.olm_utility_size())\n cls._utility = lib.olm_utility(cls._buf)\n track_for_finalization(cls, cls._utility, _clear_utility)\n\n @classmethod\n def _check_error(cls, ret):\n # type: (int) -> None\n if ret != lib.olm_error():\n return\n\n raise OlmVerifyError(\"{}\".format(\n ffi.string(lib.olm_utility_last_error(\n cls._utility)).decode(\"utf-8\")))\n\n @classmethod\n def _ed25519_verify(cls, key, message, signature):\n # type: 
(Type[_Utility], AnyStr, AnyStr, AnyStr) -> None\n if not cls._utility:\n cls._allocate()\n\n byte_key = to_bytes(key)\n byte_message = to_bytes(message)\n byte_signature = to_bytes(signature)\n\n cls._check_error(\n lib.olm_ed25519_verify(cls._utility, byte_key, len(byte_key),\n byte_message, len(byte_message),\n byte_signature, len(byte_signature)))\n\n\ndef ed25519_verify(key, message, signature):\n # type: (AnyStr, AnyStr, AnyStr) -> None\n \"\"\"Verify an ed25519 signature.\n\n Raises an OlmVerifyError if verification fails.\n\n Args:\n key(str): The ed25519 public key used for signing.\n message(str): The signed message.\n signature(bytes): The message signature.\n \"\"\"\n return _Utility._ed25519_verify(key, message, signature)\n","sub_path":"olm/utility.py","file_name":"utility.py","file_ext":"py","file_size_in_byte":2594,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"460105637","text":"import copy\nimport datetime\nimport random\n\nfrom functools import reduce\n\nimport math\nfrom multiprocessing.pool import Pool\n\nimport scipy.stats\n\nfrom multiprocessing import cpu_count\n\nfrom larger_simulation.node import Node\nfrom larger_simulation.priodict import priorityDictionary\n\n\"\"\"\nPrepare map and graph\n\"\"\"\n\ndef load_spot_graph(file):\n \"\"\"\n Load the graph structure from 'spot_graph'\n :param file: spot_graph\n :return:\n \"\"\"\n nodeset = {}\n\n lines = [line.rstrip(\"\\n\") for line in open(file, \"r\")]\n\n # initialize all the nodes\n for line in lines:\n tabs = line.split(\" \")\n id = int(tabs[0])\n node = Node(id, \"D\")\n parking_spots = set()\n if len(tabs) > 2:\n p_s = tabs[2].split(\",\")\n parking_spots = set(map(int, p_s))\n node.parking_spots = parking_spots\n nodeset[id] = node\n\n # set neighbors for all the nodes\n for line in lines:\n tabs = line.split(\" \")\n n_i = tabs[1].split(\",\")\n neighbors_id = set(map(int, n_i))\n neighbors = []\n for n in neighbors_id:\n neighbors.append(nodeset[n])\n id = int(tabs[0])\n nodeset[id].neighbors = set(neighbors)\n\n return nodeset\n\n\ndef generate_map(perctge, total_spots):\n \"\"\"\n Generate a random map by perctage\n :param perctge: percentage of occupancy\n :return:\n \"\"\"\n # 0: empty 1: taken\n random_spot = random.sample(range(total_spots), int(round(total_spots * perctge)))\n spots = [0] * total_spots\n for i in random_spot:\n spots[i] = 1\n return spots\n\n\ndef generate_uniform_map(nodeset, perctge, total_spots):\n spot_map = [0] * total_spots\n\n nodeitems = [key for key, val in nodeset.items() if\n val.type == \"P\"]\n\n random_spot = random.sample(nodeitems, int(len(nodeitems) * perctge))\n\n for s in random_spot:\n spot_map[s] = 1\n\n return spot_map\n\n\ndef generate_gaussian_map(nodeset, all_pair, exit_node, sigma, perctge, total_spots):\n spot_map = [0] * total_spots\n spot_pdf = {}\n\n distance_distribution = {}\n\n # node list in every distance\n nodeitems = [(key, val) for key, val in nodeset.items() if val.type == \"D\"]\n for key, node in nodeitems:\n p = shortest_path(all_pair, node, exit_node)\n length = path_length(p)\n if length not in distance_distribution:\n distance_distribution[length] = []\n distance_distribution[length].append(node)\n\n exp = 0\n pdf = {}\n slots = {}\n total_parking = 0\n scale = max(distance_distribution.items(), key=lambda p: p[0])[0] / 2\n for key, val in distance_distribution.items():\n p = round(scipy.stats.norm(0, sigma).pdf(key / scale), 2)\n pdf[key] = p\n parking = set()\n for slot in 
val:\n parking.update(slot.empty_parking_spot(spot_map))\n slots[key] = parking\n total_parking += len(parking)\n exp += len(parking) * p\n\n occupancy = int(total_parking * perctge)\n # increment to comply with total occupancy percentage\n increment = (occupancy - exp) / total_parking\n\n # print(increment)\n\n random_spot = []\n for key, val in distance_distribution.items():\n if len(val) == 0:\n continue\n p = pdf[key] + increment\n if p <= 0:\n p = 0\n elif p > 1:\n p = 1\n n = int(round(len(slots[key]) * p))\n s = n if n <= len(slots[key]) else len(slots[key])\n random_spot += random.sample(slots[key], s)\n for v in val:\n spot_pdf[v.id] = p\n\n for s in random_spot:\n spot_map[s] = 1\n\n # print(\"Map:\")\n # for p in range(len(spot_map)):\n # if p % 27 == 0:\n # print(\"\\n\", end=\"\")\n # if nodeset[p].type == \"C\":\n # print(\"+\", end=\"\")\n # elif nodeset[p].type == \"D\":\n # print(\"+\", end=\"\")\n # elif nodeset[p].type == \"N\":\n # print(\"*\", end=\"\")\n # else:\n # if spot_map[p] == 1:\n # print(\"1\", end=\"\")\n # else:\n # print(\"0\", end=\"\")\n # print(\"\\n\\n\")\n # print(\"Id:\")\n # for p in range(len(spot_map)):\n # if p % 27 == 0:\n # print(\"\\n\", end=\"\")\n # if nodeset[p].type == \"C\":\n # print(p, end=\"\")\n # elif nodeset[p].type == \"D\":\n # print(p, end=\"\")\n # elif nodeset[p].type == \"N\":\n # print(\"*\", end=\"\")\n # else:\n # if spot_map[p] == 1:\n # print(p, end=\"\")\n # else:\n # print(p, end=\"\")\n # print(\"\\n\")\n\n return spot_map, spot_pdf\n\n\n\"\"\"\nPrepare shortest path cache\n\"\"\"\n\ndef dijkstra(G, start, end=None):\n\n D = {} # dictionary of final distances\n P = {} # dictionary of predecessors\n Q = priorityDictionary() # estimated distances of non-final vertices\n Q[start] = 0\n\n for v in Q:\n D[v] = Q[v]\n if v == end:\n break\n for w in G[v]:\n vwLength = D[v] + G[v][w]\n if w in D:\n if vwLength < D[w]:\n raise ValueError(\"Dijkstra: found better path to already-final vertex\")\n elif w not in Q or vwLength < Q[w]:\n Q[w] = vwLength\n P[w] = v\n return (D, P)\n\n\ndef dijkstra_path(Tree, start, end):\n Path = []\n while True:\n if end == start:\n break\n Path.append(end)\n end = Tree[end]\n Path.append(start)\n Path.reverse()\n return Path\n\n\ndef all_path(nodeset, graph):\n workers = cpu_count()\n x = math.ceil(len(nodeset) / workers)\n pool = Pool(processes=workers)\n args = []\n dict_nodeset = dict(nodeset)\n for i in range(workers):\n end = (i + 1) * x\n if end >= len(nodeset):\n partial_set = dict(nodeset[i * x:])\n else:\n partial_set = dict(nodeset[i * x: (i+1) * x])\n args.append((i, graph, dict_nodeset, partial_set))\n\n result = pool.map(single_work, args)\n pool.close()\n pool.join()\n\n all_pair = {}\n for d in result:\n for key, val in d.items():\n paths = []\n for v in val:\n path = []\n for n in v:\n path.append(dict_nodeset[n])\n paths.append(path)\n all_pair[key] = paths\n\n return all_pair\n\n\ndef single_work(param):\n i, graph, nodeset, partial_set = param\n count = 0\n all_pair = {}\n for k1, start in partial_set.items():\n all_pair[pair_key(start, start)] = [[start.id]]\n count += 1\n pecntg = round(count * 100 / len(partial_set), 1)\n if pecntg % 10 == 0:\n print(str(datetime.datetime.now()) + \" Processor\" + str(i) + \" \" + str(pecntg) + \"%\")\n G_copy = copy.deepcopy(graph)\n del G_copy[start.id]\n for neighbor in start.get_neighbors([\"D\", \"C\"], nodeset):\n del G_copy[neighbor.id][start.id]\n for neighbor in start.get_neighbors([\"D\", \"C\"], nodeset):\n D, Tree = 
dijkstra(G_copy, neighbor.id)\n for k2, end in nodeset.items():\n key = pair_key(start, end)\n if key not in all_pair:\n all_pair[key] = []\n if end == start:\n continue\n all_pair[key].append([start.id] + dijkstra_path(Tree, neighbor.id, end.id))\n print(\"Processor \" + str(i) + \" done \" + str(count))\n return all_pair\n\n\n\"\"\"\nShortest path from cache\n\"\"\"\n\n\ndef shortest_path(all_pair, start, end):\n if start == end:\n return [start]\n key = pair_key(start, end)\n if len(all_pair[key]) == 0:\n return []\n return min(all_pair[key], key=lambda p: path_length(p))\n\n\ndef shortest_path_by_direction(all_pair, start, end, next_node):\n \"\"\"\n Find the shortest path given the next node\n :param all_pair: paths between all pairs\n :param start:\n :param end:\n :param next_node:\n :return:\n \"\"\"\n key = pair_key(start, end)\n if key not in all_pair:\n raise Exception(\"Wrong key \" + key)\n all_paths = all_pair[key]\n tmp = [p for p in all_paths if p[1].id == next_node.id and start not in p[1:]]\n return min(tmp, key=lambda p: path_length(p))\n\n\ndef search_by_depth(all_pair, start, depth, d_cost, nodeset):\n \"\"\"\n Search all the nodes with limited search depth\n :param start:\n :param depth:\n :param d_cost:\n :param nodeset:\n :param all_pair:\n :return:\n \"\"\"\n search_result = {}\n for neighbor in start.neighbors:\n one_direction = []\n for k, end in nodeset.items():\n if start == end or end.type != \"D\":\n continue\n path = shortest_path_by_direction(all_pair, start, end, nodeset[neighbor])\n if path_length(path) * d_cost < depth:\n one_direction.append(path)\n search_result[nodeset[neighbor]] = one_direction\n return search_result\n\n\ndef total_cost(path, all_pair, exit_node, d_cost, w_cost, u_cost):\n \"\"\"\n Cost of final path\n :param path:\n :param all_pair: cache\n :param exit_node:\n :param d_cost:\n :param w_cost:\n :param u_cost:\n :return:\n \"\"\"\n cost = 0\n i = -2\n for j in range(len(path)):\n if j == 0 or j == len(path) - 1:\n cost += path[j].length / 2 * d_cost\n else:\n cost += d_cost * path[j].length\n if i >= 0 and path[i] == path[j]:\n cost += u_cost\n i += 1\n cost += w_cost * path_length(shortest_path(all_pair, path[-1], exit_node))\n return cost\n\n\ndef path_length(path):\n \"\"\"\n Path length\n :param path:\n :return:\n \"\"\"\n if len(path) <= 1:\n return 0\n return sum([p.length for p in path[1:-1]]) + (path[0].length + path[-1].length) / 2\n\n\n\"\"\"\nExecution details\n\"\"\"\n\ndef pair_key(start, end):\n \"\"\"\n Combine ids as key\n :param start:\n :param end:\n :return:\n \"\"\"\n return str(start.id) + \"_\" + str(end.id)\n\n\ndef gain_knowledge(knowledge, node, spot_map):\n \"\"\"\n Update knowledge after arriving a node\n :param knowledge:\n :param node:\n :param spot_map:\n :return:\n \"\"\"\n knowledge[node.id] = len(node.empty_parking_spot(spot_map))\n\n\ndef update_best_node(knowledge, all_pair, prev_path, nodeset, cur_node, exit_node, best_cost, best_node, d_cost, w_cost, u_cost):\n \"\"\"\n Recompute the best node and cost after every movement\n :param knowledge:\n :param prev_path:\n :param nodeset:\n :param cur_node:\n :param exit_node:\n :param d_cost:\n :param w_cost:\n :return:\n \"\"\"\n for i in range(len(knowledge)):\n if knowledge[i] > 0:\n walk_path = shortest_path(all_pair, nodeset[i], exit_node)\n drive_path = shortest_path(all_pair, cur_node, nodeset[i])\n\n walk_cost = path_length(walk_path) * w_cost\n drive_cost = path_length(drive_path) * d_cost\n uturn_cost = u_cost if is_uturn(prev_path, drive_path) 
else 0\n cost = drive_cost + walk_cost + uturn_cost\n if cost < best_cost:\n best_cost = cost\n best_node = nodeset[i]\n return best_cost, best_node\n\n\ndef is_uturn(prev_path, to_path):\n \"\"\"\n Return true if the next direction requires u turn\n :param prev_path:\n :param to_path:\n :return:\n \"\"\"\n if len(prev_path) > 1 and len(to_path) > 1 and prev_path[-2] == to_path[1]:\n return True\n return False\n\n\ndef all_node_visited(nodeset, knowledge):\n \"\"\"\n Test if all the node is visited\n :param nodeset:\n :param knowledge:\n :return:\n \"\"\"\n for key, val in nodeset.items():\n if knowledge[val.id] < 0:\n return False\n return True\n\n\ndef node_list_deep_copy(node_list):\n new_list = []\n for node in node_list:\n new_list.append(node.copy())\n return new_list\n\n\ndef candidates_probability(knowledge, spot_map, prev_path, candidate_paths, all_pair, exit_node, best_cost, d_cost, w_cost, u_cost, x):\n \"\"\"\n Calculate the not available probability\n :param knowledge:\n :param prev_path:\n :param candidate_paths:\n :param all_pair:\n :param exit_node:\n :param best_cost:\n :param d_cost:\n :param w_cost:\n :param u_cost:\n :return:\n \"\"\"\n time_pr = {}\n cost_map = {}\n for path in candidate_paths:\n candidate_node = path[-1]\n drive_path = path\n walk_path = shortest_path(all_pair, candidate_node, exit_node)\n\n drive_cost = path_length(drive_path) * d_cost\n walk_cost = path_length(walk_path) * w_cost\n uturn_cost = u_cost if is_uturn(prev_path, drive_path) else 0\n cost = drive_cost + walk_cost + uturn_cost\n if cost not in cost_map:\n cost_map[cost] = []\n cost_map[cost].append(candidate_node)\n if cost < best_cost:\n # better cost\n if knowledge[candidate_node.id] > 0:\n # known node with available parking spots\n p_not_exist = 0\n elif knowledge[candidate_node.id] > -1:\n # known node without available parking spots\n p_not_exist = 1\n else:\n # unknown node\n p_not_exist = unknown_probability(candidate_node, x, candidate_node.empty_parking_spot(spot_map))\n else:\n # no better cost\n p_not_exist = 1\n\n if cost not in time_pr:\n time_pr[cost] = []\n\n time_pr[cost].append(p_not_exist)\n return time_pr, cost_map\n\n\ndef unknown_probability(candidate_node, x, parking_spot):\n \"\"\"\n Return the probability of candidate node\n :param candidate_node:\n :param x:\n :param parking_spot:\n :return:\n \"\"\"\n if x == \"G\":\n # ground truth\n return 0 if len(parking_spot) > 0 else 1\n elif isinstance(x, float):\n # x\n return x if len(parking_spot) > 0 else 1 - x\n elif isinstance(x, dict):\n # historical\n return x[candidate_node.id]\n else:\n raise Exception(\"Wrong type x\")\n\n\ndef choice_expectation(knowledge, spot_map, all_pair, choices, exit_node, prev_path, best_cost, d_cost, w_cost, u_cost, x):\n \"\"\"\n Calculate expectation for each direction (denoted as next node)\n :param knowledge:\n :param spot_map:\n :param all_pair:\n :param choices:\n :param exit_node:\n :param prev_path:\n :param best_cost:\n :param d_cost:\n :param w_cost:\n :param u_cost:\n :param x:\n :return:\n \"\"\"\n exp = {}\n for next_node, candidates in choices.items():\n # saving time cumulative distribution function\n saving_time_pr_cdf = []\n # time_pr: {time:[not_available_pr]}\n # cost_map: {time:[node]}\n time_pr, cost_map = candidates_probability(knowledge, spot_map, prev_path, candidates, all_pair, exit_node, best_cost, d_cost, w_cost, u_cost, x)\n # sort by cost time\n items = sorted(time_pr.items(), key=lambda i: i[0])\n for time, p_list in items:\n p_equal_to_time = reduce(lambda 
x, y: x * y, p_list)\n # pr if all nodes are not available\n p_less_than_time = (1 - saving_time_pr_cdf[-1][1]) if len(saving_time_pr_cdf) > 0 else 1\n # cdf of saving time\n saving_time_pr_cdf.append((best_cost - time, 1 - (p_equal_to_time * p_less_than_time)))\n t = [saving_time_pr_cdf[0]] + [(y[0], y[1] - x[1]) for x, y in zip(saving_time_pr_cdf, saving_time_pr_cdf[1:])]\n # expectation of a choice\n exp[next_node] = sum(list(x*y for x, y in t))\n return exp\n\n\ndef execute(spot_map, nodeset, all_pair, knowledge, enter_node, exit_node, x, d_cost, w_cost, u_cost, default_best_cost, saving_threshold):\n\n cur_node = enter_node\n best_cost = default_best_cost\n best_node = enter_node\n prev_path = [enter_node]\n\n finished = False\n\n single_spot_time = math.ceil(22 * d_cost)\n\n gain_knowledge(knowledge, cur_node, spot_map)\n\n back_steps = 0\n\n penality_flag = False\n\n while not finished:\n # possible candidates of each direction\n choices = search_by_depth(all_pair, cur_node, best_cost, d_cost, nodeset)\n # saving time expectation of each direction\n exp = choice_expectation(knowledge, spot_map, all_pair, choices, exit_node, prev_path, best_cost, d_cost, w_cost, u_cost, x)\n\n # if len(prev_path) > 1 and cur_node.type == \"C\":\n # # no uturn on cross\n # del exp[prev_path[-2]]\n #\n # if len(prev_path) > 1 and prev_path[-2].type == \"C\":\n # # no uturn when passing cross\n # del exp[prev_path[-2]]\n\n # next_to_cross = False\n\n # for n, l in exp.items():\n # if n.type == \"C\":\n # next_to_cross = True\n #\n # if next_to_cross:\n # del exp[prev_path[-2]]\n\n if len(prev_path) > 1:\n del exp[prev_path[-2]]\n\n # best expected saving time\n next_node, best_saving = max(exp.items(), key=lambda t_e: t_e[1])\n\n if best_saving <= saving_threshold and len(best_node.empty_parking_spot(spot_map)) > 0:\n if cur_node == best_node:\n # arrive at best spot\n finished = True\n else:\n # head to known best spot\n\n path = shortest_path(all_pair, cur_node, best_node)\n if len(path) == 1 or len(path) == 0:\n print(cur_node.id, next_node.id, path[0].id)\n print(saving_threshold, best_saving, best_cost, next_node.id, best_node.id, exp)\n known_back_path = True\n for n in path:\n if knowledge[n.id] < 0:\n known_back_path = False\n break\n if known_back_path:\n cur_node = path[-1]\n prev_path += path[1:-1]\n finished = True\n else:\n cur_node = path[1]\n back_steps += 1\n else:\n cur_node = next_node\n\n # if cur_node == exit_node and penality_flag:\n # saving_threshold += single_spot_time\n #\n # if cur_node == exit_node:\n # penality_flag = True\n\n\n gain_knowledge(knowledge, cur_node, spot_map)\n prev_path.append(cur_node)\n best_cost, best_node = update_best_node(knowledge, all_pair, prev_path, nodeset, cur_node, exit_node, best_cost, best_node, d_cost, w_cost, u_cost)\n\n if all_node_visited(nodeset, knowledge):\n finished = True\n\n if len(prev_path) > 300:\n print(str(prev_path[-3]), str(prev_path[-2]), str(prev_path[-1]))\n raise Exception(\"Fall into infinite loop\")\n return best_node, prev_path, back_steps\n\n\ndef ground_truth(drive_nodeset, all_pair, spot_map, enter, exit, d_cost, w_cost):\n best_node = None\n prev_path = None\n best_cost = 1000000000000\n for key, node in drive_nodeset:\n if node == enter or len(node.empty_parking_spot(spot_map)) == 0:\n continue\n drive_path = shortest_path(all_pair, enter, node)\n walk_path = shortest_path(all_pair, node, exit)\n cost = path_length(drive_path) * d_cost + path_length(walk_path) * w_cost\n if cost < best_cost:\n best_node = node\n 
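# remember the incumbent: the candidate with the smallest drive+walk cost so far\n            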
best_cost = cost\n            prev_path = drive_path\n    return best_node, prev_path, 0\n\n","sub_path":"larger_simulation/search13.py","file_name":"search13.py","file_ext":"py","file_size_in_byte":19370,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"361530940","text":"fname = input(\"Enter file name: \")\ntry:\n    fh = open(fname)\nexcept:\n    print('Something wrong with the file')\n    exit()  # cannot continue without an open file handle\nlst = list()\nfor line in fh:\n    wil = line.split()\n    for i in wil:\n        if i in lst: continue\n        lst.append(i)\nlst.sort()\nprint(lst)\n","sub_path":"Exercise-08/exc-08-04.py","file_name":"exc-08-04.py","file_ext":"py","file_size_in_byte":256,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"180253524","text":"import os\nimport requests\nimport datetime\nimport time\nimport threading\n#Add the root directory to sys.path to solve the problem of not finding packages on the command line\nimport sys\nimport os\ncurPath = os.path.abspath(os.path.dirname(__file__))\nrootPath = os.path.split(curPath)[0]\nsys.path.append(rootPath)\n\nimport psutil as ps\nfrom UnitTestCase.Tools import Tools\n\n\nclass TestSelectRowData:\n    times = []\n    error = []\n    url = 'http://127.0.0.1:8050/'\n    upload_file = '_dash-update-component'\n    upload_url = url + upload_file\n    def get_post_data(self, path):\n        json_data = Tools.read_file(path)\n        return json_data\n    def get_select_response(self, path):\n        post_data = self.get_post_data(path)\n        response = Tools.post_method2(TestSelectRowData.upload_url, post_data)\n        return response\n    def select(self):\n        path = os.getcwd() + '/../UnitTestCase/distinct_labels_asc.json'\n        response = self.get_select_response(path)\n        response_time = float(response.elapsed.microseconds)/1000\n        self.times.append(response_time)\n        if response.status_code != 200:\n            TestSelectRowData.error.append(\"0\")\n    def get_cpu_load_avg(self):\n        # Get the average system load\n        load_avg = ps.getloadavg()\n        print('load_avg=', load_avg)\n        return load_avg\n    def get_cpu_percent(self):\n        # Get CPU usage\n        cpu_percent = ps.cpu_percent()\n        print('cpu_percent=', cpu_percent)\n        return cpu_percent\n    def get_cpu_stats(self):\n        # Get CPU statistics\n        cpu_stats = ps.cpu_stats()\n        print('cpu_stats=', cpu_stats)\n        return cpu_stats\n    def get_cpu_freq(self):\n        # Get CPU frequency\n        cpu_freq = ps.cpu_freq()\n        print('cpu_freq=', cpu_freq)\n        return cpu_freq\n    def get_cpu_times_percent(self):\n        # Get CPU time percentages\n        cpu_times_percent = ps.cpu_times_percent()\n        print('cpu_times_percent=', cpu_times_percent)\n        return cpu_times_percent\n    def get_virtual_memory(self):\n        # Memory usage\n        virtual_memory = ps.virtual_memory()\n        print('virtual_memory=', virtual_memory)\n        return virtual_memory\n    def get_disk_usage(self):\n        # Gets disk partition information\n        disk_usage = ps.disk_usage('/')\n        print('disk_usage=', disk_usage)\n        return disk_usage\n    def get_disk_io_counters(self):\n        # Get IO statistics\n        disk_io_counters = ps.disk_io_counters()\n        print('disk_io_counters=', disk_io_counters)\n        return disk_io_counters\n    def get_net_io_counter(self):\n        # Obtain network card IO statistics\n        net_io_counters = ps.net_io_counters()\n        print('net_io_counters=', net_io_counters)\n        return net_io_counters\n    def get_server_info(self):\n        self.get_cpu_freq()\n        self.get_cpu_load_avg()\n        self.get_cpu_percent()\n        self.get_cpu_stats()\n        self.get_cpu_times_percent()\n        self.get_virtual_memory()\n        self.get_disk_usage()\n        self.get_disk_io_counters()\n        self.get_net_io_counter()\n        time.sleep(3)\n    def 
thread_task(self):\n        threads = []\n        start_time = datetime.datetime.now()\n        print (\"request start_time %s \" % start_time)\n        print('Monitor metrics before performing performance tests')\n        self.get_server_info()\n        print('Start performing performance tests')\n        nub = 100\n        think_time = 0.1\n        for i in range(1, nub+1):\n            t = threading.Thread(target=self.select, args=())  # pass the bound method itself; calling select() here would fire the request at Thread creation time\n            threads.append(t)\n        for t in threads:\n            time.sleep(think_time)\n            t.setDaemon(True)\n            t.start()\n            t.join()\n        end_time = datetime.datetime.now()\n        print(\"request end_time %s \" % end_time)\n        time.sleep(0.1)\n        average_time = \"{:.3f}\".format(float(sum(self.times))/float(len(self.times)))\n        print (\"Average Response Time %s ms\" % average_time)\n        use_time = str(end_time - start_time)\n        hour = use_time.split(\":\").pop(0)\n        minute = use_time.split(\":\").pop(1)\n        second = use_time.split(\":\").pop(2)\n        totaltime = float(hour) * 60 * 60 + float(minute) * 60 + float(second)\n        print (\"Concurrent processing %s\" % nub)\n        print (\"use total time %s s\" % (totaltime - float(nub * think_time )))\n        print (\"fail request %s\" % self.error.count(\"0\"))\n        self.get_server_info()\n        print('The performance test execution ends')\nif __name__ == '__main__':\n    row = TestSelectRowData()\n    row.thread_task()\n    ","sub_path":"PerformanceTest/TestSelectRowData.py","file_name":"TestSelectRowData.py","file_ext":"py","file_size_in_byte":4590,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"45518107","text":"import gtk\r\n\r\n\r\n\r\n\r\nclass PacketInformationWindow(gtk.Dialog):\r\n\r\n    def __init__(self, parent):\r\n        super(PacketInformationWindow, self).__init__(parent=parent)\r\n        self.set_title(\"Packet Information\")\r\n        # self.set_size_request(460,100)\r\n        self.row = 1\r\n\r\n        #Table\r\n        table = gtk.Table(6, 7)\r\n        table.set_row_spacings(5)\r\n        table.set_col_spacings(5)\r\n\r\n        valueLbl = gtk.Label(\"Value\")\r\n        table.attach(valueLbl, 0, 1, 0, 1)\r\n\r\n        textDescLbl = gtk.Label(\"Text Description\")\r\n        table.attach(textDescLbl, 1, 2, 0, 1)\r\n\r\n        valueTxt = gtk.TextView()\r\n        valueTxt.set_size_request(220, 20)\r\n        table.attach(valueTxt, 0, 1, self.row, self.row + 1)\r\n\r\n        textDescTxt = gtk.TextView()\r\n        textDescTxt.set_size_request(220, 20)\r\n        table.attach(textDescTxt, 1, 2, self.row, self.row + 1)\r\n\r\n        self.row += 1\r\n\r\n        def add_field(widget):\r\n            add_valueTxt = gtk.TextView()\r\n            add_valueTxt.set_size_request(220, 20)\r\n            table.attach(add_valueTxt, 0, 1, self.row, self.row + 1)\r\n\r\n            add_textDescTxt = gtk.TextView()\r\n            add_textDescTxt.set_size_request(220, 20)\r\n            table.attach(add_textDescTxt, 1, 2, self.row, self.row + 1)\r\n\r\n            table.remove(self.button)\r\n            self.OPEN_IMAGE = gtk.image_new_from_stock(gtk.STOCK_ADD, gtk.ICON_SIZE_BUTTON)\r\n            self.button = gtk.Button()\r\n            self.button.connect(\"clicked\", add_field)\r\n            self.button.set_image(self.OPEN_IMAGE)\r\n            table.attach(self.button, 3, 4, self.row + 1, self.row + 2)\r\n            fixed.show_all()\r\n\r\n            self.row += 1\r\n\r\n        self.OPEN_IMAGE = gtk.image_new_from_stock(gtk.STOCK_ADD, gtk.ICON_SIZE_BUTTON)\r\n        self.button = gtk.Button()\r\n        self.button.connect(\"clicked\", add_field)\r\n        self.button.set_image(self.OPEN_IMAGE)\r\n        table.attach(self.button, 3, 4, self.row + 1, self.row + 2)\r\n\r\n        fixed = gtk.Fixed()\r\n        fixed.put(table, 5, 10)\r\n        fixed.show_all()\r\n\r\n        self.vbox.add(fixed)\r\n        self.run()\r\n        
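# run() spins a nested GTK main loop until the dialog gets a response;\r\n        # once it returns, destroy() below can safely tear the window down.\r\n        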
self.destroy()\r\n","sub_path":"PacketInformationWindow.py","file_name":"PacketInformationWindow.py","file_ext":"py","file_size_in_byte":2156,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"346643616","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Aug 17 18:34:04 2021\n\n@author: domen\n\"\"\"\n\nimport threading, time, os\nfrom ctypes import windll, Structure, c_long, byref\nfrom pynput.keyboard import Key, Listener  # Key/Listener for the keyboard hooks below (pynput assumed from the API used)\n\nxy = [0,0]\n\nclass POINT(Structure):\n    _fields_ = [(\"x\", c_long), (\"y\", c_long)]\n\n\n\ndef queryMousePosition():\n    pt = POINT()\n    windll.user32.GetCursorPos(byref(pt))\n    return [pt.x,pt.y]\n\n\n    \ndef on_press(key):\n    global xy\n    if key == Key.ctrl_l:\n        xy = queryMousePosition()\n        print(xy)\n        return False\n    \n\n    \ndef on_release(key):\n    if key == Key.esc:\n        # Stop listener\n        print('-- Shutting down.')\n        # return False\nmode = 'full'\nprint('-- Calibrating positions')\nif mode == 'full':\n    start=8\nelse:\n    start=0\n\nwith open('user_settings.cfg') as f:\n    cfg_lines = f.readlines() \n    \nfor ind,val in enumerate(cfg_lines):\n    print('Please mark point #{}'.format(ind))\n    with Listener( \n        on_press=on_press,\n        on_release=on_release) as listener:\n        listener.join() \n    cfg_lines[ind+start] = '{} {}\\n'.format(xy[0],xy[1])\n    if ind == 7:\n        break\n\n\nwith open('user_settings.cfg','w') as f:\n    f.writelines(cfg_lines)\n    \n    \n","sub_path":"app_data/dev_scripts/calibrate_screen.py","file_name":"calibrate_screen.py","file_ext":"py","file_size_in_byte":1213,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"285643290","text":"# Accept number from user and return Smallest digit from that number.\r\n# input : 4529\r\n# output : 2\r\n\r\ndef SmallestDigit(no):\r\n    iDigit = 0\r\n    iMin =9\r\n    while(no > 0):\r\n        iDigit = int(no % 10)\r\n        if iDigit < iMin:\r\n            iMin = iDigit\r\n        no = int(no / 10)\r\n    return iMin\r\n    \r\ndef main():\r\n    print(\"Enter the number\")\r\n    value = int(input())\r\n    print(\"*************************\")\r\n    ret = SmallestDigit(value)\r\n    print(\"Smallest Digit is :\",ret)\r\n    print(\"*************************\")\r\n\r\nif __name__==\"__main__\":\r\n    main()","sub_path":"SmallestDigit.py","file_name":"SmallestDigit.py","file_ext":"py","file_size_in_byte":570,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"29601001","text":"import numpy as np\n\ndef sigmoid(x,deriv=False):\n    if deriv == True:\n        return x*(1-x)\n    return 1/(1+np.exp(-x))\n    \n# Training dataset:\nX = np.array([ [0,0,1],\n               [0,1,0],\n               [0,1,1],\n               [1,0,0],\n               [1,0,1],\n               [1,1,0],\n               [1,1,1] ])\n\ny = np.array([ [0],\n               [0],\n               [0],\n               [1],\n               [1],\n               [1],\n               [1] ])\n\nalpha = 10\nnp.random.seed(42)\nsynapse_0 = 2*np.random.random((3,1)) - 1\n\nfor iter in range(10000):\n\n    # Forward propagation\n    l0 = X\n    l1 = sigmoid(np.dot(l0,synapse_0))\n\n    # Backward propagation\n    l1_error = y - l1\n    l1_delta = l1_error * sigmoid(l1,True)\n\n    synapse_0 += alpha * np.dot(l0.T,l1_delta)\n\nprint(\"Output:\")\nprint(l1)\n","sub_path":"lib/feed_forward_2_layers.py","file_name":"feed_forward_2_layers.py","file_ext":"py","file_size_in_byte":818,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"94912772","text":"from socket import *\nfrom tkinter import *\nimport os\nimport _thread as th\ndef _quit():\n    rec.close()\n    send.close()\n    main.destroy()\n    os._exit(0)  # exit last; anything placed after os._exit() would never run\ndef addchat(event=None):\n    dat=txt.get()\n    
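# send the entry text to the hard-coded peer over UDP, then echo it locally\n    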
send.sendto(bytes(dat,'utf-8'),('192.168.1.7',13003))\n    chist.insert(END,'\\nYou: '+dat)\n    txt.delete(0,END)\n\nsend=socket(AF_INET,SOCK_DGRAM)\nrec=socket(AF_INET,SOCK_DGRAM)\nrec.bind(('',13003))\n\nmain=Tk()\nmain.bind('<Return>',addchat)\n\ntxt=Entry(main)\nchist=Text(main,height=50,width=50)\nquit=Button(main,text='Quit',command=_quit)\nquit.pack(side=BOTTOM,fill=X)\ntxt.pack(side=BOTTOM,fill=X)\nchist.pack(side=BOTTOM,fill=X)\n\ndef receive():\n    while True:\n        chist.insert(END,'\\nHe: '+rec.recvfrom(1024)[0].decode('utf-8'))  # decode the datagram instead of slicing its repr\nth.start_new_thread(receive,())\nmain.mainloop()\n","sub_path":"cheatchat gui.py","file_name":"cheatchat gui.py","file_ext":"py","file_size_in_byte":775,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"215308942","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Nov 21 21:42:56 2018\n\n@author: longbao\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport torch\nfrom torch import nn\nfrom torch import optim\ndevice = torch.device(\"cuda:0\")\n\n# input data set\nimport math\nimport torch.utils.data\ndef mkdataset(data_size, data_length=50, freq=60., noise=0.02):\n    \"\"\"\n    params\n    data_size : size of the dataset\n    data_length : time-series length of each sample\n    freq : frequency\n    noise : noise amplitude\n    returns\n    train_x : training data (the values at t=1,2,...,size-1)\n    train_t : training labels (the value at t=size)\n    \"\"\"\n    train_x = []\n    train_t = []\n\n    for offset in range(data_size):\n        train_x.append([[math.sin(2 * math.pi * (offset + i) / freq) + np.random.normal(loc=0.0, scale=noise)] for i in range(data_length)])\n        train_t.append([math.sin(2 * math.pi * (offset + data_length) / freq)])\n\n    return train_x, train_t\n\nx_train, y_train = mkdataset(1000)\nx_train = torch.tensor(x_train).view(1000,50,1)\ny_train = torch.tensor(y_train).view(1000,1)\n\ntrain_tensor = torch.utils.data.TensorDataset(x_train, y_train) \ntrain_loader = torch.utils.data.DataLoader(dataset = train_tensor, \n                                           batch_size = 128, shuffle = True)\n\n\n# implement model\n\nclass LSTM(nn.Module):\n    \n    def __init__(self, inputdim, hiddendim, outputdim):\n        super(LSTM, self).__init__()\n        self.rnn = nn.LSTM(input_size = inputdim,\n                           hidden_size = hiddendim,\n                           num_layers = 3,\n                           batch_first = True)\n        self.fc = nn.Linear(hiddendim, outputdim)\n    \n    def forward(self, x):\n        out,_ = self.rnn(x)\n        out = self.fc(out[:,-1,:]) \n        return out\n    \nmod = LSTM(1, 5, 1).to(device)\n\n# loss and optimizer\ncriterion = nn.MSELoss()\noptimizer = optim.SGD(mod.parameters(),\n                      lr =0.01)\n\n# training\nepochs = 100\nfor epoch in range(epochs):\n    running_loss = 0.0\n    for i, (inputs, labels) in enumerate(train_loader, 0):\n        optimizer.zero_grad()\n        \n        # forward + backward + optimize\n        inputs, labels = inputs.cuda(), labels.cuda()\n        outputs = mod.forward(inputs)\n        loss = criterion(outputs, labels)\n        loss.backward()\n        optimizer.step()\n        \n        # print statistics\n        running_loss += loss.item()\n        if i % 10 == 9:    # print every 10 mini-batches\n            print('%d th epoch, %d th batch, loss: %.4f'\n                  % (epoch + 1, i + 1, running_loss / 10))\n            running_loss = 0.0\n\n# test model\nx_test,_ = mkdataset(100)\nx_test = torch.tensor(x_test).view(100,50,1).to(device)\ny_test = mod.forward(x_test)\ny_test = y_test.cpu().data.numpy()\nplt.plot(y_test)","sub_path":"DataScienceProject/0.Test_Script/0.home_script/LSTM.py","file_name":"LSTM.py","file_ext":"py","file_size_in_byte":2876,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"469425634","text":"# Hw 10, pr2 for 
CS 5 gold, 2016\n# \n# The Board class from CS 5 Hw #10\n# for use as a starting point for\n# Hw#11, the Player class (and AI)\n#\n\nclass Board:\n \"\"\" a datatype representing a C4 board\n with an arbitrary number of rows and cols\n \"\"\"\n\n def __init__( self, width=7, height=6 ):\n \"\"\" the constructor for objects of type Board \"\"\"\n self.width = width\n self.height = height\n self.data = [[' ']*width for r in range(height)]\n \n def __repr__(self):\n \"\"\" this method returns a string representation\n for an object of type Board\n \"\"\"\n s = '' # the string to return\n for row in range( self.height ):\n s += '|' # add the spacer character\n for col in range( self.width ):\n s += self.data[row][col] + '|'\n s += '\\n'\n\n s += '--'*self.width # add the bottom of the board\n s += '-\\n'\n \n for col in range( self.width ):\n s += ' ' + str(col%10)\n\n s += '\\n'\n return s # the board is complete, return it\n\n def setBoard( self, moves, show=False ):\n \"\"\" sets the board according to a string\n of turns (moves), starting with 'X'\n if show==True, it prints each one\n \"\"\"\n nextCh = 'X'\n for move in moves:\n col = int(move)\n if self.allowsMove(col):\n self.addMove( col, nextCh )\n if nextCh == 'X': nextCh = 'O'\n else: nextCh = 'X'\n if show: print(self) \n\n def set( self, moves, show=True ):\n \"\"\" sets the board according to a string\n of turns (moves), starting with 'X'\n if show==True, it prints each one\n \"\"\"\n nextCh = 'X'\n for move in moves:\n col = int(move)\n if self.allowsMove(col):\n self.addMove( col, nextCh )\n if nextCh == 'X': nextCh = 'O'\n else: nextCh = 'X'\n if show: print(self)\n\n def clear( self ):\n for row in range(self.height):\n for col in range(self.width):\n self.data[row][col] = ' '\n\n def addMove( self, col, ox ):\n \"\"\" adds checker ox into column col\n does not need to check for validity...\n allowsMove will do that.\n \"\"\"\n row = self.height - 1\n while row >= 0:\n if self.data[row][col] == ' ':\n self.data[row][col] = ox\n return\n row -= 1\n\n def winsFor( self, ox ):\n \"\"\" checks if the board self is a win for ox \"\"\"\n for row in range( self.height ):\n for col in range( self.width ):\n if self.isOX( row, col, ox ) and \\\n self.isOX( row+1, col, ox ) and \\\n self.isOX( row+2, col, ox ) and \\\n self.isOX( row+3, col, ox ):\n return True\n if self.isOX( row, col, ox ) and \\\n self.isOX( row, col+1, ox ) and \\\n self.isOX( row, col+2, ox ) and \\\n self.isOX( row, col+3, ox ):\n return True\n if self.isOX( row, col, ox ) and \\\n self.isOX( row+1, col+1, ox ) and \\\n self.isOX( row+2, col+2, ox ) and \\\n self.isOX( row+3, col+3, ox ):\n return True\n if self.isOX( row, col, ox ) and \\\n self.isOX( row+1, col-1, ox ) and \\\n self.isOX( row+2, col-2, ox ) and \\\n self.isOX( row+3, col-3, ox ):\n return True\n return False\n \n def delMove( self, col ):\n \"\"\" removes the checker from column col \"\"\"\n for row in range( self.height ):\n # look for the first nonempty row\n if self.data[row][col] != ' ':\n # put in the checker\n self.data[row][col] = ' '\n return\n # it's empty, just return\n return\n \n def allowsMove( self, col ):\n \"\"\" returns True if a move to col is allowed\n in the board represented by self\n returns False otherwise\n \"\"\"\n if col < 0 or col >= self.width:\n return False\n return self.data[0][col] == ' '\n\n def isFull( self ):\n \"\"\" returns True if the board is completely full \"\"\"\n for col in range( self.width ):\n if self.allowsMove( col ):\n return False\n return 
True\n\n def gameOver( self ):\n \"\"\" returns True if the game is over... \"\"\"\n if self.isFull() or self.winsFor('X') or self.winsFor('O'):\n return True\n return False\n\n def isOX( self, row, col, ox ):\n \"\"\" checks if the spot at row, col is legal and ox \"\"\"\n if 0 <= row < self.height:\n if 0 <= col < self.width: # legal...\n if self.data[row][col] == ox:\n return True\n return False","sub_path":"kevinFiles/CS5Black/hw11/Board.py","file_name":"Board.py","file_ext":"py","file_size_in_byte":5053,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"239653149","text":"#!/usr/bin/env python3\n\"\"\"\n..\n Copyright: 2017 Twinleaf LLC\n Author: kornack@twinleaf.com\n\nLog data!\n\n\"\"\"\n\nimport tldevice\nimport argparse\n\nparser = argparse.ArgumentParser(prog='tio_log', \n description='Very simple logging utility.')\n\nparser.add_argument(\"url\", \n nargs='?', \n default='tcp://localhost/',\n help='URL: tcp://localhost')\nparser.add_argument(\"logfile\", \n nargs='?', \n default='log.tsv',\n help='Log filename: log.tsv')\nparser.add_argument(\"--cmd\", \n action='append', \n default=[],\n type=lambda kv: kv.split(\":\"), \n help='Commands to be run on start; rpc:val')\nargs = parser.parse_args()\n\ndevice = tldevice.Device(url=args.url, commands=args.cmd)\n\nfile = open(args.logfile,'w') \n\nfor row in device.data.stream_iter():\n rowstring = \"\\t\".join(map(str,row))+\"\\n\"\n file.write(rowstring)\n\n\n","sub_path":"examples/tio_log.py","file_name":"tio_log.py","file_ext":"py","file_size_in_byte":1027,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"616442638","text":"# NLP Pkgs\nimport spacy \nnlp = spacy.load('en_core_web_sm')\n# Pkgs for Normalizing Text\nfrom spacy.lang.en.stop_words import STOP_WORDS\nfrom string import punctuation\n# Import Heapq for Finding the Top N Sentences\nfrom heapq import nlargest\nfrom collections import Counter\n\n\ndef text_summarizer(raw_docx):\n raw_text = raw_docx\n docx = nlp(raw_text)\n stopwords = list(STOP_WORDS)\n \n keyword = []\n\n pos_tag = ['PROPN', 'ADJ', 'NOUN', 'VERB']\n for token in docx:\n if(token.text in stopwords or token.text in punctuation):\n continue\n if(token.pos_ in pos_tag):\n keyword.append(token.text)\n\n freq_word = Counter(keyword)\n\n max_freq = Counter(keyword).most_common(1)[0][1]\n for word in freq_word.keys(): \n freq_word[word] = (freq_word[word]/max_freq)\n freq_word.most_common(5)\n\n sent_strength={}\n for sent in docx.sents:\n for word in sent:\n if word.text in freq_word.keys():\n if sent in sent_strength.keys():\n sent_strength[sent]+=freq_word[word.text]\n else:\n sent_strength[sent]=freq_word[word.text]\n\n summarized_sentences = nlargest(3, sent_strength, key=sent_strength.get)\n\n final_sentences = [ w.text for w in summarized_sentences ]\n summary = ' '.join(final_sentences)\n\n return summary\n ","sub_path":"spacy_summarization.py","file_name":"spacy_summarization.py","file_ext":"py","file_size_in_byte":1364,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"215145773","text":"#take a list, and write a program that prints out all the elements of the list that are less than 5\n\nlistA = [1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 4, 3]\nlistB = []\ncountValue = 0\ncomparisonValue = 0\t\n\nwhile countValue < len(listA): #Every time this runs, 1 is added to countValue. 
For as long as countValue is less than the number of entries in the list, this continues to run\n\tcomparisonValue = listA[countValue] #Pulls a number from listA in position countValue\n\tif comparisonValue < 5: #Compares the value pulled from the list to see if it is less than 5\n\t\tlistB.append(comparisonValue)\t#if the compared value is less than 5, it is appended to listB\n\tcountValue = countValue + 1\t\t#add 1 to countValue to continue the loop\t\n\nprint(listB) #print listB which is all of the values from listA that are less than 5\n\n#Ask user for a number, return only numbers from the list that are smaller than that number\n\nuserNumber = int(input(\"Please enter a number\"))\n\nprint([number for number in listA if number < userNumber])","sub_path":"Beginner-Python/One hit wonders/listlessthanten.py","file_name":"listlessthanten.py","file_ext":"py","file_size_in_byte":1019,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"71425801","text":"\"\"\" Using dictionaries, evaluate a text and build a dictionary with the lowercase\nletters as keys and, as each value, the list of positions where that letter\nappears in the text.\n\nInput : Arroceria\nOutput : {\n    'a': [0, 8],\n    'r': [1, 2, 6],\n    'o': [3],\n    'c': [4],\n    'e': [5],\n    'i': [7]\n}\n\"\"\"\n\nfrom json import dump\n\npalabra = input(\"Input: \").lower()\noutput = {}\n\nfor i, char in enumerate(palabra):\n    if char not in output.keys():\n        # output[char] = [i]\n        output.update({char: [i]})\n    else:\n        output[char].append(i)\n\nwith open('ej5.json', 'w') as file:\n    dump(output, file, indent=1)\n","sub_path":"2021-1/s10/ej5/ej5.py","file_name":"ej5.py","file_ext":"py","file_size_in_byte":669,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"363009786","text":"# -*- coding: utf-8 -*-\n\nimport ez_setup\nez_setup.use_setuptools()\n\nfrom setuptools import setup\n#from setuptools_scm import get_version\n\n\n#git_version = get_version() # root='..', relative_to=__file__)\n\nversion = {}\nwith open(\"pygcgen/version.py\") as fh:\n    exec(fh.read(), version)\n\nsetup(\n    name=version['__title__'],\n    version=version['__version__'],\n    license=version['__license__'],\n\n    # get the version number from git using setuptools_scm\n    # use_scm_version = True,\n\n    keywords=version['__summary__'],\n    description='Generate changelog based on tags, issues and '\n                'merged pull requests on GitHub.',\n    long_description=\"This package started as a conversion from \" \\\n                     \"ruby to python of the \" \\\n                     \"'GitHub Changelog Generator' \" \\\n                     \"(https://github.com/skywinder/github-changelog-generator).\",\n    classifiers=[\n        \"Development Status :: 4 - Beta\",\n        \"Environment :: Console\",\n        \"Intended Audience :: Developers\",\n        \"License :: OSI Approved :: MIT License\",\n        \"Programming Language :: Python\",\n        \"Programming Language :: Python :: 3.5\",\n        \"Topic :: Software Development :: Documentation\",\n    ],\n\n    author=version['__author__'],\n    author_email=version['__email__'],\n    maintainer=version['__author__'],\n    maintainer_email=version['__email__'],\n\n    url=version['__uri__'],\n\n    setup_requires=['setuptools_scm', 'setuptools_scm_git_archive', ],\n    install_requires=[\"agithub\", \"python-dateutil\"],\n\n    packages=[version['__title__']],\n    # data_files=[\n    #     ('..', [\n    #         'README.md',\n    #         'LICENSE',\n    #         'CHANGELOG.md',\n    #         '.pygcgen_example'\n    #     ]),\n    # ],\n    exclude_package_data={\n        '.': ['.gitignore', ]\n    },\n\n    entry_points={\n        
'console_scripts': ['pygcgen = pygcgen.__main__:run', ],\n #'gui_scripts': ['pygcgenw = pygcgen.__main__:run_gui', ],\n },\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1934,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"186956871","text":"from .extensions import db\n\nclass Message(db.Model):\n id = db.Column(db.String(), primary_key = True)\n content = db.Column(db.String(500))\n\n send_message = db.relationship('Send_message', backref = 'message', lazy=True)\n\nclass Session(db.Model):\n id = db.Column(db.String(50), primary_key = True)\n\n send_message = db.relationship('Send_message', backref = 'session', lazy=True)\n\nclass Participant(db.Model):\n id = db.Column(db.String(50), primary_key = True)\n\nsend_to_participant = db.Table('send_to_participant',\n db.Column('send_message_id', db.Integer, db.ForeignKey('send_message.id'), primary_key=True),\n db.Column('participant_id', db.String(50), db.ForeignKey('participant.id'), primary_key=True)\n)\n\nclass Send_message(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n application_id = db.Column(db.Integer)\n session_id = db.Column(db.ForeignKey('session.id'))\n message_id = db.Column(db.ForeignKey('message.id'))\n\n participants = db.relationship(\n 'Participant',\n secondary=send_to_participant,\n lazy=True,\n backref=db.backref('send_message',lazy=True)\n )\n\n def to_json(self):\n participants = []\n for participant in self.participants:\n participants.append(participant.id)\n\n message = Message.query.get(self.message_id)\n \n json = {\n 'application_id' : self.application_id,\n 'session_id' : self.session_id,\n 'message_id' : self.message_id,\n 'participants' : participants,\n 'content' : message.content\n }\n\n return json\n\n\n\n","sub_path":"app_structure/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1623,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"165014477","text":"d = float(input(\"Quantos km:\"))\n\nif d <200:\n p = 0.50*d\nelse:\n p = 100 + 0.45*(d-200)\n \nn1= p\nt =2 # Numero de casas\nr = int(n1 * 10**t)/10**t\nprint(r)","sub_path":"backup/user_264/ch25_2020_01_19_18_29_54_082821.py","file_name":"ch25_2020_01_19_18_29_54_082821.py","file_ext":"py","file_size_in_byte":160,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"240187516","text":"import math\n\ndef IsPrime(m):\n if m < 2:\n return False\n elif m == 2:\n return True\n else:\n i = 2\n temp = int(math.sqrt(m))\n while i <= temp:\n if m%i == 0:\n return False\n i += 1\n\n return True\n\nn = input()\nn = int(n)\nprime = []\nfor i in range(n):\n if IsPrime(i):\n prime.append(i)\n\nk = 0\nprime_count = 0\nwhile k+1 < len(prime):\n d = prime[k+1] - prime[k]\n if d == 2:\n prime_count += 1\n k += 1\n\nprint(prime_count)\n\n\n\n","sub_path":"PAT_1007.py","file_name":"PAT_1007.py","file_ext":"py","file_size_in_byte":525,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"49792140","text":"\"\"\" Add CLI support to start MIP rare disease RNA\"\"\"\n\nimport logging\n\nimport click\n\nfrom cg.apps import hk, lims, tb\nfrom cg.apps.environ import environ_email\nfrom cg.apps.mip import MipAPI\nfrom cg.apps.mip.fastq import FastqHandler\nfrom cg.cli.workflow.get_links import get_links\nfrom cg.cli.workflow.mip.store import store as store_cmd\nfrom cg.cli.workflow.mip_rna.deliver 
import CASE_TAGS, SAMPLE_TAGS\nfrom cg.cli.workflow.mip_rna.deliver import deliver as deliver_cmd\nfrom cg.meta.deliver import DeliverAPI\nfrom cg.meta.workflow.mip_rna import AnalysisAPI\nfrom cg.store import Store\n\nLOG = logging.getLogger(__name__)\n\n\n@click.group(\"mip-rna\")\n@click.pass_context\ndef mip_rna(context: click.Context):\n \"\"\"Rare disease RNA workflow\"\"\"\n context.obj[\"db\"] = Store(context.obj[\"database\"])\n hk_api = hk.HousekeeperAPI(context.obj)\n lims_api = lims.LimsAPI(context.obj)\n context.obj[\"tb\"] = tb.TrailblazerAPI(context.obj)\n deliver = DeliverAPI(\n context.obj, hk_api=hk_api, lims_api=lims_api, case_tags=CASE_TAGS, sample_tags=SAMPLE_TAGS,\n )\n context.obj[\"api\"] = AnalysisAPI(\n db=context.obj[\"db\"],\n hk_api=hk_api,\n tb_api=context.obj[\"tb\"],\n lims_api=lims_api,\n deliver_api=deliver,\n )\n context.obj[\"rna_api\"] = MipAPI(\n context.obj[\"mip-rd-rna\"][\"script\"], context.obj[\"mip-rd-rna\"][\"pipeline\"]\n )\n\n\n@mip_rna.command()\n@click.option(\"-c\", \"--case\", \"case_id\", help=\"link all samples for a case\")\n@click.argument(\"sample_id\", required=False)\n@click.pass_context\ndef link(context: click.Context, case_id: str, sample_id: str):\n \"\"\"Link FASTQ files for a SAMPLE_ID\"\"\"\n store = context.obj[\"db\"]\n link_objs = get_links(store, case_id, sample_id)\n\n for link_obj in link_objs:\n LOG.info(\n \"%s: %s link FASTQ files\", link_obj.sample.internal_id, link_obj.sample.data_analysis,\n )\n\n if \"mip + rna\" in link_obj.sample.data_analysis.lower():\n mip_fastq_handler = FastqHandler(context.obj, context.obj[\"db\"], context.obj[\"tb\"])\n context.obj[\"api\"].link_sample(\n mip_fastq_handler,\n case=link_obj.family.internal_id,\n sample=link_obj.sample.internal_id,\n )\n\n\n@mip_rna.command()\n@click.option(\"-d\", \"--dry\", is_flag=True, help=\"print command to console\")\n@click.option(\"-e\", \"--email\", help=\"email to send errors to\")\n@click.option(\"-p\", \"--priority\", type=click.Choice([\"low\", \"normal\", \"high\"]))\n@click.option(\"-sw\", \"--start-with\", help=\"start mip from this program.\")\n@click.argument(\"case_id\")\n@click.pass_context\ndef run(\n context: click.Context,\n case_id: str,\n dry: bool = False,\n priority: str = None,\n email: str = None,\n start_with: str = None,\n):\n \"\"\"Run the analysis for a case\"\"\"\n tb_api = context.obj[\"tb\"]\n rna_api = context.obj[\"rna_api\"]\n case_obj = context.obj[\"db\"].family(case_id)\n\n if case_obj is None:\n LOG.error(\"%s: case not found\", case_id)\n context.abort()\n\n if tb_api.analyses(family=case_obj.internal_id, temp=True).first():\n LOG.warning(\"%s: analysis already running\", case_obj.internal_id)\n return\n\n email = email or environ_email()\n kwargs = dict(\n config=context.obj[\"mip-rd-rna\"][\"mip_config\"],\n case=case_id,\n priority=priority,\n email=email,\n dryrun=dry,\n start_with=start_with,\n )\n if dry:\n command = rna_api.build_command(**kwargs)\n LOG.info(\" \".join(command))\n else:\n rna_api.run(**kwargs)\n tb_api.mark_analyses_deleted(case_id=case_id)\n tb_api.add_pending(case_id, email=email)\n LOG.info(\"MIP run started!\")\n\n\n@mip_rna.command(\"config-case\")\n@click.option(\"-d\", \"--dry\", is_flag=True, help=\"Print config to console\")\n@click.argument(\"case_id\")\n@click.pass_context\ndef config_case(context: click.Context, case_id: str, dry: bool = False):\n \"\"\"Generate a config for the case_id\"\"\"\n case_obj = context.obj[\"db\"].family(case_id)\n\n if not case_obj:\n 
LOG.error(\"Case %s not found\", case_id)\n context.abort()\n\n # MIP formatted pedigree.yaml config\n config_data = context.obj[\"api\"].config(case_obj, pipeline=\"mip-rna\")\n\n if dry:\n print(config_data)\n else:\n # Write to trailblazer root dir / case_id\n out_path = context.obj[\"tb\"].save_config(config_data)\n LOG.info(\"saved config to: %s\", out_path)\n\n\nmip_rna.add_command(store_cmd)\nmip_rna.add_command(deliver_cmd)\n","sub_path":"cg/cli/workflow/mip_rna/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":4515,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"125450714","text":"from sympy.abc import *\nfrom sympy import *\n\n\n\n# ИНИЦИАЛИЗАЦИЯ СИМВОЛОВ\n\n# Время\nt = symbols('t') \n# Индексы\ni, P, C = symbols('i, P, C', cls=Idx) \n# (проекция вилки на e_z, проекция вилки на e_wheel, радиус, масса)\nh, d, r = symbols('h,d,r') \n# Тензор инерции\n# J = symbols('J') # J = IndexedBase('J') \n# Углы\npsi=IndexedBase('psi');\nbeta = IndexedBase('beta');\nalpha = symbols('alpha');\ntheta = IndexedBase('theta');\n#моменты инерции\nW = IndexedBase('W'); T = IndexedBase('T') \n# элементы матрицы моментов инерций для разных тел (ВРЕМЕННО)\na,b,c = symbols('a,b,c')\n\n\n#СЛОВАРИ И ИХ СОДЕРЖИМОЕ пример словаря: {'Аоексей': 21}\n\nm = {} # словарь для масс\ne, omega, v = {}, {}, {} # вектора, омега, скорость\nS, P, C, D = {}, {}, {}, {} # точки\neq = {} # словарь со всякими выражениями\ndelta = {}; \nnu = {} # псевдоскорости nu[1] и nu[2]\nA = {} # части уравнения Д'Аламбера лагранжа для разных тел\nvelocity = {} # скорость\nJ = {} # моменты инерции для разных тел (осевые?)\nK = {} # кинетический момент для тела K = I_z*omega\ndelta_r = {} # виртуальное перемещение для центра масс?\nM = {} # момент сил\nF = {} # силы, действующие на тело\nomega_delta = {} # виртуальны поворот\ncoeff = {} # коэффиценты у Д'Аламюера-Лагранжа\n\n# Виртуальные перемещения\ndelta['x'], delta['y'], delta['alpha'] = symbols('delta_x, delta_y, delta_alpha')\ndelta['theta'] = IndexedBase('delta_theta') \ndelta['psi'] = IndexedBase('delta_psi');\n# Псевдоскорости (взамен ẋ и ẏ)\nnu[1], nu[2] = symbols('nu1, nu2') \nm['platform'], m['wheel'] = symbols('m1, m2')\n\n\n#ЗАВИСИМОСТИ\n\nx = x(t); y = y(t); alpha = alpha(t);\nnu[1] = nu[1](t); nu[2] = nu[2](t);\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"variables.py","file_name":"variables.py","file_ext":"py","file_size_in_byte":2575,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"467277777","text":"import string\n\n\ndef render(board):\n \"\"\" This function returns a string containing the current state of the board \"\"\"\n\n schema = \"\"\n headers = \" \"\n alphabet = list(string.ascii_uppercase)\n\n alphabet.reverse()\n\n i = 0\n for line in board:\n line_txt = \"\"\n headers += alphabet.pop().__add__(\" \")\n\n line_txt += str(i + 1).__add__(' ' * (i + 1))\n for stone in line:\n line_txt += \"⬡ \" if stone == 0 else \"⬢ \"\n\n schema += line_txt.__add__(\"\\n\")\n\n i = i + 1\n\n return headers.__add__(\"\\n\") + schema\n","sub_path":"src/render/render.py","file_name":"render.py","file_ext":"py","file_size_in_byte":570,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"467842231","text":"from django import forms\nfrom django.contrib.auth.forms import UserCreationForm\nfrom Quiniela.models import *\n__author__ = 'anyul.rivas'\n\n\nclass PronosticoForm(forms.ModelForm):\n goles_equipo_a = 
forms.IntegerField(min_value=0, max_value=10)\n goles_equipo_b = forms.IntegerField(min_value=0, max_value=10)\n\n class Meta:\n model = Pronostico\n exclude = [\"puntos\"]\n widgets = {\n \"usuario\": forms.HiddenInput(),\n \"partido\": forms.HiddenInput()\n }\n\n\nclass UsuarioForm(UserCreationForm):\n pass\n\n\nclass PartidoForm(forms.ModelForm):\n\n class Meta:\n model = Partido\n exclude = [\"tipo_partido\", \"equipo_ganador\", \"fecha\", \"equipo_a\", \"equipo_b\"]","sub_path":"Quiniela/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":714,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"612123","text":"import pygame\n\nwhite = (255, 255, 255)\nred = (255, 0, 0)\nblack = (0, 0, 0)\npygame.init()\nclock = pygame.time.Clock()\ngameDisplay = pygame.display.set_mode((800, 600))\nw, h = gameDisplay.get_size()\npygame.display.set_caption(\"GAME\")\npygame.mouse.set_cursor(*pygame.cursors.broken_x)\ngameExit = False\n\nscore = 0\nnew_score = 0\nbullets = 13\n\n\nclass Target:\n def __init__(self, speed, x, lwr):\n self.image = pygame.image.load('targ.jpeg')\n self.image = pygame.transform.scale(self.image, (75, 75))\n self.targetPos = self.image.get_rect()\n self.targetPos[0] = x\n self.lower = lwr\n self.hit = 0\n if lwr:\n self.targetPos[1] = 100\n gameDisplay.blit(self.image, self.targetPos)\n if lwr:\n self.speed = [speed, 0]\n else:\n self.speed = [-speed, 0]\n\n def move(self):\n gameDisplay.blit(self.image, self.targetPos)\n\n if self.targetPos[0] >= gameDisplay.get_size()[0] and self.lower == 1:\n self.targetPos[0] = 0\n\n elif self.targetPos[0] < -75:\n self.targetPos[0] = 800\n\n self.targetPos.move_ip(self.speed)\n\n def is_hit(self, pos):\n if self.hit == 0 and self.targetPos.collidepoint(pos):\n self.hit = 1\n self.image.fill(white)\n return 1\n return 0\n\n\ndef draw_text(surf, text, size, x, y):\n font_name = pygame.font.match_font('arial')\n font = pygame.font.Font(font_name, size)\n text_surface = font.render(text, True, black)\n text_rect = text_surface.get_rect()\n text_rect.midtop = (x, y)\n surf.blit(text_surface, text_rect)\n\n\ntargets = []\nfor i in range(12):\n lower = i % 2\n spd = 4\n if lower:\n spd = 6\n targ = Target(spd, i * 75, lower)\n targets.append(targ)\n\n\nwhile not gameExit:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n gameExit = True\n elif event.type == pygame.MOUSEBUTTONUP:\n scores = 0\n if bullets > 0:\n for targ in targets:\n scores += targ.is_hit(pygame.mouse.get_pos())\n bullets -= 1\n new_score += scores\n\n gameDisplay.fill(white)\n\n for targ in targets:\n targ.move()\n\n if new_score > score:\n score = new_score\n\n draw_text(gameDisplay, str(\"Score: {}\".format(score)), 50, 400, 550)\n draw_text(gameDisplay, str(\"Bullets: {}\".format(bullets)), 25, 45, 600-25)\n clock.tick(60)\n pygame.display.update()\n\n","sub_path":"target.py","file_name":"target.py","file_ext":"py","file_size_in_byte":2506,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"561218239","text":"import numpy as np\nfrom sys import argv\nfrom matplotlib import pyplot as plt\nfrom phonecal import raw, plot, io\nfrom phonecal.general import bin_centers, weighted_mean, Rsquare\nfrom glob import glob\nfrom scipy.optimize import curve_fit\n\nfolder_main = argv[1]\nfolders = glob(folder_main+\"/*iso*\")\nisos = np.tile(np.nan, len(folders))\ngains = isos.copy()\ngainerrs = gains.copy()\nRONs = gains.copy()\nRONerrs = 
RONs.copy()\n\nfor i,folder in enumerate(folders):\n isos[i] = int(folder.split(\"iso\")[-1])\n print(isos[i], end=\" \")\n first_image = glob(folder+\"/*/*.dng\")[0]\n isos[i] = io.load_exif(first_image)[\"EXIF ISOSpeedRatings\"].values[0]\n print(isos[i])\n try:\n gains[i], gainerrs[i], RONs[i], RONerrs[i] = np.load(folder+\"/gain_ron.npy\")\n except FileNotFoundError:\n continue\n\ninvgains = 1/gains\ninvgainerrs = invgains**2 * gainerrs\n\ndef model(iso, slope, offset, knee):\n iso2 = np.copy(iso)\n results = np.tile(knee * slope + offset, len(iso2))\n results[iso2 < knee] = iso2[iso2 < knee] * slope + offset\n return results\n\ndef model_err(iso, popt, pcov):\n iso2 = np.copy(iso)\n results = np.tile(popt[2]**2 * pcov[0,0] + popt[0]**2 * pcov[2,2] + pcov[1,1], len(iso2))\n results[iso2 < popt[2]] = iso2[iso2 < popt[2]]**2 * pcov[0,0] + pcov[1,1]\n results = np.sqrt(results)\n return results\n\nind = np.where(~np.isnan(gains))\npopt, pcov = curve_fit(model, isos[ind], invgains[ind], p0=[0.1, 0.1, 200], sigma=invgainerrs[ind])\n\nirange = np.arange(0, 1850, 3)\ninvgain_fit = model(irange, *popt)\nerr_fit = model_err(irange, popt, pcov)\ngain_fit = 1/invgain_fit\ngain_err_fit = err_fit / invgain_fit**2\n\nfit_measured = model(isos, *popt)\nR2 = Rsquare(invgains[ind], fit_measured[ind])\n\ninvRONs = invgains * RONs\ninvRONerrs = np.sqrt(invgains**2 * RONerrs**2 + RONs**2 * invgainerrs**2)\n\nLUT_iso = np.arange(0, 2000, 1)\nLUT_invgain = model(LUT_iso, *popt)\nLUT_invgain_err = model_err(LUT_iso, popt, pcov)\nLUT_gain = 1/LUT_invgain\nLUT_gain_err = LUT_invgain_err\nLUT = np.stack([LUT_iso, LUT_gain, LUT_gain_err])\nnp.save(\"results/gain_new/LUT_exif.npy\", LUT)\n\nfor xmax in (1850, 250):\n plt.figure(figsize=(7,5), tight_layout=True)\n plt.errorbar(isos, invgains, yerr=invgainerrs, fmt=\"o\", c=\"k\")\n plt.plot(irange, invgain_fit, c=\"k\", label=f\"slope: {popt[0]:.4f}\\noffset: {popt[1]:.4f}\\nknee: {popt[2]:.1f}\")\n plt.fill_between(irange, invgain_fit-err_fit, invgain_fit+err_fit, color=\"0.5\",\n label=f\"$\\sigma$ slope: {np.sqrt(pcov[0,0]):.4f}\\n$\\sigma$ offset: {np.sqrt(pcov[1,1]):.4f}\\n$\\sigma$ knee: {np.sqrt(pcov[2,2]):.1f}\")\n plt.xlabel(\"ISO\")\n plt.ylabel(\"$1/G$ (ADU/e$^-$)\")\n plt.xlim(0, xmax)\n plt.ylim(0, 5)\n plt.title(f\"$R^2 = {R2:.4f}$\")\n plt.legend(loc=\"lower right\")\n plt.savefig(f\"results/gain_new/iso_invgain_{xmax}_exif.png\")\n plt.show()\n plt.close()\n\n plt.figure(figsize=(7,5), tight_layout=True)\n plt.errorbar(isos, gains, yerr=gainerrs, fmt=\"o\", c=\"k\")\n plt.plot(irange, gain_fit, c=\"k\", label=f\"slope: {popt[0]:.4f}\\noffset: {popt[1]:.4f}\\nknee: {popt[2]:.1f}\")\n plt.fill_between(irange, gain_fit-gain_err_fit, gain_fit+gain_err_fit, color=\"0.5\",\n label=f\"$\\sigma$ slope: {np.sqrt(pcov[0,0]):.4f}\\n$\\sigma$ offset: {np.sqrt(pcov[1,1]):.4f}\\n$\\sigma$ knee: {np.sqrt(pcov[2,2]):.1f}\")\n plt.xlabel(\"ISO\")\n plt.ylabel(\"$G$ (e$^-$/ADU)\")\n plt.xlim(0, xmax)\n plt.ylim(0, 5)\n plt.title(f\"$R^2 = {R2:.4f}$\")\n plt.legend(loc=\"upper right\")\n plt.savefig(f\"results/gain_new/iso_gain_{xmax}_exif.png\")\n plt.show()\n plt.close()\n\n plt.figure(figsize=(7,5), tight_layout=True)\n plt.errorbar(isos, RONs, yerr=RONerrs, fmt=\"o\", c=\"k\")\n plt.xlabel(\"ISO\")\n plt.ylabel(\"RON (e$^-$)\")\n plt.xlim(0, xmax)\n plt.savefig(f\"results/gain_new/iso_RON_{xmax}_exif.png\")\n plt.show()\n plt.close()\n\n plt.figure(figsize=(7,5), tight_layout=True)\n plt.errorbar(isos, invRONs, yerr=invRONerrs, fmt=\"o\", c=\"k\")\n 
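    # note: invRONs = invgains * RONs, i.e. read noise in electrons divided by the
    # gain in e-/ADU, which expresses RON in ADU; e.g. RON = 4 e- at G = 2 e-/ADU plots as 2 ADU.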
plt.xlabel(\"ISO\")\n plt.ylabel(\"RON/G (ADU)\")\n plt.xlim(0, xmax)\n plt.savefig(f\"results/gain_new/iso_invRON_{xmax}_exif.png\")\n plt.show()\n plt.close()\n","sub_path":"legacy/gain/gain_iso_relation_with_exif.py","file_name":"gain_iso_relation_with_exif.py","file_ext":"py","file_size_in_byte":4148,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"502429112","text":"from django.conf.urls import url\n\nfrom upday.modules.tip.views import tip_views\n\nurlpatterns = [\n url(\n r'^create$',\n tip_views.CreateView.as_view(),\n name='CreateView'\n ),\n\n url(\n r'^delete$',\n tip_views.DeleteView.as_view(),\n name='DeleteView'\n ),\n url(\n r'^detail$',\n tip_views.PreviewView.as_view(),\n name='PreviewView'\n ),\n # 插入组件\n url(\n r'^insert-component$',\n tip_views.InsertComponentView.as_view(),\n name='InsertComponentView'\n ),\n]\n","sub_path":"upday/modules/tip/console_urls.py","file_name":"console_urls.py","file_ext":"py","file_size_in_byte":561,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"598397287","text":"import numpy as np\r\nimport json\r\n\r\n\r\ndef p3_angle(a, b, c):\r\n a = np.array(a)[[0, 1]]\r\n b = np.array(b)[[0, 1]]\r\n c = np.array(c)[[0, 1]]\r\n\r\n ba = a - b\r\n bc = c - b\r\n\r\n cosine_angle = np.dot(ba, bc) / (np.linalg.norm(ba) * np.linalg.norm(bc))\r\n angle = np.arccos(cosine_angle)\r\n\r\n return np.degrees(angle)\r\n\r\n\r\ndef p2_diff(a, b):\r\n a = np.array(a)[[0, 1]]\r\n b = np.array(b)[[0, 1]]\r\n return a - b\r\n\r\n\r\nclass MyEncoder(json.JSONEncoder):\r\n def default(self, obj):\r\n if isinstance(obj, np.integer):\r\n return int(obj)\r\n elif isinstance(obj, np.floating):\r\n return float(obj)\r\n elif isinstance(obj, np.ndarray):\r\n return obj.tolist()\r\n else:\r\n return super(MyEncoder, self).default(obj)\r\n","sub_path":"anal_poses/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":793,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"141158382","text":"import pandas as pd\nimport numpy as np\n\nfrom recsys import mappers\nfrom recsys.asserters import *\n\nANY_ITEM1 = 10\nANY_ITEM2 = 20\nANY_ITEM3 = 30\nANY_ITEM4 = 40\n\n\ndef test_create_map_is_correct():\n # given\n\n input_series = pd.Series([ANY_ITEM1, ANY_ITEM1, ANY_ITEM2, ANY_ITEM3, ANY_ITEM1, ANY_ITEM4])\n expected_map = {ANY_ITEM1: 0, ANY_ITEM2: 1, ANY_ITEM3: 2, ANY_ITEM4: 3}\n expected_invmap = {0: ANY_ITEM1, 1: ANY_ITEM2, 2: ANY_ITEM3, 3: ANY_ITEM4}\n\n # when\n actual_map, actual_invmap = mappers.create_maps(input_series)\n\n # that\n assert expected_map == actual_map\n assert expected_invmap == actual_invmap\n\n\ndef test_apply_map_on_column_is_correct():\n # given\n input_series = pd.Series([ANY_ITEM1, ANY_ITEM1, ANY_ITEM2, ANY_ITEM3, ANY_ITEM1, ANY_ITEM4])\n col_map = {ANY_ITEM1: 0, ANY_ITEM2: 1, ANY_ITEM3: 2, ANY_ITEM4: 3}\n expected_mapped_col = np.array([0, 0, 1, 2, 0, 3])\n\n # when\n actual_mapped_col = mappers.apply_map_on_column(input_series, col_map)\n\n # that\n assert np.all(expected_mapped_col == actual_mapped_col)\n\n\ndef test_how_to_teststamps_works():\n # given\n input_column = pd.Series([\"2013-01-01\", \"2015-03-01 14:00:00\"])\n expected_column = np.array([1356998400, 1425218400])\n # when\n actual_timestamps = mappers.to_timestamp(input_column)\n # that\n assert np.all(actual_timestamps == expected_column)\n\n\ndef test_if_map_log_maps_correctly():\n ANY_USER1 = \"David\"\n 
ANY_USER2 = \"Gilmour\"\n ANY_USER3 = \"Roger\"\n ANY_USER4 = \"Waters\"\n\n ANY_ITEM1 = \"The Wall\"\n ANY_ITEM2 = \"The Final Cut\"\n ANY_ITEM3 = \"The Dark Side of the Moon\"\n ANY_ITEM4 = \"Atom Heart Mother\"\n\n ANY_EVENT_TYPE1 = \"Like\"\n ANY_EVENT_TYPE2 = \"Purchase\"\n\n input_log = pd.DataFrame([[\"1970-01-01 1:00\", ANY_USER1, ANY_ITEM4, ANY_EVENT_TYPE1],\n [\"1970-01-01 1:01\", ANY_USER2, ANY_ITEM3, ANY_EVENT_TYPE2],\n [\"1970-01-01 1:02\", ANY_USER3, ANY_ITEM2, ANY_EVENT_TYPE2],\n [\"1970-01-01 1:03\", ANY_USER4, ANY_ITEM1, ANY_EVENT_TYPE1]],\n columns=[\"Timestamp\", \"User\", \"Item\", \"Event\"])\n\n actual_mapped_log, _, _, _, _, _, _ = mappers.map_log(input_log, mapped_log_cleaner=lambda x: x)\n\n expected_mapped_log = pd.DataFrame([[3600, 0, 0, 0],\n [3660, 1, 1, 1],\n [3720, 2, 2, 1],\n [3780, 3, 3, 0]],\n columns=[\"timestamp\", \"user\", \"item\", \"event\"])\n assert_df(actual_mapped_log, expected_mapped_log)\n","sub_path":"recsys/tests/test_mappers.py","file_name":"test_mappers.py","file_ext":"py","file_size_in_byte":2637,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"565449354","text":"import PIL\nimport threading\nimport queue\nfrom Xlib import X, display, Xutil\nfrom builtins import property\n\nfrom g13gui.bitwidgets.displaydevice import DisplayDevice\n\n\nclass X11DisplayDevice(DisplayDevice, threading.Thread):\n def __init__(self, name=\"BitWidgets\"):\n threading.Thread.__init__(self, daemon=True)\n self._queue = queue.Queue()\n self._running = False\n self._name = name\n\n @property\n def name(self):\n return self._name\n\n @name.setter\n def name(self, name):\n self._name = name\n if self._win:\n self._setName()\n\n def _setName(self):\n self._win.set_wm_name(self._name)\n self._win.set_wm_icon_name(self._name)\n self._win.set_wm_class('bitwidgets', self._name)\n\n def run(self):\n self._display = display.Display()\n self.createWindow()\n self._running = True\n\n while self._running:\n while self._display.pending_events():\n self._display.next_event()\n\n image = self._queue.get()\n if image is None:\n self._running = False\n self._display.close()\n return\n\n points = []\n for x in range(0, 160):\n for y in range(0, 42):\n if image.getpixel((x, y)) == 1:\n points.append((x, y))\n\n self._win.fill_rectangle(self._inversegc, 0, 0, 160, 42)\n self._win.poly_point(self._gc, X.CoordModeOrigin, points)\n\n def createWindow(self):\n self._screen = self._display.screen()\n self._win = self._screen.root.create_window(\n 0, 0, 160, 42, 2,\n self._screen.root_depth,\n X.InputOutput,\n X.CopyFromParent,\n background_pixel=self._screen.black_pixel,\n event_mask=(X.ExposureMask | X.StructureNotifyMask),\n colormap=X.CopyFromParent)\n self._gc = self._win.create_gc(\n foreground=self._screen.white_pixel,\n background=self._screen.black_pixel)\n self._inversegc = self._win.create_gc(\n foreground=self._screen.black_pixel,\n background=self._screen.white_pixel)\n\n self._setName()\n\n self._win.set_wm_normal_hints(\n flags=(Xutil.PPosition | Xutil.PSize | Xutil.PMinSize),\n min_width=160,\n min_height=42)\n self._win.map()\n\n @property\n def dimensions(self):\n return (160, 48)\n\n def update(self, image):\n if not self._running:\n raise RuntimeError('X11DisplayDevice is not running -- '\n 'cannot update.')\n\n self._queue.put(image)\n\n def shutdown(self):\n 
self._queue.put(None)\n","sub_path":"g13gui/bitwidgets/x11displaydevice.py","file_name":"x11displaydevice.py","file_ext":"py","file_size_in_byte":2748,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"452920405","text":"# Problem No.: 11438\n# Solver: Jinmin Goh\n# Date: 20220617\n# URL: https://www.acmicpc.net/problem/11438\n\nimport sys\nfrom math import log2\n\nsys.setrecursionlimit(10 ** 5)\n\n# dp table construncting function\ndef dp(graph, depth, ancestor, current, parent):\n depth[current] = depth[parent] + 1\n ancestor[current][0] = parent\n max_level = int(log2(10 ** 5 + 1))\n for i in range(1, max_level + 1):\n temp = ancestor[current][i - 1] # temp: current's 2 ** (i - 1) th ancestor\n ancestor[current][i] = ancestor[temp][i - 1] # current's 2 ** i th ancestor is same with current's 2 ** (i - 1) th ancestor's 2 ** (i - 1) th ancestor\n # dfs\n for node in graph[current]:\n if node != parent:\n dp(graph, depth, ancestor, node, current)\n return\n\ndef main():\n n = int(input())\n graph = {}\n # graph construction\n for _ in range(n - 1):\n a, b = map(int, sys.stdin.readline().split())\n if a not in graph:\n graph[a] = []\n graph[a].append(b)\n if b not in graph:\n graph[b] = []\n graph[b].append(a)\n # get query\n m = int(input())\n query = []\n for _ in range(m):\n a, b = map(int, sys.stdin.readline().split())\n query.append((a, b))\n \n # dp table construction\n depth = [0 for _ in range(n + 1)]\n depth[0] = -1\n ancestor = [[0 for _ in range(20)] for __ in range(n + 1)]\n dp(graph, depth, ancestor, 1, 0)\n \n # query loop\n for (a, b) in query:\n # set depth of a and b same\n if depth[a] != depth[b]:\n if depth[a] > depth[b]:\n a, b = b, a\n for i in range(int(log2(10 ** 5 + 1)), -1, -1):\n # if b's 2 ** i th ancestor's depth is bigger than a's depth, keep go up\n if depth[a] <= depth[ancestor[b][i]]:\n b = ancestor[b][i]\n # find LCA\n lca = a\n if a != b:\n for i in range(int(log2(10 ** 5 + 1)), -1, -1):\n if ancestor[a][i] != ancestor[b][i]:\n a = ancestor[a][i]\n b = ancestor[b][i]\n lca = ancestor[a][i]\n print(lca)\n return\n\nif __name__ == \"__main__\":\n main()","sub_path":"Solved/11438/11438.py","file_name":"11438.py","file_ext":"py","file_size_in_byte":2233,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"601958483","text":"import wrapped_flappy_bird as game\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport time\nimport os\nimport glob\nimport h5py\nt0 = time.time()\n\nALPHA = .7 # learning rate\nGAMMA = 0.95 # discount factor\n# EPISODES = 100_000 # 17 minute run time\n# EPISODES = 600_000 # 36 minute run time\n# EPISODES = 600_000 # 93 minute run time\n# EPISODES = 600_000 # 93 minute run time\n# EPISODES = 600_000*5.4 # 93 minute run time\nEPISODES = 3240000 # 93 minute run time\n# EPISODES = 10 # 17 minute run time\n# EPISODES = 10_000\n# EPISODES = 1000\n# SHOW_EVERY = 100_000\nSHOW_EVERY = 3240000\n# SHOW_EVERY = 1_000\n# SHOW_EVERY = 1\n\n# AFTER = 80_000\nAFTER = 0\n\n# Exploration settings\nepsilon = 1 # not a constant, qoing to be decayed\nSTART_EPSILON_DECAYING = 1\nEND_EPSILON_DECAYING = EPISODES//2\nepsilon_decay_value = epsilon/(END_EPSILON_DECAYING - START_EPSILON_DECAYING)\n\n# FLAP_EVERY = 17\n\nbin_count = [200, 410, 410, 10] # [20, 20]\n# bin_count = [220, 451, 451, 380, 10] # [20, 20]\nenv_state_high = np.array([250, 234, 234, 11])\nenv_state_low = np.array([30, -217, -217, -9])\nenv_number_of_actions = 2\n# bin_size = ([234 - 
-60, 200 - -200 ]) / bin_count\nbin_size = (env_state_high - env_state_low) / bin_count\n\n# q_table = np.random.uniform(low= -0.2, high=0.2, size=(bin_count[0],bin_count[1],2))\n# q_table = np.random.uniform(low= -0.1, high=0.0, size=(bin_count + [env_number_of_actions]))\n\n# q_table = np.random.uniform(low= -0.2, high=0.0, size=(bin_count + [env_number_of_actions]))\n\n# q_table[:,:,1] = np.random.uniform(low=-.5, high=0.0, size=(bin_count[0],bin_count[1])) # de-emphasize flap (avoid hitting ceiling)\n\n# q_table = np.load(f\"./qtables/{7078}-qtable.npy\")\n\n# hfr = h5py.File(f\"qtables/{6640}-qtable.h5\", 'r')\n# q_table = np.array(hfr.get('dataset_1'))\n# hfr.close()\n\nhfr = h5py.File(f\"qtables/qtable_long.h5\", 'r')\nq_table = np.array(hfr.get('dataset_1'))\nhfr.close()\n\n\ndef discretize_state(state):\n # print(state)\n # print(state - env.observation_space.low)\n discrete_state = (state - env_state_low) / bin_size\n # print(discrete_state)\n return tuple(discrete_state.astype(int))\n\nepisode_state_action_new_states = []\nframes_survived = []\nenv_max_measured_values = [-999, -999, -999, -999, -999]\nenv_min_measured_values = [999, 999, 999, 999, 999]\nbest_frames_survived = 0\nfor episode in range(EPISODES):\n game_state = game.GameState()\n total_frames = 0\n max_frames = 10000 # Can change this number according to yourself\n\n action = 0 # first action will always be nothing\n state, reward, done = game_state.frame_step(action, headless=True, desired_fps=16000)\n # print(\"starting state: \", state)\n\n action = 0 # first action will always be nothing\n state, reward, done = game_state.frame_step(action, headless=True, desired_fps=16000)\n # print(\"starting state: \", state)\n\n discrete_state = discretize_state(state)\n for frame in range(max_frames):\n\n try:\n action = np.argmax(q_table[discrete_state])\n\n # if np.random.random() > epsilon:\n # # Get action from Q table\n # action = np.argmax(q_table[discrete_state])\n # else:\n # # Get random action\n # roll = np.random.uniform(low=0.0, high=1.0)\n # if roll < 0.80: # do random action, with emphasis on doing nothing\n # action = 0\n # else:\n # action = 1\n\n # action = np.argmax(q_table[discrete_state])\n\n # if frame % FLAP_EVERY == 0: action = 1\n # else: action = 0\n except:\n print(state)\n # new_state, reward, done = game_state.frame_step(action, headless=False, desired_fps=10)\n if episode % SHOW_EVERY == 0 and episode > AFTER:\n new_state, reward, done = game_state.frame_step(action, headless=False, desired_fps=30)\n print(new_state, action)\n else:\n new_state, reward, done = game_state.frame_step(action, headless=True, desired_fps=16000)\n\n # if new_state[0] == 257.0:\n # pass\n # print(\"stop\")\n\n\n total_frames += 1\n if not done:\n # if new_state[0] < env_min_measured_values[0]:\n # env_min_measured_values[0] = new_state[0]\n # if new_state[1] < env_min_measured_values[1]:\n # env_min_measured_values[1] = new_state[1]\n # if new_state[2] < env_min_measured_values[2]:\n # env_min_measured_values[2] = new_state[2]\n # if new_state[3] < env_min_measured_values[3]:\n # env_min_measured_values[3] = new_state[3]\n #\n # if new_state[0] > env_max_measured_values[0]:\n # env_max_measured_values[0] = new_state[0]\n # if new_state[1] > env_max_measured_values[1]:\n # env_max_measured_values[1] = new_state[1]\n # if new_state[2] > env_max_measured_values[2]:\n # env_max_measured_values[2] = new_state[2]\n # if new_state[3] > env_max_measured_values[3]:\n # env_max_measured_values[3] = new_state[3]\n\n\n 
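            # Discretization sketch: with env_state_low = [30, -217, -217, -9] and
            # bin_size = [1.1, 1.1, 1.1, 2.0], a raw state (140, 0, 0, 0) becomes
            # indices ((140-30)/1.1, 217/1.1, 217/1.1, 9/2) -> (100, 197, 197, 4).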
new_discrete_state = discretize_state(new_state)\n\n episode_state_action_new_states.append((discrete_state, action, new_discrete_state))\n\n # # max_future_q = np.max(q_table[discrete_state]) # big mistake\n # max_future_q = np.max(q_table[new_discrete_state])\n # current_q = q_table[discrete_state][action]\n # new_q = (1 - ALPHA) * current_q + ALPHA * (reward + GAMMA * max_future_q)\n # q_table[discrete_state][action] = new_q\n elif done:\n # new_q = (1 - ALPHA) * current_q + ALPHA * (reward)\n # q_table[discrete_state][action] = new_q\n\n episode_state_action_new_states.reverse() # already not appending very very last faulty state (don't reach if not done above)\n last_flap_dealt_with = False\n\n if episode_state_action_new_states[0][0][1] > 0: upper_pipe_death = True\n else: upper_pipe_death = False\n\n # bird has died, update q values\n for idx, state_action_new_state in enumerate(episode_state_action_new_states):\n discrete_state_ = state_action_new_state[0]\n action_ = state_action_new_state[1]\n new_discrete_state_ = state_action_new_state[2]\n\n\n # idea behind this: if there was an upper pipe death, it was ACTION that caused that, versus no action, if lower pipe death\n # if upper_pipe_death == True:\n if last_flap_dealt_with == False and upper_pipe_death == True and action_ == 1: # deal with last flap if we haven't before and action = 1 = flap and we had upper_pipe_death\n max_future_q = np.max(q_table[new_discrete_state_])\n current_q = q_table[discrete_state_][action_]\n new_q = (1 - ALPHA) * current_q + ALPHA * (-1000 + GAMMA * max_future_q) # -1000 reward\n q_table[discrete_state_][action_] = new_q\n last_flap_dealt_with = True\n elif idx == 0 or idx == 1: # punish anything near ceiling, floor, or pipes\n max_future_q = np.max(q_table[new_discrete_state_])\n current_q = q_table[discrete_state_][action_]\n new_q = (1 - ALPHA) * current_q + ALPHA * (-1000 + GAMMA * max_future_q) # -1000 reward\n q_table[discrete_state_][action_] = new_q\n else: # else, normal case, just give +1 reward\n max_future_q = np.max(q_table[new_discrete_state_])\n current_q = q_table[discrete_state_][action_]\n new_q = (1 - ALPHA) * current_q + ALPHA * (1 + GAMMA * max_future_q) # +1 reward\n q_table[discrete_state_][action_] = new_q\n\n\n episode_state_action_new_states = [] # empty out saved states action state tuples\n\n print(\"Total Frames \", str(total_frames), \" for episode \", episode)\n if total_frames > best_frames_survived:\n best_frames_survived = total_frames\n # if total_frames > 4000: # save hard drive space\n # # np.save(f\"qtables/{total_frames}-qtable.npy\", q_table)\n # hfw = h5py.File(f\"qtables/{total_frames}-qtable.h5\", 'w')\n # hfw.create_dataset('dataset_1', data=q_table)\n # hfw.close()\n if total_frames >= 10000: # save hard drive space\n print(\"saving q table over 4000\")\n # np.save(f\"qtables/{total_frames}-qtable.npy\", q_table)\n # hfw = h5py.File(f\"qtables/{11111111}-qtable.h5\", 'w')\n hfw = h5py.File(f\"qtables/{total_frames}-qtable_long.h5\", 'w')\n hfw.create_dataset('dataset_1', data=q_table)\n hfw.close()\n print(\"q table done saving over 4000\")\n if episode == EPISODES-1: # save hard drive space\n print(\"saving q table\")\n # np.save(f\"qtables/{total_frames}-qtable.npy\", q_table)\n # hfw = h5py.File(f\"qtables/{11111111}-qtable.h5\", 'w')\n hfw = h5py.File(f\"qtables/qtable_long.h5\", 'w')\n hfw.create_dataset('dataset_1', data=q_table)\n hfw.close()\n print(\"q table done saving\")\n\n break\n\n discrete_state = new_discrete_state\n\n 
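    # One entry per episode: frames survived doubles as the score, and the list is
    # plotted (marker-only) after training as a crude learning curve.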
frames_survived.append(total_frames)\n\n # Decaying is being done every episode if episode number is within decaying range\n if END_EPSILON_DECAYING >= episode >= START_EPSILON_DECAYING:\n epsilon -= epsilon_decay_value\n\nprint(\" \")\nprint(\"best_frames_survived: \", best_frames_survived)\nt1 = time.time()\nprint(\"total time: \", t1-t0) # 9.764827251434326, 20,000 episodes, completely headless, 16000 FPS\n\nplt.plot(range(len(frames_survived)), frames_survived, linestyle='', marker='.')\nplt.show()\n\nprint(\"total frames survived = \", sum(frames_survived))\n\nprint(\"min frames survived: \", min(frames_survived) )\nprint(\"average frames survived: \", sum(frames_survived)/len(frames_survived) )\nprint(\"max frames survived: \", max(frames_survived))\n\nprint(\" \")\nprint(\"env_min_measured_values: \", env_min_measured_values)\nprint(\"env_max_measured_values: \", env_max_measured_values)","sub_path":"rl_script.py","file_name":"rl_script.py","file_ext":"py","file_size_in_byte":10370,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"142014045","text":"# Create your views here.\nfrom django.core.urlresolvers import reverse_lazy, reverse\nfrom django.http import Http404\nfrom django.http import HttpResponseRedirect\nfrom django.shortcuts import get_object_or_404, redirect\nfrom django.views.generic import ListView\nfrom django.views.generic.detail import DetailView\nfrom django.views.generic.edit import CreateView\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom tictactoe.forms import BattleForm, TurnForm\nfrom tictactoe.models import Battle, Player\n\n\nclass IndexView(CreateView):\n \"\"\" Index view \"\"\"\n form_class = BattleForm\n template_name = 'index.html'\n success_url = reverse_lazy('play')\n\n def form_valid(self, form):\n if form.cleaned_data.get('play_with_ia'):\n form.instance.ai_player = Player.objects.get_random()\n form.instance.first_player = Player.objects.get_random()\n self.object = form.save()\n\n self.request.session['battle'] = self.object.id.hex\n return HttpResponseRedirect(self.get_success_url())\n\n def get_success_url(self):\n return reverse('play')\n\n\nclass PlayView(CreateView):\n \"\"\" Turn creation view \"\"\"\n template_name = 'battle.html'\n form_class = TurnForm\n success_url = reverse_lazy('play')\n\n battle = None\n player = None\n\n def dispatch(self, request, *args, **kwargs):\n self.battle = get_object_or_404(Battle, pk=request.session.get('battle'))\n self.player = self.battle.get_next_player()\n\n if not self.battle.is_finished and self.player == self.battle.ai_player:\n self.player.make_ai_turn(battle=self.battle)\n self.battle.check_winner()\n return redirect('play')\n return super(PlayView, self).dispatch(request, *args, **kwargs)\n\n def get_form_kwargs(self):\n kwargs = super(PlayView, self).get_form_kwargs()\n kwargs['battle'] = self.battle\n return kwargs\n\n def form_valid(self, form):\n form.instance.battle = self.battle\n form.instance.player = self.player\n\n self.object = form.save()\n\n self.battle.check_winner()\n return HttpResponseRedirect(self.get_success_url())\n\n def get_context_data(self, **kwargs):\n context = super(PlayView, self).get_context_data(**kwargs)\n context['player'] = self.player\n context['battle'] = self.battle\n return context\n\n\nclass BattleBaseView(object):\n \"\"\" base battle view mixin \"\"\"\n # allow view unfinished battles?\n queryset = Battle.objects.filter(is_finished=True)\n\n\nclass BattlesListView(BattleBaseView, 
ListView):\n \"\"\"\"\"\"\n template_name = 'battles_list.html'\n paginate_by = 10\n\n\nclass BattleDetailView(BattleBaseView, DetailView):\n \"\"\" Battle replay page view \"\"\"\n template_name = 'battle_replay.html'\n\n def get_object(self, queryset=None):\n qs = self.get_queryset()\n try:\n return self.get_queryset().get(pk=self.kwargs.get('pk'))\n # uuid error handling\n except (ValueError, self.queryset.model.DoesNotExist):\n raise Http404(_(\"No %(verbose_name)s found matching the query\") %\n {'verbose_name': qs.model._meta.verbose_name})\n","sub_path":"tictactoe/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3200,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"631950956","text":"from django.contrib import admin\nfrom django.urls import path\nfrom . import views\nfrom api.views import getMessages, getAmbulances\n\nurlpatterns = [\n path('', views.homepage),\n path('index/', views.homepage),\n path('signUp/', views.signUp),\n path('signIn/', views.signIn),\n path('contact/', views.contactUs),\n path('ambulanceRegister/', views.ambulanceRegister),\n path('search/', views.searchPage),\n path('joinMeeting/', views.joinMeeting),\n path('sendMessage/', views.sendMessage),\n path('logout/', views.signOut),\n path('location/', views.location),\n path('patientUpdate/', views.patientUpdate),\n path('messages/', getMessages),\n path('showAmbulances/', views.showAmbulances),\n path('hospitalAmbulances/', getAmbulances),\n path('doctorSide/', views.doctorSide),\n path('todelete/', views.todelete)\n]\n","sub_path":"ambulanceSystem/home/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":851,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"101207374","text":"import turtle\n\n\ndef draw_triangle(painter, position):\n painter.penup()\n painter.setpos(position[0][0], position[0][1])\n painter.pendown()\n painter.setpos(position[1][0], position[1][1])\n painter.setpos(position[2][0], position[2][1])\n painter.setpos(position[0][0], position[0][1])\n\n\ndef half_triangle(one, two):\n new_x = (one[0] + two[0]) / 2\n new_y = (one[1] + two[1]) / 2\n return new_x, new_y\n\n\ndef serp_algorithm(position, depth, painter):\n draw_triangle(painter, position)\n if depth != 0:\n serp_algorithm([position[0],\n half_triangle(position[0], position[1]),\n half_triangle(position[0], position[2])],\n depth - 1, painter)\n\n serp_algorithm([position[1],\n half_triangle(position[0], position[1]),\n half_triangle(position[1], position[2])],\n depth - 1, painter)\n\n serp_algorithm([position[2],\n half_triangle(position[2], position[1]),\n half_triangle(position[0], position[2])],\n depth - 1, painter)\n\n\ndef main():\n painter = turtle.Turtle()\n painter.speed(12)\n position = [[-400, -300], [0, 300], [400, -300]]\n depth = raw_input(\"Input depth of triangle \")\n depth = int(depth)\n serp_algorithm(position, depth, painter)\n\nmain()\nturtle.done()\n","sub_path":"serpinski.py","file_name":"serpinski.py","file_ext":"py","file_size_in_byte":1421,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"286277480","text":"import time\nfrom datetime import datetime\nfrom requests import request, HTTPError\n\nfrom django.contrib.auth import get_user_model\nfrom django.core.files.base import ContentFile\n\nfrom bluebottle.clients import properties\n\nUSER_MODEL = get_user_model()\n\n\ndef user_from_request(strategy, backend, 
*args, **kwargs):\n user = strategy.request.user\n\n if user.is_authenticated():\n return {'user': strategy.request.user}\n\n\ndef fallback_email(strategy, backend, *args, **kwargs):\n if 'email' not in kwargs['details']:\n kwargs['details']['email'] = kwargs['uid']\n\n\ndef save_profile_picture(strategy, user, response, details, backend,\n is_new=False, *args, **kwargs):\n if is_new and backend.name == 'facebook':\n url = 'http://graph.facebook.com/{0}/picture'.format(response['id'])\n\n try:\n response = request('GET', url, params={'type': 'large'})\n response.raise_for_status()\n except HTTPError:\n pass\n else:\n if not user.picture:\n user.picture.save('{0}_fb_social.jpg'.format(user.username),\n ContentFile(response.content))\n user.save()\n\n\ndef refresh(strategy, social, *args, **kwargs):\n \"\"\"Refresh the facebook token, so that we get a long lived backend token.\"\"\"\n social.refresh_token(strategy)\n\n\ndef set_language(strategy, user, response, details,\n is_new=False, *args, **kwargs):\n supported_langauges = dict(properties.LANGUAGES).keys()\n\n try:\n language = response['locale'][:2]\n if language in supported_langauges:\n user.primary_language = language\n user.save()\n except KeyError:\n pass\n\n\ndef get_extra_facebook_data(strategy, user, response, details,\n is_new=False, *args, **kwargs):\n \"\"\"\n From Facebook we get the following properties with the 'public_profile'\n permission:\n id, name, first_name, last_name, link, gender, locale, age_range\n \"\"\"\n\n if not user.first_name:\n user.first_name = response.get('first_name', '')\n if not user.last_name:\n user.last_name = response.get('last_name', '')\n if not user.gender:\n user.gender = response.get('gender', '')\n\n fb_link = response.get('link', None)\n\n birthday = response.get('birthday', None)\n if birthday and not user.birthdate:\n birthdate = time.strptime(birthday, \"%m/%d/%Y\")\n user.birthdate = datetime.fromtimestamp(time.mktime(birthdate))\n\n if fb_link and len(fb_link) < 50:\n user.facebook = fb_link\n\n user.save()\n","sub_path":"bluebottle/auth/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2637,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"437591442","text":"import pika\nimport sys\n\nprint(\"[*] Starting topic log receiver\")\n\nconnection = pika.BlockingConnection(pika.ConnectionParameters('rabbitmq'))\n\nchannel = connection.channel()\n\nchannel.exchange_declare(\n exchange='topic_logs',\n exchange_type='topic'\n)\n\nresult = channel.queue_declare(\n queue='',\n exclusive=True\n)\n\nqueue_name = result.method.queue\n\nbinding_keys = sys.argv[1:]\nif not binding_keys:\n sys.stderr.write(\"Usage: %s [binding_key]...\\n\" % sys.argv[0])\n sys.exit(1)\n\nfor binding_key in binding_keys:\n channel.queue_bind(\n exchange='topic_logs',\n queue=queue_name,\n routing_key=binding_key\n )\n\nprint(\"[*] Waiting for logs. 
Press CTRL+C to quit\")\n\n\ndef callback(ch, method, properties, body):\n print(\"[*] %r:%r\" % (method.routing_key, body))\n\n\nchannel.basic_consume(\n queue=queue_name,\n on_message_callback=callback,\n auto_ack=True\n)\n\nchannel.start_consuming()\n","sub_path":"5-tutorial-five-python/receive_logs_topic/app/receive_logs_topic.py","file_name":"receive_logs_topic.py","file_ext":"py","file_size_in_byte":923,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"292230236","text":"import subprocess\n\ndef mask_tile(tile_id, input_tif):\n # mask the tcd tile to only have values greater than 30\n \n\tmasked_tile_30 = '--outfile={}.tif'.format(tile_id)\n\n\tcmd = [r'C:\\Program Files\\GDAL\\gdal_calc.py', '-A', input_tif, outfile, '--calc=\"A>30\"', '--NoDataValue=0', '-co', 'COMPRESS=LZW']\n\n\tsubprocess.check_call(cmd)\n\n\treturn masked_tile_30\n","sub_path":"gdal_calc_mask.py","file_name":"gdal_calc_mask.py","file_ext":"py","file_size_in_byte":358,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"200612943","text":"# coding: utf-8\nfrom django.test.testcases import TestCase\nfrom factories import LanguageFactory, ProjectFactory\n\n\nclass ProjectTestCase(TestCase):\n def test_list(self):\n for name in ('Python', 'C++', 'Java'):\n language = LanguageFactory.create(name=name)\n [ProjectFactory.create(language=language) for t in range(3)]\n\n response = self.client.get('/')\n self.assertEqual(response.status_code, 200)\n","sub_path":"django/cookiecutters/projects/tests/test_views.py","file_name":"test_views.py","file_ext":"py","file_size_in_byte":443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"264214440","text":"import numpy as np\r\nimport torch\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\nfrom torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence\r\nfrom torch_scatter import scatter_max\r\nfrom transformers import BertModel, BertTokenizer\r\n\r\ndef return_mask_lengths(ids):\r\n mask = torch.sign(ids).float()\r\n lengths = mask.sum(dim=1).long()\r\n return mask, lengths\r\n\r\ndef return_num(model):\r\n model_parameters = filter(lambda p: p.requires_grad, model.parameters())\r\n params = sum([np.prod(p.size()) for p in model_parameters])\r\n return params\r\n\r\ndef cal_attn(left, right, mask):\r\n mask = (1.0 - mask.float()) * -10000.0\r\n attn_logits = torch.matmul(left, right.transpose(-1, -2).contiguous())\r\n attn_logits = attn_logits + mask\r\n attn_weights = F.softmax(input=attn_logits, dim=-1)\r\n attn_outputs = torch.matmul(attn_weights, right)\r\n return attn_outputs, attn_logits\r\n\r\ndef gumbel_softmax(logits, tau=1, hard=False, eps=1e-20, dim=-1):\r\n # type: (Tensor, float, bool, float, int) -> Tensor\r\n\r\n gumbels = -(torch.empty_like(logits).exponential_() + eps).log() # ~Gumbel(0,1)\r\n gumbels = (logits + gumbels) / tau # ~Gumbel(logits,tau)\r\n y_soft = gumbels.softmax(dim)\r\n\r\n if hard:\r\n # Straight through.\r\n index = y_soft.max(dim, keepdim=True)[1]\r\n y_hard = torch.zeros_like(logits).scatter_(dim, index, 1.0)\r\n ret = y_hard - y_soft.detach() + y_soft\r\n else:\r\n # Re-parametrization trick.\r\n ret = y_soft\r\n return ret\r\n\r\nclass Embedding(nn.Module):\r\n def __init__(self, bert_model):\r\n super(Embedding, self).__init__()\r\n bert_embeddings = BertModel.from_pretrained(bert_model).embeddings\r\n self.word_embeddings = 
bert_embeddings.word_embeddings\r\n self.token_type_embeddings = bert_embeddings.token_type_embeddings\r\n self.position_embeddings = bert_embeddings.position_embeddings\r\n self.LayerNorm = bert_embeddings.LayerNorm\r\n self.dropout = bert_embeddings.dropout\r\n\r\n def forward(self, input_ids, token_type_ids=None, position_ids=None):\r\n if token_type_ids is None:\r\n token_type_ids = torch.zeros_like(input_ids)\r\n if position_ids is None:\r\n seq_length = input_ids.size(1)\r\n position_ids = torch.arange(seq_length, dtype=torch.long, device=input_ids.device)\r\n position_ids = position_ids.unsqueeze(0).expand_as(input_ids)\r\n\r\n words_embeddings = self.word_embeddings(input_ids)\r\n token_type_embeddings = self.token_type_embeddings(token_type_ids)\r\n position_embeddings = self.position_embeddings(position_ids)\r\n\r\n embeddings = words_embeddings + token_type_embeddings + position_embeddings\r\n embeddings = self.LayerNorm(embeddings)\r\n embeddings = self.dropout(embeddings)\r\n\r\n return embeddings\r\n\r\nclass ContextualizedEmbedding(nn.Module):\r\n def __init__(self, bert_model):\r\n super(ContextualizedEmbedding, self).__init__()\r\n bert = BertModel.from_pretrained(bert_model)\r\n self.embedding = bert.embeddings\r\n self.encoder = bert.encoder\r\n self.num_hidden_layers = bert.config.num_hidden_layers\r\n\r\n def forward(self, input_ids, attention_mask, token_type_ids=None):\r\n if token_type_ids is None:\r\n token_type_ids = torch.zeros_like(input_ids)\r\n\r\n seq_length = input_ids.size(1)\r\n position_ids = torch.arange(seq_length, dtype=torch.long, device=input_ids.device)\r\n position_ids = position_ids.unsqueeze(0).expand_as(input_ids)\r\n\r\n extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2).float()\r\n extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0\r\n head_mask = [None] * self.num_hidden_layers\r\n\r\n embedding_output = self.embedding(input_ids, position_ids=position_ids, token_type_ids=token_type_ids)\r\n encoder_outputs = self.encoder(embedding_output,\r\n extended_attention_mask,\r\n head_mask=head_mask)\r\n sequence_output = encoder_outputs[0]\r\n\r\n return sequence_output\r\n\r\nclass KLLoss(nn.Module):\r\n def __init__(self):\r\n super(KLLoss, self).__init__()\r\n\r\n def forward(self, P, Q):\r\n log_P = P.log()\r\n log_Q = Q.log()\r\n kl = (P * (log_P - log_Q)).sum(dim=-1).sum(dim=-1)\r\n return kl\r\n\r\nclass CustomLSTM(nn.Module):\r\n def __init__(self, input_size, hidden_size, num_layers, dropout, bidirectional=False):\r\n super(CustomLSTM, self).__init__()\r\n self.num_layers = num_layers\r\n self.hidden_size = hidden_size\r\n self.bidirectional = bidirectional\r\n self.dropout = nn.Dropout(dropout)\r\n if dropout > 0.0 and num_layers == 1:\r\n dropout = 0.0\r\n\r\n self.lstm = nn.LSTM(input_size=input_size, hidden_size=hidden_size,\r\n num_layers=num_layers, dropout=dropout,\r\n bidirectional=bidirectional, batch_first=True)\r\n\r\n def forward(self, input, input_lengths, state=None):\r\n batch_size, total_length, _ = input.size()\r\n\r\n input_packed = pack_padded_sequence(input, input_lengths,\r\n batch_first=True, enforce_sorted=False)\r\n\r\n self.lstm.flatten_parameters()\r\n output_packed, state = self.lstm(input_packed, state)\r\n\r\n output = pad_packed_sequence(output_packed, batch_first=True, total_length=total_length)[0]\r\n output = self.dropout(output)\r\n\r\n return output, state\r\n\r\n\r\nclass PosteriorEncoder(nn.Module):\r\n def __init__(self, embedding, emsize,\r\n nhidden, nlayers,\r\n nz, 
nzdim,\r\n dropout=0.0):\r\n super(PosteriorEncoder, self).__init__()\r\n\r\n self.embedding = embedding\r\n self.nhidden = nhidden\r\n self.nlayers = nlayers\r\n self.nz = nz\r\n self.nzdim = nzdim\r\n\r\n self.question_encoder = CustomLSTM(input_size=emsize,\r\n hidden_size=nhidden,\r\n num_layers=nlayers,\r\n dropout=dropout,\r\n bidirectional=True)\r\n\r\n self.question_linear = nn.Linear(2 * nhidden, 2 * nhidden)\r\n\r\n self.context_answer_encoder = CustomLSTM(input_size=emsize,\r\n hidden_size=nhidden,\r\n num_layers=nlayers,\r\n dropout=dropout,\r\n bidirectional=True)\r\n\r\n self.context_answer_linear = nn.Linear(2 * nhidden, 2 * nhidden)\r\n\r\n self.posterior_linear = nn.Linear(2 * 4 * nhidden, nz * nzdim)\r\n\r\n def forward(self, c_ids, q_ids, a_ids):\r\n c_mask, c_lengths = return_mask_lengths(c_ids)\r\n q_mask, q_lengths = return_mask_lengths(q_ids)\r\n\r\n # question enc\r\n q_embeddings = self.embedding(q_ids)\r\n q_hs, q_state = self.question_encoder(q_embeddings, q_lengths)\r\n q_h = q_state[0].view(self.nlayers, 2, -1, self.nhidden)[-1]\r\n q_h = q_h.transpose(0, 1).contiguous().view(-1, 2 * self.nhidden)\r\n\r\n # answer enc\r\n c_a_embeddings = self.embedding(c_ids, a_ids, None)\r\n # tag embedding\r\n\r\n c_a_hs, c_a_state = self.context_answer_encoder(c_a_embeddings, c_lengths)\r\n c_a_h = c_a_state[0].view(self.nlayers, 2, -1, self.nhidden)[-1]\r\n c_a_h = c_a_h.transpose(0, 1).contiguous().view(-1, 2 * self.nhidden)\r\n\r\n mask = q_mask.unsqueeze(1)\r\n q_attned_by_ca, _ = cal_attn(self.question_linear(c_a_h).unsqueeze(1), q_hs, mask)\r\n q_attned_by_ca = q_attned_by_ca.squeeze(1)\r\n\r\n mask = c_mask.unsqueeze(1)\r\n ca_attned_by_q, _ = cal_attn(self.context_answer_linear(q_h).unsqueeze(1), c_a_hs, mask)\r\n ca_attned_by_q = ca_attned_by_q.squeeze(1)\r\n\r\n h = torch.cat([q_h, q_attned_by_ca, c_a_h, ca_attned_by_q], dim=-1)\r\n\r\n posterior_z_logits = self.posterior_linear(h).view(-1, self.nz, self.nzdim).contiguous()\r\n posterior_z_prob = F.softmax(posterior_z_logits, dim=-1)\r\n posterior_z = gumbel_softmax(posterior_z_logits, hard=True)\r\n\r\n return posterior_z_prob, posterior_z\r\n\r\nclass PriorEncoder(nn.Module):\r\n def __init__(self, embedding, emsize,\r\n nhidden, nlayers,\r\n nz, nzdim,\r\n dropout=0):\r\n super(PriorEncoder, self).__init__()\r\n\r\n self.embedding = embedding\r\n self.nhidden = nhidden\r\n self.nlayers = nlayers\r\n self.nz = nz\r\n self.nzdim = nzdim\r\n\r\n self.context_encoder = CustomLSTM(input_size=emsize,\r\n hidden_size=nhidden,\r\n num_layers=nlayers,\r\n dropout=dropout,\r\n bidirectional=True)\r\n\r\n self.prior_linear = nn.Linear(2 * nhidden, nz * nzdim)\r\n\r\n def forward(self, c_ids):\r\n c_mask, c_lengths = return_mask_lengths(c_ids)\r\n\r\n c_embeddings = self.embedding(c_ids)\r\n _, c_state = self.context_encoder(c_embeddings, c_lengths)\r\n c_h = c_state[0].view(self.nlayers, 2, -1, self.nhidden)[-1]\r\n h = c_h.transpose(0, 1).contiguous().view(-1, 2 * self.nhidden)\r\n\r\n prior_z_logits = self.prior_linear(h).view(-1, self.nz, self.nzdim)\r\n prior_z_prob = F.softmax(prior_z_logits, dim=-1)\r\n prior_z = gumbel_softmax(prior_z_logits, hard=True)\r\n\r\n return prior_z_prob, prior_z\r\n\r\nclass AnswerDecoder(nn.Module):\r\n def __init__(self, embedding, emsize,\r\n nhidden, nlayers,\r\n dropout=0.0):\r\n super(AnswerDecoder, self).__init__()\r\n\r\n self.embedding = embedding\r\n\r\n self.context_lstm = CustomLSTM(input_size=4 * emsize,\r\n hidden_size=nhidden,\r\n num_layers=nlayers,\r\n 
dropout=dropout,\r\n bidirectional=True)\r\n\r\n self.start_linear = nn.Linear(2 * nhidden, 1)\r\n self.end_linear = nn.Linear(2 * nhidden, 1)\r\n self.ls = nn.LogSoftmax(dim=1)\r\n\r\n def forward(self, init_state, c_ids):\r\n batch_size, max_c_len = c_ids.size()\r\n c_mask, c_lengths = return_mask_lengths(c_ids)\r\n\r\n H = self.embedding(c_ids, c_mask)\r\n U = init_state.unsqueeze(1).repeat(1, max_c_len, 1)\r\n G = torch.cat([H, U, H * U, torch.abs(H - U)], dim=-1)\r\n M, _ = self.context_lstm(G, c_lengths)\r\n\r\n start_logits = self.start_linear(M).squeeze(-1)\r\n end_logits = self.end_linear(M).squeeze(-1)\r\n\r\n start_end_mask = (c_mask == 0)\r\n masked_start_logits = start_logits.masked_fill(start_end_mask, -10000.0)\r\n masked_end_logits = end_logits.masked_fill(start_end_mask, -10000.0)\r\n\r\n return masked_start_logits, masked_end_logits\r\n\r\n def generate(self, init_state, c_ids):\r\n start_logits, end_logits = self.forward(init_state, c_ids)\r\n c_mask, _ = return_mask_lengths(c_ids)\r\n batch_size, max_c_len = c_ids.size()\r\n\r\n mask = torch.matmul(c_mask.unsqueeze(2).float(), c_mask.unsqueeze(1).float())\r\n mask = torch.triu(mask) == 0\r\n score = (self.ls(start_logits).unsqueeze(2) + self.ls(end_logits).unsqueeze(1))\r\n score = score.masked_fill(mask, -10000.0)\r\n score, start_positions = score.max(dim=1)\r\n score, end_positions = score.max(dim=1)\r\n start_positions = torch.gather(start_positions, 1, end_positions.view(-1, 1)).squeeze(1)\r\n\r\n idxes = torch.arange(0, max_c_len, out=torch.LongTensor(max_c_len))\r\n idxes = idxes.unsqueeze(0).to(start_logits.device).repeat(batch_size, 1)\r\n\r\n start_positions = start_positions.unsqueeze(1)\r\n start_mask = (idxes >= start_positions).long()\r\n end_positions = end_positions.unsqueeze(1)\r\n end_mask = (idxes <= end_positions).long()\r\n a_ids = start_mask + end_mask - 1\r\n\r\n return a_ids, start_positions.squeeze(1), end_positions.squeeze(1)\r\n\r\n\r\nclass ContextEncoderforQG(nn.Module):\r\n def __init__(self, embedding, emsize,\r\n nhidden, nlayers,\r\n dropout=0.0):\r\n super(ContextEncoderforQG, self).__init__()\r\n self.embedding = embedding\r\n self.context_lstm = CustomLSTM(input_size=emsize,\r\n hidden_size=nhidden,\r\n num_layers=nlayers,\r\n dropout=dropout,\r\n bidirectional=True)\r\n self.context_linear = nn.Linear(2 * nhidden, 2 * nhidden)\r\n self.fusion = nn.Linear(4 * nhidden, 2 * nhidden, bias=False)\r\n self.gate = nn.Linear(4 * nhidden, 2 * nhidden, bias=False)\r\n\r\n def forward(self, c_ids, a_ids):\r\n c_mask, c_lengths = return_mask_lengths(c_ids)\r\n c_embeddings = self.embedding(c_ids, c_mask, a_ids)\r\n c_outputs, _ = self.context_lstm(c_embeddings, c_lengths)\r\n # attention\r\n mask = torch.matmul(c_mask.unsqueeze(2), c_mask.unsqueeze(1))\r\n c_attned_by_c, _ = cal_attn(self.context_linear(c_outputs),\r\n c_outputs,\r\n mask)\r\n c_concat = torch.cat([c_outputs, c_attned_by_c], dim=2)\r\n c_fused = self.fusion(c_concat).tanh()\r\n c_gate = self.gate(c_concat).sigmoid()\r\n c_outputs = c_gate * c_fused + (1 - c_gate) * c_outputs\r\n return c_outputs\r\n\r\nclass QuestionDecoder(nn.Module):\r\n def __init__(self, sos_id, eos_id,\r\n embedding, contextualized_embedding, emsize,\r\n nhidden, ntokens, nlayers,\r\n dropout=0.0,\r\n max_q_len=64):\r\n super(QuestionDecoder, self).__init__()\r\n\r\n self.sos_id = sos_id\r\n self.eos_id = eos_id\r\n self.emsize = emsize\r\n self.embedding = embedding\r\n self.nhidden = nhidden\r\n self.ntokens = ntokens\r\n self.nlayers = nlayers\r\n # this 
max_len includes sos and eos\r\n self.max_q_len = max_q_len\r\n\r\n self.context_lstm = ContextEncoderforQG(contextualized_embedding, emsize,\r\n nhidden // 2, nlayers, dropout)\r\n\r\n self.question_lstm = CustomLSTM(input_size=emsize,\r\n hidden_size=nhidden,\r\n num_layers=nlayers,\r\n dropout=dropout,\r\n bidirectional=False)\r\n\r\n self.question_linear = nn.Linear(nhidden, nhidden)\r\n\r\n self.concat_linear = nn.Sequential(nn.Linear(2*nhidden, 2*nhidden),\r\n nn.Dropout(dropout),\r\n nn.Linear(2*nhidden, 2*emsize))\r\n\r\n self.logit_linear = nn.Linear(emsize, ntokens, bias=False)\r\n\r\n # fix output word matrix\r\n self.logit_linear.weight = embedding.word_embeddings.weight\r\n for param in self.logit_linear.parameters():\r\n param.requires_grad = False\r\n\r\n self.discriminator = nn.Bilinear(emsize, nhidden, 1)\r\n\r\n def postprocess(self, q_ids):\r\n eos_mask = q_ids == self.eos_id\r\n no_eos_idx_sum = (eos_mask.sum(dim=1) == 0).long() * (self.max_q_len - 1)\r\n eos_mask = eos_mask.cpu().numpy()\r\n q_lengths = np.argmax(eos_mask, axis=1) + 1\r\n q_lengths = torch.tensor(q_lengths).to(q_ids.device).long() + no_eos_idx_sum\r\n batch_size, max_len = q_ids.size()\r\n idxes = torch.arange(0, max_len, out=torch.LongTensor(max_len))\r\n idxes = idxes.unsqueeze(0).to(q_ids.device).repeat(batch_size, 1)\r\n q_mask = (idxes < q_lengths.unsqueeze(1))\r\n q_ids = q_ids.long() * q_mask.long()\r\n return q_ids\r\n\r\n def forward(self, init_state, c_ids, q_ids, a_ids):\r\n batch_size, max_q_len = q_ids.size()\r\n\r\n c_outputs = self.context_lstm(c_ids, a_ids)\r\n\r\n c_mask, c_lengths = return_mask_lengths(c_ids)\r\n q_mask, q_lengths = return_mask_lengths(q_ids)\r\n\r\n # question dec\r\n q_embeddings = self.embedding(q_ids)\r\n q_outputs, _ = self.question_lstm(q_embeddings, q_lengths, init_state)\r\n\r\n # attention\r\n mask = torch.matmul(q_mask.unsqueeze(2), c_mask.unsqueeze(1))\r\n c_attned_by_q, attn_logits = cal_attn(self.question_linear(q_outputs),\r\n c_outputs,\r\n mask)\r\n\r\n # gen logits\r\n q_concated = torch.cat([q_outputs, c_attned_by_q], dim=2)\r\n q_concated = self.concat_linear(q_concated)\r\n q_maxouted, _ = q_concated.view(batch_size, max_q_len, self.emsize, 2).max(dim=-1)\r\n gen_logits = self.logit_linear(q_maxouted)\r\n\r\n # copy logits\r\n bq = batch_size * max_q_len\r\n c_ids = c_ids.unsqueeze(1).repeat(1, max_q_len, 1).view(bq, -1).contiguous()\r\n attn_logits = attn_logits.view(bq, -1).contiguous()\r\n copy_logits = torch.zeros(bq, self.ntokens).to(c_ids.device)\r\n copy_logits = copy_logits - 10000.0\r\n copy_logits, _ = scatter_max(attn_logits, c_ids, out=copy_logits)\r\n copy_logits = copy_logits.masked_fill(copy_logits == -10000.0, 0)\r\n copy_logits = copy_logits.view(batch_size, max_q_len, -1).contiguous()\r\n\r\n logits = gen_logits + copy_logits\r\n\r\n # mutual information between answer and question\r\n a_emb = c_outputs * a_ids.float().unsqueeze(2)\r\n a_mean_emb = torch.sum(a_emb, dim=1) / a_ids.sum(dim=1).unsqueeze(1).float()\r\n fake_a_mean_emb = torch.cat([a_mean_emb[-1].unsqueeze(0), a_mean_emb[:-1]], dim=0)\r\n\r\n # mask out padding positions before averaging the question embeddings\r\n q_emb = q_maxouted * q_mask.unsqueeze(2)\r\n q_mean_emb = torch.sum(q_emb, dim=1) / q_lengths.unsqueeze(1).float()\r\n fake_q_mean_emb = torch.cat([q_mean_emb[-1].unsqueeze(0), q_mean_emb[:-1]], dim=0)\r\n\r\n bce_loss = nn.BCEWithLogitsLoss()\r\n true_logits = self.discriminator(q_mean_emb, a_mean_emb)\r\n true_labels = torch.ones_like(true_logits)\r\n\r\n fake_a_logits = self.discriminator(q_mean_emb, fake_a_mean_emb)\r\n 
fake_q_logits = self.discriminator(fake_q_mean_emb, a_mean_emb)\r\n fake_logits = torch.cat([fake_a_logits, fake_q_logits], dim=0)\r\n fake_labels = torch.zeros_like(fake_logits)\r\n\r\n true_loss = bce_loss(true_logits, true_labels)\r\n fake_loss = 0.5 * bce_loss(fake_logits, fake_labels)\r\n loss_info = 0.5 * (true_loss + fake_loss)\r\n\r\n return logits, loss_info\r\n\r\n def generate(self, init_state, c_ids, a_ids):\r\n c_mask, c_lengths = return_mask_lengths(c_ids)\r\n c_outputs = self.context_lstm(c_ids, a_ids)\r\n\r\n batch_size = c_ids.size(0)\r\n\r\n q_ids = torch.LongTensor([self.sos_id] * batch_size).unsqueeze(1)\r\n q_ids = q_ids.to(c_ids.device)\r\n token_type_ids = torch.zeros_like(q_ids)\r\n position_ids = torch.zeros_like(q_ids)\r\n q_embeddings = self.embedding(q_ids, token_type_ids, position_ids)\r\n\r\n state = init_state\r\n\r\n # unroll\r\n all_q_ids = list()\r\n all_q_ids.append(q_ids)\r\n for _ in range(self.max_q_len - 1):\r\n position_ids = position_ids + 1\r\n q_outputs, state = self.question_lstm.lstm(q_embeddings, state)\r\n\r\n # attention\r\n mask = c_mask.unsqueeze(1)\r\n c_attned_by_q, attn_logits = cal_attn(self.question_linear(q_outputs),\r\n c_outputs,\r\n mask)\r\n\r\n # gen logits\r\n q_concated = torch.cat([q_outputs, c_attned_by_q], dim=2)\r\n q_concated = self.concat_linear(q_concated)\r\n q_maxouted, _ = q_concated.view(batch_size, 1, self.emsize, 2).max(dim=-1)\r\n gen_logits = self.logit_linear(q_maxouted)\r\n\r\n # copy logits\r\n attn_logits = attn_logits.squeeze(1)\r\n copy_logits = torch.zeros(batch_size, self.ntokens).to(c_ids.device)\r\n copy_logits = copy_logits - 10000.0\r\n copy_logits, _ = scatter_max(attn_logits, c_ids, out=copy_logits)\r\n copy_logits = copy_logits.masked_fill(copy_logits == -10000.0, 0)\r\n\r\n logits = gen_logits + copy_logits.unsqueeze(1)\r\n\r\n q_ids = torch.argmax(logits, 2)\r\n all_q_ids.append(q_ids)\r\n\r\n q_embeddings = self.embedding(q_ids, token_type_ids, position_ids)\r\n\r\n q_ids = torch.cat(all_q_ids, 1)\r\n q_ids = self.postprocess(q_ids)\r\n\r\n return q_ids\r\n\r\nclass DiscreteVAE(nn.Module):\r\n def __init__(self, args):\r\n super(DiscreteVAE, self).__init__()\r\n tokenizer = BertTokenizer.from_pretrained(args.bert_model)\r\n padding_idx = tokenizer.vocab['[PAD]']\r\n sos_id = tokenizer.vocab['[CLS]']\r\n eos_id = tokenizer.vocab['[SEP]']\r\n ntokens = len(tokenizer.vocab)\r\n\r\n bert_model = args.bert_model\r\n if \"large\" in bert_model:\r\n emsize = 1024\r\n else:\r\n emsize = 768\r\n\r\n enc_nhidden = args.enc_nhidden\r\n enc_nlayers = args.enc_nlayers\r\n enc_dropout = args.enc_dropout\r\n dec_a_nhidden = args.dec_a_nhidden\r\n dec_a_nlayers = args.dec_a_nlayers\r\n dec_a_dropout = args.dec_a_dropout\r\n self.dec_q_nhidden = dec_q_nhidden = args.dec_q_nhidden\r\n self.dec_q_nlayers = dec_q_nlayers = args.dec_q_nlayers\r\n dec_q_dropout = args.dec_q_dropout\r\n self.nz = nz = args.nz\r\n self.nzdim = nzdim = args.nzdim\r\n\r\n max_q_len = args.max_q_len\r\n\r\n embedding = Embedding(bert_model)\r\n contextualized_embedding = ContextualizedEmbedding(bert_model)\r\n for param in embedding.parameters():\r\n param.requires_grad = False\r\n for param in contextualized_embedding.parameters():\r\n param.requires_grad = False\r\n\r\n self.posterior_encoder = PosteriorEncoder(embedding, emsize,\r\n enc_nhidden, enc_nlayers,\r\n nz, nzdim,\r\n enc_dropout)\r\n\r\n self.prior_encoder = PriorEncoder(embedding, emsize,\r\n enc_nhidden, enc_nlayers,\r\n nz, nzdim, enc_dropout)\r\n\r\n self.answer_decoder = 
AnswerDecoder(contextualized_embedding, emsize,\r\n dec_a_nhidden, dec_a_nlayers,\r\n dec_a_dropout)\r\n\r\n self.question_decoder = QuestionDecoder(sos_id, eos_id,\r\n embedding, contextualized_embedding, emsize,\r\n dec_q_nhidden, ntokens, dec_q_nlayers,\r\n dec_q_dropout,\r\n max_q_len)\r\n\r\n self.q_h_linear = nn.Linear(nz * nzdim, dec_q_nlayers * dec_q_nhidden, False)\r\n self.q_c_linear = nn.Linear(nz * nzdim, dec_q_nlayers * dec_q_nhidden, False)\r\n self.a_linear = nn.Linear(nz * nzdim, emsize, False)\r\n\r\n self.q_rec_criterion = nn.CrossEntropyLoss(ignore_index=padding_idx)\r\n self.kl_criterion = KLLoss()\r\n \"\"\"\r\n print(\"posterior_encoder: \" + str(return_num(self.posterior_encoder)))\r\n print(\"prior_encoder: \" + str(return_num(self.prior_encoder)))\r\n print(\"answer_decoder: \" + str(return_num(self.answer_decoder)))\r\n print(\"question_decoder: \" + str(return_num(self.question_decoder)))\r\n print(\"q_h_linear: \" + str(return_num(self.q_h_linear)))\r\n print(\"q_c_linear: \" + str(return_num(self.q_c_linear)))\r\n print(\"a_linear: \" + str(return_num(self.a_linear)))\r\n \"\"\"\r\n\r\n def return_init_state(self, z_flatten):\r\n\r\n q_init_h = self.q_h_linear(z_flatten)\r\n q_init_c = self.q_c_linear(z_flatten)\r\n q_init_h = q_init_h.view(-1, self.dec_q_nlayers, self.dec_q_nhidden).transpose(0, 1).contiguous()\r\n q_init_c = q_init_c.view(-1, self.dec_q_nlayers, self.dec_q_nhidden).transpose(0, 1).contiguous()\r\n q_init_state = (q_init_h, q_init_c)\r\n\r\n a_init_state = self.a_linear(z_flatten)\r\n\r\n return q_init_state, a_init_state\r\n\r\n def forward(self, c_ids, q_ids, a_ids, start_positions, end_positions):\r\n\r\n posterior_z_prob, posterior_z = self.posterior_encoder(c_ids, q_ids, a_ids)\r\n prior_z_prob, _ = self.prior_encoder(c_ids)\r\n posterior_z_flatten = posterior_z.view(-1, self.nz * self.nzdim).contiguous()\r\n\r\n q_init_state, a_init_state = self.return_init_state(posterior_z_flatten)\r\n\r\n # answer decoding\r\n start_logits, end_logits = self.answer_decoder(a_init_state, c_ids)\r\n # question decoding\r\n q_logits, loss_info = self.question_decoder(q_init_state, c_ids, q_ids, a_ids)\r\n\r\n # q rec loss\r\n loss_q_rec = self.q_rec_criterion(q_logits[:, :-1, :].transpose(1, 2).contiguous(),\r\n q_ids[:, 1:])\r\n\r\n # a rec loss\r\n max_c_len = c_ids.size(1)\r\n a_rec_criterion = nn.CrossEntropyLoss(ignore_index=max_c_len)\r\n start_positions.clamp_(0, max_c_len)\r\n end_positions.clamp_(0, max_c_len)\r\n loss_start_a_rec = a_rec_criterion(start_logits, start_positions)\r\n loss_end_a_rec = a_rec_criterion(end_logits, end_positions)\r\n loss_a_rec = 0.5 * (loss_start_a_rec + loss_end_a_rec)\r\n\r\n # kl loss\r\n loss_kl = self.kl_criterion(posterior_z_prob.mean(dim=0),\r\n prior_z_prob.mean(dim=0))\r\n\r\n loss = loss_q_rec + loss_a_rec + loss_kl + loss_info\r\n\r\n return loss, loss_q_rec, loss_a_rec, loss_kl, loss_info\r\n\r\n def generate(self, z, c_ids):\r\n\r\n c_mask, _ = return_mask_lengths(c_ids)\r\n\r\n z_flatten = z.view(-1, self.nz * self.nzdim).contiguous()\r\n\r\n q_init_state, a_init_state = self.return_init_state(z_flatten)\r\n\r\n a_ids, start_positions, end_positions = self.answer_decoder.generate(a_init_state, c_ids)\r\n\r\n q_ids = self.question_decoder.generate(q_init_state, c_ids, a_ids)\r\n\r\n return q_ids, start_positions, end_positions\r\n\r\n def return_answer_logits(self, z, c_ids):\r\n\r\n c_mask, _ = return_mask_lengths(c_ids)\r\n\r\n z_flatten = z.view(-1, self.nz * self.nzdim).contiguous()\r\n\r\n 
q_init_state, a_init_state = self.return_init_state(z_flatten)\r\n\r\n start_logits, end_logits = self.answer_decoder(a_init_state, c_ids)\r\n\r\n return start_logits, end_logits\r\n","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":27054,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"521843358","text":"#!/usr/bin/env python\n#-*- coding:utf-8 _*-\n\"\"\"\n@author:alvin\n@file: s_server.py\n@time: 2019/01/13\n\"\"\"\nimport socket\nimport os\ns = socket.socket()\ns.bind((\"127.0.0.1\",8002))\ns.listen()\nwhile True:\n conn, addr = s.accept()\n print(\"client is online\")\n while True:\n data = conn.recv(1024)\n print(str(data))\n if not data:\n # an empty read means the client closed the connection\n print(\"client is lost\")\n break\n conn.send(data.upper())\n\n if str(data,encoding='utf-8') == \"exit\":\n print(\"client sent exit!\")\n os._exit(1)\n\ns.close()","sub_path":"ms/s_server.py","file_name":"s_server.py","file_ext":"py","file_size_in_byte":540,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"599583711","text":"# @Paul: a Python script with the same content as the Jupyter notebook; but I can debug in .py files, which is why I created it\n\n\n# imports\n\nimport numpy as np\nimport pandas as pd\nfrom lmfit import Parameters, report_fit\n\nfrom model_funcs import sim_single_exp\nfrom parest_funcs import par_est_main\n\n# create parameter structure with initial fit parameter values, bounds, etc.\n\n# Parameter structure\np0 = Parameters()\np0.add('mumax', value=0.5, min=0.0001, max=1.)\np0.add('Yxs', value=0.2, min=0.0001, max=1.)\np0.add('Ks', value=1.0, vary=False)\np0.add('base_coef', value=1.0, min=0.0001) # proportionality factor between biomass growth and base consumption, in [L/g]\n\n# control values\n# c[0] ... time point when feed was switched on [h]\n# c[1] ... feed rate [L/h]\n# c[2] ... substrate concentration in feed [g/L]\nc = [5, .02, 200]\n\n# initial values\n# y[0] ... substrate mass (mS) in [g]\n# y[1] ... bio dry mass (mX) in [g]\n# y[2] ... 
volume of fermentation broth [L]\ny0 = [3, 0.2, .5]\n\n# time grid in [h]\nt_grid = np.linspace(0,10, 1001)\n\n# run simulation\nsim_exp = sim_single_exp(t_grid, y0, p0, c)\n\n# plot results\nprint(sim_exp)\nsim_exp.plot(y=['cS', 'cX'])\nsim_exp.plot(y=['V', 'base_rate'])\n\n# define experiments to include\nexp_list = { # key: name of experiment (arbitrary); value: filename of excel file\n 'Experiment 1': './exp1.xlsx',\n 'Experiment 2': './exp2.xlsx',\n}\nfit_results = par_est_main(exp_list, p0)\n\n\n# print fit parameter values\nreport_fit(fit_results)","sub_path":"Parest_Multiexp_MVP.py","file_name":"Parest_Multiexp_MVP.py","file_ext":"py","file_size_in_byte":1545,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"69739724","text":"from Card import Card\nfrom random import randint\n\nclass Deck:\n def __init__(self):\n self.cards = []\n self.size = 52\n self.suits = [\"spades\", \"hearts\", \"clubs\", \"diamonds\"]\n self.faces = [ \"Ace\", \"King\", \"Queen\", \"Jack\", \"10\", \"9\", \"8\", \"7\", \"6\", \"5\", \"4\", \"3\", \"2\" ]\n self.values = [ 11, 10, 10, 10, 10, 9, 8, 7, 6, 5, 4, 3, 2 ]\n\n def build_deck(self):\n for suit in self.suits:\n for i in range(len(self.values)):\n self.cards.append(Card(suit, self.faces[i], self.values[i])) \n\n def print_deck(self):\n for card in self.cards:\n print(card)\n\n def shuffle(self):\n for i in range(self.size):\n index_to_swap = randint(0, self.size-1)\n # temp = self.cards[i] \n # self.cards[i] = self.cards[index_to_swap]\n # self.cards[index_to_swap] = temp\n\n self.cards[i], self.cards[index_to_swap] = \\\n self.cards[index_to_swap], self.cards[i]\n\n def deal_card(self):\n card = self.cards.pop()\n self.size -= 1\n return card\n\n \n","sub_path":"Deck.py","file_name":"Deck.py","file_ext":"py","file_size_in_byte":1119,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"328617778","text":"from services.misc import group\nfrom services.sync import count_elements_for_functext\n\n\nsample_func = '!((aBC+Ad)(B+C+aD) + Bd + !(AbcD + bc))'\n\n\ndef table_view(data, free_space_right=4, free_space_left=1, column_separator=\"|\"):\n datas = [x for x in data if isinstance(x, (list, tuple))]\n zipdata = list(zip(*datas))\n column_widths = [max([len(str(x)) for x in col]) + free_space_right + free_space_left for col in zipdata]\n line_width = sum(column_widths) + len(column_separator) * (len(zipdata) - 1)\n for item in data:\n if isinstance(item, str):\n print(item * line_width)\n else:\n while \"sum\" in item:\n ind = item.index(\"sum\")\n item[ind] = sum(\n [x[ind] for x in data if isinstance(x, (tuple, list)) and isinstance(x[ind], (int, float))])\n print(column_separator.join(\n [\" \" * free_space_left + \"{:<{x}}\".format(item[i], x=x - free_space_left) for i, x in\n enumerate(column_widths)]).format(*item))\n\n\ndef get_vector_carno(vector):\n vector = vector[::-1]\n arr = [0, 1, 3, 2, 4, 5, 7, 6, 12, 13, 15, 14, 8, 9, 11, 10]\n groups = group(arr, 4)\n lst = [['', 'cd', 'cD', 'CD', 'Cd']]\n rows = ['ab', 'aB', 'AB', 'Ab']\n for i, grp in enumerate(groups):\n lst += [[rows[i]] + [vector[x] for x in grp]]\n table_view(lst, free_space_right=1)\n\n\nif __name__ == \"__main__\":\n print(sample_func)\n r = count_elements_for_functext(sample_func)\n for key, val in r.items():\n print(\"{}: {}\".format(key, 
val))\n","sub_path":"src/sync/magellan.py","file_name":"magellan.py","file_ext":"py","file_size_in_byte":1573,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"160453890","text":"from tkinter import *\r\nroot = Tk()\r\nimport random\r\n\r\nf = Frame(root, )\r\nf.pack()\r\n\r\nlf = Label(f, text=\"Hello world!\",\r\n cursor='heart', font=\"Impact\",\r\n bg=\"cyan\", fg=\"red\",\r\n bd=10, relief=\"groove\",\r\n width=73,\r\n )\r\nlf.pack(side=TOP)\r\n\r\nquotlist = [\r\n\"\"\"\r\n1)“Talk is cheap. Show me the code.” \r\n― Linus Torvalds\r\n\"\"\",\r\n\r\n\"\"\"\r\n2)“When you don't create things, you become defined by your tastes rather than ability. your tastes only narrow & exclude people. so create.” \r\n― Why The Lucky Stiff\r\n\"\"\",\r\n\r\n\"\"\"\r\n3)“I'm not a great programmer; I'm just a good programmer with great habits.”\r\n― Kent Beck\r\n\"\"\",\r\n\r\n\"\"\"\r\n4)“A language that doesn't affect the way you think about programming is not worth knowing.” \r\n― Alan J. Perlis\r\n\"\"\",\r\n\r\n\"\"\"\r\n5)“The most disastrous thing that you can ever learn is your first programming language.” \r\n― Alan Kay\r\n\"\"\",\r\n]\r\n\r\nquotlist2 = []\r\n\r\ni = 0\r\n\r\ndef f():\r\n try:\r\n global i\r\n tl = Toplevel()\r\n txtvar = StringVar()\r\n txtvar.set(quotlist[i])\r\n l = Label(tl, textvariable=txtvar,\r\n bg=\"indigo\", fg=\"red\",font=(\"Script\", 20))\r\n l.pack()\r\n tl.title(\"Quote NO %s\" %(i+1))\r\n i += 1\r\n except:\r\n i = 0\r\n \r\nb = Button(root, text=\"Click to get a quote sequentally from the quote list\", command=f,\r\n font=\"Courier\", cursor='hand1', width=59, bd=5, bg='violet', fg='yellow')\r\nb.pack()\r\n\r\n\r\ndef f2():\r\n tl = Toplevel()\r\n rq = random.choice(quotlist)\r\n txtvar = StringVar()\r\n txtvar.set(rq)\r\n l = Label(tl, textvariable=txtvar,\r\n bg=\"orange\", fg=\"turquoise\", font=(\"Comic Sans MS\", 15))\r\n l.pack()\r\n tl.title(\"Random Quote NO %s\"%(i+1))\r\n \r\nb2 = Button(root, text=\"Click to get a random programming quote from the quote list\", command=f2,\r\n cursor=\"hand2\", bg=\"purple\", fg=\"gold\", font=\"Courier\")\r\nb2.pack()\r\n\r\nroot.title(\"Click\")\r\n","sub_path":"programming quotes.py","file_name":"programming quotes.py","file_ext":"py","file_size_in_byte":1982,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"519928824","text":"from __future__ import print_function\nimport os\nimport neat\nimport numpy as np\nimport multiprocessing\nfrom tqdm import tqdm\nimport librosa\n\nfrom anti_spoofing.data_utils import ASVDataset\nfrom anti_spoofing.metrics_utils import rocch2eer, rocch\nfrom anti_spoofing.utils_ASV import softmax, whiten, gate_activation_ce, make_visualize, SAMPLING_RATE\n\n\n\"\"\"\nNEAT APPLIED TO ASVspoof 2019\n\"\"\"\n\nnb_samples_train = 10 # number of audio files used for training\nnb_samples_test = 10 # number of audio files used for testing\n\nindex_train = [k for k in range(5)] + [k for k in range(2590, 2595)] # index of audio files to use for training\n\nn_processes = 10 # multiprocessing.cpu_count() # number of workers to use for evaluating the fitness\nn_generation = 300 # number of generations\n\ntrain_loader = ASVDataset(length=None, is_train=True, is_eval=False, index_list=index_train)\ntest_loader = ASVDataset(length=None, is_train=False, is_eval=False, index_list=index_train)\n\n\n\ntrainloader = []\nfor data in train_loader:\n inputs, output = data[0], data[2]\n inputs = 
np.ravel(librosa.feature.mfcc(y=inputs, sr=SAMPLING_RATE))\n inputs = whiten(inputs)\n trainloader.append((inputs, output))\n \ntestloader = []\nfor data in test_loader:\n inputs, output = data[0], data[2]\n inputs = np.ravel(librosa.feature.mfcc(y=inputs, sr=SAMPLING_RATE))\n inputs = whiten(inputs)\n testloader.append((inputs, output))\n\n\ndef eval_genomes(genomes, config_):\n \"\"\"\n Most important part of NEAT since it is here that we adapt NEAT to our problem.\n We tell what the phenotype of a genome is and how to calculate its fitness (same idea as a loss)\n :param config_: config from the config file\n :param genomes: list of all the genomes to get evaluated\n \"\"\"\n for _, genome in tqdm(genomes):\n net = neat.nn.RecurrentNetwork.create(genome, config_)\n cross_entropy = 0\n for data in trainloader:\n inputs, output = data[0], data[1]\n net.reset()\n mask, scores = gate_activation_ce(net, inputs)\n selected_score = scores[mask]\n if selected_score.size == 0:\n scores = 1/7 * np.ones(7)\n else:\n xo = np.sum(selected_score, axis=0) / selected_score.size\n print(\"xo =\", xo)\n scores = softmax(xo)\n print(\"scores =\", scores)\n cross_entropy -= np.log(scores[output] + 10**-20)\n\n genome.fitness = 1 - cross_entropy/19.5\n \n\ndef eval_genome(genome, config_):\n \"\"\"\n Most important part of NEAT since it is here that we adapt NEAT to our problem.\n We tell what the phenotype of a genome is and how to calculate its fitness \n (same idea as a loss)\n :param config_: config from the config file\n :param genome: the genome to get evaluated\n this version is intended to use ParallelEvaluator and should be much faster\n \"\"\"\n net = neat.nn.RecurrentNetwork.create(genome, config_)\n cross_entropy = 0\n for data in trainloader:\n inputs, output = data[0], data[1]\n net.reset()\n mask, scores = gate_activation_ce(net, inputs)\n selected_score = scores[mask]\n if selected_score.size == 0:\n scores = 1 / 7 * np.ones(7)\n else:\n xo = np.sum(selected_score, axis=0) / selected_score.size\n xo[np.isinf(xo)] = 100\n xo[np.isnan(xo)] = 100\n scores = softmax(xo)\n cross_entropy -= np.log(scores[output] + 10**-20)\n\n return 1 - cross_entropy/19.5\n \n\ndef evaluate(net, data_loader):\n\n correct = 0\n total = 0\n net.reset()\n target_scores = []\n non_target_scores = []\n for data in tqdm(data_loader):\n inputs, output = data[0], data[1]\n mask, scores = gate_activation_ce(net, inputs)\n selected_score = scores[mask]\n if selected_score.size == 0:\n scores = 1 / 7 * np.ones(7)\n else:\n xo = np.sum(selected_score, axis=0) / selected_score.size\n scores = softmax(xo)\n total += 1\n correct += (scores.argmax() == output)\n if output == 0:\n target_scores.append(scores[0])\n else:\n non_target_scores.append(scores[0])\n \n target_scores = np.array(target_scores)\n non_target_scores = np.array(non_target_scores)\n \n pmiss, pfa = rocch(target_scores, non_target_scores)\n eer = rocch2eer(pmiss, pfa)\n \n return target_scores, non_target_scores, float(correct)/total, eer\n\n\ndef run(config_file, n_gen):\n \"\"\"\n Launches a run until convergence or max number of generations reached\n :param config_file: path to the config file\n :param n_gen: max number of generations\n :return: the best genotype (winner), the config and the stats of the run\n \"\"\"\n # Load configuration.\n config_ = neat.Config(neat.DefaultGenome, neat.DefaultReproduction,\n neat.DefaultSpeciesSet, neat.DefaultStagnation,\n config_file)\n\n # Create the population, which is 
the top-level object for a NEAT run.\n p = neat.Population(config_)\n\n # Add a stdout reporter to show progress in the terminal.\n p.add_reporter(neat.StdOutReporter(True))\n stats_ = neat.StatisticsReporter()\n p.add_reporter(stats_)\n # p.add_reporter(neat.Checkpointer(5))\n\n # Run for up to n_gen generations.\n # multi processing\n if n_processes > 1:\n pe = neat.ParallelEvaluator(n_processes, eval_genome)\n winner_ = p.run(pe.evaluate, n_gen)\n else:\n winner_ = p.run(eval_genomes, n_gen)\n\n # Display the winning genome.\n print('\\nBest genome:\\n{!s}'.format(winner_))\n\n # Show output of the most fit genome against training data.\n print('\\n')\n winner_net = neat.nn.RecurrentNetwork.create(winner_, config_)\n\n training_target_scores, training_non_target_scores, training_accuracy, training_eer = evaluate(winner_net, trainloader)\n target_scores, non_target_scores, accuracy, eer = evaluate(winner_net, testloader)\n\n print(\"**** training accuracy = {} ****\".format(training_accuracy))\n print(\"**** training target scores = {} ****\".format(training_target_scores))\n print(\"**** training non target scores = {} ****\".format(training_non_target_scores))\n print(\"**** training equal error rate = {} ****\".format(training_eer))\n\n\n print(\"\\n\")\n print(\"**** accuracy = {} ****\".format(accuracy))\n print(\"**** testing target scores = {} ****\".format(target_scores))\n print(\"**** testing non target scores = {} ****\".format(non_target_scores))\n print(\"**** equal error rate = {} ****\".format(eer))\n\n return winner_, config_, stats_\n\n\nif __name__ == '__main__':\n\n # Determine path to configuration file. This path manipulation is\n # here so that the script will run successfully regardless of the\n # current working directory.\n local_dir = os.path.dirname(__file__)\n config_path = os.path.join(local_dir, 'neat.cfg')\n\n winner, config, stats = run(config_path, n_generation)\n make_visualize(winner, config, stats)","sub_path":"anti_spoofing/main_toy_data_set_ce.py","file_name":"main_toy_data_set_ce.py","file_ext":"py","file_size_in_byte":7130,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"432939709","text":"# -*- coding: utf-8 -*-\nimport email\n\nfrom selenium.webdriver.common.action_chains import ActionChains\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.select import Select\nfrom selenium.webdriver.common.keys import Keys\nfrom time import sleep\nfrom .base import Page\n\nclass user(Page):\n\n url = \"/admin/account\"\n\n user_serrch_username_loc = (By.NAME,\"nickname\")# name search box\n user_search_loginname_loc = (By.NAME,\"username\")# login name search box\n user_search_button_loc = (By.XPATH,'//*[@id=\"epoque-search\"]/div/div[2]/div/div[1]')# search button\n user_delete_comfirm_loc = (By.XPATH,\"/html/body/div[3]/div/div/div[2]/button[2]\")# delete confirmation button\n user_delete_ok_loc = (By.XPATH,\"/html/body/div[3]/div/div/div[2]/button\")# delete success OK button\n user_add_loc = (By.XPATH,'//*[@id=\"epoque-search\"]/div/div[2]/div/div[2]')# add button\n user_edit_loc = (By.XPATH,'//*[@id=\"wrap\"]/div/div[2]/div/div/div[1]/div/div/table/tbody/tr[1]/td[7]/a[1]')# edit button\n user_nextpage_loc = (By.LINK_TEXT,u'下一頁')# next page button\n user_lastpage_loc = (By.LINK_TEXT,u'上一頁')# previous page button\n user_reset_comfirm_loc = (By.XPATH,\"/html/body/div[3]/div/div/div[2]/button[2]\")# confirm reset button\n user_organization_loc = (By.XPATH,\"//*[@id='epoque-search']/div/div[1]/div[3]/div/div/div[1]/div\")# organization search box\n user_part_loc = 
(By.XPATH,\"//*[@id='epoque-search']/div/div[1]/div[4]/div/div/div[1]/div\")#角色搜索框\n user_info_username = (By.XPATH,'//*[@id=\"wrap\"]/div/div[2]/div/div/div[1]/div/div/table/tbody/tr[1]/td[2]')#列表第一行的姓名\n user_info_loginname = (By.XPATH,'//*[@id=\"wrap\"]/div/div[2]/div/div/div[1]/div/div/table/tbody/tr[1]/td[3]')#列表第一行的账号\n user_info_organization = (By.XPATH,'//*[@id=\"wrap\"]/div/div[2]/div/div/div[1]/div/div/table/tbody/tr[1]/td[4]')#列表第一行的组织\n user_info_part = (By.XPATH,'//*[@id=\"wrap\"]/div/div[2]/div/div/div[1]/div/div/table/tbody/tr[1]/td[5]')#列表第一行的角色\n user_reset_ok_loc = (By.XPATH,\"/html/body/div[3]/div/div/div[2]/button\")#重置成功按钮\n index_loginname_loc = (By.XPATH,\"//*[@id='page-content']/header/div/div/a/span\")#首页右上角登录名\n #index_user_operation_button = (By.XPATH,\"//*[@id='page-content']/header/div/div/a/span/i\")#首页右上角账号操作按钮\n index_user_logout_button = (By.ID,\"logout-btn\")#首页右上角账号退出按钮\n add_edit_loginname_loc = (By.XPATH, \"//*[@id='account']/div[1]/div/input\")#新增/编辑页面的账号编辑框\n add_edit_username_loc = (By.XPATH, \"//*[@id='account']/div[2]/div/input\")#新增/编辑页面的姓名编辑框\n add_edit_email_loc = (By.XPATH, \"//*[@id='account']/div[5]/div/div/input\")#新增/编辑页面的邮箱编辑框\n add_edit_address_loc = (By.XPATH, \"//*[@id='account']/div[6]/div/input\")#新增/编辑页面的地址编辑框\n add_edit_telephone_loc = (By.XPATH, \"//*[@id='account']/div[7]/div/input\")#新增/编辑页面的联系方式编辑框\n add_edit_save_button_loc = (By.XPATH, \"//*[@id='save-btn']\")#新增/编辑页面的保存按钮\n add_edit_ok_button_loc = (By.XPATH, \"/html/body/div[3]/div/div/div[2]/button\")#新增/编辑页面保存成功后的ok按钮\n\n #姓名搜索\n def user_search_username(self,username):\n sleep(1)\n self.find_element(*self.user_serrch_username_loc).clear()\n sleep(1)\n self.find_element(*self.user_serrch_username_loc).send_keys(username)\n def search_username_success(self):\n sleep(1)\n return self.find_element(*self.user_info_username).text\n\n #账号搜索\n def user_search_loginname(self,loginname):\n sleep(1)\n self.find_element(*self.user_search_loginname_loc).clear()\n sleep(1)\n self.find_element(*self.user_search_loginname_loc).send_keys(loginname)\n def search_loginname_success(self):\n sleep(1)\n return self.find_element(*self.user_info_loginname).text\n\n #组织搜索\n def user_organization(self,organization):\n sleep(1)\n self.find_element(*self.user_organization_loc).click()\n sleep(1)\n organizations = self.driver.find_element_by_name('groupid')\n sleep(1)\n organizations.find_element_by_xpath(\"//*[@id='epoque-search']/div/div[1]/div[3]/div/div/div[2]/div/div[%s]\"%organization).click()\n sleep(1)\n def search_organization_success(self):\n sleep(1)\n return self.find_element(*self.user_info_organization).text\n\n #角色搜索\n def user_part(self,organization,part):\n sleep(1)\n self.find_element(*self.user_organization_loc).click()\n sleep(1)\n organizations = self.driver.find_element_by_name('groupid')\n sleep(1)\n organizations.find_element_by_xpath(\"//*[@id='epoque-search']/div/div[1]/div[3]/div/div/div[2]/div/div[%s]\"%organization).click()\n sleep(1)\n self.find_element(*self.user_part_loc).click()\n parts = self.driver.find_element_by_name('roleid')\n sleep(1)\n parts.find_element_by_xpath(\"//*[@id='epoque-search']/div/div[1]/div[4]/div/div/div[2]/div/div[%s]\"%part).click()\n sleep(1)\n def search_part_success(self):\n sleep(1)\n return self.find_element(*self.user_info_part).text\n\n #删除\n def user_delete(self):\n sleep(1)\n self.driver.find_element_by_class_name('delete-btn').click()\n sleep(1)\n self.find_element(*self.user_delete_comfirm_loc).click()\n sleep(1)\n 
self.find_element(*self.user_delete_ok_loc).click()\n\n # reset password\n def user_reset_password(self,usernumber):\n sleep(1)\n self.driver.find_element_by_xpath(\"//*[@id='wrap']/div/div[2]/div/div/div[1]/div/div/table/tbody/tr[%s]/td[7]/button\"%usernumber).click()\n sleep(1)\n self.find_element(*self.user_reset_comfirm_loc).click()\n sleep(1)\n self.find_element(*self.user_reset_ok_loc).click()\n\n # next page\n def user_nextpage(self):\n sleep(1)\n self.find_element(*self.user_nextpage_loc).click()\n\n # previous page\n def user_lastpage(self):\n sleep(1)\n self.find_element(*self.user_lastpage_loc).click()\n\n # search button\n def user_search_button(self):\n sleep(1)\n self.find_element(*self.user_search_button_loc).click()\n sleep(2)\n\n # add account\n def user_add(self,loginname,username,organization,part,emael,address,telephone):\n sleep(1)\n self.find_element(*self.user_add_loc).click()\n sleep(1)\n self.find_element(*self.add_edit_loginname_loc).send_keys(loginname)# enter login name\n sleep(1)\n self.driver.find_element_by_xpath(\"//*[@id='checkUserName']\").click()# username check\n sleep(1)\n self.driver.find_element_by_xpath(\"/html/body/div[3]/div/div/div[2]/button\").click()\n sleep(2)\n self.find_element(*self.add_edit_username_loc).send_keys(username)# enter name\n #-------organization selection-------\n self.driver.find_element_by_xpath(\"//*[@id='account']/div[3]/div/div/div[1]/div\").click()\n sleep(1)\n organizations = self.driver.find_element_by_name('groupid')\n sleep(1)\n organizations.find_element_by_xpath(\"//*[@id='account']/div[3]/div/div/div[2]/div/div[%s]\" % organization).click()\n sleep(1)\n #-------role selection-------\n self.driver.find_element_by_xpath(\"//*[@id='account']/div[4]/div/div/div[1]/div\").click()\n sleep(1)\n parts = self.driver.find_element_by_name('roleid')\n sleep(1)\n parts.find_element_by_xpath(\"//*[@id='account']/div[4]/div/div/div[2]/div/div[%s]\" % part).click()\n sleep(1)\n #----------------------\n self.find_element(*self.add_edit_email_loc).send_keys(emael)# enter e-mail\n self.find_element(*self.add_edit_address_loc).send_keys(address)# enter address\n self.find_element(*self.add_edit_telephone_loc).send_keys(telephone)# enter telephone\n self.find_element(*self.add_edit_save_button_loc).click()# click save\n self.find_element(*self.add_edit_ok_button_loc).click()# click ok\n\n # view/edit account\n def user_edit(self,username,organization,part,email,address,telephone):\n sleep(1)\n self.find_element(*self.user_edit_loc).click()\n sleep(1)\n self.find_element(*self.add_edit_username_loc).clear()# clear name input\n sleep(1)\n self.find_element(*self.add_edit_username_loc).send_keys(username)# change name\n sleep(1)\n #-------organization change-------\n self.driver.find_element_by_xpath(\"//*[@id='account']/div[3]/div/div/div[1]/div\").click()\n sleep(1)\n organizations = self.driver.find_element_by_name('groupid')\n sleep(1)\n organizations.find_element_by_xpath(\"//*[@id='account']/div[3]/div/div/div[2]/div/div[%s]\" % organization).click()\n sleep(1)\n #-------role change-------\n self.driver.find_element_by_xpath(\"//*[@id='account']/div[4]/div/div/div[1]/div\").click()\n sleep(1)\n parts = self.driver.find_element_by_name('roleid')\n sleep(1)\n parts.find_element_by_xpath(\"//*[@id='account']/div[4]/div/div/div[2]/div/div[%s]\" % part).click()\n sleep(1)\n #----------------------\n self.find_element(*self.add_edit_email_loc).clear()# clear e-mail input\n self.find_element(*self.add_edit_email_loc).send_keys(email)# change e-mail\n self.find_element(*self.add_edit_address_loc).clear()# clear address input\n self.find_element(*self.add_edit_address_loc).send_keys(address)# change address\n self.find_element(*self.add_edit_telephone_loc).clear()# clear telephone input\n 
self.find_element(*self.add_edit_telephone_loc).send_keys(telephone)# change telephone\n self.find_element(*self.add_edit_save_button_loc).click()# save\n self.find_element(*self.add_edit_ok_button_loc).click()# click ok\n\n # visit account permission management\n def surf_user(self, username='admin', password='000000'):\n self.driver.get('http://bossdev.epoque.cn/login')\n self.driver.find_element_by_id('username').clear()\n self.driver.find_element_by_id('username').send_keys(username)\n self.driver.find_element_by_id('password').clear()\n self.driver.find_element_by_id('password').send_keys(password)\n innerText = self.driver.find_element_by_id(\"code_box\").get_attribute('innerText')\n self.driver.find_element_by_xpath(\"//*[@id='code']\").send_keys(innerText)\n sleep(5)\n self.driver.find_element_by_xpath(u\"//input[@value='登录']\").click()\n self.driver.get('http://bossdev.epoque.cn/admin/account')\n self.open()\n\n # log out\n def logout(self):\n sleep(1)\n self.find_element(*self.index_loginname_loc).click()\n sleep(1)\n self.find_element(*self.index_user_logout_button).click()\n\n # login name at top right\n def login_message(self):\n return self.find_element(*self.index_loginname_loc).text\n","sub_path":"epoque_admin/background/test_case/page_obj/userPage.py","file_name":"userPage.py","file_ext":"py","file_size_in_byte":11041,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"464781061","text":"import requests, json\nimport sys\n\nclass Payload(object):\n\tdef __init__(self, j):\n\t\tself.__dict__ = json.loads(j)\n\ndef getAccountCharacter():\n\t# Default values if no arguments are passed.\n\taccount, character = 'zoki2008', 'Kmica'\n\t# Set passed values from command line arguments.\n\tif len(sys.argv) > 2:\n\t\taccount, character = sys.argv[1], sys.argv[2]\n\n\treturn account, character\n\n#########################################################################\n\naccount, character = getAccountCharacter()\n\n# Configure the api call.\nAPI_URL = 'https://www.pathofexile.com/character-window/get-items'\npayload = 'character' + '=' + character + '&' + 'accountName' + '=' + account\n\n# Get response from api.\nr = requests.post(API_URL + '?' + payload)\n\n# Convert json to dict.\ndata = Payload(r.text)\n\nprint(data.character['name'])","sub_path":"api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":816,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"407925253","text":"from .issue import Issue\nfrom copy import copy\n\nclass Edge:\n\n def __init__(self, head, tail, vector_id, multiples=1):\n # the vector id is the index of the column that this vector was named\n # for\n self.vector_id = vector_id\n # this is the number of edges needed to complete the cycle this vector\n # is based off of (if any, remember 'basis edges' are selected by\n # the creator of the Kirchhoff graph)\n self.num_edges = multiples\n # these hold the head and tail positions of the edge\n self.head_position = head\n self.tail_position = tail\n # the first entry is the one at the tail, the second is the one at the\n # head\n self.vertices = [None, None]\n # finally we add a position so that this can be indexed by position\n self.position = head\n # this will hold the weight node, which will get assigned by the block\n self.weight = None\n\n \"\"\"\n this tries to add a vertex. 
If the edge is not redundant this function will \n return True letting us know the vertex was added to the edge and visa versa\n if it was redundant, we will return false therefore indicating the edge is \n unnecessary note that if it is redundant at the tail vertex it will be \n redundant at the other \n \"\"\"\n def AddVertices(self, tail_vertex, head_vertex):\n if tail_vertex.position != self.tail_position or head_vertex.position != self.head_position:\n raise Issue('one or both vertices do not touch edge')\n self.vertices[0] = tail_vertex\n self.vertices[1] = head_vertex\n if tail_vertex.AddEdge(self):\n head_vertex.AddEdge(self)\n return True # this lets us know the edge was accepted\n return False # let's us know the edge was rejected\n\n def __str__(self):\n return 'id:%shead:%stail:%s' % (self.vector_id, self.head_position, self.tail_position)\n\n def __hash__(self):\n return hash(str(self))\n\n def __repr__(self):\n return str(self)\n\n def __eq__(self, other):\n if self.head_position != other.head_position:\n return False\n if self.tail_position != other.tail_position:\n return False\n if self.vector_id != other.vector_id:\n return False\n return True\n\n\"\"\" \nthe following class handles creating, adding, and tracking edges \nover a vertex pool. This allows us to keep collective state \nabout the edge weights\n\"\"\"\nclass EdgePool:\n \n def __init__(self):\n self.edge_weights = []\n self.current_id = 0\n \n # these two methods allow us to keep track of the edge weight nodes\n def AddEdgeWeight(self, weight):\n self.edge_weights.append(weight)\n weight.weight_id = self.current_id\n self.current_id += 1\n \n def RemoveEdgeWeight(self):\n self.edge_weights.pop(-1)\n self.current_id -= 1\n\nclass Block:\n\n def __init__(self, vertex_pool, edge_pool):\n self.vertex_pool = vertex_pool\n self.edge_pool = edge_pool\n self.edges = []\n self.dimension = self.vertex_pool.dimension\n self.num_vectors = 0\n\n \"\"\"\n This method will attempt to add an edge. It will succeed if the edge it \n is adding is not redundant.\n If the action succeeds then we will append this edge to self.edges\n If it does not succeed we do not just pass on but also remove the node\n that represented the edge weight both from edge_weights and from the \n vertex pool's web\n \"\"\"\n def AddEdge(self, edge):\n # note these functions will create the vertices if they do not \n # yet exist. This is important for shift and add\n tail_vertex = self.vertex_pool.GetVertex(edge.tail_position)\n head_vertex = self.vertex_pool.GetVertex(edge.head_position)\n if edge.AddVertices(tail_vertex, head_vertex):\n # if the edge was accepted we add it to the block's list of edges\n self.edges.append(edge)\n else:\n self.edge_pool.RemoveEdgeWeight()\n self.vertex_pool.web.RemoveNode()\n \n \"\"\"\n We use this to create an edge because this handles not only calling the \n constructor for the edge, but creating a node for the edge weight and \n registering it with edge_weights. 
\n \"\"\"\n def CreateEdge(self, tail_position, head_position, vector_id, num_edges=1):\n edge = Edge(tail_position, head_position, vector_id, num_edges)\n edge.weight = self.vertex_pool.web.CreateNode()\n edge.weight.kind = 'edge'\n self.edge_pool.AddEdgeWeight(edge.weight)\n return edge\n \n def Size(self):\n return self.vertex_pool.size\n \n def Vertices(self):\n return self.vertex_pool.vertices\n \n \"\"\"\n This creates a copy of each edge in the block shifted by the input amount \n in the direction of the input dimension and attempts to add each \n into the block (creating vertices where needed through the implementation of \n AddEdge). Thus this essentially creates a copy of the block and adds the block\n and this copy together without ever duplicating an edge (because AddEdge \n will not add an edge if doing so we cause a duplicate) \n \"\"\"\n def AddShift(self, amount, dimension):\n if dimension > self.dimension:\n raise Issue('this dimension is outside of the dimensions of this block')\n num = len(self.edges)\n for i in range(0,num):\n edge = self.edges[i]\n new_head = copy(edge.head_position)\n new_head[dimension] += amount\n new_tail = copy(edge.tail_position)\n new_tail[dimension] += amount\n new_edge = self.CreateEdge(new_head,new_tail, edge.vector_id, edge.num_edges)\n self.AddEdge(new_edge)","sub_path":"kirky/kirky/edge.py","file_name":"edge.py","file_ext":"py","file_size_in_byte":5800,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"556620350","text":"\"\"\"\nThe application view router\n\"\"\"\nimport logging\nfrom datetime import timedelta\nimport uuid\n\nimport flask\nimport arrow\nfrom sqlalchemy import exc\n\nfrom scheduler import app, db\nimport schedbuilder as builder\nimport models\n\nLOGGER = logging.getLogger(__name__)\nLOGGER.addHandler(logging.StreamHandler())\n\n\n@app.route('/')\ndef index():\n \"\"\"\n The home page of the app. 
This is the only HTML that our app returns;\n the rest is JSON\n \"\"\"\n LOGGER.debug('Hit index page')\n return flask.render_template('index.html')\n\n\n@app.route('/register', methods=['POST'])\ndef register():\n \"\"\"\n Registers the user in the system.\n Registering a user also logs them in.\n \"\"\"\n LOGGER.info('Register user')\n data = flask.request.get_json()\n\n try:\n user = models.User.create(\n username=data['username'],\n password=data['password'],)\n coach = db.session.query(models.Coach).filter(\n models.Coach.id == data['coach']).one()\n client = models.Client(\n id=str(uuid.uuid1()),\n user_id=user.id,\n coach_id=coach.id)\n db.session.add(user)\n db.session.add(client)\n db.session.commit()\n status = True\n flask.session['user_id'] = user.id\n except exc.SQLAlchemyError:\n LOGGER.exception('User registration failed')\n status = False\n db.session.rollback()\n\n return flask.jsonify({'success': status})\n\n\n@app.route('/login', methods=['POST'])\ndef login():\n \"\"\"\n Log in the user and mark them as logged in on the session\n \"\"\"\n LOGGER.info('Login')\n data = flask.request.get_json()\n try:\n user = db.session.query(models.User).filter_by(username=data['username']).one()\n if user.password_matches(data['password']):\n flask.session['user_id'] = user.id\n return flask.jsonify({'success': True})\n except exc.SQLAlchemyError:\n LOGGER.exception('Login failed for %s', data['username'])\n db.session.rollback()\n\n return flask.jsonify({'success': False})\n\n\n@app.route('/logout', methods=['POST'])\ndef logout():\n \"\"\"\n Log out the user\n \"\"\"\n LOGGER.info('Logout')\n flask.session.pop('user_id', None)\n return flask.redirect('/')\n\n\n@app.route('/coaches')\ndef coaches():\n \"\"\"\n Retrieves a list of coaches in the db\n \"\"\"\n return flask.jsonify({\n 'coaches': [\n dict(\n id=coach.id,\n name=coach.user.fullname)\n for coach in db.session.query(models.Coach).all()]})\n\n\n@app.route('/hour/<string:today>/<int:hour>', methods=['PUT', 'DELETE'])\ndef hour(today, hour):\n \"\"\"\n Adds or removes a scheduled call for the specified day/hour\n \"\"\"\n user_id = flask.session.get('user_id')\n if not user_id:\n LOGGER.warn('Attempted infiltration by enemy agents')\n return flask.jsonify({})\n\n # get the client\n try:\n client = db.session.query(models.Client).filter(models.Client.user_id==user_id).one()\n except exc.SQLAlchemyError:\n LOGGER.exception('Failed to retrieve client')\n db.session.rollback()\n return flask.jsonify({})\n\n # add schedule\n if flask.request.method == 'PUT':\n LOGGER.info('Scheduling call for %s at %s',\n today, hour)\n try:\n builder.schedule_call(\n day=arrow.get(today).date(),\n hour=hour,\n client=client)\n except exc.SQLAlchemyError:\n LOGGER.exception('Failed to unschedule call')\n db.session.rollback()\n return flask.jsonify({})\n\n # remove schedule\n elif flask.request.method == 'DELETE':\n LOGGER.info('Unscheduling call for %s at %s',\n today, hour)\n try:\n builder.unschedule_call(\n day=arrow.get(today).date(),\n hour=hour,\n client=client)\n except exc.SQLAlchemyError:\n LOGGER.exception('Failed to unschedule call')\n db.session.rollback()\n return flask.jsonify({})\n\n return month(today)\n\n\n@app.route('/day/<string:today>')\ndef day(today):\n \"\"\"\n Gets the current calendar data for week containing `today`\n GET retrieves the data for the specified week.\n POST posts the appointments for a specified week.\n \"\"\"\n user_id = flask.session.get('user_id')\n if not user_id:\n LOGGER.warn('Attempted infiltration 
by enemy agents')\n return flask.jsonify({})\n\n try:\n client = db.session.query(models.Client).filter(models.Client.user_id==user_id).one()\n\n day = arrow.get(today).date()\n\n LOGGER.info('Getting calendar for %s', day)\n return flask.jsonify({\n 'name': client.user.username,\n 'coach': client.coach.user.fullname,\n 'previous': (day - timedelta(days=1)).isoformat(),\n 'next': (day + timedelta(days=1)).isoformat(),\n 'day': builder.calendar_day(day, client),\n })\n except exc.SQLAlchemyError:\n LOGGER.exception('Day retrieval failed for %s', user_id)\n db.session.rollback()\n return flask.jsonify({})\n\n\n@app.route('/month/<string:today>')\ndef month(today):\n \"\"\"\n Gets the current calendar data for month containing `today`\n\n GET retrieves the data for the specified month.\n POST posts the appointments for a specified month.\n \"\"\"\n user_id = flask.session.get('user_id')\n if not user_id:\n LOGGER.warn('Attempted infiltration by enemy agents')\n return flask.jsonify({})\n\n try:\n client = db.session.query(models.Client).filter(models.Client.user_id==user_id).one()\n\n start_day = arrow.get(today).date()\n days = builder.monthdays(start_day)\n\n LOGGER.info('Getting calendar for month %s', month)\n return flask.jsonify({\n 'name': client.user.username,\n 'coach': client.coach.user.fullname,\n 'month': start_day.strftime('%B %Y'),\n 'previous': (days[0] - timedelta(days=1)).isoformat(),\n 'next': (days[-1] + timedelta(days=1)).isoformat(),\n 'weeks': builder.calendar_days(days, client),\n })\n except exc.SQLAlchemyError:\n LOGGER.exception('Week retrieval failed for %s', user_id)\n db.session.rollback()\n return flask.jsonify({})\n\n","sub_path":"src/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":6359,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"279654378","text":"#!/usr/bin/env python3\n\nimport os\nfrom random import randrange\nfrom math import ceil\n\n# Variables du jeu\nplayerMoney = 200\nplayerNumber = -1\nplayerActualBet = -1\nwinningNumber = -1\ncontinueGame = True\n\n\"\"\"\nif(playerMoney < 1):\n\tprint(\"You don't have enough money to play, the game is stopping now :(\")\n\"\"\"\n\n\ndef controlUserNumberInput(playerNumber):\n\tcorrect = False\n\twhile correct != True:\n\t\tplayerNumber = input(\"Pick a number between 0 and 49 : \")\n\t\ttry:\n\t\t\tplayerNumber = int(playerNumber)\n\t\texcept ValueError:\n\t\t\tprint(\"This is not a number !\")\n\t\t\tcontinue\n\t\tif playerNumber > 0 and playerNumber < 49:\n\t\t\tcorrect = True\n\n\treturn playerNumber\n\n\ndef controlUserBetInput(playerMoney, playerActualBet):\n\tcorrect = False\n\twhile correct != True:\n\t\tplayerActualBet = input(\"Choose your bet between 5, 10 or 25 : \")\n\t\ttry:\n\t\t\tplayerActualBet = int(playerActualBet)\n\t\texcept ValueError:\n\t\t\tprint(\"This is not a number !\")\n\t\t\tcontinue\n\t\tif playerActualBet == 5 or playerActualBet == 10 or playerActualBet == 25:\n\t\t\tif playerActualBet > playerMoney:\n\t\t\t\tprint(\"You cannot bet that much, you only have %d dollars left\" %playerMoney)\n\t\t\telse:\n\t\t\t\tcorrect = True\n\n\treturn playerActualBet\n\n\ndef controlUserMoney(playerNumber, winningNumber, playerActualBet, playerMoney):\n\tprint(\"\\nWinning number : %d\" %winningNumber)\n\tprint(\"Your number : %d\\n\" %playerNumber)\n\t\n\tif playerNumber == winningNumber:\n\t\tprint(\"Congrats, you have the correct number !\")\n\t\tplayerMoney += 3 * playerActualBet\n\telif playerNumber%2 == 
winningNumber%2:\n\t\tprint(\"Your number is the same color as the winning number\")\n\t\tplayerMoney += playerActualBet\n\n\telse:\n\t\tprint(\"It is lost for this try, better luck next time :)\")\n\t\tplayerMoney -= playerActualBet \n\n\tprint(\"You now have %d dollars\\n\\n\" %playerMoney)\n\treturn playerMoney\n\ndef checkIfEnoughMoney(playerMoney):\n\tif playerMoney < 5:\n\t\tprint(\"You have not enough money to continue playing => GAME OVER\")\n\t\treturn False\n\telse:\n\t\treturn True\n\n\nprint(\"Beginning of the game : you start with %d dollars\" %playerMoney)\nwhile continueGame:\n\tplayerNumber = controlUserNumberInput(playerNumber)\n\tplayerActualBet = controlUserBetInput(playerMoney, playerActualBet);\n\twinningNumber = randrange(0,50)\n\tplayerMoney = controlUserMoney(playerNumber, winningNumber, playerActualBet, playerMoney)\n\tcontinueGame = checkIfEnoughMoney(playerMoney)\n\n\n\n","sub_path":"zcasino.py","file_name":"zcasino.py","file_ext":"py","file_size_in_byte":2346,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"201883879","text":"from __future__ import unicode_literals\nfrom __future__ import absolute_import\n\nimport os\nimport jwt\nimport json\nimport base64\nimport time\n\nfrom six.moves.configparser import ConfigParser\nfrom six.moves.urllib.parse import urlencode\n\nfrom qvarntesting import services\nfrom qvarntesting.http import json_response\nfrom qvarntesting.tokens import get_jwt_token\nfrom qvarntesting.templating import render\n\n\ndef auth_token(request, start_response):\n start_response('200 OK', [('Content-type', 'application/json')])\n\n method, value = request.authorization\n if method == 'Basic':\n clientid, secret = base64.b64decode(value).split(b':', 1)\n if not isinstance(clientid, str):\n clientid = clientid.decode('UTF-8')\n if not isinstance(secret, str):\n secret = secret.decode('UTF-8')\n else:\n raise Exception(\"%s authentication method is not supported.\" % method)\n\n if request.POST['grant_type'] == 'authorization_code':\n if os.environ.get('APP_CONFIG'):\n config = ConfigParser()\n config.read(os.environ['APP_CONFIG'])\n scopes = (config.get('qvarn', 'scope') or '').replace(',', ' ').split()\n else:\n scopes = []\n\n user = services.get_gluu_user(int(request.POST['code']))\n\n return json_response({\n 'access_token': get_jwt_token({\n 'scope': ' '.join(scopes),\n 'aud': clientid,\n 'sub': request.POST['code'],\n 'c_hash': 'c-hash',\n }),\n 'id_token': get_jwt_token({\n 'scope': ' '.join(scopes),\n 'aud': clientid,\n 'sub': request.POST['code'],\n 'inum': request.POST['code'],\n 'at_hash': 'at-hash',\n 'email': next((x['value'] for x in user['emails'] if x['primary'] == 'true'), ''),\n 'family_name': user.get('name', {}).get('familyName'),\n 'given_name': user.get('name', {}).get('givenName'),\n 'user_name': user.get('userName', ''),\n 'name': user.get('displayName', ''),\n }),\n 'refresh_token': 'refresh-token',\n })\n\n else:\n now = time.time()\n return json_response({\n 'access_token': get_jwt_token({\n 'aud': clientid,\n 'sub': clientid,\n 'scope': request.POST['scope'],\n\n }),\n 'expires_in': now + 3600,\n 'scope': request.POST['scope'],\n 'token_type': 'bearer',\n })\n\n\ndef authorize(request, start_response):\n start_response('200 OK', [('Content-type', 'text/html')])\n\n users = []\n redirect_url = request.GET['redirect_uri']\n with services.database() as db:\n for row in db['gluu_user'].all():\n user = services.get_gluu_user(row['id'])\n user['redirect_url'] = 
redirect_url + '?' + urlencode({\n 'state': request.GET.get('state'),\n 'code': user['id'],\n })\n users.append(user)\n\n return render('authorize.html', {\n 'users': users,\n })\n\n\ndef user_info(request, start_response):\n start_response('200 OK', [('Content-type', 'application/json')])\n\n method, value = request.authorization\n if method == 'Bearer':\n token = jwt.decode(value, verify=False)\n else:\n raise Exception(\"%s authentication method is not supported.\" % method)\n\n user = services.get_gluu_user(int(token['sub']))\n\n return json_response({\n 'sub': token['sub'],\n 'inum': token['sub'],\n 'email': next((x['value'] for x in user['emails'] if x['primary'] == 'true'), ''),\n 'family_name': user.get('name', {}).get('familyName'),\n 'given_name': user.get('name', {}).get('givenName'),\n 'user_name': user.get('userName', ''),\n 'name': user.get('displayName', ''),\n })\n\n\ndef rsrc_pr(request, start_response):\n start_response('200 OK', [('Content-type', 'application/json')])\n return json_response({\n 'ticket': 'rsrc-pr-ticket',\n })\n\n\ndef perm(request, start_response):\n start_response('200 OK', [('Content-type', 'application/json')])\n return json_response({\n 'rpt': 'rtp-perm',\n })\n\n\ndef get_user(request, start_response):\n start_response('200 OK', [('Content-type', 'application/json')])\n\n _, userid = request.path.rsplit('/', 1)\n user = services.get_gluu_user(int(userid))\n\n return json_response(user)\n\n\ndef create_user(request, start_response):\n start_response('200 OK', [('Content-type', 'application/json')])\n\n with services.database() as db:\n user_id = db['gluu_user'].insert({\n 'data': json.dumps(request.json),\n })\n\n return json_response(services.get_gluu_user(user_id))\n\n\ndef get_users(request, start_response):\n start_response('200 OK', [('Content-type', 'application/json')])\n\n with services.database() as db:\n users = [\n services.get_gluu_user(row['id'])\n for row in db['gluu_user'].all()\n ]\n\n return json_response(users)\n","sub_path":"qvarntesting/views/gluu.py","file_name":"gluu.py","file_ext":"py","file_size_in_byte":5103,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"306131273","text":"'''\nCrie um programa para o governo, onde é possivel armazenar o nome de uma\npessoa, rg, cpf e depois possa armazenar esses dados num formato de bytes\n1233456789\n123.456.789-10\n'''\nimport struct\nimport dbm\n\nclass InvalidRg(Exception):\n pass\n\nclass InvalidCpf(Exception):\n pass\n\nclass FileAlreadyExists(Exception):\n pass\n\n\ndef main():\n while True:\n operation = input('Digite a operação desejada: (n = novo cadastro, p = pesquisar cadastro, a = apagar cadastro): ')\n if operation not in 'npa' and len(operation) > 1:\n print('Comando inválido. Tente novamente.')\n elif operation == 'n':\n NewEntry()\n elif operation == 'p':\n while True:\n operation = input('p = procurar por nome, l = mostrar nomes cadastrados, c = cancelar: ')\n if operation not in 'plc' and len(operation) > 1:\n print('Comando inválido. 
Tente novamente.')\n elif operation == 'p':\n name = input('Digite o nome: ')\n try:\n SearchData(name)\n except FileAlreadyExists:\n ReadData(name)\n break\n \n else:\n print('Arquivo nao encontrado.')\n \n elif operation == 'l':\n ShowList()\n break\n \n else:\n break\n else:\n while True:\n name = input('Digite o nome: ')\n try:\n SearchData(name)\n except FileAlreadyExists:\n ReadData(name)\n while True:\n operation = input('Deseja mesmo apagar esse cadastro?(s/n): ')\n if operation not in 'sn' and len(operation) > 1:\n print('Comando inválido. Tente novamente.')\n elif operation == 's':\n EraseData(name)\n print('Dados apagados com sucesso.')\n break\n else:\n break\n break\n \n else:\n print('Nome não encontrado, tente novamente.')\n \ndef NewEntry():\n '''\n Cria um novo cadastro, com nome, RG e CPF.\n '''\n valid_name = False\n valid_rg = False\n valid_cpf = False\n\n while True:\n while not valid_name:\n name = input('Por favor, digite seu nome: ')\n if not name.isalpha():\n print('Nome inválido! Tente novamente.')\n else:\n valid_name = True\n try: \n SearchData(name)\n except FileAlreadyExists:\n decision = input('O nome já foi cadastrado. Deseja modificar o cadastro?(s/n): ')\n while True:\n if decision not in 'sn' and len(decision) > 1:\n print('Comando inválido. Tente Novamente.')\n elif decision == 's':\n break\n else:\n valid_name = False\n break\n \n while not valid_rg:\n try:\n rg = int(input('Por favor, digite o seu RG: '))\n CheckRG(rg)\n except (InvalidRg, ValueError):\n print('RG invlálido! Tente novamente.')\n else:\n valid_rg = True\n \n while not valid_cpf:\n try:\n cpf = input('Por favor, digite o seu cpf (formato: xxx.xxx.xxx-xx): ')\n CheckCPF(cpf)\n except InvalidCpf:\n print('CPF inválido! Tente novamente.')\n else:\n valid_cpf = True\n \n if valid_name and valid_rg and valid_cpf:\n break\n \n CodeData(name, rg, cpf)\n print('Arquivo empacotado com sucesso.')\n\ndef CheckRG(rg):\n '''\n Checa se o RG é valido.\n '''\n if len(str(rg)) > 10:\n raise InvalidRg\n\ndef CheckCPF(cpf):\n '''\n Checa se o CPF é válido.\n '''\n if len(cpf) != 14:\n raise InvalidCpf\n if cpf[3] and cpf[7] != '.':\n raise InvalidCpf\n if cpf[11] != '-':\n raise InvalidCpf\n cpf = cpf.replace('-', '.')\n cpf = cpf.split('.')\n for i in range(len(cpf)):\n if not cpf[i].isdigit():\n raise InvalidCpf\n\ndef CodeData(name, rg, cpf):\n '''\n Empacota os dados obtidos em NewEntry()\n '''\n coded_name = name.encode()\n form = '{}s I 14s'.format(len(name))\n data = struct.pack(form, coded_name, rg, cpf.encode())\n WriteData(data, name)\n\ndef WriteData(data, name):\n '''\n Armazena os dados empacotados no banco de dados.\n '''\n db = dbm.open('data.db', 'c')\n db[name] = data\n db.close()\n\ndef SearchData(name):\n '''\n Verifica se o nome pesquisado existe no banco de dados.\n '''\n db = dbm.open('data.db', 'c')\n if name in db:\n db.close()\n raise FileAlreadyExists\n\ndef ShowList():\n '''\n Mostra todos os dados registrados no banco de dados.\n '''\n db = dbm.open('data.db', 'c')\n for key in db:\n print(key.decode())\n db.close()\n \ndef ReadData(name):\n '''\n Desempacota os dados armazenados e os exibe na tela.\n '''\n db = dbm.open('data.db', 'c')\n packed_data = db[name]\n form = '{}s I 14s'.format(len(name))\n data = struct.unpack(form, packed_data)\n db.close()\n print('Nome = %s'%data[0].decode())\n print('RG = %i'%data[1])\n print('CPF = %s'%data[2].decode())\n\ndef EraseData(name):\n '''\n Apaga uma chave do banco de dados.\n '''\n db = dbm.open('data.db', 'c')\n del db[name]\n 
db.close()\n\nif __name__ == '__main__':\n main()\n","sub_path":"armazenamento.py","file_name":"armazenamento.py","file_ext":"py","file_size_in_byte":5971,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"306583502","text":"#!/usr/bin/env python -u\n# -*- coding: utf-8 -*-\n\"\"\"\nPull titles from datastore\n\"\"\"\n\nfrom __future__ import print_function\n\nimport argparse\nimport epedia\nimport pypandoc\nimport random\nimport sys\nimport time\nimport wptools\n\n\nclass EpediaPuller:\n \"\"\"\n Pull pages by title from datastore\n \"\"\"\n\n convert_errors = 0\n disambiguations = 0\n found_count = 0\n notfound_count = 0\n outfile = None\n page_disposition = \"\"\n page_found = False\n page_kb = 0\n pull_count = 0\n pull_sec = 0\n redirects = 0\n\n def __init__(self, titles, store, cformat=None, outfile=None, sample=0):\n self.start = time.time()\n self.cformat = cformat\n self.titles = open(titles, 'r')\n self.store = store\n self._init_sample(sample)\n self._init_outfile(outfile)\n\n def _init_sample(self, sample):\n self.sample = sample\n if sample:\n self.num_titles = self.get_num_titles()\n self.sample = self.sample_range(sample)\n\n def _init_outfile(self, outfile):\n stream = sys.stdout\n if outfile:\n self.outfile = open(outfile, 'w')\n stream = self.outfile\n if self.cformat and 'html' in self.cformat:\n print(\"<!doctype html><head><meta charset=utf-8></head>\",\n file=stream)\n\n def __del__(self):\n if self.outfile:\n self.outfile.close()\n self.titles.close()\n fin = []\n fin.append(\"%d titles pulled\" % (self.pull_count))\n fin.append(\"%d found\" % self.found_count)\n fin.append(\"- %d convert errors\" % self.convert_errors)\n fin.append(\"- %d disambiguations\" % self.disambiguations)\n fin.append(\"- %d notfound\" % self.notfound_count)\n fin.append(\"- %d redirects\" % self.redirects)\n fin.append(\"%5.3f seconds\" % (time.time() - self.start))\n print(\"\\n\".join(fin), file=sys.stderr)\n\n def emit(self, title, content):\n stream = sys.stdout\n if self.outfile:\n stream = self.outfile\n print(self.separator(title), file=stream)\n print(content, file=stream)\n\n def pull_titles(self):\n \"\"\"walk titles, pull pages, and emit converted\"\"\"\n count = 0\n for title in self.titles:\n title = title.strip()\n if self.sample and count not in self.sample:\n count += 1\n continue\n page = self.pull_page(title)\n content = self.page_disposition\n if self.page_found:\n wikitext = epedia.utils.wikitext(page)\n epl = epedia.EpediaLead(title, wikitext, debug=self.debug)\n lead = epl.lead()[\"lead\"]\n if self.cformat:\n content = self.convert(lead)\n else:\n epc = epedia.EpediaConvert(debug=self.debug)\n content = epc.plain(lead)[\"plain\"]\n self.page_disposition = self.content_disposition(content)\n self.emit(title, content)\n self.report(title)\n count += 1\n\n def content_disposition(self, content):\n dispo = self.page_disposition\n if \"may refer to\" in content:\n dispo = \"DISAMBIGUATION\"\n self.disambiguations += 1\n if \"{{Disambiguation}}\" in content:\n dispo = \"DISAMBIGUATION\"\n self.disambiguations += 1\n if \"REDIRECT\" in content:\n dispo = \"REDIRECT\"\n self.redirects += 1\n return dispo\n\n def convert(self, wikitext):\n try:\n cout = pypandoc.convert(wikitext, self.cformat, format='mediawiki')\n except Exception as detail:\n cout = self.convert_error(detail)\n try:\n return cout.encode('utf-8')\n except:\n return cout\n\n def convert_error(self, exc):\n err = str(exc)\n if err.startswith(\"Invalid output 
format!\"):\n raise\n if 'html' in self.cformat:\n err = \"<pre>%s</pre>\" % err\n self.convert_errors += 1\n self.page_disposition = \"CONVERTERR\"\n return err\n\n def get_num_titles(self):\n \"\"\"returns number of titles found in input file\"\"\"\n num = len(self.titles.read().split(\"\\n\")) - 1\n self.titles.seek(0)\n return num\n\n def pull_page(self, title):\n \"\"\"returns <page/> from datastore by title\"\"\"\n sec = time.time()\n parser = epedia.EpediaPullParser(title, self.store)\n content = parser.get_page()\n self.pull_count += 1\n if content == \"NOTFOUND\":\n self.page_disposition = content\n self.page_found = False\n self.page_kb = 0\n self.notfound_count += 1\n else:\n self.page_disposition = \"OK\"\n self.page_found = True\n self.page_kb = int(sys.getsizeof(content) / 1000.0)\n self.found_count += 1\n self.pull_sec = time.time() - sec\n return content\n\n def report(self, title):\n rep = []\n rep.append(\"[%d]\" % (self.pull_count))\n rep.append(\"%5.3f sec\" % self.pull_sec)\n rep.append(\"%s KB\" % self.page_kb)\n rep.append(self.page_disposition)\n rep.append(\"<< %s\" % title)\n print(\" \".join(rep), file=sys.stderr)\n\n def sample_range(self, howmany):\n \"\"\"returns random sample of title indices\"\"\"\n return random.sample(xrange(self.num_titles), howmany)\n\n def separator(self, title):\n sep = []\n text = title.strip()\n href = self.wiki_url(title)\n if self.cformat and 'html' in self.cformat:\n sep.append(\"\\n<hr>\")\n link = \"<a href=\\\"%s\\\">%s</a>\" % (href, text)\n sep.append(\"[%d] <b>%s</b>\" % (self.pull_count, link))\n else:\n sep.append(\"\\n[%d] <%s>\\n\" % (self.pull_count, href))\n return \"\\n\".join(sep)\n\n def wiki_url(self, title):\n wiki = wptools.WPToolsFetch.ENDPOINT\n path = wptools.utils.wiki_path(title)\n return \"%s%s\" % (wiki, path)\n\n\ndef main(tfile, sdir, cformat, outfile, sample, verbose):\n plr = EpediaPuller(tfile, sdir, cformat, outfile, sample)\n plr.debug = verbose\n try:\n plr.pull_titles()\n except RuntimeError as detail:\n print(str(detail))\n except KeyboardInterrupt:\n print(\" ABORT\", file=sys.stderr)\n\n\nif __name__ == \"__main__\":\n desc = \"Pull titles from datastore\"\n argp = argparse.ArgumentParser(description=desc)\n argp.add_argument(\"tfile\", help=\"titles file\")\n argp.add_argument(\"store\", help=\"datastore directory\")\n argp.add_argument(\"-f\", \"-format\", help=\"Pandoc conversion format\")\n argp.add_argument(\"-o\", \"-outfile\", help=\"output file\")\n argp.add_argument(\"-s\", \"-sample\", type=int,\n help=\"sample S (int) titles\")\n argp.add_argument(\"-v\", \"-verbose\", action='store_true',\n help=\"verbose output\")\n args = argp.parse_args()\n main(args.tfile, args.store, args.f, args.o, args.s, args.v)\n","sub_path":"pull.py","file_name":"pull.py","file_ext":"py","file_size_in_byte":7007,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"366867646","text":"\"\"\"\nGiven a list of integers S and a target number k, write a function that returns a subset of S\nthat adds up to k. If such a subset cannot be made, then return null. Integers can appear more\nthan once in the list. 
You may assume all numbers in the list are positive.\n\nInput: S = [12, 1, 61, 5, 9, 2] and k = 24\nOutput: [12, 9, 2, 1] since it sums up to 24.\n\"\"\"\n\nfrom typing import List, Optional\n\nclass SubsetSum:\n\n\tdef __init__(self, nums: List[int]):\n\t\tself.nums = nums\n\t\tself.max_sum = 50\n\t\tself._build_table()\n\n\tdef _build_table(self) -> None:\n\t\t\"\"\"\n\t\tThis method will build a table to the max sum and we will query this table to get our \n\t\tresults/subsets.\n\t\t\"\"\"\n\n\t\trows = len(self.nums)\n\t\t# Initializing table values\n\t\tself.table = [[False]*(self.max_sum+1) for row in range(rows)]\n\n\t\tfor row in range(rows):\n\t\t\tfor col in range(self.max_sum+1):\n\t\t\t\t# If target is zero then we will have empty subset\n\t\t\t\tif col == 0:\n\t\t\t\t\tself.table[row][col] = True\n\n\t\t\t\t# Checking if the current no. is greater than the target sum\n\t\t\t\telif self.nums[row] > col:\n\t\t\t\t\tif row > 0:\n\t\t\t\t\t\t# Will ignore the current no.\n\t\t\t\t\t\tself.table[row][col] = self.table[row-1][col]\n\n\t\t\t\t# We can include or ignore the current no.\n\t\t\t\telse:\n\t\t\t\t\tif row == 0:\n\t\t\t\t\t\tif self.nums[row] == col:\n\t\t\t\t\t\t\tself.table[row][col] = True\n\t\t\t\t\telse:\n\t\t\t\t\t\tself.table[row][col] = self.table[row-1][col] or self.table[row-1][col-self.nums[row]]\n\n\tdef check_subset(self, target_sum: int) -> Optional[List[int]]:\n\t\t\"\"\"\n\t\tThis method will return a subset that adds up to the target sum if possible or none if there is no such \n\t\tpossible subset.\n\n\t\t:param target_sum: target sum to be achieved\n\t\t:rtype: subset of no.s that can sum up to the target or else none\n\t\t\"\"\"\n\n\t\tif target_sum > self.max_sum:\n\t\t\tprint(\"Target sum cannot be greater than {}\".format(self.max_sum))\n\t\t\treturn None\n\n\t\trows = len(self.nums)\n\n\t\t# Checking if the target sum is achievable\n\t\tcheck = self.table[rows-1][target_sum]\n\t\tif check:\n\t\t\tsubset = []\n\t\t\t# walk the table back from the last row to recover one valid subset\n\t\t\trow = rows - 1\n\t\t\twhile target_sum != 0:\n\t\t\t\tif row != 0:\n\t\t\t\t\t# the current no. is needed only if the sum is unreachable without it\n\t\t\t\t\tif not self.table[row-1][target_sum]:\n\t\t\t\t\t\tsubset.append(self.nums[row])\n\t\t\t\t\t\ttarget_sum -= self.nums[row]\n\t\t\t\telse:\n\t\t\t\t\tsubset.append(self.nums[row])\n\t\t\t\t\ttarget_sum -= self.nums[row]\n\n\t\t\t\trow -= 1\n\n\t\t\treturn subset\n\n\t\treturn None\n\nif __name__ == \"__main__\":\n\tob = SubsetSum([6, 1, 5, 20, 25, 4, 12])\n\tprint(ob.check_subset(45))","sub_path":"Arrays_Strings/subset_sum.py","file_name":"subset_sum.py","file_ext":"py","file_size_in_byte":2360,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"286558803","text":"# -*- coding: utf-8 -*-\n\"\"\"Bin the shapiro step data (current vs power) for different parallel fields\n\nThe plot is done as a function of power and frequency and the power is\nnormalized by the power at which the step 0 disappears.\n\nThe plot uses the following axes:\n- x axis: Normalized power\n- y axis: Frequency\n- color axis: bin count in current\n\n\"\"\"\n\n# =============================================================================\n# --- Parameters --------------------------------------------------------------\n# =============================================================================\n\n#: Labber directory to walk to find all the data files to analyse\nLABBER_DIRECTORY = '/Users/mdartiailh/Labber/Data/2019'\n\n#: CSV file containing the frequencies, gate, fields etc associated to each\n#: measurement.\n#: The expected column names are:\n#: Frequency, Gate 
voltage V, Parallel field mT, Critical current,\n#: Normal resistance, Ic file determination, Rn file determination,\n#: Attenuation, Shapiro, Comment\nCSV_SUMMARY_PATH = ('/Users/mdartiailh/Documents/PostDocNYU/DataAnalysis/'\n 'Shapiro/2019-01/2019-data-summary.csv')\n\n#: Path of the directory in which to store the results.\nRESULT_PATH = ('/Users/mdartiailh/Documents/PostDocNYU/DataAnalysis/'\n 'Shapiro/2019-01/StepWidthAnalysis')\n\n#: Index of the setps for which to generate a plot.\nSTEP_INDEXES = [0, 1, 2, 3, 4]\n\n#: Name of the column containing the frequency for scans in which multiple\n#: frequencies exist.\nFREQUENCY_NAME = ('SC_C - Frequency', 'EXG - Frequency')\n\n#: Name of the column containing the gate voltage for scans in which multiple\n#: gate voltages exist.\nGATE_NAME = ('Keithley 1 - Source voltage', )\n\n#: Name of the column containing the parallel field for scans in which multiple\n#: parallel fields exist.\nFIELD_NAME = ('Magnet - By', )\n\n#: Name or index of the column containing the power data\nPOWER_NAME = ('SC_C - Amplitude', 'EXG - Power')\n\n#: Name or index of the column containing the current data\n#: This should be a stepped channel ! use the applied voltage not the\n#: measured current\nCURRENT_NAME = 'Yoko 1 - Voltage'\n\n#: Name or index of the column containing the voltage data\nVOLTAGE_NAME = 'DMM 1 - Value'\n\n#: Number of points on which to average to correct the offset in the measured\n#: voltage. Use zero to not correct.\nCORRECT_VOLTAGE_OFFSET = 20\n\n#: Conversion factor to apply to the current data (allow to convert from\n#: applied voltage to current bias).\nCURRENT_CONVERSION = 1e-6\n\n#: Fraction of a shapiro step used for binning\nSTEP_FRACTION = 0.1\n\n#: Threshold as fraction of the low power step used to identify the normalizing\n#: power, defined at the first power for which the count of the step 0 is below\n#: the threshold.\nNORMALIZING_THRESHOLD = 0.05\n\n#: Should the plots allowing to check the normalizing power be displayed.\nPLOT_NORM_POWER_CHECK = False\n\n# =============================================================================\n# --- Execution ---------------------------------------------------------------\n# =============================================================================\nimport os\nimport warnings\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\nfrom shabanipy.jj.shapiro import normalize_db_power\nfrom shabanipy.jj.shapiro.binning import (bin_power_shapiro_steps,\n extract_step_weight)\nfrom shabanipy.utils.labber_io import LabberData\nfrom shabanipy.utils.file_discovery import list_all_files, filter_files\n\nsummary = pd.read_csv(CSV_SUMMARY_PATH)\n\n# Match only the files corresponding to actual Shapiro steps data.\nfile_ids = [f'{s:03d}' for s in set(summary['Shapiro'])]\npattern = f\"JS124S_BM002_({'|'.join(file_ids)}).hdf5$\"\n\n# Identify the measurement by their number to allow to easily retrieve the\n# matching parameters.\npaths = {int(os.path.split(path)[1][-8:-5]): path\n for path in filter_files(list_all_files(LABBER_DIRECTORY), pattern)}\n\n# Create the analysis summary that contains additional information compared to\n# to the data summary.\nanalysis_summary = {'Meas id': [], 'Frequency': [], 'Gate': [], 'Field': [],\n 'Rn': [], 'Ic': [], 'Pnorm': []}\n\nfor _, parameters in summary.iterrows():\n\n mid = parameters['Shapiro']\n path = paths[mid]\n\n frequency, gate, field = (parameters['Frequency'],\n parameters['Gate voltage V'],\n parameters['Parallel 
field mT'])\n\n ic, rn, att = (parameters['Critical current'],\n parameters['Normal resistance'],\n parameters['Attenuation'])\n print(f'\\nTreating data for dataset {mid}\\n'\n f'Frequency {frequency} GHz\\n'\n f'Gate voltage {gate} V\\n'\n f'Magnetic field {field} mt')\n\n with LabberData(path) as data:\n\n filters = {}\n channels = data.list_channels()\n power_name = [p for p in POWER_NAME if p in channels][0]\n for names, val in zip((FREQUENCY_NAME, GATE_NAME, FIELD_NAME),\n (frequency*1e9, gate, field)):\n for name in names:\n if name in channels:\n filters[name] = val\n step_counts = {s_i: None for s_i in STEP_INDEXES}\n\n shape = data.compute_shape((power_name, CURRENT_NAME))\n\n power = data.get_data(power_name, filters) + att\n curr = data.get_data(CURRENT_NAME, filters)\n volt = data.get_data(VOLTAGE_NAME, filters)\n\n # Handle interruptions in the last scan.\n while len(power) < shape[0]*shape[1]:\n shape[1] -= 1\n\n length = shape[0]*shape[1]\n power = power[:length].reshape(shape)\n volt = volt[:length].reshape(shape)\n curr = curr[:length].reshape(shape)\n\n # Filter out rows that contain a Nan (skipped values)\n mask = np.isfinite(volt).all(axis=0)\n power = power.T[mask].T\n volt = volt.T[mask].T\n curr = curr.T[mask].T\n\n if CORRECT_VOLTAGE_OFFSET:\n avg_len = CORRECT_VOLTAGE_OFFSET\n low_power_ind = np.unravel_index(np.argmin(power), shape)[1]\n zero_curr_ind = np.unravel_index(np.argmin(np.abs(curr)), shape)[0]\n volt -= np.mean(volt[zero_curr_ind:zero_curr_ind + avg_len,\n low_power_ind])\n\n # Convert the current data if requested\n if CURRENT_CONVERSION is not None:\n curr *= CURRENT_CONVERSION\n\n # Bin the data\n power, voltage, histo = bin_power_shapiro_steps(power, curr, volt,\n frequency*1e9,\n STEP_FRACTION)\n\n # Find the normalizing power\n step_0 = extract_step_weight(voltage, histo, 0)\n indexes = np.where(np.less(step_0, step_0[0]*NORMALIZING_THRESHOLD))[0]\n if len(indexes):\n print(f'\\tAt f={frequency} threshold power: {power[indexes][0]}')\n norm_p = power[np.min(indexes)]\n else:\n plt.plot(power, step_0)\n plt.show()\n msg = ('\\tPower was always lower than threshold for '\n f'f={frequency} GHz')\n warnings.warn(msg)\n norm_p = power[-1]\n\n if mid == 150:\n norm_p = -0.4\n\n if PLOT_NORM_POWER_CHECK:\n plt.figure()\n plt.imshow(volt.T,\n extent=(curr[0, 0], curr[-1, 0], power[0], power[-1]),\n origin='lower',\n aspect='auto')\n cbar = plt.colorbar()\n plt.axhline(norm_p)\n plt.show()\n\n for n, v in zip(('Meas id', 'Frequency', 'Gate', 'Field', 'Rn', 'Ic',\n 'Pnorm'),\n (mid, frequency, gate, field, rn, ic, norm_p)):\n analysis_summary[n].append(v)\n\n # Fill the results\n norm_power = normalize_db_power(power, norm_p)\n norm_bessel_arg = (2*1.6e-19*rn*ic/(6.626e-34*frequency*1e9) *\n np.power(10, norm_power/20))\n for i in step_counts:\n step_counts[i] = extract_step_weight(voltage, histo, i)/ic/1e-6\n\n to_save = {}\n to_save['Log power'] = norm_power\n to_save['Scaled ac current'] = norm_bessel_arg\n for s in STEP_INDEXES:\n to_save[f'Step{s}'] = step_counts[s]\n table = pd.DataFrame(to_save)\n filename = f'{mid}_f={frequency}_g={gate}_b={field}.dat'\n with open(os.path.join(RESULT_PATH, filename), 'w') as f:\n f.write(f'# Step fraction {STEP_FRACTION}\\n'\n f'# Normalizing threshold {NORMALIZING_THRESHOLD}\\n'\n f'# Source file: {path}\\n')\n table.to_csv(f, index=False)\n\ntable = pd.DataFrame(analysis_summary)\nwith open(os.path.join(RESULT_PATH, 'analysis_summary.csv'), 'w') as f:\n f.write(f'# Normalizing threshold 
{NORMALIZING_THRESHOLD}\\n')\n    table.to_csv(f, index=False)\n","sub_path":"scripts/jj/shapiro/shapiro_bessel_weigth.py","file_name":"shapiro_bessel_weigth.py","file_ext":"py","file_size_in_byte":8962,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"349296431","text":"import numpy as np\nfrom numpy import linalg as LA\nfrom sklearn import datasets\nfrom sklearn import decomposition\nfrom sklearn.lda import LDA\nimport matplotlib.pyplot as plt\n\n\n#PCA, LDA codes using numpy\n\n#class LDA\nclass LDa:\n    def __init__(self,x,y):\n        self.x= x\n        self.y= y\n        self.class_means= [np.mean(x[y==z],axis=0) for z in set(y)]\n        self.mean = np.mean(x,axis=0)\n        self.n_feature = len(x[0])\n\n        n=self.n_feature\n        scatter_matrix = np.zeros((n,n))\n        for target,mv in zip(list(set(self.y)), self.class_means):\n            sc = np.zeros((n,n))\n            for row in self.x[self.y == target]:\n                row, mv = row.reshape(n,1), mv.reshape(n,1)\n                sc += np.dot((row-mv),(row-mv).T)\n            scatter_matrix += sc\n\n        between_class_scatter = np.zeros((n,n))\n        for i,class_mean in zip(range(len(self.class_means)),self.class_means):\n            N = self.x[self.y==i,:].shape[0]\n            mean =self.mean.reshape(n,1)\n            class_mean = class_mean.reshape(n,1)\n            between_class_scatter += N * np.dot((class_mean - mean),((class_mean - mean).T))\n\n        # eigen-decomposition of inv(within-class scatter) . (between-class scatter)\n        evl, evec = LA.eig(np.dot(LA.inv(scatter_matrix),(between_class_scatter)))\n        self.evl= evl\n        self.evec = evec\n\n    def transform(self,n_components):\n        lda = np.dot(self.x,self.evec)\n        return lda[:,0:n_components]\n\n    def explained_variance_ratio(self):\n        return [x/sum(self.evl) for x in self.evl ]\n\n\n\n\n#class PCA \nclass PCa:\n\tdef __init__(self,data):\n\t\tself.basedata = data\n\t\tself.mean0data = data - data.mean(axis=0)\n\t\tself.covx = np.cov(self.mean0data.T)\n\n\tdef explained_variance_ratio(self):\n\t\tevl, evec = LA.eig(self.covx)\n\t\treturn [x/sum(evl) for x in evl ]\n\n\tdef explained_variance(self):\n\t\tevl, evec = LA.eig(self.covx)\n\t\treturn evl\n\n\tdef transform(self,n_components):\n\t\tif n_components > len(self.basedata[0]) or n_components < 0:\n\t\t\treturn 'error'\n\t\tevl, evec = LA.eig(self.covx)\n\t\tx_pca = np.dot(self.mean0data,evec)\n\t\treturn x_pca[:,0:n_components]\n\n\n\n# example data (assumed): the iris dataset supplies the features x and labels y used below\niris = datasets.load_iris()\nx = iris.data\ny = iris.target\n\n\na=PCa(x)\nx_pcaH=a.transform(n_components=4)\nprint(\"-----------------------------------------\")\nprint(\"explained_variance - of homework PCA\")\nprint(a.explained_variance())\nprint(\"-----------------------------------------\")\nprint(\"explained_variance_ratio - of homework PCA\")\nprint(a.explained_variance_ratio())\n\n#package PCA\npca = decomposition.PCA(n_components=4)\npca.fit(x)\nx_pcaP=pca.transform(x)\nprint(\"-----------------------------------------\")\nprint(\"explained_variance - of package PCA\")\nprint(pca.explained_variance_)\nprint(\"-----------------------------------------\")\nprint(\"explained_variance_ratio - of package PCA\")\nprint(pca.explained_variance_ratio_)\nprint(\"-----------------------------------------\")\n\n#homework LDA\n\nb = LDa(x,y)\nx_ldaH = b.transform(n_components = 4)\nprint(\"explained_variance_ratio - of homework LDA\")\nprint(b.explained_variance_ratio())\nprint(\"-----------------------------------------\")\n\n\n#package LDA\nlda = LDA(n_components=4,store_covariance=True)\nlda.fit(x,y)\nx_ldaP = lda.transform(x)\n\n\n\nplt.scatter(x_pcaH[:,0],x_pcaH[:,1],c=y)\nplt.show()\n\n\nplt.scatter(x_pcaP[:,0],x_pcaP[:,1],c=y)\nplt.show()\n\n\n\n\n\n\nplt.scatter(x_ldaH[:,0],x_ldaH[:,1],c=y)\nplt.show()\n\n\nplt.scatter(x_ldaP[:,0],x_ldaP[:,1],c=y)\nplt.show()\n","sub_path":"course/PCA_LDA.py","file_name":"PCA_LDA.py","file_ext":"py","file_size_in_byte":3321,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"358834565","text":"from numpy import *\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport pandas as pd\r\n\r\n# y = mx + b\r\n# m is slope, b is y-intercept\r\ndef compute_error_for_line_given_points(thetha0, thetha1, thetha2, x,y,z):\r\n    totalError = 0\r\n    max_x=np.amax(x)\r\n    min_x=np.amin(x)\r\n    mean_x=np.mean(x)\r\n    print(\"max value of x is::\",max_x)\r\n    print(\"min value of x is::\",min_x)\r\n    print(\"mean value of x is::\",mean_x)\r\n    max_z=np.amax(z)\r\n    min_z=np.amin(z)\r\n    mean_z=np.mean(z)\r\n    for i in range(1, len(x)):\r\n        feature_x= (x[i]-mean_x)/(max_x-min_x)\r\n        feature_z= (z[i]-mean_z)/(max_z-min_z)\r\n\r\n        # print(\"total error before:::\",totalError)\r\n        # print(\"error calculated:::\",( (m * x + b)-y) ** 2)\r\n        totalError += (y[i] - (thetha2*feature_z + thetha1 * feature_x + thetha0)) ** 2\r\n    #print(\"length of points:::\",float(len(points)))\r\n    #print (\"total points are::::\",totalError)\r\n    #print(\"error here is....\",totalError/ (2*(float(len(x)))))\r\n    return totalError / (2*(float(len(x))))\r\n\r\ndef step_gradient(thetha0_current, thetha1_current, thetha2_current, x,y,z, learningRate):\r\n    thetha0_gradient = 0\r\n    thetha1_gradient = 0\r\n    thetha2_gradient = 0\r\n    N = float(len(x))\r\n    max_x=np.amax(x)\r\n    min_x=np.amin(x)\r\n    mean_x=np.mean(x)\r\n    max_z=np.amax(z)\r\n    min_z=np.amin(z)\r\n    mean_z=np.mean(z)\r\n    for i in range(1, len(x)):\r\n        feature_x= (x[i]-mean_x)/(max_x-min_x)\r\n        feature_z= (z[i]-mean_z)/(max_z-min_z)\r\n        # the hypothesis must match the model used in compute_error_for_line_given_points:\r\n        # y ~ thetha2*feature_z + thetha1*feature_x + thetha0\r\n        hypothesis = (thetha2_current * feature_z) + (thetha1_current * feature_x) + thetha0_current\r\n        thetha0_gradient += (hypothesis - y[i])\r\n        thetha1_gradient += feature_x * (hypothesis - y[i])\r\n        thetha2_gradient += feature_z * (hypothesis - y[i])\r\n    new_thetha0 = thetha0_current - ((learningRate/N) * thetha0_gradient)\r\n    new_thetha1 = thetha1_current - ((learningRate/N) * thetha1_gradient)\r\n    new_thetha2 = thetha2_current - ((learningRate/N) * thetha2_gradient)\r\n\r\n    return [new_thetha0, new_thetha1, new_thetha2]\r\n\r\ndef gradient_descent_runner(x,y,z, starting_thetha0, starting_thetha1, starting_thetha2, learning_rate, num_iterations):\r\n    thetha0 = starting_thetha0\r\n    thetha1 = starting_thetha1\r\n    thetha2 = starting_thetha2\r\n    for i in range(num_iterations):\r\n        thetha0, thetha1, thetha2 = step_gradient(thetha0, thetha1, thetha2, x,y,z, learning_rate)\r\n\r\n    return [thetha0, thetha1, thetha2]\r\n\r\ndef run():\r\n    points = pd.read_csv(\"BreastCancerData.csv\", delimiter=\",\")\r\n    x = np.array(points['Perimeter'])\r\n    y = 
np.array(points['Compactness'])\r\n z = np.array(points['Area'])\r\n #print (x,y)\r\n learning_rate = 0.1\r\n initial_thetha0 = 0.1\r\n initial_thetha1 = 0.1\r\n initial_thetha2 = 0.1\r\n num_iterations = 1000\r\n plt.scatter(x, y)\r\n\r\n print (\"Starting gradient descent at thetha0 = {0}, thetha1 = {1}, thetha2 = {2}, error = {3}\".format(initial_thetha0, initial_thetha1, initial_thetha2, compute_error_for_line_given_points(initial_thetha0, initial_thetha1, initial_thetha2, x,y,z)))\r\n print (\"Running...\")\r\n [thetha0, thetha1, thetha2] = gradient_descent_runner(x,y,z, initial_thetha0, initial_thetha1, initial_thetha2, learning_rate, num_iterations)\r\n max_x=np.amax(x)\r\n min_x=np.amin(x)\r\n mean_x=np.mean(x)\r\n max_z=np.amax(z)\r\n min_z=np.amin(z)\r\n mean_z=np.mean(z)\r\n feature_x= (x-mean_x)/(max_x-min_x)\r\n feature_z= (z-mean_z)/(max_z-min_z)\r\n plt.plot(x, thetha2*feature_z+thetha1*feature_x+thetha0, color='r')\r\n plt.show()\r\n print (\"After {0} iterations thetha0 = {1}, thetha1 = {2}, thetha2 = {3}, error = {4}\".format(num_iterations, thetha0, thetha1,thetha2, compute_error_for_line_given_points(thetha0, thetha1, thetha2, x,y,z)))\r\n\r\nif __name__ == '__main__':\r\n run()\r\n\r\nprint(\"hello, inside gradient descent\");\r\n","sub_path":"linear_extra_input.py","file_name":"linear_extra_input.py","file_ext":"py","file_size_in_byte":4406,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"89144596","text":"\"\"\"\nMuch is borrowed from Leandro Von Werra's TRL library https://github.com/lvwerra/trl\n- though improved via type annotations, documentation and linting.\n\"\"\"\nimport json\nimport os\nimport dataclasses\nfrom typing import List, Dict, Any, Mapping, Optional, Tuple\nimport argparse\n\nimport torch\nimport torch.nn.functional as f\nimport numpy as np\nimport transformers\nimport tqdm\nimport time\nimport random\nimport pandas as pd\nimport wandb\nimport deepspeed\nfrom transformers import GPT2LMHeadModel\n\nimport openai.evaluation as evaluation\nimport openai.training as training\nimport openai.constants as constants\n\n#######################################\n# Tensors: Names, Shapes and Contents #\n#######################################\n\"\"\"\nName - Shape - Content\n----------------------\nquery - [batch_size, query_length] - contains token ids of query\nresponse - [batch_size, response_length] - contains token ids of responses\nscores - [batch_size] - rewards of each query/response pair\nmodel_input - [batch_size, query_length + response_length] - combined query and response tokens\nm_input - [forward_batch_size, query_length + response_length] - small forward batch of model_input\nlogits - [forward_batch_size, query_length + response_length, vocab_size] - logits from model outputs\nref_logits - [forward_batch_size, query_length + response_length, vocab_size] - logits from ref_model outputs\nlogprobs - [batch_size, response_length] - log-probabilities of response tokens\nref_logprobs - [batch_size, response_length] - reference log-probabilities of response tokens\nrewards - [batch_size, response_length] - the model rewards including kl-score for each token\nnon_score_reward - [batch_size, response_length] - the model kl-score for each token\nvalues - [batch_size, response_length] - the values of the model response for each token\n\"\"\"\n\n\n@dataclasses.dataclass\nclass RLConfig:\n generative_model_name: str = constants.FINAL_XL_BEHAVIOR_CLONING\n\n reward_model_name: str = 
\"gpt2-xl-reddit-writingprompts-reward-model-full/checkpoint-41000\"\n\n steps: int = 819200\n\n batch_size: int = 512\n \"Number of samples per optimisation step\"\n\n forward_batch_size: int = 1\n \"Number of samples forward passed through model at a time\"\n\n ppo_epochs: int = 4\n \"Number of optimisation epochs per batch of samples\"\n\n txt_in_len: int = 64\n \"The Truncated length of the input tokens, optimal if txt_in_len + txt_out_len is a power of 2\"\n\n txt_out_len: int = 192\n \"The Length of the output tokens - constrained to be equal so that we can batch.\"\n\n lr: float = 1.41e-5\n \"'lr' (float): Adam learning rate, default: 1.41e-5\"\n\n init_kl_coef: float = 0.2\n \"Initial KL penalty coefficient (used for adaptive and linear control)\"\n\n adap_kl_ctrl: bool = True\n \"Use adaptive KL control, otherwise linear\"\n\n target: int = 6\n \"Target KL value for adaptive KL control\"\n\n horizon: int = 10000\n \"Horizon for adaptive KL control\"\n\n gamma: int = 1\n \"Gamma parameter for advantage calculation\"\n\n lam: float = 0.95\n \"Lambda parameter for advantage calculation,\"\n\n cliprange: float = .2\n \"Range for clipping in PPO policy gradient loss\"\n\n cliprange_value: float = .2\n \"Range for clipping values in loss calculation\"\n\n vf_coef: float = .1\n \"Scaling factor for value loss\"\n\n\ndef flatten_dict(nested: Mapping[str, Any], sep: str = '/') -> dict:\n \"\"\"Flatten dictionary and concatenate nested keys with separator.\"\"\"\n\n def rec(nest: Mapping[str, Any], prefix: str, into: dict):\n for k, v in nest.items():\n if sep in k:\n raise ValueError(f\"separator '{sep}' not allowed to be in key '{k}'\")\n try:\n rec(v, prefix + k + sep, into)\n except AttributeError:\n into[prefix + k] = v\n\n flat = {}\n rec(nested, '', flat)\n return flat\n\n\ndef stack_dicts(stats_dicts: List[dict]) -> dict:\n \"\"\"Stack the values of a dict.\"\"\"\n results = dict()\n for k in stats_dicts[0]:\n stats_list = [torch.flatten(d[k]) for d in stats_dicts]\n results[k] = torch.stack(stats_list)\n return results\n\n\ndef add_suffix(input_dict: Dict[str, Any], suffix: str) -> dict:\n \"\"\"Add suffix to dict keys.\"\"\"\n return dict((k + suffix, v) for k, v in input_dict.items())\n\n\ndef pad_to_size(tensor, size, dim=1, padding: int = 50256):\n \"\"\"Pad tensor to size.\"\"\"\n t_size = tensor.size()[dim]\n if t_size == size:\n return tensor\n else:\n return torch.nn.functional.pad(tensor, (0, size - t_size), 'constant', padding)\n\n\ndef logprobs_from_logits(logits: torch.Tensor, labels: torch.Tensor) -> torch.Tensor:\n \"\"\"\n The LogSoftmax is defined here: https://pytorch.org/docs/stable/generated/torch.nn.LogSoftmax.html\n - This is simply the Log of the Softmax.\n - Since we use the Softmax to get probabilities from logits, we use the LogSoftmax to get the log probabilities.\n - Recall that the logits tensor has shape = [forward_batch_size, query_length + response_length, vocab_size]\n - Hence the softmax is correctly over the vocab\n\n The Gather is defined here: https://pytorch.org/docs/stable/generated/torch.gather.html\n - In the current usage, labels is m_input\n - Recall that m_input has shape = [forward_batch_size, query_length + response_length]\n - In particular, here we want to extract the log probabilities corresponding to the tokens that were generated.\n - Per the definition of gather, we therefore get logpy = logp[i][j][labels[i][j][k]]\n\n Example:\n if forward_batch_size = 2, query_length + response_length = 2, and vocab_size = 2, and\n we take logp = 
[[[1,2],[3,4]], [[5,6],[7,8]] and labels = [[0, 1], [1, 0]]\n then labels.unsqueeze(2) = [[[0], [1]], [[1], [0]]]\n then torch.gather(logp, dim=2, index=labels.unsqueeze(2)) = [[[1], [4]], [[6], [7]]]\n then torch.gather(logp, dim=2, index=labels.unsqueeze(2)).squeeze(-1) = [[1, 4], [6, 7]]\n so we get the logits of each token of each response in the forward batch.\n\n This function returns a tensor with the same shape as labels\n hence [forward_batch_size, query_length + response_length]\n \"\"\"\n logp = f.log_softmax(logits, dim=2)\n logpy = torch.gather(logp, dim=2, index=labels.unsqueeze(2)).squeeze(-1)\n return logpy\n\n\ndef z_scale(values: torch.Tensor, shift_mean: bool = True) -> torch.Tensor:\n \"\"\"\n Apply Z-scale normalization. See https://en.wikipedia.org/wiki/Standard_score for motivation.\n \"\"\"\n mean, var = torch.mean(values), torch.var(values)\n whitened = (values - mean) * torch.rsqrt(var + 1e-8)\n if not shift_mean:\n whitened += mean\n return whitened\n\n\ndef clip_by_value(x: torch.Tensor, tensor_min: torch.Tensor, tensor_max: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Tensor extension to torch.clamp\n https://github.com/pytorch/pytorch/issues/2793#issuecomment-428784713\n \"\"\"\n clipped = torch.max(torch.min(x, tensor_max), tensor_min)\n return clipped\n\n\ndef entropy_from_logits(logits: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Calculate entropy from logits.\n - Recall that the logits tensor has shape = [forward_batch_size, query_length + response_length, vocab_size]\n - so the operations over dim=-1 are over the vocab_size\n\n The SoftMax is defined here: https://pytorch.org/docs/stable/generated/torch.nn.Softmax.html\n - Specifically we have probs = exp(logits)/sum_{vocab}(exp(logits))\n - Hence log(probs) = logits - log(sum_{vocab}(exp(logits)))\n\n The LogSumExp is defined here: https://pytorch.org/docs/stable/generated/torch.logsumexp.html\n - Specifically, we have logsumexp(logits) = log(sum_{vocab}(exp(logits)))\n\n The relevant notion of entropy is defined here https://en.wikipedia.org/wiki/Entropy#Information_theory\n - Specifically, we have S = - sum_{vocab} probs * log(probs)\n - So putting the above together, we get: S = logsumexp(logits) - sum_{vocab}(probs*logits)\n \"\"\"\n probs = f.softmax(logits, dim=-1)\n # normalization terms + entropy term\n entropy = torch.logsumexp(logits, dim=-1) - torch.sum(probs * logits, dim=-1)\n return entropy\n\n\ndef average_torch_dicts(list_of_dicts: List[dict]) -> dict:\n \"\"\"Average values of a list of dicts with torch tensors.\"\"\"\n average_dict = dict()\n for key in list_of_dicts[0].keys():\n average_dict[key] = torch.mean(torch.stack([d[key] for d in list_of_dicts]), dim=0)\n return average_dict\n\n\ndef stats_to_np(stats_dict: dict) -> dict:\n \"\"\"Cast all torch.tensors in dict to numpy arrays.\"\"\"\n new_dict = dict()\n for k, v in stats_dict.items():\n if isinstance(v, torch.Tensor):\n new_dict[k] = v.detach().cpu().numpy()\n else:\n new_dict[k] = v\n if np.isscalar(new_dict[k]):\n new_dict[k] = float(new_dict[k])\n return new_dict\n\n\nclass ValueHead(torch.nn.Module):\n \"\"\"The ValueHead class implements a head for GPT2 that returns a scalar for each output token.\"\"\"\n\n def __init__(self, config: transformers.GPT2Config):\n super().__init__()\n self.detach_head = False\n self.summary_type = config.summary_type if hasattr(config, \"summary_type\") else \"last\"\n if self.summary_type == \"attn\":\n raise NotImplementedError\n\n self.summary = torch.nn.Identity()\n if hasattr(config, 
\"summary_use_proj\") and config.summary_use_proj:\n if hasattr(config, \"summary_proj_to_labels\") and config.summary_proj_to_labels and config.num_labels > 0:\n num_classes = config.num_labels\n else:\n num_classes = config.hidden_size\n self.summary = torch.nn.Linear(config.hidden_size, num_classes)\n\n self.activation = torch.nn.Identity()\n if hasattr(config, \"summary_activation\") and config.summary_activation == \"tanh\":\n self.activation = torch.nn.Tanh()\n\n self.first_dropout = torch.nn.Identity()\n if hasattr(config, \"summary_first_dropout\") and config.summary_first_dropout > 0:\n self.first_dropout = torch.nn.Dropout(config.summary_first_dropout)\n\n # the last_dropout is not a standard config option.\n self.last_dropout = torch.nn.Identity()\n try:\n summary_last_dropout = getattr(config, 'summary_last_dropout')\n if summary_last_dropout > 0:\n self.last_dropout = torch.nn.Dropout(summary_last_dropout)\n except AttributeError:\n pass\n\n self.flatten = torch.nn.Flatten()\n\n def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:\n if self.detach_head:\n output = hidden_states.detach()\n else:\n output = hidden_states\n output = self.first_dropout(output)\n output = self.summary(output)\n output = self.activation(output)\n output = self.last_dropout(output)\n\n return output\n\n\nclass GPT2HeadWithValueModel(transformers.GPT2PreTrainedModel):\n \"\"\"The GPT2HeadWithValueModel class implements a GPT2 language model with a secondary, scalar head.\"\"\"\n\n def __init__(self, config: transformers.GPT2Config):\n super().__init__(config)\n # just output one value\n config.num_labels = 1\n # This is the transformer which outputs raw hidden-states without a specific head.\n self.transformer = transformers.GPT2Model(config)\n # so we add a language model head.\n self.lm_head = torch.nn.Linear(config.n_embd, config.vocab_size, bias=False)\n # add a value function head.\n self.v_head = ValueHead(config)\n\n super().init_weights()\n\n def get_output_embeddings(self):\n return self.lm_head\n\n def detach_value_head(self):\n self.v_head.detach_head = True\n\n def forward(\n self,\n input_ids: torch.LongTensor = None,\n past_key_values: Tuple[Tuple[torch.Tensor]] = None,\n attention_mask: torch.FloatTensor = None,\n token_type_ids: torch.LongTensor = None,\n position_ids: torch.LongTensor = None,\n head_mask: torch.FloatTensor = None,\n inputs_embeds: torch.FloatTensor = None,\n ) -> Tuple[torch.Tensor, torch.Tensor]:\n \"\"\"\n Returns the logits and values.\n \"\"\"\n\n transformer_outputs = self.transformer(\n input_ids,\n past_key_values=past_key_values,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n )\n\n hidden_states = transformer_outputs[0]\n\n lm_logits = self.lm_head(hidden_states)\n value = self.v_head(hidden_states).squeeze(-1)\n return lm_logits, value\n\n\ndef respond_to_batch(model: transformers.PreTrainedModel,\n input_ids: torch.Tensor,\n txt_len: int = 50, top_k: int = 50, top_p: float = 0.92):\n \"\"\"Sample text from language model.\"\"\"\n for _ in range(txt_len):\n outputs = model(input_ids)\n next_token_logits = outputs[0][:, -1, :]\n next_token_logits = transformers.top_k_top_p_filtering(\n next_token_logits, top_k=top_k, top_p=top_p)\n probabilities = f.softmax(next_token_logits, dim=-1)\n next_token = torch.multinomial(probabilities, num_samples=1)\n input_ids = torch.cat([input_ids, next_token], dim=-1)\n return input_ids[:, -txt_len:]\n\n\nclass 
AdaptiveKLController:\n \"\"\"\n Adaptive KL controller described in\n Fine-Tuning Language Models from Human Preferences - https://arxiv.org/pdf/1909.08593.pdf\n See section 2.2 in particular\n \"\"\"\n\n def __init__(self, init_kl_coef: float, target: int, horizon: int):\n self.value = init_kl_coef\n self.target = target\n self.horizon = horizon\n\n def update(self, current_kl: torch.Tensor, n_steps: int):\n \"\"\"\n current_kl is a zero-dimensional tensor\n \"\"\"\n target = self.target\n # this proportional error is called e_t in Section 2.2\n proportional_error = np.clip(current_kl / target - 1, -0.2, 0.2)\n # Instead of using the fixed ratio of K_{\\beta}=0.1, we use K_{\\beta}=n_steps/self.horizon\n # we do this to penalize errors at the end of the horizon more than errors at the start.\n factor = 1 + proportional_error * n_steps / self.horizon\n self.value *= factor\n\n\nclass FixedKLController:\n \"\"\"Fixed KL controller.\"\"\"\n\n def __init__(self, kl_coef: float):\n self.value = kl_coef\n\n def update(self, current_kl: torch.Tensor, n_steps: int):\n \"\"\"\n current_kl is a zero-dimensional tensor\n \"\"\"\n pass\n\n\nclass PPOTrainer:\n \"\"\"\n The PPO_trainer uses Proximal Policy Optimization to optimise language models.\n \"\"\"\n\n def __init__(self,\n model: transformers.PreTrainedModel,\n optimizer: torch.optim.Optimizer,\n ref_model: transformers.PreTrainedModel,\n config: RLConfig = RLConfig()):\n \"\"\"\n Initialize PPOTrainer.\n Args:\n model (torch.model): Hugging Face transformer GPT2 model with value head\n ref_model (torch.model): Hugging Face transformer GPT2 reference model used for KL penalty\n config (RLConfig): Settings for PPO - See the documentation for RLConfig for its list of attributes\n \"\"\"\n self.config = config\n self.ref_model = ref_model\n self.model = model\n self.optimizer = optimizer\n self.kl_ctl = AdaptiveKLController(init_kl_coef=self.config.init_kl_coef,\n target=self.config.target,\n horizon=self.config.horizon)\n\n def step(self, query: torch.Tensor, response: torch.Tensor, scores: torch.Tensor) -> dict:\n \"\"\"\n Run a PPO optimisation step.\n args:\n query (torch.tensor): tensor containing the encoded queries, shape [batch_size, query_length]\n response (torch.tensor): tensor containing the encoded responses, shape [batch_size, response_length]\n scores (torch.tensor): tensor containing the scores, shape [batch_size]\n returns:\n train_stats (dict): a summary of the training statistics\n \"\"\"\n\n bs = self.config.batch_size\n timing = dict()\n t0 = time.time()\n\n gen_len = response.shape[1]\n model_input = torch.cat((query, response), dim=1)\n\n t = time.time()\n logprobs, ref_logprobs, values = self.batched_forward_pass(\n model_input, gen_len)\n timing['time/ppo/forward_pass'] = time.time() - t\n\n t = time.time()\n rewards, non_score_reward, kl_coef = self.compute_rewards(\n scores, logprobs, ref_logprobs)\n timing['time/ppo/compute_rewards'] = time.time() - t\n\n t = time.time()\n all_stats = []\n idxs = list(range(bs))\n for _ in range(self.config.ppo_epochs):\n random.shuffle(idxs)\n for i in range(bs):\n idx = idxs[i]\n train_stats = self.train_minibatch(\n logprobs[idx:idx + 1], values[idx:idx + 1], rewards[idx:idx + 1],\n response[idx:idx + 1], model_input[idx:idx + 1])\n all_stats.append(train_stats)\n timing['time/ppo/optimize_step'] = time.time() - t\n\n t = time.time()\n train_stats = stack_dicts(all_stats)\n\n # reshape advantages/ratios such that they are not averaged.\n train_stats['policy/advantages'] = 
torch.flatten(train_stats['policy/advantages']).unsqueeze(0)\n train_stats['policy/ratio'] = torch.flatten(train_stats['policy/ratio']).unsqueeze(0)\n\n stats = record_step_stats(scores=scores, logprobs=logprobs, ref_logprobs=ref_logprobs,\n non_score_reward=non_score_reward, train_stats=train_stats,\n kl_coef=kl_coef)\n stats = stats_to_np(stats)\n timing['time/ppo/calc_stats'] = time.time() - t\n\n self.kl_ctl.update(current_kl=stats['objective/kl'], n_steps=self.config.batch_size)\n\n timing['time/ppo/total'] = time.time() - t0\n stats.update(timing)\n return stats\n\n def batched_forward_pass(self, model_input: torch.Tensor, gen_len: int\n ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:\n \"\"\"\n Calculate model outputs in multiple batches.\n\n returns:\n logprobs - [batch_size, response_length] - log probs of model response\n ref_logprobs - [batch_size, response_length] - log probs of ref_model response\n values - [batch_size, response_length] - the values of the model response for each token\n \"\"\"\n bs = self.config.batch_size\n fbs = self.config.forward_batch_size\n logprobs = []\n ref_logprobs = []\n values = []\n\n for i in range(int(bs / fbs)):\n m_input = model_input[i * fbs:(i + 1) * fbs]\n logits, v = self.model(m_input)\n ref_logits, _ = self.ref_model(m_input)\n # add the results from the value head after detaching them.\n # the [-gen_len-1:-1] slice only gets the values corresponding to the response.\n values.append(v[:, -gen_len - 1:-1].detach())\n # note that the values slide of [-gen_len-1:-1] is offset by one from the\n # [-gen_len:] slice for the logprobs.\n logprobs.append(logprobs_from_logits(logits[:, :-1, :], m_input[:, 1:])[:, -gen_len:].detach())\n ref_logprobs.append(logprobs_from_logits(ref_logits[:, :-1, :], m_input[:, 1:])[:, -gen_len:].detach())\n\n return torch.cat(logprobs), torch.cat(ref_logprobs), torch.cat(values)\n\n def train_minibatch(self, logprobs: torch.Tensor, values: torch.Tensor, rewards: torch.Tensor,\n response: torch.Tensor, model_input: torch.Tensor) -> dict:\n \"\"\"Train one PPO minibatch\"\"\"\n loss_p, loss_v, train_stats = self.loss(logprobs, values, rewards, response, model_input)\n loss = loss_p + loss_v\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n return train_stats\n\n def compute_rewards(self, scores: torch.Tensor, logprobs: torch.Tensor, ref_logprobs: torch.Tensor\n ) -> Tuple[torch.Tensor, torch.Tensor, float]:\n \"\"\"\n Compute per token rewards from scores and KL-penalty.\n \"\"\"\n kl = logprobs - ref_logprobs\n non_score_reward = -self.kl_ctl.value * kl\n rewards = non_score_reward.clone().detach()\n # add the score to the non_score_reward of the last token in the response.\n rewards[:, -1] += scores\n return rewards, non_score_reward, self.kl_ctl.value\n\n def loss(self, old_logprobs: torch.Tensor, values: torch.Tensor, rewards: torch.Tensor,\n response: torch.Tensor, model_input: torch.Tensor):\n \"\"\"\n Calculate policy and value losses.\n For a detailed explanation of the policy gradient see the PPO paper https://arxiv.org/abs/1707.06347\n For a detailed explanation of the value function clipping, see the PPO2 documentation:\n https://stable-baselines.readthedocs.io/en/master/modules/ppo2.html\n For a detailed explanation of the advantage see John Schulman's thesis http://joschu.net/docs/thesis.pdf\n\n ! Note that this implementation does not include an entropy term in the loss, but the PPO reference does.\n ! 
Note that we keep some unnecessary artifacts of the reference for ease of comparison.\n \"\"\"\n # gae is short for Generalized Advantage Estimate\n # lam is short for lambda\n lastgaelam = 0\n advantages_reversed = []\n gen_len = response.shape[1]\n\n for t in reversed(range(gen_len)):\n # take the returns from the value head for the time steps corresponding to the response.\n nextvalues = values[:, t + 1] if t < gen_len - 1 else 0.0\n # delta is defined in equation (12) of the PPO paper\n # See Chapter 4 of the Schulman thesis for a full explanation of where delta comes from.\n delta = rewards[:, t] + (self.config.gamma * nextvalues) - values[:, t]\n # the \"lastgaelam\" corresponds to each additive term in equation (11) of the PPO paper\n lastgaelam = delta + self.config.gamma * self.config.lam * lastgaelam\n # e.g. with T=gen_len, when t=T, we get lastgaelam = delta_T\n # then, when t=T-1, we get lastgaelam = delta_{T-1} + delta_T*(gamma*lambda)\n # then, when t=T-2, we get lastgaelam = delta_{T-2} + delta_{T-1}*(gamma*lambda) + delta_T*(gamma*lambda)**2\n advantages_reversed.append(lastgaelam)\n advantages = torch.stack(advantages_reversed[::-1]).transpose(0, 1)\n\n # add the values back to the advantages to get the full returns.\n returns = advantages + values\n advantages = z_scale(advantages)\n advantages = advantages.detach()\n\n logits, vpred = self.model(model_input)\n logprob = logprobs_from_logits(logits[:, :-1, :], model_input[:, 1:])\n\n # for calculating the loss, we only need the log probs and the values for the response (not the query)\n logprob, vpred = logprob[:, -gen_len:], vpred[:, -gen_len - 1:-1]\n\n # per the OpenAI PPO2 implementation, we also clip the value function\n vpredclipped = clip_by_value(vpred,\n values - self.config.cliprange_value,\n values + self.config.cliprange_value)\n\n # vf means value function here and below\n vf_losses1 = (vpred - returns) ** 2\n vf_losses2 = (vpredclipped - returns) ** 2\n vf_loss = .5 * torch.mean(torch.max(vf_losses1, vf_losses2))\n # record the fraction of times that we used the clipped value rather than the original\n vf_clipfrac = torch.mean(torch.gt(vf_losses2, vf_losses1).double())\n\n # The ratio is \\pi_{\\theta}(a_t | s_t) / \\pi_{\\theta_{\\text{old}}}(a_t | s_t)\n # In other words, the probability of producing a certain token under the new policy\n # divided by the probability of producing a certain token under the old policy.\n # This division of probabilities can be expressed in terms of the logprobs here:\n ratio = torch.exp(logprob - old_logprobs)\n\n # pg means policy gradient here and below.\n pg_losses = -advantages * ratio\n # clip the probability ratio as per equation (7) of the PPO paper.\n pg_losses2 = -advantages * torch.clamp(ratio,\n 1.0 - self.config.cliprange,\n 1.0 + self.config.cliprange)\n\n # since the sign is negative, take the max, rather than the min as in equation (7)\n pg_loss = torch.mean(torch.max(pg_losses, pg_losses2))\n # record the fraction of times that we used the clipped ratio rather than the original.\n pg_clipfrac = torch.mean(torch.gt(pg_losses2, pg_losses).double())\n\n loss = pg_loss + self.config.vf_coef * vf_loss\n\n entropy = torch.mean(entropy_from_logits(logits))\n approxkl = .5 * torch.mean((logprob - old_logprobs) ** 2)\n policykl = torch.mean(logprob - old_logprobs)\n return_mean, return_var = torch.mean(returns), torch.var(returns)\n value_mean, value_var = torch.mean(values), torch.var(values)\n\n stats = dict(\n loss=dict(policy=pg_loss, value=vf_loss, 
total=loss),\n policy=dict(entropy=entropy, approxkl=approxkl, policykl=policykl, clipfrac=pg_clipfrac,\n advantages=advantages, advantages_mean=torch.mean(advantages), ratio=ratio),\n returns=dict(mean=return_mean, var=return_var),\n val=dict(vpred=torch.mean(vpred), error=torch.mean((vpred - returns) ** 2),\n clipfrac=vf_clipfrac, mean=value_mean, var=value_var),\n )\n return pg_loss, self.config.vf_coef * vf_loss, flatten_dict(stats)\n\n\ndef record_step_stats(**data) -> dict:\n \"\"\"Record training step statistics.\"\"\"\n kl = data['logprobs'] - data['ref_logprobs']\n mean_kl = torch.mean(torch.sum(kl, dim=-1))\n mean_entropy = torch.mean(torch.sum(-data['logprobs'], dim=1))\n mean_non_score_reward = torch.mean(torch.sum(data['non_score_reward'], dim=1))\n stats = {\n 'objective/kl': mean_kl,\n 'objective/kl_dist': kl,\n 'objective/logprobs': data['logprobs'],\n 'objective/ref_logprobs': data['ref_logprobs'],\n 'objective/kl_coef': data['kl_coef'],\n 'objective/entropy': mean_entropy,\n 'ppo/mean_non_score_reward': mean_non_score_reward,\n }\n\n for k, v in data['train_stats'].items():\n stats[f'ppo/{k}'] = torch.mean(v, dim=0)\n stats['ppo/val/var_explained'] = 1 - stats['ppo/val/error'] / stats['ppo/returns/var']\n return stats\n\n\ndef get_arguments() -> dict:\n parser = argparse.ArgumentParser(description='PPO with deepspeed')\n parser.add_argument('--local_rank', type=int, default=-1,\n help='local rank passed from distributed launcher')\n parser = deepspeed.add_config_arguments(parser)\n return parser.parse_args()\n\n\ndef deepspeed_gen_init(cmd_args: dict, gen_model_base: str, config: RLConfig\n ) -> Tuple[transformers.PreTrainedModel, torch.optim.Optimizer]:\n gen_model = GPT2HeadWithValueModel.from_pretrained(gen_model_base)\n gen_model_opt = torch.optim.Adam(gen_model.parameters(), lr=config.lr)\n gen_model, gen_model_opt, _, _ = deepspeed.initialize(\n args=cmd_args, model=gen_model,\n model_parameters=gen_model.parameters(),\n optimizer=gen_model_opt)\n return gen_model, gen_model_opt\n\n\ndef ppo_train(prompts: List[str], config: Optional[RLConfig] = RLConfig(), readme: str = ''):\n cmd_args = get_arguments()\n \n gen_model_base = constants.MODELS_BASEPATH + config.generative_model_name\n gen_model_base = 'gpt2-large'\n gen_model_zero, gen_model_zero_opt = deepspeed_gen_init(cmd_args, gen_model_base, config)\n gen_model_one, gen_model_one_opt = deepspeed_gen_init(cmd_args, gen_model_base, config)\n gen_model_ref, gen_model_ref_opt = deepspeed_gen_init(cmd_args, gen_model_base, config)\n\n ppo_trainer_zero = PPOTrainer(gen_model_zero, gen_model_zero_opt, gen_model_ref, config)\n ppo_trainer_one = PPOTrainer(gen_model_one, gen_model_one_opt, gen_model_ref, config)\n\n reward_model = GPT2LMHeadModel.from_pretrained('gpt2-large')\n #reward_model = GPT2LMHeadModel.from_pretrained(\n # constants.MODELS_BASEPATH + config.reward_model_name)\n compare = evaluation.Comparator(reward_model)\n \n prompts_df = pd.DataFrame([{'query': training.behavior_clone_string(prompt)}\n for prompt in prompts])\n training.tokenizer.pad_side = 'left'\n training.tokenizer.pad_token = ' '\n prompts_df['tokens'] = prompts_df['query'].progress_apply(\n lambda x: training.tokenizer.encode(\n x, return_tensors=\"pt\", padding='max_length',\n truncation=True, max_length=config.txt_in_len)[0])\n\n fbs = config.forward_batch_size\n\n num_steps = int(np.ceil(config.steps / config.batch_size))\n\n steps_to_save = [1, 2, 5, 10, 25, 50, 100, 200, 400, 800, 1600]\n base_output_path = constants.MODELS_BASEPATH + 
config.generative_model_name + '-ppo'\n os.makedirs(base_output_path, exist_ok=True)\n\n wandb.init(name='from-base', project='ppo-large', \n config=dataclasses.asdict(config))\n git_hash = training.get_git_hash()\n\n for step_idx in tqdm.tqdm(range(num_steps)):\n torch.cuda.empty_cache()\n logs = dict()\n game_data = dict()\n timing = dict()\n t0 = time.time()\n\n # get a batch from the dataset\n df_batch = prompts_df.sample(config.batch_size)\n game_data['query'] = df_batch['query'].tolist()\n query_tensors = torch.stack(df_batch['tokens'].tolist())\n query_tensors_zero = query_tensors.to(gen_model_zero.device)\n query_tensors_one = query_tensors.to(gen_model_one.device)\n\n # get response from gpt2\n t = time.time()\n response_tensors_zero = []\n response_tensors_one = []\n\n for i in tqdm.tqdm(range(int(config.batch_size / fbs))):\n response_zero = respond_to_batch(\n gen_model_zero,\n query_tensors_zero[i * fbs:(i + 1) * fbs],\n txt_len=config.txt_out_len)\n response_one = respond_to_batch(\n gen_model_one,\n query_tensors_one[i * fbs:(i + 1) * fbs],\n txt_len=config.txt_out_len)\n response_tensors_zero.append(response_zero)\n response_tensors_one.append(response_one)\n\n response_tensors_zero = torch.cat(response_tensors_zero)\n response_tensors_one = torch.cat(response_tensors_one)\n\n game_data['response_zero'] = [training.tokenizer.decode(response_tensors_zero[i, :])\n for i in range(config.batch_size)]\n game_data['response_one'] = [training.tokenizer.decode(response_tensors_one[i, :])\n for i in range(config.batch_size)]\n timing['time/get_response'] = time.time() - t\n\n response_tensors_zero = response_tensors_zero.to(gen_model_zero.device)\n response_tensors_one = response_tensors_one.to(gen_model_one.device)\n\n # tokenize text for comparison operator\n t = time.time()\n outcomes = [compare(q, r0, r1) for q, r0, r1 in\n zip(game_data['query'], game_data['response_zero'], game_data['response_one'])]\n timing['time/build_input_sentiment'] = time.time() - t\n\n # give the reward to each winning response.\n t = time.time()\n rewards_zero = []\n rewards_one = []\n for i in range(int(config.batch_size / fbs)):\n reward_zero = sum(1.0 for outcome in outcomes[i * fbs: (i + 1) * fbs] if outcome == 0)\n reward_one = sum(1.0 for outcome in outcomes[i * fbs: (i + 1) * fbs] if outcome == 1)\n rewards_zero.append(reward_zero)\n rewards_one.append(reward_one)\n rewards_zero = torch.tensor(rewards_zero).float().to(gen_model_zero.device)\n rewards_one = torch.tensor(rewards_one).float().to(gen_model_zero.device)\n timing['time/get_sentiment_preds'] = time.time() - t\n\n # Run PPO training\n t = time.time()\n stats_zero = ppo_trainer_zero.step(query_tensors_zero, response_tensors_zero, rewards_zero)\n stats_one = ppo_trainer_one.step(query_tensors_one, response_tensors_one, rewards_one)\n timing['time/optimization'] = time.time() - t\n\n # Log everything\n timing['time/epoch'] = time.time() - t0\n table_rows = [list(r) for r in zip(\n game_data['query'], game_data['response_zero'], game_data['response_one'],\n rewards_zero.cpu().tolist(), rewards_one.cpu().tolist())]\n logs.update({'game_log': wandb.Table(\n columns=['query', 'response_zero', 'response_one', 'reward_zero', 'reward_one'],\n rows=table_rows)})\n logs.update(timing)\n logs.update(stats_zero)\n logs.update(stats_one)\n logs['env/reward_zero_mean'] = torch.mean(rewards_zero).cpu().numpy()\n logs['env/reward_zero_std'] = torch.std(rewards_zero).cpu().numpy()\n logs['env/reward_zero_dist'] = rewards_zero.cpu().numpy()\n 
logs['env/reward_one_mean'] = torch.mean(rewards_one).cpu().numpy()\n logs['env/reward_one_std'] = torch.std(rewards_one).cpu().numpy()\n logs['env/reward_one_dist'] = rewards_one.cpu().numpy()\n wandb.log(logs)\n\n if step_idx in steps_to_save:\n checkpoint_path = f'{base_output_path}/checkpoint-{step_idx}'\n unwrapped_gen_model_zero = transformers.modeling_utils.unwrap_model(\n gen_model_zero)\n unwrapped_gen_model_zero.save_pretrained(checkpoint_path + '-zero')\n training.tokenizer.save_pretrained(checkpoint_path + '-zero')\n unwrapped_gen_model_one = transformers.modeling_utils.unwrap_model(\n gen_model_one)\n unwrapped_gen_model_one.save_pretrained(checkpoint_path + '-one')\n training.tokenizer.save_pretrained(checkpoint_path + '-one')\n\n with open('config/rl_ds_config.json', 'r') as fp:\n ds_config = json.load(fp)\n\n metadata_path = base_output_path + f'/checkpoint-{step_idx}-metadata.json'\n with open(metadata_path, 'w') as fp:\n metadata = {\n 'readme': readme,\n 'deepspeed_config': ds_config,\n 'git_hash': git_hash,\n 'rl_config': dataclasses.asdict(config)\n }\n json.dump(metadata, fp)\n return gen_model_zero, gen_model_one\n\n\nif __name__ == '__main__':\n prompt_responses = pd.read_csv(constants.PROMPT_RESPONSES)\n _prompts = list(prompt_responses['prompt'][:1000])\n ppo_train(\n prompts=_prompts,\n config=RLConfig(txt_in_len=64, txt_out_len=192),\n readme=\"\"\"\n Reinforcement Learning with GPT2-XL and PPO.\n \"\"\"\n )\n","sub_path":"openai/rl.py","file_name":"rl.py","file_ext":"py","file_size_in_byte":35436,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"303630376","text":"import math\nfrom collections import Counter\n\n\ndef num_nCr(n, r):\n return math.factorial(n) // (math.factorial(n - r) * math.factorial(r))\n\n\nN = int(input())\nA_list = list(map(int, input().split()))\n\ncum_sum = [0]\nfor i in range(N):\n cum_sum.append(cum_sum[i] + A_list[i])\ncum_sum = cum_sum[1:]\n\nd = Counter(cum_sum)\n\nans = 0\nfor key, val in d.items():\n if key == 0:\n if val == 1:\n ans += val\n else:\n ans += val + num_nCr(val, 2)\n else:\n if val > 1:\n ans += num_nCr(val, 2)\n\nprint(ans)\n","sub_path":"kakomon/AGC023-A.py","file_name":"AGC023-A.py","file_ext":"py","file_size_in_byte":551,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"572609787","text":"from flask import Blueprint, abort, jsonify, request\nfrom bson.objectid import ObjectId\nimport math\n\nfrom ..models import mongo\n\nfrom ..scripts.character_calc import calc_skill_mods\n\ncharacter = Blueprint('character', __name__)\n\nAPI_STUB = '/api/v1'\n\n@character.route(f'{API_STUB}/character', methods=['GET'])\ndef get_character():\n _id = request.args.get('_id', default=None, type=str)\n if not _id:\n return jsonify(ok=False, msg='_id field required'), 400\n this_char = mongo.db.characters.find_one({'_id': ObjectId(_id)})\n if this_char:\n return jsonify(ok=True, character=this_char)\n else:\n return jsonify(ok=False, msg='Character not found'), 404\n\n@character.route(f'{API_STUB}/character', methods=['POST'])\ndef create_character():\n # TODO: verify player doesnt exist\n # go to characters store\n characters = mongo.db.characters\n # insert request\n pid = characters.insert(request.json)\n # find new player\n new_char = characters.find_one({'_id': pid})\n return jsonify(ok=True, character=new_char)\n\n@character.route(f'{API_STUB}/character', methods=['PUT'])\ndef update_character():\n _id = 
ObjectId(request.json['_id'])\n    if not request.json:\n        return jsonify(ok=False, msg='JSON format required'), 400\n    if not _id:\n        return jsonify(ok=False, msg='ID is required to update'), 400\n    # find the object by id and update from the rest of the json\n    # update_json = {'_id': _id, '$set': request.json}\n    request.json.pop('_id', None)\n    mongo.db.characters.update_one({'_id': _id}, {'$set': request.json})\n    this_char = mongo.db.characters.find_one({'_id': _id})\n\n    return jsonify(ok=True, character=this_char)\n\n@character.route(f'{API_STUB}/characters', methods=['GET'])\ndef get_characters():\n    # get all characters (return a max per_page)\n    results = list(mongo.db.characters.find())\n\n    return jsonify(ok=True, characters=results)\n\n@character.route(f'{API_STUB}/character', methods=['DELETE'])\ndef delete_character():\n    _id = request.args.get('_id', default=None, type=str)\n    if not _id:\n        return jsonify(ok=False, msg='_id field required'), 400\n    del_char = mongo.db.characters.find_one({'_id': ObjectId(_id)})\n    if not del_char:\n        return jsonify(ok=False, msg='Character not found'), 404\n    del_result = mongo.db.characters.delete_one({'_id': ObjectId(_id)})\n    if del_result:\n        return jsonify(ok=True, character=del_char)\n\n\n@character.route(f'{API_STUB}/character/learn_spell', methods=['PUT'])\ndef mark_spell_learned():\n    # need character id, spell id\n    try:\n        char_id = ObjectId(request.args.get('_id', default=None, type=str))\n    except Exception:\n        return jsonify(ok=False, msg='Invalid id format')\n    spell_id = request.args.get('spell_id', default=None, type=int)\n    if not char_id or not spell_id:\n        return jsonify(ok=False, msg='_id and spell_id required')\n    # verify the given character and spell can be found\n    char = mongo.db.characters.find_one({'_id': char_id})\n    # assign the found spell if it's in the learnable spell list\n    spell_to_learn = None\n    for spell in char[\"spells\"]:\n        if spell[\"id\"] == spell_id:\n            spell_to_learn = spell\n            break\n    if not spell_to_learn:\n        return jsonify(ok=False, msg=f'Cannot find given spell by id {spell_id}'), 404\n    # set the learned attribute to the opposite of its current value\n    try:\n        learned_status = spell_to_learn[\"learned\"]\n    except KeyError:\n        learned_status = False\n    # flip current status\n    learned_status = not learned_status\n    spell_to_learn[\"learned\"] = learned_status\n\n    # _id is immutable in MongoDB, so drop it before reusing the document in $set\n    char.pop('_id', None)\n    mongo.db.characters.update_one({'_id': char_id}, {'$set': char})\n    return jsonify(ok=True, data=char)\n","sub_path":"app/api/character.py","file_name":"character.py","file_ext":"py","file_size_in_byte":3740,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"513484776","text":"from datetime import datetime\nfrom unittest.mock import Mock, patch\n\nfrom airflow.hooks.base_hook import BaseHook\nfrom airflow.hooks.postgres_hook import PostgresHook\nfrom airflow.operators.data_quality_threshold_check_operator import DataQualityThresholdCheckOperator\nfrom airflow.models import Connection, TaskInstance\n\nfrom .helper import get_records_mock, dummy_dag\n\ndef test_inside_threshold_values(mocker):\n    min_threshold, max_threshold = 10, 15\n    sql = \"SELECT MIN(value) FROM test;\"\n\n    mocker.patch.object(\n        PostgresHook,\n        \"get_records\",\n        side_effect=get_records_mock,\n    )\n\n    mocker.patch.object(\n        BaseHook,\n        \"get_connection\",\n        return_value=Connection(conn_id='test_id', conn_type='postgres')\n    )\n\n    task = DataQualityThresholdCheckOperator(\n        task_id=\"test_inside_threshold_values\",\n        conn_id=\"postgres\",\n        sql=sql,\n        
min_threshold=min_threshold,\n max_threshold=max_threshold,\n dag=dummy_dag\n )\n task.push = Mock(return_value=None)\n task_instance = TaskInstance(task=task, execution_date=datetime.now())\n result = task.execute(task_instance.get_template_context())\n\n result = task.execute(context={\n \"execution_date\": datetime.now(),\n })\n\n assert len(result) == 7\n assert result[\"within_threshold\"]\n\n\ndef test_outside_threshold_values(mocker):\n min_threshold, max_threshold = 50, 75\n sql = \"SELECT AVG(value) FROM test;\"\n\n mocker.patch.object(\n PostgresHook,\n \"get_records\",\n side_effect=get_records_mock,\n )\n\n mocker.patch.object(\n BaseHook,\n \"get_connection\",\n return_value=Connection(conn_id='test_id', conn_type='postgres')\n )\n\n task = DataQualityThresholdCheckOperator(\n task_id=\"test_outside_threshold_values\",\n conn_id=\"postgres\",\n sql=sql,\n min_threshold=min_threshold,\n max_threshold=max_threshold,\n dag=dummy_dag\n )\n task.push = Mock(return_value=None)\n task_instance = TaskInstance(task=task, execution_date=datetime.now())\n\n mock_patch = patch.object(\n DataQualityThresholdCheckOperator,\n \"send_failure_notification\",\n side_effect=lambda info_dict: info_dict)\n\n with mock_patch as notif_mock:\n result = task.execute(task_instance.get_template_context())\n\n assert notif_mock.called\n assert len(result) == 7\n assert not result[\"within_threshold\"]\n","sub_path":"tests/test_data_quality_threshold_check_operator.py","file_name":"test_data_quality_threshold_check_operator.py","file_ext":"py","file_size_in_byte":2449,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"369452576","text":"import sys\n\n\ndef agario(przeciwnicy):\n wynik = 2\n przeciwnicy = sorted(przeciwnicy, reverse=True)\n if przeciwnicy[-1] >= wynik:\n return \"NIE\"\n najwiekszy = przeciwnicy[0]\n czas = 0\n mniejsi = []\n while wynik < najwiekszy:\n while przeciwnicy[-1] < wynik:\n mniejsi.append(przeciwnicy.pop())\n try:\n wynik += mniejsi.pop()\n czas += 1\n except IndexError:\n return \"NIE\"\n return czas\n\n\ndef main(indata):\n _ = int(next(indata))\n przeciwnicy = map(int, next(indata).split())\n return [agario(przeciwnicy)]\n\n\ndef run():\n for line in main((line[:-1] for line in sys.stdin)):\n print(line)\n\n\nif __name__ == \"__main__\":\n run()\n","sub_path":"oij/python/agario/agario.py","file_name":"agario.py","file_ext":"py","file_size_in_byte":730,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"164770268","text":"\"\"\"\nCP1404 - Practical 02\nGet user name and save in a txt file\n\"\"\"\nMENU = \"\"\"Select a task:\n(W)rite to file\n(R)ead from file\n(Q)uit\"\"\"\nprint(MENU)\n\noption = input(\">>> \")\n\nif option == \"W\": # Write to file\n name = input(\"Enter your name: \")\n file_out = open(\"name.txt\", \"w\")\n print(name, file = file_out)\n file_out.close()\n\nelif option == \"R\": # Read from file\n file_in = open(\"name.txt\", \"r\")\n name = file_in.read().strip()\n print(\"Your name is\", name)\n file_in.close()\n\nelse: print(\"Finished.\")\n","sub_path":"name_file.py","file_name":"name_file.py","file_ext":"py","file_size_in_byte":563,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"582818336","text":"#coding=utf-8\r\nfrom django.http import HttpResponse\r\nfrom django.shortcuts import render_to_response\r\nfrom django.shortcuts import render\r\nfrom django.template.context import 
RequestContext\r\nfrom django.conf import settings\r\nfrom novnc.vnc import VNC\r\n\r\ndef vnc_view(req):\r\n    vncid = req.GET.get(\"vncid\")\r\n    close = req.GET.get('close')\r\n    if not vncid:\r\n        return HttpResponse('error.')\r\n    \r\n    vnc_manager = VNC()\r\n    if close:\r\n        vnc_manager.del_token(vncid)\r\n        return HttpResponse('<script language=\"javascript\">window.close();</script>The VNC connection was closed successfully.')\r\n    \r\n    else:\r\n        dic = {'vncid': vncid}\r\n        http_host = req.META['HTTP_HOST']\r\n        http_host = http_host.split(':')[0]\r\n        dic['url'] = 'http://%(host)s:%(port)d/vnc_auto.html?path=websockify/?token=%(vncid)s' % {\r\n            'host': http_host,\r\n            'port': settings.NOVNC_PORT,\r\n            'vncid': vncid\r\n        }\r\n        \r\n        #return render_to_response('novnc.html', dic, context_instance=RequestContext(req)) \r\n        return render(req, 'novnc.html', dic)\r\n","sub_path":"novnc/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1106,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"118329671","text":"import torch\nimport numpy as np\n\nfrom overrides import overrides\nfrom typing import Iterator, Dict, List, Tuple, Union\n\nfrom allennlp.models import Model\nfrom allennlp.modules.text_field_embedders import (\n    TextFieldEmbedder,\n    BasicTextFieldEmbedder,\n)\nfrom allennlp.modules.token_embedders import Embedding\nfrom allennlp.modules.seq2seq_encoders import Seq2SeqEncoder, PytorchSeq2SeqWrapper\nfrom allennlp.data.vocabulary import Vocabulary\nfrom allennlp.training.metrics import CategoricalAccuracy\nfrom allennlp.nn.util import get_text_field_mask, sequence_cross_entropy_with_logits\nfrom allennlp.training.metrics.average import Average\nfrom allennlp.training.metrics.metric import Metric\nfrom allennlp.common.checks import ConfigurationError\n\nTM_ONE_HOT = \"one_hot\"\nTM_FIXED = \"fixed_temperature\"\nTM_DECREASED = \"decreased_temperature\"\nTM_NO = \"no_temperature\"\n\n\nclass Cpm(Model):\n    \"\"\"\n    The ``Cpm`` applies a \"contextualizing\"\n    ``Seq2SeqEncoder`` to uncontextualized embeddings, using a ``torch.nn.functional.kl_div``\n    module to compute the language modeling loss.\n    If bidirectional is True, the language model is trained to predict the next and\n    previous tokens for each token in the input. In this case, the contextualizer must\n    be bidirectional. If bidirectional is False, the language model is trained to only\n    predict the next token for each token in the input; the contextualizer should also\n    be unidirectional.\n    If your language model is bidirectional, it is IMPORTANT that your bidirectional\n    ``Seq2SeqEncoder`` contextualizer does not do any \"peeking ahead\". That is, for its\n    forward direction it should only consider embeddings at previous timesteps, and for\n    its backward direction only embeddings at subsequent timesteps. Similarly, if your\n    language model is unidirectional, the unidirectional contextualizer should only\n    consider embeddings at previous timesteps. If this condition is not met, your\n    language model is cheating.\n    Parameters\n    ----------\n    vocab: ``Vocabulary``\n    text_field_embedder: ``TextFieldEmbedder``\n        Used to embed the indexed tokens we get in ``forward``.\n    contextualizer: ``Seq2SeqEncoder``\n        Used to \"contextualize\" the embeddings. As described above,\n        this encoder must not cheat by peeking ahead.\n    dropout: ``float``, optional (default: None)\n        If specified, dropout is applied to the contextualized embeddings before computation of\n        the softmax. 
The contextualized embeddings themselves are returned without dropout.\n bidirectional: ``bool``, optional (default: False)\n Train a bidirectional language model, where the contextualizer\n is used to predict the next and previous token for each input token.\n This must match the bidirectionality of the contextualizer.\n \"\"\"\n\n def __init__(\n self,\n vocab: Vocabulary,\n text_field_embedder: TextFieldEmbedder,\n contextualizer: Seq2SeqEncoder,\n hparams: Dict,\n ) -> None:\n super().__init__(vocab)\n self.text_field_embedder = text_field_embedder\n\n self.contextualizer = contextualizer\n self.bidirectional = contextualizer.is_bidirectional()\n\n if self.bidirectional:\n self.forward_dim = contextualizer.get_output_dim() // 2\n else:\n self.forward_dim = contextualizer.get_output_dim()\n\n dropout = hparams[\"dropout\"]\n if dropout:\n self.dropout = torch.nn.Dropout(dropout)\n else:\n self.dropout = lambda x: x\n\n self.hidden2chord = torch.nn.Sequential(\n torch.nn.Linear(self.forward_dim, hparams[\"fc_hidden_dim\"]),\n torch.nn.ReLU(True),\n torch.nn.Linear(hparams[\"fc_hidden_dim\"], vocab.get_vocab_size()),\n )\n self.perplexity = PerplexityCustom()\n self.accuracy = CategoricalAccuracy()\n self.real_loss = Average()\n\n self.similarity_matrix = hparams[\"similarity_matrix\"]\n self.training_mode = hparams[\"training_mode\"]\n\n self.T_initial = hparams[\"T_initial\"]\n self.T = self.T_initial\n self.decay_rate = hparams[\"decay_rate\"]\n\n self.batches_per_epoch = hparams[\"batches_per_epoch\"]\n self.epoch = 0\n self.batch_counter = 0\n\n def num_layers(self) -> int:\n \"\"\"\n Returns the depth of this LM. That is, how many layers the contextualizer has plus one for\n the non-contextual layer.\n \"\"\"\n if hasattr(self.contextualizer, \"num_layers\"):\n return self.contextualizer.num_layers + 1\n else:\n raise NotImplementedError(\n f\"Contextualizer of type {type(self.contextualizer)} \"\n + \"does not report how many layers it has.\"\n )\n\n def loss_helper(\n self, direction_embeddings: torch.Tensor, direction_targets: torch.Tensor\n ):\n mask = direction_targets > 0\n # we need to subtract 1 to undo the padding id since the softmax\n # does not include a padding dimension\n\n # shape (batch_size * timesteps, )\n non_masked_targets = direction_targets.masked_select(mask)\n\n # shape (batch_size * timesteps, embedding_dim)\n non_masked_embeddings = direction_embeddings.masked_select(\n mask.unsqueeze(-1)\n ).view(-1, self.forward_dim)\n # note: need to return average loss across forward and backward\n # directions, but total sum loss across all batches.\n # Assuming batches include full sentences, forward and backward\n # directions have the same number of samples, so sum up loss\n # here then divide by 2 just below\n probs = torch.nn.functional.log_softmax(\n self.hidden2chord(non_masked_embeddings), dim=-1\n )\n\n real_loss = torch.nn.functional.nll_loss(\n probs, non_masked_targets, reduction=\"sum\"\n )\n # transform targets into probability distributions using Embedding\n # then compute loss using torch.nn.functional.kl_div\n if self.training:\n if self.training_mode == TM_ONE_HOT:\n train_loss = real_loss\n elif self.training_mode == TM_NO:\n target_distributions = self.similarity_matrix(non_masked_targets)\n train_loss = torch.nn.functional.kl_div(\n probs, target_distributions, reduction=\"sum\"\n )\n elif self.training_mode == TM_FIXED or self.training_mode == TM_DECREASED:\n target_distributions = self.similarity_matrix(non_masked_targets)\n target_distributions 
= torch.nn.functional.softmax(\n target_distributions / self.T, dim=1\n )\n train_loss = torch.nn.functional.kl_div(\n probs, target_distributions, reduction=\"sum\"\n )\n else:\n raise ValueError(\"Unknown training mode: {}\".format(self.training_mode))\n else:\n train_loss = real_loss\n return train_loss, real_loss\n\n @overrides\n def forward(\n self,\n input_tokens: Dict[str, torch.LongTensor],\n forward_output_tokens: Dict[str, torch.LongTensor],\n backward_output_tokens: Dict[str, torch.LongTensor] = None,\n ) -> Dict[str, torch.Tensor]:\n \"\"\"\n Computes the averaged forward (and backward, if language model is bidirectional)\n LM loss from the batch.\n Returns\n -------\n Dict with keys:\n ``'loss'``: ``torch.Tensor``\n forward negative log likelihood, or the average of forward/backward\n if language model is bidirectional\n ``'forward_loss'``: ``torch.Tensor``\n forward direction negative log likelihood\n ``'backward_loss'``: ``torch.Tensor`` or ``None``\n backward direction negative log likelihood. If language model is not\n bidirectional, this is ``None``.\n ``'contextual_embeddings'``: ``Union[torch.Tensor, List[torch.Tensor]]``\n (batch_size, timesteps, embed_dim) tensor of top layer contextual representations or\n list of all layers. No dropout applied.\n ``'noncontextual_token_embeddings'``: ``torch.Tensor``\n (batch_size, timesteps, token_embed_dim) tensor of bottom layer noncontextual\n representations\n ``'mask'``: ``torch.Tensor``\n (batch_size, timesteps) mask for the embeddings\n \"\"\"\n self.batch_counter += 1\n if self.batch_counter % self.batches_per_epoch == 0:\n self.epoch += 1\n if self.training_mode == TM_DECREASED:\n self.T *= 1 / (1 + self.decay_rate * self.epoch)\n if self.T < 1e-20:\n self.T = 1e-20\n\n mask = get_text_field_mask(input_tokens)\n\n # shape (batch_size, timesteps, embedding_size)\n embeddings = self.text_field_embedder(input_tokens)\n\n contextual_embeddings = self.contextualizer(embeddings, mask)\n contextual_embeddings_with_dropout = self.dropout(contextual_embeddings)\n\n if self.bidirectional:\n forward_embeddings, backward_embeddings = contextual_embeddings_with_dropout.chunk(\n 2, -1\n )\n backward_logits = self.hidden2chord(backward_embeddings)\n else:\n forward_embeddings = contextual_embeddings_with_dropout\n backward_logits = None\n forward_logits = self.hidden2chord(forward_embeddings)\n\n forward_targets = forward_output_tokens.get(\"tokens\")\n if self.bidirectional:\n backward_targets = backward_output_tokens.get(\"tokens\")\n\n # compute loss\n forward_loss, forward_real_loss = self.loss_helper(\n forward_embeddings, forward_targets\n )\n if self.bidirectional:\n backward_loss, backward_real_loss = self.loss_helper(\n backward_embeddings, backward_targets\n )\n else:\n backward_loss, backward_real_loss = None, None\n\n return_dict = {}\n\n num_targets = torch.sum((forward_targets > 0).long())\n if num_targets > 0:\n if self.bidirectional:\n average_loss = (\n 0.5 * (forward_loss + backward_loss) / num_targets.float()\n )\n average_real_loss = (\n 0.5 * (forward_real_loss + backward_real_loss) / num_targets.float()\n )\n else:\n average_loss = forward_loss / num_targets.float()\n average_real_loss = forward_real_loss / num_targets.float()\n else:\n average_loss = torch.tensor(0.0).to(forward_targets.device)\n average_real_loss = torch.tensor(0.0).to(forward_targets.device)\n\n self.perplexity(average_real_loss)\n self.accuracy(forward_logits, forward_targets, mask)\n self.real_loss(average_real_loss)\n\n 
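\n        # Added note: average_real_loss is the mean per-token negative log-likelihood\n        # in nats, so the perplexity tracked above is exp(loss); e.g. a loss of about\n        # 2.30 nats corresponds to a perplexity of about 10.\n        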
return_dict.update({\"loss\": average_loss})\n\n return_dict.update(\n {\n # Note: These embeddings do not have dropout applied.\n \"contextual_embeddings\": contextual_embeddings,\n \"noncontextual_token_embeddings\": embeddings,\n \"forward_logits\": forward_logits,\n \"backward_logits\": backward_logits,\n \"mask\": mask,\n }\n )\n\n return return_dict\n\n def get_metrics(self, reset: bool = False):\n return {\n \"perplexity\": self.perplexity.get_metric(reset=reset),\n \"accuracy\": self.accuracy.get_metric(reset=reset),\n \"real_loss\": float(self.real_loss.get_metric(reset=reset)),\n }\n\n\n@Metric.register(\"perplexity_custom\")\nclass PerplexityCustom(Average):\n \"\"\"\n Perplexity is a common metric used for evaluating how well a language model\n predicts a sample.\n Notes\n -----\n Assumes negative log likelihood loss of each batch (base e). Provides the\n average perplexity of the batches.\n \"\"\"\n\n @overrides\n def get_metric(self, reset: bool = False) -> float:\n \"\"\"\n Returns\n -------\n The accumulated perplexity.\n \"\"\"\n average_loss = super().get_metric(reset)\n if average_loss == 0:\n return 0.0\n\n # Exponentiate the loss to compute perplexity\n return float(torch.exp(average_loss))\n","sub_path":"modules/chord_progression_models.py","file_name":"chord_progression_models.py","file_ext":"py","file_size_in_byte":12427,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"549428674","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport tensorflow as tf\nfrom sklearn.cluster import DBSCAN\n\n# Note: float32 is used to meet other tensorflow functions\n\n# 'P_cam': Intrinsic matrix (3x4)\n# 'R_lidar2cam': Extrinsic matrix (4x4)\nclass CalibFields(object):\n intrinsic = 'P_cam'\n extrinsic = 'R_lidar2cam'\n\nclass CalibConfigs(object):\n _D_MAX = 75.0\n _D_MIN = 2.0\n _CMAP = plt.get_cmap('brg')\n _MODE = 'standard'\n\n init_v_t = np.array([-0.5,0.02,-0.98],dtype=np.float32)\n init_R_rot = np.array([[0., 0., -1.],\n [0., -1., 0.],\n [1., 0., 0.]],dtype=np.float32)\n # init_R_int = np.array([[898.7, 0., 359., 0.],\n # [0., 901.4, 652., 0.],\n # [0., 0., 1., 0.]],dtype=np.float32)\n\n init_R_int = np.array([[935.0, 0., 359., 0.],\n [0., 935., 640., 0.],\n [0., 0., 1., 0.]],dtype=np.float32)\n\n\"\"\" \n<Coordinate system (Lidar)>\nx: forward \ny: left\nz: up\n\n<Coordinate system (Camera/image)>\nx: down\ny: right\n\"\"\"\n\ndef loadCalib(f_int,f_ext,\n R_int=CalibConfigs.init_R_int,\n v_t=CalibConfigs.init_v_t,\n R_rot=CalibConfigs.init_R_rot,\n ltype='m8'):\n dict_calib = {}\n # Intrinsic matrix (3x4)\n if f_int:\n dict_calib[CalibFields.intrinsic] = np.loadtxt(f_int, delimiter=' ')\\\n .astype(np.float32)\n else:\n dict_calib[CalibFields.intrinsic] = R_int\n # Extrinsic Matrix (4x4)\n if f_ext:\n dict_calib[CalibFields.extrinsic] = np.loadtxt(f_ext, delimiter=' ')\\\n .astype(np.float32)\n else:\n dict_calib[CalibFields.extrinsic] = np.eye(4,dtype=np.float32)\n dict_calib[CalibFields.extrinsic][:3,3] = v_t\n if ltype == 'm8':\n dict_calib[CalibFields.extrinsic][:3,:3] = R_rot\n elif ltype == 'velo':\n rot90 = np.zeros((3,3))\n rot90[0,1] = -1.0\n rot90[1,0] = 1.0\n rot90[2,2] = 1.0\n dict_calib[CalibFields.extrinsic][:3,:3] = np.dot(R_rot,rot90)\n else:\n dict_calib[CalibFields.extrinsic][:3,:3] = R_rot\n return dict_calib\n\ndef minmax_scale(x,i_min,i_max,o_min,o_max):\n # MinMax scaling of 
x\n    # i_min<= x <= i_max to o_min<= x_new <= o_max\n    return (x-i_min)/float(i_max-i_min)*(o_max-o_min)+o_min\n\ndef coord_transform(points, t_mat):\n    # Change to homogeneous form\n    points = np.hstack([points,np.ones((np.shape(points)[0],1))])\n    t_points = np.dot(points,t_mat.T)\n    # Normalize\n    t_points = t_points[:,:-1]/t_points[:,[-1]]\n    return t_points\n\ndef project_lidar_to_img(dict_calib,points,im_height,im_width):\n    # Extract depth data first before projection to 2d image space\n    trans_mat = dict_calib[CalibFields.extrinsic]\n    points3D = coord_transform(points,trans_mat)\n    pointsDist = points3D[:,2]\n    pointsDistR = np.linalg.norm(points3D,axis=1) # Radial distance\n\n    # Project to image space\n    trans_mat = np.dot(dict_calib[CalibFields.intrinsic],trans_mat)\n    points2D = coord_transform(points,trans_mat)\n\n    # Find only feasible points\n    idx1 = (points2D[:,0]>=0) & (points2D[:,0] <=im_height-1)\n    idx2 = (points2D[:,1]>=0) & (points2D[:,1] <=im_width-1)\n    idx3 = (pointsDist>=0)\n    idx_in = idx1 & idx2 & idx3\n\n    return points2D[idx_in,:], pointsDist[idx_in], pointsDistR[idx_in]\n\ndef dist_to_pixel(val_dist, mode,\n                  d_max=CalibConfigs._D_MAX, d_min=CalibConfigs._D_MIN):\n    \"\"\" Returns pixel value from distance measurement\n    Args:\n        val_dist: distance value (m)\n        mode: 'inverse' vs 'standard'\n        d_max: maximum distance to consider\n        d_min: minimum distance to consider\n    Returns:\n        pixel value in 'uint8' format\n    \"\"\"\n    val_dist = d_max if val_dist>d_max else val_dist if val_dist>d_min else d_min\n    if mode == 'standard':\n        return np.round(minmax_scale(val_dist,\n                                     d_max,d_min,\n                                     1,255)).astype('uint8')\n    elif mode == 'inverse':\n        return np.round(minmax_scale(1.0/val_dist,\n                                     1.0/d_max,1.0/d_min,\n                                     1,255)).astype('uint8')\n    else:\n        # Default is standard\n        return np.round(minmax_scale(val_dist,\n                                     d_max,d_min,\n                                     1,255)).astype('uint8')\n\ndef points_to_img(points2D,pointsDist,im_height,im_width,\n                  mode=CalibConfigs._MODE):\n    points2D = np.round(points2D).astype('int')\n    im_depth = np.zeros((im_height,im_width),dtype=np.uint8)\n    for i,point in enumerate(points2D):\n        im_depth[point[0],point[1]] = dist_to_pixel(pointsDist[i],mode=mode)\n\n    return im_depth.reshape(im_height,im_width,1)\n\ndef points_on_img(points2D,pointsDist,image,\n                  mode=CalibConfigs._MODE):\n    points2D = np.round(points2D).astype('int')\n    for i,point in enumerate(points2D):\n        pre_pixel = dist_to_pixel(pointsDist[i],mode=mode)\n        image[point[0],point[1],:] = (255*np.array(\n            CalibConfigs._CMAP(pre_pixel/255.0)[:3]))\\\n            .astype(np.uint8)\n\n    return image\n\ndef dist_from_lidar_bbox(points2D,pointsDist,pointsDistR,bbox,\n                         im_height,im_width,mode='min'):\n    \"\"\"\n    Args:\n        points2D: lidar points in image coordinate 2d (nx2)\n        pointsDist: Forward-wise distance of points (nx1)\n        pointsDistR: Radial distance of points from the cam (nx1)\n        dict_calib: dictionary for intrinsic/extrinsic parameters(Lidar->CAM)\n        bbox: coordinates of bounding box\n            (ymin, xmin, ymax, xmax) (normalized 0~1)\n        im_height: height of an image\n        im_width: width of an image\n    Returns:\n        distance of the object in the box from the car(camera)\n    \"\"\"\n    # Index of points in the given bounding box\n    idx_in = (points2D[:,0]>=(bbox[0]*im_height)) & \\\n             (points2D[:,0]<(bbox[2]*im_height)) & \\\n             (points2D[:,1]>=(bbox[1]*im_width)) & \\\n             (points2D[:,1]<(bbox[3]*im_width))\n    points2D_obj = points2D[idx_in,:]\n    pointsDist_obj = pointsDist[idx_in]\n    pointsDistR_obj = pointsDistR[idx_in]\n    if len(points2D_obj)==0:\n        # print('!! 
Warning: No corresponding point in the box: {} points'.format(\n        #     len(points2D_obj)))\n        return 10.0, True\n    # Cluster points based on the z-axis distance\n    # Return the average radial distance of points in the cluster with max size\n    db = DBSCAN().fit(pointsDist_obj.reshape(-1,1))\n    c_labels = db.labels_\n    labels_list = list(set(c_labels)-set([-1]))\n    if len(labels_list)==0:\n        # print('!! Warning: Clustering failed. {} points'.format(\n        #     len(points2D_obj)))\n        return np.min(pointsDistR_obj), False\n        # return np.mean(pointsDistR_obj)\n    if mode == 'min':\n        c_dists = [np.mean(pointsDist_obj[c_labels==label]) \\\n                   for label in labels_list]\n        c_consider = labels_list[c_dists.index(min(c_dists))]\n    else:\n        c_sizes = [sum(c_labels==label) for label in labels_list]\n        c_consider = labels_list[c_sizes.index(max(c_sizes))]\n    return np.mean(pointsDistR_obj[c_labels==c_consider]), False\n\n# --------------------------------------------------------------\n# Functions for Tensorflow\n# --------------------------------------------------------------\n\ndef tf_coord_transform(points, t_mat):\n    # Change to homogeneous form\n    points = tf.concat([points,tf.ones([tf.shape(points)[0],1],tf.float32)], 1)\n    t_points = tf.matmul(points,tf.transpose(t_mat))\n    # Normalize\n    t_points = tf.div(t_points[:,:-1],tf.expand_dims(t_points[:,-1],1))\n    return t_points\n\ndef tf_project_lidar_to_img(dict_calib,points,im_height,im_width):\n    # Extract depth data first before projection to 2d image space\n    trans_mat = dict_calib[CalibFields.extrinsic]\n    points3D = tf_coord_transform(points,trans_mat)\n    pointsDist = points3D[:,2]\n    pointsDistR = tf.norm(points3D,axis=1)\n\n    # Project to image space\n    trans_mat = tf.matmul(dict_calib[CalibFields.intrinsic],trans_mat)\n    points2D = tf_coord_transform(points,trans_mat)\n\n    # Find only feasible points\n    idx1 = (points2D[:,0]>=0) & (points2D[:,0] <=tf.to_float(im_height)-1)\n    idx2 = (points2D[:,1]>=0) & (points2D[:,1] <=tf.to_float(im_width)-1)\n    idx3 = (pointsDist>=0)\n    idx_in = idx1 & idx2 & idx3\n\n    return (tf.boolean_mask(points2D,idx_in), tf.boolean_mask(pointsDist,idx_in),\n            tf.boolean_mask(pointsDistR,idx_in))\n\ndef tf_dist_to_pixel(val_dist, mode,\n                     d_max=CalibConfigs._D_MAX, d_min=CalibConfigs._D_MIN):\n    \"\"\" Returns pixel value from distance measurement\n    Args:\n        val_dist: distance value (m)\n        mode: 'inverse' vs 'standard'\n        d_max: maximum distance to consider\n        d_min: minimum distance to consider\n    Returns:\n        pixel value in 'uint8' format\n    \"\"\"\n    val_dist = tf.maximum(val_dist,d_min)\n    val_dist = tf.minimum(val_dist,d_max)\n    if mode == 'standard':\n        return tf.cast(tf.round(minmax_scale(val_dist,\n                                             d_max,d_min,\n                                             1,255)),tf.uint8)\n    elif mode == 'inverse':\n        return tf.cast(tf.round(minmax_scale(1.0/val_dist,\n                                             1.0/d_max,1.0/d_min,\n                                             1,255)),tf.uint8)\n    else:\n        # Default is standard\n        return tf.cast(tf.round(minmax_scale(val_dist,\n                                             d_max,d_min,\n                                             1,255)),tf.uint8)\n\ndef tf_points_to_img(points2D,pointsDist,im_height,im_width,\n                     mode=CalibConfigs._MODE):\n    pointsPixel = tf_dist_to_pixel(pointsDist,mode=mode)\n    points2D_yx = tf.cast(tf.round(points2D),tf.int32)\n    img = tf.scatter_nd(points2D_yx,pointsPixel,[im_height,im_width])\n\n    return tf.expand_dims(img, 2)\n\ndef imlidarwrite(fname,im,im_depth):\n    \"\"\"Write image with RGB and depth\n    Args:\n        fname: file name\n        im: RGB image array (h x w x 3)\n        im_depth: depth image array (h x w)\n    \"\"\"\n    im_out = im.copy()\n    im_depth = np.squeeze(im_depth,axis=2)\n    idx_h, idx_w = np.nonzero(im_depth)\n    for hi,wi in 
zip(idx_h,idx_w):\n        im_out[hi,wi,:] = (255*np.array(\n            CalibConfigs._CMAP(im_depth[hi,wi]/255.0)[:3]))\\\n            .astype(np.uint8)\n    plt.imsave(fname,im_out)  # matplotlib.pyplot is already imported above as plt\n    print(\" ... Write:{}\".format(fname))\n","sub_path":"config/utils_data.py","file_name":"utils_data.py","file_ext":"py","file_size_in_byte":10899,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"451356581","text":"\"\"\"Project a graph, GeoDataFrame, or geometry to a different CRS.\"\"\"\n\nimport geopandas as gpd\nimport numpy as np\n\nfrom . import settings\nfrom . import utils\nfrom . import utils_graph\n\n\ndef is_projected(crs):\n    \"\"\"\n    Determine if a coordinate reference system is projected or not.\n\n    Parameters\n    ----------\n    crs : string or pyproj.CRS\n        the identifier of the coordinate reference system, which can be\n        anything accepted by `pyproj.CRS.from_user_input()` such as an\n        authority string or a WKT string\n\n    Returns\n    -------\n    projected : bool\n        True if crs is projected, otherwise False\n    \"\"\"\n    return gpd.GeoSeries(crs=crs).crs.is_projected\n\n\ndef coords_to_utm_zone(coords):\n    \"\"\"\n    Return the CRS of the UTM zone that contains a (lat, lng) point.\n\n    The simple Universal Transverse Mercator coordinate system zone calculator\n    in this function works well for most latitudes, but ignores irregular zone\n    boundaries and may not work in extreme northern or southern locations.\n\n    Parameters\n    ----------\n    coords : tuple of floats\n        the (lat, lng) coordinates\n\n    Returns\n    -------\n    utm_crs : string\n        PROJ.4 string defining the CRS for this UTM zone\n    \"\"\"\n    lat, lng = coords\n\n    # calculate UTM zone from lng, and whether it's south of equator from lat\n    zone = int(np.floor((lng + 180) / 6) + 1)\n    south = \" +south\" if lat < 0 else \"\"\n    return f\"+proj=utm +zone={zone}{south} +ellps=WGS84 +datum=WGS84 +units=m +no_defs\"\n\n\ndef project_geometry(geometry, crs=None, to_crs=None, to_latlong=False):\n    \"\"\"\n    Project a Shapely geometry from its current CRS to another.\n\n    If `to_latlong` is `True`, this projects the geometry to the CRS defined\n    by `settings.default_crs`, otherwise it projects it to the CRS defined by\n    `to_crs`. If `to_crs` is `None`, it projects it to the CRS of the UTM zone\n    in which `geometry`'s approximate centroid lies, using the\n    `coords_to_utm_zone` function.\n\n    Parameters\n    ----------\n    geometry : shapely geometry\n        the geometry to be projected\n    crs : string or pyproj.CRS\n        the initial CRS of `geometry`. if None, it will be set to\n        `settings.default_crs`\n    to_crs : string or pyproj.CRS\n        if None, project to an appropriate UTM zone, otherwise project to\n        this CRS\n    to_latlong : bool\n        if True, project to `settings.default_crs` and ignore `to_crs`\n\n    Returns\n    -------\n    geometry_proj, crs : tuple\n        the projected geometry and its new CRS\n    \"\"\"\n    if crs is None:\n        crs = settings.default_crs\n\n    gdf = gpd.GeoDataFrame(geometry=[geometry], crs=crs)\n    gdf_proj = project_gdf(gdf, to_crs=to_crs, to_latlong=to_latlong)\n    geometry_proj = gdf_proj[\"geometry\"].iloc[0]\n    return geometry_proj, gdf_proj.crs\n\n\ndef project_gdf(gdf, to_crs=None, to_latlong=False):\n    \"\"\"\n    Project a GeoDataFrame from its current CRS to another.\n\n    If `to_latlong` is `True`, this projects the GeoDataFrame to the CRS defined\n    by `settings.default_crs`, otherwise it projects it to the CRS defined by\n    `to_crs`. 
If `to_crs` is `None`, it projects it to the CRS of the UTM zone\n in which `gdf`'s approximate centroid lies, using the `coords_to_utm_zone`\n function.\n\n Parameters\n ----------\n gdf : geopandas.GeoDataFrame\n the GeoDataFrame to be projected\n to_crs : string or pyproj.CRS\n if None, project to an appropriate UTM zone, otherwise project to\n this CRS\n to_latlong : bool\n if True, project to `settings.default_crs` and ignore `to_crs`\n\n Returns\n -------\n gdf_proj : geopandas.GeoDataFrame\n the projected GeoDataFrame\n \"\"\"\n if gdf.crs is None or len(gdf) < 1: # pragma: no cover\n msg = \"GeoDataFrame must have a valid CRS and cannot be empty\"\n raise ValueError(msg)\n\n # if to_latlong is True, project the gdf to the default_crs\n if to_latlong:\n to_crs = settings.default_crs\n\n # else if to_crs is None, project gdf to an appropriate UTM zone\n elif to_crs is None:\n if is_projected(gdf.crs): # pragma: no cover\n msg = \"Geometries must be unprojected to calculate a UTM zone\"\n raise ValueError(msg)\n\n # calculate the \"typical\" lat-long across all geometries in gdf\n rp = gdf.representative_point()\n to_crs = coords_to_utm_zone((rp.y.median(), rp.x.median()))\n\n # project the gdf\n gdf_proj = gdf.to_crs(to_crs)\n utils.log(f\"Projected GeoDataFrame to {to_crs}\")\n return gdf_proj\n\n\ndef project_graph(G, to_crs=None, to_latlong=False):\n \"\"\"\n Project a graph from its current CRS to another.\n\n If `to_latlong` is `True`, this projects the graph to the CRS defined by\n `settings.default_crs`, otherwise it projects it to the CRS defined by\n `to_crs`. If `to_crs` is `None`, it projects it to the CRS of the UTM zone\n in which `G`'s approximate centroid lies, using the `coords_to_utm_zone`\n function.\n\n Parameters\n ----------\n G : networkx.MultiDiGraph\n the graph to be projected\n to_crs : string or pyproj.CRS\n if None, project to an appropriate UTM zone, otherwise project to\n this CRS\n to_latlong : bool\n if True, project to `settings.default_crs` and ignore `to_crs`\n\n Returns\n -------\n G_proj : networkx.MultiDiGraph\n the projected graph\n \"\"\"\n if to_latlong:\n to_crs = settings.default_crs\n\n # STEP 1: PROJECT THE NODES\n gdf_nodes = utils_graph.graph_to_gdfs(G, edges=False)\n\n # create new lat/lng columns to preserve lat/lng for later reference if\n # cols do not already exist (ie, don't overwrite in later re-projections)\n if \"lon\" not in gdf_nodes.columns or \"lat\" not in gdf_nodes.columns:\n gdf_nodes[\"lon\"] = gdf_nodes[\"x\"]\n gdf_nodes[\"lat\"] = gdf_nodes[\"y\"]\n\n # project the nodes GeoDataFrame and extract the projected x/y values\n gdf_nodes_proj = project_gdf(gdf_nodes, to_crs=to_crs)\n gdf_nodes_proj[\"x\"] = gdf_nodes_proj[\"geometry\"].x\n gdf_nodes_proj[\"y\"] = gdf_nodes_proj[\"geometry\"].y\n to_crs = gdf_nodes_proj.crs\n gdf_nodes_proj = gdf_nodes_proj.drop(columns=[\"geometry\"])\n\n # STEP 2: PROJECT THE EDGES\n if \"simplified\" in G.graph and G.graph[\"simplified\"]:\n # if graph has previously been simplified, project the edge geometries\n gdf_edges = utils_graph.graph_to_gdfs(G, nodes=False, fill_edge_geometry=False)\n gdf_edges_proj = project_gdf(gdf_edges, to_crs=to_crs)\n else:\n # if not, you don't have to project these edges because the nodes\n # contain all the spatial data in the graph (unsimplified edges have\n # no geometry attributes)\n gdf_edges_proj = utils_graph.graph_to_gdfs(G, nodes=False, fill_edge_geometry=False).drop(\n columns=[\"geometry\"]\n )\n\n # STEP 3: REBUILD GRAPH\n # turn projected 
node/edge gdfs into a graph and update its CRS attribute\n G_proj = utils_graph.graph_from_gdfs(gdf_nodes_proj, gdf_edges_proj, G.graph)\n G_proj.graph[\"crs\"] = to_crs\n\n utils.log(f\"Projected graph with {len(G)} nodes and {len(G.edges)} edges\")\n return G_proj\n","sub_path":"osmnx/projection.py","file_name":"projection.py","file_ext":"py","file_size_in_byte":7181,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"106542128","text":"\"\"\"\nMeasure one resonator per LO frequency. Since each measurement has only one channel, record SingleSweepStreams.\n\"\"\"\nimport time\n\nimport numpy as np\n\nfrom kid_readout.roach import analog, hardware_tools\nfrom kid_readout.measurement import acquire, basic\nfrom kid_readout.equipment import hardware\n\nacquire.show_settings()\nacquire.show_git_status()\nlogger = acquire.get_script_logger(__file__)\n\n# Parameters\nsuffix = 'uncontrolled'\nall_f0_MHz = np.array([2522.24, 2605.96, 2723.65, 2787.96, 3851.13])\nf0_MHz = all_f0_MHz\nfrequency_shift = 1\nf0_MHz *= frequency_shift\nattenuations = [10, 20, 30]\nsweep_interval = 2\ntone_sample_exponent = 18\nsweep_length_seconds = 0.1\nstream_length_seconds = 10\nlo_round_to_MHz = 0.1\nf_minimum = 10e6 # Keep the tones away from the LO by at least this frequency.\nf_stream_offset_MHz = 10 # Set a second tone away from the resonance by this amount\nnum_sweep_tones = 127\nfft_gain = 3\n\n# Hardware\nconditioner = analog.HeterodyneMarkII()\nshield = hardware.Thing(name='mu_metal_pocket', state={'orientation': 'horizontal'})\nhw = hardware.Hardware(conditioner, shield)\nri = hardware_tools.r1h11_with_mk2(initialize=True, use_config=False)\nri.adc_valon.set_ref_select(0) # internal\nri.lo_valon.set_ref_select(1) # external\nri.set_fft_gain(fft_gain)\n\n# Calculate LO and baseband frequencies\nnum_tone_samples = 2**tone_sample_exponent\nf_resolution = ri.state.adc_sample_rate / num_tone_samples\nminimum_integer = int(f_minimum / f_resolution)\noffset_integers = minimum_integer + sweep_interval * np.arange(num_sweep_tones)\noffset_frequencies_MHz = 1e-6 * f_resolution * offset_integers\noffset_array_MHz = offset_frequencies_MHz[:, np.newaxis] + np.array([0, f_stream_offset_MHz])[np.newaxis, :]\nall_lo_MHz = lo_round_to_MHz * np.round((f0_MHz - offset_frequencies_MHz.mean()) / lo_round_to_MHz)\nlogger.info(\"Frequency spacing is {:.1f} kHz\".format(1e3 * (offset_frequencies_MHz[1] - offset_frequencies_MHz[0])))\nlogger.info(\"Sweep span is {:.1f} MHz\".format(offset_frequencies_MHz.ptp()))\n\n# Run\nnpd = acquire.new_npy_directory(suffix=suffix)\ntic = time.time()\ntry:\n ri.set_tone_baseband_freqs(offset_array_MHz, nsamp=num_tone_samples)\n for lo_index, lo_MHz in enumerate(all_lo_MHz):\n ri.set_lo(lomhz=lo_MHz, chan_spacing=lo_round_to_MHz)\n for attenuation_index, attenuation in enumerate(attenuations):\n assert np.all(ri.adc_valon.get_phase_locks())\n assert np.all(ri.lo_valon.get_phase_locks())\n ri.set_dac_attenuator(attenuation)\n state = hw.state()\n state['lo_index'] = lo_index\n sweep_array = acquire.run_loaded_sweep(ri, length_seconds=sweep_length_seconds,\n tone_bank_indices=np.arange(num_sweep_tones),\n demod=True)\n on_sweep = sweep_array[0]\n off_sweep = sweep_array[1]\n off_sweep.state = state\n f0_MHz = 1e-6 * on_sweep.resonator.f_0\n logger.info(\"Fit resonance frequency is {:.3f} MHz\".format(f0_MHz))\n # Overwrite the last waveform after the first loop.\n is_not_first_loop = (lo_index > 0) or (attenuation_index > 0)\n f_stream_MHz = 
ri.add_tone_freqs(np.array([f0_MHz, f0_MHz + f_stream_offset_MHz]),\n overwrite_last=is_not_first_loop)\n ri.select_bank(num_sweep_tones)\n ri.select_fft_bins(np.arange(f_stream_MHz.size))\n logger.info(\"Recording {:.1f} s streams at MHz frequencies {}\".format(stream_length_seconds, f_stream_MHz))\n stream_array = ri.get_measurement(num_seconds=stream_length_seconds,\n demod=True)\n on_stream = stream_array[0]\n off_stream = stream_array[1]\n off_stream.state = state\n sweep_stream = basic.SingleSweepStream(sweep=on_sweep, stream=on_stream, state=state,\n description='f_0 = {:.1f}'.format(f0_MHz))\n npd.write(sweep_stream)\n npd.write(off_sweep)\n npd.write(off_stream)\n # Record an ADCSnap with the stream tones playing.\n npd.write(ri.get_adc_measurement())\nfinally:\n npd.close()\n print(\"Wrote {}\".format(npd.root_path))\n print(\"Elapsed time {:.0f} minutes.\".format((time.time() - tic) / 60))\n","sub_path":"apps/data_taking_scripts/cooldown/2017-02-10_hpd/r1h11_sweepstream_uncontrolled.py","file_name":"r1h11_sweepstream_uncontrolled.py","file_ext":"py","file_size_in_byte":4425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"2152092","text":"from metapoppy.event import Event\nfrom ..tbcompartments import *\nimport numpy\n\n\nclass CellDeath(Event):\n def __init__(self, death_rate_key, dying_compartment, additional_parameter_keys=None):\n self._dying_compartment = dying_compartment\n if dying_compartment in INTERNAL_BACTERIA_FOR_CELL:\n self._internal_compartment = INTERNAL_BACTERIA_FOR_CELL[dying_compartment]\n else:\n self._internal_compartment = None\n Event.__init__(self, death_rate_key, additional_parameter_keys)\n\n def _calculate_state_variable_at_patch(self, network, patch_id):\n return network.get_compartment_value(patch_id, self._dying_compartment)\n\n def perform(self, network, patch_id):\n changes = {self._dying_compartment: -1}\n if self._internal_compartment:\n bac_to_release = int(round(float(\n network.get_compartment_value(patch_id, self._internal_compartment)) /\n network.get_compartment_value(patch_id, self._dying_compartment)))\n changes[self._internal_compartment] = bac_to_release * -1\n changes[BACTERIUM_EXTRACELLULAR_DORMANT] = bac_to_release\n if self._dying_compartment == MACROPHAGE_INFECTED:\n changes[CASEUM] = 1\n network.update_patch(patch_id, changes)\n\n\nclass MacrophageBursting(CellDeath):\n def __init__(self, death_rate_key, sigmoid_key, capacity_key):\n self._sigmoid_key = sigmoid_key\n self._capacity_key = capacity_key\n CellDeath.__init__(self, death_rate_key, MACROPHAGE_INFECTED, [sigmoid_key, capacity_key])\n\n def _calculate_state_variable_at_patch(self, network, patch_id):\n bac = network.get_compartment_value(patch_id, BACTERIUM_INTRACELLULAR_MACROPHAGE)\n if not bac:\n return 0\n mac = network.get_compartment_value(patch_id, self._dying_compartment)\n sig = self._parameters[self._sigmoid_key]\n cap = self._parameters[self._capacity_key]\n return mac * ((float(bac) ** sig) / (bac ** sig + ((cap * mac) ** sig)))\n\n\nclass TCellDestroysMacrophage(CellDeath):\n def __init__(self, death_rate_key, half_sat_key):\n self._half_sat_key = half_sat_key\n CellDeath.__init__(self, death_rate_key, MACROPHAGE_INFECTED, [half_sat_key])\n\n def _calculate_state_variable_at_patch(self, network, patch_id):\n tcell = network.get_compartment_value(patch_id, T_CELL_ACTIVATED)\n if not tcell:\n return 0\n mac = network.get_compartment_value(patch_id, self._dying_compartment)\n return mac * (float(tcell) / 
(tcell + self._parameters[self._half_sat_key]))\n\n\nclass MacrophageDestroysBacterium(Event):\n    def __init__(self, death_rate_key, macrophage_type, half_sat_key):\n        self._macrophage_type = macrophage_type\n        self._half_sat_key = half_sat_key\n        Event.__init__(self, death_rate_key, [half_sat_key])\n\n    def _calculate_state_variable_at_patch(self, network, patch_id):\n        mac = network.get_compartment_value(patch_id, self._macrophage_type)\n        bac = network.get_compartment_value(patch_id, BACTERIUM_EXTRACELLULAR_REPLICATING) + \\\n              network.get_compartment_value(patch_id, BACTERIUM_EXTRACELLULAR_DORMANT)\n        if not mac or not bac:\n            return 0\n        return mac * float(bac) / (bac + self._parameters[self._half_sat_key])\n\n    def perform(self, network, patch_id):\n        replicating = network.get_compartment_value(patch_id, BACTERIUM_EXTRACELLULAR_REPLICATING)\n        dormant = network.get_compartment_value(patch_id, BACTERIUM_EXTRACELLULAR_DORMANT)\n        total_bacteria = replicating + dormant\n\n        prob = numpy.array([replicating, dormant], dtype=numpy.dtype('float')) / total_bacteria\n        bacteria_type_chosen = numpy.random.choice([BACTERIUM_EXTRACELLULAR_REPLICATING,\n                                                    BACTERIUM_EXTRACELLULAR_DORMANT],\n                                                    p=prob)\n\n        network.update_patch(patch_id, {bacteria_type_chosen: -1})","sub_path":"tbmetapoppy/events/cell_death.py","file_name":"cell_death.py","file_ext":"py","file_size_in_byte":4063,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"174248280","text":"\"\"\"\n    Inheritance\n\"\"\"\nimport logging\n\n\n# 1. Logger: produces the log records; create a logger\n# the parent-child relationship is expressed with \".\" in the logger name\nlogger1 = logging.getLogger(\"root\")\nlogger2 = logging.getLogger(\"root.child1\")\nlogger3 = logging.getLogger(\"root.child1.child2\")\n\n\n# 2. Filter ==> generally not used\n\n# 3. Handler: receives records from the logger and formats them; can print to the terminal or write to a file\n\n# print to the terminal\nsh = logging.StreamHandler()\n\n# 4. Formatter: the log format\nformatter = logging.Formatter(\n    fmt=\"%(asctime)s - %(name)s - %(levelname)s - %(module)s - %(message)s\",\n    datefmt=\"%Y-%m-%d %H:%M:%S %p\",\n)\n\n# 5. bind the log format to the handler\nsh.setFormatter(formatter)\n\n# 6. bind the handler to each logger\nlogger1.addHandler(sh)\nlogger2.addHandler(sh)\nlogger3.addHandler(sh)\n\n# set the log levels\nlogger1.setLevel(10)  # this is created first, so this logger's level must be lower than the ones below for those settings to take effect\nlogger2.setLevel(10)\nlogger3.setLevel(10)\nsh.setLevel(0)\n\n# 7. test\nlogger1.debug(\"grandpa\")\nlogger2.debug(\"dad\")\nlogger3.debug(\"son\")\n","sub_path":"prac/logging_prac/03_日志的继承.py","file_name":"03_日志的继承.py","file_ext":"py","file_size_in_byte":1082,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
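# Added usage sketch (not a dataset record; standard library only): the file
# above attaches the same StreamHandler to "root", "root.child1" and
# "root.child1.child2", so a single logger3.debug(...) call prints three times,
# because child loggers propagate records to every ancestor's handlers by
# default. Attaching the handler once, at the top of the hierarchy, yields one
# line of output per call:
import logging

handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter("%(name)s - %(message)s"))

parent = logging.getLogger("root")               # ancestor logger
child = logging.getLogger("root.child1.child2")  # inherits level and handlers

parent.setLevel(logging.DEBUG)
parent.addHandler(handler)  # one handler, attached only at the top

child.debug("son")  # printed exactly once, via propagation to "root"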