diff --git "a/1359.jsonl" "b/1359.jsonl" new file mode 100644--- /dev/null +++ "b/1359.jsonl" @@ -0,0 +1,614 @@ +{"seq_id":"209501423","text":"from imblearn.over_sampling import RandomOverSampler\nfrom sklearn.model_selection import train_test_split\nimport pandas as pd\nimport numpy as np\nfrom collections import Counter\n\n\n\ndef read_data(filename='MTH1_DSF_frags_train.csv'):\n df = pd.read_csv(filename)\n print(df.head())\n return df\n\ndef features_yfill(data):\n y = data.loc[:,'Hit']\n yfill = y.fillna(0)\n features = data.loc[:,'SlogP':]\n return features, yfill\n\ndef features_ydTm(data):\n y = data.loc[:,'dTm']\n yfill = y.fillna(0)\n features = data.loc[:,'SlogP':]\n return features, yfill\n\ndef features_ytm_score(data):\n y = data['dTm'].map(lambda x: 2 if x>9 else 1 if 59 else 1 if 5 0:\n return letter\n start = start + 1\n\n return ''\n\n\ndef recurring_character(word: str):\n\n ocurrences = set()\n for letter in word:\n if letter in ocurrences:\n return letter\n ocurrences.add(letter)\n\n return ''\n","sub_path":"RecurringCharacter/recurring_character.py","file_name":"recurring_character.py","file_ext":"py","file_size_in_byte":488,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"600950239","text":"import re\n\n\ndef line_to_sentences(line, need_get_substring=True):\n white_space_regex = re.compile(r\"\"\"[' '\\n\\r\\t\\xa0@。;?!??|;!!【】]\"\"\")\n content = white_space_regex.sub(\"\\n\", line)\n dont_need_mark = re.compile(r\"[\\\"…… /]\")\n content = dont_need_mark.sub(\" \", content)\n\n if need_get_substring:\n split_mark = re.compile(r\"\"\"[,,<> · () ():)()]\"\"\")\n content = split_mark.sub(\" \", content)\n\n content = re.sub(\"\\s+\", ' ', content).strip()\n return content.split()\n\n\ndef delete_bracket(string, bracket='(', end_pair=')'):\n '''\n delete the bracket content in a string. 
\n e.g 北京办公楼外北侧的雨水收集池(位于建筑物20米开外)起火,原因是工人操作不当,引燃了塑料材料。目前火已扑灭,现场无人员伤亡,感谢大家的关心。”\n change to 北京办公楼外北侧的雨水收集池起火,原因是工人操作不当,引燃了塑料材料。目前火已扑灭,现场无人员伤亡,感谢大家的关心。”\n :param string: \n :return: \n '''\n brackets = \"[\\(\\[\\(\\【].*?[\\)\\)\\]\\】]\"\n string = re.sub(brackets, \"\", string)\n return string\n\n\n","sub_path":"sentence_manager/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1204,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"219460904","text":"#Character Input\n#User enters input and number of copies to be printed\n\nNameInput = input(\"Input your name: \")\nAgeInput = int(input(\"Input your age: \"))\nYearby100 = str(2019 + (100-AgeInput))\nprint(NameInput + \", you will turn 100 years old in \" + Yearby100)\n\n\n#Extras\n#CharInput = input(\"Input characters: \")\n#CopyInput = int(input(\"Input number of copies: \"))\n#Simple Way\n#print(CopyInput*(CharInput+\"\\n\"))\n\n#Better Output - no blank at the end\n#for i in range(CopyInput):\n# print(CharInput)\n# if i == CopyInput:\n# break\n","sub_path":"Exer1/Exer1.py","file_name":"Exer1.py","file_ext":"py","file_size_in_byte":536,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"175070636","text":"# f = open('hello.txt','w')\n# f.write('hello wolrd, python')\n#\n# f.close()\n\n\n#向文件读取数据\n# f = open('hello.txt')\n#\n# content = f.read()\n#\n# print(content)\n#\n# f.close()\n\n#复制文件\n\n#1.输入要复制的文件的名字\nold_file_name = input('请输入要复制文件的名字!')\n\n#2.打开old文件\nold_file = open(old_file_name)\n\n#3.创建一个new文件\n#hello.txt -> hello[副本].txt\nfind_postion = old_file_name.find('.')\nnew_file_name = old_file_name[:find_postion] + '[副本]' + old_file_name[find_postion:]\nnew_file = open(new_file_name,'w')\n\n#4.复制文件\n# content = new_file.read()\n#\n# new_file.wirte(content)\n\nwhile True:\n\n content = old_file.read(1024)\n\n if len(content) <=0:\n break\n new_file.write(content)\n\n\n#5.关闭文件\n\nold_file.close()\n\nnew_file.close()","sub_path":"每天demo/总结/向文件写入数据.py","file_name":"向文件写入数据.py","file_ext":"py","file_size_in_byte":809,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"405298892","text":"\"\"\"\nThis modules provides functions for creating, updating, and deleting\nfriend records from our database.\n\"\"\"\n\nimport sqlite3\n\n\nclass Datastore:\n \"\"\"\n Provides an interface to an SQLite database and associated methods.\n \"\"\"\n\n def __init__(self):\n self.connection = sqlite3.connect(\"/tmp/friends.db\")\n\n def friends(self) -> dict:\n \"\"\"\n Return a representation of all rows in the friends table.\n\n Returns\n A JSON ready dictionary representing all rows of the friends table.\n \"\"\"\n cursor = self.connection.execute(\n 'select id, firstName, lastName, telephone, email, notes '\n 'from friends')\n\n friends_collection = list()\n for friend_row in cursor.fetchall():\n friends_collection.append(\n {\"id\": friend_row[0],\n \"firstName\": friend_row[1],\n \"lastName\": friend_row[2],\n \"telephone\": friend_row[3],\n \"email\": friend_row[4],\n \"notes\": friend_row[5]})\n\n return friends_collection\n\n def friend(self, id: str) -> dict:\n \"\"\"\n Obtain a specific friend record and return a representation of it.\n\n Args:\n id (str): An `id` value which will be used to find a specific\n datastore row.\n\n Returns\n A JSON ready dictionary representing a specific\n row of the friends table.\n \"\"\"\n cursor = self.connection.execute(\n 'select id, 
firstName, lastName, telephone, email, notes '\n 'from friends where lower(id) = ?',\n [id.lower()])\n\n friend_row = cursor.fetchone()\n\n if friend_row:\n return {\n \"id\": friend_row[0],\n \"firstName\": friend_row[1],\n \"lastName\": friend_row[2],\n \"telephone\": friend_row[3],\n \"email\": friend_row[4],\n \"notes\": friend_row[5]}\n\n def create_friend(self, data: dict):\n \"\"\"\n Create a new friend record in our database.\n\n Args:\n data: A dictionary of data for our new friend. Must have\n the following elements: ['id', 'firstName', 'lastName',\n 'telephone', 'email', 'notes']\n\n Raises:\n ValueError: If data is None, doesn't contain all required\n elements, or an existing record with the same id exists\n in the friends table.\n \"\"\"\n if data is None:\n raise ValueError(\n \"`None` was received when a dict was expected during \"\n \"the attempt to create a new friend resource.\")\n\n required_elements = {\"id\", \"firstName\", \"lastName\", \"telephone\",\n \"email\", \"notes\"}\n\n if not required_elements.issubset(data):\n raise ValueError(\"Some of the data required to create a friend \"\n \"was not present. The following elements \"\n \"must be present to create a friend: {}\".format(\n required_elements))\n\n if self.friend(data['id']):\n raise ValueError(\n \"A friend already exists with the `id` specified: {}\".format(\n data['id']))\n\n self.connection.execute(\n 'insert into friends (id, firstName, lastName, telephone, email, notes) '\n 'values (?, ?, ?, ?, ?, ?)',\n [data['id'],\n data['firstName'],\n data['lastName'],\n data['telephone'],\n data['email'],\n data['notes']])\n self.connection.commit()\n\n def update_friend(self, id: str, data: dict):\n \"\"\"\n Update an existing friend entry is our database.\n\n Args:\n id: The id value of the friend to update.\n data: A dictionary of data to update an existing friend entry with.\n\n Raises:\n ValueError: If data is None or if not matching friend entry is found.\n \"\"\"\n if data is None:\n raise ValueError(\n \"`None` was received when a dict was expected during \"\n \"the attempt to update an existing friend resource.\")\n\n required_elements = {\"id\", \"firstName\", \"lastName\", \"telephone\",\n \"email\", \"notes\"}\n\n if not required_elements.issubset(data):\n raise ValueError(\"Some of the data required to create a friend \"\n \"was not present. The following elements \"\n \"must be present to create a friend: {}\".format(\n required_elements))\n\n if not self.friend(id):\n raise ValueError(\n \"No existing friend was found matching id: {}\".format(id))\n\n self.connection.execute(\n \"UPDATE friends \"\n \"SET id=?, firstName=?, lastName=?, telephone=?, email=?, notes=? 
\"\n \"WHERE lower(id) = ?\",\n [data['id'],\n data['firstName'],\n data['lastName'],\n data['telephone'],\n data['email'],\n data['notes'],\n data['id'].lower()])\n self.connection.commit()\n\n def destroy_friend(self, id: str):\n \"\"\"\n Remove an existing friend entry from our datastore of friends.\n\n Args:\n id: The id value of the friend to delete.\n\n Returns:\n ValueError: If the `id` parameter doesn't match any existing\n friend records in our database.\n\n \"\"\"\n if self.friend(id):\n cursor = self.connection.execute(\n 'DELETE '\n 'from friends where lower(id) = ?',\n [id.lower()])\n\n if not cursor.rowcount:\n raise ValueError()\n else:\n self.connection.commit()\n else:\n raise ValueError(\n \"No existing friend was found matching id: {}\".format(id))\n","sub_path":"training/level-4-creating-web-services/bfp-reference/exercise_11/friends_api/datastore.py","file_name":"datastore.py","file_ext":"py","file_size_in_byte":6010,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"290251028","text":"\"\"\"\nMulti-thead and pygame\n\"\"\"\n\nimport random\nimport math\nimport time\n\nimport Queue\n\nfrom starfish import pygm\nfrom starfish import consts\n\nfrom starfish import multithrd\nfrom starfish import snowspt\nfrom starfish import sptdraw\n\n\nclass ThrdTime(multithrd.PgThrd):\n def __init__(self, q=None):\n super(ThrdTime, self).__init__(q)\n\n self.t_last = None\n\n def do_once(self):\n t = int(time.time())\n if t % 2 == 0 and t != self.t_last:\n #print t\n self.q.put(t)\n self.t_last = t\n\n\nclass MTScene(pygm.PyGMScene):\n def __init__(self, q, *args, **kwargs):\n super(MTScene, self).__init__(*args, **kwargs)\n\n self.q = q\n\n self.lb1 = pygm.SptLbl(0, c=consts.WHITE)\n self.lb1.rect.top = 460\n self.lb1.rect.left = 60\n self.disp_add(self.lb1)\n\n self.sn1 = snowspt.SptSnow((800, 550), 100)\n self.disp_add(self.sn1)\n\n\n def f(x):\n #return x ** 3 + x ** 2 + x\n #return x ** 2 + x\n return (x / 2.0) * math.sin(x / 100.0) + 200.0\n self.drfn1 = sptdraw.SptDrawFunc((400, 400), f, (0, 400), 6)\n self.disp_add(self.drfn1)\n\n def refresh(self, fps_clock, *args, **kwargs):\n if not self.q.empty(): # or use self.q.get_nowait()\n t = self.q.get()\n #print t\n if t:\n self.lb1.lbl_set(t)\n\n\nclass GMMultithrd(pygm.PyGMGame):\n def __init__(self, title, winw, winh, *args, **kwargs):\n super(GMMultithrd, self).__init__(title, winw, winh)\n\n self.bk = pygm.SptImg('data/img_bk_1.jpg')\n self.bk.rect.top = -230\n self.disp_add(self.bk)\n\n self.q = Queue.Queue()\n\n self.scn1 = MTScene(self.q)\n self.disp_add(self.scn1)\n\n # add a time thread\n self.thrd_tm = ThrdTime(self.q)\n self.thrd_add(self.thrd_tm)\n self.thrd_tm.start()\n\n\ndef main():\n lk = GMMultithrd('Mulit Thrd', 800, 550)\n lk.mainloop()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"bluelake/multithrdtmp.py","file_name":"multithrdtmp.py","file_ext":"py","file_size_in_byte":2018,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"135805377","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Feb 20 22:02:12 2021\r\n\r\n@author: sevgi.tas\r\n\"\"\"\r\n\r\n\r\nimport random as rnd \r\na=[[0]*3 for i in range(3)]\r\n\r\nfor i in range(0,3):\r\n for j in range(0,3):\r\n a[i][j]=rnd.randint(1, 100)\r\nfor row in a:\r\n \r\n print(' '.join([str(x) for x in 
row]))","sub_path":"homework1-sevgi.py","file_name":"homework1-sevgi.py","file_ext":"py","file_size_in_byte":306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"281254738","text":"from setuptools import setup\n\npackage_name = 'piety'\n\n\nsetup(\n name=package_name,\n description='A modern curses compatible terminal built using Vulcan and GLFW',\n version='0.1.0',\n packages=[package_name],\n classifiers=[\n 'Development Status :: 1 - Planning',\n\n 'Environment :: Console',\n 'Environment :: MacOS X',\n 'Environment :: Win32 (MS Windows)',\n 'Environment :: X11 Applications',\n\n 'Framework :: AsyncIO',\n 'Intended Audience :: Developers',\n\n 'License :: OSI Approved :: MIT License',\n\n 'Operating System :: Microsoft :: Windows :: Windows 10',\n 'Operating System :: MacOS :: MacOS X',\n 'Operating System :: POSIX :: Linux',\n\n 'Programming Language :: Python :: 3.7',\n ],\n keywords=['vulcan', 'glfw', 'terminal', 'curses'],\n )\n","sub_path":"pypi_install_script/piety-0.1.0.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":849,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"576009903","text":"import math\nimport copy\n\nclass Point:\n \"\"\"Point in 2-D space\"\"\"\n def __init__(self, X=0, Y=0):\n self.x = X\n self.y = Y\n\n def distance_to_point(self, point):\n return math.sqrt(math.pow(point.x-self.x, 2)+math.pow(point.y-self.y, 2));\n\n def toString(self):\n return \"(\"+str(self.x)+\", \"+str(self.y)+\")\";\n\npoint_1 = Point();\npoint_1.x = 3;\npoint_1.y = 4;\n\npoint_2 = Point();\npoint_2.x = 5;\npoint_2.y = 5;\n\ndef distance_between_points(point1, point2):\n return math.sqrt(math.pow(point2.x-point1.x, 2)+math.pow(point2.y-point1.y, 2));\n\n#print(distance_between_points(point_1, point_2));\n\nclass Rectangle:\n \"\"\"Rectangle in 2-D space\"\"\"\n def __init__(self, Width=0, Height=0, corner_x=0, corner_y=0):\n self.width = Width\n self.height = Height\n self.x = corner_x\n self.y = corner_y\n self.corner = Point(corner_x,corner_y)\n\n def move_rectangle(self, dx=0, dy=0):\n self.corner.x = self.corner.x + dx;\n self.corner.y = self.corner.y + dy;\n\n def move_rectangle_copy(self, dx=0, dy=0):\n new_box = copy.deepcopy(self);\n new_box.move_rectangle(dx,dy)\n return new_box;\n\nbox = Rectangle();\nbox.width = 300;\nbox.height = 200;\nbox.corner = Point();\nbox.corner.x = 0;\nbox.corner.y = 0;\n\n#print(box.corner.x);\n\n#rect = box.move_rectangle_copy();\n#print(rect.corner.x);\n\nclass Circle:\n \"\"\"\n attributes: center, radius;\n \"\"\"\n def __init__(self, X=0, Y=0, Radius=0):\n self.center = Point(X,Y)\n self.radius = Radius\n\ncircle = Circle();\ncircle.center = Point();\ncircle.center.x = 150;\ncircle.center.y = 100;\n\ncircle.radius = 75;\n\ndef point_in_circle(circ, p):\n\n d = distance_between_points(p, circ.center);\n print(d);\n return d <= circ.radius;\n\ndef rect_in_circle(circ, rectang):\n\n \"\"\"points for rect_in\"\"\"\n left_up_point = Point(rectang.y+rectang.height, rectang.corner.x);\n\n right_up_point = Point(rectang.x+rectang.width, rectang.y+rectang.height);\n\n left_down_point = Point(rectang.x, rectang.y);\n\n right_down_point = Point(rectang.x+rectang.width, rectang.y);\n\n print();\n\n if distance_between_points(left_up_point, circ.center) == distance_between_points(right_up_point, circ.center) == distance_between_points(left_down_point, circ.center) == distance_between_points(right_down_point, circ.center) and 
distance_between_points(left_up_point, circ.center) <= circ.radius:\n return True;\n else:\n return False;\n\n\ndef rect_out_circle(circ, rectang):\n\n \"\"\"points for rect_out\"\"\"\n left_mid_point = Point(rectang.corner.x, rectang.y+rectang.height/2);\n\n right_mid_point = Point(rectang.x+rectang.width, rectang.y+rectang.height/2);\n\n top_mid_point = Point(rectang.x+rectang.width/2, rectang.y+rectang.height);\n\n down_mid_point = Point(rectang.x+rectang.width/2, rectang.y);\n\n if distance_between_points(left_mid_point, circ.center) == distance_between_points(right_mid_point, circ.center) == distance_between_points(top_mid_point, circ.center) == distance_between_points(down_mid_point, circ.center) and distance_between_points(left_up_point, circ.center) >= circ.radius:\n return True;\n else:\n return False;\n\nprint(rect_in_circle(circle, box));\nprint(rect_out_circle(circle, box));\n\n#def rect_circle_overlap(circ, rectang):\n# return;\n","sub_path":"Interface design/Exercises/mypython.py","file_name":"mypython.py","file_ext":"py","file_size_in_byte":3340,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"458257584","text":"import datetime\nimport requests\n\nfrom odoo import models\n\n\nclass sync_payrun(models.TransientModel):\n _inherit = \"is_keypay.sync\"\n\n def _createPayRun(self, company, journalId, kpPayrun, key, businessId, endingDate, journalDate):\n ref = \"Pay period ending \" + endingDate.strftime('%d/%m/%Y') + \" (#\" + str(kpPayrun[\"id\"]) + \")\"\n if len(self.env[\"account.move\"].search([['ref', '=', ref]])) > 0:\n return\n url = self.url_base + 'business/{0}/journal/{1}'.format(businessId, kpPayrun[\"id\"])\n response = requests.get(url, auth=(key,''))\n response.raise_for_status()\n \n journalItems = []\n for kpJournalItem in response.json():\n itemAccount = self.env['account.account'].search([('company_id','=',company.id),('code','=',kpJournalItem[\"accountCode\"])])\n if not itemAccount or len(itemAccount) != 1:\n raise Exception('Odoo Account not found: ' + kpJournalItem[\"accountCode\"])\n journalItems.append((0,0,{\n 'journal_id': journalId.id,\n 'account_id': itemAccount.id,\n 'name': kpJournalItem[\"reference\"],\n 'debit': abs(kpJournalItem[\"amount\"]) if kpJournalItem[\"isDebit\"] else 0,\n 'credit': abs(kpJournalItem[\"amount\"]) if kpJournalItem[\"isCredit\"] else 0\n }))\n \n accountMove = self.env['account.move'].create({\n 'journal_id': journalId.id,\n 'ref': ref,\n 'date': journalDate,\n 'company_id': company.id,\n 'line_ids': journalItems\n })\n accountMove.post()\n \n def sync(self, company): \n super(sync_payrun, self).sync(company)\n \n if not company.keypay_enable_payrun_journal_sync:\n return\n \n key = company.keypay_api_key\n businessId = company.keypay_business_id\n journalId = company.keypay_journal\n excludeDate = datetime.datetime.strptime(company.keypay_payrun_journal_sync_exclude_before, \"%Y-%m-%d\") if company.keypay_payrun_journal_sync_exclude_before else False\n \n if not key or not businessId or not journalId:\n raise Exception(\"Company {0} does not have the apikey, businessId or the JournalId set\".format(company.name))\n url = self.url_base + 'business/{0}/payrun'.format(businessId)\n response = requests.get(url, auth=(key,''))\n response.raise_for_status()\n \n for kpPayrun in response.json():\n if not kpPayrun[\"isFinalised\"]:\n continue\n endingDate = datetime.datetime.strptime(kpPayrun[\"payPeriodEnding\"], \"%Y-%m-%dT%H:%M:%S\")\n journalDate = 
datetime.datetime.strptime(kpPayrun[\"datePaid\"], \"%Y-%m-%dT%H:%M:%S\")\n if excludeDate and excludeDate > journalDate:\n continue\n self._createPayRun(company, journalId, kpPayrun, key, businessId, endingDate, journalDate)\n","sub_path":"is_keypay/models/sync_payrun.py","file_name":"sync_payrun.py","file_ext":"py","file_size_in_byte":2967,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"39953569","text":"from __future__ import absolute_import, division, print_function\n\n__metaclass__ = type\nfrom .. import Message\n\n\nclass TestChannel:\n def test_with(self, conn):\n with conn as cxn:\n assert cxn.transport is not None\n\n with cxn.channel(1) as ch:\n assert 1 in cxn.channels\n\n # do something with the channel\n ch.exchange_declare('unittest.fanout', 'fanout', auto_delete=True)\n\n msg = Message('unittest message',\n content_type='text/plain',\n application_headers={'foo': 7, 'bar': 'baz'})\n\n ch.basic_publish(msg, 'unittest.fanout')\n\n # check that the channel was closed\n assert 1 not in cxn.channels\n assert ch.is_open is False\n\n # check that the connection was closed\n assert cxn.transport is None\n","sub_path":"amqpy/tests/test_with.py","file_name":"test_with.py","file_ext":"py","file_size_in_byte":902,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"509851608","text":"import cv2 as cv\nimport numpy as np\nimport math\n\n\"\"\"Parameters\"\"\"\npixels_to_mm = 0.1\ntrack_y = 50 #value to crop top and bottom of the image by\ncanny_lower_threshold = 150\ncanny_upper_threshold = 300\nHL_rho = 1 #1\nHL_theta = 180 #180\nHL_threshold = 150 #150\nHLP_rho = 1 #1\nHLP_theta = 180 #180\nHLP_threshold = 50 #150\nHLP_min_line_length = 10\n\n\n\"\"\"Reference Image\"\"\"\nimage = cv.imread('C:/Users/Amy/OneDrive/Documents/IGEN 330/ref images/image6.jpg', cv.IMREAD_GRAYSCALE)\nimage_track = image[track_y: -track_y-30]\n\n\ndef vertical_crack(edges, threshold=1):\n linesP = cv.HoughLinesP(edges, HLP_rho, np.pi / HLP_theta, HLP_threshold, None, HLP_min_line_length, 10)\n xvalues = []\n if linesP is not None:\n for i in range(0, len(linesP)):\n l = linesP[i][0]\n if l[0] <= l[2] + threshold & l[0] >= l[2] - threshold:\n xvalues.append(l[0])\n xvalues.append(l[2])\n cv.line(cdstP, (l[0], l[1]), (l[2], l[3]), (0, 0, 255), 3, cv.LINE_AA)\n try:\n width_of_crack(xvalues)\n except:\n print(\"no vertical cracks\")\n image_track_blur = cv.medianBlur(image_track, 7)\n detect_circle(image_track_blur)\n\n\ndef detect_circle(image_track_blur):\n ##circles\n # HoughCircles(image, method, dp (acc has same resolution of same size), minDist, param1, param2)\n circles = cv.HoughCircles(edges, cv.HOUGH_GRADIENT, 1, 80, param1=50, param2=32, minRadius=0,\n maxRadius=0)\n\n try:\n circles = np.uint16(np.around(circles))\n for i in circles[0, :]:\n # draw the outer circle\n cv.circle(image_track, (i[0], i[1]), i[2], (0, 255, 0), 2)\n # draw the center of the circle\n cv.circle(image_track, (i[0], i[1]), 2, (0, 0, 255), 3)\n except:\n print(\"no circles found\")\n\n\ndef width_of_crack(values):\n width = max(values) - min(values)\n print(width * pixels_to_mm)\n\n#output variables\nedges = cv.Canny(image_track, canny_lower_threshold, canny_upper_threshold, None, 3)\nedgeimage = cv.cvtColor(edges, cv.COLOR_GRAY2BGR) #to show the canny image in imshow\ncdstP = np.copy(edgeimage)\n\ncv.imshow(\"Canny Transform\", edgeimage)\n\n#standard hough transform\n#cv2.HoughLines(image, rho - distance resolution of 
accumulator, theta - angle resolution of accumulator, threshold[, lines[, srn[, stn]]]) → lines\n# lines = cv.HoughLines(edges, HL_rho, np.pi/HL_theta, HL_threshold, None, 0, 0) #lines stores the coordinates of start/end line\n#\n# if lines is not None:\n# for i in range(0, len(lines)):\n# rho = lines[i][0][0]\n# theta = lines[i][0][1]\n# a = math.cos(theta)\n# b = math.sin(theta)\n# x0 = a * rho\n# y0 = b * rho\n# pt1 = (int(x0 + 1000 * (-b)), int(y0 + 1000 * (a)))\n# pt2 = (int(x0 - 1000 * (-b)), int(y0 - 1000 * (a)))\n# cv.line(edgeimage, pt1, pt2, (0, 0, 255), 3, cv.LINE_AA)\n\nvertical_crack(edges)\n\ncv.imshow('detected circles',image_track)\n# cv.imshow(\"Detected Lines (in red) - Standard Hough Line Transform\", edgeimage)\ncv.imshow(\"Detected Lines (in red) - Probabilistic Line Transform\", cdstP)\ncv.waitKey()","sub_path":"TrackDetection.py","file_name":"TrackDetection.py","file_ext":"py","file_size_in_byte":3162,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"515761039","text":"import datetime\nimport os\nimport sys\n\nimport geopandas as gpd\nfrom geopandas.testing import assert_geodataframe_equal\n\nimport numpy as np\nimport pandas as pd\nimport pytest\n\nimport trackintel as ti\n\n\n@pytest.fixture\ndef geolife_pfs_stps_long():\n \"\"\"Read geolife_long and generate stps.\"\"\"\n pfs, _ = ti.io.dataset_reader.read_geolife(os.path.join(\"tests\", \"data\", \"geolife_long\"))\n pfs, stps = pfs.as_positionfixes.generate_staypoints(method=\"sliding\", dist_threshold=25, time_threshold=5)\n return pfs, stps\n\n\nclass TestGenerate_staypoints:\n \"\"\"Tests for generate_staypoints() method.\"\"\"\n\n def test_duplicate_columns(self):\n \"\"\"Test if running the function twice, the generated column does not yield exception in join statement\"\"\"\n\n # we run generate_staypoints twice in order to check that the extra column(tripleg_id) does\n # not cause any problems in the second run\n pfs, _ = ti.io.dataset_reader.read_geolife(os.path.join(\"tests\", \"data\", \"geolife\"))\n pfs_run_1, _ = pfs.as_positionfixes.generate_staypoints(\n method=\"sliding\", dist_threshold=100, time_threshold=5.0, include_last=True\n )\n pfs_run_2, _ = pfs_run_1.as_positionfixes.generate_staypoints(\n method=\"sliding\", dist_threshold=100, time_threshold=5.0, include_last=True\n )\n assert set(pfs_run_1.columns) == set(pfs_run_2.columns)\n\n def test_sliding_min(self):\n \"\"\"Test if using small thresholds, stp extraction yields each pfs.\"\"\"\n pfs, _ = ti.io.dataset_reader.read_geolife(os.path.join(\"tests\", \"data\", \"geolife\"))\n pfs, stps = pfs.as_positionfixes.generate_staypoints(\n method=\"sliding\", dist_threshold=0, time_threshold=0, include_last=True\n )\n assert len(stps) == len(pfs)\n\n def test_sliding_max(self):\n \"\"\"Test if using large thresholds, stp extraction yield no pfs.\"\"\"\n pfs, _ = ti.io.dataset_reader.read_geolife(os.path.join(\"tests\", \"data\", \"geolife\"))\n _, stps = pfs.as_positionfixes.generate_staypoints(\n method=\"sliding\", dist_threshold=sys.maxsize, time_threshold=sys.maxsize\n )\n assert len(stps) == 0\n\n def test_missing_link(self):\n \"\"\"Test nan is assigned for missing link between pfs and stps.\"\"\"\n pfs, _ = ti.io.dataset_reader.read_geolife(os.path.join(\"tests\", \"data\", \"geolife\"))\n pfs, _ = pfs.as_positionfixes.generate_staypoints(\n method=\"sliding\", dist_threshold=sys.maxsize, time_threshold=sys.maxsize\n )\n\n assert pd.isna(pfs[\"staypoint_id\"]).any()\n\n def 
test_dtype_consistent(self, geolife_pfs_stps_long):\n \"\"\"Test the dtypes for the generated columns.\"\"\"\n pfs, stps = geolife_pfs_stps_long\n\n assert pfs[\"user_id\"].dtype == stps[\"user_id\"].dtype\n assert pfs[\"staypoint_id\"].dtype == \"Int64\"\n assert stps.index.dtype == \"int64\"\n\n def test_index_start(self, geolife_pfs_stps_long):\n \"\"\"Test the generated index start from 0 for different methods.\"\"\"\n _, stps = geolife_pfs_stps_long\n\n assert (stps.index == np.arange(len(stps))).any()\n\n def test_include_last(self):\n \"\"\"Test if the include_last arguement will include the last pfs as stp.\"\"\"\n pfs, _ = ti.io.dataset_reader.read_geolife(os.path.join(\"tests\", \"data\", \"geolife\"))\n\n pfs_wo, stps_wo = pfs.as_positionfixes.generate_staypoints(\n method=\"sliding\", dist_threshold=100, time_threshold=5.0, include_last=False\n )\n pfs_include, stps_include = pfs.as_positionfixes.generate_staypoints(\n method=\"sliding\", dist_threshold=100, time_threshold=5.0, include_last=True\n )\n # stps_wo does not include the last staypoint\n assert len(stps_wo) == len(stps_include) - 1\n # the last pfs of pfs_include has stp connection\n assert not pfs_include.tail(1)[\"staypoint_id\"].isna().all()\n assert pfs_wo.tail(1)[\"staypoint_id\"].isna().all()\n\n def test_print_progress(self):\n \"\"\"Test if the result from print progress agrees with the original.\"\"\"\n pfs, _ = ti.io.dataset_reader.read_geolife(os.path.join(\"tests\", \"data\", \"geolife\"))\n pfs_ori, stps_ori = pfs.as_positionfixes.generate_staypoints(\n method=\"sliding\", dist_threshold=100, time_threshold=5\n )\n pfs_print, stps_print = pfs.as_positionfixes.generate_staypoints(\n method=\"sliding\", dist_threshold=100, time_threshold=5, print_progress=True\n )\n assert_geodataframe_equal(pfs_ori, pfs_print)\n assert_geodataframe_equal(stps_ori, stps_print)\n\n def test_temporal(self):\n \"\"\"Test if the stps generation result follows predefined time_threshold and gap_threshold.\"\"\"\n pfs_input, _ = ti.io.dataset_reader.read_geolife(os.path.join(\"tests\", \"data\", \"geolife_long\"))\n\n # the duration should be not longer than time_threshold\n time_threshold_ls = [3, 5, 10]\n for time_threshold in time_threshold_ls:\n _, stps = pfs_input.as_positionfixes.generate_staypoints(\n method=\"sliding\", dist_threshold=50, time_threshold=time_threshold\n )\n\n duration_stps_min = (stps[\"finished_at\"] - stps[\"started_at\"]).dt.total_seconds() / 60\n # all durations should be longer than the time_threshold\n assert (duration_stps_min > time_threshold).all()\n\n # the missing time should not exceed gap_threshold\n gap_threshold_ls = [10, 15, 20]\n for gap_threshold in gap_threshold_ls:\n pfs, _ = pfs_input.as_positionfixes.generate_staypoints(\n method=\"sliding\", dist_threshold=50, time_threshold=time_threshold, gap_threshold=gap_threshold\n )\n # get the difference between pfs tracking time, and assign back to the previous pfs\n pfs[\"diff\"] = ((pfs[\"tracked_at\"] - pfs[\"tracked_at\"].shift(1)).dt.total_seconds() / 60).shift(-1)\n # get the last pf of stps and check the gap size\n pfs.dropna(subset=[\"staypoint_id\"], inplace=True)\n pfs.drop_duplicates(subset=[\"staypoint_id\"], keep=\"last\", inplace=True)\n # all last pfs should be shorter than the gap_threshold\n assert (pfs[\"diff\"] < gap_threshold).all()\n\n\nclass TestGenerate_triplegs:\n \"\"\"Tests for generate_triplegs() method.\"\"\"\n\n def test_duplicate_columns(self, geolife_pfs_stps_long):\n \"\"\"Test if running the function twice, 
the generated column does not yield exception in join statement\"\"\"\n\n # we run generate_triplegs twice in order to check that the extra column (tripleg_id) does\n # not cause any problems in the second run\n pfs, stps = geolife_pfs_stps_long\n\n pfs_run_1, _ = pfs.as_positionfixes.generate_triplegs(stps, method=\"between_staypoints\")\n pfs_run_2, _ = pfs_run_1.as_positionfixes.generate_triplegs(stps, method=\"between_staypoints\")\n assert set(pfs_run_1.columns) == set(pfs_run_2.columns)\n\n def test_user_without_stps(self, geolife_pfs_stps_long):\n \"\"\"Check if it is safe to have users that have pfs but no stps.\"\"\"\n pfs, stps = geolife_pfs_stps_long\n # test for case 1\n # manually change the first pfs' user_id, which has no stp correspondence\n pfs.loc[0, \"user_id\"] = 5000\n _, tpls_1 = pfs.as_positionfixes.generate_triplegs(stps, method=\"between_staypoints\")\n # result should be the same ommiting the first row\n _, tpls_2 = pfs.iloc[1:].as_positionfixes.generate_triplegs(stps, method=\"between_staypoints\")\n assert_geodataframe_equal(tpls_1, tpls_2)\n\n # test for case 2\n pfs.drop(columns=\"staypoint_id\", inplace=True)\n # manually change the first pfs' user_id, which has no stp correspondence\n _, tpls_1 = pfs.as_positionfixes.generate_triplegs(stps, method=\"between_staypoints\")\n # result should be the same ommiting the first row\n _, tpls_2 = pfs.iloc[1:].as_positionfixes.generate_triplegs(stps, method=\"between_staypoints\")\n assert_geodataframe_equal(tpls_1, tpls_2)\n\n def test_pfs_without_stps(self, geolife_pfs_stps_long):\n \"\"\"Delete pfs that belong to staypoints and see if they are detected.\"\"\"\n pfs, stps = geolife_pfs_stps_long\n\n _, tpls_case1 = pfs.as_positionfixes.generate_triplegs(stps, method=\"between_staypoints\")\n # only keep pfs where staypoint id is nan\n pfs_nostps = pfs[pd.isna(pfs[\"staypoint_id\"])].drop(columns=\"staypoint_id\")\n print(pfs_nostps)\n _, tpls_case2 = pfs_nostps.as_positionfixes.generate_triplegs(stps, method=\"between_staypoints\")\n\n assert_geodataframe_equal(tpls_case1, tpls_case2)\n\n def test_stability(self, geolife_pfs_stps_long):\n \"\"\"Checks if the results are same for different cases in tripleg_generation method.\"\"\"\n pfs, stps = geolife_pfs_stps_long\n # case 1\n pfs_case1, tpls_case1 = pfs.as_positionfixes.generate_triplegs(stps, method=\"between_staypoints\")\n # case 1 without stps\n pfs_case1_wo, tpls_case1_wo = pfs.as_positionfixes.generate_triplegs(method=\"between_staypoints\")\n\n # case 2\n pfs = pfs.drop(columns=\"staypoint_id\")\n pfs_case2, tpls_case2 = pfs.as_positionfixes.generate_triplegs(stps, method=\"between_staypoints\")\n\n assert_geodataframe_equal(pfs_case1.drop(columns=\"staypoint_id\", axis=1), pfs_case2)\n assert_geodataframe_equal(pfs_case1, pfs_case1_wo)\n assert_geodataframe_equal(tpls_case1, tpls_case2)\n assert_geodataframe_equal(tpls_case1, tpls_case1_wo)\n\n def test_random_order(self, geolife_pfs_stps_long):\n \"\"\"Checks if same tpls will be generated after random shuffling pfs.\"\"\"\n pfs, stps = geolife_pfs_stps_long\n # ensure proper order of pfs\n pfs.sort_values(by=[\"user_id\", \"tracked_at\"], inplace=True)\n\n # original order\n pfs_ori, tpls_ori = pfs.as_positionfixes.generate_triplegs(stps)\n\n # resample/shuffle pfs\n pfs_shuffle = pfs.sample(frac=1, random_state=0)\n pfs_shuffle, tpls_shuffle = pfs_shuffle.as_positionfixes.generate_triplegs(stps)\n\n # order should be the same -> pfs.sort_values within function\n # generated tpls index should be the 
same\n assert_geodataframe_equal(pfs_ori, pfs_shuffle)\n assert_geodataframe_equal(tpls_ori, tpls_shuffle)\n\n def test_pfs_index(self, geolife_pfs_stps_long):\n \"\"\"Checks if same tpls will be generated after changing pfs index.\"\"\"\n pfs, stps = geolife_pfs_stps_long\n\n # original index\n pfs_ori, tpls_ori = pfs.as_positionfixes.generate_triplegs(stps)\n\n # create discontinues index\n pfs.index = np.arange(len(pfs)) * 2\n pfs_index, tpls_index = pfs.as_positionfixes.generate_triplegs(stps)\n\n # generated tpls index should be the same\n assert_geodataframe_equal(pfs_ori.reset_index(drop=True), pfs_index.reset_index(drop=True))\n assert_geodataframe_equal(tpls_ori, tpls_index)\n\n def test_dtype_consistent(self, geolife_pfs_stps_long):\n \"\"\"Test the dtypes for the generated columns.\"\"\"\n pfs, stps = geolife_pfs_stps_long\n pfs, tpls = pfs.as_positionfixes.generate_triplegs(stps)\n assert pfs[\"user_id\"].dtype == tpls[\"user_id\"].dtype\n assert pfs[\"tripleg_id\"].dtype == \"Int64\"\n assert tpls.index.dtype == \"int64\"\n\n def test_missing_link(self, geolife_pfs_stps_long):\n \"\"\"Test nan is assigned for missing link between pfs and tpls.\"\"\"\n pfs, stps = geolife_pfs_stps_long\n\n pfs, _ = pfs.as_positionfixes.generate_triplegs(stps, method=\"between_staypoints\")\n\n assert pd.isna(pfs[\"tripleg_id\"]).any()\n\n def test_index_start(self, geolife_pfs_stps_long):\n \"\"\"Test the generated index start from 0 for different cases.\"\"\"\n pfs, stps = geolife_pfs_stps_long\n\n _, tpls_case1 = pfs.as_positionfixes.generate_triplegs(stps)\n _, tpls_case2 = pfs.drop(\"staypoint_id\", axis=1).as_positionfixes.generate_triplegs(stps)\n\n assert (tpls_case1.index == np.arange(len(tpls_case1))).any()\n assert (tpls_case2.index == np.arange(len(tpls_case2))).any()\n\n def test_invalid_inputs(self, geolife_pfs_stps_long):\n \"\"\"Test if AttributeError will be raised after invalid method input.\"\"\"\n pfs, stps = geolife_pfs_stps_long\n\n with pytest.raises(AttributeError, match=\"Method unknown\"):\n pfs.as_positionfixes.generate_triplegs(stps, method=\"random\")\n with pytest.raises(AttributeError, match=\"Method unknown\"):\n pfs.as_positionfixes.generate_triplegs(stps, method=12345)\n\n def test_temporal(self, geolife_pfs_stps_long):\n \"\"\"Test if the tpls generation result follows predefined gap_threshold.\"\"\"\n pfs_input, stps = geolife_pfs_stps_long\n\n gap_threshold_ls = [0.1, 0.2, 1, 2]\n for gap_threshold in gap_threshold_ls:\n pfs, _ = pfs_input.as_positionfixes.generate_triplegs(stps, gap_threshold=gap_threshold)\n\n # conti_tpl checks whether the next pfs is in the same tpl\n pfs[\"conti_tpl\"] = (pfs[\"tripleg_id\"] - pfs[\"tripleg_id\"].shift(1)).shift(-1)\n # get the time difference of pfs, and assign to the previous pfs\n pfs[\"diff\"] = ((pfs[\"tracked_at\"] - pfs[\"tracked_at\"].shift(1)).dt.total_seconds() / 60).shift(-1)\n # we only take tpls that are splitted in the middle (tpl - tpl) and the second user\n pfs = pfs.loc[(pfs[\"conti_tpl\"] == 1) & (pfs[\"user_id\"] == 1)]\n # check if the cuts are appropriate\n assert (pfs[\"diff\"] > gap_threshold).all()\n\n def test_stps_tpls_overlap(self, geolife_pfs_stps_long):\n \"\"\"Tpls and stps should not overlap when generated using the default extract triplegs method.\"\"\"\n pfs, stps = geolife_pfs_stps_long\n pfs, tpls = pfs.as_positionfixes.generate_triplegs(stps)\n\n stps = stps[[\"started_at\", \"finished_at\", \"user_id\"]]\n tpls = tpls[[\"started_at\", \"finished_at\", \"user_id\"]]\n stps_tpls = 
stps.append(tpls)\n stps_tpls.sort_values(by=[\"user_id\", \"started_at\"], inplace=True)\n\n for user_id_this in stps[\"user_id\"].unique():\n stps_tpls_this = stps_tpls[stps_tpls[\"user_id\"] == user_id_this]\n diff = stps_tpls_this[\"started_at\"] - stps_tpls_this[\"finished_at\"].shift(1)\n # transform to numpy array and drop first values (always nan due to shift operation)\n diff = diff.values[1:]\n\n # all values have to greater or equal to zero. Otherwise there is an overlap\n assert all(diff >= np.timedelta64(datetime.timedelta()))\n","sub_path":"tests/preprocessing/test_positionfixes.py","file_name":"test_positionfixes.py","file_ext":"py","file_size_in_byte":14686,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"311793381","text":"import tkinter as tk\nfrom Generator.MediaSpace import MediaSpace\nfrom Generator.MoreSpace import MoreSpace\nfrom tkinter import ttk\n\n\nclass Generator:\n\n def __init__(self, root, menubar=None):\n self.root = root\n self.root.wm_iconbitmap('Graphics/logo.ico')\n\n self.modules = [\"More Space\", \"Media Space\", \"Fabrication Services\", \"CNC Milling\"]\n\n self.master = tk.LabelFrame(self.root, borderwidth=0, highlightthickness=0)\n self.current_module = None\n\n self.menubar = menubar or tk.Menu(self.root, tearoff=False)\n\n def start(self):\n self.new()\n self.create_menubar()\n self.root.mainloop()\n\n def reset_master(self):\n self.master.destroy()\n self.master = tk.LabelFrame(self.root, borderwidth=0, highlightthickness=0)\n self.master.pack()\n\n def new(self):\n self.reset_master()\n if self.current_module:\n self.current_module.close()\n self.current_module = None\n\n self.root.geometry('280x255')\n self.root.title(\"Invoice Generator\")\n\n tk.Label(self.master, text='Invoice Generator', font=('Arial', 14)).pack(pady=20)\n\n for name in self.modules:\n button = ttk.Button(self.master, text=name, width=25,\n command=lambda module=name: self.create(module))\n button.pack(pady=5)\n\n def create(self, module_name):\n self.master.destroy()\n\n modules = {'More Space': MoreSpace,\n 'Media Space': MediaSpace,\n 'Fabrication Services': MoreSpace,\n 'CNC Milling': MoreSpace}\n\n module = modules[module_name]\n\n my_module = module(self.root)\n self.current_module = my_module\n self.master = self.current_module.master\n\n my_module.start()\n\n def create_menubar(self):\n\n self.root.config(menu=self.menubar)\n\n file_menubar = tk.Menu(self.menubar, tearoff=False)\n\n new_menubar = tk.Menu(file_menubar, tearoff=False)\n for name in self.modules:\n new_menubar.add_command(label=name, command=lambda module=name: self.create(module))\n\n self.menubar.add_cascade(label='File', menu=file_menubar)\n file_menubar.add_cascade(label='New... 
', menu=new_menubar)\n file_menubar.add_command(label='Clear', command=self.clear)\n file_menubar.add_command(label='Preview', command=self.preview)\n file_menubar.add_separator()\n file_menubar.add_command(label='Save', command=self.save)\n\n save_as_menubar = tk.Menu(file_menubar, tearoff=False)\n for option in ['PDF', 'Excel']:\n save_as_menubar.add_command(label=option, command=lambda choice=option: self.save(choice))\n\n file_menubar.add_cascade(label='Save As...', menu=save_as_menubar)\n\n file_menubar.add_separator()\n file_menubar.add_command(label='Exit', command=self.close)\n\n self.menubar.add_command(label='Mode', command=self.change_mode)\n\n def clear(self):\n if self.current_module:\n self.current_module.clear()\n\n def save(self, choice='PDF'):\n if self.current_module:\n self.current_module.save(choice)\n\n def preview(self):\n if self.current_module:\n self.current_module.preview()\n\n def change_mode(self):\n pass\n\n def close(self):\n for item in ['File', 'Mode']:\n self.menubar.delete(item)\n\n if self.current_module:\n self.current_module.close()\n\n self.master.destroy()\n\n\nif __name__ == '__main__':\n root = tk.Tk()\n Generator(root)\n","sub_path":"Generator/Generator.py","file_name":"Generator.py","file_ext":"py","file_size_in_byte":3606,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"320828302","text":"import json\nfrom settings import REDDIT_TLDR_TABLE_FROM_COMPETITION, ENGINE_PATH\nfrom sqlalchemy import create_engine\nimport pandas as pd\n\nclass SplittingUpLargeJson():\n\n def __init__(self,\n orig_json_path,\n new_files_path,\n table_name,\n engine_path=ENGINE_PATH,\n replace_init=True):\n self.new_files_path = new_files_path\n self.table_name = table_name\n self.orig_json_path = orig_json_path\n self.row_num = 0\n self.engine = create_engine(engine_path)\n self.replace_init = replace_init\n\n def save_row_to_json(self, row):\n new_fp = self.new_files_path + \"_row_\" + str(self.row_num) + \".json\"\n with open(new_fp, \"w\") as fnew:\n json.dump(row, fnew)\n\n def save_row_to_db(self, row, replace):\n if replace:\n if_exists = \"replace\"\n else:\n if_exists = \"append\"\n df = pd.DataFrame([row])\n if replace:\n self.cols = df.columns\n else:\n df = df[self.cols]\n df.to_sql(self.table_name,\n self.engine, if_exists=if_exists)\n\n def save_json_per_line(self):\n with open(self.orig_json_path, \"r\") as f:\n for line in f:\n row = json.loads(line)\n self.save_row_to_json(row)\n if self.row_num == 0:\n replace = self.replace_init\n else:\n replace = False\n self.save_row_to_db(row, replace)\n if self.row_num % 10000 == 0:\n print(\"on row number = \" + str(self.row_num))\n self.row_num += 1\n\n\nif __name__ == \"__main__\":\n from settings import RAW_REDDIT_JSON_PATH\n json_splitter = SplittingUpLargeJson(\n \"/scratch/gobi2/bparker/summarization_data/interim/reddit/tldr/corpus-webis-tldr-17/orig_one_large_json/corpus-webis-tldr-17.json\",\n RAW_REDDIT_JSON_PATH,\n REDDIT_TLDR_TABLE_FROM_COMPETITION\n )\n json_splitter.save_json_per_line()\n","sub_path":"save_large_json_into_one_json_per_row.py","file_name":"save_large_json_into_one_json_per_row.py","file_ext":"py","file_size_in_byte":2072,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"358791593","text":"from .base import BaseClient as __BaseClient\nfrom typing import List as _List\n\n\nclass Attachment:\n def __init__(self, data):\n super().__init__()\n self.data = data\n if 
\"uploadDestinationId\" in data:\n self.uploadDestinationId: str = str(data[\"uploadDestinationId\"])\n else:\n self.uploadDestinationId: str = None\n if \"fileName\" in data:\n self.fileName: str = str(data[\"fileName\"])\n else:\n self.fileName: str = None\n\n\nclass LinkObject:\n def __init__(self, data):\n super().__init__()\n self.data = data\n if \"href\" in data:\n self.href: str = str(data[\"href\"])\n else:\n self.href: str = None\n if \"name\" in data:\n self.name: str = str(data[\"name\"])\n else:\n self.name: str = None\n\n\nclass MessagingAction:\n def __init__(self, data):\n super().__init__()\n self.data = data\n if \"name\" in data:\n self.name: str = str(data[\"name\"])\n else:\n self.name: str = None\n\n\nclass Schema:\n def __init__(self, data):\n super().__init__()\n self.data = data\n\n\nclass GetMessagingActionsForOrderResponse:\n def __init__(self, data):\n super().__init__()\n self.data = data\n if \"_links\" in data:\n self._links: dict = dict(data[\"_links\"])\n else:\n self._links: dict = None\n if \"_embedded\" in data:\n self._embedded: dict = dict(data[\"_embedded\"])\n else:\n self._embedded: dict = None\n if \"errors\" in data:\n self.errors: ErrorList = ErrorList(data[\"errors\"])\n else:\n self.errors: ErrorList = None\n\n\nclass GetMessagingActionResponse:\n def __init__(self, data):\n super().__init__()\n self.data = data\n if \"_links\" in data:\n self._links: dict = dict(data[\"_links\"])\n else:\n self._links: dict = None\n if \"_embedded\" in data:\n self._embedded: dict = dict(data[\"_embedded\"])\n else:\n self._embedded: dict = None\n if \"payload\" in data:\n self.payload: MessagingAction = MessagingAction(data[\"payload\"])\n else:\n self.payload: MessagingAction = None\n if \"errors\" in data:\n self.errors: ErrorList = ErrorList(data[\"errors\"])\n else:\n self.errors: ErrorList = None\n\n\nclass GetSchemaResponse:\n def __init__(self, data):\n super().__init__()\n self.data = data\n if \"_links\" in data:\n self._links: dict = dict(data[\"_links\"])\n else:\n self._links: dict = None\n if \"payload\" in data:\n self.payload: Schema = Schema(data[\"payload\"])\n else:\n self.payload: Schema = None\n if \"errors\" in data:\n self.errors: ErrorList = ErrorList(data[\"errors\"])\n else:\n self.errors: ErrorList = None\n\n\nclass CreateConfirmCustomizationDetailsRequest:\n def __init__(self, data):\n super().__init__()\n self.data = data\n if \"text\" in data:\n self.text: str = str(data[\"text\"])\n else:\n self.text: str = None\n if \"attachments\" in data:\n self.attachments: _List[Attachment] = [Attachment(datum) for datum in data[\"attachments\"]]\n else:\n self.attachments: _List[Attachment] = []\n\n\nclass CreateConfirmCustomizationDetailsResponse:\n def __init__(self, data):\n super().__init__()\n self.data = data\n if \"errors\" in data:\n self.errors: ErrorList = ErrorList(data[\"errors\"])\n else:\n self.errors: ErrorList = None\n\n\nclass CreateConfirmDeliveryDetailsRequest:\n def __init__(self, data):\n super().__init__()\n self.data = data\n if \"text\" in data:\n self.text: str = str(data[\"text\"])\n else:\n self.text: str = None\n\n\nclass CreateConfirmDeliveryDetailsResponse:\n def __init__(self, data):\n super().__init__()\n self.data = data\n if \"errors\" in data:\n self.errors: ErrorList = ErrorList(data[\"errors\"])\n else:\n self.errors: ErrorList = None\n\n\nclass CreateNegativeFeedbackRemovalResponse:\n def __init__(self, data):\n super().__init__()\n self.data = data\n if \"errors\" in data:\n self.errors: ErrorList = 
ErrorList(data[\"errors\"])\n else:\n self.errors: ErrorList = None\n\n\nclass CreateLegalDisclosureRequest:\n def __init__(self, data):\n super().__init__()\n self.data = data\n if \"attachments\" in data:\n self.attachments: _List[Attachment] = [Attachment(datum) for datum in data[\"attachments\"]]\n else:\n self.attachments: _List[Attachment] = []\n\n\nclass CreateLegalDisclosureResponse:\n def __init__(self, data):\n super().__init__()\n self.data = data\n if \"errors\" in data:\n self.errors: ErrorList = ErrorList(data[\"errors\"])\n else:\n self.errors: ErrorList = None\n\n\nclass CreateConfirmOrderDetailsRequest:\n def __init__(self, data):\n super().__init__()\n self.data = data\n if \"text\" in data:\n self.text: str = str(data[\"text\"])\n else:\n self.text: str = None\n\n\nclass CreateConfirmOrderDetailsResponse:\n def __init__(self, data):\n super().__init__()\n self.data = data\n if \"errors\" in data:\n self.errors: ErrorList = ErrorList(data[\"errors\"])\n else:\n self.errors: ErrorList = None\n\n\nclass CreateConfirmServiceDetailsRequest:\n def __init__(self, data):\n super().__init__()\n self.data = data\n if \"text\" in data:\n self.text: str = str(data[\"text\"])\n else:\n self.text: str = None\n\n\nclass CreateConfirmServiceDetailsResponse:\n def __init__(self, data):\n super().__init__()\n self.data = data\n if \"errors\" in data:\n self.errors: ErrorList = ErrorList(data[\"errors\"])\n else:\n self.errors: ErrorList = None\n\n\nclass CreateAmazonMotorsRequest:\n def __init__(self, data):\n super().__init__()\n self.data = data\n if \"attachments\" in data:\n self.attachments: _List[Attachment] = [Attachment(datum) for datum in data[\"attachments\"]]\n else:\n self.attachments: _List[Attachment] = []\n\n\nclass CreateAmazonMotorsResponse:\n def __init__(self, data):\n super().__init__()\n self.data = data\n if \"errors\" in data:\n self.errors: ErrorList = ErrorList(data[\"errors\"])\n else:\n self.errors: ErrorList = None\n\n\nclass CreateWarrantyRequest:\n def __init__(self, data):\n super().__init__()\n self.data = data\n if \"attachments\" in data:\n self.attachments: _List[Attachment] = [Attachment(datum) for datum in data[\"attachments\"]]\n else:\n self.attachments: _List[Attachment] = []\n if \"coverageStartDate\" in data:\n self.coverageStartDate: str = str(data[\"coverageStartDate\"])\n else:\n self.coverageStartDate: str = None\n if \"coverageEndDate\" in data:\n self.coverageEndDate: str = str(data[\"coverageEndDate\"])\n else:\n self.coverageEndDate: str = None\n\n\nclass CreateWarrantyResponse:\n def __init__(self, data):\n super().__init__()\n self.data = data\n if \"errors\" in data:\n self.errors: ErrorList = ErrorList(data[\"errors\"])\n else:\n self.errors: ErrorList = None\n\n\nclass GetAttributesResponse:\n def __init__(self, data):\n super().__init__()\n self.data = data\n if \"buyer\" in data:\n self.buyer: dict = dict(data[\"buyer\"])\n else:\n self.buyer: dict = None\n if \"errors\" in data:\n self.errors: ErrorList = ErrorList(data[\"errors\"])\n else:\n self.errors: ErrorList = None\n\n\nclass CreateDigitalAccessKeyRequest:\n def __init__(self, data):\n super().__init__()\n self.data = data\n if \"text\" in data:\n self.text: str = str(data[\"text\"])\n else:\n self.text: str = None\n if \"attachments\" in data:\n self.attachments: _List[Attachment] = [Attachment(datum) for datum in data[\"attachments\"]]\n else:\n self.attachments: _List[Attachment] = []\n\n\nclass CreateDigitalAccessKeyResponse:\n def __init__(self, data):\n 
super().__init__()\n self.data = data\n if \"errors\" in data:\n self.errors: ErrorList = ErrorList(data[\"errors\"])\n else:\n self.errors: ErrorList = None\n\n\nclass CreateUnexpectedProblemRequest:\n def __init__(self, data):\n super().__init__()\n self.data = data\n if \"text\" in data:\n self.text: str = str(data[\"text\"])\n else:\n self.text: str = None\n\n\nclass CreateUnexpectedProblemResponse:\n def __init__(self, data):\n super().__init__()\n self.data = data\n if \"errors\" in data:\n self.errors: ErrorList = ErrorList(data[\"errors\"])\n else:\n self.errors: ErrorList = None\n\n\nclass Error:\n def __init__(self, data):\n super().__init__()\n self.data = data\n if \"code\" in data:\n self.code: str = str(data[\"code\"])\n else:\n self.code: str = None\n if \"message\" in data:\n self.message: str = str(data[\"message\"])\n else:\n self.message: str = None\n if \"details\" in data:\n self.details: str = str(data[\"details\"])\n else:\n self.details: str = None\n\n\nclass ErrorList(list, _List[\"Error\"]):\n def __init__(self, data):\n super().__init__([Error(datum) for datum in data])\n self.data = data\n\n\nclass MessagingClient(__BaseClient):\n def getMessagingActionsForOrder(\n self,\n amazonOrderId: str,\n marketplaceIds: _List[str],\n ):\n url = \"/messaging/v1/orders/{amazonOrderId}\".format(\n amazonOrderId=amazonOrderId,\n )\n data = {}\n if marketplaceIds is not None:\n data[\"marketplaceIds\"] = \",\".join(map(str, marketplaceIds))\n response = self.request(url, method=\"GET\", params=data)\n return {\n 200: GetMessagingActionsForOrderResponse,\n 400: GetMessagingActionsForOrderResponse,\n 403: GetMessagingActionsForOrderResponse,\n 404: GetMessagingActionsForOrderResponse,\n 413: GetMessagingActionsForOrderResponse,\n 415: GetMessagingActionsForOrderResponse,\n 429: GetMessagingActionsForOrderResponse,\n 500: GetMessagingActionsForOrderResponse,\n 503: GetMessagingActionsForOrderResponse,\n }[response.status_code](response.json())\n\n def confirmCustomizationDetails(\n self,\n amazonOrderId: str,\n marketplaceIds: _List[str],\n ):\n url = \"/messaging/v1/orders/{amazonOrderId}/messages/confirmCustomizationDetails\".format(\n amazonOrderId=amazonOrderId,\n )\n data = {}\n if marketplaceIds is not None:\n data[\"marketplaceIds\"] = \",\".join(map(str, marketplaceIds))\n response = self.request(url, method=\"POST\", data=data)\n return {\n 201: CreateConfirmCustomizationDetailsResponse,\n 400: CreateConfirmCustomizationDetailsResponse,\n 403: CreateConfirmCustomizationDetailsResponse,\n 404: CreateConfirmCustomizationDetailsResponse,\n 413: CreateConfirmCustomizationDetailsResponse,\n 415: CreateConfirmCustomizationDetailsResponse,\n 429: CreateConfirmCustomizationDetailsResponse,\n 500: CreateConfirmCustomizationDetailsResponse,\n 503: CreateConfirmCustomizationDetailsResponse,\n }[response.status_code](response.json())\n\n def createConfirmDeliveryDetails(\n self,\n amazonOrderId: str,\n marketplaceIds: _List[str],\n ):\n url = \"/messaging/v1/orders/{amazonOrderId}/messages/confirmDeliveryDetails\".format(\n amazonOrderId=amazonOrderId,\n )\n data = {}\n if marketplaceIds is not None:\n data[\"marketplaceIds\"] = \",\".join(map(str, marketplaceIds))\n response = self.request(url, method=\"POST\", data=data)\n return {\n 201: CreateConfirmDeliveryDetailsResponse,\n 400: CreateConfirmDeliveryDetailsResponse,\n 403: CreateConfirmDeliveryDetailsResponse,\n 404: CreateConfirmDeliveryDetailsResponse,\n 413: CreateConfirmDeliveryDetailsResponse,\n 415: 
CreateConfirmDeliveryDetailsResponse,\n 429: CreateConfirmDeliveryDetailsResponse,\n 500: CreateConfirmDeliveryDetailsResponse,\n 503: CreateConfirmDeliveryDetailsResponse,\n }[response.status_code](response.json())\n\n def createLegalDisclosure(\n self,\n amazonOrderId: str,\n marketplaceIds: _List[str],\n ):\n url = \"/messaging/v1/orders/{amazonOrderId}/messages/legalDisclosure\".format(\n amazonOrderId=amazonOrderId,\n )\n data = {}\n if marketplaceIds is not None:\n data[\"marketplaceIds\"] = \",\".join(map(str, marketplaceIds))\n response = self.request(url, method=\"POST\", data=data)\n return {\n 201: CreateLegalDisclosureResponse,\n 400: CreateLegalDisclosureResponse,\n 403: CreateLegalDisclosureResponse,\n 404: CreateLegalDisclosureResponse,\n 413: CreateLegalDisclosureResponse,\n 415: CreateLegalDisclosureResponse,\n 429: CreateLegalDisclosureResponse,\n 500: CreateLegalDisclosureResponse,\n 503: CreateLegalDisclosureResponse,\n }[response.status_code](response.json())\n\n def createNegativeFeedbackRemoval(\n self,\n amazonOrderId: str,\n marketplaceIds: _List[str],\n ):\n url = \"/messaging/v1/orders/{amazonOrderId}/messages/negativeFeedbackRemoval\".format(\n amazonOrderId=amazonOrderId,\n )\n data = {}\n if marketplaceIds is not None:\n data[\"marketplaceIds\"] = \",\".join(map(str, marketplaceIds))\n response = self.request(url, method=\"POST\", data=data)\n return {\n 201: CreateNegativeFeedbackRemovalResponse,\n 400: CreateNegativeFeedbackRemovalResponse,\n 403: CreateNegativeFeedbackRemovalResponse,\n 404: CreateNegativeFeedbackRemovalResponse,\n 413: CreateNegativeFeedbackRemovalResponse,\n 415: CreateNegativeFeedbackRemovalResponse,\n 429: CreateNegativeFeedbackRemovalResponse,\n 500: CreateNegativeFeedbackRemovalResponse,\n 503: CreateNegativeFeedbackRemovalResponse,\n }[response.status_code](response.json())\n\n def createConfirmOrderDetails(\n self,\n amazonOrderId: str,\n marketplaceIds: _List[str],\n ):\n url = \"/messaging/v1/orders/{amazonOrderId}/messages/confirmOrderDetails\".format(\n amazonOrderId=amazonOrderId,\n )\n data = {}\n if marketplaceIds is not None:\n data[\"marketplaceIds\"] = \",\".join(map(str, marketplaceIds))\n response = self.request(url, method=\"POST\", data=data)\n return {\n 201: CreateConfirmOrderDetailsResponse,\n 400: CreateConfirmOrderDetailsResponse,\n 403: CreateConfirmOrderDetailsResponse,\n 404: CreateConfirmOrderDetailsResponse,\n 413: CreateConfirmOrderDetailsResponse,\n 415: CreateConfirmOrderDetailsResponse,\n 429: CreateConfirmOrderDetailsResponse,\n 500: CreateConfirmOrderDetailsResponse,\n 503: CreateConfirmOrderDetailsResponse,\n }[response.status_code](response.json())\n\n def createConfirmServiceDetails(\n self,\n amazonOrderId: str,\n marketplaceIds: _List[str],\n ):\n url = \"/messaging/v1/orders/{amazonOrderId}/messages/confirmServiceDetails\".format(\n amazonOrderId=amazonOrderId,\n )\n data = {}\n if marketplaceIds is not None:\n data[\"marketplaceIds\"] = \",\".join(map(str, marketplaceIds))\n response = self.request(url, method=\"POST\", data=data)\n return {\n 201: CreateConfirmServiceDetailsResponse,\n 400: CreateConfirmServiceDetailsResponse,\n 403: CreateConfirmServiceDetailsResponse,\n 404: CreateConfirmServiceDetailsResponse,\n 413: CreateConfirmServiceDetailsResponse,\n 415: CreateConfirmServiceDetailsResponse,\n 429: CreateConfirmServiceDetailsResponse,\n 500: CreateConfirmServiceDetailsResponse,\n 503: CreateConfirmServiceDetailsResponse,\n }[response.status_code](response.json())\n\n def 
CreateAmazonMotors(\n self,\n amazonOrderId: str,\n marketplaceIds: _List[str],\n ):\n url = \"/messaging/v1/orders/{amazonOrderId}/messages/amazonMotors\".format(\n amazonOrderId=amazonOrderId,\n )\n data = {}\n if marketplaceIds is not None:\n data[\"marketplaceIds\"] = \",\".join(map(str, marketplaceIds))\n response = self.request(url, method=\"POST\", data=data)\n return {\n 201: CreateAmazonMotorsResponse,\n 400: CreateAmazonMotorsResponse,\n 403: CreateAmazonMotorsResponse,\n 404: CreateAmazonMotorsResponse,\n 413: CreateAmazonMotorsResponse,\n 415: CreateAmazonMotorsResponse,\n 429: CreateAmazonMotorsResponse,\n 500: CreateAmazonMotorsResponse,\n 503: CreateAmazonMotorsResponse,\n }[response.status_code](response.json())\n\n def CreateWarranty(\n self,\n amazonOrderId: str,\n marketplaceIds: _List[str],\n ):\n url = \"/messaging/v1/orders/{amazonOrderId}/messages/warranty\".format(\n amazonOrderId=amazonOrderId,\n )\n data = {}\n if marketplaceIds is not None:\n data[\"marketplaceIds\"] = \",\".join(map(str, marketplaceIds))\n response = self.request(url, method=\"POST\", data=data)\n return {\n 201: CreateWarrantyResponse,\n 400: CreateWarrantyResponse,\n 403: CreateWarrantyResponse,\n 404: CreateWarrantyResponse,\n 413: CreateWarrantyResponse,\n 415: CreateWarrantyResponse,\n 429: CreateWarrantyResponse,\n 500: CreateWarrantyResponse,\n 503: CreateWarrantyResponse,\n }[response.status_code](response.json())\n\n def GetAttributes(\n self,\n amazonOrderId: str,\n marketplaceIds: _List[str],\n ):\n url = \"/messaging/v1/orders/{amazonOrderId}/attributes\".format(\n amazonOrderId=amazonOrderId,\n )\n data = {}\n if marketplaceIds is not None:\n data[\"marketplaceIds\"] = \",\".join(map(str, marketplaceIds))\n response = self.request(url, method=\"GET\", params=data)\n return {\n 200: GetAttributesResponse,\n 400: GetAttributesResponse,\n 403: GetAttributesResponse,\n 404: GetAttributesResponse,\n 413: GetAttributesResponse,\n 415: GetAttributesResponse,\n 429: GetAttributesResponse,\n 500: GetAttributesResponse,\n 503: GetAttributesResponse,\n }[response.status_code](response.json())\n\n def createDigitalAccessKey(\n self,\n amazonOrderId: str,\n marketplaceIds: _List[str],\n ):\n url = \"/messaging/v1/orders/{amazonOrderId}/messages/digitalAccessKey\".format(\n amazonOrderId=amazonOrderId,\n )\n data = {}\n if marketplaceIds is not None:\n data[\"marketplaceIds\"] = \",\".join(map(str, marketplaceIds))\n response = self.request(url, method=\"POST\", data=data)\n return {\n 201: CreateDigitalAccessKeyResponse,\n 400: CreateDigitalAccessKeyResponse,\n 403: CreateDigitalAccessKeyResponse,\n 404: CreateDigitalAccessKeyResponse,\n 413: CreateDigitalAccessKeyResponse,\n 415: CreateDigitalAccessKeyResponse,\n 429: CreateDigitalAccessKeyResponse,\n 500: CreateDigitalAccessKeyResponse,\n 503: CreateDigitalAccessKeyResponse,\n }[response.status_code](response.json())\n\n def createUnexpectedProblem(\n self,\n amazonOrderId: str,\n marketplaceIds: _List[str],\n ):\n url = \"/messaging/v1/orders/{amazonOrderId}/messages/unexpectedProblem\".format(\n amazonOrderId=amazonOrderId,\n )\n data = {}\n if marketplaceIds is not None:\n data[\"marketplaceIds\"] = \",\".join(map(str, marketplaceIds))\n response = self.request(url, method=\"POST\", data=data)\n return {\n 201: CreateUnexpectedProblemResponse,\n 400: CreateUnexpectedProblemResponse,\n 403: CreateUnexpectedProblemResponse,\n 404: CreateUnexpectedProblemResponse,\n 413: CreateUnexpectedProblemResponse,\n 415: 
CreateUnexpectedProblemResponse,\n 429: CreateUnexpectedProblemResponse,\n 500: CreateUnexpectedProblemResponse,\n 503: CreateUnexpectedProblemResponse,\n }[response.status_code](response.json())\n","sub_path":"amazon_sp_api_clients/messaging.py","file_name":"messaging.py","file_ext":"py","file_size_in_byte":21552,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"212126902","text":"from django.core.validators import RegexValidator\nfrom django.db import models\n\nfrom bumblebee.users.models import CustomUser\n\nfrom .validators import validate_date_lt_today\n\n\nclass Profile(models.Model):\n \"\"\" \"\"\"\n\n user = models.OneToOneField(\n CustomUser, related_name=\"profile\", on_delete=models.CASCADE\n )\n created_date = models.DateTimeField(auto_now_add=True)\n updated_date = models.DateTimeField(auto_now=True)\n\n account_verified = models.BooleanField(default=False)\n\n use_persona = models.BooleanField(default=True)\n\n persona = models.PositiveIntegerField(default=1)\n avatar = models.ImageField(default=\"default.png\", upload_to=\"avatar/\")\n cover = models.ImageField(default=\"default.png\", upload_to=\"cover/\")\n\n bio = models.CharField(\n max_length=200, help_text=\"What's on your mind?\", null=True, blank=True\n )\n name = models.CharField(\n max_length=747,\n unique=False,\n help_text=\"Full Name. For eg. Will Smith \",\n )\n nickname = models.CharField(\n max_length=50,\n unique=False,\n help_text=\"Nickname. The name you want to be called. For eg. Will\",\n )\n dob = models.DateTimeField(\n validators=[validate_date_lt_today],\n null=True,\n unique=False,\n blank=True,\n )\n location = models.CharField(\n max_length=200,\n help_text=\"User Location. Street, Municipality/VDC, State, Country\",\n null=True,\n blank=True,\n )\n\n phone_validator = RegexValidator(\n regex=r\"^\\+?1?\\d{9,14}$\",\n message=\"Phone number. It must contain Dialing code and contact number. For eg. 
+977980000000000\",\n )\n phone = models.CharField(\n validators=[phone_validator],\n max_length=17,\n unique=True,\n blank=True,\n null=True,\n )\n phone_verified = models.BooleanField(\n help_text=\"Contact number verified\", default=False\n )\n\n private = models.BooleanField(help_text=\"Profile Privacy\", default=False)\n\n # def __init__(self, *args, **kwargs):\n # \"\"\"\n # Overriding init method\n # \"\"\"\n # super(Profile, self).__init__(*args, **kwargs)\n\n # def save(self, *args, **kwargs):\n # \"\"\"\n # Overriding default save method\n # \"\"\"\n # super(Profile, self).save(*args, **kwargs)\n\n def __str__(self):\n return f\"{self.user.username} Profile\"\n\n def get_username(self):\n return self.user.username\n","sub_path":"bumblebee/profiles/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2472,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"435129958","text":"#!/usr/bin/env python3\n#coding=UTF-8\n\nimport os\nimport torch\nimport yaml\nimport torch.nn as nn\nfrom collections import OrderedDict\nimport numpy as np\nfrom src.data.vocabulary import Vocabulary\nfrom src.data.vocabulary import PAD, BOS, EOS, UNK\nimport subprocess\nimport time\n\ndef random_text_selection(config_path,\n data_size=100,\n save_log=\"random_sents\"):\n # load configs\n np.random.seed(32767)\n with open(config_path.strip()) as f:\n configs = yaml.load(f)\n data_configs = configs[\"data_configs\"]\n return_set = []\n with open(data_configs[\"train_data\"][0], \"r\") as src, open(save_log, \"w\") as out:\n i = 0\n for line in src:\n if np.random.uniform() > 0.5 and i < data_size:\n i += 1\n out.write(line)\n return_set += [line]\n return return_set\n\n\ndef random_pair_selection(config_path,\n data_size=100,\n save_log=\"random_sents\"):\n \"\"\"\n randomly choose from parallel data, and save to the save_logs\n :param config_path:\n :param data_size:\n :param save_log:\n :return: random selected pairs\n \"\"\"\n np.random.seed(32767)\n with open(config_path.strip()) as f:\n configs = yaml.load(f)\n data_configs = configs[\"data_configs\"]\n with open(data_configs[\"train_data\"][0], \"r\") as src, \\\n open(data_configs[\"train_data\"][1], \"r\") as trg, \\\n open(save_log+\".src\", \"w\") as out_src, open(save_log+\".trg\", \"w\") as out_trg:\n counter=0\n return_src=[]\n return_trg=[]\n for sent_s, sent_t in zip(src,trg):\n if np.random.uniform()<0.2 and counter\n >\n \"\"\"\n if emit_as_id:\n assert src_vocab is not None, \"src_vocab must be provided when emit_as_id\"\n with open(save_to) as similar_vocab:\n word2p = OrderedDict()\n word2near_vocab = OrderedDict()\n for line in similar_vocab:\n line = line.split(\"\\t\")\n if emit_as_id:\n word2near_vocab[src_vocab.token2id(line[0])] = [src_vocab.token2id(i) for i in line[1:-2]]\n word2p[src_vocab.token2id(line[0])] = float(line[-1])\n else:\n word2near_vocab[line[0]] = line[1:-2]\n word2p[line[0]] = float(line[-1])\n return word2p, word2near_vocab\n\n# load translation model parameters\ndef load_translate_model(path, map_location=\"cpu\"):\n state_dict = torch.load(path, map_location=map_location)\n if \"model\" in state_dict:\n return state_dict[\"model\"]\n return state_dict\n\ndef initial_random_perturb(config_path,\n inputs,\n w2p, w2vocab,\n mode=\"len_based\",\n key_type=\"token\",\n show_bleu=False):\n \"\"\"\n batched random perturb, perturb is based on random probability from the collected candidates\n meant to test initial attack rate.\n :param config_path: 
victim configs\n :param inputs: raw batched input (list) sequences in [batch_size, seq_len]\n :param w2p: indicates how likely a word is perturbed\n :param w2vocab: near candidates\n :param mode: based on word2near_vocab, how to distribute likelihood among candidates\n :param key_type: inputs are given by raw sequences of tokens or tokenized labels\n :param show_bleu: whether to show bleu of perturbed seqs (compare to original seqs)\n :return: list of perturbed inputs and list of perturbed flags\n \"\"\"\n np.random.seed(int(time.time()))\n assert mode in [\"uniform\", \"len_based\"], \"Mode must be in uniform or multinomial.\"\n assert key_type in [\"token\", \"label\"], \"inputs key type must be token or label.\"\n # load configs\n with open(config_path.strip()) as f:\n configs = yaml.load(f)\n data_configs = configs[\"data_configs\"]\n\n # load vocabulary file and tokenize\n src_vocab = Vocabulary(**data_configs[\"vocabularies\"][0])\n perturbed_results = []\n flags = []\n for sent in inputs:\n if np.random.uniform() < 0.5: # perturb the sentence \n perturbed_sent = []\n if key_type == \"token\":\n tokenized_sent = src_vocab.tokenizer.tokenize(sent)\n for word in tokenized_sent:\n if np.random.uniform() < w2p[word]:\n # need to perturb on lexical level\n if mode == \"uniform\":\n # uniform choose from candidates:\n perturbed_sent += [w2vocab[word][np.random.choice(len(w2vocab[word]),\n 1)[0]]]\n elif mode == \"len_based\":\n # weighted choose from candidates:\n weights = [1./(1+abs(len(word)-len(c))) for c in w2vocab[word]]\n norm_weights = [c/sum(weights) for c in weights]\n perturbed_sent += [w2vocab[word][np.random.choice(len(w2vocab[word]),\n 1,\n p=norm_weights\n )[0]]]\n else:\n perturbed_sent += [word]\n # print(perturbed_sent) # yield same form of sequences of tokens\n perturbed_sent = src_vocab.tokenizer.detokenize(perturbed_sent)\n elif key_type == \"label\": # tokenized labels\n for word_index in sent:\n word = src_vocab.id2token(word_index)\n if np.random.uniform() < w2p[word]:\n if mode == \"uniform\":\n # uniform choose from candidates:\n perturbed_label = src_vocab.token2id(w2vocab[word][np.random.choice(\n len(w2vocab[word]), 1\n )[0]])\n perturbed_sent += [perturbed_label]\n elif mode == \"len_based\":\n # weighted choose from candidates:\n weights = [1. 
/ (1 + abs(len(word) - len(c))) for c in w2vocab[word]]\n norm_weights = [c / sum(weights) for c in weights]\n perturbed_label = src_vocab.token2id(w2vocab[word][np.random.choice(len(w2vocab[word]),\n 1,\n p=norm_weights\n )[0]])\n perturbed_sent += [perturbed_label]\n else:\n perturbed_sent += [word_index]\n perturbed_results += [perturbed_sent]\n flags += [1]\n # out.write(perturbed_sent + \"\\n\")\n else:\n perturbed_results += [sent]\n flags += [0]\n return perturbed_results, flags\n\ndef collect_pinyin(pinyin_path, src_path):\n # generate pinyin for every Chinese characters in training data\n \"\"\"\n read from pinyin_path to generate pinyin dictionary\n :param pinyin_path: path to pin data file\n :param src_path: chinese src data path to collect\n :return: two dictionary of pinyin2char:{pinyin: [list of characters]},\n and char2pinyin: {ord(char): [list of pinyin]}\n \"\"\"\n char2pyDict = {}\n py2charDict = {}\n count_char = {}\n for line in open(pinyin_path):\n k, v = line.strip().split('\\t')\n char2pyDict[k] = v.split(\" \") # there can be multiple values(pinyin) for a key\n\n with open(src_path, \"r\") as input_src:\n line_counter = 0\n for line in input_src:\n line_counter += 1\n # if line_counter%1000 == 0:\n # break\n # collect characters and their pinyin\n for char in line.strip():\n key = \"%X\" % ord(char)\n if char in count_char:\n count_char[char] += 1\n else:\n count_char[char] = 1\n try:\n for pinyin in char2pyDict[key]:\n pinyin = pinyin.strip() # .lower()\n if pinyin in py2charDict:\n if char not in py2charDict[pinyin]:\n py2charDict[pinyin].append(char)\n else:\n py2charDict[pinyin] = [char]\n except: # special char without pinyin\n continue\n return char2pyDict, py2charDict\n\n\ndef gen_UNK(src_token, vocab, char2pyDict, py2charDict):\n \"\"\"\n when src_token is to be replaced by UNK, generate a token by randomly replace\n a character that has the same vocal (pinyin) as the src character\n (and make sure new token is UNK to vocab)\n if no UNK is found, return original token by default\n :param src_token: chinese src_token to be replaced by UNK\n :param vocab: data.vocabulary object to varify if result is UNK\n :param char2pyDict: dictionary {ord(char): pinyin}\n :param py2charDict: dictionary {pinyin}\n :return: a UNK word similar to src_token\n \"\"\"\n edit_range = len(src_token)\n if src_token.endswith(\"@@\"): # don't break the signal for BPE\n edit_range -= 2\n\n if (char2pyDict is not None) and (py2charDict is not None):\n # generate homophone\n index = np.random.randint(edit_range)\n for i in range(edit_range):\n ori_char = src_token[index]\n new_token = src_token\n py_key = \"%X\" % ord(ori_char)\n if py_key in char2pyDict:\n # this character is available in gen_UNK\n for pinyin in char2pyDict[py_key]:\n # check for every possible vocal\n for candidate in py2charDict[pinyin]:\n # check for every character share this vocal\n new_token = list(new_token)\n new_token[index] = candidate\n new_token = \"\".join(new_token)\n if candidate != ori_char and vocab.token2id(new_token) == UNK:\n return new_token\n else:\n continue\n index = (index+1) % edit_range\n else: # roman character replacement to generate unk\n # scramble the symble in between\n if edit_range > 3:\n index = np.random.randint(0, edit_range-2)\n new_token = src_token[:index] + \\\n src_token[index+1]+src_token[index]+\\\n src_token[index+2:]\n if vocab.token2id(new_token) == UNK:\n return new_token\n\n # nothing returned or token is too short, repeat last char\n char = src_token[edit_range - 1]\n 
token_stem = src_token[:edit_range]\n    new_token = token_stem + char\n    if src_token.endswith(\"@@\"):\n        temp_token = new_token+\"@@\"\n    else:\n        temp_token = new_token\n    while vocab.token2id(temp_token) != UNK:\n        new_token = new_token + char\n        # print(src_token, \"#>$#\", new_token)\n        if src_token.endswith(\"@@\"):\n            temp_token = new_token + \"@@\"\n        else:\n            temp_token = new_token\n    new_token = temp_token\n    # print(src_token, \"-->\", new_token)\n    return new_token\n\ndef corpus_bleu_char(hyp_in, ref_in, need_tokenized=True):\n    \"\"\"\n    preprocess corpus into char level and test BLEU,\n    proposed to check modification rate\n    :param hyp_in: files to be tested\n    :param ref_in: reference file\n    :param need_tokenized: for languages needs tokenization\n    :return:\n    \"\"\"\n    with open(hyp_in, \"r\") as hyp, open(ref_in, \"r\") as ref, \\\n            open(\"hyp_char\", \"w\") as hyp_char, open(\"ref_char\", \"w\") as ref_char:\n        for line_hyp_in, line_ref_in in zip(hyp, ref):\n            if not need_tokenized:\n                line_hyp_in = line_hyp_in.replace(\" \", \"\")\n                line_ref_in = line_ref_in.replace(\" \", \"\")\n            hyp_char.write(\" \".join(list(line_hyp_in)))\n            ref_char.write(\" \".join(list(line_ref_in)))\n    # cat hyp_char | sacrebleu -lc --score-only ref_char\n    # sacrebleu_cmd = [\"sacrebleu\", \"-l\"] + [\"--score-only\",]+[\"ref_char\"]\n    cat = subprocess.Popen((\"cat\", \"hyp_char\"), stdout=subprocess.PIPE)\n    cmd_bleu = subprocess.Popen((\"/home/zouw/anaconda3/bin/sacrebleu\", \"-lc\", \"--score-only\", \"--force\", \"ref_char\"),\n                                stdin=cat.stdout,\n                                stdout=subprocess.PIPE)\n    bleu = cmd_bleu.communicate()[0].decode(\"utf-8\").strip()\n    print(bleu)\n    bleu = float(bleu)\n    subprocess.Popen(\"rm ref_char hyp_char\", shell=True)\n    return bleu\n\n","sub_path":"adversarials/adversarial_utils.py","file_name":"adversarial_utils.py","file_ext":"py","file_size_in_byte":22207,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"213817431","text":"from time import perf_counter\nfrom typing import Text\n\nfrom redditrepostsleuth.core.logging import log\n\n\nclass SearchTimes:\n    def __init__(self):\n        self._timers = []\n        self.total_search_time: float = float(0)\n        self.total_filter_time: float = float(0)\n\n    def start_timer(self, name: Text):\n        self._timers.append({\n            'name': name,\n            'start': perf_counter()\n        })\n\n    def stop_timer(self, name: Text):\n        timer = next((x for x in self._timers if x['name'] == name), None)\n        if not timer:\n            log.error('Failed to find timer %s', name)\n            return  # no matching timer: bail out instead of raising TypeError on timer['start'] below\n        if hasattr(self, name):\n            setattr(self, name, round(perf_counter() - timer['start'], 5))","sub_path":"redditrepostsleuth/core/model/search_times.py","file_name":"search_times.py","file_ext":"py","file_size_in_byte":712,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"471344465","text":"\"\"\"ler varios numeros inteiros, mostrar a media, qual o maior, o menor. deve perguntar se quer continuar ou não a digitar outro numero\"\"\"\r\n\r\n\r\nn = int(input('Digite um número: '))\r\ncontinuar = True\r\ntotal = 0\r\ncount = 0\r\nmaior = 0\r\nmenor = 99999999999999999999999999999\r\nwhile continuar:\r\n    total += n\r\n    count += 1\r\n    if n > maior:\r\n        maior = n\r\n    if n < menor:\r\n        menor = n\r\n    sair = str(input('Presione S para continuar ou qualquer outra letra para sair? 
').strip().lower())\r\n if sair == 's':\r\n n = int(input('Digite outro número: '))\r\n else:\r\n continuar = False\r\nmedia = total / count\r\nprint('A média dos números digitados foi {}.\\nO maior número foi {}.\\nO menor número foi {}.\\n'.format(media, maior, menor))\r\n","sub_path":"065-maior-e-menor-v02.py","file_name":"065-maior-e-menor-v02.py","file_ext":"py","file_size_in_byte":765,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"332567371","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Mar 3 13:45:43 2020\n\n@author: antony\n\"\"\"\n\nimport json\nimport time\nimport urllib.request\nimport collections\nimport subprocess\nimport re\n\npubs = json.load(open('publications.json', 'r'))\n\n\nURL = 'https://www.ncbi.nlm.nih.gov/pubmed/?term={}&report=docsum&format=text'\n\n\nfrom Bio import Entrez\nEntrez.email = 'abh2138@cumc.columbia.edu'\nEntrez.api_key = 'fd2a5fb8bbf75480b2371d464d6e7dd95f08'\n\n\n\n\npubmeds = collections.defaultdict(str)\n\nc = 0\n\nfor pub in pubs:\n title = pub['title']\n \n if title == '':\n continue\n\n if title not in pubmeds or pubmeds[title]['id'] == '':\n print(title)\n \n handle = Entrez.esearch(db='pubmed', term=title, field='title')\n record = Entrez.read(handle)\n handle.close()\n \n if len(record['IdList']) > 0:\n handle = Entrez.efetch(db='pubmed', id=record['IdList'][0], retmode='medline', rettype='text')\n text = handle.read()\n handle.close()\n \n id = ''\n \n matcher = re.search(r'PMID: (\\d+)', text)\n \n if matcher:\n id = matcher.group(1)\n \n print(id)\n pubmeds[title] = {'id':id, 'data':text}\n time.sleep(1)\n \n #break\n \n print(c)\n \n c += 1\n \n \nf = open('pubmed_ids.txt', 'w')\nf.write('title\\tpubmed\\n')\nfor title in sorted(pubmeds):\n f.write('{}\\t{}\\n'.format(title, pubmeds[title]['id']))\nf.close()","sub_path":"src/data/add_pubmed.py","file_name":"add_pubmed.py","file_ext":"py","file_size_in_byte":1538,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"260342397","text":"import numpy as np\nimport numpy.linalg as la\nimport time\n\ndef func(x):\n return np.array([x[1] - x[0]**2, x[0]**2 + x[1]**2 - 1])\n\ndef jacf(x):\n return np.array([\n np.array([-2*x[0], 1]),\n np.array([2*x[0],2*x[1]])\n ])\n\nx = (2,3)\n\ndef Secante(x):\n n_iter = 0\n while la.norm(func(x)) > 1e-5:\n\n d = la.solve(jacf(x), -1*func(x))\n\n s = x\n \n x = x + d\n \n y = func(x) - func(s)\n s = x - s\n\n\n n_iter = n_iter + 1\n return x, n_iter\n\n\nstart_time = time.time()\nres, it = Secante(x)\nend_time = time.time()\nprint('x:', res)\nprint('f(x):', func(res))\nprint('n_iter:', it)\nprint(f'Tempo de execução: {end_time - start_time}s')\n ","sub_path":"2020/Metodos2Ordem/secante.py","file_name":"secante.py","file_ext":"py","file_size_in_byte":727,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"394517584","text":"import os\nimport sys\nimport json\nimport shutil\nimport base64\nimport argparse\nfrom os.path import join as pjoin\n\nimport numpy as np\n\nimport matplotlib\n# According to https://github.com/codalab/codalab-competitions/wiki/User_DetailedResultsPage\nmatplotlib.use('Agg') # Very important to make it work on Linux.\n\nimport matplotlib.pyplot as plt\n\n\nMAX_HANDICAP = 5\nHANDICAP_ADJUSTMENTS = {\n 0: 1.00,\n 1: 0.85,\n 2: 0.77,\n 3: 0.73,\n 4: 0.65,\n 5: 0.50,\n}\n\n\ndef get_total_score(stats):\n score = 0\n for gamefile in stats:\n for 
no_episode in range(len(stats[gamefile][\"runs\"])):\n            score += stats[gamefile][\"runs\"][no_episode][\"score\"]\n\n    return score\n\n\ndef get_max_score(stats):\n    max_score = 0\n    for gamefile in stats:\n        for no_episode in range(len(stats[gamefile][\"runs\"])):\n            max_score += stats[gamefile][\"max_scores\"]\n\n    return max_score\n\n\ndef get_total_steps(stats):\n    steps = 0\n    for gamefile in stats:\n        for no_episode in range(len(stats[gamefile][\"runs\"])):\n            steps += stats[gamefile][\"runs\"][no_episode][\"steps\"]\n\n    return steps\n\n\ndef get_handicap(requested_infos):\n    requested_infos = set(requested_infos)\n    handicap = 0\n\n    if len(requested_infos & {\"description\", \"inventory\"}) > 0:\n        handicap = 1\n\n    if len(requested_infos & {\"verbs\", \"command_templates\"}) > 0:\n        handicap = 2\n\n    if len(requested_infos & {\"entities\"}) > 0:\n        handicap = 3\n\n    if len(requested_infos & {\"recipe\"}) > 0:\n        handicap = 4\n\n    if len(requested_infos & {\"admissible_commands\"}) > 0:\n        handicap = 5\n\n    return handicap\n\n\ndef score_leaderboard(stats, output_dir):\n    # Get agent's handicap.\n    handicap = get_handicap(stats[\"requested_infos\"])\n\n    # Extract result from stats.\n    leaderboard = {}\n    leaderboard[\"score\"] = get_total_score(stats[\"games\"])\n    leaderboard[\"adjusted_score\"] = HANDICAP_ADJUSTMENTS[handicap] * leaderboard[\"score\"]\n    leaderboard[\"nb_steps\"] = get_total_steps(stats[\"games\"])\n    leaderboard[\"handicap\"] = get_handicap(stats[\"requested_infos\"])\n\n    # Write leaderboard results.\n    if not os.path.exists(output_dir):\n        os.makedirs(output_dir)\n\n    content = \"\\n\".join(\"{}: {}\".format(k, v) for k, v in leaderboard.items())\n    with open(pjoin(output_dir, \"scores.txt\"), \"w\") as f:\n        f.write(content)\n\n    stats[\"leaderboard\"] = leaderboard\n    print(content)\n\n\ndef score_html(stats, output_dir):\n    # Create folders for detailed results.\n    html_dir = pjoin(output_dir, \"html\")\n    if not os.path.exists(html_dir):\n        os.makedirs(html_dir)\n\n    images_dir = pjoin(html_dir, \"images\")\n    if not os.path.exists(images_dir):\n        os.makedirs(images_dir)\n\n    #html = \"Available during the validation phase.\"\n    # Load template files.\n    with open(pjoin(os.path.dirname(__file__), \"template.html\")) as f:\n        html = f.read()\n    with open(pjoin(os.path.dirname(__file__), \"template.css\")) as f:\n        css = f.read()\n    with open(pjoin(os.path.dirname(__file__), \"template.js\")) as f:\n        js = f.read()\n    with open(pjoin(os.path.dirname(__file__), \"template_game.html\")) as f:\n        game_html_template = f.read()\n\n    # Get the information.\n    # Agent's handicap.\n    handicap = get_handicap(stats[\"requested_infos\"])\n\n    total_score = get_total_score(stats[\"games\"])\n    max_score = get_max_score(stats[\"games\"])\n    adjusted_score = HANDICAP_ADJUSTMENTS[handicap] * total_score\n    nb_steps = get_total_steps(stats[\"games\"])\n\n    # Sort commands according to the number of skills present in the game.\n    gamefiles = list(stats[\"games\"])\n    gamefiles = sorted(gamefiles)\n    gamefiles = sorted(gamefiles, key=lambda e: len(e.split(\"+\")))\n\n    game_html = \"\"\n    for gamefile in gamefiles:\n        nb_runs = len(stats[\"games\"][gamefile][\"runs\"])\n        scores = []\n        steps = []\n        for no_episode in range(nb_runs):\n            scores.append(stats[\"games\"][gamefile][\"runs\"][no_episode][\"score\"])\n            steps.append(stats[\"games\"][gamefile][\"runs\"][no_episode][\"steps\"])\n\n        game_score = sum(scores)\n        game_score_avg = game_score / nb_runs\n        game_max_score = stats[\"games\"][gamefile][\"max_scores\"] * nb_runs\n        game_score_ratio 
= game_score / game_max_score\n\n game_name = gamefile[:-4]\n skillset = game_name.rsplit(\"-\", 1)[0].split(\"-\", 2)[-1]\n plot1_path = pjoin(images_dir, game_name + \"_plot1.png\")\n plot2_path = pjoin(images_dir, game_name + \"_plot2.png\")\n plot12_path = pjoin(images_dir, game_name + \"_plot12.png\")\n\n # # Build plot #1\n # fig, ax1 = plt.subplots()\n # lines = []\n # lines += ax1.plot(range(1, nb_runs + 1), scores, color=\"#1f77b4\", label=\"Score\")\n # ax1.set_xlabel(\"Nb. runs\")\n # ax1.set_ylabel(\"Score\")\n # ax1.tick_params(axis='y', labelcolor=\"#1f77b4\")\n # plt.savefig(plot1_path)\n # plt.close()\n\n # # Build plot #2\n # fig, ax1 = plt.subplots()\n # lines = []\n # lines += ax1.plot(range(1, nb_runs + 1), steps, color=\"#ff7f0e\", label=\"Moves\")\n # ax1.set_xlabel(\"Nb. runs\")\n # ax1.set_ylabel(\"Moves\")\n # ax1.tick_params(axis='y', labelcolor=\"#ff7f0e\")\n # plt.savefig(plot2_path)\n # plt.close()\n\n # Build plot #12\n fig, ax1 = plt.subplots()\n lines = []\n lines += ax1.plot(range(1, nb_runs + 1), scores, color=\"#1f77b4\", label=\"Score\")\n ax1.set_xlabel(\"Nb. runs\")\n ax1.set_ylabel(\"Score\")\n ax1.tick_params(axis='y', labelcolor=\"#1f77b4\")\n\n ax2 = ax1.twinx()\n lines += ax2.plot(range(1, nb_runs + 1), steps, color=\"#ff7f0e\", label=\"Moves\")\n ax2.set_ylabel(\"Moves\")\n ax2.tick_params(axis='y', labelcolor=\"#ff7f0e\")\n ax1.legend(lines, [l.get_label() for l in lines])\n plt.savefig(plot12_path)\n plt.close()\n\n # plot1_src = pjoin(os.path.basename(images_dir), game_name + \"_plot1.png\") # Path relative to ./html/\n # plot2_src = pjoin(os.path.basename(images_dir), game_name + \"_plot2.png\") # Path relative to ./html/\n plot12_src = pjoin(os.path.basename(images_dir), game_name + \"_plot12.png\") # Path relative to ./html/\n\n # According to https://github.com/codalab/codalab-competitions/wiki/User_DetailedResultsPage\n # encode the image as base64 to embed it in the html.\n #data_uri = open(plot12_path, 'rb').read().encode('base64').replace('\\n', '')\n with open(plot12_path, 'rb') as f:\n data_uri = base64.b64encode(f.read()).decode().replace('\\n', '')\n plot12_src = 'data:image/png;base64,{0}'.format(data_uri)\n\n status = \"low\"\n if game_score_ratio >= 0.1:\n status = \"mid\"\n if game_score_ratio >= 0.9:\n status = \"high\"\n\n # Fill template.\n game_html += game_html_template.format(game_name=\"({:05.1%})\\t\".format(game_score_ratio) + skillset,\n status=status,\n game_score=game_score, game_max_score=game_max_score,\n game_score_ratio=game_score_ratio,\n #plot1_src=plot1_src, plot2_src=plot2_src,\n plot12_src=plot12_src,\n )\n\n # Fill template.\n html = html.format(css=css, js=js,\n total_score=total_score, max_score=max_score, score_ratio=total_score/max_score,\n adjusted_score=adjusted_score, handicap=handicap,\n game_stats=game_html)\n\n # Write detailed results.\n with open(pjoin(html_dir, \"detailed_results.html\"), \"w\") as f:\n f.write(html)\n\ndef main():\n parser = argparse.ArgumentParser(description=\"Extract score from `stats.json`.\")\n parser.add_argument(\"stats\", help=\"JSON file\")\n parser.add_argument(\"output_dir\")\n parser.add_argument(\"--html\", action=\"store_true\")\n args = parser.parse_args()\n\n with open(args.stats) as f:\n stats = json.load(f)\n\n score_leaderboard(stats, args.output_dir)\n if args.html:\n score_html(stats, args.output_dir)\n\n\nif __name__ == \"__main__\":\n 
main()\n","sub_path":"tests/score.py","file_name":"score.py","file_ext":"py","file_size_in_byte":8289,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"275542392","text":"'''\na_list = [1,2,3]\nprint( sum( a_list ) )\n\nimport random\npoint1 = random.randrange(1,7)\npoint2 = random.randrange(1,7)\npoint3 = random.randrange(1,7)\n\nprint(point1, point2, point3)\n'''\n\nimport random\ndef roll_dice(numbers=3, points=None):\n print(\"<<<< ROLL THE DICE! >>>>\")\n if points is None:\n points = []\n while numbers > 0:\n point = random.randrange(1, 7)\n points.append(point)\n numbers = numbers - 1\n return points\n\ndef roll_result(total):\n isBig = 11 <= total <= 18\n isSmall = 3 <= total <= 10\n if isBig:\n return 'Big'\n elif isSmall:\n return 'Small'\n\n\ndef start_game(money=1000):\n print(\"<<<< GAME START! >>>>\")\n choices = ['Big', 'Small']\n your_choice = input('Big or Small : ')\n if your_choice in choices:\n your_help_win_money = int(input('How much you wanna bet ?'))\n\n points = roll_dice()\n total = sum(points)\n youWin = your_choice == roll_result(total)\n if youWin:\n money += your_help_win_money\n print(\"The points are \", points, 'You win!')\n print(\"You gained \", your_help_win_money, ', You have', money, ' now')\n start_game(money)\n else:\n money -= your_help_win_money\n print(\"The points are\", points, 'You lose!')\n print(\"You lost \", your_help_win_money, ', You have', money, ' now')\n\n if money > 0:\n start_game(money)\n else:\n print(\"GAME OVER\")\n\n else:\n print('Invalid Words')\n start_game(money)\n\nstart_game()","sub_path":"lesson/lesson 13.py","file_name":"lesson 13.py","file_ext":"py","file_size_in_byte":1589,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"296860477","text":"from unittest import mock\n\nimport pytest\n\nfrom cognite.client import CogniteClient\nfrom cognite.client.data_classes import Sequence, SequenceColumnUpdate, SequenceFilter, SequenceUpdate\nfrom cognite.client.exceptions import CogniteNotFoundError\nfrom tests.utils import set_request_limit\n\n\n@pytest.fixture\ndef new_seq(cognite_client):\n column_def = [\n {\"valueType\": \"STRING\", \"externalId\": \"user\", \"description\": \"some description\"},\n {\"valueType\": \"DOUBLE\", \"externalId\": \"amount\"},\n {\"valueType\": \"LONG\", \"externalId\": \"age\"},\n ]\n seq = cognite_client.sequences.create(Sequence(name=\"test_temp\", columns=column_def, metadata={\"a\": \"b\"}))\n yield seq\n cognite_client.sequences.delete(id=seq.id)\n assert cognite_client.sequences.retrieve(seq.id) is None\n\n\n@pytest.fixture\ndef get_spy(cognite_client):\n with mock.patch.object(cognite_client.sequences, \"_get\", wraps=cognite_client.sequences._get) as _:\n yield\n\n\n@pytest.fixture\ndef post_spy(cognite_client):\n with mock.patch.object(cognite_client.sequences, \"_post\", wraps=cognite_client.sequences._post) as _:\n yield\n\n\nclass TestSequencesAPI:\n def test_retrieve(self, cognite_client):\n listed_asset = cognite_client.sequences.list(limit=1)[0]\n retrieved_asset = cognite_client.sequences.retrieve(listed_asset.id)\n assert retrieved_asset == listed_asset\n\n def test_retrieve_multiple(self, cognite_client):\n res = cognite_client.sequences.list(limit=2)\n retrieved_assets = cognite_client.sequences.retrieve_multiple([s.id for s in res])\n for listed_asset, retrieved_asset in zip(res, retrieved_assets):\n retrieved_asset.external_id = listed_asset.external_id\n assert res == 
retrieved_assets\n\n @pytest.mark.parametrize(\"ignore_unknown_ids\", [False, True])\n def test_retrieve_multiple_ignore_unknown_ids(self, cognite_client, ignore_unknown_ids):\n res = cognite_client.sequences.list(limit=2)\n invalid_id = 1\n try:\n retrieved_assets = cognite_client.sequences.retrieve_multiple(\n [s.id for s in res] + [invalid_id], ignore_unknown_ids=ignore_unknown_ids\n )\n failed = False\n assert {s.id for s in retrieved_assets} == {s.id for s in res}\n except CogniteNotFoundError:\n failed = True\n\n assert failed ^ ignore_unknown_ids\n\n def test_call(self, cognite_client, post_spy):\n with set_request_limit(cognite_client.sequences, 10):\n res = [s for s in cognite_client.sequences(limit=20)]\n\n assert 20 == len(res)\n assert 2 == cognite_client.sequences._post.call_count\n\n def test_list(self, cognite_client, post_spy):\n with set_request_limit(cognite_client.sequences, 10):\n res = cognite_client.sequences.list(limit=20)\n\n assert 20 == len(res)\n assert 2 == cognite_client.sequences._post.call_count\n\n def test_list_assetid_nothing(self, cognite_client):\n res = cognite_client.sequences.list(asset_ids=[12345678910], limit=20)\n assert 0 == len(res)\n\n def test_aggregate(self, cognite_client):\n res = cognite_client.sequences.aggregate(filter=SequenceFilter(name=\"42\"))\n assert res[0].count > 0\n\n def test_search(self, cognite_client):\n res = cognite_client.sequences.search(name=\"42\", filter=SequenceFilter(created_time={\"min\": 0}))\n assert len(res) > 0\n\n def test_update(self, cognite_client, new_seq):\n assert new_seq.metadata == {\"a\": \"b\"}\n update_seq = SequenceUpdate(new_seq.id).name.set(\"newname\").metadata.set(None)\n res = cognite_client.sequences.update(update_seq)\n assert \"newname\" == res.name\n assert res.metadata == {}\n\n def test_update_full(self, cognite_client, new_seq):\n assert new_seq.metadata == {\"a\": \"b\"}\n new_seq.name = \"newname\"\n res = cognite_client.sequences.update(new_seq)\n assert \"newname\" == res.name\n\n def test_update_columns_add_remove_single(self, cognite_client, new_seq):\n assert len(new_seq.columns) == 3\n update_seq = SequenceUpdate(new_seq.id).columns.add(\n {\"valueType\": \"STRING\", \"externalId\": \"user_added\", \"description\": \"some description\"}\n )\n res = cognite_client.sequences.update(update_seq)\n assert len(res.columns) == 4\n assert res.column_external_ids[3] == \"user_added\"\n\n def test_update_columns_add_multiple(self, cognite_client, new_seq):\n assert len(new_seq.columns) == 3\n column_def = [\n {\"valueType\": \"STRING\", \"externalId\": \"user_added\", \"description\": \"some description\"},\n {\"valueType\": \"DOUBLE\", \"externalId\": \"amount_added\"},\n ]\n update_seq = SequenceUpdate(new_seq.id).columns.add(column_def)\n res = cognite_client.sequences.update(update_seq)\n assert len(res.columns) == 5\n assert res.column_external_ids[3:5] == [\"user_added\", \"amount_added\"]\n\n def test_update_columns_remove_single(self, cognite_client, new_seq):\n assert len(new_seq.columns) == 3\n update_seq = SequenceUpdate(new_seq.id).columns.remove(new_seq.columns[0][\"externalId\"])\n res = cognite_client.sequences.update(update_seq)\n assert len(res.columns) == 2\n assert res.columns[0:2] == new_seq.columns[1:3]\n\n def test_update_columns_remove_multiple(self, cognite_client, new_seq):\n assert len(new_seq.columns) == 3\n update_seq = SequenceUpdate(new_seq.id).columns.remove([col[\"externalId\"] for col in new_seq.columns[0:2]])\n res = 
cognite_client.sequences.update(update_seq)\n assert len(res.columns) == 1\n assert res.columns[0] == new_seq.columns[2]\n\n def test_update_columns_modify(self, cognite_client, new_seq):\n assert new_seq.columns[1].get(\"description\") is None\n column_update = [\n SequenceColumnUpdate(external_id=new_seq.columns[0][\"externalId\"]).external_id.set(\"new_col_external_id\"),\n SequenceColumnUpdate(external_id=new_seq.columns[1][\"externalId\"]).description.set(\"my new description\"),\n ]\n update_seq = SequenceUpdate(new_seq.id).columns.modify(column_update)\n res = cognite_client.sequences.update(update_seq)\n assert len(res.columns) == 3\n assert res.columns[0][\"externalId\"] == \"new_col_external_id\"\n assert res.columns[1][\"description\"] == \"my new description\"\n\n def test_get_new(self, cognite_client, new_seq):\n cognite_client.sequences.retrieve(id=new_seq.id)\n # assert [\"DOUBLE\"] == res.column_value_types # soon to change\n assert len(new_seq.columns) == 3\n\n def test_upsert_2_sequence_one_preexisting(self, cognite_client: CogniteClient) -> None:\n # Arrange\n new_sequence = Sequence(\n external_id=\"test_upsert_2_sequence_one_preexisting:new\",\n name=\"my new sequence\",\n columns=[{\"externalId\": \"col1\", \"valueType\": \"STRING\"}],\n )\n preexisting = Sequence(\n external_id=\"test_upsert_2_sequence_one_preexisting:preexisting\",\n name=\"my preexisting sequence\",\n columns=[{\"externalId\": \"col1\", \"valueType\": \"STRING\"}],\n )\n preexisting_update = Sequence._load(preexisting.dump(camel_case=True))\n preexisting_update.name = \"my preexisting sequence updated\"\n\n try:\n created_existing = cognite_client.sequences.create(preexisting)\n assert created_existing.id is not None\n\n # Act\n res = cognite_client.sequences.upsert([new_sequence, preexisting_update], mode=\"replace\")\n\n # Assert\n assert len(res) == 2\n assert new_sequence.external_id == res[0].external_id\n assert preexisting.external_id == res[1].external_id\n assert new_sequence.name == res[0].name\n assert preexisting_update.name == res[1].name\n finally:\n cognite_client.sequences.delete(\n external_id=[new_sequence.external_id, preexisting.external_id], ignore_unknown_ids=True\n )\n","sub_path":"tests/tests_integration/test_api/test_sequences.py","file_name":"test_sequences.py","file_ext":"py","file_size_in_byte":8119,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"486360171","text":"#!/usr/bin/python3\n\nfrom serial import Serial\nimport requests\nimport traceback\nimport time\nfrom BatteryMonitor import BatteryMonitor\nfrom optparse import OptionParser\nfrom threading import Thread\n\n\nclass ChargeMonitor(BatteryMonitor):\n def __init__(self, options, shuntResistance=1.408):\n super().__init__(options, shuntResistance=shuntResistance, voltMeterChannels=[0, 1, 5, 4])\n\n def startCharge(self):\n self.vm.enableRelay()\n\n def stopCharge(self):\n self.vm.disableRelay()\n\n\nclass ChargeTest:\n def __init__(self):\n options, _ = ChargeTest.getOptions()\n self.chargeMon = ChargeMonitor(options, shuntResistance=options.shunt)\n self.batteryID = options.battery_id\n self.run = True\n self.runThread = None\n self.cutOffCurrent = options.i_min\n\n def sendData(self, chargeSession, voltage, current, integratedCurrent, intPower, t):\n session = requests.session()\n session.trust_env = False\n\n lineString = \"batdischarge,battery=%s,charge_session=%s,direction=charge \" % (self.batteryID, chargeSession)\n lineString += \"U_bat=%f,\" % 
voltage\n lineString += \"I_bat=%f,\" % current\n lineString += \"E_int=%f,\" % intPower\n lineString += \"I_int=%f\" % integratedCurrent\n\n timestamp = t.timestamp()\n utc_time = int(timestamp) * 1000000000\n lineString += ' %d' % utc_time\n print(lineString)\n\n try:\n response = session.post('http://192.168.178.220:8090/telegraf', lineString)\n print(response)\n except:\n print('Failed to submit data string %s' % lineString)\n print(traceback.format_exc())\n\n @staticmethod\n def getOptions():\n opt = OptionParser()\n ChargeMonitor.getOptions(opt)\n opt.add_option('-m', '--i_min', dest='i_min', help='Current at which charging stops in mA', type='float', default=50.0)\n opt.add_option('-s', '--shunt', dest='shunt', help='Shunt resistance in Ohm', type='float', default=1.408)\n return opt.parse_args()\n\n def start(self):\n if self.runThread is not None:\n print('Run thread already exists. Aborting start')\n return\n\n print('Creating monitoring thread')\n self.runThread = Thread(target=self.run_a)\n self.runThread.start()\n print('Monitoring thread started')\n\n def run_a(self):\n print('Starting discharge monitor')\n self.chargeMon.start()\n doneCount = 0\n while self.run:\n self.chargeMon.readValues()\n print(self.chargeMon.rawValues)\n t, chargeSession, U_bat, I_bat, int_current, int_power = self.chargeMon.getCurrentState()\n self.sendData(chargeSession, U_bat, I_bat, int_current, int_power, t)\n if (self.cutOffCurrent / 1000) > I_bat:\n doneCount += 1\n if doneCount >= 5:\n self.chargeMon.stopCharge()\n print('Charging complete, capacity: %f' % int_current)\n else:\n doneCount = 0\n time.sleep(5)\n\n def stop(self):\n self.run = False\n self.runThread.join()\n self.runThread = None\n\n\nif __name__ == '__main__':\n cm = ChargeTest()\n cm.start()\n while True:\n res = input('press s to start discharge, x to stop discharge, e to end: ')\n if res == 's':\n cm.chargeMon.startCharge()\n elif res == 'x':\n cm.chargeMon.stopCharge()\n elif res == 'e':\n cm.stop()\n break\n else:\n print('Unknown command')\n\n\n","sub_path":"ChargeMonitor.py","file_name":"ChargeMonitor.py","file_ext":"py","file_size_in_byte":3263,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"9076401","text":"from tkinter import *\r\nimport random\r\n\r\ndef classPick():\r\n classRNG = random.randint(1,8)\r\n if classRNG == 1:\r\n choice = \"Warrior\"\r\n elif classRNG == 2:\r\n choice = \"Paladin\"\r\n elif classRNG == 3:\r\n choice = \"Hunter\"\r\n elif classRNG == 4:\r\n choice = \"Rogue\"\r\n elif classRNG == 5:\r\n choice = \"Priest\"\r\n elif classRNG == 6:\r\n choice = \"Death Knight\"\r\n elif classRNG == 7:\r\n choice = \"Shaman\"\r\n elif classRNG == 8:\r\n choice = \"Mage\"\r\n elif classRNG == 9:\r\n choice = \"Warlock\"\r\n elif classRNG == 10:\r\n choice = \"Monk\"\r\n elif classRNG == 11:\r\n choice = \"Druid\"\r\n elif classRNG == 12:\r\n choice = \"Demon Hunter\"\r\n return choice\r\n\r\ndef spec(choice):\r\n\r\n classChoice = choice\r\n\r\n if classChoice == \"Warrior\":\r\n specRNG = random.randint(1,3)\r\n if specRNG == 1:\r\n specChoice = \"Protection\"\r\n elif specRNG == 2:\r\n specChoice = \"Arms\"\r\n elif specRNG == 3:\r\n specChoice = \"Fury\"\r\n elif classChoice == \"Paladin\":\r\n specRNG = random.randint(1,3)\r\n if specRNG == 1:\r\n specChoice = \"Protection\"\r\n elif specRNG == 2:\r\n specChoice = \"Holy\"\r\n elif specRNG ==3:\r\n specChoice = \"Retribution\"\r\n elif classChoice == \"Hunter\":\r\n specRNG = random.randint(1,3)\r\n 
if specRNG == 1:\r\n specChoice = \"Marksman\"\r\n if specRNG == 2:\r\n specChoice = \"Beast Mastery\"\r\n if specRNG == 3:\r\n specChoice = \"Survival\"\r\n elif classChoice == \"Rogue\":\r\n specRNG = random.randint(1,3)\r\n if specRNG == 1:\r\n specChoice = \"Assassination\"\r\n if specRNG == 2:\r\n specChoice = \"Subtlety\"\r\n if specRNG == 3:\r\n specChoice = \"Outlaw\"\r\n elif classChoice == \"Priest\":\r\n specRNG = random.randint(1,3)\r\n if specRNG == 1:\r\n specChoice = \"Holy\"\r\n if specRNG == 2:\r\n specChoice = \"Discipline\"\r\n if specRNG == 3:\r\n specChoice = \"Shadow\"\r\n elif classChoice == \"Death Knight\":\r\n specRNG = random.randint(1,3)\r\n if specRNG == 1:\r\n specChoice = \"Unholy\"\r\n if specRNG == 2:\r\n specChoice = \"Frost\"\r\n if specRNG == 3:\r\n specChoice = \"Blood\"\r\n elif classChoice == \"Shaman\":\r\n specRNG = random.randint(1,3)\r\n if specRNG == 1:\r\n specChoice = \"Elemental\"\r\n if specRNG == 2:\r\n specChoice = \"Enhancement\"\r\n if specRNG == 3:\r\n specChoice = \"Restoration\"\r\n elif classChoice == \"Mage\":\r\n specRNG = random.randint(1,3)\r\n if specRNG == 1:\r\n specChoice = \"Fire\"\r\n if specRNG == 2:\r\n specChoice = \"Frost\"\r\n if specRNG == 3:\r\n specChoice = \"Arcane\"\r\n elif classChoice == \"Warlock\":\r\n specRNG = random.randint(1,3)\r\n if specRNG == 1:\r\n specChoice = \"Demonology\"\r\n if specRNG == 2:\r\n specChoice = \"Destruction\"\r\n if specRNG == 3:\r\n specChoice = \"Affliction\"\r\n elif classChoice == \"Monk\":\r\n specRNG = random.randint(1,3)\r\n if specRNG == 1:\r\n specChoice = \"Mistweaver\"\r\n if specRNG == 2:\r\n specChoice = \"Brewmaster\"\r\n if specRNG == 3:\r\n specChoice = \"Windwalker\"\r\n elif classChoice == \"Druid\":\r\n specRNG = random.randint(1,4)\r\n if specRNG == 1:\r\n specChoice = \"Feral\"\r\n if specRNG == 2:\r\n specChoice = \"Balance\"\r\n if specRNG == 3:\r\n specChoice = \"Guardian\"\r\n if specRNG == 4:\r\n specChoice = \"Restoration\"\r\n elif classChoice == \"Demon Hunter\":\r\n specRNG = random.randint(1,2)\r\n if specRNG == 1:\r\n specChoice = \"Havoc\"\r\n if specRNG == 2:\r\n specChoice = \"Vengeance\"\r\n return specChoice\r\n \r\n\r\ndef specandclass():\r\n classChoice = classPick()\r\n if classChoice == \"Warrior\":\r\n labelClass.config(fg=\"brown\", bg=\"dark gray\", text=classChoice)\r\n elif classChoice == \"Paladin\":\r\n labelClass.config(fg=\"pink\", bg=\"dark gray\", text=classChoice)\r\n elif classChoice == \"Hunter\":\r\n labelClass.config(fg=\"green\", bg=\"dark gray\", text=classChoice)\r\n elif classChoice == \"Rogue\":\r\n labelClass.config(fg=\"yellow\", bg=\"dark gray\", text=classChoice)\r\n elif classChoice == \"Priest\":\r\n labelClass.config(fg=\"white\", bg=\"dark gray\", text=classChoice)\r\n elif classChoice == \"Death Knight\":\r\n labelClass.config(fg=\"red\", bg=\"dark gray\", text=classChoice)\r\n elif classChoice == \"Shaman\":\r\n labelClass.config(fg=\"blue\", bg=\"dark gray\", text=classChoice)\r\n elif classChoice == \"Mage\":\r\n labelClass.config(fg=\"light blue\", bg=\"dark gray\", text=classChoice)\r\n elif classChoice == \"Warlock\":\r\n labelClass.config(fg=\"purple\", bg=\"dark gray\", text=classChoice)\r\n elif classChoice == \"Monk\":\r\n labelClass.config(fg=\"light green\", bg=\"dark gray\", text=classChoice)\r\n elif classChoice == \"Demon Hunter\":\r\n labelClass.config(fg=\"purple\", bg=\"dark gray\", text=classChoice)\r\n labelClass.config(text=classChoice)\r\n specChoice = spec(classChoice)\r\n 
labelSpec.config(text=specChoice)\r\n\r\n\r\nroot = Tk()\r\n\r\nroot.configure(bg=\"dark gray\")\r\nlabelClass = Label(root, bg=\"dark gray\", text=\"Class\")\r\nlabelClass.grid(row = 0, column = 0, pady = 2)\r\nlabelSpec = Label(root, bg=\"dark gray\", text=\"Spec\")\r\nlabelSpec.grid(row = 2, column = 0, pady = 2)\r\nb = Button(root, text=\"Choose Class / Spec\", command=specandclass)\r\nb.grid(row = 3, column = 0, pady = 20)\r\n\r\nmainloop()\r\n","sub_path":"classchoose.py","file_name":"classchoose.py","file_ext":"py","file_size_in_byte":5752,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"620277777","text":"import psycopg2\nimport json\nimport os\n\n\nconnection = None\n\n\ndef connect():\n global connection\n\n if connection is None:\n create_connection()\n return connection\n\n\ndef create_connection():\n global connection\n\n dir_path = os.path.dirname(os.path.realpath(__file__))\n with open(os.path.join(dir_path, \"../../files/database.json\"), \"r\") as fp:\n db = json.load(fp)\n\n connection = psycopg2.connect(\n host=db[\"host\"],\n database=db[\"database\"],\n user=db[\"username\"],\n password=db[\"password\"]\n )\n\n\ndef reconnect():\n global connection\n\n connection.close()\n create_connection()\n","sub_path":"functions/database/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":640,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"481486616","text":"\"\"\"\nMerge two sorted linked lists and return it as a new list. The new list should be made by splicing together the nodes of the first two lists.\n\nExample:\n\nInput: 1->2->4, 1->3->4\nOutput: 1->1->2->3->4->4\n\"\"\"\n\n\n# Definition for singly-linked list.\nclass ListNode(object): \n def __init__(self, x, node=None):\n self.val = x\n self.next = node\n \n def display(self):\n temp = self\n vals = []\n while temp:\n vals.append(temp.val)\n temp = temp.next\n print(vals)\n\n\n# recursive solution\ndef mergeTwoLists1(l1, l2):\n l3 = ListNode(0)\n \n def merge(l1,l2,lr):\n if l1==None:\n if l2!=None:\n lr.next = ListNode(l2.val)\n merge(l1, l2.next, lr.next)\n elif l2==None:\n if l1!=None:\n lr.next = ListNode(l1.val)\n merge(l1.next, l2, lr.next)\n elif l1.val <= l2.val:\n lr.next = ListNode(l1.val)\n merge(l1.next, l2, lr.next)\n else:\n lr.next = ListNode(l2.val)\n merge(l1, l2.next, lr.next)\n \n merge(l1,l2,l3)\n \n return l3.next\n \n \n# iterative solution\ndef mergeTwoLists2(l1, l2): \n # initialize the return list (lr is fixed for return, temp will move for merging)\n lr = temp = ListNode(0)\n \n while l1 or l2:\n # if l1 exhausts first, fill lr with all the remaining nodes of l2\n if not l1:\n while l2:\n temp.next = ListNode(l2.val)\n temp = temp.next\n l2 = l2.next\n # if l2 exhausts first, fill lr with all the remaining nodes of l1\n elif not l2:\n while l1:\n temp.next = ListNode(l1.val)\n temp = temp.next\n l1 = l1.next\n # otherwise, fill lr with the smaller node between l1 and l2\n else:\n if l1.val <= l2.val:\n temp.next = ListNode(l1.val)\n temp = temp.next\n l1 = l1.next\n else:\n temp.next = ListNode(l2.val)\n temp = temp.next\n l2 = l2.next\n\n return lr.next\n \n\nif __name__ == '__main__':\n l1 = ListNode(1,ListNode(2,ListNode(4)))\n l2 = ListNode(1,ListNode(3,ListNode(4)))\n 
mergeTwoLists2(l1,l2).display()","sub_path":"LeetCode/21_merge-two-sorted-lists.py","file_name":"21_merge-two-sorted-lists.py","file_ext":"py","file_size_in_byte":2335,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"600361697","text":"# [START imports]\nimport os\nimport urllib\n\nfrom google.appengine.api import users\nfrom google.appengine.ext import ndb\n\nimport jinja2\nimport webapp2\n\n\nJINJA_ENVIRONMENT = jinja2.Environment(\n loader=jinja2.FileSystemLoader(os.path.dirname(__file__)),\n extensions=['jinja2.ext.autoescape'],\n autoescape=True)\n# [END imports]\n\nDEFAULT_GENRE_NAME = 'history'\nDEFAULT_ID = 0\n\ndef genre_key(genre_name=DEFAULT_GENRE_NAME):\n \"\"\"Constructs a Datastore key for a Genre entity.\n We use genre_name as the key.\n \"\"\"\n return ndb.Key('genre', genre_name)\n\ndef shopper_id(cart_id=DEFAULT_ID):\n return ndb.Key('cart', cart_id)\n\nclass BookinCart(ndb.Model):\n writer = ndb.StringProperty()\n title = ndb.StringProperty()\n price = ndb.FloatProperty(indexed=False)\n num = ndb.IntegerProperty(indexed=False)\n date = ndb.DateTimeProperty(auto_now_add=True)\n\nclass Author(ndb.Model):\n \"\"\"Sub model for representing an author.\"\"\"\n identity = ndb.StringProperty(indexed=False)\n email = ndb.StringProperty(indexed=False)\n\nclass Greeting(ndb.Model):\n \"\"\"A main model for representing an individual Genre entry.\"\"\"\n author = ndb.StructuredProperty(Author)\n writer = ndb.StringProperty(indexed=False)\n title = ndb.StringProperty(indexed=False)\n price = ndb.FloatProperty(indexed=False)\n date = ndb.DateTimeProperty(auto_now_add=True)\n\n# [START main_page]\nclass MainPage(webapp2.RequestHandler):\n def get(self):\n login = False\n url = ''\n user = users.get_current_user()\n person = ''\n if user:\n login = True\n url = users.create_logout_url(self.request.uri)\n person = user.nickname()\n else:\n url = users.create_login_url(self.request.uri)\n\n template = JINJA_ENVIRONMENT.get_template('index.html')\n\n template_values = {\n 'url': url,\n 'user': user,\n 'login': login,\n 'person': person,\n }\n\n self.response.write(template.render(template_values))\n# [END main_page]\n\nclass SearchPage(webapp2.RequestHandler):\n def get(self):\n genre_name = self.request.get('genre_name',\n DEFAULT_GENRE_NAME)\n genre_name = genre_name.lower()\n\n writer = self.request.get('writer')\n writer = writer.lower()\n\n greetings_query = Greeting.query(\n ancestor=genre_key(genre_name)).order(-Greeting.date)\n greetings = greetings_query.fetch()\n\n login = False\n user = users.get_current_user()\n if user:\n login = True\n rightone = []\n emp = False\n if(writer!=\"\"):\n for g in greetings:\n if writer in g.writer.lower():\n rightone.append(g)\n\n if len(rightone)==0:\n emp = True\n template_values = {\n 'genre_name': urllib.quote_plus(genre_name),\n 'writer': urllib.quote_plus(writer),\n 'rightone': rightone,\n 'emp': emp,\n 'user': user,\n 'login': login,\n }\n template = JINJA_ENVIRONMENT.get_template('search.html')\n self.response.write(template.render(template_values))\n\n def post(self):\n genre_name = self.request.get('genre_name', DEFAULT_GENRE_NAME).lower()\n writer = self.request.get('writer').lower()\n if genre_name!=\"\" and writer!=\"\":\n self.redirect('/search?genre_name=' + genre_name + '&writer=' + writer)\n else:\n self.redirect('/err')\n\nclass Display(webapp2.RequestHandler):\n\n def get(self):\n genre_name = self.request.get('genre_name',\n DEFAULT_GENRE_NAME)\n genre_name = 
genre_name.lower()\n greetings_query = Greeting.query(\n ancestor=genre_key(genre_name)).order(-Greeting.date)\n greetings = greetings_query.fetch(10)\n\n user = users.get_current_user()\n login = False\n if user:\n login = True\n\n template_values = {\n 'greetings': greetings,\n 'genre_name': urllib.quote_plus(genre_name),\n 'user': user,\n 'login': login,\n }\n\n template = JINJA_ENVIRONMENT.get_template('display.html')\n self.response.write(template.render(template_values))\n\nclass Shop(webapp2.RequestHandler):\n def get(self):\n user = users.get_current_user()\n books = []\n s = 0.0\n login = False\n\n if user:\n login = True\n cart_id = user.user_id()\n q = BookinCart.query(ancestor=shopper_id(cart_id)).order(-Greeting.date)\n books = q.fetch()\n for b in books:\n s += b.price * b.num\n\n empty = (s==0.0)\n\n template_values = {\n 'books': books,\n 'user': user,\n 'total': s,\n 'login': login,\n 'empty': empty,\n }\n\n template = JINJA_ENVIRONMENT.get_template('cart.html')\n self.response.write(template.render(template_values))\n\n def post(self):\n cart_id = users.get_current_user().user_id()\n q = BookinCart.query(ancestor=shopper_id(cart_id))\n for i in q:\n i.key.delete()\n\n self.redirect('/thankyou')\n\nclass AddtoCart(webapp2.RequestHandler):\n def post(self):\n cart_id = self.request.get('cart_id')\n w = self.request.get('writer').strip()\n t = self.request.get('title').strip()\n p = self.request.get('price')\n\n book = BookinCart(parent=shopper_id(cart_id))\n\n q = BookinCart.query(BookinCart.writer==w, BookinCart.title==t,\n ancestor=shopper_id(cart_id))\n\n if q.get():\n for i in q:\n #k = i.key.get()\n i.num += 1\n i.put()\n else:\n book.writer = w\n book.title = t\n book.price = float(p)\n book.num = 1\n book.put()\n\n self.redirect('/cart')\n\nclass BookRepository(webapp2.RequestHandler):\n def post(self):\n genre_name = self.request.get('genre_name',\n DEFAULT_GENRE_NAME)\n greeting = Greeting(parent=genre_key(genre_name))\n\n if users.get_current_user():\n greeting.author = Author(\n identity=users.get_current_user().user_id(),\n email=users.get_current_user().email())\n\n greeting.writer = self.request.get('writer').strip()\n greeting.title = self.request.get('title').strip()\n p = self.request.get('price')\n if greeting.writer!=\"\" and greeting.title!=\"\" and p!=\"\":\n greeting.price = float(p)\n greeting.put()\n self.redirect('/')\n else:\n self.redirect('/err')\n\nclass Enter(webapp2.RequestHandler):\n def get(self):\n genre_name = self.request.get('genre_name',\n DEFAULT_GENRE_NAME)\n genre_name = genre_name.lower()\n query_params = {'genre_name': urllib.quote_plus(genre_name)}\n\n template = JINJA_ENVIRONMENT.get_template('enter.html')\n self.response.write(template.render(query_params))\n\nclass Err(webapp2.RequestHandler):\n def get(self):\n template = JINJA_ENVIRONMENT.get_template('err.html')\n self.response.write(template.render())\n\nclass Thank(webapp2.RequestHandler):\n def get(self):\n template = JINJA_ENVIRONMENT.get_template('thankyou.html')\n self.response.write(template.render())\n\n def post(self):\n cart_id = users.get_current_user().user_id()\n w = self.request.get('writer').strip()\n t = self.request.get('title').strip()\n q = BookinCart.query(BookinCart.writer==w, BookinCart.title==t,\n ancestor=shopper_id(cart_id))\n\n for i in q:\n if i.key.get().num == 1:\n i.key.delete()\n else:\n i.num -= 1\n i.put()\n\n self.redirect('/cart')\n\n\napp = webapp2.WSGIApplication([\n ('/', MainPage),\n ('/sign', BookRepository),\n ('/display', Display),\n 
('/search', SearchPage),\n ('/enter', Enter),\n ('/add', AddtoCart),\n ('/cart', Shop),\n ('/err', Err),\n ('/thankyou', Thank)\n], debug=True)\n\n\n","sub_path":"repository.py","file_name":"repository.py","file_ext":"py","file_size_in_byte":8318,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"410385723","text":"\n\n# class header\nclass _GESTATION():\n\tdef __init__(self,): \n\t\tself.name = \"GESTATION\"\n\t\tself.definitions = [u\"(the period of) the development of a child or young animal while it is still inside its mother's body: \", u'(the period of) the development of ideas, thoughts, or plans: ']\n\n\t\tself.parents = []\n\t\tself.children = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'nouns'\n\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/nouns/_gestation.py","file_name":"_gestation.py","file_ext":"py","file_size_in_byte":456,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"615013280","text":"# chapter9_4 Yeongje's answer\n\ntext = open('vocabulary.text', 'r')\n\ndict = {}\nword_list = []\nfor line in text:\n list = line.split(\": \")\n vocab = list[0]\n mean = list[1].strip()\n word_list.append(vocab)\n dict[vocab] = mean\n\nfrom random import randint\nwhile True:\n i = randint(0, len(word_list) - 1)\n answer = raw_input(\"%s: \" % word_list[i])\n if answer == dict[word_list[i]]:\n print(\"yy!\")\n # exclude \"q\" here so the quit branch below is reachable\n elif answer != dict[word_list[i]] and answer != \"q\":\n print(\"no solution\")\n elif answer == 
\"q\":\n break","sub_path":"hello_world18.py","file_name":"hello_world18.py","file_ext":"py","file_size_in_byte":521,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"284446120","text":"# Copyright 2019 The TensorNetwork Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Stochastic Network Contraction.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport random\nfrom typing import Tuple, Set, Optional, Dict\nfrom tensornetwork import network\nfrom tensornetwork import network_components\n\n\ndef find_parallel(edge: network_components.Edge\n ) -> Tuple[Set[network_components.Edge], int]:\n \"\"\"Finds all edges shared between the nodes connected with the given edge.\n\n Args:\n edge: A non-dangling edge between two different nodes.\n\n Returns:\n parallel_edges: Edges that are parallel to the given edge.\n parallel_dim: Product of sizes of all parallel edges.\n \"\"\"\n if edge.is_dangling():\n raise ValueError(\n \"Cannot find parallel edges for dangling edge {}\".format(edge))\n nodes = {edge.node1, edge.node2}\n parallel_dim = 1\n parallel_edges = set()\n for e in edge.node1.edges:\n if set(e.get_nodes()) == nodes:\n parallel_edges.add(e)\n edge_size = list(e.node1.get_tensor().shape)[e.axis1]\n if edge_size is not None:\n parallel_dim *= edge_size\n return parallel_edges, parallel_dim\n\n\ndef contract_trace_edges(\n net: network.TensorNetwork, none_value: int = 1\n) -> Tuple[network.TensorNetwork, Dict[network_components.Node, int],\n Dict[network_components.Node, int]]:\n \"\"\"Contracts trace edges and calculate tensor sizes for every node.\n\n Tensor size is defined as the product of sizes of each of edges (axes).\n\n Args:\n net: TensorNetwork to contract all the trace edges of.\n none_value: The value that None dimensions contribute to the tensor size.\n Unit (default) means that None dimensions are neglected.\n\n Returns:\n A tuple containing:\n net: \n Given TensorNetwork with all its trace edges contracted.\n node_sizes: \n Map from nodes in the network to their total size.\n node_sizes_none: \n Map from nodes that have at least one None dimension to\n their size.\n \"\"\"\n # Keep node sizes in memory for cost calculation\n node_sizes, node_sizes_none = dict(), dict()\n initial_node_set = set(net.nodes_set)\n for node in initial_node_set:\n trace_edges, flag_none, total_dim = set(), False, 1\n new_node = node\n for edge, dim in zip(node.edges, list(node.get_tensor().shape)):\n if edge.node1 is edge.node2:\n if edge not in trace_edges:\n # Contract trace edge\n new_node = net.contract(edge)\n trace_edges.add(edge)\n else:\n if dim is None:\n total_dim *= none_value\n flag_none = True\n else:\n total_dim *= dim\n if flag_none:\n node_sizes_none[new_node] = total_dim\n else:\n node_sizes[new_node] = total_dim\n return net, node_sizes, node_sizes_none\n\n\ndef stochastic(net: network.TensorNetwork,\n max_rejections: int,\n threshold: 
Optional[int] = None,\n none_value: int = 1) -> network.TensorNetwork:\n \"\"\"Contracts a connected network by stochastically picking edges.\n\n Algorithm 2 on page 7 of https://doi.org/10.1371/journal.pone.0208510.\n Cost calculation is slightly modified here:\n If A and B are the tensors that share the given `edge`, cost is defined as:\n `cost = dims(A @ B) - max(dims(A), dims(B))`, where\n `@` denotes contraction of all shared edges via `contract_between` and\n `dims(X)` is the total dimension of tensor X (product of sizes of all axes).\n\n Args:\n net: Connected TensorNetwork to contract fully.\n max_rejections: Maximum number of rejections before you increase threshold.\n threshold: Initial value for the threshold.\n none_value: The value of None dimensions in the cost calculation.\n\n Returns:\n TensorNetwork with a single node after fully contracting.\n \"\"\"\n net, node_sizes, node_sizes_none = contract_trace_edges(net, none_value)\n if threshold is None:\n # Set threshold as the maximum tensor size in the network\n # ignoring nodes with None sizes.\n threshold = max(node_sizes.values())\n node_sizes.update(node_sizes_none)\n\n rejections = 0\n nondangling_edges = net.get_all_nondangling()\n while nondangling_edges:\n edge = random.choice(tuple(nondangling_edges))\n shared_edges, shared_dim = find_parallel(edge)\n new_dim = ((node_sizes[edge.node1] // shared_dim) *\n (node_sizes[edge.node2] // shared_dim))\n cost = new_dim - max(node_sizes[edge.node1], node_sizes[edge.node2])\n if cost <= threshold:\n node_sizes.pop(edge.node1)\n node_sizes.pop(edge.node2)\n node_sizes[net.contract_parallel(edge)] = new_dim\n nondangling_edges -= shared_edges\n rejections = 0\n else:\n rejections += 1\n if rejections > max_rejections:\n threshold *= 2\n rejections = 0\n return net\n","sub_path":"tensornetwork/contractors/stochastic_contractor.py","file_name":"stochastic_contractor.py","file_ext":"py","file_size_in_byte":5373,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"502132898","text":"\"\"\"\nGiven two sorted arrays nums1 and nums2 of size m and n.\n\nFind the median of the two sorted arrays; the algorithm is required to run in O(log(m + n)) time.\n\nYou may assume that nums1 and nums2 are not both empty.\n\nExample 1:\n\nnums1 = [1, 3]\nnums2 = [2]\n\nThe median is 2.0\n\nExample 2:\n\nnums1 = [1, 2]\nnums2 = [3, 4]\n\nThe median is (2 + 3)/2 = 2.5\n\"\"\"\n\n\n# Note: this merge-based solution actually runs in O(m + n) time.\nclass Solution1:\n def findMedianSortedArrays(self, nums1, nums2):\n i = j = 0\n t = list()\n while i <= len(nums1) and j <= len(nums2):\n if i == len(nums1):\n t += nums2[j:]\n break\n elif j == len(nums2):\n t += nums1[i:]\n break\n if nums1[i] <= nums2[j]:\n t.append(nums1[i])\n i += 1\n else:\n t.append(nums2[j])\n j += 1\n n = len(t)\n if n == 1:\n return t[0]\n if n == 2:\n return (t[0]+t[1])/2\n # for even n, (n-1)/2 ends in .5, so the modulo is nonzero and we average the two middle values\n if ((n-1)/2) % ((n-1)//2) != 0:\n result = (t[(n-1)//2] + t[(n-1)//2 + 1])/2\n else:\n result = t[(n-1)//2]\n return result\n\n\ndef main():\n nums1 = [0]\n nums2 = [2, 3]\n s = Solution1()\n result = s.findMedianSortedArrays(nums1, nums2)\n print(result)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"median_of_two_sorted_arrays.py","file_name":"median_of_two_sorted_arrays.py","file_ext":"py","file_size_in_byte":1334,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"129389131","text":"import glob\nimport os\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom matplotlib.font_manager import FontProperties\n\n\nclass Failed_Experiment_Analysis():\n\n @staticmethod\n def create_results_for_failed_experiments(path_to_results, 
path_to_for_malpaca_files, data_set_name):\n\n for_malpaca_folders = [f.path for f in os.scandir(path_to_for_malpaca_files) if f.is_dir()]\n for_malpaca_folders = [(x, os.path.basename(x)) for x in for_malpaca_folders]\n\n results_folders = [f.path for f in os.scandir(path_to_results) if f.is_dir()]\n results_folders = [os.path.basename(x) for x in results_folders]\n\n failed_experiments = []\n\n for path, for_malpaca_name in for_malpaca_folders:\n if for_malpaca_name not in results_folders:\n failed_experiments.append((path, for_malpaca_name))\n\n for path, for_malpaca_name in failed_experiments:\n\n csv_files = glob.glob(path + \"/*.csv\")\n\n for csv_index, csv_file in enumerate(csv_files):\n csv_df = pd.read_csv(csv_file)\n if csv_index == 0:\n combined_summary_df = csv_df\n else:\n combined_summary_df = combined_summary_df.append(csv_df)\n\n new_results_path = path_to_results + \"/\" + for_malpaca_name + \"_failed\"\n new_csv_path = new_results_path + \"/combined_summary.csv\"\n path_detailed_label_csv = new_results_path + \"/detailed_length_summary.csv\"\n path_detailed_label_table = new_results_path + \"/detailed_length_summary.png\"\n shortened_summary_path = new_results_path + \"/shortened_summary.csv\"\n overall_summary_path = new_results_path + \"/overall_summary.csv\"\n\n os.mkdir(new_results_path)\n\n combined_summary_df.to_csv(new_csv_path, index=False)\n\n total_amount_connections = len(combined_summary_df.index)\n\n dl_average_length_df = combined_summary_df.groupby(\"detailed_label\")[\n \"connection_length\"].mean().to_frame().reset_index()\n dl_average_length_df = dl_average_length_df.rename(\n columns={\"connection_length\": \"avg_connection_length\"})\n dl_average_length_df[\"avg_connection_length\"] = dl_average_length_df[\"avg_connection_length\"].apply(\n lambda x: round(x, 2))\n dl_con_count_df = combined_summary_df.groupby(\"detailed_label\")[\n \"connection_length\"].count().to_frame().reset_index()\n dl_con_count_df = dl_con_count_df.rename(columns={\"connection_length\": \"connection_count\"})\n detailed_label_info_df = dl_average_length_df.merge(right=dl_con_count_df, on=\"detailed_label\")\n detailed_label_info_df[\"ratio\"] = round(\n (detailed_label_info_df[\"connection_count\"] / total_amount_connections) * 100, 4)\n detailed_label_info_df = detailed_label_info_df.sort_values(by=\"connection_count\", ascending=False)\n detailed_label_info_df.to_csv(path_detailed_label_csv, index=False)\n\n fig, ax = plt.subplots()\n fig.patch.set_visible(False)\n ax.axis('off')\n ax.axis('tight')\n table = ax.table(cellText=detailed_label_info_df.values, colLabels=detailed_label_info_df.columns,\n loc='center',\n cellLoc='center')\n table.auto_set_column_width(col=list(range(len(detailed_label_info_df.columns))))\n for (row, col), cell in table.get_celld().items():\n if (row == 0):\n cell.set_text_props(fontproperties=FontProperties(weight='bold'))\n fig.tight_layout(pad=3.0)\n plt.savefig(path_detailed_label_table, dpi=1200, bbox_inches='tight')\n plt.close()\n plt.clf()\n\n\n data_shortened = {\n \"validity_index\": \"nan\",\n \"shilouette_score\": \"nan\",\n\n \"noise_percentage\": \"nan\",\n \"number_clusters\": \"nan\",\n\n \"cohesion_score\": \"nan\",\n \"purity_score\": \"nan\",\n\n \"avg_cluster_probability\": \"nan\",\n \"avg_clustering_error\": \"nan\"}\n\n shortened_summary = pd.DataFrame(data_shortened, index=[0])\n shortened_summary.to_csv(shortened_summary_path, index=False)\n\n\n data_overall = {\n \"total_time_processing\" : \"nan\",\n \"validity_index\" : 
\"nan\",\n \"shilouette_score\" : \"nan\",\n \"total_number_connections\" : \"nan\",\n \"total_number_packets\" : \"nan\",\n \"total_number_clusters\" : \"nan\",\n \"avg_cluster_size\" : \"nan\",\n \"std_cluster_size\" : \"nan\",\n \"noise_percentage\" : \"nan\",\n \"avg_label_cohesion\" : \"nan\",\n \"avg_detailed_label_cohesion\" : \"nan\",\n \"avg_application_name_cohesion\" : \"nan\",\n \"avg_application_category_name_cohesion\" : \"nan\",\n \"avg_name_cohesion\" : \"nan\",\n \"avg_label_purity\" : \"nan\",\n \"avg_detailed_label_purity\" : \"nan\",\n \"avg_application_name_purity\" : \"nan\",\n \"avg_application_category_name_purity\" : \"nan\",\n \"avg_name_purity\" : \"nan\",\n \"avg_cluster_probability\" : \"nan\",\n \"avg_clustering_error\" : \"nan\"\n }\n\n overall_summary = pd.DataFrame(data_overall, index=[0])\n overall_summary.to_csv(overall_summary_path, index=False)","sub_path":"src/scripts/result_analysis/failed_experiments_analysis.py","file_name":"failed_experiments_analysis.py","file_ext":"py","file_size_in_byte":5513,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"146975564","text":"\"\"\"Config flow for ZHA.\"\"\"\nfrom __future__ import annotations\n\nimport asyncio\nimport contextlib\nimport logging\nimport os\nfrom typing import Any\n\nfrom zigpy.application import ControllerApplication\nimport zigpy.backups\nfrom zigpy.config import CONF_DEVICE, CONF_DEVICE_PATH\nfrom zigpy.exceptions import NetworkNotFormed\n\nfrom homeassistant.core import HomeAssistant\n\nfrom .core.const import (\n CONF_DATABASE,\n CONF_ZIGPY,\n DATA_ZHA,\n DATA_ZHA_CONFIG,\n DEFAULT_DATABASE_NAME,\n RadioType,\n)\n\n# Only the common radio types will be autoprobed, ordered by new device popularity.\n# XBee takes too long to probe since it scans through all possible bauds and likely has\n# very few users to begin with.\nAUTOPROBE_RADIOS = (\n RadioType.ezsp,\n RadioType.znp,\n RadioType.deconz,\n RadioType.zigate,\n)\n\nCONNECT_DELAY_S = 1.0\n\n_LOGGER = logging.getLogger(__name__)\n\n\nclass ZhaRadioManager:\n \"\"\"Helper class with radio related functionality.\"\"\"\n\n hass: HomeAssistant\n\n def __init__(self) -> None:\n \"\"\"Initialize ZhaRadioManager instance.\"\"\"\n self.device_path: str | None = None\n self.device_settings: dict[str, Any] | None = None\n self.radio_type: RadioType | None = None\n self.current_settings: zigpy.backups.NetworkBackup | None = None\n self.backups: list[zigpy.backups.NetworkBackup] = []\n self.chosen_backup: zigpy.backups.NetworkBackup | None = None\n\n @contextlib.asynccontextmanager\n async def _connect_zigpy_app(self) -> ControllerApplication:\n \"\"\"Connect to the radio with the current config and then clean up.\"\"\"\n assert self.radio_type is not None\n\n config = self.hass.data.get(DATA_ZHA, {}).get(DATA_ZHA_CONFIG, {})\n app_config = config.get(CONF_ZIGPY, {}).copy()\n\n database_path = config.get(\n CONF_DATABASE,\n self.hass.config.path(DEFAULT_DATABASE_NAME),\n )\n\n # Don't create `zigbee.db` if it doesn't already exist\n if not await self.hass.async_add_executor_job(os.path.exists, database_path):\n database_path = None\n\n app_config[CONF_DATABASE] = database_path\n app_config[CONF_DEVICE] = self.device_settings\n app_config = self.radio_type.controller.SCHEMA(app_config)\n\n app = await self.radio_type.controller.new(\n app_config, auto_form=False, start_radio=False\n )\n\n try:\n await app.connect()\n yield app\n finally:\n await app.disconnect()\n await 
asyncio.sleep(CONNECT_DELAY_S)\n\n async def restore_backup(\n self, backup: zigpy.backups.NetworkBackup, **kwargs: Any\n ) -> None:\n \"\"\"Restore the provided network backup, passing through kwargs.\"\"\"\n if self.current_settings is not None and self.current_settings.supersedes(\n self.chosen_backup\n ):\n return\n\n async with self._connect_zigpy_app() as app:\n await app.backups.restore_backup(backup, **kwargs)\n\n def parse_radio_type(self, radio_type: str) -> RadioType:\n \"\"\"Parse a radio type name, accounting for past aliases.\"\"\"\n if radio_type == \"efr32\":\n return RadioType.ezsp\n\n return RadioType[radio_type]\n\n async def detect_radio_type(self) -> bool:\n \"\"\"Probe all radio types on the current port.\"\"\"\n for radio in AUTOPROBE_RADIOS:\n _LOGGER.debug(\"Attempting to probe radio type %s\", radio)\n\n dev_config = radio.controller.SCHEMA_DEVICE(\n {CONF_DEVICE_PATH: self.device_path}\n )\n probe_result = await radio.controller.probe(dev_config)\n\n if not probe_result:\n continue\n\n # Radio library probing can succeed and return new device settings\n if isinstance(probe_result, dict):\n dev_config = probe_result\n\n self.radio_type = radio\n self.device_settings = dev_config\n\n return True\n\n return False\n\n async def async_load_network_settings(self, create_backup: bool = False) -> None:\n \"\"\"Connect to the radio and load its current network settings.\"\"\"\n async with self._connect_zigpy_app() as app:\n # Check if the stick has any settings and load them\n try:\n await app.load_network_info()\n except NetworkNotFormed:\n pass\n else:\n self.current_settings = zigpy.backups.NetworkBackup(\n network_info=app.state.network_info,\n node_info=app.state.node_info,\n )\n\n if create_backup:\n await app.backups.create_backup()\n\n # The list of backups will always exist\n self.backups = app.backups.backups.copy()\n\n async def async_form_network(self) -> None:\n \"\"\"Form a brand new network.\"\"\"\n async with self._connect_zigpy_app() as app:\n await app.form_network()\n\n async def async_reset_adapter(self) -> None:\n \"\"\"Reset the current adapter.\"\"\"\n async with self._connect_zigpy_app() as app:\n await app.reset_network_info()\n","sub_path":"homeassistant/components/zha/radio_manager.py","file_name":"radio_manager.py","file_ext":"py","file_size_in_byte":5182,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"210919928","text":"import openpyxl\nimport warnings\nimport datetime\nimport sys\nimport shutil\n\n\n# return the first and last day of last week\ndef last_week():\n today = datetime.datetime.now()\n\n weekday = today.weekday()\n last_monday = today + datetime.timedelta(days=(-7 - weekday))\n last_saturday = today + datetime.timedelta(days=(-2 - weekday))\n\n return last_monday.strftime(\"%Y-%m-%d\"), last_saturday.strftime(\"%Y-%m-%d\")\n\n\ndef show(begin, end, values):\n # column number for \"Possible Issue From\" in Weekly sheet\n col = 9\n\n row = -1\n result = {}\n print(f\"Concession Report From {begin} to {end}:\")\n for v in values:\n row += 1\n # skip the title row\n if row == 0:\n continue\n issue_from = v[col]\n # fix: TypeError: 'NoneType' object is not subscriptable\n try:\n # Keep the first 6 characters only for easy sum up\n # customer == customer / Ingram == Customer/Amazon ...\n issue_from = issue_from[:6].strip().title()\n except TypeError:\n print(issue_from)\n continue\n\n if result.get(issue_from) is None:\n result[issue_from] = 0\n\n result[issue_from] += 1\n\n # 
print(f\"{result}\")\n for k, v in result.items():\n print(f\"{k}: {v}\")\n print(f\"Total: {row}\")\n\n\ndef main(begin, end, src, dst):\n warnings.simplefilter(\"ignore\")\n wb = openpyxl.load_workbook(src, data_only=True)\n # Restore to default\n warnings.simplefilter(\"default\")\n\n new_wb = openpyxl.Workbook()\n new_sheet = new_wb.active\n\n # write title to the first line of the new file\n writen = False\n for sheet in wb:\n title = sheet.title\n if title == \"Sum\" or title == \"Weekly\":\n continue\n\n n = 0\n for row in sheet.values:\n n += 1\n # write the first line as title line\n if n == 1 and not writen:\n title_row = list(row[:])\n title_row.insert(0, \"Tester\")\n new_sheet.append(title_row)\n writen = True\n\n if n < 2:\n continue\n\n # ignore blank row or non data row\n try:\n col_date = row[1].strftime(\"%Y-%m-%d\")\n except:\n col_date = \"2020-01-01\"\n pass\n\n # if \"2020-05-18\" <= col_date <= \"2020-05-22\":\n if begin <= col_date <= end:\n new_row = [v for v in row]\n new_row[1] = col_date\n # tester in first line\n new_row.insert(0, title)\n new_sheet.append(new_row)\n\n try:\n new_wb.save(dst)\n except PermissionError:\n print(f\"Sorry, I can't save file {dst}. Please close the file and try again.\")\n\n return new_sheet.values\n # show(begin, end, new_sheet.values)\n\n\n# Main Function\nif __name__ == \"__main__\":\n # Original file\n SRC_CONCESSION_FILE = \"C:/Users/caguoa00/OneDrive - Ingram Micro/Work/Concession v2.xlsx\"\n DST_CONCESSION_FILE = \"concession/Concession v2.xlsx\"\n # Weekly file - new\n WEEKLY_FILE = \"concession/weekly.xlsx\"\n\n begin = \"\"\n end = \"\"\n if len(sys.argv) == 3:\n begin = sys.argv[1]\n end = sys.argv[2]\n else:\n begin, end = last_week()\n\n # Work on the local copy of the file to prevent permission error when others open the file\n shutil.copyfile(SRC_CONCESSION_FILE, DST_CONCESSION_FILE)\n\n values = main(begin, end, DST_CONCESSION_FILE, WEEKLY_FILE)\n show(begin, end, values)\n","sub_path":"concession.py","file_name":"concession.py","file_ext":"py","file_size_in_byte":3528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"628577154","text":"import logging\nimport pandas as pd\nimport numpy as np\n\nfrom prior_work.cartograph.border_graph.Graph import Graph\n\ndef preprocess_file(xy_embedding_csv, cluster_group_csv):\n\n xy_embedding_df = pd.read_csv(xy_embedding_csv)\n cluster_groups_df = pd.read_csv(cluster_group_csv)\n\n # Merge the xy_embedding df and cluster_groups_df\n combined_df = pd.merge(xy_embedding_df, cluster_groups_df, on='article_id')\n\n points = np.column_stack((combined_df['x'].values, combined_df['y'].values))\n cluster_list = combined_df['country'].values.astype(int)\n article_id_list = combined_df['article_id'].values.astype(int)\n\n return points, cluster_list, article_id_list\n\n\nif __name__ == '__main__':\n import sys\n if len(sys.argv) != 4:\n sys.stderr.write('Usage: %s map_directory' % sys.argv[0])\n sys.exit(1)\n\n experiment_directory, xy_embeddings, cluster_groups = sys.argv[1:]\n\n points, cluster_list, article_id_list = preprocess_file(experiment_directory + xy_embeddings, experiment_directory + cluster_groups)\n g = Graph(points, cluster_list, article_id_list)\n g.export_boundaries(experiment_directory)\n 
g.export_polygons(experiment_directory)\n","sub_path":"old/cartograph/border_creator.py","file_name":"border_creator.py","file_ext":"py","file_size_in_byte":1186,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"12291203","text":"import numpy as np\n\nclass SinglePendulum:\n\n def __init__(self, theta, theta_dot, **kwargs):\n self.name = \"SinglePendulum\"\n self.GRAVITY = 9.81\n self.MASS = 0.3\n self.LENGTH = 0.2 # actually half the pole's length\n self.DRAG = 0.001\n self.set_param(**kwargs)\n\n self.INERTIA = (self.MASS * (2 * self.LENGTH)**2)/12\n \n self.state = (theta, theta_dot)\n self.input = 0.\n\n def dynamics(self, theta, theta_dot, u):\n # rigid-rod pendulum: INERTIA * theta_2dot = m*g*l*sin(theta) - drag*theta_dot + u\n theta_2dot = (self.LENGTH * self.MASS * self.GRAVITY * np.sin(theta) - self.DRAG * theta_dot + u)/self.INERTIA\n\n return np.array([theta_dot, theta_2dot])\n\n def step(self, dt):\n # classic fourth-order Runge-Kutta (RK4) integration step\n current_state = np.array(self.state)\n k0 = dt * self.dynamics(*current_state, self.input)\n k1 = dt * self.dynamics(*current_state + k0/2, self.input)\n k2 = dt * self.dynamics(*current_state + k1/2, self.input)\n k3 = dt * self.dynamics(*current_state + k2, self.input)\n\n state_dot = (k0 + 2 * (k1 + k2) + k3)/6\n self.state = tuple(current_state + state_dot)\n\n return self.state\n\n def get_param(self):\n return {\"mass\": self.MASS, \"length\": self.LENGTH, \"drag\": self.DRAG}\n\n def set_param(self, **kwargs):\n for key in kwargs:\n if key == \"mass\":\n self.MASS = kwargs[key]\n continue\n if key == \"length\":\n self.LENGTH = kwargs[key]\n continue\n if key == \"drag\":\n self.DRAG = kwargs[key]\n continue\n raise TypeError(\"Unexpected key {key!r} in kwargs\".format(key=key))\n self.INERTIA = (self.MASS * (2 * self.LENGTH)**2)/12\n","sub_path":"SinglePendulum/SinglePendulum.py","file_name":"SinglePendulum.py","file_ext":"py","file_size_in_byte":1720,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"297280811","text":"#!/usr/bin/env python\n\nfrom __future__ import print_function\n\nimport sys\nimport math\nimport random\nimport cPickle as pickle\nimport time\n\nfrom healpy.pixelfunc import *\nimport healpy\nimport numpy\nimport matplotlib.pyplot as plt\n\nimport scipy\nimport scipy.misc\nimport scipy.sparse\n\ndef pix_circle(nside, ipix, radius, nest=False):\n assert False\n unvisited = set([ipix])\n visited = set()\n for _ in range(radius):\n while unvisited:\n ipix = unvisited.pop()\n neighbours = set(get_all_neighbours(nside, ipix, phi=None,\n nest=nest))\n neighbours -= visited\n neighbours.discard(-1)\n visited |= neighbours\n visited.add(ipix)\n unvisited |= visited\n return visited\n\ndef draw_circles():\n assert False\n fig = plt.figure()\n ax = fig.add_subplot(1,1,1)\n nside = 64\n npix = nside2npix(nside)\n xmap = numpy.zeros(npix)\n for i in range(16, 0, -1):\n highlight = pix_circle(nside, 0, i, nest=False)\n for h in highlight:\n xmap[h] = i\n img_x = healpy.visufunc.mollview(\n xmap,\n nest=False,\n return_projected_map=True)\n ax.imshow(img_x)\n ax.axis('off')\n fig.savefig('indicator.png')\n return 0\n\n# Read in a list of points. Remember their indices. You'll need them later.\n# Accept a distance kernel. 
It should return the variance between two\n# points.\n# Accept neighborsof: given a point, return the neighbors of this point.\n# def pts2invcov(points, kernel, neighborsof):\n# Generator: points\n# function(point, point) -> float: kernel\n# function(point) -> [point, ...]: neighborsof\n\ndef cast_neighborsof(order, nest=False):\n nside = order2nside(order)\n def neighborsof(ipix):\n outarr = get_all_neighbours(nside, ipix, phi=None, nest=nest)\n return [ x for x in outarr if x != -1 ]\n return neighborsof\n\ndef pointsof(order):\n return range(nside2npix(order2nside(order)))\n\ndef cast_kernel(order, nest=False):\n nside = order2nside(order)\n def kernel(pt1, pt2):\n assert nest\n dist = 1.0 - numpy.dot(pix2vec(nside, pt1, nest=nest),\n pix2vec(nside, pt2, nest=nest))\n dist *= math.pi\n return math.exp(-dist * 30)\n return kernel\n\ndef circle(ipix, neighborsof, radius):\n unvisited = set([ipix])\n visited = set()\n for _ in range(radius):\n while unvisited:\n ipix = unvisited.pop()\n neighbours = set(neighborsof(ipix))\n neighbours -= visited\n visited |= neighbours\n visited.add(ipix)\n unvisited |= visited\n return visited\n\ndef pts2cov(points, kernel, sparse=True):\n if sparse:\n cov = scipy.sparse.dok_matrix((len(points),) * 2, numpy.float64)\n else:\n cov = numpy.zeros((len(points),) * 2)\n for i, pt1 in enumerate(points):\n for j, pt2 in enumerate(points[i:]):\n variance = kernel(pt1, pt2)\n if math.log10(abs(variance)) > -6:\n cov[i, i + j] = variance\n cov[j + i, i] = variance\n cov[i, i] = kernel(pt1, pt1)\n if sparse:\n cov = cov.tocsc()\n return cov\n\ndef pts2invcov(points, kernel, neighborsof):\n points = set(points)\n size = len(points)\n global_invcov = scipy.sparse.dok_matrix((size, size), numpy.float64)\n #global_invcov = dict()\n while points:\n center = points.pop()\n region = circle(center, neighborsof, 10)\n hotspot = circle(center, neighborsof, 8)\n points -= hotspot\n region = list(region)\n cov = pts2cov(region, kernel, sparse=False)\n local_invcov = numpy.linalg.inv(cov)\n for i, pt1 in enumerate(region):\n for j, pt2 in enumerate(region[i:]):\n if set((pt1, pt2)) <= hotspot:\n val = local_invcov[i][j + i]\n if math.log10(abs(val)) > -6:\n #global_invcov[frozenset((pt1, pt2))] =\n global_invcov[pt1, pt2] = val\n global_invcov[pt2, pt1] = val\n global_invcov = global_invcov.tocsr()\n return global_invcov\n\ndef dict2mat(invcov, order):\n size = nside2npix(order2nside(order))\n mat = numpy.zeros((size, size))\n for ent in invcov.keys():\n if len(ent) > 1:\n i, j = ent\n else:\n i, j = list(ent) * 2\n mat[i][j] = invcov[ent]\n mat[j][i] = invcov[ent]\n return mat\n\ndef ss_matmul(csr, csc, size):\n outmat = numpy.zeros((size,) * 2)\n for i in range(size):\n for j in range(size):\n row = csr.getrow(i).todense()\n col = csc.getcol(j).todense()\n outmat[i, j] = numpy.dot(row, col)\n print(i)\n return outmat\n\ndef main():\n order = 3\n pts = pointsof(order)\n kern = cast_kernel(order, nest=True)\n neighbor_func = cast_neighborsof(order, nest=True)\n invcov = pts2invcov(pts, kern, neighbor_func)\n #for v in di.values():\n # print(v)\n print('invcov_done')\n cov = pts2cov(pts, kern)\n print('cov_done')\n check = ss_matmul(invcov, cov, len(pts))\n scipy.misc.imsave('check.png', check)\n\ndef run_checks():\n scipy.misc.imsave('cov.png', cov)\n scipy.misc.imsave('check.png', check_mat)\n scipy.misc.imsave('slow_invcov.png', numpy.linalg.inv(cov))\n scipy.misc.imsave('invcov.png', invcov)\n fig = plt.figure()\n ax = fig.add_subplot(1,1,1)\n img_x = 
healpy.visufunc.mollview(\n invcov[113],\n nest=True,\n return_projected_map=True)\n ax.imshow(img_x)\n ax.axis('off')\n fig.savefig('randrow.png')\n return 0\n\nif __name__ == '__main__':\n sys.exit(main())\n","sub_path":"healpix_sel.py","file_name":"healpix_sel.py","file_ext":"py","file_size_in_byte":5702,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"390490826","text":"import datetime\n\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom django.shortcuts import get_object_or_404, render\nfrom django.template import loader\nfrom django.core.urlresolvers import reverse, reverse_lazy\nfrom django.views import generic\n\nfrom . import models, forms\n\n\nclass IndexView(generic.ListView):\n\ttemplate_name = 'bridges/index.html'\n\tcontext_object_name = 'report_list'\n\n\tdef get_queryset(self):\n\t\treturn models.Report.objects.order_by('report_date')\n\n\nclass DetailView(generic.DetailView):\n\tmodel = models.Report\n\ttemplate_name = 'bridges/detail.html'\n\n\nclass ReportDeleteView(generic.edit.DeleteView):\n\tmodel = models.Report\n\ttemplate_name = 'bobs/report_delete.html'\n\tsuccess_url = reverse_lazy('bridges:index')\n\n\ndef report_form(request, report_id):\n\tif request.method == 'POST':\n\t\tfile_dict = request.FILES\n\t\tt = datetime.date.today()\n\t\tyear = t.year\n\t\tmonth = t.month\n\t\tday = t.day\n\t\trnum = request.POST['report_number']\n\t\tfor each in file_dict.keys():\n\t\t\tfile_dict[each].name = '{y}{m}{d}-{r}-'.format(\n\t\t\t\ty=year, m=month, d=day, r=rnum) + file_dict[each].name\n\n\t\tif report_id == 'new':\n\t\t\tform = forms.ReportForm(request.POST, file_dict)\n\t\t\treport = form.save()\n\t\t\treturn HttpResponseRedirect(reverse('bridges:detail', args=(report.id,)))\n\t\telse:\n\t\t\treport = get_object_or_404(models.Report, pk=report_id)\n\t\t\tform = forms.ReportForm(request.POST, file_dict, instance=report)\n\t\t\treport = form.save()\n\t\t\treturn HttpResponseRedirect(reverse('bridges:detail', args=(report.id,)))\n\telse:\n\t\tif report_id == 'new':\n\t\t\tform = forms.ReportForm()\n\t\t\tcontext = {\n\t\t\t\t'form': form,\n\t\t\t\t'report_id': report_id,\n\t\t\t}\n\t\t\treturn render(request, 'bridges/report_form.html', context)\n\t\telse:\n\t\t\treport = get_object_or_404(models.Report, pk=report_id)\n\t\t\tform = forms.ReportForm(instance=report)\n\t\t\tcontext = {\n\t\t\t\t'form': form,\n\t\t\t\t'report_id': report_id,\n\t\t\t}\n\t\t\treturn render(request, 'bridges/report_form.html', context)\n\n","sub_path":"bridges/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1945,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"629153448","text":"#!/usr/bin/env python -O\r\n\"\"\"\r\nThis is the test class for testing Failure Definition module algorithms and\r\nmodels.\r\n\"\"\"\r\n\r\n# -*- coding: utf-8 -*-\r\n#\r\n# tests.integration.TestFailureDefinition.py is part of The RTK Project\r\n#\r\n# All rights reserved.\r\n\r\nimport sys\r\nfrom os.path import dirname\r\nsys.path.insert(0, dirname(dirname(dirname(__file__))) + \"/rtk\")\r\n\r\nimport unittest\r\nfrom nose.plugins.attrib import attr\r\n\r\nimport dao.DAO as _dao\r\nfrom failure_definition.FailureDefinition import FailureDefinition\r\n\r\n__author__ = 'Andrew Rowland'\r\n__email__ = 'andrew.rowland@reliaqual.com'\r\n__organization__ = 'ReliaQual Associates, LLC'\r\n__copyright__ = 'Copyright 2014 Andrew \"Weibullguy\" Rowland'\r\n\r\n\r\nclass 
TestFailureDefinitionController(unittest.TestCase):\r\n \"\"\"\r\n Class for testing the Failure Definition controller class.\r\n \"\"\"\r\n\r\n def setUp(self):\r\n\r\n _database = '/tmp/tempdb.rtk'\r\n self._dao = _dao(_database)\r\n self._dao.execute(\"PRAGMA foreign_keys = ON\", commit=False)\r\n\r\n self.DUT = FailureDefinition()\r\n self.DUT.dao = self._dao\r\n\r\n @attr(all=True, integration=True)\r\n def test00_request_definitions(self):\r\n \"\"\"\r\n (TestFailureDefinition) request_definitions should return a list of definitions and an error code of 0 on success\r\n \"\"\"\r\n\r\n (_results, _error_code) = self.DUT.request_definitions(0)\r\n self.assertEqual(_error_code, 0)\r\n\r\n @attr(all=True, integration=True)\r\n def test01_add_definition(self):\r\n \"\"\"\r\n (TestFailureDefinition) add_definition should return\r\n \"\"\"\r\n\r\n self.DUT.request_definitions(0)\r\n\r\n (_results, _error_code, _last_id) = self.DUT.add_definition(0)\r\n self.assertTrue(_results)\r\n self.assertEqual(_error_code, 0)\r\n\r\n @attr(all=True, integration=True)\r\n def test02_save_definition(self):\r\n \"\"\"\r\n (TestFailureDefinition) save_definition should return True on success\r\n \"\"\"\r\n\r\n self.DUT.request_definitions(0)\r\n\r\n (_results, _error_code) = self.DUT.save_definitions(0)\r\n self.assertTrue(_results)\r\n\r\n @attr(all=True, integration=True)\r\n def test03_delete_definition(self):\r\n \"\"\"\r\n (TestFailureDefinition) delete_definition should return a 0 error code on success\r\n \"\"\"\r\n\r\n self.DUT.request_definitions(0)\r\n\r\n (_results, _error_code) = self.DUT.delete_definition(0, 1)\r\n self.assertEqual(_error_code, 0)\r\n","sub_path":"rtk-RQA/tests/integration/TestFailureDefinition.py","file_name":"TestFailureDefinition.py","file_ext":"py","file_size_in_byte":2475,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"389817126","text":"'''\nFor next version include arrow keys to navigate the text and make root work properly without the user\nneeding to fill in the brackets - from my standpoint that is too hard to do this late but for the new version it\nmight work\n-Good luck me\n'''\n\n\nclass Main():\n Defaults = {\n \"Bg_Colour\": \"#7eccf7\",\n\n \"Font\": (\"Arial\", 15),\n \"Title_Font\": (\"Arial\", 45, \"bold\"),\n \"SubTitle_Font\": (\"Arial\", 20, \"bold\"),\n \"Font_Colour\": \"Black\",\n\n\n \"Btn_Bg\": \"#2db4ff\",\n \"Btn_Active\": \"#2da9ff\",\n\n \"QuitBtn_Bg\": \"#ef2804\",\n \"QuitBtn_Active\": \"#e82502\"\n }\n\n Bg_Colour = Defaults[\"Bg_Colour\"]\n\n Font = Defaults[\"Font\"]\n Title_Font = Defaults[\"Title_Font\"]\n SubTitle_Font = Defaults[\"SubTitle_Font\"]\n Font_Colour = Defaults[\"Font_Colour\"]\n\n Btn_Bg = Defaults[\"Btn_Bg\"]\n Btn_Active = Defaults[\"Btn_Active\"]\n\n QuitBtn_Bg = Defaults[\"QuitBtn_Bg\"]\n QuitBtn_Active = Defaults[\"QuitBtn_Active\"]\n\n Frames = []\n\n Sum_str = \"\"\n Sum_str_List = []\n Display_str = \"\"\n\n Answered = False\n Answer = \"\"\n\n def __init__(self, Root):\n import tkinter as TK\n\n self.Frames = []\n\n self.Root = Root\n self.Root.title(\"Calculator\")\n\n self.Icon_file = __file__[:-14] + \"CalculatorIcon.ico\"\n\n self.Root.wm_iconbitmap(self.Icon_file)\n self.Root.configure(bg=self.Bg_Colour)\n self.Root.geometry(\"557x393\")\n self.Frames.append(self.Root)\n\n self.Top_Frame = TK.Frame(self.Root, bg=self.Bg_Colour)\n self.Top_Frame.grid(row=0, column=0, columnspan=2, sticky=\"nsew\")\n self.Frames.append(self.Top_Frame)\n\n self.Display_lbl = 
TK.Label(self.Top_Frame, bg=\"white\", text=\"\",\n font=self.Font, fg=self.Font_Colour, activeforeground=self.Font_Colour)\n self.Display_lbl.grid(row=0, column=0, columnspan=5, sticky=\"nsew\")\n\n #self.Setting_btn = TK.Button(self.Top_Frame, bg = self.Btn_Bg, text = \"Settings\", activebackground = self.Btn_Active, font = self.Font, fg = self.Font_Colour, activeforeground = self.Font_Colour, command = lambda: self.Settings())\n #self.Setting_btn.grid(row = 0, column = 5, sticky = \"nsew\")\n\n self.Number_fr = TK.Frame(self.Root, bg=self.Bg_Colour)\n self.Number_fr.grid(row=1, column=0, sticky=\"nsew\")\n self.Frames.append(self.Number_fr)\n\n self.Numbers = [[1, 2, 3, \" \"], [4, 5, 6, \" \"],\n [7, 8, 9, \" \"], [\".\", 0, \"Ans\", \" \"]]\n self.Symbols = [[\"+\", \"-\", \"*\", \"/\"],\n [\"Power\", \"Root\", \" \", \"=\"], [\"C\", \"Del\", \"(\", \")\"]]\n\n self.No_Buttons = []\n\n for i in range(0, len(self.Numbers)):\n for n in range(0, len(self.Numbers[i])):\n self.No_Btn = TK.Button(self.Number_fr, text=self.Numbers[i][n], font=self.Font, fg=self.Font_Colour, activeforeground=self.Font_Colour,\n bg=self.Btn_Bg, activebackground=self.Btn_Active, command=lambda Symbol=self.Numbers[i][n]: self.Sum(Symbol))\n self.No_Btn.grid(row=i, column=n, sticky=\"nsew\")\n if(self.Numbers[i][n] == \" \"):\n self.No_Btn.configure(state=TK.DISABLED)\n\n self.Symbols_fr = TK.Frame(self.Root, bg=self.Bg_Colour)\n self.Symbols_fr.grid(row=1, column=1, sticky=\"nsew\")\n self.Frames.append(self.Symbols_fr)\n\n for i in range(0, len(self.Symbols)):\n for n in range(0, len(self.Symbols[i])):\n self.Symbol_btn = TK.Button(self.Symbols_fr, text=self.Symbols[i][n], font=self.Font, fg=self.Font_Colour, activeforeground=self.Font_Colour,\n bg=self.Btn_Bg, activebackground=self.Btn_Active, command=lambda Symbol=self.Symbols[i][n]: self.Sum(Symbol))\n self.Symbol_btn.grid(row=n, column=i, sticky=\"nsew\")\n if(self.Symbols[i][n] == \" \"):\n self.Symbol_btn.configure(state=TK.DISABLED)\n\n for i in range(len(self.Frames)):\n self.Align_Grid(self.Frames[i])\n\n def Sum(self, Symbol):\n\n if(self.Answered):\n self.Sum_Symbol = \"\"\n self.Display_Symbol = \"\"\n self.Sum_str = \"\"\n self.Display_str = \"\"\n\n self.Display_lbl.configure(text=self.Display_str)\n self.Display_lbl.update()\n\n self.Answered = False\n\n self.Sum_Symbol = str(Symbol)\n self.Display_Symbol = str(Symbol)\n\n if(str(Symbol) == \"Power\"):\n self.Sum_Symbol = \"**\"\n self.Display_Symbol = \"^\"\n\n if(str(Symbol) == \"Root\"):\n self.Sum_Symbol = \"**(1/\"\n self.Display_Symbol = \"√\"\n\n if(str(Symbol) == \"C\"):\n self.Sum_Symbol = \"\"\n self.Display_Symbol = \"\"\n self.Sum_str = \"\"\n self.Display_str = \"\"\n\n if(str(Symbol) == \"Ans\"):\n self.Sum_Symbol = self.Answer\n self.Display_Symbol = self.Answer\n\n if(str(Symbol) == \"=\"):\n self.Sum_Symbol = \"\"\n try:\n self.Answer = str(eval(self.Sum_str))\n self.Display_str = \"\"\n self.Display_Symbol = self.Answer\n\n except:\n self.Sum_Symbol = \"\"\n self.Display_Symbol = \"\"\n self.Sum_str = \"\"\n self.Display_str = \"\"\n\n self.Display_lbl.configure(text=self.Display_str)\n self.Display_lbl.update()\n\n self.Sum_Symbol = \"\"\n self.Display_Symbol = \"Error\"\n\n self.Answered = True\n\n if(str(Symbol) == \"Del\"):\n self.Sum_Symbol = \"\"\n self.Display_Symbol = \"\"\n self.Sum_str = self.Sum_str[:-\n len(self.Sum_str_List[len(self.Sum_str_List)-1])]\n self.Display_str = self.Display_str[:-1]\n self.Sum_str_List = self.Sum_str_List[:-1]\n else:\n 
self.Sum_str_List.append(self.Sum_Symbol)\n\n self.Sum_str += str(self.Sum_Symbol)\n self.Display_str += str(self.Display_Symbol)\n\n self.Display_lbl.configure(text=self.Display_str)\n self.Display_lbl.update()\n\n def Align_Grid(self, Frame):\n # Aligns the grid\n self.Grid_Size = Frame.grid_size()\n\n for i in range(self.Grid_Size[0]):\n Frame.columnconfigure(i, weight=3)\n for i in range(self.Grid_Size[1]):\n Frame.rowconfigure(i, weight=3)\n if(Frame == self.Frames[0] and i == 0):\n Frame.rowconfigure(i, weight=1)\n\n '''\n def Settings(self):\n\n self.Setting_tl = TK.Toplevel(self.Root, bg = self.Bg_Colour)\n self.Setting_tl.title(\"Settings\")\n self.Setting_tl.geometry(\"\")\n self.Setting_widgets = []\n\n self.Options_fr = TK.Frame(self.Setting_tl, bg = self.Bg_Colour)\n self.Options_fr.grid(row = 0, column = 0, sticky = \"nsew\")\n\n self.Options = [\"Colours\", \"Font\", \"Reset\", \"\", \"Quit\"]\n\n for i in range(0,len(self.Options)):\n self.Setting = TK.Button(self.Options_fr, text = self.Options[i], font = self.Font, foreground = self.Font_Colour, bg = self.Btn_Bg, activebackground = self.Btn_Active, command = lambda e = self.Options[i]: self.Option(e))\n self.Setting.grid(row = i, column = 0, sticky = \"nsew\")\n\n self.Setting_widgets.append(self.Setting)\n\n if(self.Options[i] == \"\"):\n self.Setting.configure(state = TK.DISABLED)\n\n self.Align_Grid(self.Setting_tl)\n self.Align_Grid(self.Options_fr)\n\n \n self.Warning_tl = TK.Toplevel(self.Root, bg = self.Bg_Colour)\n self.Warning_tl.title(\"WARNING\")\n\n self.Warning_lbl = TK.Label(self.Warning_tl, bg = self.Bg_Colour, font = self.Font, fg = self.Font_Colour, text = \"Sorry but due to prgraming errors this isn't working see Version 3.0 notes\")\n self.Warning_lbl.pack()\n\n\n def Option(self, Selection):\n if(Selection == \"Colours\"):\n self.Colour_Frame = TK.Frame(self.Setting_tl, bg = self.Bg_Colour)\n self.Colour_Frame.grid(row = 0, column = 0, sticky = \"nsew\")\n\n self.Colour_Options = [\"Background\",\"Forground\",\"Button\",\"Clicked button\"]\n self.Colour_Option_Widgets = []\n\n for i in range(len(self.Colour_Options)):\n self.Colour_Option_lbl = TK.Label(self.Colour_Frame, bg = self.Bg_Colour, text = self.Colour_Options[i], font = self.Font, fg = self.Font_Colour, activeforeground = self.Font_Colour)\n self.Colour_Option_lbl.grid(row = i, column = 0, sticky = \"nsew\")\n\n self.Colour_Option_ent = TK.Entry(self.Colour_Frame, font = self.Font, fg = self.Font_Colour)\n self.Colour_Option_ent.insert(0,\"Hex colour code\")\n self.Colour_Option_ent.grid(row = i, column = 1, sticky = \"nsew\", padx = 10, pady = 50)\n\n self.Colour_Option_Widgets.append(self.Colour_Option_ent)\n\n \n self.Save_btn = TK.Button(self.Colour_Frame, bg = self.Btn_Bg, fg = self.Font_Colour, activebackground = self.Btn_Active, activeforeground = self.Font_Colour, font = self.Font, text = \"Save\", command = lambda: self.Change_Colour())\n self.Save_btn.grid(row = 4, column = 1, sticky = \"nsew\")\n\n self.Quit_btn = TK.Button(self.Colour_Frame, bg = self.QuitBtn_Bg, fg = self.Font_Colour, activebackground = self.QuitBtn_Active, activeforeground = self.Font_Colour, font = self.Font, text = \"QUIT\", command = lambda: self.Colour_Quit)\n self.Quit_btn.grid(row = 4,column = 0, sticky = \"nsew\")\n\n self.Align_Grid(self.Colour_Frame)\n \n def Change_Colour(self):\n\n self.Hex_Codes = []\n\n for i in range(len(self.Colour_Option_Widgets)):\n self.Hex_Code = self.Colour_Option_Widgets[i].get()\n self.Hex_Codes.append(self.Hex_Code)\n \n 
if(self.Hex_Codes[0][0] == \"#\"):\n self.Bg_Colour = self.Hex_Codes[0]\n if(self.Hex_Codes[1][0] == \"#\"):\n self.Font_Colour = self.Hex_Codes[1]\n if(self.Hex_Codes[2][0] == \"#\"):\n self.Btn_Bg = self.Hex_Codes[2]\n if(self.Hex_Codes[3][0]):\n self.Btn_Active = self.Hex_Codes[3]\n \n '''\n\n\ndef Run():\n import tkinter as TK\n\n Root = TK.Tk()\n\n Main(Root)\n\n while True:\n try:\n Root.update_idletasks()\n Root.update()\n except:\n break\n\n\nif __name__ == \"__main__\":\n import tkinter as TK\n\n Root = TK.Tk()\n\n Run = Main(Root)\n\n while True:\n try:\n Root.update_idletasks()\n Root.update()\n except:\n break\n","sub_path":"Moduals/Tools/Calculator.pyw","file_name":"Calculator.pyw","file_ext":"pyw","file_size_in_byte":10731,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"59774778","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns; sns.set()\nimport sys\nimport random\n\n\nxtrain = np.array([-6,0,7])\nytrain = np.array([3,-2,2])\n\n# use a three units periodic kernel\n\nn = 100\ns = -10\nf = 10\nx = np.linspace(s,f,n)\nT = 10\n\ndef sin2_kernel(a,b):\n\treturn np.exp(-( np.sin( np.pi*abs(a-b)/T) )**2)\n\ndef kernel(a,b):\n\treturn np.exp(-(1/5)*np.linalg.norm(a-b)**2)\n\nkxx = np.array( [[sin2_kernel(xi,xj) for xi in xtrain ] for xj in xtrain] )\nkxx_ = np.array( [[sin2_kernel(xi,xj) for xi in x ] for xj in xtrain] )\nkx_x = np.array( [[sin2_kernel(xi,xj) for xi in xtrain ] for xj in x] )\nkx_x_ = np.array( [[sin2_kernel(xi,xj) for xi in x ] for xj in x] )\n\nkxxinv = np.linalg.inv(kxx)\n\nmean_ = kx_x @ kxxinv @ ytrain\ncov_ = kx_x_ - kx_x @ kxxinv @ kxx_\n\ny = np.random.multivariate_normal(mean_, cov_, 4)\n\nfig,ax = plt.subplots(figsize=(15,5), facecolor='white')\n# ax.set_ylim(-4,4)\nfor i,f in enumerate(y):\n\tax.plot(x,f, label = f'f{i+1}')\n\nax.plot(x, mean_, c='k', label ='mean')\nax.scatter(xtrain,ytrain, s=100, )\nplt.title('4 posterior functions created with gaussian kernel')\nplt.legend()\nplt.xticks(x)\nplt.show()\nfig.savefig('periodic,posterior.png')\n","sub_path":"x.py","file_name":"x.py","file_ext":"py","file_size_in_byte":1184,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"96332653","text":"from parser import createBlock, convert\nfrom learnblockCode.VisualBlock import toLBotPy\nfrom learnblockCode.Block import *\n\nlistVariables = []\n\n# Gets the user variables definitions. 
Only variable declarations at the beginning of the code\ndef processVars(variables):\n global listVariables\n listVariables = variables.split(\",\")\n listVariables.pop()\n\n# Gets the user variables and parses them to give the initial definition (var = None)\ndef convertVariables():\n text = \"\"\n if len(listVariables) > 0:\n for name in listVariables:\n text += name + \" = None\\n\"\n text += \"\\n\\n\"\n return text\n\n# Gets the user functions definitions and the blocks inside the main block, and parses them to Block-Text\ndef convertUserFunctionsAndMain(blocks):\n text = \"\"\n for b in [block for block in blocks if block[1][\"TYPE\"] is USERFUNCTION]:\n text += \"def \" + toLBotPy(b, 1)\n text += \"\\nend\\n\\n\"\n for b in [block for block in blocks if \"main\" == block[0]]:\n text += b[0] + \":\\n\"\n if b[1][\"BOTTOMIN\"] is not None:\n text += \"\\t\" + toLBotPy(b[1][\"BOTTOMIN\"], 2)\n else:\n text += \"pass\"\n text += \"\\nend\\n\\n\"\n return text\n\n# Returns the complete code in Block-Text according to the blocks in the workspace\ndef parserBlockText(blocks):\n result = \"\"\n result += convertVariables()\n result += convertUserFunctionsAndMain(convert(blocks))\n return result\n","sub_path":"btParser.py","file_name":"btParser.py","file_ext":"py","file_size_in_byte":1461,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"187476238","text":"\"\"\"This file is another taleo sub spider (for yahoo, heb, etc). It extends TaleoEE\n\nIMPORTANT: Note that sites that use this sub spider need to have a portal code\n\nscrapy crawl taleo_yahoo \\\n -a url=\"https://tas-yahoo.taleo.net/careersection/yahoo_us_cs/jobsearch.ftl?lang=en&portal=40140220106\" \\\n -a extract=1\n\nsite list:\n https://tas-yahoo.taleo.net/careersection/yahoo_us_cs/jobsearch.ftl?lang=en&portal=40140220106\n https://heb.taleo.net/careersection/Ex-Store_Operations/jobsearch.ftl?lang=en&portal=36200120157\n https://supervalu.taleo.net/careersection/svuiaext/jobsearch.ftl?lang=en&portal=101430233\n\"\"\"\n\nimport json\nimport re\nfrom urlparse import urljoin\n\nfrom scrapy.exceptions import CloseSpider\nfrom scrapy.http import Request\n\nfrom brightcorp.base.taleo_ee import TaleoEE\n\n\nclass TaleoYahoo(TaleoEE):\n\n name = \"taleo_yahoo\"\n\n allowed_domains = [\"taleo.net\"]\n\n DOMAIN_REG = re.compile(r'(?<=https\\:\\/\\/).*(?=.taleo)')\n PORTAL_REG = re.compile(r'\\d+$')\n URL_CODE_REG = re.compile(r'(?<=careersection\\/).*(?=\\/jobsearch)')\n LANG_REG = re.compile(r'lang=(\\w+)')\n COMPANY_FROM_JSON = False\n LOCATION_INDEX = 1\n COMPANY_INDEX = 2\n\n def parse(self, response):\n # We need to get the domain, portal and the element in between careersection and jobsearch (url_code)\n # to make this spider generic for re-use\n domain = self.DOMAIN_REG.search(self.start_urls[0])\n portal = self.PORTAL_REG.search(self.start_urls[0])\n url_code = self.URL_CODE_REG.search(self.start_urls[0])\n lang = self.LANG_REG.search(self.start_urls[0])\n\n if portal and domain and url_code:\n # Create the rest api url\n rest_api_url = \"https://\"+domain.group()+\".taleo.net/careersection/rest/jobboard/searchjobs?lang=\"+lang.group(1)+\"&portal=\"+portal.group()\n yield self.create_request(rest_api_url, \"{}\", url_code.group())\n else:\n raise CloseSpider(\"Please check that the url includes all the required fields: domain, portal, url_code\")\n\n def parse_job_results(self, response):\n data = json.loads(response.body)\n\n if not data['requisitionList'] or 
len(data['requisitionList']) == 0:\n raise CloseSpider(\"No jobs available.\")\n\n page = data['pagingData']['currentPageNo']\n items = data['pagingData']['pageSize']\n total = data['pagingData']['totalCount']\n\n jobdetail_path = \"/careersection/\"+response.meta[\"url_code\"]+\"/jobdetail.ftl?job=%s\"\n for job in data['requisitionList']:\n job_url = jobdetail_path % job['jobId']\n request = Request(urljoin(response.url, job_url),\n callback=self.parse_job_callback())\n request.meta['location'] = job['column'][self.LOCATION_INDEX].strip('[\"]')\n if self.COMPANY_FROM_JSON and len(job['column']) >= self.COMPANY_INDEX:\n request.meta['company'] = job['column'][self.COMPANY_INDEX]\n yield request\n\n if page * items < total:\n # If this is not the last page, then make a new request\n yield self.create_request(response.url, json.dumps({'pageNo': page + 1}), response.meta['url_code'])\n\n def create_request(self, url, json_body, url_code):\n \"\"\"\n Helper method to create the parse_job_results request\n \"\"\"\n return Request(\n url,\n method=\"POST\",\n body=json_body,\n headers={'Content-Type': \"application/json\"},\n callback=self.parse_job_results,\n meta={'url_code': url_code}\n )\n","sub_path":"brightcorp/brightcorp/spiders/taleo_yahoo.py","file_name":"taleo_yahoo.py","file_ext":"py","file_size_in_byte":3617,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"546759616","text":"import stirling\n\ndef do_test2(obj, *a, **kw):\n '''\n usage: test2 [boolean] [kv] [target]\n All purpose test command, use it for whatever you need\n '''\n try:\n foo = stirling.clone('world.dev.obj.toothpick.Toothpick')\n obj.debug(foo)\n foo.move(obj.environment)\n except:\n obj.debug(moose)\n return\n","sub_path":"stirling/cmd/dev/test2.py","file_name":"test2.py","file_ext":"py","file_size_in_byte":346,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"84817725","text":"#! ../env/bin/python\n\nfrom flask import Flask\n\nfrom .models import db\nfrom .controllers.main import main\nfrom .controllers.api import api\n\nfrom .extensions import (\n cache,\n login_manager,\n webpack\n)\n\nDEBUG = True\n\napp = Flask(__name__)\n\n\ndef create_app(object_name, env=\"prod\"):\n \"\"\"\n An flask application factory, as explained here:\n http://flask.pocoo.org/docs/patterns/appfactories/\n Arguments:\n object_name: the python path of the config object,\n e.g. appname.settings.ProdConfig\n env: The name of the current environment, e.g. 
prod or dev\n \"\"\"\n #app = Flask(__name__)\n\n app.config.from_object(object_name)\n app.config['ENV'] = env\n\n # initialize the cache\n cache.init_app(app)\n\n # initialize SQLAlchemy\n db.init_app(app)\n\n # setting up flask-login as login manager\n login_manager.init_app(app)\n\n # initialize flask-webpack\n webpack.init_app(app)\n\n # registering blueprints\n app.register_blueprint(main)\n app.register_blueprint(api)\n\n return app\n","sub_path":"server/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1054,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"615410599","text":"from PIL import Image, ImageChops, ImageDraw, ImageFont\r\nfrom io import BytesIO\r\n\r\ndef make(input=\"template.png\", qrcode=\"qrcode.png\", output=\"grabber.png\", size=314):\r\n wanted = Image.open(input)\r\n pfp = Image.open(qrcode)\r\n pfp = pfp.resize((size,size))\r\n wanted.paste(pfp, (200,720))\r\n draw = ImageDraw.Draw(wanted)\r\n wanted.save(output)","sub_path":"QRCodeTokenGrabber/utils/nitroImage.py","file_name":"nitroImage.py","file_ext":"py","file_size_in_byte":358,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"84040883","text":"import sys\nimport time\nimport socket\n\nbatchers = [('localhost', 9000)]\npayload = '{\"Host\":0,\"TOId\":0,\"LId\":0,\"Tags\":{\"from\":\"id\"},\"Pre\":{\"Host\":0,\"TOId\":0}}'\n\ndef build(conn, id):\n tmp = payload.replace('from', str(conn)).replace('id', str(id))\n n = len(tmp) + 1\n header = n.to_bytes(4, byteorder='big')\n header += b'r'\n return header + tmp.encode()\n\ndef post(s, payload):\n s.send(payload)\n\nduration = int(sys.argv[1])\nrate = int(sys.argv[2])\ntotal = duration * rate\nprecision = 100\ninterval = 1.0 / rate\npool = 10\n\ns = [0] * pool\nfor i in range(pool):\n s[i] = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s[i].connect(batchers[0])\n\nstart = time.time()\nlast = time.time()\n\ncnt = 0\ndiff = 0\nwhile cnt < total:\n if time.time() - last > interval * pool * precision - diff:\n a = time.time()\n for i in range(precision):\n for j in range(pool):\n cnt += 1\n post(s[j], build(j, cnt))\n diff = time.time() - a\n last = time.time()\nprint(time.time() - start)\n\nfor j in range(pool):\n s[j].close()\n","sub_path":"test/script/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":1088,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"93933937","text":"#!/usr/bin/python\n\nimport RPi.GPIO as GPIO\nimport smbus\nimport time\n\nfrom PCF8574 import PCF8574_GPIO\nfrom Adafruit_LCD1602 import Adafruit_CharLCD\nfrom TwitterAPI import TwitterAPI, TwitterOAuth\n\ndef cleanup():\n\tmcp.output(3,0)\n\tlcd.clear()\n\tbus.close()\n\ndef scroll(lcd, text, pause1=False, pause2=False, rep=False):\n\t#timing defaults\n\tPAUSE_NEXT = 2\n\tPAUSE_REP = 2\n\tREPETITIONS = 1\n\n\tif pause1: PAUSE_NEXT = pause1\n\tif pause2: PAUSE_REP = pause2\n\tif rep: REPETITIONS = rep\n\n\tn=16\n\trows = [text[i:i+n] for i in range (0, len(text), n)]\n\tn_rows = len(rows)\n\tfor i in range (REPETITIONS):\n\t\tfor x in range (n_rows):\n\t\t\tlcd.home()\n\t\t\tlcd.clear()\n\t\t\tnxt = x + 1\n\t\t\tlcd.message(rows[x]+\"\\n\")\n\t\t\tif nxt == n_rows:\n\t\t\t\ttime.sleep(2)\n\t\t\t\tbreak\n\t\t\telse:\n\t\t\t\tlcd.message(rows[nxt])\n\t\t\t\ttime.sleep(PAUSE_REP)\n\tlcd.clear()\n\n##########SETUP##########\n\no=TwitterOAuth.read_file()\ntry:\n\tapi = TwitterAPI( 
o.consumer_key,\n \t o.consumer_secret,\n \t o.access_token_key,\n \to.access_token_secret)\nexcept Exception as e:\n\tprint ('TAPI Error:', e)\n\nSEARCH_TERM = '#christmas'\nlcd_addr = 0x3f\nbus=smbus.SMBus(1)\n\ntry:\n\tmcp = PCF8574_GPIO(lcd_addr)\nexcept Exception as e:\n\tprint ('I/O Error: ',e)\n\ntry:\n\tlcd = Adafruit_CharLCD(pin_rs=0, pin_e=2, pins_db=[4,5,6,7], GPIO=mcp)\nexcept Exception as e:\n\tprint ('Init error: ',e)\n\nmcp.output(3,1)\nlcd.begin(16,2)\n\nif __name__ == '__main__':\n\n\tmsg_count = 0\n\tr = api.request('search/tweets',{'q': SEARCH_TERM})\n\tfor item in r:\n\t\tMSG = item['text'] if 'text' in item else item\n\t\tscroll(lcd, MSG)\n\t\tmsg_count += 1\n\tcleanup()\n","sub_path":"lcd.py","file_name":"lcd.py","file_ext":"py","file_size_in_byte":1615,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"115242162","text":"# USER: TCG\n# Tic Tac Toe (User vs Computer)\n\nimport random\n\ndef place(num,x):\n # Returns all the moves made\n for i in range(len(x)):\n if x[i] == num:\n pos = x.index(num)\n return x[(pos + 1)]\n return str(num)\n\ndef print_grid(move,player,x):\n\n pos_list.extend([move,player])\n\n # Grid on which the player and computer play on\n grid = [\"\",\"-------------\\n\",\n \"|\",place(1,pos_list),\"|\",place(2,pos_list),\"|\",place(3,pos_list),\"|\\n\",\n \"|---+---+---|\\n\",\n \"|\",place(4,pos_list),\"|\",place(5,pos_list),\"|\",place(6,pos_list),\"|\\n\",\n \"|---+---+---|\\n\",\n \"|\",place(7,pos_list),\"|\",place(8,pos_list),\"|\",place(9,pos_list),\"|\\n\",\n \"-------------\\n\"]\n if x == 2:\n # Only prints one the player has made a move\n print = '\\n', ' '.join(grid)\n\ndef winner(x,player,xx):\n\n # Checks if there is a winner (Really messy, could do with code simplifying)\n if ((1 in x and 4 in x and 7 in x)or(1 in x and 2 in x and 3 in x)or(2 in x and 5 in x and 8 in x)or(3 in x and 6 in x and 9 in x)or\n (4 in x and 5 in x and 6 in x)or(7 in x and 8 in x and 9 in x)or(1 in x and 5 in x and 9 in x)or(3 in x and 5 in x and 7 in x)):\n # If prevents the A.I part from printing the statement\n if 'If prevents the A.I part from printing the statement':\n print = '\\n'*5,\"\\'%s\\'\" %player, \"HAS WON!\"\n return 1 == 1\n\ndef computer_AI_part(listx):\n global computer_move\n\n # Checks all possible values which the player can and enter to win and blocks it\n for x in range(1,10):\n if x not in pos_list:\n listx.append(x)\n if (winner(listx,'Computer',1)) == True:\n del listx[-1]\n computer_move = x\n return 1\n del listx[-1]\n\ndef computer_and_player():\n global computer_move,pos_list,player_list,computer_list\n replay,draw = 0,0\n\n while True:\n\n # Replay's the game\n if replay == 1:\n restart = 'raw_input'(\"Would you like to replay?: \")\n if restart == \"yes\":\n pass\n else:\n return\n else:\n print = \"\\nTic Tac Toe - Computer vs You\", '\\n'*2,\"Computer goes first\\n\"\n\n replay,computer_move,players_move,loop_count,pos_list,player_list,computer_list = 0,0,0,0,[],[],[]\n\n for each in \"XXXXX\":\n loop_count += 1\n\n # Computer's Move\n if computer_AI_part(computer_list) or computer_AI_part(player_list) == 1:\n pass\n else:\n while True:\n computer_move = random.randint(1,9)\n if computer_move in pos_list:\n continue\n break\n computer_list.append(computer_move)\n # Prints Grid\n print_grid(computer_move,'O',2)\n\n if loop_count == 5:\n if winner(player_list,'player',2) == True or winner(computer_list,'Computer',2) == True:\n pass\n else:\n print = \"Match Was a 
draw!\"\n replay = 1\n break\n\n # Checks winner\n if winner(computer_list,'Computer',2) == True:\n replay = 1\n break\n\n # Player's Move\n while True:\n try:\n players_move = int('raw_input'(\"\\n\\'%s\\' Enter a value from the grid to plot your move: \" %each))\n if players_move in pos_list or players_move < 1 or players_move > 9:\n print = \"Enter an available number that's between 1-9\"\n continue\n break\n except:\n print = \"Enter a number\"\n\n player_list.append(players_move)\n # Sets player's move for printing\n print_grid(players_move,each,1)\n\n # Checks winner again\n if winner(player_list,'player',1) == True:\n print_grid(players_move,each,2)\n winner(player_list,'player',2)\n replay = 1\n break\n\nif __name__ == \"__main__\":\n computer_and_player()","sub_path":"Tic Tac Toe game.py","file_name":"Tic Tac Toe game.py","file_ext":"py","file_size_in_byte":4209,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"372166177","text":"from flask import make_response, abort\nfrom config import db\nfrom models import LabTest, LabTestSchema\n\n\n# Create a handler for our read (GET) labTests\ndef read():\n \"\"\"\n This function responds to a request for /api/labTests\n with the complete lists of labTests\n\n :return: sorted list of labTests\n \"\"\"\n # Create the list of labTests from our data\n labTest = LabTest.query \\\n .order_by(LabTest.test_id) \\\n .all()\n\n # Serialize the data for the response\n labTest_schema = LabTestSchema(many=True)\n return labTest_schema.dump(labTest).data\n\n\ndef read_one(test_id):\n \"\"\"\n This function responds to a request for /api/labTests/{test_id}\n with one matching laboratory test from database\n\n :param test_id: ID of labTest to find\n :return: labTest matching ID\n \"\"\"\n # Get the labTest requested\n labTest = LabTest.query \\\n .filter(LabTest.test_id == test_id) \\\n .one_or_none()\n\n if labTest is not None:\n # Serialize the data for the response\n labTest_schema = LabTestSchema()\n return labTest_schema.dump(labTest).data\n\n else:\n abort(404, 'LabTest not found for Id: {labTest_id}'.format(labTest_id=test_id))\n\n\ndef create(labTest):\n \"\"\"\n This function creates a new laboratory test \n based on the passed-in laboratory test data\n\n :param labTest: labTest to create in labTests structure\n :return: 201 on success, 406 on laboratory test exists\n \"\"\"\n name = labTest.get('name')\n test_id = labTest.get('test_id')\n\n # Checking only for id\n existing_labTest = LabTest.query \\\n .filter(LabTest.test_id == test_id) \\\n .one_or_none()\n\n # existing_labTest = LabTest.query \\\n # .filter(or_(LabTest.name == name, LabTest.test_id == test_id)) \\\n # .one_or_none()\n\n if existing_labTest is None:\n schema = LabTestSchema()\n new_labTest = schema.load(labTest, session=db.session).data\n\n db.session.add(new_labTest)\n db.session.commit()\n\n # Serialize and return the newly created labTest in the response\n return schema.dump(new_labTest).data, 201\n\n else:\n abort(409, f'Laboratory test with name {name} or test_id exists already')\n\n\ndef update(test_id, labTest):\n \"\"\"\n This function updates an existing labTest in the labTests structure\n :param test_id: Id of the labTest to update in the labTests structure\n :param labTest: labTest to update\n :return: updated labTest structure\n \"\"\"\n # Get the labTest requested from the db into session\n update_labTest = LabTest.query.filter(\n LabTest.test_id == test_id\n ).one_or_none()\n\n # Did we find a labTest?\n if 
update_labTest is not None:\n\n # turn the passed in labTest into a db object\n schema = LabTestSchema()\n updated = schema.load(labTest, session=db.session).data\n\n # Set the id to the labTest we want to update\n updated.test_id = update_labTest.test_id\n\n # merge the new object into the old and commit it to the db\n db.session.merge(updated)\n db.session.commit()\n\n # return updated labTest in the response\n data = schema.dump(update_labTest).data\n\n return data, 200\n\n # Otherwise, nope, didn't find that labTest\n else:\n abort(\n 404,\n \"Lab test not found for Id: {id}\".format(id=id),\n )\n\n\ndef delete(test_id):\n \"\"\"\n This function deletes a labTest from the labTests structure\n :param test_id: Id of the labTest to delete\n :return: 200 on successful delete, 404 if not found\n \"\"\"\n # Get the labTest requested\n labTest = LabTest.query.filter(LabTest.test_id == test_id).one_or_none()\n\n # Did we find a labTest?\n if labTest is not None:\n db.session.delete(labTest)\n db.session.commit()\n return make_response(\n \"Laboratory test {test_id} {name} deleted\".format(test_id=test_id, name=labTest.name), 200\n )\n\n # Otherwise, nope, didn't find that labTest\n else:\n abort(\n 404,\n \"Laboratory test not found for Id: {test_id}\".format(test_id=test_id),\n )\n\n\n","sub_path":"labTests.py","file_name":"labTests.py","file_ext":"py","file_size_in_byte":4230,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"399887967","text":"# Your code here\n\n\n\ndef word_count(s):\n\n # Your code here\n s = s.lower()\n ignore = [\n '\"',\n \":\",\n \";\",\n \",\",\n \".\",\n \"-\",\n \"+\",\n \"=\",\n \"/\",\n \"\\\\\",\n \"|\",\n \"[\",\n \"]\",\n \"{\",\n \"}\",\n \"(\",\n \")\",\n \"*\",\n \"^\",\n \"&\",\n ]\n\n for char in ignore:\n s = s.replace(char, \"\")\n counts = {}\n split = s.split()\n for word in split:\n if word in counts.keys():\n counts[word] += 1\n else:\n counts[word] = 1\n return counts\n\n\nwith open(\"robin.txt\") as file:\n s = file.read()\n\ncounts = word_count(s)\nwidth = max([len(word) for word in counts.keys()]) + 2\n\ncounts = sorted(counts.items(), key=lambda x: (-x[1], x[0]))\n\nprint(counts)\n\n\nfor count in counts:\n print(f\"{count[0].ljust(width)}\" + \"#\" * count[1])","sub_path":"applications/histo/histo.py","file_name":"histo.py","file_ext":"py","file_size_in_byte":891,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"30598682","text":"# -*- coding: utf-8 -*-\n\"\"\"\n@author: Wei, Shuowen\n\nhttps://leetcode.com/problems/maximum-sum-bst-in-binary-tree/\n\nhttps://labuladong.gitee.io/algo/2/18/28/\n\nLC95, LC96, LC1373\n\"\"\"\n# Definition for a binary tree node.\n# class TreeNode(object):\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\nclass Solution(object):\n def maxSumBST(self, root):\n \"\"\"\n :type root: TreeNode\n :rtype: int\n \"\"\"\n if root is None:\n return 0\n res = [0]\n def traversePostOrder(root):\n isBST = True\n rootSum = 0\n maxNodes = -2**31-1\n minNodes = 2**31+1\n if root is None: \n return isBST, rootSum, maxNodes, minNodes\n isBST_left, rootSum_left, maxNodes_left, minNodes_left = traversePostOrder(root.left)\n isBST_right, rootSum_right, maxNodes_right, minNodes_right = traversePostOrder(root.right)\n # post order, what does this node need to do? 
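\n            # Each call hands back (isBST, rootSum, maxNodes, minNodes) for its subtree,\n            # so this node can validate itself in O(1) from the two child tuples: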
\n # both left and right subtree are BST\n # this node's value must > max value of the left subtree\n # this node's value must < min value of the right subtree\n if isBST_left and isBST_right and root.val > maxNodes_left and root.val < minNodes_right:\n rootSum = rootSum_left + rootSum_right + root.val\n res.append(rootSum)\n maxNodes = max(maxNodes_right, root.val) #? \n minNodes = min(minNodes_left, root.val) #? \n return isBST, rootSum, maxNodes, minNodes\n else:\n isBST = False\n return isBST, rootSum, maxNodes, minNodes\n isBST, rootSum, maxNodes, minNodes = traversePostOrder(root)\n return max(res)\n \n","sub_path":"Hard/LC1373.py","file_name":"LC1373.py","file_ext":"py","file_size_in_byte":1845,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"64421584","text":"import pickle\r\ndados=open('Dados.txt', 'rb')\r\nusuarios = pickle.load(dados)\r\ndados.close()\r\n\r\nprint('---------------acessar a conta---------------')\r\n\r\nusuario = input('username--> ')\r\nif usuario not in usuarios:\r\n while usuario not in usuarios:\r\n usuario = input('usuario inexistente, informe novamente--> ')\r\nsenha = input('senha--> ')\r\nif usuarios[usuario]==senha: #o conteudo do dicionário é a senha, ai ele vai \r\n#se a senha (conteúdo do dicionario) é igual a senha informada pelo usuário\r\n print('bem vindo', usuario)\r\nelse:\r\n while senha not in usuarios[usuario]:\r\n senha = input('senha incorreta, tente novamente--> ')\r\n print('Bem vindo', usuario)#obs: tanto faz o escope desta, fora ou dentro do\r\n #else ja que este print só sera exibido no final do programa, e o final do\r\n #programa, só chega quando o usuario consegue logar(entrar)\r\n","sub_path":"login.py","file_name":"login.py","file_ext":"py","file_size_in_byte":873,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"24336188","text":"import pygame, time\r\n\r\npygame.init()\r\n\r\nimport Room, ChatBox, NPC, Player, Object\r\nfrom globals import *\r\nimport globals_vars\r\n\r\nimport ui_util\r\n\r\n\r\ngame_display = pygame.display.set_mode((DISPLAY_WIDTH, DISPLAY_HEIGHT))\r\ndef c_h():\r\n cb.string = 'Hell Yeah'\r\n\r\njoe = NPC.NPC(game_display, 'Joe', {'FUCK YOU': 'Nah fuck you!'}, {\"TEST\":c_h})\r\ndale = NPC.NPC(game_display, 'Dale', {'FUCK YOU': 'Nah fuck you!'}, {\"TEST\":c_h})\r\nchicken_leg = Object.Object('Chicken Leg',\r\n info = 'Its a fucking chicken leg.',\r\n image = 'imgs/pulke.png',\r\n w = 780, h = 370)\r\n\r\ngenesis = Room.Room(npcs=[joe], objects=[chicken_leg], id_n=1)\r\nr1 = Room.Room(npcs=[dale], right=genesis, objects=[chicken_leg], id_n=2)\r\nr2 = Room.Room(left=genesis, objects=[chicken_leg], id_n=3)\r\nr3 = Room.Room(up=r2, objects=[chicken_leg], id_n=4)\r\nr4 = Room.Room(right=r3, up=genesis, objects=[chicken_leg], id_n=5)\r\nr2.down = r3\r\nr3.left = r4\r\n\r\nplayer = Player.Player(game_display, genesis)\r\n\r\ncb = ChatBox.ChatBox(player = player)\r\nfont = pygame.font.SysFont('Comic Sans MS', 30)\r\n\r\n\r\nwhile 1:\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n pygame.quit()\r\n quit()\r\n\r\n game_display.fill(BACKGROUND)\r\n\r\n ui_util.frame(game_display, 10, 10, 780, 60, color = FOREGROUND)\r\n ui_util.frame(game_display, 10, 80, 780, 60, color = FOREGROUND)\r\n ui_util.frame(game_display, 10, 150, 780, 370, color = FOREGROUND)\r\n ui_util.frame(game_display, 10, 530, 780, 60, color = FOREGROUND)\r\n\r\n cb.printer(game_display, font)\r\n\r\n text_surface = 
font.render(globals_vars.current_displaying_text, False, FOREGROUND)\r\n game_display.blit(text_surface, (15, 15))\r\n\r\n text_surface = font.render(globals_vars.current_displaying_info, False, FOREGROUND)\r\n game_display.blit(text_surface, (15, 85))\r\n\r\n if globals_vars.current_displaying_image is not None:\r\n game_display.blit(globals_vars.current_displaying_image, (15, 155))\r\n\r\n pygame.display.update()\r\n\r\n # frame delay\r\n time.sleep(1/1000)\r\n","sub_path":"Textventure/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2121,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"531011846","text":"import os\nfrom BBWebFw import webApp\nfrom BBWebFw.FileRenderer import Template\nimport random\nimport firebase_admin\nfrom firebase_admin import db, storage\n\ncred_obj = firebase_admin.credentials.Certificate(\"pwastore-c1ed2-firebase-adminsdk-s9p9l-77ba9c52d5.json\")\nfb = firebase_admin.initialize_app(cred_obj,{\"databaseURL\": \"https://pwastore-c1ed2-default-rtdb.firebaseio.com/\",})\nrootRef = db.reference(\"/\")\n\n\n\n#App Defination\napp = webApp(\"app\", \"gunicorn\", os.path.dirname(os.path.abspath(__file__)))\napp.staticCache = 60 * 60 * 24 * 365\n\n#Jinja Templater\ntemplate = Template()\n\n@app.catchURL('/')\n@app.catchURL('/home')\ndef index(request,response):\n apps = rootRef.child(\"apps\").get()\n response.text = template(\"index.html\", {\"key\": random.random()*100000, \"apps\": apps})\n\n@app.catchURL('/sitemap')\ndef sitemap(request,response):\n response.text = open('sitemap.xml', \"rb\").read().decode()\n response.content_type = \"text/xml\"\n\n@app.catchURL('/github')\ndef github(request,response):\n response.body = b''\n \n@app.catchURL('/merch/dlsl')\ndef merchDLSL(request,response):\n response.body = b''\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1276,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"107356133","text":"# Server logic for shim backend\n# TCP server to handle get and post requests\n# Current Shim backend should be able to hook into this\n\nimport cgi\nimport json\nimport sys\nimport subprocess\nimport http.server\nimport socketserver\nfrom http.server import BaseHTTPRequestHandler\n\n\nclass request_handler(BaseHTTPRequestHandler):\n def _str_to_bytes(self, s):\n return bytes(s, \"utf-8\")\n\n def _bytes_to_str(self, s):\n return s.decode(\"utf-8\")\n\n def _respond(self, s):\n self.send_response(200)\n self.send_header(\"Content-type\", \"text/html\")\n self.send_header(\"Content-length\", len(s))\n self.end_headers()\n self.wfile.write(bytes(s, \"utf-8\"))\n\n def _parse_post(self):\n ctype, pdict = cgi.parse_header(self.headers['content-type'])\n if ctype == 'multipart/form-data':\n postvars = cgi.parse_multipart(self.rfile, pdict)\n elif ctype == 'application/x-www-form-urlencoded':\n length = int(self.headers['content-length'])\n postvars = cgi.parse_qs(self.rfile.read(length), keep_blank_values=1)\n else:\n postvars = {}\n return postvars\n\n def do_GET(self):\n s = json.dumps({\n 'resp': \"server does not handle get requests\"\n })\n self._respond(s)\n\n def do_POST(self):\n body = self._parse_post()\n cmd = self._bytes_to_str(body[self._str_to_bytes('command')][0])\n args = self._bytes_to_str(body[self._str_to_bytes('args')][0])\n resp = self.server.context.handle_command(cmd, args)\n\n s = json.dumps({\n 'cmd': resp['cmd'],\n 'args': resp['args'],\n })\n 
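# write the handled command back to the caller as JSON; e.g. a client\n        # POSTing command=...&args=... gets back {'cmd': ..., 'args': ...}\n        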
self._respond(s)\n\n def log_message(self, format, *args):\n return\n\n\nclass server(socketserver.TCPServer):\n def __init__(self, context, *args, **kw):\n socketserver.TCPServer.__init__(self, *args, **kw)\n self.context = context\n self._keep_running = True\n\n def server_bind(self):\n self.allow_reuse_address = True\n super(server, self).server_bind()\n\n def stop(self):\n self._keep_running = False\n\n def run(self):\n while self._keep_running:\n self.handle_request()\n","sub_path":"src/backend/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":2228,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"361235748","text":"import sys\n# from PyQt4 import QtGui, QtCore\nfrom PyQt5.QtWidgets import *\nfrom PyQt5.QtCore import *\nfrom PyQt5.QtGui import *\nfrom PyQt5 import QtCore ,QtGui\nimport cv2\n\nclass QtCapture(QWidget):\n def __init__(self):\n super().__init__()\n\n self.fps = 24\n self.cap = cv2.VideoCapture(0)\n\n self.video_frame = QLabel()\n lay = QVBoxLayout()\n # lay.setMargin(0)\n lay.addWidget(self.video_frame)\n self.setLayout(lay)\n\n def setFPS(self, fps):\n self.fps = fps\n\n def nextFrameSlot(self):\n ret, frame = self.cap.read()\n # My webcam yields frames in BGR format\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n img = QImage(frame, frame.shape[1], frame.shape[0], QtGui.QImage.Format_RGB888)\n pix = QPixmap.fromImage(img)\n self.video_frame.setPixmap(pix)\n\n def start(self):\n self.timer = QTimer()\n self.timer.timeout.connect(self.nextFrameSlot)\n self.timer.start(1000./self.fps)\n\n def stop(self):\n self.timer.stop()\n\n def deleteLater(self):\n self.cap.release()\n super(self).deleteLater()\n\n\nclass ControlWindow(QWidget):\n def __init__(self):\n QWidget.__init__(self)\n self.capture = None\n\n self.start_button = QPushButton('Start')\n self.start_button.clicked.connect(self.startCapture)\n self.quit_button = QPushButton('End')\n self.quit_button.clicked.connect(self.endCapture)\n self.end_button = QPushButton('Stop')\n\n vbox =QVBoxLayout(self)\n vbox.addWidget(self.start_button)\n vbox.addWidget(self.end_button)\n vbox.addWidget(self.quit_button)\n self.setLayout(vbox)\n self.setWindowTitle('Control Panel')\n self.setGeometry(200,200,200,200)\n self.show()\n\n def startCapture(self):\n if not self.capture:\n self.capture = QtCapture()\n self.end_button.clicked.connect(self.capture.stop)\n # self.capture.setFPS(1)\n self.capture.setParent(self)\n self.capture.setWindowFlags(QtCore.Qt.Tool)\n self.capture.start()\n self.capture.setGeometry(400,250,200,200)\n self.capture.show()\n\n def endCapture(self):\n self.capture.deleteLater()\n self.capture = None\n\n\nif __name__ == '__main__':\n\n import sys\n app = QApplication(sys.argv)\n window = ControlWindow()\n sys.exit(app.exec_())","sub_path":"webcamcapture.py","file_name":"webcamcapture.py","file_ext":"py","file_size_in_byte":2432,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"625399564","text":"#!/usr/bin/env python\n# encoding:utf8\nimport os\nfrom s_defaults import default_inputs, default_model_save_iter, has_flag\n# 0 = all messages are logged (default behavior)\n# 1 = INFO messages are not printed\n# 2 = INFO and WARNING messages are not printed\n# 3 = INFO, WARNING, and ERROR messages are not printed\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '1'\n\n\nfrom s_inspect import inspect_data\nfrom s_data_loader import load_all, find_inputs_num\n\nimport shutil\nimport 
traceback\nimport logging\n\nimport numpy as np\nimport matplotlib\nimport matplotlib.pyplot as plt\nfrom sklearn import metrics\n\nprint(\"import tf\")\nimport tensorflow as tf # Version 1.0.0 (some previous versions are used in past commits)\n\nfrom s_save_model import SessModelSaver\nfrom s_save_pred import PredModelSaver, PredResultSaver\nfrom s_graph import inspect_graph, get_summary_writer, add_summary\nfrom s_console_prompt import prompt_yellow, prompt_blue, prompt_green, prompt_red, prompt_progress\n\n# load dataset from data_loader\nprompt_progress(\"LoadDataset\")\ndh = load_all()\nX_train = dh.X_train\nX_test = dh.X_test\ny_train = dh.y_train\ny_test = dh.y_test\nLABELS = dh.LABELS\n\n#setup tf\nprompt_progress(\"InitializeTensorFlow\")\nlist_gpu = tf.config.experimental.list_physical_devices('GPU')\nprompt_yellow(\"Num GPUs Available: \", list_gpu, len(list_gpu))\ntf.enable_resource_variables()\ntf.logging.set_verbosity(tf.logging.ERROR)\ntf.get_logger().setLevel(logging.ERROR)\n\n# %% [markdown]\n# ## Additionnal Parameters:\n# \n# Here are some core parameter definitions for the training. \n# \n# For example, the whole neural network's structure could be summarised by enumerating those parameters and the fact that two LSTM are used one on top of another (stacked) output-to-input as hidden layers through time steps. \n\n# %%\n# Input Data \n\n\nn_training_data_count = len(X_train) # 7352 training series (with 50% overlap between each serie)\nn_test_data_count = len(X_test) # 2947 testing series\nn_steps = len(X_train[0]) # 128 timesteps per series\nn_input = len(X_train[0][0]) # 6/9 input parameters per timestep\nn_classes = 6 # Total classes (should go up, or should go down\n\n\ninspect_data(X_train, X_test, y_train, y_test)\n\n\n# key data for model and training\n# LSTM Neural Network's internal structure\nm_hidden = 8 # Hidden layer num of features\nm_learning_rate = 0.0025\nm_lambda_loss_amount = 0.0015\nm_training_iters = n_training_data_count * 300 # Loop 300 times on the dataset\nif len(list_gpu) > 0:\n m_training_iters = n_training_data_count * 500 # Loop 300 times on the dataset\nm_batch_size = 1500\nm_display_iter = 30000 # To show test set accuracy during training\n\n\ndef extract_batch_size(_train, step, batch_size):\n # Function to fetch a \"batch_size\" amount of data from \"(X|y)_train\" data. \n \n shape = list(_train.shape)\n shape[0] = batch_size\n batch_s = np.empty(shape, dtype=np.float32)\n\n for i in range(batch_size):\n # Loop index\n index = ((step-1)*batch_size + i) % len(_train)\n batch_s[i] = _train[index] \n\n return batch_s\n\n\ndef one_hot(y_, n_classes=n_classes):\n # Function to encode neural one-hot output labels from number indexes \n # e.g.: \n # one_hot(y_=[[5], [0], [3]], n_classes=6):\n # return [[0, 0, 0, 0, 0, 1], [1, 0, 0, 0, 0, 0], [0, 0, 0, 1, 0, 0]]\n \n y_ = y_.reshape(len(y_))\n return np.eye(n_classes, dtype=np.int32)[np.array(y_, dtype=np.int32)] # Returns FLOATS\n\n\n# %% [markdown]\ncnc = tf.constant(n_classes, name=\"my_cn_classes\")\ncns = tf.constant(n_steps, name=\"my_cn_steps\")\n\n\n# ## Utility functions for training: \n# %%\ndef LSTM_RNN(_X, _weights, _biases):\n # Function returns a tensorflow LSTM (RNN) artificial neural network from given parameters. \n # Moreover, two LSTM cells are stacked which adds deepness to the neural network. 
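\n    #\n    # Shape bookkeeping for the steps below (mirroring the inline comments):\n    #   (batch_size, n_steps, n_input) -> transpose -> (n_steps, batch_size, n_input)\n    #   -> reshape -> (n_steps*batch_size, n_input) -> ReLU -> (n_steps*batch_size, m_hidden)\n    #   -> split -> n_steps tensors of shape (batch_size, m_hidden).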
\n # Note, some code of this notebook is inspired from an slightly different \n # RNN architecture used on another dataset, some of the credits goes to \n # \"aymericdamien\" under the MIT license.\n\n # (NOTE: This step could be greatly optimised by shaping the dataset once\n # input shape: (batch_size, n_steps, n_input)\n _X = tf.transpose(_X, [1, 0, 2]) # permute n_steps and batch_size\n # Reshape to prepare input to hidden activation\n _X = tf.reshape(_X, [-1, n_input]) \n # new shape: (n_steps*batch_size, n_input)\n \n # ReLU activation, thanks to Yu Zhao for adding this improvement here:\n _X = tf.nn.relu(tf.matmul(_X, _weights['hidden']) + _biases['hidden'])\n # Split data because rnn cell needs a list of inputs for the RNN inner loop\n _X = tf.split(_X, n_steps, 0) \n # new shape: n_steps * (batch_size, m_hidden)\n\n if has_flag(\"LTSM_LAYER_1\"):\n # Define two stacked LSTM cells (two recurrent layers deep) with tensorflow\n lstm_cell_1 = tf.contrib.rnn.BasicRNNCell(m_hidden) #, forget_bias=1.0, state_is_tuple=True)\n #lstm_cell_2 = tf.contrib.rnn.BasicRNNCell(\n # m_hidden, forget_bias=1.0, state_is_tuple=True)\n #lstm_cells = tf.contrib.rnn.MultiRNNCell(\n # [lstm_cell_1, lstm_cell_2], state_is_tuple=True)\n # Get LSTM cell output\n outputs, states = tf.contrib.rnn.static_rnn(lstm_cell_1, _X, dtype=tf.float32)\n else:\n # Define two stacked LSTM cells (two recurrent layers deep) with tensorflow\n lstm_cell_1 = tf.contrib.rnn.BasicRNNCell(m_hidden) # , forget_bias=1.0, state_is_tuple=True)\n lstm_cell_2 = tf.contrib.rnn.BasicRNNCell(m_hidden) # , forget_bias=1.0, state_is_tuple=True)\n lstm_cells = tf.contrib.rnn.MultiRNNCell([lstm_cell_1, lstm_cell_2], state_is_tuple=True)\n # # Get LSTM cell output\n outputs, states = tf.contrib.rnn.static_rnn(lstm_cells, _X, dtype=tf.float32)\n\n # Get last time step's output feature for a \"many-to-one\" style classifier, \n # as in the image describing RNNs at the top of this page\n lstm_last_output = outputs[-1]\n \n # Linear activation\n return tf.matmul(lstm_last_output, _weights['out']) + _biases['out']\n\n\n# %% [markdown]\n# ## Let's get serious and build the neural network:\n# Graph input/output\nwith tf.name_scope(\"Input\"):\n # 128 steps 9 input\n x = tf.placeholder(tf.float32, [None, n_steps, n_input], name=\"my_x_input\")\n # 6 classified result\n y = tf.placeholder(tf.float32, [None, n_classes], name=\"my_y_input\")\n\ninspect_graph(\"start\")\n\nprompt_progress(\"Model\")\nwith tf.name_scope(\"Model\"):\n # Graph weights\n weights = {\n # Hidden layer weights\n 'hidden': tf.Variable(tf.random_normal([n_input, m_hidden], name=\"weights_hidden\")),\n 'out': tf.Variable(tf.random_normal([m_hidden, n_classes], mean=1.0), name=\"weights_out\")\n }\n biases = {\n 'hidden': tf.Variable(tf.random_normal([m_hidden]), name=\"biases_hidden\"),\n 'out': tf.Variable(tf.random_normal([n_classes]), name=\"biases_out\")\n } \n pred = LSTM_RNN(x, weights, biases)\n\nwith tf.name_scope(\"Output\"):\n pred = tf.identity(pred, name=\"my_pred\")\n\nprompt_progress(\"Loss\")\nwith tf.name_scope(\"Loss\"):\n # Loss, optimizer and evaluation\n _l2 = m_lambda_loss_amount * sum(tf.nn.l2_loss(tf_var) for tf_var in tf.trainable_variables()) # L2 loss prevents this overkill neural network to overfit the data\n\n cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=pred)) + _l2 # Softmax loss\n\nprompt_progress(\"Optimizer\")\nwith tf.name_scope(\"Optimizer\"):\n optimizer = 
tf.train.AdamOptimizer(learning_rate=m_learning_rate).minimize(cost) # Adam Optimizer\n # optimizer = tf.identity(optimizer, name=\"my_optimizer\")\n\nprompt_progress(\"Accuray\")\nwith tf.name_scope(\"Accuray\"):\n _correct_pred = tf.equal(tf.argmax(pred,1), tf.argmax(y,1), name=\"_correct_pred\")\n accuracy = tf.reduce_mean(tf.cast(_correct_pred, tf.float32), name=\"accuracy\")\n accuracy = tf.identity(accuracy, name=\"my_accuracy\")\n\n# %% [markdown]\nprompt_progress(\"init\")\ninit = tf.global_variables_initializer()\n\n# Create a summary to monitor cost tensor\ntf.summary.scalar(\"loss\", cost)\n# Create a summary to monitor accuracy tensor\ntf.summary.scalar(\"accuracy\", accuracy)\n# Merge all summaries into a single op\nmerged_summary_op = tf.summary.merge_all()\n# ## Hooray, now train the neural network:\n\n# %%\n# To keep track of training's performance\n# Launch the graph\nprompt_progress(\"SessionInit\")\nsess = tf.InteractiveSession(config=tf.ConfigProto(log_device_placement=True, device_count={'GPU': len(list_gpu)}))\nsess.run(init)\ninspect_graph(\"after_init\")\n\ntest_losses = []\ntest_accuracies = []\ntrain_losses = []\ntrain_accuracies = []\n\nwriter = get_summary_writer(sess)\n\n\ndef save_model_pred(sess, step):\n prompt_yellow(\"save_model_pred {}\".format(step))\n pms = PredModelSaver(sess, step, pred, x, inputs=n_input)\n return pms.save()\n\n\ndef save_pred_result(sess, step, acc, loss, pred_test=None, y_test=None):\n prs = PredResultSaver(sess, step, acc, loss, pred_test=pred_test, y_test=y_test, inputs=n_input)\n return prs.save()\n\n\ndef save_model_ses(sess, step):\n sms = SessModelSaver(sess, step, inputs=n_input)\n return sms.save()\n\n\n# Perform Training steps with \"batch_size\" amount of example data at each loop\nprompt_progress(\"SessionLoopStart\")\nstep = 1\nwhile step * m_batch_size <= m_training_iters:\n batch_xs = extract_batch_size(X_train, step, m_batch_size)\n batch_ys = extract_batch_size(y_train, step, m_batch_size)\n batch_ys_oh = one_hot(batch_ys)\n\n # Fit training using batch data\n _, loss, acc = sess.run(\n [optimizer, cost, accuracy],\n feed_dict={\n x: batch_xs, \n y: batch_ys_oh\n }\n )\n train_losses.append(loss)\n train_accuracies.append(acc)\n \n # Evaluate network only at some steps for faster training: \n if (step*m_batch_size % m_display_iter == 0) or (step == 1) or (step * m_batch_size > m_training_iters) or (step % 400 == 100):\n \n # To not spam console, show training accuracy/loss in this \"if\"\n print(\"Training iter #\" + str(step*m_batch_size) + \": Batch Loss = \" + \"{:.6f}\".format(loss) + \", Accuracy = {}\".format(acc))\n if step % 400 == 100:\n save_model_pred(sess, step)\n if step % 100 == 0:\n save_model_ses(sess, step)\n add_summary(sess, step, merged_summary_op, feed_dict={\n x: batch_xs,\n y: batch_ys_oh\n })\n\n # Evaluation on the test set (no learning made here - just evaluation for diagnosis)\n loss, acc = sess.run(\n [cost, accuracy], \n feed_dict={\n x: X_test,\n y: one_hot(y_test)\n }\n )\n test_losses.append(loss)\n test_accuracies.append(acc)\n prompt_blue(\"PERFORMANCE ON TEST SET: \" + \"Batch Loss = {} , Accuracy = {} @Step:{}\".format(loss, acc, step))\n save_pred_result(sess, step, acc, loss)\n\n step += 1\n\n\nprint(\"Optimization Finished!\")\nstep += 1\nsave_model_pred(sess, step)\n\n\n# Final prediction: Accuracy for all test data\ny_test_oh = one_hot(y_test)\none_hot_predictions, final_accuracy, final_loss = sess.run(\n [pred, accuracy, cost],\n feed_dict={\n x: X_test,\n y: y_test_oh\n 
}\n)\ntest_losses.append(final_loss)\ntest_accuracies.append(final_accuracy)\nprint(\"FINAL RESULT: \" + \"Batch Loss = {}\".format(final_loss) + \", Accuracy = {}\".format(final_accuracy))\n\npred_test = one_hot_predictions.argmax(1)\n\nsave_pred_result(sess, step, final_accuracy, final_loss, pred_test=pred_test, y_test=y_test)\n\nsess.close()\n\n\n# %% [markdown]\n# ## Training is good, but having visual insight is even better:\n# \n# Okay, let's plot this simply in the notebook for now.\n\n# %%\n# (Inline plots: )\n# get_ipython().run_line_magic('matplotlib', 'inline')\n\nfrom s_plot import plot_traning, print_accuracy, plot_confusion\nplot_traning(m_batch_size, train_losses, train_accuracies, m_training_iters,\n test_losses, test_accuracies, m_display_iter)\n\nprint_accuracy(final_accuracy, pred_test, y_test, X_test)\n\nplot_confusion(pred_test, y_test, LABELS)\n","sub_path":"run_training_rnn.py","file_name":"run_training_rnn.py","file_ext":"py","file_size_in_byte":12049,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"194872854","text":"import sklearn.datasets\nimport nltk.stem\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.cluster import KMeans\n\nMLCOMP_DIR = \"/home/chenmm/Documents/mlpython/ch03/data/\"\ntrain_data = sklearn.datasets.load_mlcomp(\"20news-18828\", mlcomp_root=MLCOMP_DIR)\n\nenglish_stemmer = nltk.stem.SnowballStemmer('english')\n\nclass StemmedTfidfVectorizer(TfidfVectorizer):\n\n def build_analyzer(self):\n analyzer = super(TfidfVectorizer, self).build_analyzer()\n return lambda doc: (english_stemmer.stem(w) for w in analyzer(doc))\n\nvectorizer = StemmedTfidfVectorizer(min_df=10, max_df=0.5,\n stop_words='english', decode_error='ignore'\n )\nvectorized = vectorizer.fit_transform(train_data.data)\n\nnum_samples, num_features = vectorized.shape\nprint(\"#sample: %d, #features: %d\" % (num_samples, num_features))\nprint(\"\\n\")\n\nnum_clusters = 50\nkm = KMeans(n_clusters = num_clusters, init = 'random', n_init = 1, verbose = 1)\nkml = km.fit(vectorized)\nprint(kml)\nprint(\"\\n\")\n\nprint(km.labels_)\nprint(\"\\n\")\n\nprint(km.labels_.shape)\nprint(\"\\n\")\n\nprint(km.cluster_centers_)\nprint(\"\\n\")\n","sub_path":"Building-Machine-Learning-System-with-Python/ch03/3-3-3.py","file_name":"3-3-3.py","file_ext":"py","file_size_in_byte":1166,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"63342109","text":"import os\ndef clear_screen():\n os.system(\"cls\" if os.name == \"nt\" else \"clear\")\nclear_screen()\n\n# Data structures\n\n# Lists:\n# mutable, iterable, have an order\nlist1 = [1,2,3]\n\n## Dictionaries:\n# mutable, iterable, doesn't have an order\ndict1 = {'Name': 'Cristian', 'Age': '32', 'Skill': 'Engineer'}\n#print all the keys:\nprint(f'All the keys of dict1: {dict1.keys()}')\n#print all the values:\nprint(f'All the Values of dict1: {dict1.values()}')\n#Sort the dictionaries values and keys:\nprint(f'Sorted keys: {sorted(dict1.keys())}')\nprint(f'Sorted Values: {sorted(dict1.values())}')\n#Changing values:\ndict1['Name'] = 'Figueiredo'\nprint(f'dict1 novo: {dict1}')\n#Add a new key:\ndict1['Estatura'] = 'Alto' \nprint(f'dict1 com key nova{dict1}')\n#Delete a key\ndel(dict1['Age'])\nprint(f'dic1 com Age deletado: {dict1}\\n')\n\n# unpacking using items()\nfor key, value in dict1.items():\n print(key)\n print(value)\nprint(f'\\n')\n\n# To pack a Dictionarie, we have to use duble '**'\ndef packer(**kwargs):\n for key, 
value in kwargs.items():\n print(f'{key}: {value}')\npacker(name = \"Cristian\", Idade = 32, Religiao = False, Status = \"Married\")\nprint(f'\\n')\n\n##Tuples:\n# Preferable to use, we can acess the data quickly\n# Immutable <<<----\n# you can transform a list into a tuple\ntuple1 = 5, 6, 7\ntuple2 = (2,3,4)\ntuple3 = tuple(list1)\nprint('tuple1: {}'.format(tuple1))\nprint('tuple2: {}'.format(tuple2)) \nprint('tuple3: {}'.format(tuple3)) \n# To pack a Tuple, we have to use simple '*'\ndef tuple_packer(*args):\n print(args)\ntuple_packer('cristian',27,[1,2,3,4]) #It creates a tuple with a 'str', 'int' and a 'list' \n\n# Enumerate in for loop\n# f' inside print() to format the content\nletter_list = ['a','b','c','d','e','f','g','h','i']\nfor index, letter in enumerate(letter_list, start=1):\n print(f'{index}. {letter}')\n\n\nfruits = ['apple', 'banana', 'orange', 'pear', 'strawberry']\nvegetables = ('asparagus', 'corn', 'broccoli', 'eggplant', 'onion')\n'eggplant' in fruits # False\n'eggplant' not in fruits # True\n'eggplant' in vegetables # True\n'eggplant' not in vegetables # False\n\n\nmy_pets = ('dog', 'cat', 'cat', 'chicken', 'dog')\nmy_pets.index('dog') # 0\nmy_pets.index('chicken') # 3\n\nmy_pets = ['dog', 'cat', 'cat', 'chicken', 'dog']\nmy_pets.count('cat') # 2\nmy_pets.count('lizard') # 0\n\nnums = range(1, 10, 2) # start, stop, step\n0 in nums # False\n6 in nums # False\n4 not in nums # True\n8 not in nums # True\n\nnums = range(1, 10, 2)\nnums.index(5) # 2\nnums.index(1) # 0","sub_path":"---Book_Notes.py","file_name":"---Book_Notes.py","file_ext":"py","file_size_in_byte":2495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"57050046","text":"import warnings\n\nfrom astropy import units as u\nfrom glue.core.message import (DataCollectionAddMessage,\n DataCollectionDeleteMessage)\nfrom glue.core import Data\nfrom glue.core.link_helpers import LinkSame\nfrom specutils import Spectrum1D, SpectralRegion\nfrom specutils.manipulation import spectral_slab\nfrom traitlets import List, Unicode, Any, observe\n\nfrom jdaviz.core.events import SnackbarMessage\nfrom jdaviz.core.registries import tray_registry\nfrom jdaviz.core.template_mixin import TemplateMixin\n\n__all__ = ['Collapse']\n\n\n@tray_registry('g-collapse', label=\"Collapse\")\nclass Collapse(TemplateMixin):\n template_file = __file__, \"collapse.vue\"\n data_items = List([]).tag(sync=True)\n selected_data_item = Unicode().tag(sync=True)\n funcs = List(['Mean', 'Median', 'Min', 'Max', 'Sum']).tag(sync=True)\n selected_func = Unicode('Sum').tag(sync=True)\n\n # selected_viewer for spatial-spatial image.\n # NOTE: this is currently cubeviz-specific so will need to be updated\n # to be config-specific if using within other viewer configurations.\n viewer_to_id = {'Left': 'cubeviz-0', 'Center': 'cubeviz-1', 'Right': 'cubeviz-2'}\n viewers = List(['None', 'Left', 'Center', 'Right']).tag(sync=True)\n selected_viewer = Unicode('None').tag(sync=True)\n\n spectral_min = Any().tag(sync=True)\n spectral_max = Any().tag(sync=True)\n spectral_unit = Unicode().tag(sync=True)\n spectral_subset_items = List([\"Entire Spectrum\"]).tag(sync=True)\n selected_subset = Unicode(\"\").tag(sync=True)\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n self.hub.subscribe(self, DataCollectionAddMessage,\n handler=self._on_data_updated)\n self.hub.subscribe(self, DataCollectionDeleteMessage,\n handler=self._on_data_updated)\n\n self._selected_data = None\n 
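# per-instance caches: _selected_cube is refreshed by\n        # _on_data_item_selected, _spectral_subsets by vue_list_subsets\n        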
self._selected_cube = None\n self._spectral_subsets = {}\n self._label_counter = 0\n\n def _on_data_updated(self, msg):\n self.data_items = [x.label for x in self.data_collection]\n # Default to selecting the first loaded cube\n if self._selected_data is None:\n for i in range(len(self.data_items)):\n try:\n self.selected_data_item = self.data_items[i]\n except (ValueError, TypeError):\n continue\n\n @observe('selected_data_item')\n def _on_data_item_selected(self, event):\n data_label = event['new']\n if data_label not in self.data_collection.labels:\n return\n self._selected_data = self.data_collection[self.data_collection.labels.index(data_label)]\n self._selected_cube = self._selected_data.get_object(cls=Spectrum1D, statistic=None)\n self.spectral_unit = self._selected_cube.spectral_axis.unit.to_string()\n\n # Also set the spectral min and max to default to the full range\n self.selected_subset = \"Entire Spectrum\" # This calls self._on_subset_selected()\n\n @observe(\"selected_subset\")\n def _on_subset_selected(self, event):\n if self._selected_data is None:\n return\n\n # If \"Entire Spectrum\" selected, reset based on bounds of selected data\n if self.selected_subset == \"Entire Spectrum\":\n self.spectral_min = self._selected_cube.spectral_axis[0].value\n self.spectral_max = self._selected_cube.spectral_axis[-1].value\n\n else:\n spec_reg = self._spectral_subsets[self.selected_subset]\n self.spectral_min = spec_reg.lower.value\n self.spectral_max = spec_reg.upper.value\n\n def vue_list_subsets(self, event):\n \"\"\"Populate the spectral subset selection dropdown\"\"\"\n temp_subsets = self.app.get_subsets_from_viewer(\"spectrum-viewer\",\n subset_type=\"spectral\")\n temp_dict = {}\n # Attempt to filter out spatial subsets\n for key, region in temp_subsets.items():\n if type(region) == SpectralRegion:\n temp_dict[key] = region\n self._spectral_subsets = temp_dict\n self.spectral_subset_items = [\"Entire Spectrum\"] + sorted(temp_dict.keys())\n\n def vue_collapse(self, *args, **kwargs):\n # Collapsing over the spectral axis. Cut out the desired spectral\n # region. Defaults to the entire spectrum.\n spec_min = float(self.spectral_min) * u.Unit(self.spectral_unit)\n spec_max = float(self.spectral_max) * u.Unit(self.spectral_unit)\n\n with warnings.catch_warnings():\n warnings.filterwarnings('ignore', message='No observer defined on WCS')\n spec = spectral_slab(self._selected_cube, spec_min, spec_max)\n # Spatial-spatial image only.\n collapsed_spec = spec.collapse(self.selected_func.lower(), axis=-1).T # Quantity\n\n data = Data()\n data['flux'] = collapsed_spec.value\n data.get_component('flux').units = str(collapsed_spec.unit)\n\n self._label_counter += 1\n label = f\"Collapsed {self._label_counter} {self._selected_data.label}\"\n\n self.data_collection[label] = data\n\n # Link the new dataset pixel-wise to the original dataset. 
In general\n # direct pixel to pixel links are the most efficient and should be\n # used in cases like this where we know there is a 1-to-1 mapping of\n # pixel coordinates.\n # Spatial-spatial image only.\n pix_id_1 = self._selected_data.pixel_component_ids[0] # Pixel Axis 0 [z]\n pix_id_1c = self.data_collection[label].pixel_component_ids[0] # Pixel Axis 0 [y]\n pix_id_2 = self._selected_data.pixel_component_ids[1] # Pixel Axis 1 [y]\n pix_id_2c = self.data_collection[label].pixel_component_ids[1] # Pixel Axis 1 [x]\n\n self.data_collection.add_link([LinkSame(pix_id_1, pix_id_1c),\n LinkSame(pix_id_2, pix_id_2c)])\n\n snackbar_message = SnackbarMessage(\n f\"Data set '{self._selected_data.label}' collapsed successfully.\",\n color=\"success\",\n sender=self)\n self.hub.broadcast(snackbar_message)\n\n # Spatial-spatial image only.\n if self.selected_viewer != 'None':\n # replace the contents in the selected viewer with the results from this plugin\n self.app.add_data_to_viewer(self.viewer_to_id.get(self.selected_viewer),\n label, clear_other_data=True)\n","sub_path":"jdaviz/configs/default/plugins/collapse/collapse.py","file_name":"collapse.py","file_ext":"py","file_size_in_byte":6528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"572686615","text":"import threading\nimport sys\nimport serial\nimport numpy\nimport time\n\nimport matplotlib.pyplot as plt\nplt.style.use('seaborn-whitegrid')\n\ntry:\n from PyQt5.Qt import *\n from PyQt5 import QtCore, QtGui, QtWidgets\n from PyQt5.QtCore import QThread, QObject, pyqtSignal, pyqtSlot\n from PyQt5.QtGui import *\nexcept:\n pass\n\nfrom mock_graph_qt import TestWindow \nfrom canvas import Canvas\n\nclass Graph_Pane(QtWidgets.QWidget):\n\n # Event for concurrency control - wait/set\n update_graph_event = threading.Event()\n\n canvas_frames = []\n\n # boolean flags\n stop_real_time_graph = False\n reset_port = False\n is_arduino_connected = False\n playback_graph_active = False\n data_row_sample = 0\n microvolts = []\n\n def __init__(self, layout, logger):\n super(Graph_Pane, self).__init__()\n QtWidgets.QWidget.__init__(self)\n self.logger = logger\n\n # Default Port\n self.port = \"COM3\"\n\n @pyqtSlot()\n def reset_data_on_graph(self):\n self.playback_graph_active = False\n self.data_row_sample = 0\n self.microvolts = []\n try:\n for canvas in self.canvas_frames:\n # Prevent drawing\n canvas.reset_graph_axis()\n except:\n # Case where the graph is reset with no data points found.\n pass\n\n def layout_widgets(self, layout): \n self.graph_frame = Canvas(layout, self.logger) # Widget is added from Canvas\n self.canvas_frames.append(self.graph_frame) \n layout.addWidget(self.graph_frame.canvas)\n\n # Trend graph animation Placeholder.\n def build_placeholder(self):\n if self.trend != None:\n movie = QtGui.QMovie(\"../assets/trend.gif\")\n self.trend.setMovie(movie)\n movie.start()\n self.trend.setLayout(QtWidgets.QHBoxLayout())\n\n # Getter\n def get_samples(self):\n return self.graph_frame.samples\n\n # Getter\n def get_microvolts(self):\n return self.graph_frame.microvolts\n\n def get_microvolt_reading(self):\n if len(self.graph_frame.microvolts) > 0:\n return self.graph_frame.microvolts[0]\n return 0\n\n def start_graph_listener(self):\n self.graph_thread = threading.Thread(target=self.read_from_ppg)\n self.graph_thread.start()\n\n def stop_graph(self):\n self.logger.warning(\"Stopping Real Time Graph...\")\n self.stop_real_time_graph = True\n\n def check_arduino_connection(self):\n 
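# simple getter for the connection flag maintained by read_from_ppg\n        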
return self.is_arduino_connected\n\n def stop_graph_temporarily(self):\n self.reset_port = True\n while(self.graph_thread.is_alive()):\n time.sleep(1)\n \n # Clear Graph Data\n for canvas in self.canvas_frames:\n canvas.reset_graph_axis()\n\n def update_port(self, port):\n self.port = port\n self.stop_graph_temporarily()\n\n self.logger.warning(\"Arduino Port Thread Dead - Restarting...\")\n # Start process again with new port\n self.start_graph_listener()\n\n # Conditions/Locks used here for synchronization.\n def start_playback_graph(self):\n self.playback_graph_active = True\n for canvas in self.canvas_frames:\n canvas.reset_graph_axis(min_x=0, max_x=1600, min_y=0, max_y=3000)\n\n try:\n while(self.playback_graph_active):\n self.update_graph_event.wait()\n for microvolt in self.microvolts[::-1]:\n if (self.playback_graph_active):\n self.data_row_sample += 64\n self.update_graph(self.data_row_sample, microvolt)\n else:\n break\n self.update_graph_event.clear()\n finally:\n self.reset_data_on_graph()\n \n\n # Method called for each single update that a graph is required to make.\n # NOTE: plot.pause() method available if needed.\n def update_graph(self, sample_x, sample_y):\n for canvas in self.canvas_frames: \n canvas.samples.append(sample_x)\n canvas.microvolts.append(sample_y)\n\n for canvas in self.canvas_frames:\n canvas.plot()\n\n if(self.data_row_sample > self.canvas_frames[0].max_x):\n #If you have 25 or more points, delete the first one from the array\n #This allows us to just see the last 50 data points\n for canvas in self.canvas_frames:\n canvas.samples.pop(0) \n canvas.microvolts.pop(0)\n canvas.ax1.set_xlim(min(canvas.samples), max(canvas.samples))\n\n # Method is extremely important.\n # Method tries to interact and scan for an active arduino connection based on a port value.\n # This port value can be modified in the settings of the application if needed.\n # It is important this method runs in a separate thread to the primary thread and\n # feeds back the results to the primary thread. 
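\n    # Rough flow: open self.port at 19200 baud / 7 data bits, read one\n    # voltage sample per line, log it to voltages.csv and push it to every\n    # canvas until a stop or reset flag is raised.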
\n # This method uses the serial library to check for a arduino connection.\n def read_from_ppg(self, save_path=\"voltages.csv\"):\n try:\n with serial.Serial(self.port, 19200, bytesize=serial.SEVENBITS, timeout=0) as ser, open(save_path, 'w') as text_file:\n text_file.write(\"{}, {}\\n\".format(\"Samples\", \"Microvolts(mV)\"))\n self.is_arduino_connected = True\n self.reset_port = False\n self.logger.info(\"Arduino Connection found on port {}\".format(ser.port))\n\n for canvas in self.canvas_frames:\n canvas.reset_graph_axis()\n\n while not self.stop_real_time_graph and not self.reset_port:\n voltage_reading = str(ser.readline().decode(encoding='utf-8', errors='strict')).strip(\"\\n\").strip(\"\\r\\n\")\n if (voltage_reading != \"\" and voltage_reading.isdigit() and float(voltage_reading) > 1000):\n text_file.write(\"{}, {}\\n\".format(self.data_row_sample, voltage_reading))\n text_file.flush()\n self.data_row_sample += 1\n\n sample_x = float(self.data_row_sample)\n sample_y = float(voltage_reading)\n self.update_graph(sample_x, sample_y)\n \n ser.flushInput()\n ser.flushOutput()\n except:\n if (not self.stop_real_time_graph and not self.reset_port and not self.playback_graph_active):\n\n # Clear Graph Data\n for canvas in self.canvas_frames:\n canvas.reset_graph_axis()\n\n # Ensure boolean flag is not set\n self.is_arduino_connected = False\n self.stop_real_time_graph = False\n self.data_row_sample = 0\n\n # log to file/console\n self.logger.info(\"Scanning for active Arduino Connection... {5 Second Delay}\")\n\n # Sleep for X seconds checking for reconnection of Arduino\n time.sleep(5)\n\n # Try to read again\n self.read_from_ppg()","sub_path":"src/code/client_side_application_v1.0/main/graph_pane.py","file_name":"graph_pane.py","file_ext":"py","file_size_in_byte":7171,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"566692194","text":"from django.shortcuts import render\n\n# Create your views here.\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\n\nfrom serialize.models import Teacher\nfrom serialize.serializers import TeacherModelsSerializers\n\n\nclass TeacherAPI(APIView):\n def get(self, request, *args, **kwargs):\n id = kwargs.get('id')\n if id:\n try:\n teacher = Teacher.objects.get(pk=id)\n except:\n return Response({\n 'status': 400,\n 'message': 'id错误'\n })\n return Response({\n 'status': 200,\n 'message': '查询单个成功',\n 'result': TeacherModelsSerializers(teacher).data\n })\n else:\n teachers = Teacher.objects.filter(is_delete=False)\n return Response({\n 'status': 200,\n 'message': '查询所有',\n 'result': TeacherModelsSerializers(teachers, many=True).data\n })\n\n def post(self, request, *args, **kwargs):\n data = request.data\n if isinstance(data, dict):\n many = False\n elif isinstance(data, list):\n many = True\n else:\n return Response({\n 'status': 400,\n 'message': '参数有误',\n })\n re_data = TeacherModelsSerializers(data=data, many=many)\n re_data.is_valid(raise_exception=True)\n obj = re_data.save()\n return Response({\n 'status': 200,\n 'message': '添加图书成功',\n 'results': TeacherModelsSerializers(obj, many=many).data\n })\n\n def delete(self, request, *args, **kwargs):\n id = kwargs.get(\"id\")\n if id:\n ids = [id]\n else:\n ids = request.data.get('ids')\n response = Teacher.objects.filter(pk__in=ids, is_delete=False).update(is_delete=True)\n if response:\n return Response({\n 'status': 200,\n 'message': '删除成功'\n })\n return Response({\n 'status': 400,\n 'message': '删除失败或不存在'\n 
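# translation: '删除失败或不存在' means 'delete failed or does not exist'\n        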
})\n\n def put(self, request, *args, **kwargs):\n request_data = request.data\n id = kwargs.get('id')\n try:\n obj = Teacher.objects.get(pk=id)\n except Teacher.DoesNotExist:\n return Response({\n 'status': 400,\n 'message': '不存在'\n })\n se_data = TeacherModelsSerializers(data=request_data, instance=obj)\n se_data.is_valid(raise_exception=True)\n se_data.save()\n return Response({\n 'status': 200,\n 'message': '修改成功',\n 'results': TeacherModelsSerializers(obj).data\n })\n\n def patch(self, request, *args, **kwargs):\n request_data = request.data\n id = kwargs.get(\"id\")\n try:\n obj = Teacher.objects.get(pk=id)\n except Teacher.DoesNotExist:\n return Response({\n 'status': 400,\n 'message': '不存在'\n })\n\n se_data = TeacherModelsSerializers(data=request_data, instance=obj, partial=True)\n se_data.is_valid(raise_exception=True)\n se_data.save()\n return Response({\n \"status\": 200,\n \"message\": '修改成功',\n \"results\": TeacherModelsSerializers(obj).data\n })","sub_path":"drf_ModelSerializers/serialize/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3490,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"344605798","text":"#PF-Assgn-35\n\n#Global variable\nlist_of_marks=(12,18,25,24,2,5,18,20,20,21)\n\ndef find_more_than_average():\n\n l = len(list_of_marks)\n sum=0\n count_student=0\n for i in list_of_marks:\n sum+=i\n average=sum/l\n for i in list_of_marks:\n if i>average:\n count_student+=1\n above_avg=(count_student/l)*100\n return above_avg\n\ndef sort_marks():\n\n sorted_list=sorted(list_of_marks)\n return sorted_list\n\ndef generate_frequency():\n\n a=[] #list of all marks\n freq_list=[]\n for i in range(0,26):\n a.append(i)\n for i in a:\n count=0\n for j in list_of_marks:\n if i==j:\n count+=1\n freq_list.append(count)\n return freq_list\n\nprint(find_more_than_average())\nprint(generate_frequency())\nprint(sort_marks())\n","sub_path":"Day 5/Ex35.py","file_name":"Ex35.py","file_ext":"py","file_size_in_byte":805,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"185912341","text":"import numpy as np\nfrom skimage.transform import probabilistic_hough_line\n\n\ndef hough_split(img, splits, shape, overlap):\n temp = []\n cc_edges = np.repeat(np.linspace(shape[1]/splits, shape[1], splits-1, False, dtype=int), 2)\n rr_edges = np.repeat(np.linspace(shape[0]/splits, shape[0], splits-1, False, dtype=int), 2)\n print(shape)\n print(\"(\" + str(shape[0]/splits) + \", \" + str(shape[1]/splits) + \")\")\n cc_edges[::2] += overlap\n cc_edges[1::2] -= overlap\n rr_edges[::2] += overlap\n rr_edges[1::2] -= overlap\n cc_edges = np.concatenate(([0], cc_edges, [shape[1]]))\n rr_edges = np.concatenate(([0], rr_edges, [shape[0]]))\n\n for i in range(0, splits*2, 2):\n for j in range(0, splits*2, 2):\n slice_t = img[rr_edges[i]:rr_edges[i+1], cc_edges[j]:cc_edges[j+1]]\n array = np.asarray(probabilistic_hough_line(slice_t, 60, 10, 2)[:])\n if array.size:\n array[:, :, ::2] += cc_edges[j]\n array[:, :, 1::2] += rr_edges[i]\n\n temp.append(array)\n\n res = np.array(temp[0])\n for i in range(1, len(temp)):\n res = np.append(res, temp[i])\n res = np.reshape(res, (-1, 2, 2))\n\n return res\n","sub_path":"hough_split.py","file_name":"hough_split.py","file_ext":"py","file_size_in_byte":1203,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"370378125","text":"# Runtime: 132 ms, faster than 83.04% of Python online submissions for 
Count Binary Substrings.\n# Memory Usage: 14.3 MB, less than 25.00% of Python online submissions for Count Binary Substrings.\n\nclass Solution(object):\n def countBinarySubstrings(self, s):\n \"\"\"\n :type s: str\n :rtype: int\n \"\"\"\n \n l = len(s)\n prev = s[0]\n pcnt = 0\n ncnt = 1\n \n ans = 0\n \n for i in range(1, l):\n if s[i] != s[i-1]:\n ans += min(pcnt, ncnt)\n pcnt = ncnt\n ncnt = 1\n else:\n ncnt += 1\n \n ans += min(pcnt, ncnt)\n \n return ans","sub_path":"Leetcode/Arrays/Easy/696_counting_binary_substring.py","file_name":"696_counting_binary_substring.py","file_ext":"py","file_size_in_byte":706,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"355745451","text":"\n'''\nDirectory tree\n'''\ndef tx1():\n import os\n for folderName, subfolders, filenames in os.walk(r'E:\\my_program\\py'):\n print('folderName %s' % (folderName))\n print('subfolders %s' % (subfolders))\n print('filenames %s' % (filenames))\n \ndef tx2():\n import os, zipfile\n os.chdir('e:/my_program/py')\n zipObj = zipfile.ZipFile('绝地源码.zip')\n for name in zipObj.namelist():\n print(name)\n # zipObj.extractall()\n zipObj.close()\n\ntx2()","sub_path":"09.py","file_name":"09.py","file_ext":"py","file_size_in_byte":487,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"279956984","text":"with open (r\"C:\\Distrib\\Python\\dataset_3363_2.txt\", \"r\") as file_in:\n r = file_in.read().strip().lower()\n count = 0\n c = 0\n for i in r:\n if r.count(i) > count:\n count = r.count(i)\n c = i\n with open (r\"C:\\Distrib\\Python\\dataset_3363_3.txt\", \"w\") as file_out:\n file_out.write(str(c))\n file_out.write(\" \")\n file_out.write(str(count))\n \n","sub_path":"files002_count_letter.py","file_name":"files002_count_letter.py","file_ext":"py","file_size_in_byte":408,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"32195375","text":"import twitter\nclass TwitterProxy(object):\n \n def __init__(self):\n self.twitter_api = twitter.Twitter(domain=\"api.twitter.com\", api_version=1)\n self.twitter_search = twitter.Twitter(domain=\"search.twitter.com\")\n def getTrends(self):\n trends = self.twitter_api.trends()\n return trends\n def search(self, query, rpp, firstPage, lastPage):\n searchResults = []\n for page in range(firstPage, lastPage):\n res = self.twitter_search.search(q=query, rpp=rpp, page=page)['results']\n if res:\n searchResults.append(res)\n return searchResults\n\nif __name__ == \"__main__\":\n import json\n tw = TwitterProxy()\n print([trend[\"name\"] for trend in tw.getTrends()[\"trends\"]])\n print(\"=\"*10)\n def troySearch():\n \n response = tw.search(\"Troy Davis OR #troydavis\" ,1000, 1, 10)\n with open('troydavisORtroydavisHash.json', mode='w') as f:\n json.dump(response, f)\n response = tw.search(\"Troy Davis\" ,1000, 1, 10)\n with open('troydavis.json', mode='w') as f:\n json.dump(response, f)\n response = tw.search(\"#troydavis\" ,1000, 1, 10)\n with open('troydavisHash.json', mode='w') as f:\n json.dump(response, f)\n response = tw.search(\"#USA Troy Davis\", 1000, 1, 10)\n with open('troydavis_usaHash.json', mode='w') as f:\n json.dump(response, f)\n \n response = tw.search(\"#USA\", 1000, 1, 10)\n with open('usaHash.json', mode='w') as f:\n json.dump(response, f)\n \n response = tw.search(\"Troy Davis #RIP\", 1000, 1, 10)\n with open('troydavis_ripHash.json', mode='w') as f:\n json.dump(response, f) \n response = tw.search(\"#RIP\", 
1000, 1, 10)\n with open('ripHash.json', mode='w') as f:\n json.dump(response, f) \n \n response = tw.search(\"Troy Davis #peinedemort\", 1000, 1, 10)\n with open('troydavis_peinedemortHash.json', mode='w') as f:\n json.dump(response, f)\n \n response = tw.search(\"#peinedemort\", 1000, 1, 10)\n with open('peinedemortHash.json', mode='w') as f:\n json.dump(response, f)\n\n def leanSearch(): \n response = tw.search(\"Lean Startup OR #leanstartup OR #ericries\", 1000, 1, 10)\n with open('leanstartupORleanstartupHashORericriesHash.json', mode='w') as f:\n json.dump(response, f)\n \n def GaddafiSearch(): \n response = tw.search(\"al-Gaddafi OR #gaddafi OR #Libya\", 1000, 1, 10)\n with open('gaddafiORgaddafiHash.json', mode='w') as f:\n json.dump(response, f)\n def GrouponSearch(): \n response = tw.search(\"Groupon OR #groupon \", 1000, 1, 10)\n with open('grouponORgrouponHash.json', mode='w') as f:\n json.dump(response, f)\n #GaddafiSearch()\n #GrouponSearch()\n troySearch()\n","sub_path":"trends.py","file_name":"trends.py","file_ext":"py","file_size_in_byte":2895,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"647597817","text":"import argparse\nimport csv\nimport gzip\nimport numpy as np\nimport zarr\nfrom scipy import sparse\nfrom zarr import Blosc\nimport logging\n\nZARR_GROUP = {\n 'expression_matrix': [\"expression\", \"cell_id\", \"gene_id\",\n \"gene_metadata_string\", \"gene_metadata_numeric\",\n \"cell_metadata_string\", \"cell_metadata_numeric\"\n ]\n}\n\nCOMPRESSOR = Blosc(cname='lz4', clevel=5, shuffle=Blosc.SHUFFLE, blocksize=0)\n\n# the number of rows in a chunk for expression counts\nCHUNK_ROW_SIZE = 10000\nCHUNK_COL_SIZE = 10000\nlogging.basicConfig(level=logging.INFO)\n\n\ndef init_zarr(sample_id, path, file_format):\n \"\"\"Initializes the zarr output.\n\n Args:\n sample_id (str): sample or cell id\n path (str): path to the zarr output\n file_format (str): zarr file format [DirectoryStore, ZipStore]\n\n Returns:\n root (zarr.hierarchy.Group): initialized zarr group\n \"\"\"\n\n store = None\n if file_format == \"DirectoryStore\":\n store = zarr.DirectoryStore(path)\n\n if file_format == \"ZipStore\":\n store = zarr.ZipStore(path, mode='w')\n\n # create the root group\n root = zarr.group(store, overwrite=True)\n\n # add some readme for the user\n root.attrs['README'] = \"The schema adopted in this zarr store may undergo changes in the future\"\n root.attrs['sample_id'] = sample_id\n\n # now iterate through list of expected groups and create them\n for dataset in ZARR_GROUP:\n root.create_group(dataset, overwrite=True)\n\n return root\n\n\ndef add_gene_metrics(data_group, input_path, gene_ids, verbose=False):\n \"\"\"Converts the gene metrics from the Optimus pipeline to zarr file\n\n Args:\n data_group (zarr.hierarchy.Group): datagroup object for the zarr\n input_path (str): file containing gene metrics name and values\n gene_ids (list): list of gene ids\n verbose (bool): whether to output verbose messages for debugging purposes\n \"\"\"\n\n # read the gene metrics names and values\n if input_path.endswith(\".gz\"):\n with gzip.open(input_path, 'rt') as f:\n gene_metrics = [row for row in csv.reader(f)]\n else:\n with open(input_path, 'r') as f:\n gene_metrics = [row for row in csv.reader(f)]\n\n # metric names we use [1:] to remove the empty string\n if len(gene_metrics[0][1:]):\n data_group.create_dataset(\n \"gene_metadata_numeric_name\",\n shape=(len(gene_metrics[0][1:]),),\n compressor=COMPRESSOR,\n dtype=\" 
csr_exp_counts.shape[0]:\n p = csr_exp_counts.shape[0] - i\n\n # check if it is possible to make a full row chunk of data, otherwise adjust to the correct size\n q = CHUNK_COL_SIZE\n if j + CHUNK_COL_SIZE > csr_exp_counts.shape[1]:\n q = csr_exp_counts.shape[1] - j\n\n # insert the chunk\n exp_counts_group[i:i + p, j:j + q] = csr_exp_counts[i:i + p, j:j + q].toarray()\n\n return cell_ids, gene_ids\n\n\ndef create_zarr_files(args):\n \"\"\"This function creates the zarr file or folder structure in output_zarr_path in format file_format,\n with sample_id from the input folder analysis_output_path\n\n Args:\n args (argparse.Namespace): input arguments for the run\n \"\"\"\n # initiate the zarr file\n root_group = init_zarr(args.sample_id, args.output_zarr_path, args.zarr_format)\n\n # add the expression count matrix data\n cell_ids, gene_ids = add_expression_counts(root_group['expression_matrix'], args)\n\n # add the the gene metrics\n add_gene_metrics(root_group['expression_matrix'], args.gene_metrics, gene_ids, args.verbose)\n\n # add the the cell metrics\n add_cell_metrics(root_group['expression_matrix'], args.cell_metrics, cell_ids, args.verbose)\n\n\ndef main():\n description = \"\"\"This script converts the some of the Optimus outputs in to\n zarr format (https://zarr.readthedocs.io/en/stable/) relevant output. \n This script can be used as a module or run as a command line script.\"\"\"\n\n parser = argparse.ArgumentParser(description=description)\n\n parser.add_argument('--cell_metrics',\n dest=\"cell_metrics\",\n required=True,\n help='a .csv file path for the cell metrics, an output of the MergeCellMetrics task')\n\n parser.add_argument('--gene_metrics',\n dest=\"gene_metrics\",\n required=True,\n help='a .csv file path for the gene metrics, an output of the MergeGeneMetrics task')\n\n parser.add_argument('--cell_id',\n dest=\"cell_ids\",\n required=True,\n help='a .npy file path for the cell barcodes, an output of the MergeCountFiles task')\n\n parser.add_argument('--gene_id',\n dest=\"gene_ids\",\n required=True,\n help='a .npy file path for the gene ids, an output of the MergeCountFiles task')\n\n parser.add_argument('--count_matrix',\n dest=\"count_matrix\",\n required=True,\n help='a .npz file path for the count matrix, an output of the MergeCountFiles task')\n\n parser.add_argument('--output_path_for_zarr',\n dest=\"output_zarr_path\",\n required=True,\n help='path to .zarr file is to be created')\n\n parser.add_argument('--sample_id',\n dest=\"sample_id\",\n default=\"Unknown sample\",\n help='the sample name in the bundle')\n\n parser.add_argument('--format',\n dest=\"zarr_format\",\n default=\"DirectoryStore\",\n choices=[\"DirectoryStore\", \"ZipStore\"],\n help='format of the zarr file choices: [DirectoryStore, ZipStore] default: DirectoryStore')\n\n parser.add_argument('--verbose',\n dest=\"verbose\",\n action=\"store_true\",\n help='whether to output verbose debugging messages')\n\n args = parser.parse_args()\n\n create_zarr_files(args)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"docker/python3-scientific/create_zarr_optimus.py","file_name":"create_zarr_optimus.py","file_ext":"py","file_size_in_byte":14776,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"585049046","text":"#! 
/usr/bin/python\n# read the file extract trainSize, accuracy\n\nimport Gnuplot;\n\nfile = open(\"myResultsto500.txt\");\noutputfile = open(\"check-out.txt\",\"w\");\n#outputfile.write(\"#K-trainSize k-neighbours accuracy \\n\");\n\nX=[];\nY=[];\n#Y.append(0);\nfor line in file:\n\tline=line.replace(\"=\",\",\");\n\tcolumns = line.split(\",\");\n\t#print columns;\n\tcolumns[7]=columns[7].replace(\" \\n\",\"\");\n\t#outputfile.write(columns[1] + \"\\t\" + columns[3] + \"\\t\" + columns[7]);\n\tif(columns[1]==\"25\" and columns[5]==\"easy\" ):\n\t\tX.append(columns[3]);\n\t\tY.append(columns[7]);\n\t\t#print Y;\n\tif(columns[1]!=\"25\"):\n\t\tgp = Gnuplot.Gnuplot();\n\t\ttitle=\"Number of nearest neighbours vs Accuracy (training Size = \" + \"25\" +\" )\";\n\t\tgp('set term png small');\n\t\tgp.title(title);\n\t\txaxis=\"k-nearest neighbours\";\n\t\tyaxis=\"accuracy\";\n\t\tgp('set xlabel \"k-nearest neighbours\"');\n\t\tgp('set ylabel \"Accuracy\"');\n\t\tgp('set xrange [1:10]');\n\t\tgp('set xtics 1.0');\n\t\tgp('set style data linespoints');\n\t\tgp('set out \"output.png\"');\n\t\tgp.plot(Y);\n\t\tbreak;\n\n\nfile.close();\n#outputfile.close();\n","sub_path":"plot-scripts/check-gnuplot-script.py","file_name":"check-gnuplot-script.py","file_ext":"py","file_size_in_byte":1037,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"344487117","text":"import pandas as pd\nimport numpy as np\nimport preprocessor as tp\nfrom shapely.geometry import Polygon, Point\nimport geopandas as gpd\nimport json\nimport os\n\n\ndef match_location(x, loc_list, gdf):\n matching = [s for s in loc_list if s in x]\n if len(matching) > 0:\n loc = matching[0]\n for col in ['ADM1_EN', 'ADM2_EN', 'ADM3_EN', 'ADM3_REF', 'ADM3ALT1EN', 'ADM3ALT2EN']:\n gdf_loc = gdf[gdf[col].str.lower() == loc]\n if not gdf_loc.empty:\n return gdf_loc['geometry'].values[0]\n return np.nan\n else:\n return np.nan\n\n\ndef extract_location(x):\n if not pd.isna(x):\n bbox = Polygon(x['bounding_box']['coordinates'][0])\n centroid = bbox.centroid.coords\n return Point(centroid)\n else:\n return np.nan\n\n\n# load vector file containing location names\ngdf = gpd.read_file(r\"C:\\Users\\JMargutti\\OneDrive - Rode Kruis\\Rode Kruis\\ERA\\shapefiles\\phl_admbndp_admALL_psa_namria_itos_20200529.shp\", encoding='utf8')\nlocations_all = gdf['ADM1_EN'].unique().tolist() + gdf['ADM2_EN'].unique().tolist() + gdf['ADM3_EN'].unique().tolist() + gdf['ADM3_REF'].dropna().unique().tolist() + gdf['ADM3ALT1EN'].dropna().unique().tolist() + gdf['ADM3ALT2EN'].dropna().unique().tolist()\nprint(f'locations: {len(locations_all)}')\nlocations_all = [x.lower() for x in locations_all]\nlocations_all.remove('bakun')\n\n# load tweets\ntweets = 'tweets/tweets_latest2_geolocated_select.csv'\ndf_tweets = pd.DataFrame()\nfor file in os.listdir('tweets'):\n if file.endswith('.json'):# and ('15-02' in file or '22-02' in file):\n print(file)\n df_tweets_ = pd.read_json(os.path.join('tweets', file), lines=True)\n df_tweets = df_tweets.append(df_tweets_, ignore_index=True)\n\n# drop duplicates\ndf_tweets = df_tweets.drop_duplicates(subset=['id'])\nprint(df_tweets.head())\n\n# parse locations (geolocation)\ndf_tweets['coord'] = df_tweets['place'].apply(extract_location)\ndf_tweets['full_text_clean'] = df_tweets['full_text'].apply(tp.clean)\ndf_tweets['full_text_clean'] = df_tweets['full_text_clean'].str.lower()\ndf_tweets['location'] = df_tweets['full_text_clean'].apply(lambda x: match_location(x, locations_all, 
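gdf))\n# (added note) match_location scans the ADM1-ADM3 name columns (including the\n# ADM3_REF/ADM3ALT alternate spellings) and returns the first geometry whose\n# name occurs in the cleaned tweet text.\n# original line, completed above: df_tweets['location'] = df_tweets['full_text_clean'].apply(lambda x: match_location(x, locations_all, 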
gdf))\ndf_tweets['coord'] = df_tweets['coord'].fillna(df_tweets['location'])\ndf_tweets = df_tweets.drop(columns=['location', 'place'])\nprint(df_tweets.head())\nprint(f'{len(df_tweets)} tweets, {df_tweets.coord.count()} geo-located')\n\n# save as csv\ndf_tweets.to_csv(tweets)\n\n# save as geojson\ngdf = gpd.GeoDataFrame(df_tweets[~pd.isna(df_tweets.coord)], geometry='coord')\nout_file = tweets.replace('.csv', '.geojson')\ngdf[['id', 'coord']].to_file(out_file, driver='GeoJSON')\n\n\n","sub_path":"parse_geolocate_tweets.py","file_name":"parse_geolocate_tweets.py","file_ext":"py","file_size_in_byte":2661,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"179296024","text":"import pygame\nfrom sounds import SoundEffect\nimport settings\nfrom random import randint\n\n\n# application is here\nclass Paddle(pygame.sprite.Sprite):\n\n def __init__(self, color, size, pos):\n # Call the parent class (Sprite) constructor\n super().__init__()\n\n # Pass in the color of the car, and its x and y position, width and height.\n # Set the background color and set it to be transparent\n self.image = pygame.Surface(size)\n self.image.fill(settings.BLACK)\n self.image.set_colorkey(settings.BLACK)\n self.collision = [False]\n\n # Draw the paddle (a rectangle!)\n pygame.draw.rect(self.image, color, [0, 0, size[0], size[1]])\n\n # Fetch the rectangle object that has the dimensions of the image.\n self.rect = self.image.get_rect()\n self.rect.topleft = pos\n self.sfx = SoundEffect(settings.DEAD, 1.0)\n self.sfx2 = SoundEffect(settings.OOF, 1.0)\n self.sfx3 = SoundEffect(settings.DOH, 1.0)\n self.sfx4 = SoundEffect(settings.BRUH, 1.0)\n self.sfx5 = SoundEffect(settings.MIF, 1.0)\n\n def sound_effect(self):\n if settings.AUDIO == True:\n generate = randint(1,6)\n if generate == 1:\n self.sfx.play()\n elif generate == 2:\n self.sfx2.play()\n elif generate == 3:\n self.sfx3.play()\n elif generate == 4:\n self.sfx4.play()\n elif generate == 5:\n self.sfx5.play()\n else:\n self.sfx.play()\n \"\"\"Sound effect for the hitmark when on collision. 
or pong\"\"\"\n    \n    def moveUp(self, pixels):\n        self.rect.y -= pixels\n        # Check that you are not going too far (off the screen)\n        if self.rect.y < 0:\n            self.rect.y = 0\n    def moveDown(self, pixels):\n        self.rect.y += pixels\n        # Check that you are not going too far (off the screen)\n        if self.rect.y > 615:\n            self.rect.y = 615\n\n    def moveLeft(self, pixels):\n        self.rect.x -= pixels\n        if self.rect.x < 0:\n            self.rect.x = 0\n\n    def moveRight(self, pixels):\n        self.rect.x += pixels\n        if self.rect.x > 615:\n            self.rect.x = 615\n","sub_path":"paddle.py","file_name":"paddle.py","file_ext":"py","file_size_in_byte":2261,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"56756548","text":"import random\n\ndef helloFunction(rangeOfList, maxValueOfList):\n\n    numbers = [random.randint(0, maxValueOfList) for i in range(rangeOfList)]\n    print('Your list: ', numbers)\n    filtered = [k for k in numbers if k > 7]\n    print('Your list with numbers greater than 7: ', filtered)\n\nhelloFunction(int(input('Enter the list length: ')), int(input('Maximum possible value of a list element: ')))\n\n\n\n\n\n\n\n","sub_path":"dz4/2.py","file_name":"2.py","file_ext":"py","file_size_in_byte":484,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"299732129","text":"#!flask/bin/python\n\nimport config\nfrom flask import current_app\nimport os.path\n\n########## usually called in run.py ################\nfrom app import create_app, db\n\nfrom app.models import *\n\napp = create_app() \n########## usually called in run.py ################\nSQLALCHEMY_DATABASE_URI = app.config.get('SQLALCHEMY_DATABASE_URI')\n#SQLALCHEMY_MIGRATE_REPO = app.config.get('SQLALCHEMY_MIGRATE_REPO')\n\n#do everything from https://www.mbeckler.org/blog/?p=218\nfrom sqlalchemy import *\nfrom sqlalchemy.schema import *\nfrom sqlalchemy.engine import reflection\n#do everything from https://www.mbeckler.org/blog/?p=218\n#from flask.ext.sqlalchemy import SQLAlchemy\n\n#from https://github.com/numpy/numpy/issues/7556\n#import collections\n#import numpy\n\n\n#FROM https://www.mbeckler.org/blog/?p=218\t\nfrom sqlalchemy.engine import reflection\nfrom sqlalchemy.schema import (\n    MetaData,\n    Table,\n    DropTable,\n    ForeignKeyConstraint,\n    DropConstraint,\n    )\n\ndef db_DropEverything(app, db):\n\t# From http://www.sqlalchemy.org/trac/wiki/UsageRecipes/DropEverything\n\twith app.app_context():\n\t\tconn=db.engine.connect()\n\n\t\t# the transaction only applies if the DB supports\n\t\t# transactional DDL, i.e. 
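engines that support it.\n\t\t# (added summary, for clarity) the drop sequence is:\n\t\t#   1. reflect table names and named FK constraints via the inspector\n\t\t#   2. drop every FK constraint first, so the tables can then be\n\t\t#      dropped in any order without dependency errors\n\t\t#   3. drop the tables, commit, and rebuild the schema with create_all()\n\t\t# transactional-DDL engines are e.g. 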
Postgresql, MS SQL Server\n\t\ttrans = conn.begin()\n\n\t\tinspector = reflection.Inspector.from_engine(db.engine)\n\n\t\t# gather all data first before dropping anything.\n\t\t# some DBs lock after things have been dropped in \n\t\t# a transaction.\n\t\tmetadata = MetaData()\n\n\t\ttbs = []\n\t\tall_fks = []\n\n\t\tfor table_name in inspector.get_table_names():\n\t\t\tfks = []\n\t\t\tfor fk in inspector.get_foreign_keys(table_name):\n\t\t\t\tif not fk['name']:\n\t\t\t\t\tcontinue\n\t\t\t\tfks.append(\n\t\t\t\t\tForeignKeyConstraint((),(),name=fk['name'])\n\t\t\t\t\t)\n\t\t\tt = Table(table_name,metadata,*fks)\n\t\t\ttbs.append(t)\n\t\t\tall_fks.extend(fks)\n\n\t\tfor fkc in all_fks:\n\t\t\tconn.execute(DropConstraint(fkc))\n\n\t\tfor table in tbs:\n\t\t\tconn.execute(DropTable(table))\n\t\t\n\t\ttrans.commit()\t\t\t\n\t\t#FROM https://www.mbeckler.org/blog/?p=218\t\n\t\t\t\n\t\tdb.reflect()\n\t\tdb.drop_all()\n\t\tdb.create_all()\n\t\t\n\t\t#db.session.commit() Check if changes are actually made even without this cmd\t\n\t\t\ndb_DropEverything(app, db)\n\n\n\t","sub_path":"db_create.py","file_name":"db_create.py","file_ext":"py","file_size_in_byte":2159,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"273590649","text":"import matplotlib\nfrom pathos.multiprocessing import ProcessingPool as Pool\n\nmatplotlib.use('TkAgg')\n\nimport spectroseti.runner as runner\n\nobservations = [['awx', 222]] # ,[],['awx',223],['awx',224]]\nbacrun = [['bac', 196],\n ['bac', 273]]\n\nLS = runner.LaserSearch()\n\n#LS.search_multiple(observations, output_pngs=1, number_mads=8)\n\np = Pool(6)\nsearch_multi = lambda x: LS.search_multiple([x], output_pngs=1,number_mads=10)\n\npool_output = Pool.map(p, search_multi, bacrun)","sub_path":"scripts/one_run_cadence.py","file_name":"one_run_cadence.py","file_ext":"py","file_size_in_byte":481,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"358037104","text":"import requests\r\nimport xlwt\r\n# 读取数据(API),相应调取参数参见国家气象信息中心API说明\r\noutputs = requests.get(\"API\")\r\noutputs.encoding = 'utf-8'\r\n# 创建excel表格\r\nworkbook = xlwt.Workbook(encoding='utf-8')\r\nworksheet = workbook.add_sheet('sheet1')\r\n# 写入表头\r\ndata = outputs.json()['DS']\r\ntitle = list(data[0].keys())\r\nfor i in range(len(title)):\r\n worksheet.write(0, i, title[i])\r\n# 写入数据\r\nfor j in range(len(data)):\r\n values = list(data[j].values())\r\n for k in range(len(values)):\r\n worksheet.write(j+1, k, values[k])\r\n# 保存到当前目录\r\nworkbook.save('test1.xls')\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"气象资料获取.py","file_name":"气象资料获取.py","file_ext":"py","file_size_in_byte":640,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"1796973","text":"#-*- coding:utf-8 -*-\n# ==============================================================================\n# Copyright 2016 Windy Darian (Ruoyu Fan)\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# 
==============================================================================\n'''\nCreated on June 24, 2016\nhttps://leetcode.com/problems/sliding-window-maximum/\n@author: Windy Darian (Ruoyu Fan)\n'''\n\nfrom collections import deque\n\nclass SlidingMaxWindow(object):\n\n    def __init__(self):\n        self.__max_queue = deque()\n        self.__non_max_count = 0\n        # or i can just use an ever-increasing index\n\n    def append(self, value):\n        '''push a value to the back of the window'''\n        empty_count = 0\n        while (self.__max_queue and value > self.__max_queue[-1][0]):\n            # remove anything smaller than current value, they are useless\n            empty_count += self.__max_queue.pop()[1] + 1\n        self.__max_queue.append( (value, empty_count) )\n\n    def pop(self):\n        '''pop out the left value of the window'''\n        if (self.__non_max_count >= self.__max_queue[0][1]):\n            self.__max_queue.popleft()\n            self.__non_max_count = 0\n        else:\n            self.__non_max_count += 1\n\n    def max(self):\n        return self.__max_queue[0][0]\n\n\nclass Solution(object):\n    def maxSlidingWindow(self, nums, k):\n        \"\"\"\n        :type nums: List[int]\n        :type k: int\n        :rtype: List[int]\n        \"\"\"\n        if k <= 0: return None\n\n        result = []\n        window = SlidingMaxWindow()\n        for i in range(k):\n            window.append(nums[i])\n        result.append(window.max())\n        for i in range(k, len(nums)):\n            window.append(nums[i])\n            window.pop()\n            result.append(window.max())\n        return result\n","sub_path":"leetcode/_239_sliding_window_maximum.py","file_name":"_239_sliding_window_maximum.py","file_ext":"py","file_size_in_byte":2325,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"359989368","text":"#! /usr/bin/env python\n# -*- coding:utf-8 -*-\n\"\"\"\n@Time: 2020/3/28 3:23 PM\n@Author: cuberqiu\n@File: count_degree.py\n@Description: \n\"\"\"\n\n# Represent an edge as a dict\nedge = {\n    \"actor1\": None,\n    \"actor2\": None,\n    \"num_movies\": None\n}\n\nnode = {\n    \"id\": None,\n    \"name\": None,\n    \"movies_95_04\": None,\n    \"main_genre\": None,\n    \"genres\": None\n}\n\nnode_xy = {\n    \"degree\": None,\n    \"node_number\": None\n}\n\n\ndef init_edge(edge_file_path):\n\n    # Store all edges in a list\n    edge_list = list()\n\n    with open(edge_file_path, 'r') as f:\n        # read the first (header) line\n        line = f.readline()\n\n        while line:\n            line = f.readline()\n            line = line.strip('\\n').strip()\n\n            if line:\n                edge_element = line.split('\\t')\n\n                edge_list.append({\n                    \"actor1\": edge_element[0],\n                    \"actor2\": edge_element[1],\n                    \"num_movies\": edge_element[2]\n                })\n\n    return edge_list\n\n\ndef init_node(node_file_path):\n\n    # Store all nodes in a dict\n    nodes = dict()\n\n    with open(node_file_path, 'r', encoding=\"utf-8\") as f:\n        line = f.readline()\n\n        while line:\n            line = f.readline()\n            line = line.strip('\\n').strip()\n\n            if line:\n                node_element = line.split('\\t')\n                nodes[node_element[0]] = {\n                    \"name\": node_element[1],\n                    \"movies_95_04\": node_element[2],\n                    \"main_genre\": node_element[3],\n                    \"genres\": node_element[4],\n                    \"degree\": 0\n                }\n\n    return nodes\n\n\ndef count_degree(edges, nodes):\n    for edge in edges:\n        actor1 = edge['actor1']\n        actor2 = edge['actor2']\n\n        nodes[actor1]['degree'] = nodes[actor1]['degree'] + 1\n        nodes[actor2]['degree'] = nodes[actor2]['degree'] + 1\n\n    return nodes\n\n\ndef sort_node_degree(nodes:dict):\n    nodes_degree = list()\n\n    for node_id, value in nodes.items():\n        nodes_degree.append({\n            \"node_id\": node_id,\n            \"degree\": value['degree']\n        })\n\n    nodes_degree.sort(key=lambda node : node['degree'], reverse=True)\n\n    result = list()\n\n    count = 0\n\n    for node in nodes_degree:\n        if count < 20:\n            
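# (added comment) take the first 20 entries of the degree-sorted list\n            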
result.append(node)\n else:\n if node['degree'] == result[19]['degree']:\n result.append(node)\n count = count + 1\n\n return result\n\n\ndef count_degree_numbers(node_degree:dict):\n result = dict()\n\n for node_id, value in node_degree.items():\n degree = value['degree']\n result[degree] = 0\n\n for node_id, value in node_degree.items():\n degree = value['degree']\n result[degree] = result[degree] + 1\n\n return result\n\n\nif __name__ == \"__main__\":\n edges = init_edge(\"./imdb_actor_edges.tsv\")\n nodes = init_node(\"./imdb_actors_key.tsv\")\n\n nodes = count_degree(edges, nodes)\n\n top_nodes = sort_node_degree(nodes)\n\n degree_number = count_degree_numbers(nodes)\n\n for node_degree, number in degree_number.items():\n print(\"{} : {}\".format(node_degree, number))\n\n\n","sub_path":".ipynb_checkpoints/util-checkpoint.py","file_name":"util-checkpoint.py","file_ext":"py","file_size_in_byte":3138,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"140437224","text":"import gym\nimport numpy as np\nimport monte_carlo as mc\nfrom collections import defaultdict\n\nenvironment_name = 'Blackjack-v0'\nenv = gym.make(environment_name)\ngamma = 1\n\n# Create a set of dictionaries to collect the results\ntotal_returns = defaultdict(float)\naction_count = defaultdict(float)\naverageQ = defaultdict(lambda: np.zeros(env.action_space.n))\n\n\n\n\nfor ii in range(100):\n # Steps, generate episode\n # Returns a embedded list of [[state, state, state, action, reward]......[S,S,S,A,R]]\n episode = mc.run_episode(env)\n\n # re-arrange the state, action reward tuple?\n state_action = [[tuple(sa[:4]), [sa[-1]]] for sa in episode]\n sa_log = []\n # Loop over each element of the state action tuple?!\n for ii, item in enumerate(state_action):\n # log the state action pair\n if item[0] in sa_log:\n print(True)\n else:\n print(False)\n state = item[0][:3]\n action = item[0][-1]\n sa_log.append(item[0])\n total_returns[item[0]] += gamma**ii + item[1][0]\n action_count[item[0]] += 1\n averageQ[state][action] = total_returns[item[0]]/action_count[item[0]]\n\n\n\n","sub_path":"monte_carlo_methods/scratch.py","file_name":"scratch.py","file_ext":"py","file_size_in_byte":1182,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"648413145","text":"from pymongo import MongoClient\nfrom datetime import datetime\nfrom bson import ObjectId\nfrom config import config\nimport re\n\nclass Database(object):\n def __init__(self):\n self.client = MongoClient(config['db']['url'])\n self.db = self.client[config['db']['name']]\n \n def insert(self,db_object,collection,field_name):\n db_object['created_time'],db_object['updated_time'] = datetime.now(),datetime.now()\n insert_obj = self.db[collection].update_one({field_name:re.compile(db_object[field_name],re.IGNORECASE)},{'$set':db_object},upsert = True)\n return insert_obj.acknowledged\n\n def insert_many(self,db_object,collection):\n data = []\n for obj in db_object:\n obj['created_time'],obj['updated_time'] = datetime.now(),datetime.now()\n data.append(obj)\n insert_obj = self.db[collection].insert_many(data)\n return insert_obj.acknowledged\n \n def find(self,collection):\n cursor,records = self.db[collection].find(),[]\n if cursor:\n for obj in cursor:\n del obj['_id']\n records.append(obj)\n cursor.close()\n return records\n\n def find_by_role(self,role,collection):\n cursor,records = self.db[collection].find({'role_name':re.compile(role,re.IGNORECASE)}),[]\n if 
cursor:\n for obj in cursor:\n del obj['_id']\n records.append(obj)\n cursor.close()\n return records\n\n def find_by_record(self,record,collection,field_name):\n obj = self.db[collection].find_one({field_name:re.compile(record,re.IGNORECASE)})\n if obj: \n del obj['_id']\n return obj\n return None\n\n def update(self,username,data_obj,collection,field_name):\n data_obj['updated_time'] = datetime.now()\n update_obj = self.db[collection].update_one({field_name:re.compile(username,re.IGNORECASE)},{'$set':data_obj})\n if update_obj.matched_count == 1: return 'Record Successfully Updated'\n else: return 'Record Updation Failed'\n \n def delete(self,username,collection,field_name):\n delete_obj = self.db[collection].delete_one({field_name:re.compile(username,re.IGNORECASE)})\n return bool(delete_obj.deleted_count)\n\n def search(self,query_object,collection):\n query,records = query_object['query'].split(),[]\n regex_query = [re.compile(qry,re.IGNORECASE) for qry in query]\n search_field = 'question' if query_object['doc_type'].lower() == 'faq' else 'title'\n cursor = self.db[collection].find({'$and':[{'knowledge_name':re.compile(query_object['knowledge_name'],re.IGNORECASE)},\n {'category':re.compile(query_object['category'],re.IGNORECASE)},{'doc_type':re.compile(query_object['doc_type'],re.IGNORECASE)},\n {search_field:{'$in':regex_query}}]})\n\n if cursor:\n for obj in cursor:\n del obj['_id']\n records.append(obj)\n \n cursor.close()\n return records","sub_path":"assignment/src/factory/database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":3075,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"175626073","text":"\"\"\"The tools to read, write, audit, transform, wash the data.\"\"\"\nfrom typing import Any\nfrom typing import Callable\nfrom typing import Generator\n\n\ndef paths(dct: Any, path=()) -> Generator:\n \"\"\"Yield paths to the leafs of a nested dictionary.\"\"\"\n if isinstance(dct, dict):\n for key, value in dct.items():\n yield from paths(value, path + (key,))\n elif isinstance(dct, (tuple, list)):\n for ind, elem in enumerate(dct):\n yield from paths(elem, path + (ind,))\n else:\n yield path + (dct,)\n\n\ndef iter_dct(dct: dict, operation: Callable):\n \"\"\"Recursively iterate the dictionary and operate on the leaf nodes. 
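An\n    illustrative example (added commentary, not from the original module):\n\n        >>> iter_dct({'a': 1, 'b': {'c': 2}}, lambda v: v * 10)\n        {'a': 10, 'b': {'c': 20}}\n\n    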
Return the processed dictionary.\"\"\"\n dct2 = dict()\n for key, value in dct.items():\n if isinstance(value, dict):\n dct2[key] = iter_dct(value, operation)\n else:\n dct2[key] = operation(value)\n return dct2\n\n\ndef to_dict(obj: object, exclude: Callable[[str], bool] = None) -> dict:\n \"\"\"Convert an object to a dictionary by keeping import attribute information.\"\"\"\n\n def pdfconfig_exclude(attr: str) -> bool:\n \"\"\"Whether to exclude an attribute in pdfconfig.\"\"\"\n if attr.startswith(\"_\"):\n return True\n if attr in (\"inputfiles\", \"output\", \"interact\"):\n return True\n return False\n\n dct = {}\n if exclude is None:\n exclude = pdfconfig_exclude\n for attr_key, attr_val in obj.__dict__.items():\n if not exclude(attr_key):\n dct[attr_key] = attr_val\n return dct\n\n\ndef get_value(dct: dict, keys: tuple) -> Any:\n \"\"\"Get the value by walking along a key chain.\"\"\"\n result = dct\n for key in keys:\n result = result[key]\n return result\n","sub_path":"pdfstream/utils/dct_ops.py","file_name":"dct_ops.py","file_ext":"py","file_size_in_byte":1732,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"443782256","text":"import cv2\nimport numpy as np\n\n\nclass PanelLayout(object):\n def __init__(self, cap):\n self.cv2_width = cap.get(cv2.CAP_PROP_FRAME_WIDTH)\n self.cv2_height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)\n self.cv2_fps = cap.get(cv2.CAP_PROP_FPS)\n\n self.start_width = None\n self.start_height = None\n self.monitor_width = None\n self.monitor_height = None\n\n self.monitor_pt1 = None\n self.monitor_pt2 = None\n\n def draw_data(self, start_width_per=0.06, start_height_per=0.1, monitor_width_per=0.7, monitor_height_per=0.8):\n self.start_width = int(self.cv2_width * start_width_per)\n self.start_height = int(self.cv2_height * start_height_per)\n self.monitor_width = int(self.cv2_width * monitor_width_per)\n self.monitor_height = int(self.cv2_height * monitor_height_per)\n\n self.monitor_pt1 = (self.start_width, self.start_height)\n self.monitor_pt2 = (self.start_width + self.monitor_width, self.start_height + self.monitor_height)\n\n def draw_act(self, img):\n print(f\"width: {self.cv2_width}; height: {self.cv2_height}; fps: {self.cv2_fps}\")\n sub_img = img[self.start_height:(self.start_height + self.monitor_height),\n self.start_width:(self.start_width + self.monitor_width)]\n white_rect = np.full(shape=sub_img.shape, fill_value=255, dtype=np.uint8)\n res = cv2.addWeighted(sub_img, 0.5, white_rect, 0.5, 1.0)\n img[self.start_height:(self.start_height + self.monitor_height),\n self.start_width:(self.start_width + self.monitor_width)] = res\n\n cv2.rectangle(img=img, pt1=self.monitor_pt1, pt2=self.monitor_pt2, color=(0, 0, 0), thickness=2)\n\n # print(f\"fps: {self.cv2_fps}\")\n\n # def __call__(self, *args, **kwargs):\n # self.draw_data()\n\n\ndef main():\n # init cv\n cap = cv2.VideoCapture(0)\n panellayout = PanelLayout(cap=cap)\n panellayout.draw_data()\n\n while cap.isOpened():\n success, img = cap.read()\n if img is not None:\n img = cv2.flip(img, flipCode=1)\n panellayout.draw_act(img=img)\n cv2.imshow(winname=\"universal control\", mat=img)\n cv2.waitKey(1)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"gethand/universal_control_cv.py","file_name":"universal_control_cv.py","file_ext":"py","file_size_in_byte":2236,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"235593227","text":"from itertools import repeat, cycle\nimport numpy as 
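np\n\n# (added sketch of the idea behind this script, not original code)\n# interpolation consistency training mixes two unlabeled points\n#     x_mix = lam * u_i + (1 - lam) * u_j,   lam ~ Beta(alpha, alpha)\n# and pushes model(x_mix) toward the interpolated mean-teacher targets\n#     lam * f_ema(u_i) + (1 - lam) * f_ema(u_j)\n# numpy's usual alias, used throughout: 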
np\nimport argparse\nimport torch as pt\nimport torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\nimport losses,ramps\nfrom collections import OrderedDict\nimport pickle\nimport os\nimport random\nfrom MLP import MLP\nimport torch.utils.data\nfrom dataset import load_dataset\n\n#use_cuda = torch.cuda.is_available()\nuse_cuda = True\ndevice = 'cuda'\n#device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\nos.environ[\"CUDA_DEVICE_ORDER\"]=\"PCI_BUS_ID\"\n\ndef adjust_learning_rate(optimizer, epoch, step_in_epoch, total_steps_in_epoch):\n lr = args.lr\n epoch = epoch + step_in_epoch / total_steps_in_epoch\n\n # LR warm-up to handle large minibatch sizes from https://arxiv.org/abs/1706.02677\n lr = ramps.linear_rampup(epoch, args.lr_rampup) * (args.lr - args.initial_lr) + args.initial_lr\n\n # Cosine LR rampdown from https://arxiv.org/abs/1608.03983 (but one cycle only)\n if args.lr_rampdown_epochs:\n assert args.lr_rampdown_epochs >= args.epochs\n lr *= ramps.cosine_rampdown(epoch, args.lr_rampdown_epochs)\n\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n return lr\n\n\ndef adjust_learning_rate_step(optimizer, epoch, gammas, schedule):\n \"\"\"Sets the learning rate to the initial LR decayed by 10 every 30 epochs\"\"\"\n lr = args.lr\n assert len(gammas) == len(schedule), \"length of gammas and schedule should be equal\"\n for (gamma, step) in zip(gammas, schedule):\n if (epoch >= step):\n lr = lr * gamma\n else:\n break\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n return lr\n\n\ndef mixup_data(x, y, alpha=1.0):\n '''Compute the mixup data. Return mixed inputs, mixed target, and lambda'''\n if alpha > 0.:\n lam = np.random.beta(alpha, alpha)\n else:\n lam = 1.\n batch_size = x.size()[0]\n index = np.random.permutation(batch_size)\n x, y = x.data.cpu().numpy(), y.data.cpu().numpy()\n mixed_x = torch.Tensor(lam * x + (1 - lam) * x[index, :])\n mixed_y = torch.Tensor(lam * y + (1 - lam) * y[index, :])\n\n mixed_x = Variable(mixed_x.to(device))\n mixed_y = Variable(mixed_y.to(device))\n return mixed_x, mixed_y, lam\n\n\ndef accuracy(output, target, topk=(1,)):\n \"\"\"Computes the precision@k for the specified values of k\"\"\"\n maxk = max(topk)\n # labeled_minibatch_size = max(target.ne(NO_LABEL).sum(), 1e-8).type(torch.cuda.FloatTensor)\n minibatch_size = len(target)\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)\n res.append(correct_k.mul_(100.0 / minibatch_size))\n return res\n\n\ndef mixup_data_sup(x, y, alpha=1.0):\n '''Compute the mixup data. 
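Sampling\n    follows the standard mixup recipe (restated for clarity):\n\n        lam ~ Beta(alpha, alpha)   (lam = 1 when alpha == 0)\n        mixed_x = lam * x + (1 - lam) * x[perm]\n\n    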
Return mixed inputs, pairs of targets, and lambda'''\n if alpha > 0.:\n lam = np.random.beta(alpha, alpha)\n else:\n lam = 1.\n batch_size = x.size()[0]\n index = np.random.permutation(batch_size)\n # x, y = x.numpy(), y.numpy()\n # mixed_x = torch.Tensor(lam * x + (1 - lam) * x[index,:])\n mixed_x = lam * x + (1 - lam) * x[index, :]\n # y_a, y_b = torch.Tensor(y).type(torch.LongTensor), torch.Tensor(y[index]).type(torch.LongTensor)\n y_a, y_b = y, y[index]\n return mixed_x, y_a, y_b, lam\n\n\ndef get_current_consistency_weight(final_consistency_weight, epoch, step_in_epoch, total_steps_in_epoch):\n # Consistency ramp-up from https://arxiv.org/abs/1610.02242\n epoch = epoch - args.consistency_rampup_starts\n epoch = epoch + step_in_epoch / total_steps_in_epoch\n return final_consistency_weight * ramps.sigmoid_rampup(epoch,\n args.consistency_rampup_ends - args.consistency_rampup_starts)\n\ndef mixup_criterion(y_a, y_b, lam):\n return lambda criterion, pred: lam * criterion(pred, y_a.long()) + (1 - lam) * criterion(pred, y_b.long())\n\n\ndef update_ema_variables(model, ema_model, alpha, global_step):\n # Use the true average until the exponential average is more correct\n alpha = min(1 - 1 / (global_step + 1), alpha)\n for ema_param, param in zip(ema_model.parameters(), model.parameters()):\n ema_param.data.mul_(alpha).add_(1 - alpha, param.data)\n\n\nparser = argparse.ArgumentParser(description='Interpolation consistency training')\n\nparser.add_argument('--lr', '--learning-rate', default=0.1, type=float,\n metavar='LR', help='max learning rate')\nparser.add_argument('--initial_lr', default=0.0, type=float,\n metavar='LR', help='initial learning rate when using linear rampup')\nparser.add_argument('--lr_rampup', default=0, type=int, metavar='EPOCHS',\n help='length of learning rate rampup in the beginning')\nparser.add_argument('--lr_rampdown_epochs', default=None, type=int, metavar='EPOCHS',\n help='length of learning rate cosine rampdown (>= length of training): the epoch at which learning rate \\\n reaches to zero')\nparser.add_argument('--schedule', type=int, nargs='+', default=[150, 225], help='Decrease learning rate at these epochs.')\nparser.add_argument('--gpu', '-g', type=int, default=-1,\n help='Zero-origin GPU ID (negative value indicates CPU)')\nparser.add_argument('--rn_method', '-rn', default='rand', type=str, choices=['rand', 'SVM', 'RF', 'distance'],\n help='manual negative mining method')\nparser.add_argument('--pseudo_label', choices=['single', 'mean_teacher'],\n help='pseudo label generated from either a single model or mean teacher model')\nparser.add_argument('--epochs', default=90, type=int, metavar='N',\n help='number of total epochs to run')\nparser.add_argument('--start_epoch', default=0, type=int, metavar='N',\n help='manual epoch number (useful on restarts)')\nparser.add_argument('--ema_decay', default=0.999, type=float, metavar='ALPHA',\n help='ema variable decay rate (default: 0.999)')\nparser.add_argument('--mixup_consistency', default=1.0, type=float,\n help='consistency coeff for mixup usup loss')\nparser.add_argument('--consistency_type', default=\"mse\", type=str, metavar='TYPE', choices=['mse', 'kl'],\n help='consistency loss type to use')\nparser.add_argument('--consistency_rampup_starts', default=30, type=int, metavar='EPOCHS',\n help='epoch at which consistency loss ramp-up starts')\nparser.add_argument('--consistency_rampup_ends', default=30, type=int, metavar='EPOCHS',\n help='lepoch at which consistency loss ramp-up 
ends')\nparser.add_argument('--mixup_sup_alpha', default=1.0, type=float,\n help='for supervised loss, the alpha parameter for the beta distribution from where the mixing lambda is drawn')\nparser.add_argument('--mixup_usup_alpha', default=2.0, type=float,\n help='for unsupervised loss, the alpha parameter for the beta distribution from where the mixing lambda is drawn')\nparser.add_argument('--mixup_hidden', action='store_true',\n help='apply mixup in hidden layers')\nparser.add_argument('--num_mix_layer', default=3, type=int,\n help='number of layers on which mixup is applied including input layer')\nparser.add_argument('--evaluation_epochs', default=1, type=int,\n metavar='EPOCHS', help='evaluation frequency in epochs, 0 to turn evaluation off (default: 1)')\nparser.add_argument('--print_freq', '-p', default=100, type=int,\n metavar='N', help='print frequency (default: 10)')\nparser.add_argument('--nesterov', action='store_true',\n help='use nesterov momentum')\nparser.add_argument('--batchsize', '-b', type=int, default=30000,\n help='Mini batch size')\nparser.add_argument('--dataset', '-d', default='mnist', type=str, choices=['mnist', 'cifar10'],\n help='The dataset name')\nparser.add_argument('--labeled', '-l', default=100, type=int,\n help='# of labeled data')\nparser.add_argument('--unlabeled', '-u', default=49900, type=int,\n help='# of unlabeled data')\nparser.add_argument('--epoch', '-e', default=100, type=int,\n help='# of epochs to learn')\nparser.add_argument('--loss', type=str, default=\"sigmoid\", choices=['logistic', 'sigmoid'],\n help='The name of a loss function')\nparser.add_argument('--model', '-m', default='3lp', choices=['linear', '3lp', 'mlp'],\n help='The name of a classification model')\nparser.add_argument('--stepsize', '-s', default=1e-3, type=float,\n help='Stepsize of gradient method')\nparser.add_argument('--out', '-o', default='result',\n help='Directory to output the result')\n\nbest_prec1 = 0\nglobal_step = 0\nargs = parser.parse_args()\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = str(args.gpu)\n\ndef pre_train(trainloader, model, optimizer, epoch):\n import utils\n\n # switch to train mode\n model.train()\n\n meters = utils.AverageMeterSet()\n class_criterion = nn.CrossEntropyLoss().to(device)\n i = -1\n for (input, target) in trainloader:\n\n #if False:\n if args.mixup_sup_alpha:\n if use_cuda:\n input, target = input.to(device), target.to(device)\n input_var, target_var = Variable(input), Variable(target)\n\n if args.mixup_hidden:\n ### model\n output_mixed_l, target_a_var, target_b_var, lam = model(input_var, target_var, mixup_hidden=True,\n mixup_alpha=args.mixup_sup_alpha,\n layers_mix=args.num_mix_layer)\n lam = lam[0]\n else:\n mixed_input, target_a, target_b, lam = mixup_data_sup(input, target, args.mixup_sup_alpha)\n # if use_cuda:\n # mixed_input, target_a, target_b = mixed_input.cuda(), target_a.cuda(), target_b.cuda()\n mixed_input_var, target_a_var, target_b_var = Variable(mixed_input), Variable(target_a), Variable(target_b)\n ### model\n output_mixed_l = model(mixed_input_var)\n\n loss_func = mixup_criterion(target_a_var, target_b_var, lam)\n class_loss = loss_func(class_criterion, output_mixed_l)\n\n else:\n input_var = torch.autograd.Variable(input.to(device))\n\n target_var = torch.autograd.Variable(target.to(device))\n\n # print (input_var.shape)\n # print (type(input_var))\n output = model(input_var.float())\n\n class_loss = class_criterion(output, target_var.long())\n\n #print(\"class_loss\",class_loss)\n meters.update('class_loss', 
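class_loss.item())\n        # (added note) with mixup active, class_loss is the lam-weighted sum\n        # lam * CE(output, y_a) + (1 - lam) * CE(output, y_b) from mixup_criterion\n        # (original call shown for reference): meters.update('class_loss', 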
class_loss.item())\n loss = class_loss\n #print (\"loss\",loss)\n\n ### get ema loss. We use the actual samples(not the mixed up samples ) for calculating EMA loss\n minibatch_size = len(target_var)\n\n if args.mixup_sup_alpha:\n class_logit = model(input_var)\n else:\n class_logit = output\n\n\n meters.update('loss', loss.item())\n\n prec1, prec5 = accuracy(class_logit.data, target_var.data, topk=(1, 2))\n\n meters.update('top1', prec1[0], minibatch_size)\n meters.update('error1', 100.0 - prec1[0], minibatch_size)\n\n # compute gradient and do SGD step\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n train_class_loss_list.append(meters['class_loss'].avg)\n train_error_list.append(float(meters['error1'].avg))\n train_pre_list.append(meters['top1'].avg)\n # measure elapsed time\n #meters.update('batch_time', time.time() - end)\n #end = time.time()\n\n #print (\"prec1, prec5\",prec1, prec5)\n\ndef pre_train_bak(trainloader, unlabelledloader, model, ema_model, optimizer, epoch):\n import utils\n global global_step\n\n # switch to train mode\n model.train()\n\n meters = utils.AverageMeterSet()\n class_criterion = nn.CrossEntropyLoss().to(device)\n for i, (input, target) in enumerate(trainloader):\n\n #lr = adjust_learning_rate(optimizer, epoch, i, len(unlabelledloader))\n #meters.update('lr', optimizer.param_groups[0]['lr'])\n\n #if False:\n if args.mixup_sup_alpha:\n if use_cuda:\n input, target = input.to(device), target.to(device)\n input_var, target_var = Variable(input), Variable(target)\n\n if args.mixup_hidden:\n ### model\n output_mixed_l, target_a_var, target_b_var, lam = model(input_var, target_var, mixup_hidden=True,\n mixup_alpha=args.mixup_sup_alpha,\n layers_mix=args.num_mix_layer)\n lam = lam[0]\n else:\n mixed_input, target_a, target_b, lam = mixup_data_sup(input, target, args.mixup_sup_alpha)\n # if use_cuda:\n # mixed_input, target_a, target_b = mixed_input.cuda(), target_a.cuda(), target_b.cuda()\n mixed_input_var, target_a_var, target_b_var = Variable(mixed_input), Variable(target_a), Variable(target_b)\n ### model\n output_mixed_l = model(mixed_input_var)\n\n loss_func = mixup_criterion(target_a_var, target_b_var, lam)\n class_loss = loss_func(class_criterion, output_mixed_l)\n\n else:\n input_var = torch.autograd.Variable(input.to(device))\n\n target_var = torch.autograd.Variable(target.to(device))\n\n # print (input_var.shape)\n # print (type(input_var))\n output = model(input_var.float())\n\n class_loss = class_criterion(output, target_var.long())\n\n #print(\"class_loss\",class_loss)\n meters.update('class_loss', class_loss.item())\n loss = class_loss\n #print (\"loss\",loss)\n\n ### get ema loss. 
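(see note)\n        # (added note) pre_train itself has no EMA model; the block below just\n        # recomputes clean logits on the raw inputs for accuracy when mixup\n        # perturbed the training outputs.\n        ### get ema loss. 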
We use the actual samples(not the mixed up samples ) for calculating EMA loss\n minibatch_size = len(target_var)\n\n if args.mixup_sup_alpha:\n class_logit = model(input_var)\n else:\n class_logit = output\n\n\n meters.update('loss', loss.item())\n\n prec1, prec5 = accuracy(class_logit.data, target_var.data, topk=(1, 2))\n\n meters.update('top1', prec1[0], minibatch_size)\n meters.update('error1', 100.0 - prec1[0], minibatch_size)\n\n # compute gradient and do SGD step\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n global_step += 1\n\n train_class_loss_list.append(meters['class_loss'].avg)\n train_error_list.append(float(meters['error1'].avg))\n train_pre_list.append(meters['top1'].avg)\n # measure elapsed time\n #meters.update('batch_time', time.time() - end)\n #end = time.time()\n\n #print (\"prec1, prec5\",prec1, prec5)\n\ndef train(trainloader, unlabelledloader, model, ema_model, optimizer, epoch):\n\n global global_step\n if args.consistency_type == 'mse':\n consistency_criterion = losses.softmax_mse_loss\n elif args.consistency_type == 'kl':\n consistency_criterion = losses.softmax_kl_loss\n else:\n assert False, args.consistency_type\n\n # switch to train mode\n model.train()\n ema_model.train()\n\n\n import utils\n import time\n meters = utils.AverageMeterSet()\n #class_criterion=nn.MSELoss().cuda()\n class_criterion = nn.CrossEntropyLoss().to(device)\n\n pairwise_weight = 1\n i = -1\n for (input, target), (u, _) in zip(cycle(trainloader), unlabelledloader):\n i += 1\n\n #lr = adjust_learning_rate(optimizer, epoch, i, len(unlabelledloader))\n #meters.update('lr', optimizer.param_groups[0]['lr'])\n\n #print (\"target\",target[0:6])\n if input.shape[0] != u.shape[0]:\n bt_size = np.minimum(input.shape[0], u.shape[0])\n input = input[0:bt_size]\n target = target[0:bt_size]\n u = u[0:bt_size]\n\n\n if args.mixup_sup_alpha:\n if use_cuda:\n input, target, u = input.to(device), target.to(device), u.to(device)\n input_var, target_var, u_var = Variable(input), Variable(target), Variable(u)\n\n #IF False:\n if args.mixup_hidden:\n ### model\n output_mixed_l, target_a_var, target_b_var, lam = model(input_var, target_var, mixup_hidden=True,\n mixup_alpha=args.mixup_sup_alpha,\n layers_mix=args.num_mix_layer)\n lam = lam[0]\n else:\n mixed_input, target_a, target_b, lam = mixup_data_sup(input, target, args.mixup_sup_alpha)\n # if use_cuda:\n # mixed_input, target_a, target_b = mixed_input.cuda(), target_a.cuda(), target_b.cuda()\n mixed_input_var, target_a_var, target_b_var = Variable(mixed_input), Variable(target_a), Variable(target_b)\n ### model\n output_mixed_l = model(mixed_input_var.float())\n\n loss_func = mixup_criterion(target_a_var, target_b_var, lam)\n class_loss = loss_func(class_criterion, output_mixed_l)\n output = output_mixed_l\n\n else:\n input_var = torch.autograd.Variable(input.to(device))\n with torch.no_grad():\n u_var = torch.autograd.Variable(u.to(device))\n target_var = torch.autograd.Variable(target.to(device))\n ### model\n output = model(input_var.float())\n\n # sharpening\n #output = output**2 / sum([x**2 for x in output])\n\n #print (\"output\",output[0:6])\n #print (\"target\",target[0:6])\n class_loss = class_criterion(output, target_var.long()) / len(output)\n\n #print(\"class_loss\",class_loss)\n meters.update('class_loss', class_loss.item())\n\n ### get ema loss. 
We use the actual samples(not the mixed up samples ) for calculating EMA loss\n minibatch_size = len(target_var)\n if args.pseudo_label == 'single':\n ema_logit_unlabeled = model(u_var.float())\n ema_logit_labeled = model(input_var.float())\n else:\n ema_logit_unlabeled = ema_model(u_var.float())\n ema_logit_labeled = ema_model(input_var.float())\n if args.mixup_sup_alpha:\n class_logit = model(input_var.float())\n else:\n class_logit = output\n cons_logit = model(u_var.float())\n #print (\"cons_logit\",cons_logit)\n\n ema_logit_unlabeled = Variable(ema_logit_unlabeled.detach().data, requires_grad=False)\n\n # class_loss = class_criterion(class_logit, target_var) / minibatch_size\n\n ema_class_loss = class_criterion(ema_logit_labeled, target_var.long()) # / minibatch_size\n meters.update('ema_class_loss', ema_class_loss.item())\n #print (\"ema_class_loss\",ema_class_loss)\n\n ### get the unsupervised mixup loss###\n if args.mixup_consistency:\n if args.mixup_hidden:\n # output_u = model(u_var)\n output_mixed_u, target_a_var, target_b_var, lam = model(u_var.float(), ema_logit_unlabeled,\n mixup_hidden=True,\n mixup_alpha=args.mixup_sup_alpha,\n layers_mix=args.num_mix_layer)\n # ema_logit_unlabeled\n lam = lam[0]\n mixedup_target = lam * target_a_var + (1 - lam) * target_b_var\n else:\n # output_u = model(u_var)\n mixedup_x, mixedup_target, lam = mixup_data(u_var, ema_logit_unlabeled, args.mixup_usup_alpha)\n # mixedup_x, mixedup_target, lam = mixup_data(u_var, output_u, args.mixup_usup_alpha)\n output_mixed_u = model(mixedup_x.float())\n mixup_consistency_loss = consistency_criterion(output_mixed_u,\n mixedup_target) / minibatch_size # criterion_u(F.log_softmax(output_mixed_u,1), F.softmax(mixedup_target,1))\n meters.update('mixup_cons_loss', mixup_consistency_loss.item())\n if epoch < args.consistency_rampup_starts:\n mixup_consistency_weight = 0.0\n else:\n mixup_consistency_weight = get_current_consistency_weight(args.mixup_consistency, epoch, i,\n len(unlabelledloader))\n meters.update('mixup_cons_weight', mixup_consistency_weight)\n mixup_consistency_loss = mixup_consistency_weight * mixup_consistency_loss\n else:\n mixup_consistency_loss = 0\n meters.update('mixup_cons_loss', 0)\n\n # labeled_minibatch_size = target_var.data.ne(NO_LABEL).sum().type(torch.cuda.FloatTensor)\n # assert labeled_minibatch_size > 0\n\n #p_score, _ = output.topk(1, 1, True, True)\n #u_score, _ = cons_logit.topk(1, 1, True, True)\n p_score = output[target == 1][:,1]\n\n u_samp_idx = random.sample(range(cons_logit.shape[0]), min(len(p_score), int(0.1 * cons_logit.shape[0])))\n u_score = cons_logit[u_samp_idx, 1]\n #u_score = output[target == 0][:,1]\n\n #print (p_score, u_score)\n\n #u_score = cons_logit\n gamma = 0.01\n pairwise_ranking_loss = max(0, u_score.view(-1).mean() - p_score.view(-1).mean() + gamma)\n\n #loss = mixup_consistency_loss\n #print(class_loss)\n #loss = pairwise_ranking_loss + 1 * mixup_consistency_loss\n #loss = 1 * pairwise_ranking_loss + 1 * mixup_consistency_loss\n #loss = class_loss + 1 * mixup_consistency_loss\n #loss = 0 * class_loss + 10 * mixup_consistency_loss + 1 * pairwise_ranking_loss\n loss = 1 * class_loss + 1 * mixup_consistency_loss + pairwise_weight * pairwise_ranking_loss\n pairwise_weight *= 0.95\n #print ('pairwise ranking loss: ', pairwise_ranking_loss)\n\n #print (class_loss)\n #print (mixup_consistency_loss)\n #print (\"loss\",loss)\n\n meters.update('loss', loss.item())\n\n prec1, prec5 = accuracy(class_logit.data, target_var.data, topk=(1, 2))\n\n #print 
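(\"prec1\", prec1[0])  # commented-out debug line\n        # (added note) topk=(1, 2) because num_classes == 2, so the 'top5'/'prec5'\n        # meters actually track top-2 accuracy (always 100% with two classes).\n        #print 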
(\"prec1\",prec1[0])\n #print (\"prec5\",prec5[0])\n meters.update('top1', prec1[0], minibatch_size)\n meters.update('error1', 100. - prec1[0], minibatch_size)\n meters.update('top5', prec5[0], minibatch_size)\n meters.update('error5', 100. - prec5[0], minibatch_size)\n\n ema_prec1, ema_prec5 = accuracy(ema_logit_labeled.data, target_var.data, topk=(1, 2))\n meters.update('ema_top1', ema_prec1[0], minibatch_size)\n meters.update('ema_error1', 100. - ema_prec1[0], minibatch_size)\n meters.update('ema_top5', ema_prec5[0], minibatch_size)\n meters.update('ema_error5', 100. - ema_prec5[0], minibatch_size)\n\n # compute gradient and do SGD step\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n global_step += 1\n update_ema_variables(model, ema_model, args.ema_decay, global_step)\n\n # measure elapsed time\n #meters.update('batch_time', time.time() - end)\n #end = time.time()\n\n\n if i % args.print_freq == 0:\n print(\n 'Epoch: [{0}][{1}/{2}]\\t'\n 'Class {meters[class_loss]:.4f}\\t'\n 'Mixup Cons {meters[mixup_cons_loss]:.4f}\\t'\n 'Prec@1 {meters[top1]:.3f}\\t'\n 'Prec@5 {meters[top5]:.3f}'.format(\n epoch, i, len(unlabelledloader), meters=meters))\n # print ('lr:',optimizer.param_groups[0]['lr'])\n train_class_loss_list.append(meters['class_loss'].avg)\n train_error_list.append(float(meters['error1'].avg))\n train_pre_list.append(meters['top1'].avg)\n\nNO_LABEL = -1\ndef validate(eval_loader, model, global_step, epoch, ema=False, testing=False):\n class_criterion = nn.CrossEntropyLoss(reduction='sum', ignore_index=NO_LABEL).to(device)\n import utils\n import time\n meters = utils.AverageMeterSet()\n\n # switch to evaluate mode\n model.eval()\n\n output = []\n\n end = time.time()\n for i, (input, target) in enumerate(eval_loader):\n meters.update('data_time', time.time() - end)\n\n with torch.no_grad():\n input_var = torch.autograd.Variable(input.to(device))\n with torch.no_grad():\n target_var = torch.autograd.Variable(target.to(device))\n\n minibatch_size = len(target_var)\n #labeled_minibatch_size = target_var.data.ne(NO_LABEL).sum().type(torch.cuda.FloatTensor)\n #assert labeled_minibatch_size > 0\n #meters.update('labeled_minibatch_size', labeled_minibatch_size)\n\n # compute output\n output1 = model(input_var.float())\n #print (\"output1\",output1)\n class_loss = class_criterion(output1, target_var.long()) / minibatch_size\n\n output = output + list(output1.data)\n # measure accuracy and record loss\n prec1, prec5 = accuracy(output1.data, target_var.data, topk=(1, 2))\n meters.update('class_loss', class_loss.item(), minibatch_size)\n meters.update('top1', prec1[0], minibatch_size)\n meters.update('error1', 100.0 - prec1[0], minibatch_size)\n meters.update('top5', prec5[0], minibatch_size)\n meters.update('error5', 100.0 - prec5[0], minibatch_size)\n\n # measure elapsed time\n meters.update('batch_time', time.time() - end)\n end = time.time()\n\n print(' * Prec@1 {top1.avg:.3f}\\tPrec@5 {top5.avg:.3f}\\n'\n .format(top1=meters['top1'], top5=meters['top5']))\n\n val_class_loss_list.append(meters['class_loss'].avg)\n val_error_list.append(float(meters['error1'].avg))\n val_pre_list.append(meters['top1'].avg)\n return output, meters['top1'].avg\n\nfrom numpy import array\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.preprocessing import OneHotEncoder\n# MNIST\n#values=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]\n#values = array(values)\n#label_encoder = LabelEncoder()\n#integer_encoded = label_encoder.fit_transform(values)\n#print(integer_encoded)\n\n#onehot_encoder = 
OneHotEncoder(sparse=False)\n#integer_encoded = integer_encoded.reshape(len(integer_encoded), 1)\n#onehot_encoded = onehot_encoder.fit_transform(integer_encoded)\n\ntrain_class_loss_list = []\ntrain_error_list = []\ntrain_pre_list = []\nval_class_loss_list = []\nval_error_list = []\nval_pre_list = []\n\n\ndef run(train_x, train_y, test_x, test_y):\n\n    #print(\"train_x.shape\", train_x.shape)\n\n    global train_class_loss_list\n    global train_error_list\n    global train_pre_list\n    global val_class_loss_list\n    global val_error_list\n    global val_pre_list\n    train_class_loss_list = []\n    train_error_list = []\n    train_pre_list = []\n    val_class_loss_list = []\n    val_error_list = []\n    val_pre_list = []\n\n    global global_step\n    global_step = 0\n\n    global best_prec1\n    best_prec1 = 0\n\n    global args\n    args = parser.parse_args()\n\n    input_size = train_x.shape[1]\n    hidden_size1 = 256\n    num_classes = 2\n\n    model = MLP(input_size, hidden_size1, num_classes).to(device)\n    ema_model = MLP(input_size, hidden_size1, num_classes).to(device)\n    optimizer = pt.optim.SGD(model.parameters(), lr=1e-5, momentum=0.9, weight_decay=1e-4, nesterov=True)\n    #scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[50], gamma=0.1)\n\n    #args.mixup_hidden = False\n    args.mixup_hidden = True\n\n    train_y[train_y == -1] = 0\n    test_y[test_y == -1] = 0\n    train_x=torch.tensor(train_x)\n\n    pos_idx = np.where(train_y == 1)[0]\n    neg_idx = np.where(train_y == 0)[0]\n\n    #print (pos_idx)\n    #print (neg_idx)\n\n    train_pos_X = train_x[pos_idx]\n    train_neg_X = train_x[neg_idx]\n    train_pos_y = train_y[pos_idx]\n    train_neg_y = train_y[neg_idx]\n\n    train_pos_y = torch.tensor(train_pos_y)\n    train_neg_y = torch.tensor(train_neg_y)\n    #print (train_neg_X.size())\n\n    torch_dataset = torch.utils.data.TensorDataset(train_pos_X, train_pos_y)\n\n    P_loader = torch.utils.data.DataLoader(\n        dataset=torch_dataset,\n        batch_size=256\n    )\n\n\n    torch_dataset = torch.utils.data.TensorDataset(train_neg_X, train_neg_y)\n\n    U_loader = torch.utils.data.DataLoader(\n        # draw batch_size samples from the dataset on each iteration\n        dataset=torch_dataset,\n        batch_size=256\n    )\n\n    test_x_=torch.tensor(test_x)\n    test_y=torch.tensor(test_y)\n    torch_dataset = torch.utils.data.TensorDataset(test_x_, test_y)\n    validloader = torch.utils.data.DataLoader(\n        # draw batch_size samples from the dataset on each iteration\n        dataset=torch_dataset,\n        batch_size=256\n    )\n\n\n    #rn_method = args.rn_method\n    # fraction = 0.5 on krvskp\n    # reliable negative mining\n    #rn_method = 'NB'\n    #rn_method = 'distance'\n    #rn_method = 'SVM'\n    #rn_method = 'RF'\n    #rn_method = 'rand'\n    #n_sample_idx_u = RN_mining(train_x, train_y, rn_method, 0.5)\n    n_sample_idx_u = random.sample(range(train_neg_X.shape[0]), min(train_pos_X.shape[0] * 10, int(0.4*train_neg_X.shape[0])))\n\n    pretrain_x=train_pos_X\n    pretrain_y=train_pos_y\n    while len(pretrain_y) + len(train_pos_y) <= len(n_sample_idx_u):\n        pretrain_x = torch.cat((pretrain_x, train_pos_X), 0)\n        pretrain_y = torch.cat((pretrain_y, train_pos_y), 0)\n\n    print (n_sample_idx_u)\n    print (pretrain_x.shape)\n    print (pretrain_y.shape)\n\n    pretrain_x = torch.cat((pretrain_x, train_neg_X[n_sample_idx_u]), 0)\n    pretrain_y = torch.cat((pretrain_y, train_neg_y[n_sample_idx_u]), 0)\n    perm = np.random.permutation(len(pretrain_y))\n    pretrain_x, pretrain_y = pretrain_x[perm], pretrain_y[perm]\n    print (pretrain_y)\n    torch_dataset = torch.utils.data.TensorDataset(pretrain_x, pretrain_y)\n\n    trainloader = torch.utils.data.DataLoader(\n        dataset=torch_dataset,\n        batch_size=256\n    )\n\n    args.mixup_hidden = False\n\n    
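# (added note) stage 1: roughly 300 epochs of supervised warm-up on the\n    # positives plus sampled negatives, before the semi-supervised stage below.\n    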
range(args.start_epoch, args.start_epoch + 300):\n        pre_train(trainloader, model, optimizer, epoch)\n        print(\"=========================:\\n\")\n        _, prec1 = validate(validloader, model, global_step, epoch + 1)\n\n    torch.cuda.empty_cache()\n\n    train_error_list.append('====')\n    val_error_list.append('====')\n\n    for epoch in range(args.start_epoch, args.start_epoch + 1000):\n        train(trainloader, U_loader, model, ema_model, optimizer, epoch)\n        print(\"Evaluating the primary model on validation set:\\n\")\n        output, prec1 = validate(validloader, model, global_step, epoch + 1)\n        #scheduler.step()\n        #torch.cuda.empty_cache()\n\n    train_log = OrderedDict()\n    train_log['train_class_loss_list'] = train_class_loss_list\n    train_log['train_error_list'] = train_error_list\n    train_log['train_pre_list'] = train_pre_list\n\n    train_log['val_pre_list'] = val_pre_list\n    train_log['val_class_loss_list'] = val_class_loss_list\n    train_log['val_error_list'] = val_error_list\n\n    return output, train_log\n\nx_train, y_train, x_test, y_test, prior = load_dataset('cifar10', args.labeled, args.unlabeled)\noutput, train_log = run(x_train, y_train, x_test, y_test)\nprint(train_log)\n","sub_path":"mixpul_cifar10_2.py","file_name":"mixpul_cifar10_2.py","file_ext":"py","file_size_in_byte":31738,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
{"seq_id":"88888122","text":"import argparse\nimport os\nimport numpy as np\nimport math\nimport itertools\nimport time\nimport datetime\nimport sys\n\nfrom PIL import Image\nimport torchvision.transforms as transforms\nfrom torchvision.utils import save_image\n\nfrom torch.utils.data import DataLoader\nfrom torchvision import datasets\nfrom torch.autograd import Variable\nimport torch.autograd as autograd\n\nfrom datasets import *\n\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch\n\nepoch = 0\nn_epochs = 200\ndataset_name = \"img_align_celeba\"\nbatch_size = 16\nlr = 0.0002\nb1 = 0.5\nb2 = 0.999\ndecay_epoch = 100\nn_cpu = 8\nimg_height = 128\nimg_width = 128\nchannels = 3\nsample_interval = 400\ncheckpoint_interval = -1\nn_critic = 5  # number of D updates per G update (assumed value; required by the training loop below)\nresidual_blocks = 6\nselected_attrs = [\"Black_Hair\", \"Blond_Hair\", \"Brown_Hair\", \"Male\", \"Young\"]\n\nc_dim = len(selected_attrs)\nimg_shape = (channels, img_height, img_width)\n\ncuda = torch.cuda.is_available()\n\n# Loss functions\ncriterion_cycle = torch.nn.L1Loss()\n\n\ndef criterion_cls(logit, target):\n    return F.binary_cross_entropy_with_logits(logit, target, reduction='sum') / logit.size(0)\n\n\n# Loss weights\nlambda_cls = 1\nlambda_rec = 10\nlambda_gp = 10\n\n# Initialize generator and discriminator\ngenerator = GeneratorResNet(img_shape=img_shape, res_blocks=residual_blocks, c_dim=c_dim)\ndiscriminator = Discriminator(img_shape=img_shape, c_dim=c_dim)\n\nif cuda:\n    generator = generator.cuda()\n    discriminator = discriminator.cuda()\n    criterion_cycle.cuda()\n\nif epoch != 0:\n    # Load pretrained models\n    generator.load_state_dict(torch.load(\"saved_models/generator_%d.pth\" % epoch))\n    discriminator.load_state_dict(torch.load(\"saved_models/discriminator_%d.pth\" % epoch))\nelse:\n    generator.apply(weights_init_normal)\n    discriminator.apply(weights_init_normal)\n# Optimizers\noptimizer_G = torch.optim.Adam(generator.parameters(), lr=lr, betas=(b1, b2))\noptimizer_D = torch.optim.Adam(discriminator.parameters(), lr=lr, betas=(b1, b2))\n\n# Configure dataloaders\ntrain_transforms = [\n    transforms.Resize(int(1.12 * img_height), Image.BICUBIC),\n    transforms.RandomCrop(img_height),\n    
transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),\n]\n\ndataloader = DataLoader(CelebADataset(\"../../data/%s\" % dataset_name, transforms_=train_transforms, mode=\"train\", attributes=selected_attrs),\n batch_size=batch_size,\n shuffle=True,\n num_workers=n_cpu,\n)\n\nval_transforms = [\n transforms.Resize((img_height, img_width), Image.BICUBIC),\n transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),\n]\n\nval_dataloader = DataLoader(\n CelebADataset(\n \"../../data/%s\" % dataset_name, transforms_=val_transforms, mode=\"val\", attributes=selected_attrs\n ),\n batch_size=10,\n shuffle=True,\n num_workers=1,\n)\n\n# Tensor type\nTensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor\n\n\ndef compute_gradient_penalty(D, real_samples, fake_samples):\n \"\"\"Calculates the gradient penalty loss for WGAN GP\"\"\"\n # Random weight term for interpolation between real and fake samples\n alpha = Tensor(np.random.random((real_samples.size(0), 1, 1, 1)))\n # Get random interpolation between real and fake samples\n interpolates = (alpha * real_samples + ((1 - alpha) * fake_samples)).requires_grad_(True)\n d_interpolates, _ = D(interpolates)\n fake = Variable(Tensor(np.ones(d_interpolates.shape)), requires_grad=False)\n # Get gradient w.r.t. interpolates\n gradients = autograd.grad(\n outputs=d_interpolates,\n inputs=interpolates,\n grad_outputs=fake,\n create_graph=True,\n retain_graph=True,\n only_inputs=True,\n )[0]\n gradients = gradients.view(gradients.size(0), -1)\n gradient_penalty = ((gradients.norm(2, dim=1) - 1) ** 2).mean()\n return gradient_penalty\n\n\nlabel_changes = [\n ((0, 1), (1, 0), (2, 0)), # Set to black hair\n ((0, 0), (1, 1), (2, 0)), # Set to blonde hair\n ((0, 0), (1, 0), (2, 1)), # Set to brown hair\n ((3, -1),), # Flip gender\n ((4, -1),), # Age flip\n]\n\n\ndef sample_images(batches_done):\n \"\"\"Saves a generated sample of domain translations\"\"\"\n val_imgs, val_labels = next(iter(val_dataloader))\n val_imgs = Variable(val_imgs.type(Tensor))\n val_labels = Variable(val_labels.type(Tensor))\n img_samples = None\n for i in range(10):\n img, label = val_imgs[i], val_labels[i]\n # Repeat for number of label changes\n imgs = img.repeat(c_dim, 1, 1, 1)\n labels = label.repeat(c_dim, 1)\n # Make changes to labels\n for sample_i, changes in enumerate(label_changes):\n for col, val in changes:\n labels[sample_i, col] = 1 - labels[sample_i, col] if val == -1 else val\n\n # Generate translations\n gen_imgs = generator(imgs, labels)\n # Concatenate images by width\n gen_imgs = torch.cat([x for x in gen_imgs.data], -1)\n img_sample = torch.cat((img.data, gen_imgs), -1)\n # Add as row to generated samples\n img_samples = img_sample if img_samples is None else torch.cat((img_samples, img_sample), -2)\n\n save_image(img_samples.view(1, *img_samples.shape), \"images/%s.png\" % batches_done, normalize=True)\n\n\n# ----------\n# Training\n# ----------\n\nsaved_samples = []\nstart_time = time.time()\nfor epoch in range(epoch, n_epochs):\n for i, (imgs, labels) in enumerate(dataloader):\n\n # Model inputs\n imgs = Variable(imgs.type(Tensor))\n labels = Variable(labels.type(Tensor))\n\n # Sample labels as generator inputs\n sampled_c = Variable(Tensor(np.random.randint(0, 2, (imgs.size(0), c_dim))))\n # Generate fake batch of images\n fake_imgs = generator(imgs, sampled_c)\n\n # ---------------------\n # Train Discriminator\n # ---------------------\n\n optimizer_D.zero_grad()\n\n # Real 
images\n real_validity, pred_cls = discriminator(imgs)\n # Fake images\n fake_validity, _ = discriminator(fake_imgs.detach())\n # Gradient penalty\n gradient_penalty = compute_gradient_penalty(discriminator, imgs.data, fake_imgs.data)\n # Adversarial loss\n loss_D_adv = -torch.mean(real_validity) + torch.mean(fake_validity) + lambda_gp * gradient_penalty\n # Classification loss\n loss_D_cls = criterion_cls(pred_cls, labels)\n # Total loss\n loss_D = loss_D_adv + lambda_cls * loss_D_cls\n\n loss_D.backward()\n optimizer_D.step()\n\n optimizer_G.zero_grad()\n\n # Every n_critic times update generator\n if i % n_critic == 0:\n\n # -----------------\n # Train Generator\n # -----------------\n\n # Translate and reconstruct image\n gen_imgs = generator(imgs, sampled_c)\n recov_imgs = generator(gen_imgs, labels)\n # Discriminator evaluates translated image\n fake_validity, pred_cls = discriminator(gen_imgs)\n # Adversarial loss\n loss_G_adv = -torch.mean(fake_validity)\n # Classification loss\n loss_G_cls = criterion_cls(pred_cls, sampled_c)\n # Reconstruction loss\n loss_G_rec = criterion_cycle(recov_imgs, imgs)\n # Total loss\n loss_G = loss_G_adv + lambda_cls * loss_G_cls + lambda_rec * loss_G_rec\n\n loss_G.backward()\n optimizer_G.step()\n\n # --------------\n # Log Progress\n # --------------\n\n # Determine approximate time left\n batches_done = epoch * len(dataloader) + i\n batches_left = n_epochs * len(dataloader) - batches_done\n time_left = datetime.timedelta(seconds=batches_left * (time.time() - start_time) / (batches_done + 1))\n\n # Print log\n sys.stdout.write(\n \"\\r[Epoch %d/%d] [Batch %d/%d] [D adv: %f, aux: %f] [G loss: %f, adv: %f, aux: %f, cycle: %f] ETA: %s\"\n % (\n epoch,\n n_epochs,\n i,\n len(dataloader),\n loss_D_adv.item(),\n loss_D_cls.item(),\n loss_G.item(),\n loss_G_adv.item(),\n loss_G_cls.item(),\n loss_G_rec.item(),\n time_left,\n )\n )\n\n # If at sample interval sample and save image\n if batches_done % sample_interval == 0:\n sample_images(batches_done)\n\n if checkpoint_interval != -1 and epoch % checkpoint_interval == 0:\n # Save model checkpoints\n torch.save(generator.state_dict(), \"saved_models/generator_%d.pth\" % epoch)\n torch.save(discriminator.state_dict(), \"saved_models/discriminator_%d.pth\" % epoch)","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8817,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"633771613","text":"from insere import *\n\ndef lerDoArquivo():\n valores = None\n tree = None\n # terminar, dar free na arvore\n arq = open(\"arq1.txt\", \"r+\")\n valores = arq.read()\n if valores is None:\n return None\n valores = valores.split(\" \")\n for i in valores:\n tree = insere(tree, int(i))\n arq.close()\n return tree\n","sub_path":"lista4/tree/lerDoArquivo.py","file_name":"lerDoArquivo.py","file_ext":"py","file_size_in_byte":337,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"195794229","text":"letter = input(\"Enter a single letter: \")\nflag = False\nwhile flag == False:\n if ord(letter)>=65 and ord(letter)<=90: #using ASCII values to determine its type of character\n print(\"Letter is within the range of A-Z\")\n flag = True\n elif ord(letter)>= 97 and ord(letter)<= 122:\n print(\"Letter is within the range a-z\")\n flag = True\n else:\n print(\"Letter not within the ranges of A-Z or a-z\")\n letter = input(\"Enter a single letter: \") \n","sub_path":"Papers/C2 BT2 2018/task 
1.py","file_name":"task 1.py","file_ext":"py","file_size_in_byte":484,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"68629312","text":"#User function Template for python3\r\n\r\n\r\ndef maxLen(a):\r\n \r\n n = len(a)\r\n \r\n for i in range(n):\r\n if(a[i] == 0):\r\n a[i] = -1\r\n \r\n maxi = 0\r\n d = {}\r\n s = 0\r\n k = 0\r\n\r\n for i in range(n):\r\n s += a[i]\r\n if(s==k):\r\n maxi = i+1\r\n \r\n if((s-k) in d):\r\n maxi = max(maxi,i-d[(s-k)])\r\n\r\n if s not in d:\r\n d[s] = i\r\n\r\n return maxi\r\n\r\n\r\n\r\n#{ \r\n# Driver Code Starts\r\n#Initial Template for Python 3\r\n\r\n\r\nt=int(input())\r\nfor _ in range(0,t):\r\n n=int(input())\r\n a=list(map(int,input().split()))\r\n s=maxLen(a)\r\n print(s)\r\n# } Driver Code Ends","sub_path":"python/Largest_subarray_of 0's_and_1's.py","file_name":"Largest_subarray_of 0's_and_1's.py","file_ext":"py","file_size_in_byte":658,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"442516426","text":"from direct.gui.DirectGui import *\r\nfrom pandac.PandaModules import *\r\n\r\nclass Preloader:\r\n\tdef __init__(self, fonts):\r\n\t\tself.createGraphics(fonts)\r\n\t\t\r\n\t\tself.prepLoadGroup()\r\n\t\t\r\n\t\tself.loaderBar.setTexOffset(self.modTS, .015, 0)\r\n\t\tbase.graphicsEngine.renderFrame()\r\n\t\tbase.graphicsEngine.renderFrame()\r\n\t\t\r\n\t\tself.itemCount = 0\r\n\t\t\r\n\t\tfor M in self.models:\r\n\t\t\titem = loader.loadModel(M)\r\n\t\t\tself.itemCount += 1\r\n\t\t\tprogress = self.itemCount / float(self.totalItems)\r\n\t\t\tself.loaderBar.setTexOffset(self.modTS, \r\n\t\t\t\t-progress + .015, 0)\r\n\t\t\tbase.graphicsEngine.renderFrame()\r\n\t\t\tbase.graphicsEngine.renderFrame()\r\n\t\t\t\r\n\t\tself.destroy()\r\n\t\t\t\r\n\tdef createGraphics(self, fonts):\r\n\t\tself.modTS = TextureStage(\"Modulate\")\r\n\t\tself.modTS.setMode(TextureStage.MModulate)\r\n\t\t\r\n\t\tself.frame = DirectFrame(frameSize = (-.3, .3, -.2, .2), \r\n\t\t\tframeColor = (1,1,1,0),\r\n\t\t\tparent = base.aspect2d)\r\n\t\t# Creates the frame that will house the Preloader graphics\r\n\t\t\r\n\t\tloaderEgg = loader.loadModel(\"../Models/EnergyBar.egg\")\r\n\t\tself.loaderBG = loaderEgg.find(\"**/EnergyBG\")\r\n\t\tself.loaderBar = loaderEgg.find(\"**/EnergyBar\")\r\n\t\tself.loaderFrame = loaderEgg.find(\"**/EnergyFrame\")\r\n\t\tself.loaderBG.reparentTo(self.frame)\r\n\t\tself.loaderBar.reparentTo(self.loaderBG)\r\n\t\tself.loaderFrame.reparentTo(self.loaderBG)\r\n\t\tself.loaderBG.setPos(0, 0, -.2)\r\n\t\t# Loads the three parts of the loader Bar and places them\r\n\t\t# on the frame.\r\n\t\t\r\n\t\talpha = loader.loadTexture(\"../Images/LoaderAlpha.png\")\r\n\t\talpha.setFormat(Texture.FAlpha)\r\n\t\talpha.setWrapU(Texture.WMClamp)\r\n\t\t# Loads an alpha texture to use as a cut out for the\r\n\t\t# bars.\r\n\t\t\r\n\t\tself.loaderBar.setTexture(self.modTS, alpha)\r\n\t\t# Applies the alpha cut out texture to the loader bar.\r\n\t\t\r\n\t\tself.text = DirectLabel(\r\n\t\t\ttext = \"Loading Suicide Jockeys...\",\r\n\t\t\ttext_font = fonts[\"orange\"], text_scale = .1, \r\n\t\t\ttext_fg = (1,1,1,1), relief = None, \r\n\t\t\ttext_align = TextNode.ACenter,\r\n\t\t\tparent = self.frame)\r\n\t\treturn\r\n# createGraphics: Creates the text and loader bar for the preloader.\r\n\r\n\tdef prepLoadGroup(self):\r\n\t\tself.models = 
[\"../Models/Track.egg\",\r\n\t\t\t\t\t\t\"../Models/Planet.egg\",\r\n\t\t\t\t\t\t\"../Models/Ground.egg\",\r\n\t\t\t\t\t\t\"../Models/LinearPinkSkySphere.bam\",\r\n\t\t\t\t\t\t\r\n\t\t\t\t\t\t\"../Models/TargetCone.bam\",\r\n\t\t\t\t\t\t\"../Models/ShieldBar.egg\",\r\n\t\t\t\t\t\t\"../Models/SpeedBar.egg\",\r\n\t\t\t\t\t\t\"../Models/EnergyBar.egg\",\r\n\r\n\t\t\t\t\t\t\"../Models/RedCycle.bam\",\r\n\t\t\t\t\t\t\"../Models/RedTurr.bam\",\r\n\t\t\t\t\t\t\"../Models/YellowCycle.bam\",\r\n\t\t\t\t\t\t\"../Models/YellowTurr.bam\",\r\n\t\t\t\t\t\t\"../Models/GreenCycle.bam\",\r\n\t\t\t\t\t\t\"../Models/GreenTurr.bam\",\r\n\t\t\t\t\t\t\"../Models/BlueCycle.bam\",\r\n\t\t\t\t\t\t\"../Models/BlueTurr.bam\",\r\n\t\t\t\t\t\t\"../Models/Disc.bam\",\r\n\t\t\t\t\t\t\r\n\t\t\t\t\t\t\"../Models/MachineGun.bam\",\r\n\t\t\t\t\t\t\"../Models/Cannon.bam\",\r\n\t\t\t\t\t\t\"../Models/LaserFlash.bam\",\r\n\t\t\t\t\t\t\"../Models/LaserProj.bam\",\r\n\t\t\t\t\t\t\r\n\t\t\t\t\t\t\"../Models/Explosions/Laserburst1.bam\",\r\n\t\t\t\t\t\t\"../Models/Explosions/Laserburst2.bam\",\r\n\t\t\t\t\t\t\"../Models/Explosions/Laserburst3.bam\"]\r\n\t\t\t\t\t\t\r\n\t\tself.totalItems = len(self.models)\r\n\t\t\r\n\t\treturn\r\n# prepLoadGroup: Creates a list containing all of the models that need to be\r\n# loaded and then counts how many there are.\r\n\r\n\tdef destroy(self):\r\n\t\tself.loaderBG.removeNode()\r\n\t\tself.text.destroy()\r\n\t\tself.frame.destroy()","sub_path":"BGP3D/Chapter12/Examples/PreloaderClass_01.py","file_name":"PreloaderClass_01.py","file_ext":"py","file_size_in_byte":3229,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"308901883","text":"import heapq\nfrom typing import List\n\nfrom Helpers import helper as hlp\nfrom Helpers import test_class\nimport re\n\n\nclass Solution(test_class.test_class):\n\n def setUp(self):\n super().setUp()\n\n def getKthSmallest(self, arr1, arr2, k) -> int:\n mergedArray = self.merge(arr1, arr2, k)\n return mergedArray[k - 1]\n\n def merge(self, arr1, arr2, k):\n res = []\n l = 0\n r = 0\n while l < len(arr1) and r < len(arr2):\n if arr1[l] <= arr2[r]:\n res.append(arr1[l])\n l += 1\n else:\n res.append(arr2[r])\n r += 1\n if len(res) == k:\n return res\n res += arr1[l:]\n res += arr2[r:]\n return res\n\n def test_1(self):\n self.assertEqual(4, self.getKthSmallest([1, 4, 6], [2, 3, 5], 4))\n\n def test_2(self):\n self.assertEqual(2, self.getKthSmallest([1, 4, 6, 8, 9, 12], [2, 3, 5, 7, 10, 11], 2))\n","sub_path":"venv/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":974,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"53901310","text":"import numpy as np\nimport openjij\nfrom openjij.sampler import measure_time\nfrom openjij.sampler import BaseSampler\nfrom openjij.utils.decorator import deprecated_alias\nimport cxxjij\n\n\nclass SQASampler(BaseSampler):\n \"\"\"Sampler with Simulated Quantum Annealing (SQA).\n\n Inherits from :class:`openjij.sampler.sampler.BaseSampler`.\n Hamiltonian\n $$H(s) = s H_p + (1-s)\\sum_i \\sigma_i^x$$\n which $H_p$ is problem Hamiltonian that we want solve.\n\n Args:\n beta (float):\n Inverse temperature.\n\n gamma (float):\n Amplitude of quantum fluctuation.\n\n trotter (int):\n Trotter number.\n\n step_length (int):\n Length of Monte Carlo step.\n\n step_num (int):\n Number of Monte Carlo step.\n\n schedule_info (dict):\n Information about a annealing schedule.\n\n iteration (int):\n Number of iterations.\n\n 
Attributes:\n        energy_bias (float):\n            Energy bias.\n\n        var_type (str):\n            Type of variables: 'SPIN' or 'BINARY' which mean {-1, 1} or {0, 1}.\n\n        indices (int):\n            Indices of `openjij.model.model.BinaryQuadraticModel` object.\n\n        N (int):\n            Number of the indices.\n\n        system_class (:class:):\n            `cxxjij.system.QuantumIsing` class.\n\n        sqa_kwargs (dict):\n            Parameters of SQA: beta, gamma, and schedule_info.\n\n    Raises:\n        ValueError: If the schedule violates the conditions below.\n        - not list or numpy.ndarray.\n        - schedule range is '0 <= s <= 1'.\n\n    \"\"\"\n    @property\n    def parameters(self):\n        return {\n            'beta': ['parameters'],\n            'gamma': ['parameters'],\n            'trotter': ['parameters'],\n        }\n\n    @deprecated_alias(iteration='num_reads')\n    def __init__(self,\n                 beta=5.0, gamma=1.0,\n                 num_sweeps=1000, schedule=None,\n                 trotter=4,\n                 num_reads=1):\n\n        self.beta = beta\n        self.gamma = gamma\n        self.trotter = trotter\n        self.num_reads = num_reads\n        self.num_sweeps = num_sweeps\n        self.schedule = schedule\n        self.energy_bias = 0.0\n        self._schedule_setting = {\n            'beta': beta,\n            'gamma': gamma,\n            'num_sweeps': num_sweeps,\n            'num_reads': num_reads,\n        }\n\n        self._make_system = cxxjij.system.make_transverse_ising_Eigen\n        self._algorithm = {\n            'singlespinflip': cxxjij.algorithm.Algorithm_SingleSpinFlip_run\n        }\n\n    def _convert_validation_schedule(self, schedule, beta):\n        # accept either a plain list or a numpy array\n        if not isinstance(schedule, (list, np.ndarray)):\n            raise ValueError(\"schedule should be list or numpy.ndarray\")\n\n        if isinstance(schedule[0], cxxjij.utility.TransverseFieldSchedule):\n            return schedule\n\n        # schedule validation 0 <= s <= 1\n        sch = np.array(schedule).T[0]\n        if not np.all((0 <= sch) & (sch <= 1)):\n            raise ValueError(\"schedule range is '0 <= s <= 1'.\")\n\n        if len(schedule[0]) == 2:\n            # convert to list of cxxjij.utility.TransverseFieldSchedule\n            cxxjij_schedule = []\n            for s, one_mc_step in schedule:\n                _schedule = cxxjij.utility.TransverseFieldSchedule()\n                _schedule.one_mc_step = one_mc_step\n                _schedule.updater_parameter.beta = beta\n                _schedule.updater_parameter.s = s\n                cxxjij_schedule.append(_schedule)\n            return cxxjij_schedule\n        elif len(schedule[0]) == 3:\n            # convert to list of cxxjij.utility.TransverseFieldSchedule\n            cxxjij_schedule = []\n            for s, _beta, one_mc_step in schedule:\n                _schedule = cxxjij.utility.TransverseFieldSchedule()\n                _schedule.one_mc_step = one_mc_step\n                _schedule.updater_parameter.beta = _beta\n                _schedule.updater_parameter.s = s\n                cxxjij_schedule.append(_schedule)\n            return cxxjij_schedule\n        else:\n            raise ValueError(\n                \"\"\"schedule is list of tuple or list\n                (annealing parameter s : float, step_length : int) or\n                (annealing parameter s : float, beta: float, step_length : int)\n                \"\"\")\n\n    def _dict_to_model(self, var_type, h=None, J=None, Q=None, **kwargs):\n        if var_type == openjij.SPIN:\n            bqm = openjij.BinaryQuadraticModel(h, J, 0.0, var_type)\n        elif var_type == openjij.BINARY:\n            bqm = openjij.BinaryQuadraticModel.from_qubo(Q)\n        else:\n            raise ValueError(\n                'var_type should be openjij.SPIN or openjij.BINARY')\n        return bqm\n\n    def _get_result(self, system, model):\n        state, info = super()._get_result(system, model)\n\n        # the builtin int is used as the dtype; np.int is deprecated\n        q_state = system.trotter_spins[:-1].T.astype(int)\n        c_energies = [model.calc_energy(\n            state, need_to_convert_from_spin=True) for state in q_state]\n        info['trotter_state'] = q_state\n        info['trotter_energies'] = c_energies\n\n        return state, info\n\n    def sample_ising(self, h, J,\n                     beta=None, gamma=None,\n                     num_sweeps=None, schedule=None,\n                     num_reads=1,\n                     initial_state=None, updater='single spin flip',\n                     
reinitialize_state=True, seed=None, **kwargs):\n \"\"\"Sampling from the Ising model\n\n Args:\n h (dict): Linear term of the target Ising model. \n J (dict): Quadratic term of the target Ising model. \n beta (float, optional): inverse tempareture.\n gamma (float, optional): strangth of transverse field. Defaults to None.\n num_sweeps (int, optional): number of sweeps. Defaults to None.\n schedule (list[list[float, int]], optional): List of annealing parameter. Defaults to None.\n num_reads (int, optional): number of sampling. Defaults to 1.\n initial_state (list[int], optional): Initial state. Defaults to None.\n updater (str, optional): update method. Defaults to 'single spin flip'.\n reinitialize_state (bool, optional): Re-initilization at each sampling. Defaults to True.\n seed (int, optional): Sampling seed. Defaults to None.\n\n Raises:\n ValueError: [description]\n\n Returns:\n [type]: [description]\n \"\"\"\n\n bqm = openjij.BinaryQuadraticModel(\n linear=h, quadratic=J, var_type='SPIN'\n )\n return self._sampling(bqm, beta=beta, gamma=gamma,\n num_sweeps=num_sweeps, schedule=schedule,\n num_reads=num_reads,\n initial_state=initial_state, updater=updater,\n reinitialize_state=reinitialize_state, seed=seed, **kwargs)\n\n def _sampling(self, bqm, beta=None, gamma=None,\n num_sweeps=None, schedule=None,\n num_reads=1,\n initial_state=None, updater='single spin flip',\n reinitialize_state=True, seed=None, **kwargs):\n\n ising_graph = bqm.get_cxxjij_ising_graph()\n\n self._setting_overwrite(\n beta=beta, gamma=gamma,\n num_sweeps=num_sweeps, num_reads=num_reads\n )\n self._annealing_schedule_setting(\n bqm, beta, gamma, num_sweeps, schedule)\n\n # make init state generator --------------------------------\n if initial_state is None:\n def init_generator(): return [ising_graph.gen_spin()\n for _ in range(self.trotter)]\n else:\n if isinstance(initial_state, dict):\n initial_state = [initial_state[k] for k in bqm.indices]\n trotter_init_state = [np.array(initial_state)\n for _ in range(self.trotter)]\n\n def init_generator(): return trotter_init_state\n # -------------------------------- make init state generator\n\n # choose updater -------------------------------------------\n sqa_system = self._make_system(\n init_generator(), ising_graph, self.gamma\n )\n _updater_name = updater.lower().replace('_', '').replace(' ', '')\n if _updater_name not in self._algorithm:\n raise ValueError('updater is one of \"single spin flip\"')\n algorithm = self._algorithm[_updater_name] \n # ------------------------------------------- choose updater\n\n response = self._cxxjij_sampling(\n bqm, init_generator,\n algorithm, sqa_system,\n reinitialize_state, seed, **kwargs\n )\n\n response.info['schedule'] = self.schedule_info\n\n return response\n\n def _annealing_schedule_setting(self, model,\n beta=None, gamma=None,\n num_sweeps=None,\n schedule=None):\n self.beta = beta if beta else self.beta\n self.gamma = gamma if gamma else self.gamma\n if schedule or self.schedule:\n self._schedule = self._convert_validation_schedule(\n schedule if schedule else self.schedule, self.beta\n )\n self.schedule_info = {'schedule': 'custom schedule'}\n else:\n\n self.num_sweeps = num_sweeps if num_sweeps else self.num_sweeps\n self._schedule, beta_gamma = linear_ising_schedule(\n model=model,\n beta=self._schedule_setting['beta'],\n gamma=self._schedule_setting['gamma'],\n num_sweeps=self._schedule_setting['num_sweeps']\n )\n self.schedule_info = {\n 'beta': beta_gamma[0],\n 'gamma': beta_gamma[1],\n 'num_sweeps': 
self._schedule_setting['num_sweeps']\n }\n\n\ndef linear_ising_schedule(model, beta, gamma, num_sweeps):\n schedule = cxxjij.utility.make_transverse_field_schedule_list(\n beta=beta, one_mc_step=1, num_call_updater=num_sweeps\n )\n return schedule, [beta, gamma]\n","sub_path":"openjij/sampler/sqa_sampler.py","file_name":"sqa_sampler.py","file_ext":"py","file_size_in_byte":10137,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"50883194","text":"import multiprocessing\nimport os\nimport uuid\n\nimport empyrical\nimport pandas as pd\nfrom hyperopt import fmin, tpe, hp, STATUS_OK, Trials, space_eval\n\nfrom data_prepare import split_data_set_by_date\nfrom index_components import zz500\nfrom target.valid_wave.wave_strategy import *\n\ntry:\n import _pickle as pickle\nexcept:\n import pickle\nfrom functools import partial\n\nabsolute_spaces = {\n 'max_return_threshold':\n hp.quniform('max_return_threshold', 0.02, 0.2, 0.01),\n 'return_per_count_threshold':\n hp.quniform('return_per_count_threshold', 0.001, 0.02, 0.001),\n 'withdraw_threshold':\n hp.quniform('withdraw_threshold', 0.005, 0.1, 0.005),\n 'minimum_period':\n hp.uniform('minimum_period', 5, 20)\n}\n\nrelative_spaces = {\n 'std_window':\n hp.quniform('window', 3, 30, 2),\n 'first_count_threshold':\n hp.quniform('first_count_threshold', 0.1, 3.0, 0.1),\n 'return_per_count_threshold':\n hp.quniform('return_per_count_threshold', 0.1, 3.0, 0.1),\n 'withdraw_threshold':\n hp.quniform('withdraw_threshold', 0.2, 4, 0.2)\n}\n\n\ndef get_data(stk_list):\n # market = pd.read_csv(\"../data/cs_market.csv\", parse_dates=[\"date\"], dtype={\"code\": str})\n # market = pd.read_csv(\"~/cs_market.csv\", parse_dates=[\"date\"], dtype={\"code\": str})\n market = pd.read_csv(\"E:\\market_data/cs_market.csv\", parse_dates=[\"date\"], dtype={\"code\": str})\n all_ohlcv = market.drop([\"Unnamed: 0\", \"total_turnover\", \"limit_up\", \"limit_down\"], axis=1)\n all_ohlcv = all_ohlcv.set_index(['code', 'date']).sort_index()\n idx_slice = pd.IndexSlice\n stk_ohlcv_list = []\n for stk in all_ohlcv.index.get_level_values('code').unique():\n if stk in stk_list:\n stk_ohlcv = all_ohlcv.loc[idx_slice[stk, :], idx_slice[:]]\n stk_ohlcv_list.append(stk_ohlcv)\n return stk_ohlcv_list\n\n\ndef valid_wave_by_multi_processes(params, ohlcv_list, operation, mode, processes=0):\n stk_result_list = []\n\n if processes <= 0:\n processes = multiprocessing.cpu_count()\n\n pool = multiprocessing.Pool(processes=processes)\n\n def callback(result):\n stk_result_list.append(result)\n\n def print_error(err):\n print(err)\n\n for ohlcv in ohlcv_list:\n if 'std_window' in params:\n std_window = params['std_window']\n else:\n std_window = 0\n pool.apply_async(tag_wave_direction, args=(\n ohlcv, params['first_count_threshold'], params['return_per_count_threshold'],\n params['withdraw_threshold'], operation, mode, std_window,),\n callback=callback, error_callback=print_error)\n\n pool.close()\n pool.join()\n return stk_result_list\n\n\ndef objective(params, ohlcv_list, operation, mode, log_dir):\n print(params)\n identity = str(uuid.uuid1())\n result_list = valid_wave_by_multi_processes(params, ohlcv_list, operation, mode)\n returns_list = []\n for result in result_list:\n stk_returns = result['pct_chg'] * result['direction']\n stk_returns = stk_returns.fillna(0)\n returns_list.append(stk_returns)\n returns = pd.concat(returns_list, axis=0)\n\n annual_return = empyrical.annual_return(returns)\n sharpe_ratio = 
empyrical.sharpe_ratio(returns)\n if np.isnan(sharpe_ratio):\n sharpe_ratio = 0\n\n data = {\n 'id': identity,\n 'params': params,\n 'returns': returns,\n 'annual_return': annual_return,\n 'sharpe_ratio': sharpe_ratio,\n # 'result_list': result_list\n }\n\n with open(os.path.join(log_dir, identity + '.pkl'), 'wb') as f:\n pickle.dump(data, f)\n\n print('id: %s, annual_return: %s, sharpe_ratio: %s' % (identity, annual_return, sharpe_ratio))\n return {'loss': -sharpe_ratio, 'status': STATUS_OK}\n\n\nif __name__ == '__main__':\n space = relative_spaces\n sub_dir = 'relative'\n\n # function = tag_wave_direction_by_absolute()\n # space = absolute_spaces\n # sub_dir = 'absolute'\n\n ohlcv_list = get_data(zz500[:50])\n split_dates = [\"2016-01-01\", \"2017-01-01\"]\n train_set, validate_set, test_set = split_data_set_by_date(ohlcv_list, split_dates, minimum_size=1)\n log_dir = os.path.join('./valid_wave_hyperopt', sub_dir)\n\n if not os.path.isdir(log_dir):\n os.makedirs(log_dir)\n\n hyperopt_objective = partial(objective, ohlcv_list=test_set, operation='search', mode='relative', log_dir=log_dir)\n trials = Trials()\n best = fmin(hyperopt_objective, space, algo=tpe.suggest, max_evals=60, trials=trials)\n params = space_eval(space, best)\n print(params)\n\n # best_params = {\n # 'minimum_period': 12.135881002390583,\n # 'std_window': 10.0,\n # 'withdraw_threshold': 2.0,\n # 'max_return_threshold': 1.0,\n # 'return_per_count_threshold': 1.0}\n #\n # hyperopt_objective(best_params)\n","sub_path":"target/valid_wave/wave_stratege_hyperopt.py","file_name":"wave_stratege_hyperopt.py","file_ext":"py","file_size_in_byte":4840,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"341750706","text":"from eudplib import *\n\nclass ReferenceTable(EUDObject):\n '''\n Same as EUDArray with contents =\n size(=N), key 1, value 1, key 2, value 2, ..., key N, value N\n However, these key-value pair are lazily collected.\n\n key_f transforms key before registered.\n rt = ReferenceTable(key_transformer = EPDConstString)\n rt.AddPair(\"Hello\", 3) # transforms \"Hello\" to EPD(Db())\n\n value_f works similar to key_f\n '''\n def __init__(self,\n initdict=[], \n ref_by = [],\n key_f = lambda k:k,\n value_f = lambda v:v,\n sortkey_f = lambda k,v:0,\n ):\n super().__init__()\n self._dict = []\n self.key_f = key_f\n self.value_f = value_f\n self.sortkey_f = sortkey_f\n\n # Save transformed objects\n self._keys = []\n self._values = []\n\n # Added on parents lazily\n self._ref_by = ref_by\n\n # Check the object called Evaluate() at least once\n self._addedToParents = False\n\n # list of tuples, [(k1, v1), (k2, v2), ... 
]\n        for k, v in initdict:\n            self.AddPair(k, v)\n\n    def AddPair(self, key, value):\n        # duplicate check\n        for k, v in self._dict:\n            if k == key:\n                raise RuntimeError('Duplicate of key {}'.format(k))\n\n        # Record original key & value + transformed key & value\n        key_tr = self.key_f(key)\n        value_tr = self.value_f(value)\n\n        ep_assert(IsConstExpr(key_tr), 'Invalid key {}'.format(key))\n        ep_assert(IsConstExpr(value_tr), 'Invalid value {}'.format(value))\n\n        self._keys.append(key_tr)\n        self._values.append(value_tr)\n        self._dict.append((key, value))\n\n    def AddPairLazily(self, key, value):\n        from eudplib.core.allocator.payload import phase, PHASE_COLLECTING\n        assert phase == PHASE_COLLECTING\n\n        # AddPair performs the duplicate check\n        self.AddPair(key, value)\n\n    def Evaluate(self):\n        # Collection phase\n        if not self._addedToParents:\n            for parent, key in self._ref_by:\n                parent.AddPairLazily(key, self)\n            self._addedToParents = True\n        return super().Evaluate()\n\n    def DynamicConstructed(self):\n        return True\n\n    def GetDataSize(self):\n        return 4 + 8 * len(self._dict)\n\n    def CollectDependency(self, emitbuffer):\n        # plain ints carry no dependency; only non-int (object) entries do\n        for key_tr in self._keys:\n            if not isinstance(key_tr, int):\n                emitbuffer.WriteDword(key_tr)\n        for value_tr in self._values:\n            if not isinstance(value_tr, int):\n                emitbuffer.WriteDword(value_tr)\n\n    def WritePayload(self, emitbuffer):\n        emitbuffer.WriteDword(len(self._dict))\n        tuples = sorted(zip(self._keys, self._values, self._dict),\n                        key=lambda kvd:self.sortkey_f(kvd[2][0], kvd[2][1]))\n        for key, value, item in tuples:\n            emitbuffer.WriteDword(key)\n            emitbuffer.WriteDword(value)\n\n    @staticmethod\n    def Iter(table_epd, func):\n        '''\n        func receives i, key_epd, value_epd and does something with them\n        '''\n        i = EUDVariable()\n        k_epd = table_epd + 1\n        v_epd = table_epd + 2\n        i << 0\n        if EUDInfLoop()():\n            EUDBreakIf(MemoryEPD(table_epd, AtMost, i))\n            func(i, k_epd, v_epd)\n            DoActions([\n                i.AddNumber(1),\n                k_epd.AddNumber(2),\n                v_epd.AddNumber(2)\n            ])\n        EUDEndInfLoop()\n\n    @staticmethod\n    def GetSize(table_epd):\n        return f_dwread_epd(table_epd)\n\n@EUDTypedFunc([None, None, EUDFuncPtr(2, 1), None], [None])\ndef SearchTable(key, table_epd, compareFunc, retval_epd):\n    def func(i, k, v):\n        if EUDIf()(compareFunc(key, f_dwread_epd(k)) == 0): # Caution: 0\n            f_dwwrite_epd(retval_epd, f_dwread_epd(v))\n            EUDReturn(1)\n        EUDEndIf()\n    ReferenceTable.Iter(table_epd, func)\n    EUDReturn(0)\n\n@EUDFunc\ndef SearchTableInv(value, table_epd, retval_epd):\n    def func(i, k, v):\n        if EUDIf()(MemoryEPD(v, Exactly, value)): # Caution: 0\n            f_dwwrite_epd(retval_epd, f_dwread_epd(k))\n            EUDReturn(1)\n        EUDEndIf()\n    ReferenceTable.Iter(table_epd, func)\n    EUDReturn(0)\n","sub_path":"repl/core/referencetable.py","file_name":"referencetable.py","file_ext":"py","file_size_in_byte":4211,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
{"seq_id":"379133034","text":"from ..models.department import Department\nfrom ..models.epci import EPCI\n\n\nclass EPCIService:\n    \"\"\" Class to deal with epci business logic \"\"\"\n\n    @staticmethod\n    def create_epci(epci_code, epci_name, department_id):\n        department = Department.objects.get(id=department_id)\n        epci = EPCI(\n            code=epci_code,\n            name=epci_name,\n        )\n        epci.save()\n        epci.departments.add(department)\n        epci.save()\n        return epci.id\n\n    @staticmethod\n    def find_epci_by_code(epci_code):\n        try:\n            return EPCI.objects.get(code=epci_code)\n        except EPCI.DoesNotExist:\n            return None\n\n    def find_or_create_epci(self, epci_code, epci_name, department_id):\n        # look up an existing EPCI by its code\n        epci = self.find_epci_by_code(epci_code)\n        if 
epci is not None:\n return epci.id\n return self.create_epci(epci_code, epci_name, department_id)\n","sub_path":"app/api/services/epci_service.py","file_name":"epci_service.py","file_ext":"py","file_size_in_byte":907,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"630269094","text":"# TODO:\n# - Fix points plot velocity;\n# - Add ticks to axes;\n# - Fix weird bug of window closing;\n\n\nfrom turtle import Turtle, Screen\nimport numpy as np\nimport tkinter as tk\n\nFONT = (\"Arial\", 10)\n\n\ndef draw_circle(radius):\n turtle = Turtle(visible=False)\n turtle.setpos(0, 0)\n turtle.penup()\n\n turtle.right(90)\n turtle.forward(radius)\n turtle.right(270)\n turtle.pendown()\n turtle.circle(radius)\n turtle.penup()\n\n\ndef draw_square(x_vert, y_vert):\n turtle = Turtle(visible=False)\n turtle.penup()\n\n for i in range(len(x_vert)):\n turtle.goto(x_vert[i], y_vert[i])\n turtle.pendown()\n\n\ndef draw_axis(length):\n turtle = Turtle(visible=False)\n turtle.pensize(1)\n turtle.penup()\n turtle.setpos(0, 0)\n\n turtle.pendown()\n turtle.goto(0, length)\n turtle.setpos(0, 0)\n turtle.goto(0, -length)\n turtle.setpos(0, 0)\n\n turtle.goto(-length, 0)\n turtle.setpos(0, 0)\n turtle.goto(length, 0)\n turtle.setpos(0, 0)\n\n\ndef plot_n_calculate(n, radius):\n turtle = Turtle(visible=False)\n turtle.speed('fastest')\n turtle.penup()\n\n x_ran = []\n y_ran = []\n\n for _ in range(0, n):\n x_ran.append(np.random.random_integers(-radius, radius + 1))\n y_ran.append(np.random.random_integers(-radius, radius + 1))\n\n score = 0\n\n for i in range(len(x_ran)):\n if x_ran[i] ** 2 + y_ran[i] ** 2 <= radius ** 2:\n score += 1\n\n for i in range(len(x_ran)):\n turtle.goto(x_ran[i], y_ran[i])\n turtle.dot(size=3)\n\n return 4 * (score / n)\n\n\ndef text_at_xy(x, y, text):\n turtle = Turtle(visible=False)\n turtle.penup()\n turtle.goto(x, y)\n turtle.write(text, font=FONT)\n\n\ndef main():\n screen = Screen()\n screen.setup(width=800, height=600, startx=250, starty=0)\n\n screen.title('Monte Carlo method for estimating the value of pi')\n radius = screen.numinput('Radius of the circle input', 'Your radius of choose: ', minval=0, maxval=300)\n points = int(\n screen.numinput('Number of points that will be generated', 'Number of points: ', minval=0, maxval=3000))\n draw_axis(radius * 0.8)\n draw_square([-0.5 * radius, 0.5 * radius, 0.5 * radius, -0.5 * radius, -0.5 * radius],\n [0.5 * radius, 0.5 * radius, -0.5 * radius, -0.5 * radius, 0.5 * radius])\n draw_circle(radius * 0.708)\n pi_aprox = plot_n_calculate(points, radius)\n text_at_xy(-270, -250, 'Estimated value of pi: {}'.format(pi_aprox))\n tk.mainloop()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"Random/monte_carlo.py","file_name":"monte_carlo.py","file_ext":"py","file_size_in_byte":2519,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"540667019","text":"from tkinter import *\nfrom utils.PatientBuilder import PatientBuilder\nfrom repository.PatientRepository import PatientRepository\nfrom models.Address import Address\nfrom tkinter import messagebox\n\nclass PatientDetailFrame(Frame):\n def __init__(self, master):\n Frame.__init__(self, master)\n\n self.configure(bg=\"white\")\n\n self._selected_patient = None\n\n self._gender = StringVar()\n self._gender.set('M')\n\n self._general_frame = LabelFrame(self, bg='white', text='General information')\n\n self._pin_label = Label(self._general_frame, text='PIN', bg='white')\n 
self._firstname_label = Label(self._general_frame, text='Firstname', bg='white')\n self._lastname_label = Label(self._general_frame, text='Lastname', bg='white')\n self._birthday_label = Label(self._general_frame, text='Birthday', bg='white')\n self._gender_label = Label(self._general_frame, text='Gender', bg='white')\n self._height_label = Label(self._general_frame, text='Height', bg='white')\n self._weight_label = Label(self._general_frame, text='Weight', bg='white')\n\n self._pin_entry = Entry(self._general_frame, bg='#EEEEEE', state='disabled', relief=FLAT, width=23, justify='center')\n self._firstname_entry = Entry(self._general_frame, bg='#EEEEEE', state='disabled', relief=FLAT, width=23, justify='center')\n self._lastname_entry = Entry(self._general_frame, bg='#EEEEEE', state='disabled', relief=FLAT, width=23, justify='center')\n self._birthday_entry = Entry(self._general_frame, bg='#EEEEEE', state='disabled', relief=FLAT, width=23, justify='center')\n self._m_gender_radio = Radiobutton(self._general_frame, text=\"M\", variable=self._gender, value='M', bg='white')\n self._f_gender_radio = Radiobutton(self._general_frame, text=\"F\", variable=self._gender, value='F', bg='white')\n self._height_entry = Entry(self._general_frame, bg='#EEEEEE', state='disabled', relief=FLAT, width=23, justify='center')\n self._weight_entry = Entry(self._general_frame, bg='#EEEEEE', state='disabled', relief=FLAT, width=23, justify='center')\n\n self._contact_frame = LabelFrame(self, bg='white', text='Contact')\n\n self._phone_label = Label(self._contact_frame, text='Phone ', bg='white')\n self._email_label = Label(self._contact_frame, text='Email', bg='white')\n\n self._phone_entry = Entry(self._contact_frame, bg='#EEEEEE', state='disabled', relief=FLAT, width=23, justify='center')\n self._email_entry = Entry(self._contact_frame, bg='#EEEEEE', state='disabled', relief=FLAT, width=23, justify='center')\n\n self._address_frame = LabelFrame(self, bg='white', text='Address')\n\n self._city_label = Label(self._address_frame, text='City ', bg='white')\n self._street_label = Label(self._address_frame, text='Street', bg='white')\n self._zip_code_label = Label(self._address_frame, text='Zip code', bg='white')\n self._postal_code_label = Label(self._address_frame, text='Postal code', bg='white')\n\n self._city_entry = Entry(self._address_frame, bg='#EEEEEE', state='disabled', relief=FLAT, width=20, justify='center')\n self._street_entry= Entry(self._address_frame, bg='#EEEEEE', state='disabled', relief=FLAT, width=20, justify='center')\n self._zip_code_entry = Entry(self._address_frame, bg='#EEEEEE', state='disabled', relief=FLAT, width=20, justify='center')\n self._postal_code_entry = Entry(self._address_frame, bg='#EEEEEE', state='disabled', relief=FLAT, width=20, justify='center')\n\n self._insurance_frame = LabelFrame(self, bg='white', text='Insurance')\n\n self._insurance_pin_label = Label(self._insurance_frame, text='PIN ', bg='white')\n self._insurance_number_label = Label(self._insurance_frame, text='Number ', bg='white')\n\n self._insurance_pin_entry = Entry(self._insurance_frame, bg='#EEEEEE', state='disabled', relief=FLAT, width=20, justify='center')\n self._insurance_number_entry = Entry(self._insurance_frame, bg='#EEEEEE', state='disabled', relief=FLAT, width=20, justify='center')\n\n self._alergies_frame = LabelFrame(self, bg='white', text='Alergies')\n\n self._alergies_text = Text(self._alergies_frame, width=23, height=3, bg='#EEEEEE', state='disabled', relief=FLAT)\n\n self._button_frame = 
Frame(self, pady=5, bg='white')\n\n self._button_frame.columnconfigure(0, weight=1)\n self._button_frame.columnconfigure(1, weight=1)\n\n self._export_button = Button(self._button_frame, text='Export', font='Helvetica 12 bold', padx=20, pady=12, bg='#1E88E5', fg='white', relief=FLAT)\n self._save_button = Button(self._button_frame, text=' Save ', font='Helvetica 12 bold', padx=20, pady=12, bg='#1E88E5', fg='white', relief=FLAT, disabledforeground=\"#CFD8DC\", command=self._save_patient)\n\n\n self._entries = (\n self._firstname_entry,\n self._lastname_entry,\n self._birthday_entry,\n self._height_entry,\n self._weight_entry,\n self._phone_entry,\n self._email_entry,\n self._city_entry,\n self._street_entry,\n self._zip_code_entry,\n self._postal_code_entry,\n self._insurance_pin_entry,\n self._insurance_number_entry,\n self._pin_entry\n )\n\n #Placing\n self._pin_label.grid(row=0, column=0, sticky=W, padx=(3, 3), pady=(5, 0))\n self._firstname_label.grid(row=1, column=0, sticky=W, padx=(3, 3))\n self._lastname_label.grid(row=2, column=0, sticky=W, padx=(3, 3))\n self._birthday_label.grid(row=3, column=0, sticky=W, padx=(3, 3))\n self._gender_label.grid(row=4, column=0, sticky=W, padx=(3, 3))\n self._height_label.grid(row=5, column=0, sticky=W, padx=(3, 3))\n self._weight_label.grid(row=6, column=0, sticky=W, padx=(3, 3))\n\n self._pin_entry.grid(row=0, column=1, sticky=W, columnspan=2, pady=(8, 6))\n self._firstname_entry.grid(row=1, column=1, sticky=W, columnspan=2, pady=(3, 3))\n self._lastname_entry.grid(row=2, column=1, sticky=W, columnspan=2, pady=(3, 3))\n self._birthday_entry.grid(row=3, column=1, sticky=W, columnspan=2, pady=(3, 3))\n self._m_gender_radio.grid(row=4, column=1, sticky=W, pady=(3, 3))\n self._f_gender_radio.grid(row=4, column=2, sticky=W, pady=(3, 3))\n self._height_entry.grid(row=5, column=1, sticky=W, columnspan=2, pady=(3, 3))\n self._weight_entry.grid(row=6, column=1, sticky=W, columnspan=2, pady=(3, 0))\n\n self._general_frame.grid(row=0, column=0, rowspan=2, pady=(10,0), ipadx=5, ipady=5, padx=7, sticky=W+E+S+N)\n\n self._phone_label.grid(row=0, column=0, sticky=W, padx=(5, 5), pady=(5, 0))\n self._email_label.grid(row=1, column=0, sticky=W, padx=(5, 5), pady=(0, 5))\n\n self._phone_entry.grid(row=0, column=1, sticky=W, pady=(8, 3))\n self._email_entry.grid(row=1, column=1, sticky=W, pady=(3, 0))\n\n self._contact_frame.grid(row=2, column=0, pady=2, ipadx=0, ipady=5, padx=7, sticky=W+E+S+N)\n\n self._city_label.grid(row=0, column=0, sticky=W, padx=(5, 5), pady=(0, 0))\n self._street_label.grid(row=1, column=0, sticky=W, padx=(5, 5))\n self._postal_code_label.grid(row=2, column=0, sticky=W, padx=(5, 5))\n self._zip_code_label.grid(row=3, column=0, sticky=W, padx=(5, 5))\n\n self._city_entry.grid(row=0, column=1, sticky=W, pady=(3, 6))\n self._street_entry.grid(row=1, column=1, sticky=W, pady=(3, 3))\n self._postal_code_entry.grid(row=2, column=1, sticky=W, pady=(3, 3))\n self._zip_code_entry.grid(row=3, column=1, sticky=W, pady=(3, 0))\n\n self._address_frame.grid(row=0, column=1, pady=(10,0), ipadx=5, padx=7, sticky=W+E+S+N)\n\n self._insurance_number_label.grid(row=0, column=0, sticky=W, padx=(5, 5), pady=(5, 0))\n self._insurance_pin_label.grid(row=1, column=0, sticky=W, padx=(5, 5))\n\n self._insurance_pin_entry.grid(row=0, column=1, sticky=W, pady=(8, 6))\n self._insurance_number_entry.grid(row=1, column=1, sticky=W, pady=(3, 0))\n\n self._insurance_frame.grid(row=1, column=1, ipadx=5, padx=7, sticky=W+E+S+N)\n\n self._alergies_text.grid(row=0, column=0, 
pady=(5, 10), padx=(10,10))\n\n self._alergies_frame.grid(row=2, column=1)\n\n self._button_frame.grid(row=3, column=0, ipadx=5, columnspan=2, sticky=W + E + S + N)\n\n self._export_button.grid(row=0, column=0, sticky=W, pady=(8, 6), padx=(40,0))\n self._save_button.grid(row=0, column=1, sticky=W, pady=(3, 0), padx=(50,0))\n\n def fill_entries(self, patient):\n self._enable_entries()\n\n self._selected_patient = patient\n\n for x in self._entries:\n x.delete(0, END)\n\n self._alergies_text.delete('1.0', END)\n\n self._firstname_entry.insert(0, patient.data['fname'])\n self._lastname_entry.insert(0, patient.data['lname'])\n self._birthday_entry.insert(0, patient.data['birthday'])\n self._height_entry.insert(0, patient.data['height'])\n self._weight_entry.insert(0, patient.data['weight'])\n self._phone_entry.insert(0, patient.data['phone'])\n self._email_entry.insert(0, patient.data['email'])\n self._city_entry.insert(0, patient.data['address'].city)\n self._street_entry.insert(0, patient.data['address'].street)\n self._zip_code_entry.insert(0, patient.data['address'].zip_code)\n self._postal_code_entry.insert(0, patient.data['address'].postal_code)\n self._insurance_pin_entry.insert(0, patient.data['insurance_pin'])\n self._insurance_number_entry.insert(0, patient.data['insurance_number'])\n self._pin_entry.insert(0, patient.data['pin'])\n\n self._gender.set(patient.data['gender'])\n\n alergies = ', '\n\n for a in patient.data['alergies']:\n alergies += a + ', '\n\n self._alergies_text['state'] = 'normal'\n self._alergies_text.insert(END, alergies[2: len(alergies) - 2])\n\n def _enable_entries(self):\n for entry in self._entries:\n entry['state'] = 'normal'\n\n def _save_patient(self):\n fname = self._firstname_entry.get()\n lname = self._lastname_entry.get()\n birthday = self._birthday_entry.get()\n height = self._height_entry.get()\n weight = self._weight_entry.get()\n phone = self._phone_entry.get()\n email = self._email_entry.get()\n city = self._city_entry.get()\n street = self._street_entry.get()\n zip_code = self._zip_code_entry.get()\n postal_code = self._postal_code_entry.get()\n insurance_pin = self._insurance_pin_entry.get()\n insurance_number = self._insurance_number_entry.get()\n pin = self._pin_entry.get()\n gender = self._gender.get()\n alergies_text = self._alergies_text.get('1.0', END)\n recipes = self._selected_patient.data['recipes']\n alergies = []\n\n for alergie in alergies_text.split(','):\n alergies.append(alergie)\n\n patient = PatientBuilder()\\\n .set_pin(pin)\\\n .set_fname(fname)\\\n .set_lname(lname)\\\n .set_height(height)\\\n .set_weight(weight)\\\n .set_insurance_pin(insurance_pin)\\\n .set_insurance_number(insurance_number)\\\n .set_phone(phone)\\\n .set_email(email)\\\n .set_gender(gender)\\\n .set_address(Address(city, zip_code, postal_code, street))\\\n .set_alergies(alergies)\\\n .set_recipes(recipes)\\\n .build()\n\n PatientRepository.get_instance().update(patient)\n\n messagebox.showinfo(\"Update\", \"Pacient succesfully updated\")\n\n\n","sub_path":"graphics/PatientDetailFrame.py","file_name":"PatientDetailFrame.py","file_ext":"py","file_size_in_byte":11776,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"350750791","text":"# -*- coding: utf-8 -*-\nimport pandas as pd\nimport jxmlease\nimport datetime\nimport re\n\n\n\n####################################### Itinerary class ##################################\nclass Itinerary(object):\n\n\t\n\tdef segment_status_list(self, data):\n\t\t\"\"\" 
Retrieve a list which contains all status codes\n\t\t\tfor each segment \"\"\"\n\n\t\tstatus_code = []\n\t\ttry:\n\t\t\tfor path, _, node in jxmlease.parse(data, generator=\"tir38:TravelItinerary/tir38:ItineraryInfo/tir38:ReservationItems/tir38:Item/tir38:FlightSegment\"):\n\t\t\t\ts = node.get_xml_attr('Status')\n\t\t\t\tstatus_code.append(str(s))\n\t\texcept:\n\t\t\tstatus_code = ['N/A']\n\t\n\t\treturn status_code\n\t\n\n\tdef airline_list(self, data):\n\t\t\"\"\" Retrieve a list which contains the marketing airline for each segment\"\"\"\n\t\t\n\t\tairline = []\n\t\ttry:\n\t\t\tfor path, _, node in jxmlease.parse(data, generator=\"tir38:TravelItinerary/tir38:ItineraryInfo/tir38:ReservationItems/tir38:Item/tir38:FlightSegment\"):\n\t\t\t\ta1 = node['tir38:MarketingAirline'].get_xml_attr('Code')\n\t\t\t\tairline.append(str(a1))\t\n\t\texcept:\n\t\t\tairline = ['N/A']\n\t\n\t\treturn airline\n\t\t\n\tdef carrier_list(self, data):\n\t\t\"\"\" Retrieve a list which contains the operating carrier for each segment\"\"\"\n\t\t\n\t\tcarrier = []\n\t\ttry:\n\t\t\tfor path, _, node in jxmlease.parse(data, generator=\"tir38:TravelItinerary/tir38:ItineraryInfo/tir38:ReservationItems/tir38:Item/tir38:FlightSegment\"):\n\t\t\t\tc1 = node['tir38:OperatingAirline'].get_xml_attr('Code')\n\t\t\t\tcarrier.append(str(c1))\n\t\texcept:\n\t\t\tcarrier = ['N/A']\n\t\n\t\treturn carrier\n\n\n\tdef origin_city_list(self, data):\n\t\t\"\"\" Retrieve a list which contains the origin\n\t\t\tcity for each segment \"\"\"\n\n\t\t\n\t\torigin_location = []\n\t\ttry:\n\t\t\tfor path, _, node in jxmlease.parse(data, generator=\"tir38:TravelItinerary/tir38:ItineraryInfo/tir38:ReservationItems/tir38:Item/tir38:FlightSegment\"):\n\t\t\t\torigin_location1 = node['tir38:OriginLocation'].get_xml_attr('LocationCode')\n\t\t\t\torigin_location.append(str(origin_location1))\n\t\texcept:\n\t\t\torigin_location = [\"N/A\"]\n\n\t\treturn origin_location \n\n\n\tdef destination_city_list(self, data):\n\t\t\"\"\" Retrieve a list which contains the destination city\n\t\t\tfor each segment \"\"\" \n\n\t\tdestination_location = []\n\t\ttry:\n\n\t\t\tfor path, _, node in jxmlease.parse(data, generator=\"tir38:TravelItinerary/tir38:ItineraryInfo/tir38:ReservationItems/tir38:Item/tir38:FlightSegment\"):\n\t\t\t\tdestination_location1 = node['tir38:DestinationLocation'].get_xml_attr('LocationCode')\n\t\t\t\tdestination_location.append(str(destination_location1))\n\t\n\t\texcept:\n\t\t\tdestination_location = [\"N/A\"]\n\t\t\n\t\treturn destination_location \n\n\n\tdef flight_number_list(self, data):\n\t\t\"\"\" This method retrieves a list which contains the flight\n\t\tnumber for each segment \"\"\"\n\n\t\tflight_number = []\n\t\ttry:\n\t\t\tfor path, _, node in jxmlease.parse(data, generator=\"tir38:TravelItinerary/tir38:ItineraryInfo/tir38:ReservationItems/tir38:Item/tir38:FlightSegment\"):\n\t\t\t\tf = node['tir38:OperatingAirline'].get_xml_attr('FlightNumber')\n\t\t\t\tflight_number.append(str(f))\n\t\texcept:\n\t\t\tflight_number = ['N/A'] \n\t\t\t\n\t\treturn flight_number\n\n\tdef class_of_service_list(self, data):\n\t\t\"\"\" This method retrieves a list which contains the class\n\t\tof service for each segment \"\"\"\n\n\t\tclass_of_service = []\n\t\ttry:\n\t\t\tfor path, _, node in jxmlease.parse(data, generator=\"tir38:TravelItinerary/tir38:ItineraryInfo/tir38:ReservationItems/tir38:Item/tir38:FlightSegment\"):\n\t\t\t\tclass_of_service1 = node.get_xml_attr('ResBookDesigCode')\n\t\t\t\tclass_of_service.append(str(class_of_service1))\n\t\texcept:\n\t\t\tclass_of_service = 
['N/A'] \n\t\t\t\n\t\treturn class_of_service\n\n\n\tdef flight_duration_list(self, data):\n\t\t\"\"\" This method retrieve a list which contains flight duration \"\"\"\n\n\t\ttime = []\n\t\ttry:\n\t\t\t\n\t\t\tfor path, _, node in jxmlease.parse(data, generator=\"tir38:TravelItinerary/tir38:ItineraryInfo/tir38:ReservationItems/tir38:Item/tir38:FlightSegment\"):\n\t\t\t\ttime1 = node.get_xml_attr('ElapsedTime')\n\t\t\t\ttime.append(str(time1))\n\t\texcept:\n\t\t\ttime = ['N/A'] \n\t\treturn time\n\n\tdef departure_datetime_list(self, data):\n\t\t\"\"\" Return list wich contains departure datetime \"\"\"\n\n\t\tdeparture_datetime = []\n\t\ttry:\n\t\t\tfor path, _, node in jxmlease.parse(data, generator=\"tir38:TravelItinerary/tir38:ItineraryInfo/tir38:ReservationItems/tir38:Item/tir38:FlightSegment\"):\n\t\t\t\tdeparture_datetime1 = node.get_xml_attr('DepartureDateTime')\n\t\t\t\tdeparture_datetime.append(str(departure_datetime1))\n\t\texcept:\n\t\t\tdeparture_datetime = ['N/A']\n\t\treturn departure_datetime\n\n\n\tdef arrival_datetime_list(self, data):\n\t\t\"\"\" Retrieve list wich contains arrival datetimes \"\"\"\n\n\t\tdatetime = []\n\t\ttry:\n\t\t\tfor path, _, node in jxmlease.parse(data, generator=\"tir38:TravelItinerary/tir38:ItineraryInfo/tir38:ReservationItems/tir38:Item/tir38:FlightSegment\"):\n\t\t\t\tdatetime1 = node.get_xml_attr('ArrivalDateTime')\n\t\t\t\tdatetime.append(str(datetime1))\n\t\texcept:\n\t\t\tdatetime = ['N/A']\n\t\treturn datetime\n\t\n\n\tdef updated_depature_datetime(self, data):\n\t\t\"\"\" Retrieve list wich contains updated depature datetimes \"\"\"\n\n\t\tupdate_datetime = []\n\n\t\ttry:\n\t\t\tfor path, _, node in jxmlease.parse(data, generator=\"tir38:TravelItinerary/tir38:ItineraryInfo/tir38:ReservationItems/tir38:Item/tir38:FlightSegment\"):\n\t\t\t\tdatetime1 = node['tir38:UpdatedDepartureTime']\n\t\t\t\tupdate_datetime.append(str(datetime1))\n\t\texcept:\n\t\t\tupdate_datetime = ['N/A']\n\t\treturn update_datetime\n\n\tdef get_pnr(self, data):\n\t\t\"\"\" get the agency's dk \"\"\"\n\n\t\ttry:\n\t\t\troot = jxmlease.parse(data)\n\t\t\tpnr = root['soap-env:Envelope']['soap-env:Body']['tir38:TravelItineraryReadRS']['tir38:TravelItinerary']['tir38:ItineraryRef'].get_xml_attr('ID')\n\t\texcept:\n\t\t\tpnr = ['N/A']\n\t\treturn pnr\n\t\n\n\tdef miscsegment_list(self, data):\n\t\t\"\"\" Retrieve list which contains all messages text\n\t\t\tfrom MiscSegment \"\"\" \n\n\t\tmsg_list = []\n\n\t\ttry:\n\t\t\tfor path, _, node in jxmlease.parse(data, generator=\"tir38:ReservationItems/tir38:Item/tir38:MiscSegment/tir38:Text\"):\n\t\t\t\tt = node\n\t\t\t\tmsg_list.append(str(t))\n\t\t\t\n\t\texcept:\n\t\t\tmsg_list = ['N/A']\n\t\treturn msg_list\n\t\t\n\tdef miscsegment_status(self, data):\n\t\t\"\"\" Retrieve list which contains all status\n\t\t\tfrom MiscSegment \"\"\" \n\n\t\tmsg_status = []\n\n\t\ttry:\n\t\t\tfor path, _, node in jxmlease.parse(data, generator=\"tir38:ReservationItems/tir38:Item/tir38:MiscSegment\"):\n\t\t\t\tt = node.get_xml_attr('Status')\n\t\t\t\tmsg_status.append(str(t))\n\t\texcept:\n\t\t\tmsg_list = []\n\t\treturn msg_status\n\n\tdef validating_carrier(self, data):\n\t\t\"\"\"Return the validating carrier for this reservation\"\"\"\n\n\t\tvalidatingcarrier = []\n\t\ttry:\n\t\t\tfor path, _, node in jxmlease.parse(data, generator=\"tir38:PriceQuote/tir38:PricedItinerary\"):\n\t\t\t\tvalidatingcarrier1 = 
node.get_xml_attr('ValidatingCarrier')\n\t\t\t\tvalidatingcarrier.append(str(validatingcarrier1))\n\t\texcept:\n\t\t\tvalidatingcarrier = ['N/A']\n\t\treturn validatingcarrier\n\n\tdef return_date(self, data):\n\t\t\"\"\"Return the validating carrier for this reservation\"\"\"\n\t\tvalidatingcarrier = []\n\t\ttry:\n\t\t\tfor path, _, node in jxmlease.parse(data, generator=\"tir38:PriceQuote/tir38:PricedItinerary\"):\n\t\t\t\tvalidatingcarrier1 = node.get_xml_attr('ValidatingCarrier')\n\t\t\t\tvalidatingcarrier.append(str(validatingcarrier1))\n\t\texcept:\n\t\t\tvalidatingcarrier = ['N/A']\n\n\t\t#print validatingcarrier\n\t\treturn validatingcarrier[0]\n\n\tdef frequent_flyer(self, data):\n\t\t\"\"\" get the return fidelity \"\"\"\n\n\t\ttry:\n\t\t\troot = jxmlease.parse(data)\n\t\t\tfrequent = root['soap-env:Envelope']['soap-env:Body']['tir38:TravelItineraryReadRS']['tir38:TravelItinerary']['tir38:CustomerInfo']['tir38:CustLoyalty'].get_xml_attr('MembershipID')\n\t\texcept:\n\t\t\tfrequent = ['N/A']\n\t\treturn frequent\n\t\n################################### Passenger class ####################################\n\nclass Passenger(object):\n\n\tdef first_name_list(self, data):\n\t\t\"\"\" Retrieve a list which contains all passengers firstname \n\t\t\tfor each segment \"\"\"\n\n\t\tfirst_name= []\n\t\ttry:\n\t\t\tfor path, _, node in jxmlease.parse(data, generator=\"tir38:CustomerInfo/tir38:PersonName\"):\n\t\t\t\t\n\t\t\t\tfirst_name1 = node['tir38:GivenName']\n\t\t\t\tfirst_name.append(str(first_name1))\n\t\texcept:\n\t\t\tfirst_name = ['N/A']\n\t\treturn first_name\n\t\t\n\n\tdef surname_list(self, data):\n\t\t\"\"\" Retrieve a list which contains all passengers surname \n\t\t\tfor each segment \"\"\"\n\n\t\tsurname= []\n\t\ttry:\n\t\t\tfor path, _, node in jxmlease.parse(data, generator=\"tir38:CustomerInfo/tir38:PersonName\"):\n\t\t\t\t\n\t\t\t\tsurname1 = node['tir38:Surname']\n\t\t\t\tsurname.append(str(surname1))\n\t\texcept:\n\t\t\tsurname = ['N/A']\n\t\treturn surname\t\n\n\tdef full_name_list(self,data):\n\t\t\"\"\" Retrieve a list which contains all passengers fullname \n\t\t\tfor each segment \"\"\"\n\n\t\tfull_name = []\n\t\ttry:\n\t\t\tfor path, _, node in jxmlease.parse(data, generator=\"tir38:CustomerInfo/tir38:PersonName\"):\n\t\t\t\t\n\t\t\t\tfull_name1=node['tir38:GivenName']+ ' '+node['tir38:Surname']\n\t\t\t\tfull_name.append(str(full_name1))\n\t\texcept:\n\t\t\tfull_name = ['N/A']\n\t\treturn full_name\n\t\n\tdef passenger_type_list(self, data):\n\t\t\"\"\" this method contains list of all passenger type\"\"\"\n\n\t\tpass_type = []\n\t\ttry:\n\t\t\tfor path, _, node in jxmlease.parse(data, generator=\"tir38:CustomerInfo/tir38:PersonName\"):\n\t\t\t\tpass_type1 = node.get_xml_attr('PassengerType')\n\t\t\t\tpass_type.append(str(pass_type1))\n\t\texcept:\n\t\t\tpass_type = ['N/A']\n\t\treturn pass_type\n\n\tdef date_of_birth_list(self, data):\n\t\t\"\"\" this method retrieve the list of passengers's date of birth \"\"\"\n\n\t\tbirth_day = []\n\t\ttry:\n\t\t\tfor path, _, node in jxmlease.parse(data, generator=\"tir38:TravelItinerary/tir38:SpecialServiceInfo/tir38:Service\"):\n\t\t\t\tif node.get_xml_attr('SSR_Type') == \"DOCS\":\n\t\t\t\t\tbd = str(node['tir38:Text'])\n\t\t\t\t\t#string = bd.split('/')\n\t\t\t\t\t#dob_list.append(string[5])\n\t\t\t\t\tbirth = re.search(r\"[0-9]{2}[A-Z]{3}[0-9]{4}\",bd,flags=0).group()\n\t\t\t\t\tbirth_day.append(birth)\n\t\t\t\t\n\t\texcept:\n\t\t\tbirth_day = ['N/A']\n\n\t\treturn birth_day\n\n\tdef email_list(self, data):\n\t\t\"\"\" 
this method retrieves the list of passengers' email addresses \"\"\"\n\t\ttry:\n\t\t\tmail = []\n\n\t\t\tfor path, _, node in jxmlease.parse(data, generator=\"tir38:TravelItinerary/tir38:SpecialServiceInfo/tir38:Service\"):\n\t\t\t\tif node.get_xml_attr('SSR_Type') == \"CTCE\":\n\t\t\t\t\tbd = str(node['tir38:Text'])\n\t\t\t\t\tstring = re.search(r'[\\w\\.-]+//[\\w\\.-]+',bd).group(0)\n\t\t\t\t\temail = string.replace('//','@')\n\t\t\t\t\tmail.append(email)\n\t\texcept:\n\t\t\tmail = ['N/A']\n\t\t\t\t\n\t\treturn mail\n\n\tdef phone_list(self, data):\n\t\t\"\"\" this method retrieves the list of passengers' phone numbers \"\"\"\n\t\ttry:\n\t\t\tphone = []\n\n\t\t\tfor path, _, node in jxmlease.parse(data, generator=\"tir38:TravelItinerary/tir38:SpecialServiceInfo/tir38:Service\"):\n\t\t\t\tif node.get_xml_attr('SSR_Type') == \"CTCM\":\n\t\t\t\t\tbd = str(node['tir38:Text'])\n\t\t\t\t\tphone1 = re.search(r'[0-9]{5,}',bd).group(0)\n\t\t\t\t\tphone.append(phone1)\n\t\texcept:\n\t\t\tphone = ['N/A']\n\t\t\t\t\n\t\treturn phone\n\n\tdef gender_list(self, data):\n\t\t\"\"\" this method returns the passenger's gender \"\"\"\n\n\t\tgender = 'N/A'\n\n\t\ttry:\n\n\t\t\tfor path, _, node in jxmlease.parse(data, generator=\"tir38:TravelItinerary/tir38:SpecialServiceInfo/tir38:Service\"):\n\t\t\t\tif node.get_xml_attr('SSR_Type') == \"DOCS\":\n\t\t\t\t\tbd = str(node['tir38:Text'])\n\t\t\t\t\tstring = bd.split('/')\n\t\t\t\t\tfor i in string:\n\t\t\t\t\t\tif i==\"F\" or i==\"M\":\n\t\t\t\t\t\t\tgender=i\n\t\t\t\t\t\n\t\texcept:\n\t\t\tgender = 'N/A'\n\t\treturn gender\n\n\t\n###################################### Agency class ######################################\n\nclass Agency(object):\n\n\n\tdef agency_dk(self, data):\n\t\t\"\"\" get the agency's dk \"\"\"\n\n\t\ttry:\n\t\t\troot = jxmlease.parse(data)\n\t\t\tdk = root['soap-env:Envelope']['soap-env:Body']['tir38:TravelItineraryReadRS']['tir38:TravelItinerary']['tir38:ItineraryRef'].get_xml_attr('CustomerIdentifier')\n\t\texcept:\n\t\t\tdk = ['N/A']\n\t\treturn dk\n\n\n\tdef agency_phone_list(self, data):\n\t\t\"\"\" retrieve the phone list of the agency \"\"\"\n\n\t\tphone = []\n\t\ttry:\n\t\t\tfor path, _, node in jxmlease.parse(data, generator=\"tir38:CustomerInfo/tir38:ContactNumbers/tir38:ContactNumber\"):\n\t\t\t\tphone1 = node.get_xml_attr(\"Phone\")\n\t\t\t\tphone.append(str(phone1))\n\t\texcept:\n\t\t\tphone = ['N/A']\n\t\treturn phone\n\t\n\tdef agency_address(self, data):\n\t\t\"\"\" this method retrieves a list which contains agency addresses \"\"\"\n\n\t\taddressline = []\n\t\ttry:\n\t\t\tfor path, _, node in jxmlease.parse(data,generator=\"tir38:CustomerInfo/tir38:ContactNumbers/tir38:ContactNumber\"):\n\t\t\t\taddressline1 = node.get_xml_attr('LocationCode')\n\t\t\t\taddressline.append(str(addressline1))\n\t\texcept:\n\t\t\taddressline = ['N/A']\n\t\treturn addressline\n\n\tdef agency_name(self):\n\n\t\tname = []\n\t\ttry:\n\t\t\tprint(1)\n\t\texcept:\n\t\t\tname = ['N/A']\n\t\treturn name\n\n\tdef agency_email(self):\n\n\t\tmail = []\n\t\ttry:\n\t\t\tprint(1)\n\t\texcept:\n\t\t\tmail = ['N/A']\n\t\treturn mail\n\n\tdef agency_pcc(self, data):\n\t\t\"\"\" get the agency's pcc \"\"\"\n\n\t\ttry:\n\t\t\troot = jxmlease.parse(data)\n\t\t\tpcc = root['soap-env:Envelope']['soap-env:Body']['tir38:TravelItineraryReadRS']['tir38:TravelItinerary']['tir38:ItineraryRef']['tir38:Source'].get_xml_attr('AAA_PseudoCityCode')\n\t\texcept:\n\t\t\tpcc = ['N/A']\n\t\treturn pcc\n\n\tdef agency_create(self, data):\n\t\t\"\"\" get the agency's creation agent 
\"\"\"\n\n\t\ttry:\n\t\t\troot = jxmlease.parse(data)\n\t\t\tagent = root['soap-env:Envelope']['soap-env:Body']['tir38:TravelItineraryReadRS']['tir38:TravelItinerary']['tir38:ItineraryRef']['tir38:Source'].get_xml_attr('CreationAgent')\n\t\texcept:\n\t\t\tagent = ['N/A']\n\t\treturn agent\n\n########################################### Pricing class #####################################\n\nclass Pricing(object):\n\t\n\tdef base_fare_list(self, data):\n\t\t\"\"\"This methed return the base fare list for this reservation\"\"\"\n\n\t\tbasefare = []\n\t\ttry:\n\t\t\tfor path, _, node in jxmlease.parse(data, generator=\"tir38:ItinTotalFare\"):\n\t\t\t\tbasefare1 = node['tir38:BaseFare'].get_xml_attr('Amount')\n\t\t\t\tbasefare.append(str(basefare1))\n\t\texcept:\n\t\t\tbasefare = ['N/A']\n\t\treturn basefare\n\n\tdef total_fare_list(self, data):\n\t\t\"\"\" this method retrieve a list which contains totals fares for this reservation \"\"\"\n\n\t\ttotalfare = []\n\t\ttry:\n\t\t\t for path, _, node in jxmlease.parse(data, generator=\"tir38:ItinTotalFare\"):\n\t\t\t\t totalfare1 = node['tir38:TotalFare'].get_xml_attr('Amount')\n\t\t\t\t totalfare.append(str(totalfare1))\n\n\t\texcept:\n\t\t\ttotalfare = ['N/A']\n\t\treturn totalfare\n\t\n\t\n\tdef tax_detail(self):\n\n\t\ttaxdetail = []\n\t\ttry:\n\t\t\tprint(1)\n\t\texcept:\n\t\t\ttaxdetail = ['N/A']\n\t\treturn taxdetail\n\t\n\tdef total_tax(self, data):\n\t\t\"\"\" Retrieve a list which contains total tax for this reservation \"\"\"\n\n\t\ttotaltax = []\n\t\ttry:\n\t\t\tfor path, _, node in jxmlease.parse(data, generator=\"tir38:ItinTotalFare\"):\n\t\t\t\ttotaltax1 = node['tir38:Totals']['tir38:Taxes']['tir38:Tax'].get_xml_attr('Amount')\n\t\t\t\ttotaltax.append(totaltax1)\n\t\t\t\n\t\texcept:\n\t\t\ttotaltax = ['N/A']\n\t\treturn totaltax\n\t\n\tdef pricing_type(self):\n\n\t\ttype = []\n\t\ttry:\n\t\t\tprint(1)\n\t\texcept:\n\t\t\ttype = ['N/A']\n\t\treturn type\n\t\n\tdef payement_card(self, data):\n\t\t\"\"\" retrieve the phone list of agency\"\"\"\n\n\t\ttry:\n\t\t\troot = jxmlease.parse(data)\n\t\t\tpayement = root['soap-env:Envelope']['soap-env:Body']['tir38:TravelItineraryReadRS']['tir38:TravelItinerary']['tir38:AccountingInfo']['tir38:PaymentInfo']['tir38:Payment']['tir38:CC_Info']['tir38:PaymentCard'].get_xml_attr('Number')\n\t\texcept:\n\t\t\tpayement = ['N/A']\n\t\treturn payement\n\n############################################ Rules class #################################################\n\nclass Rules():\n\n\n\tdef baggage_allowance(self, data):\n\t\t\"\"\" Retrieve a list which contains baggage allowance \"\"\"\n\n\t\tbaggage = []\n\t\ttry:\n\t\t\tfor path, _, node in jxmlease.parse(data, generator=\"tir38:TravelItinerary/tir38:ItineraryInfo/tir38:ItineraryPricing/tir38:PriceQuote/tir38:PricedItinerary/tir38:AirItineraryPricingInfo/tir38:PTC_FareBreakdown/tir38:FlightSegment\"):\n\t\t\t\tbaggage1 = node['tir38:BaggageAllowance'].get_xml_attr(\"Number\",\"\")\n\t\t\t\tbaggage.append(str(baggage1))\n\t\t\tbaggage.remove('')\n\t\texcept:\n\t\t\tpass \t\n\t\treturn baggage\n\n\tdef refund(self):\n\n\t\trefund_police = []\n\t\t \n\t\t\t\n\t\treturn refund_police\n\n\tdef changed(self):\n\n\t\tchanged = []\n\t\t \n\t\t\t\n\t\treturn changed\n\n############################################# Ticketing class ########################################\n\nclass Ticketing():\n\n\n\tdef is_ticketed(self, data):\n\t\t\"\"\" Return true if reservation is ticketed false else \"\"\"\n\n\t\tticket_number_list = []\n\t\ttry:\n\t\t\tfor path, _, node in 
jxmlease.parse(data, generator=\"tir38:TravelItinerary/tir38:ItineraryInfo/tir38:Ticketing\"):\n\t\t\t\tticket_number = node.get_xml_attr('eTicketNumber',\"\")\n\t\t\t\tticket_number_list.append(str(ticket_number)) \n\t\t\t\t#print ticket_number_list\n\t\t\tfor i in ticket_number_list:\n\t\t\t\t#print i\n\t\t\t\ttry:\n\t\t\t\t\tp = re.search(r'(TE)[ ][0-9]+',i, flags=0).group()\n\t\t\t\texcept:\n\t\t\t\t\tp = \"\"\n\t\t\t\tif len(p)>1:\n\t\t\t\t\treturn True\n\n\t\texcept:\n\t\t\treturn False \n\t\n\tdef is_exchange_list(self, data):\n\t\t\"\"\" Return true if reservation is exchange false else \"\"\"\n\n\t\texchange_list = []\n\t\ttry:\n\t\t\tfor path, _, node in jxmlease.parse(data, generator=\"tir38:TravelItinerary/tir38:AccountingInfo/tir38:TicketingInfo/tir38:Exchange\"):\n\t\t\t\texchange = node.get_xml_attr('Ind')\n\t\t\t\texchange_list.append(str(exchange))\n\t\texcept:\n\t\t\texchange_list = ['N/A']\n\t\treturn exchange_list\n\t\n\tdef ticketed_number_list(self, data):\n\t\t\"\"\" Retrieve a list which contains ticket number \"\"\"\n\n\t\tticket_number_list = []\n\t\ttkt_number_list = []\n\t\ttry:\n\t\t\n\t\t\tfor path, _, node in jxmlease.parse(data, generator=\"tir38:TravelItinerary/tir38:ItineraryInfo/tir38:Ticketing\"):\n\t\t\t\tticket_number = node.get_xml_attr('eTicketNumber',\"\")\n\t\t\t\tticket_number_list.append(str(ticket_number))\n\t\t\tfor i in ticket_number_list:\n\t\t\t\tif i=='':\n\t\t\t\t\tpass\n\t\t\t\telse:\n\n\t\t\t\t\ttkt_numbers = re.search(r\"(TE)[ ][0-9]+\",i,flags=0).group(0)\n\n\t\t\t\t\ttkt_number = re.search(r'[0-9]+',tkt_numbers,flags=0).group()\n\t\t\t\t\ttkt_number_list.append(tkt_number)\n\t\texcept:\n\t\t\ttkt_number_list = ['N/A']\n\t\t\t\n\t\t\n\t\treturn tkt_number_list\n\t\n\tdef ticketed_date(self, data):\n\t\t\"\"\" Retrieve the ticketed date\"\"\"\n\n\t\ttry:\n\t\t\troot = jxmlease.parse(data)\n\t\t\tcreat_date = root['soap-env:Envelope']['soap-env:Body']['tir38:TravelItineraryReadRS']['tir38:TravelItinerary']['tir38:ItineraryRef']['tir38:Source'].get_xml_attr('CreateDateTime')\t\n\t\texcept:\n\t\t\tcreat_date = 'NA'\n\t\treturn creat_date\n\n\tdef ticketing_pcc(self, data):\n\t\tticket_pcc_list = []\n\t\ttkt_pcc_list = []\n\t\ttry:\n\t\t\n\t\t\tfor path, _, node in jxmlease.parse(data, generator=\"tir38:TravelItinerary/tir38:ItineraryInfo/tir38:Ticketing\"):\n\t\t\t\tticket_pcc1 = node.get_xml_attr('eTicketNumber',\"\")\n\t\t\t\tticket_pcc_list.append(str(ticket_pcc1))\n\t\t\tfor i in ticket_pcc_list:\n\t\t\t\tif i=='':\n\t\t\t\t\tpass\n\t\t\t\telse:\n\t\t\t\t\ttkt_pcc = re.search(r\"[(/)][A-Z][ ][A-Z0-9_]{4}\",i,flags=0).group(0)\n\t\t\t\t\ttkt = re.search(r'[ ][A-Z0-9]+',tkt_pcc,flags=0).group()\n\t\t\t\t\treturn tkt\n\t\texcept:\n\t\t\treturn 'None'\n\n\tdef ticketing_agent(self, data):\n\t\tticket_agent_list = []\n\t\ttkt_agent_list = []\n\t\ttry:\n\t\t\tfor path, _, node in jxmlease.parse(data, generator=\"tir38:TravelItinerary/tir38:ItineraryInfo/tir38:Ticketing\"):\n\t\t\t\ttkt_agent_list1 = node.get_xml_attr('eTicketNumber',\"\")\n\t\t\t\ttkt_agent_list.append(str(tkt_agent_list1))\n\t\t\tfor i in tkt_agent_list:\n\t\t\t\tif i=='':\n\t\t\t\t\tpass\n\t\t\t\telse:\n\t\t\t\t\ttkt_agent = re.search(r\"(/)[A-Z][ ][A-Z0-9_]+\",i,flags=0).group(0)\n\t\t\t\t\tt_agent = tkt_agent[len(tkt_agent)-3:len(tkt_agent)]\n\t\t\t\t\treturn t_agent\n\t\texcept:\n\t\t\treturn 'None'\n\t\t\t\t\n\tdef issue_date(self, data):\n\t\tticket_agent_list = []\n\t\ttkt_agent_list = []\n\t\tfor path, _, node in jxmlease.parse(data, 
generator=\"tir38:TravelItinerary/tir38:ItineraryInfo/tir38:Ticketing\"):\n\t\t\ttkt_agent_list1 = node.get_xml_attr('eTicketNumber',\"\")\n\t\t\ttkt_agent_list.append(str(tkt_agent_list1))\n\t\t\tprint(tkt_agent_list)\n\t\tfor i in tkt_agent_list:\n\t\t\tif i=='':\n\t\t\t\tpass\n\t\t\telse:\n\t\t\t\ttkt_agent = re.search(r\"([0-9](/)[A-Z0-9_]+\",i,flags=0).group(0)\n\t\t\t\tprint(tkt_agent)\n\t\t#return ticket_agent_list\n\t\n\t\t\n\t\n\tdef is_refund(self):\n\n\t\treturn\n\n\tdef is_mco(self):\n\n\t\treturn\n\t\n\n################################### Accounting class ################################################\n\nclass Accounting():\n\n\tdef foi(self):\n\n\t\tfoi_list = []\n\t\ttry:\n\t\t\tprint(1)\n\t\texcept:\n\t\t\tfoi_list = ['N/A']\n\t\treturn foi_list\n\n\tdef total_amount_list(self, data):\n\t\t\"\"\" Retrieve a list which total amount \"\"\"\n\t\t\n\t\ttotalamount = []\n\t\ttry:\n\t\t\tfor path, _, node in jxmlease.parse(data, generator=\"tir38:ItinTotalFare\"):\n\t\t\t\t totalamount1 = node['tir38:TotalFare'].get_xml_attr('Amount')\n\t\t\t\t totalamount.append(str(totalamount1))\n\t\texcept:\n\t\t\ttotalamount = ['N/A']\n\t\treturn totalamount\n\t\n\tdef commission_list(self, data):\n\t\t\"\"\" Retrieve a list which contains commission \"\"\"\n\n\t\tcommission_list = []\n\t\ttry:\n\t\t\tfor path, _, node in jxmlease.parsedata(data, generator=\"tir38:AccountingInfo/tir38:PaymentInfo\"):\n\t\t\t\tcommission_list1 = node['tir38:Commission'].get_xml_attr('Amount','')\n\t\t\t\tcommission_list.append(str(commission_list1))\n\t\t\tcommission_list.remove('')\n\n\t\texcept:\n\t\t\tcommission_list = ['N/A']\n\t\treturn commission_list\n\n\n\n","sub_path":"backend/Fteros/gds/request/fetchdata.py","file_name":"fetchdata.py","file_ext":"py","file_size_in_byte":20503,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"219910214","text":"\npeople = [{\"name\":\"Todd\",\"age\":\"42\"},{\"name\":\"Jen\", \"age\":\"42\"},{\"name\":\"Doba\", \"age\":\"3\"},{\"name\":\"Chipotle\", \"age\":\"12\"}]\n\nfamily = {\"mom\":\"jen\", \"dad\":\"Todd\", \"dog\":\"Qdoba\"}\n\ndef sort_person(p):\n return p['age']\n\nsortedpeople = sorted(people,key=sort_person)\n\n\n\nfor item in sortedpeople:\n print(f\" {item['name']} is {item['age']}\")\n\n\nfor item in sorted(people,key=lambda p: int(p[\"age\"]) ) :\n print(f\" {item['name']} is {item['age']}\")\n\nlist1=[1,2,3,4,5]\nlist2=[]\n\nfor x in list1:\n list2.append(x*2)","sub_path":"Optum Tech/student_files/ch01_overview/lensort.py","file_name":"lensort.py","file_ext":"py","file_size_in_byte":515,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"390139471","text":"import turtle\nt = turtle.Pen()\n\ndef circle6c():\n for x in range(100):\n t.speed(0)\n t.backward(215)\n t.left(45)\n t.forward(200)\n t.left(60)\n t.forward(100)\n t.left(120)\n t.forward(100)\n t.left(60)\n t.forward(300)\n \n\n \nfor y in range(1):\n circle6c()\n t.speed(0)\n \n","sub_path":"Circle.py","file_name":"Circle.py","file_ext":"py","file_size_in_byte":360,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"94265479","text":"print (\"welke kaas is het gemaakt door roy otten.\") \nvraag = input (\"is de kaas geel?\")\nif vraag == (\"ja\"):\n vraag2 = input (\"zitten er gaten in?\")\n if vraag2 == (\"ja\"):\n vraag3 = input (\"is de kaas duur?\")\n if vraag3 == (\"ja\"):\n print (\"emmenthaler\")\n elif vraag3 == (\"nee\"):\n 
print (\"leerdammer\") \n elif vraag2 == (\"nee\"):\n vraag4 = input (\"is de kaas hard als steen?\")\n if vraag4 == (\"ja\"):\n print (\"parmigiano reggiano\") \n elif vraag4 == (\"nee\"):\n print (\"goudse kaas\")\nelif vraag == (\"nee\"):\n vraag5 = input (\"heeft de kaas blauwe schimmels\")\n if vraag5 == (\"ja\"):\n vraag6 = input (\"heeft de kaas een korst\")\n if vraag6 == (\"ja\"):\n print (\"blue de rochbaron\")\n elif vraag6 == (\"ja\"):\n print (\"foume d'Ambert\")\n elif vraag5 == (\"ja\"):\n vraag7 = input (\"heeft de kaas een korst?\")\n if vraag7 == (\"ja\"):\n print (\"camembert\") \n elif vraag7 == (\"nee\"):\n print (\"Mozzerella\")\n\n \n\n\n \n\n \n\n \n \n \n \n\n \n \n\n","sub_path":"welkekaas.py","file_name":"welkekaas.py","file_ext":"py","file_size_in_byte":1152,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"367405029","text":"import pandas as pd\nimport numpy as np\nimport argparse\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.model_selection import cross_val_score as cvs\nfrom sklearn.metrics import mean_squared_error\nfrom xgboost import XGBRegressor\nfrom math import sqrt\nimport warnings\nwarnings.filterwarnings(\"ignore\", category=DeprecationWarning)\n\ndef linear_model(x_train ='x_train.csv',y_train ='y_train.csv',x_test ='x_test.csv',y_test ='y_test.csv'):\n\n\n\tLinear_model = LinearRegression()\n\tLinear_model.fit(x_train, y_train)\n\tscores=cvs(Linear_model,x_train,y_train,cv=10)\n\tprint(\"accuracy of linearRegressor \"+str(scores.mean()))\n\n\trms = np.sqrt(np.square(np.asarray(np.log(y_predict)-np.log(y_test))).sum()/float(len(y_predict)))\n\tprint('RMSE = {}'.format(rms))\n\n\ty_predict=Linear_model.predict(x_test)\n\n\treturn y_predict\n\ndef xgb_model(x_train ='x_train.csv',y_train ='y_train.csv',x_test ='x_test.csv',y_test ='y_test.csv'):\n\n\n\txgb_model=XGBRegressor()\n\txgb_model.fit(x_train,y_train)\n\tprint('Accuracy of Xgb : {}'.format(xgb_model.score(x_test,y_test)))\n\n\ty_predict=xgb_model.predict(x_test)\n\trms = np.sqrt(np.square(np.asarray(np.log(y_predict)-np.log(y_test))).sum()/float(len(y_predict)))\n\n\tprint('RMSE = {}'.format(rms))\n\n\treturn y_predict\n\nif __name__ == '__main__':\n\n\tparser=argparse.ArgumentParser()\n\tparser.add_argument('--x_train',help=\"address of x_train.csv\")\n\tparser.add_argument('--y_train',help=\"address of x_train.csv\")\n\tparser.add_argument('--x_test',help=\"address of x_test.csv\")\n\tparser.add_argument('--y_train',help=\"address of y_test.csv\")\n\n\tx_train=parser.x_train\n\ty_train=parser.y_train\n\tx_test=parser.x_test\n\ty_train=parser.y_test\n\n\tlinear_model(x_train,y_train,x_test,y_test)\n\txgb_model(x_train,y_train,x_test,y_test)\n\n","sub_path":"MachineLearning-Projects--master/House Price prediction (kaggle)/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1749,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"434215354","text":"#! /usr/bin/env/ python3\n\nimport requests, os, bs4\n\ninput(\"\\n### READ ME ###\\n\\nThis program will retrieve all downloadable comics from xkcd.com website and save them in the current directory.\\n \\nThis work is licensed under a Creative Commons Attribution-NonCommercial 2.5 License. 
This means you're free to copy and share these comics (but not to sell them).\\n \\nPress ENTER to continue or CTRL + C to exit.\")\n\nurl = 'http://xkcd.com'\nos.makedirs('xkcd', exist_ok=True)\n\nwhile not url.endswith('#'):\n    print('Downloading page %s...' % url)\n    comicid = os.path.basename(url.strip(\"/\"))\n    res = requests.get(url)\n    res.raise_for_status()\n\n    soup = bs4.BeautifulSoup(res.text, 'html.parser')\n\n    prevLink = soup.select('a[rel=prev]')[0]\n    url = 'http://xkcd.com' + prevLink.get('href')\n\n    comic = soup.select('#comic img')\n    if not comic:\n        print('Could not find comic image.')\n        continue\n\n    comicUrl = comic[0].get('src')\n    if comicUrl.startswith(\"//\"):\n        comicUrl = 'http:' + comicUrl\n\n    if \"imgs.xkcd.com\" not in comicUrl: continue\n\n    print('Downloading image %s...' % (comicUrl))\n    res = requests.get(comicUrl)\n    res.raise_for_status()\n\n\n    # Save the image to ./xkcd\n    imageFile = open(os.path.join('xkcd', comicid + \"_\" + os.path.basename(comicUrl)), 'wb')\n    for chunk in res.iter_content(100000):\n        imageFile.write(chunk)\n    imageFile.close()\n\nprint('Done.')","sub_path":"scrape.py","file_name":"scrape.py","file_ext":"py","file_size_in_byte":1407,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"641237016","text":"class Page():\n    def __init__(self, driver):\n\n        self.driver = driver\n        self._search_bar = None\n        self._search_result = None\n        self._error_search_result = None\n        self._way_result = None\n        self._share_panel = None\n\n    @property\n    def search_bar(self):\n        from online.helpers.search_bar import SearchBar\n\n        if self._search_bar is None:\n            self._search_bar = SearchBar(self.driver,\n                                         self.driver.find_element_by_css_selector(SearchBar.selectors['self']))\n        return self._search_bar\n\n    @property\n    def search_result(self):\n        from online.helpers.search_result import SearchResult\n\n        if self._search_result is None:\n            self._search_result = SearchResult(self.driver,\n                                               self.driver.find_element_by_css_selector(SearchResult.selectors['self']))\n        return self._search_result\n\n    @property\n    def error_search_result(self):\n        from online.helpers.error_search_resullt import ErrorSearchResult\n\n        if self._error_search_result is None:\n            self._error_search_result = ErrorSearchResult(self.driver,\n                                                          self.driver.find_element_by_css_selector(\n                                                              ErrorSearchResult.selectors['self']))\n        return self._error_search_result\n\n    @property\n    def way_result(self):\n        from online.helpers.way_results import WayResult\n\n        if self._way_result is None:\n            self._way_result = WayResult(self.driver,\n                                         self.driver.find_element_by_css_selector(WayResult.selectors['self']))\n        return self._way_result\n\n    @property\n    def share_panel(self):\n        from online.helpers.share import SharePanel\n\n        if self._share_panel is None:\n            self._share_panel = SharePanel(self.driver,\n                                           self.driver.find_element_by_css_selector(SharePanel.selectors['self']))\n        return self._share_panel\n\n    def open(self, url):\n        self.driver.get(url)\n\n","sub_path":"5_selenium_2gis/online/helpers/page.py","file_name":"page.py","file_ext":"py","file_size_in_byte":2155,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"105712844","text":"import numpy as np\nimport h5py\n\nimitation_type = np.dtype([('image', float, (20, 30)), ('label', float, 5)])\nprint(\"datatype :\\n{}\".format(imitation_type))\n\nhdf5_file = h5py.File(\"carla_dataset_1.hdf5\", \"a\")\ndataset = hdf5_file.create_dataset(\"carla_set1\",shape =(1,), maxshape=(None,), chunks=(1,), 
compression=\"lzf\", dtype=imitation_type)\n\nimage = np.ndarray(shape=(20,30), dtype=float)\nlabel = np.array([0.0, 0.25, 0.5, 0.75, 1.0]).astype(float)\nlabel2 = np.array([-0.0, -0.25, -0.5, -0.75, -1.0]).astype(float)\n\ndata = np.array([(image, label)], dtype=imitation_type)\ndataset[0] = data\n\nprint(dataset[0,\"label\"].shape)\nprint(dataset[0,\"image\"].shape)\n\nprint(\"label\\n\", dataset[0, \"label\"])\nprint(\"image\\n\", dataset[0, \"image\"]) \n\ndataset.resize(2, axis=0 )\ndata = np.array([(image, label2)], dtype=imitation_type)\ndataset[1] = data\nhdf5_file.close()\n\nhdf5_file = h5py.File(\"carla_dataset_1.hdf5\", \"r\")\nprint(list(hdf5_file.keys()))\ndataset = hdf5_file['carla_set2']\n\nprint(\"label\\n\", dataset[0, \"label\"])\nprint(\"image\\n\", dataset[1, \"label\"])\nprint(\"shape \", dataset.shape)","sub_path":"hdf5.py","file_name":"hdf5.py","file_ext":"py","file_size_in_byte":1081,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"304489665","text":"from __future__ import print_function, division\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torch.utils import data\nfrom PIL import Image\nfrom torch.optim import lr_scheduler\nimport numpy as np\nimport models as models\nimport torchvision\nfrom torchvision import datasets, transforms\nfrom torchvision import models as tmodels\nimport matplotlib.pyplot as plt\nimport time\nimport os\nimport copy\nimport matplotlib.image as mpimg\nimport matplotlib\nimport matplotlib.pyplot as plt\nfrom matplotlib.pyplot import imshow\nimport imghdr\nimport untangle\nfrom efficientnet_pytorch import EfficientNet\n\nimport transformations\nimport xml.etree.ElementTree as ET\n\n#os.environ['TORCH_HOME'] = \"/media/goeau/DATA/villacis/tmp/torch\"\n\n\nclass Dataset(data.Dataset):\n 'Characterizes a dataset for PyTorch'\n def __init__(self, list_IDs, labels, transform=None):\n 'Initialization'\n self.labels = labels\n self.transform = transform\n self.list_IDs = list_IDs\n\n def __len__(self):\n 'Denotes the total number of samples'\n return len(self.list_IDs)\n\n def __getitem__(self, index):\n 'Generates one sample of data'\n # Select sample\n ID = self.list_IDs[index]\n # Load data and get label\n #X = torch.load(ID)\n img = Image.open(ID) # use pillow to open a file\n img = img.convert('RGB') #convert image to RGB channel\n if self.transform is not None:\n img = self.transform(img)\n\n #img = np.asarray(img).transpose(-1, 0, 1) # we have to change the dimensions from width x height x channel (WHC) to channel x width x height (CWH)\n img = torch.from_numpy(np.asarray(img)) # create the image tensor\n X = img\n y = self.labels[ID]\n\n return X, y\n\n\ndata_transforms_normal = {\n 'train': transforms.Compose([\n transforms.RandomResizedCrop(224),\n transforms.RandomHorizontalFlip(),\n #p.torch_transform(),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n ]),\n 'val': transforms.Compose([\n #transforms.Resize(256),\n #transformations.CropField(),\n transforms.CenterCrop(224),\n #p.torch_transform(),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n ]),\n 'val_photo': transforms.Compose([\n #transforms.Resize(256),\n transforms.CenterCrop(224),\n #p.torch_transform(),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n ]),\n}\n\n##larger images\ndata_transforms = {\n 'train': transforms.Compose([\n transforms.Resize((900, 
600)),\n transforms.CenterCrop((850, 550)),\n transforms.RandomHorizontalFlip(),\n transforms.RandomRotation(30),\n #p.torch_transform(),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n ]),\n 'val': transforms.Compose([\n transforms.Resize((900, 600)),\n transforms.CenterCrop((850, 550)),\n #transforms.ColorJitter(),\n #transforms.RandomRotation(30),\n #p.torch_transform(),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n ]),\n 'val_photo': transforms.Compose([\n #transforms.Resize(256),\n #transforms.CenterCrop(224),\n transforms.Resize((900, 600)),\n transforms.CenterCrop((850, 550)),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n ]),\n}\n\ntest_transforms = transforms.Compose([\n #transforms.Resize(256),\n transforms.RandomRotation(15),\n transforms.RandomHorizontalFlip(),\n transforms.ColorJitter(),\n transformations.CropField(),\n #transformations.TileCircle(),\n transforms.CenterCrop((224, 224)),\n #p.torch_transform(),\n transforms.ToTensor(),\n #transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n #transforms.Normalize([0.7410, 0.7141, 0.6500], [0.0808, 0.0895, 0.1141])\n transforms.Normalize([0.2974, 0.3233, 0.2370], [0.1399, 0.1464, 0.1392])\n])\n\ntest_transforms_grande = transforms.Compose([\n transforms.Resize((900, 600)),\n transforms.CenterCrop((850, 550)),\n #transforms.ColorJitter(),\n #transforms.RandomRotation(30),\n #p.torch_transform(),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n])\n\ndata_dir = \"/home/villacis/Desktop/villacis/datasets/plantclef20_split/herbarium\"\ndata_dir_photo = \"/home/villacis/Desktop/villacis/datasets/plantclef20_split/photo\"\nimage_datasets = {x: datasets.ImageFolder(os.path.join(data_dir, x),\n data_transforms[x])\n for x in ['train', 'val']} #,\nimage_datasets_photo = datasets.ImageFolder(os.path.join(data_dir_photo),\n data_transforms['val_photo'])\nlearning_rate = 0.001\nbatch_size = 15\nparams = {'batch_size': batch_size,\n 'shuffle': True,\n 'num_workers': 24}\n\npartition = {}\npartition['train'] = []\npartition['val'] = []\npartition['val_photo'] = []\nlabels = {}\nprint(\"Preprocessing datasets\")\nclass_name_to_id = image_datasets['train'].class_to_idx\nprint(class_name_to_id)\ndef get_class_id(path):\n class_name = os.path.split(path)[0]\n class_name = os.path.split(class_name)[1]\n try:\n class_name2 = class_name_to_id[class_name]\n except:\n class_name2 = 0\n print(\"Error: \"+str(class_name))\n return class_name2\nfor i in image_datasets['train'].imgs:\n partition['train'].append(i[0])\n labels[i[0]] = get_class_id(i[0])\n\n\n#for i in image_datasets['val'].imgs:\n# partition['val'].append(i[0])\n# labels[i[0]] = get_class_id(i[0])\n\n\nfor i in image_datasets_photo.imgs:\n partition['val_photo'].append(i[0])\n labels[i[0]] = get_class_id(i[0])\n\n\n\n\nprint(\"Finished preprocessing datasets\")\n\n\n#dataset_sizes = {x: len(image_datasets[x]) for x in ['train', 'val']} #, 'val'\n\"\"\"\ndataloaders = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=106,\n shuffle=True, num_workers=6)\n for x in ['train']} #, 'val'\n#dataloaders_photo = torch.utils.data.DataLoader(image_datasets_photo, batch_size=112,\n# shuffle=True, num_workers=6)\n#dataloaders['val_photo'] = dataloaders_photo\n\"\"\"\n#dataset_sizes['val_photo'] = len(image_datasets_photo)\n\"\"\"\n#for i in image_datasets['train']:\n# 
print(i[1])\n\"\"\"\n\n\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\") #\"cuda\" if torch.cuda.is_available() else \"cpu\"\nclass_names = image_datasets['train'].classes\nnum_classes = len(class_names)\nprint(num_classes)\nprint(device)\n\n#train_set = Dataset(partition['train'], labels, transform = data_transforms['train'])\n#train_generator = data.DataLoader(train_set, **params)\n\n#val_set = Dataset(partition['val'], labels, transform = data_transforms['val'])\n#val_generator = data.DataLoader(val_set, **params)\n\nval_photo_set = Dataset(partition['val_photo'], labels, transform = data_transforms['val_photo'])\nval_photo_generator = data.DataLoader(val_photo_set, **params)\n\ndataloaders = {}\n#dataloaders['train'] = train_generator\n#dataloaders['val'] = val_generator\n#dataloaders['val_photo'] = val_photo_generator\n\ndef load_ckp(checkpoint_fpath, model):\n checkpoint = torch.load(checkpoint_fpath)\n print(checkpoint['epoch'])\n model.load_state_dict(checkpoint['state_dict'])\n #optimizer.load_state_dict(checkpoint['optimizer'])\n return model #, optimizer, checkpoint['epoch']\n\ndef accuracy(output, target, topk=(1,)):\n \"\"\"Computes the accuracy over the k top predictions for the specified values of k\"\"\"\n with torch.no_grad():\n maxk = max(topk)\n batch_size = 1.0\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)\n res.append(correct_k)\n return res\ndef get_rank(outputs, label):\n _, vals = outputs.sort()\n vals = vals[0]\n rank = ((vals == label).nonzero())\n rank = rank.item()\n rank = vals.size(0) - rank\n rank = rank * 1.0\n resp = 1.0 / rank\n return resp\ndef test_model(encoder, classifier):\n encoder.eval()\n classifier.eval()\n running_corrects = 0\n #/home/villacis/Desktop/villacis/datasets/plantclef20_src/photo\n data_dir = '/home/villacis/villacis/datasets/plantclef20_split/photo/val_peq'\n #data_dir = '/home/villacis/Desktop/villacis/datasets/plantclef20_src/photo'\n #data_dir = \"/home/villacis/pc2020/photo\"\n classes = os.listdir(data_dir)\n total1 = []\n total5 = []\n mrr = 0.0\n queries= 0\n obsids = {}\n obsidclase = {}\n for class_name in classes:\n source_dir = os.path.join(data_dir, class_name)\n print(class_name)\n procesados = 0\n acc1 = 0\n acc5 = 0\n for f in os.listdir(source_dir):\n if (f.endswith('.jpg') or f.endswith('.JPG')):\n procesados += 1\n base = os.path.basename(f)\n base = os.path.splitext(base)[0]\n image_name = os.path.join(source_dir, f)\n xml_base_name = \"/home/villacis/villacis/datasets/plantclef20_src/photo\"\n #xml_base_name = \"/home/villacis/pc2020/photo\"\n xml_base_name = os.path.join(xml_base_name, class_name)\n xml_name = os.path.join(xml_base_name, base + \".xml\")\n file_text = open(xml_name, 'r').read()\n file_text = file_text.replace(\"&\", \"and\")\n obj = untangle.parse(file_text)\n obsid = obj.Image.ObservationId.cdata\n obsid = int(obsid)\n imagen = Image.open(image_name)\n imagen = imagen.convert(\"RGB\")\n with torch.no_grad():\n imagen = test_transforms(imagen)\n imagen = imagen.to(device)\n imagen = imagen.unsqueeze(0)\n label = class_name_to_id[class_name]\n label = torch.tensor(label)\n label = label.to(device)\n outputs = classifier(encoder(imagen))\n if obsid in obsids:\n obsids[obsid]+=outputs\n else:\n obsids[obsid] = outputs\n obsidclase[obsid] = label.item()\n mrr += get_rank(outputs, label.item())\n queries += 
1\n _, preds = torch.max(outputs, 1)\n tacc1, tacc5 = accuracy(outputs, label, topk=(1, 5))\n acc1 += tacc1\n acc5 += tacc5\n procesados *= 1.0\n if(procesados < 0.95):\n continue\n acc1 = (acc1*1.0)/(procesados)\n acc5 = (acc5*1.0)/(procesados)\n total1.append(acc1.item())\n total5.append(acc5.item())\n print(\"Clase: \"+str(class_name)+\" ||| acc1: \"+str(acc1)+\" ||| acc5: \"+str(acc5))#\n total1 = torch.tensor(total1)\n total5 = torch.tensor(total5)\n total1 = torch.mean(total1)\n total5 = torch.mean(total5)\n print(\"Todas las clases\")\n print(total1)\n print(total5)\n print(\"MRR fotos: \")\n print(mrr)\n print(queries)\n queries = queries*1.0\n mrr = mrr/queries\n print(mrr)\n mrr = 0.0\n queries = 0\n acc1 = 0.0\n acc5 = 0.0\n\n for obsid in obsids.keys():\n mrr+=get_rank(obsids[obsid], obsidclase[obsid])\n _, preds = torch.max(obsids[obsid], 1)\n\n label = obsidclase[obsid]\n label = torch.tensor(label)\n label = label.to(device)\n tacc1, tacc5 = accuracy(obsids[obsid], label, topk=(1, 5))\n acc1 += tacc1\n acc5 += tacc5\n queries+=1\n print(\"MRR obsid: \")\n print(mrr)\n print(queries)\n queries = queries*1.0\n mrr = mrr/queries\n print(mrr)\n print(\"Accs por topk\")\n acc1 = (acc1*1.0)/(queries)\n acc5 = (acc5*1.0)/(queries)\n print(acc1)\n print(acc5)\ndef ejecutar_1():\n classifier = models.ClassifierPro()\n encoder = tmodels.resnet50(pretrained=True)\n encoder.fc = nn.Sequential()\n classifier.to(device)\n encoder.to(device)\n encoder.load_state_dict(torch.load('r50_encoder_taxons.pth'))\n classifier.load_state_dict(torch.load('r50_classifier_taxons.pth'))\n #encoder.load_state_dict(torch.load('result/encoder_fullfull1.pth'))\n #classifier.load_state_dict(torch.load('result/classifier_fullfull1.pth'))\n #encoder.load_state_dict(torch.load('encoder_fullfullfull.pth'))\n #classifier.load_state_dict(torch.load('classifier_fullfullfull.pth'))\n #encoder.load_state_dict(torch.load('result/encoder_full_ss.pth'))\n #classifier.load_state_dict(torch.load('result/classifier_full_ss.pth'))\n #encoder.load_state_dict(torch.load('result/encoder_fullextra.pth'))\n #classifier.load_state_dict(torch.load('result/classifier_fullextra.pth'))\n #resnet\n #model_ft = models.resnet50(pretrained=True)\n ## Cargar pesos\n #pretrained_weights = torch.load('mejores_pesos_viejo.pth')\n #model_ft = models.resnet50(pretrained=False)\n #num_ftrs = model_ft.fc.in_features\n #model_ft.fc = nn.Linear(num_ftrs, num_classes)\n #pretrained_weights = torch.load('pesos_herb_normal_resnet.pth')\n #model_ft = models.resnet50(pretrained=False)\n #model_ft.load_state_dict(pretrained_weights)\n #resnet very large\n #model_ft = models.resnet50(pretrained=True)\n ## Cargar pesos\n #pretrained_weights = torch.load('mejores_pesos_viejo.pth')\n #model_ft = models.resnet50(pretrained=False)\n #model_ft.avgpool = nn.AvgPool2d(kernel_size=(27, 18), stride=1)\n #num_ftrs = model_ft.fc.in_features\n #model_ft.fc = nn.Linear(num_ftrs, num_classes)\n #inception\n #model_ft = models.inception_v3(pretrained=True)\n #model_ft.aux_logits=False\n #num_ftrs = model_ft.fc.in_features\n #model_ft.fc = nn.Linear(num_ftrs, num_classes)\n #model_ft.load_state_dict(pretrained_weights)\n # Here the size of each output sample is set to 2.\n # Alternatively, it can be generalized to nn.Linear(num_ftrs, len(class_names)).\n\n #efficientnet\n #model_ft = EfficientNet.from_pretrained('efficientnet-b4', num_classes=997)\n\n #model_ft = load_ckp(\"pesos_herbinception_90.pth\", model_ft)\n #model_ft = model_ft.to(device)\n #encoder = 
tmodels.resnet50(pretrained=True)\n\ttest_model(encoder, classifier)\n\n\n#visualize_model(model_ft)\nejecutar_1()\n","sub_path":"train2_testphoto.py","file_name":"train2_testphoto.py","file_ext":"py","file_size_in_byte":14738,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"44416846","text":"# 2. From http://money.cnn.com/data/dow30/\n# scrape the Dow Jones constituent data and put the 30 companies' symbols, names and last trade prices into one list\nimport requests\nimport re\ndef retrieve_dji_list():\n    r=requests.get('http://money.cnn.com/data/dow30/')\n    search_pattern=re.compile('class=\"wsod_symbol\">(.*?).*?(.*?)\\s+.*? class=\"wsod_stream\">(.*?)')\n    dji_list_in_text=re.findall(search_pattern,r.text)\n    return dji_list_in_text\ndji_list=retrieve_dji_list()\nprint(dji_list)\n# 3. Scrape the data on the page http://www.volleyball.world/en/vnl/2018/women/results-and-ranking/round1\n# (including TEAMS and TOTAL, WON, LOST of MATCHES)\n# Hint: each TEAM's data can be parsed out as one group, but since the source code holding these 4 fields spans several lines with many spaces,\n# write the whitespace in the regular expression as \\s+ (it matches runs of whitespace, including newlines and spaces)\n# import requests,re\n# def crawler(url):\n#     try:\n#         r=requests.get(url)\n#     except requests.exceptions.RequestException as err:\n#         return err\n#     pattern=re.compile('href=\"/en/vnl/2018/women/teams/(.*?)\">(.*?)\\s+ \\s+\\s+(.*?)\\s+(.*?)\\s+(.*?)')\n#     p=re.findall(pattern,r.text)\n#     return p\n# if __name__=='__main__':\n#     ad='http://www.volleyball.world/en/vnl/2018/women/results-and-ranking/round1'\n#     results=crawler(ad)\n#     print(results)\n\n","sub_path":"python_code/capture data/网页获取/company.py","file_name":"company.py","file_ext":"py","file_size_in_byte":1565,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"129199210","text":"from fastapi.encoders import jsonable_encoder\n\nfrom app.tasks import celery_app\nfrom app.db.models.port import Port\nfrom app.db.models.port_forward import PortForwardRule, MethodEnum\nfrom app.db.schemas.port_forward import PortForwardRuleOut\n\nfrom .gost import generate_gost_config, get_gost_remote_ip\n\n\ndef send_iptables(\n    rule: PortForwardRule,\n    port: Port,\n    old: PortForwardRuleOut = None,\n    new: PortForwardRuleOut = None,\n    update_gost: bool = False,\n):\n    kwargs = {\n        \"port_id\": port.id,\n        \"server_id\": port.server.id,\n        \"local_port\": port.num,\n    }\n    if new and new.method == MethodEnum.IPTABLES:\n        kwargs[\"update_status\"] = True\n        kwargs[\"remote_ip\"] = new.config.get(\"remote_ip\")\n        kwargs[\"remote_port\"] = new.config.get(\"remote_port\")\n        kwargs[\"forward_type\"] = new.config.get(\"type\", \"ALL\").upper()\n    print(f\"Sending iptables_runner task, kwargs: {kwargs}\")\n    celery_app.send_task(\"app.tasks.iptables.iptables_runner\", kwargs=kwargs)\n\n\ndef send_gost(\n    rule: PortForwardRule,\n    port: Port,\n    old: PortForwardRuleOut = None,\n    new: PortForwardRuleOut = None,\n    update_gost: bool = False,\n):\n    gost_config = generate_gost_config(rule)\n    kwargs = {\n        \"port_id\": port.id,\n        \"server_id\": port.server.id,\n        \"port_num\": port.num,\n        \"gost_config\": gost_config,\n        \"remote_ip\": get_gost_remote_ip(gost_config),\n        \"update_gost\": update_gost,\n        \"update_status\": bool(new and new.method == MethodEnum.GOST),\n    }\n    print(f\"Sending gost_runner task, kwargs: {kwargs}\")\n    celery_app.send_task(\"app.tasks.gost.gost_runner\", kwargs=kwargs)\n\n\ndef trigger_install_gost(server_id):\n    kwargs = {\n        \"port_id\": 0,\n        \"server_id\": server_id,\n        \"port_num\": 0,\n        \"gost_config\": {},\n        \"update_gost\": True,\n        \"update_status\": 
False,\n    }\n    print(f\"Sending gost install gost_runner task, kwargs: {kwargs}\")\n    celery_app.send_task(\"app.tasks.gost.gost_runner\", kwargs=kwargs)\n\n\n\ndef trigger_forward_rule(\n    rule: PortForwardRule,\n    port: Port,\n    old: PortForwardRuleOut = None,\n    new: PortForwardRuleOut = None,\n    update_gost: bool = False,\n):\n    print(\n        f\"Received forward rule:\\n\"\n        + f\"old:{jsonable_encoder(old) if old else None}\\n\"\n        + f\"new:{jsonable_encoder(new) if new else None}\"\n    )\n    if any(r.method == MethodEnum.IPTABLES for r in (old, new) if r):\n        send_iptables(rule, port, old, new, update_gost)\n\n    if any(r.method == MethodEnum.GOST for r in (old, new) if r):\n        send_gost(rule, port, old, new, update_gost)\n\n\ndef trigger_tc(port: Port):\n    kwargs = {\n        \"server_id\": port.server.id,\n        \"port_num\": port.num,\n        \"egress_limit\": port.config.get(\"egress_limit\"),\n        \"ingress_limit\": port.config.get(\"ingress_limit\"),\n    }\n    print(f\"Sending tc_runner task, kwargs: {kwargs}\")\n    celery_app.send_task(\"app.tasks.tc.tc_runner\", kwargs=kwargs)\n\ndef remove_tc(server_id: int, port_num: int):\n    kwargs = {\n        \"server_id\": server_id,\n        \"port_num\": port_num,\n    }\n    print(f\"Sending tc_runner task, kwargs: {kwargs}\")\n    celery_app.send_task(\"app.tasks.tc.tc_runner\", kwargs=kwargs)\n\ndef trigger_ansible_hosts():\n    print(\"Sending ansible_hosts_runner task\")\n    celery_app.send_task(\"app.tasks.ansible.ansible_hosts_runner\")\n\ndef trigger_iptables_reset(port: Port):\n    kwargs = {\n        \"server_id\": port.server.id,\n        \"port_num\": port.num\n    }\n    print(\"Sending iptables.iptables_reset_runner task\")\n    celery_app.send_task(\"app.tasks.iptables.iptables_reset_runner\", kwargs=kwargs)","sub_path":"app/api/utils/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":3640,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"113580948","text":"__author__='棒棒糖'\nimport random\n# randomly generate an unordered sequence of integers between 1 and 100\ndef data():\n    random_data=[]\n    for i in range(0,10):\n        random_data.append(random.randint(1,100))\n    return random_data\n# insertion sort (keep moving the smaller numbers toward the front)\ndef insert_sort(data_list):\n    # sequence length:\n    length=len(data_list)\n    for i in range(1,length):\n        key=data_list[i]\n        j=i-1\n        while j >=0:\n            # compare and insert\n            if data_list[j]>key:\n                data_list[j+1]=data_list[j]\n                data_list[j]=key\n            j=j-1\n    return data_list\nif __name__=='__main__':\n    print('棒棒糖碰一个')\n    # generate random unordered data\n    random_data=data()\n    print(random_data)\n    # insertion sort\n    sorted_data=insert_sort(random_data)\n    print(sorted_data)\n\n\n\n\n","sub_path":"第一期/上海-棒棒糖/第二次任务-每日代码练习/2017-1/1-25/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":833,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"85150491","text":"import numpy as np\r\nimport copy\r\n\r\n\r\nX = 'x'\r\nY = 'y'\r\n\r\nk = 'k'\r\nb = 'b'\r\n\r\ndef linear_mnk(data):\r\n    '''Classical least-squares (OLS) fit'''\r\n    sum_x = sum(map(lambda data: data[X], data))  # sum of x[i]\r\n    sum_y = sum(map(lambda data: data[Y], data))  # sum of y[i]\r\n    sum_xy = sum(map(lambda data: data[X]*data[Y], data))  # sum of x[i]*y[i]\r\n    sum_sqr_x = sum(map(lambda data: data[X]*data[X], data))  # sum of x[i]^2\r\n\r\n    line = {k: 0, b: 0}\r\n\r\n    line[k] = (len(data)*sum_xy - sum_x*sum_y) / (len(data) * sum_sqr_x - sum_x * sum_x)  # compute k in y = k*x + b\r\n    line[b] = (sum_y * sum_sqr_x - sum_xy * sum_x) / (len(data) * sum_sqr_x - sum_x * sum_x)  # compute b in y = k*x + b\r\n\r\n    return line\r\n\r\n\r\ndef polynome_mnk(data, order=2, epsilon=pow(10, 
-4)):\r\n    '''Classical least-squares fit for a polynomial'''\r\n\r\n    # fill the matrices of the linear system (the normal equations)\r\n    a_matrix = np.array(np.zeros((order+1, order+1)))\r\n    for i in range(order + 1):\r\n        for j in range(order + 1):\r\n            a_matrix[i][j] = sum(map(lambda dat: pow(dat[X], i + j), data)) if i + j > 0 else len(data)\r\n\r\n    b_matrix = np.array(np.zeros((order + 1, 1)))\r\n    for i in range(order + 1):\r\n        b_matrix[i] = sum(map(lambda dat: dat[Y] * pow(dat[X], i), data))\r\n\r\n    # solve the system (Gauss-Seidel iteration)\r\n    x_matrix = np.array(np.zeros((order + 1, 1)))\r\n    while True:\r\n        new_x_matrix = copy.copy(x_matrix)\r\n        for i in range(order + 1):\r\n            sum1 = sum(a_matrix[i][j] * new_x_matrix[j] for j in range(i))\r\n            sum2 = sum(a_matrix[i][j] * x_matrix[j] for j in range(i + 1, order + 1))\r\n            new_x_matrix[i] = (b_matrix[i] - sum1 - sum2) / a_matrix[i][i]\r\n\r\n        # update first, then test convergence, so the converged iterate is what gets returned\r\n        converged = sum(pow(new_x_matrix[k] - x_matrix[k], 2) for k in range(order + 1)) < pow(epsilon, 3)\r\n\r\n        x_matrix = copy.copy(new_x_matrix)\r\n        if converged: break\r\n\r\n    return x_matrix.tolist()\r\n\r\n\r\ndef m_est(data, epsilon=pow(10, -3)):\r\n    '''Least squares with the sum reweighted by a coefficient computed from each point's distance to the line'''\r\n\r\n    def dist(line, point):\r\n        '''Distance from a point to the line (via the normalized line equation)'''\r\n        line_a = -line[k] * np.sqrt(1 / (pow(line[k], 2) + 1))\r\n        line_b = np.sqrt(1 / (pow(line[k], 2) + 1))\r\n        line_c = -line[b] * line_b\r\n\r\n        h = abs(line_a * point[X] + line_b * point[Y] + line_c)\r\n\r\n        return h\r\n\r\n    def distances(line, data):\r\n        '''Generator of distances between the points and the line'''\r\n        for point in data:\r\n            h = dist(line, point)\r\n            yield h\r\n\r\n    def sum_weight(data, line, sigma):\r\n        '''Generator of data points weighted according to their distance'''\r\n        for point in data:\r\n            new_point = point.copy()\r\n            new_point[X] *= 1 / (pow(dist(line, new_point), 2) * sigma * sigma + 1)\r\n            new_point[Y] *= 1 / (pow(dist(line, new_point), 2) * sigma * sigma + 1)\r\n            yield new_point\r\n\r\n    line0 = linear_mnk(data)  # initial line estimate from the plain least-squares fit\r\n\r\n    # one-dimensional optimization of the sigma value\r\n    sigma0 = 0\r\n    sigma1 = pow(10, 6)\r\n    new_data0 = [point for point in sum_weight(data, line0, sigma0)]\r\n    new_data1 = [point for point in sum_weight(data, line0, sigma1)]\r\n\r\n    new_line0 = linear_mnk(new_data0)\r\n    new_line1 = linear_mnk(new_data1)\r\n    while sigma1 - sigma0 > epsilon:\r\n        dist_sum_0 = sum(distances(new_line0, data))\r\n        dist_sum_1 = sum(distances(new_line1, data))\r\n\r\n        if dist_sum_0 > dist_sum_1:\r\n            sigma0 = (sigma0 + sigma1) / 2\r\n            new_data0 = [point for point in sum_weight(data, line0, sigma0)]\r\n            new_line0 = linear_mnk(new_data0)\r\n\r\n        else:\r\n            sigma1 = (sigma0 + sigma1) / 2\r\n            new_data1 = [point for point in sum_weight(data, line0, sigma1)]\r\n            new_line1 = linear_mnk(new_data1)\r\n\r\n    sigma = (sigma0 + sigma1) / 2\r\n\r\n    new_data = [point for point in sum_weight(data, line0, sigma)]\r\n    line0 = linear_mnk(new_data)\r\n\r\n    return line0\r\n\r\n\r\ndef f_polynome(polynome, x):\r\n    val = 0\r\n    for mul in reversed(polynome):\r\n        val = val * x + mul[0]\r\n\r\n    return val\r\n\r\n\r\ndef line_conv(data, conv, unconv, m_est_flag=True):\r\n    conv_data = [point for point in conv(data)]\r\n\r\n    if m_est_flag:\r\n        line = m_est(conv_data)\r\n\r\n    else:\r\n        line = linear_mnk(conv_data)\r\n\r\n    line = unconv(line)\r\n    return line","sub_path":"approximate.py","file_name":"approximate.py","file_ext":"py","file_size_in_byte":4720,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"213011923","text":"from 
django.shortcuts import reverse\n\nfrom backend.api.mixins import BaseAPITestCase\nfrom forums.models import Topic, Thread\n\n\nclass TopicAPITestCase(BaseAPITestCase):\n def setUp(self):\n super().setUp()\n\n Topic.objects.all().delete()\n Thread.objects.all().delete()\n\n self.user = self.login()\n self.topics = [Topic.objects.create(name='topic-1', created_by=self.user, order=1, key=\"topic-1\"),\n Topic.objects.create(name='topic-2', created_by=self.user, order=2, key=\"topic-2\"),\n Topic.objects.create(name='topic-3', created_by=self.user, order=3, key=\"topic-3\"),\n Topic.objects.create(name='topic-4', created_by=self.user, order=4, key=\"topic-4\")]\n\n def test_authenticated_user_list(self):\n response = self.client.get(reverse('api:forums:topics-list'), format=\"json\")\n json = response.json()\n\n self.assertEqual(len(json), 4)\n\n def test_anonymous_user_list(self):\n self.logout()\n response = self.client.get(reverse('api:forums:topics-list'), format=\"json\")\n json = response.json()\n\n self.assertEqual(len(json), 4)\n\n def test_detail(self):\n topic = Topic.objects.first()\n\n Thread.objects.create(name='thread-1', created_by=self.user, topic=topic, order=1, key=\"thread-1\")\n Thread.objects.create(name='thread-2', created_by=self.user, topic=topic, order=2, key=\"thread-2\")\n Thread.objects.create(name='thread-3', created_by=self.user, topic=topic, order=3, key=\"thread-3\")\n\n response = self.client.get(reverse('api:forums:topics-detail', kwargs={\n 'pk': topic.pk\n }), format=\"json\")\n json = response.json()\n\n self.assertEqual(json['name'], 'topic-1')\n self.assertIsInstance(json['id'], int)\n self.assertIsNotNone(json['created'])\n self.assertIsNotNone(json['modified'])\n self.assertEqual(json['created_by'], self.user.pk)\n self.assertEqual(json['description'], None)\n self.assertEqual(len(json['thread_set']), 3)\n\n for thread in json['thread_set']:\n self.assertIsInstance(thread['id'], int)\n self.assertEqual(thread['topic'], topic.pk)\n self.assertEqual(thread['created_by'], self.user.pk)","sub_path":"forums/tests/test_topic_api.py","file_name":"test_topic_api.py","file_ext":"py","file_size_in_byte":2296,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"506344589","text":"\r\nimport random\r\nimport os\r\nclear = lambda: os.system('cls')\r\nclear()\r\nfrom add_record import * \r\nfrom records import * \r\nfrom remove_record import *\r\nimport time\r\nimport ctypes\r\nimport sys\r\nfrom import_records import *\r\n\r\nup_one = '\\x1b[1A'\r\nerase = '\\x1b[2K'\r\n\r\nprint('__ __ __ ____ __ __ __ __ ')\r\nprint('\\ \\/ / || | \\ | | \\ \\/ / | | ')\r\nprint(' \\ / || | |\\ \\| | | | | |__ ')\r\nprint(' \\/ || |_| \\___| |__| |____|')\r\n\r\nprint(' ______ __ ___ __ ______ ')\r\nprint(' |__ __| || | \\/ | | _*_| ')\r\nprint(' | | || | | | |__ ')\r\nprint(' |_| || |_|\\/|_| |_____| ')\r\ntime.sleep(1)\r\nclear()\r\n\r\n\r\nmainprogram = True\r\n\r\nwhile mainprogram:\r\n\r\n\r\n\tprint('Please Choose an option ')\r\n\tprint('1: Add a record? ')\r\n\tprint('2: Remove a record? ')\r\n\tprint('3: Create a playlist? ')\r\n\tprint('4: Import Record Collection?')\r\n\tprint('5: Exit? 
')\r\n\r\n\tselect = int(input('Selection: '))\r\n\twhile not int(select) in range(0,6):\r\n\t\tselect = int(input('Your Selection Must Be 1-4: '))\r\n\r\n\t\r\n\r\n\tupdate = 'n'\r\n\tdelete = 'n'\r\n\tplaylst = 'n'\r\n\timport_lst = 'n'\r\n\r\n\tif select == 1:\r\n\t\tupdate = 'y'\r\n\telse:\r\n\t\tupdate = 'n'\r\n\r\n\tif select == 2:\r\n\t\tdelete = 'y'\r\n\telse:\r\n\t\tdelete = 'n'\r\n\tif select == 3:\r\n\t\tplaylst = 'y'\r\n\telse:\r\n\t\tplaylst = 'n'\r\n\tif select == 4:\r\n\t\timport_lst = 'y'\r\n\telse:\r\n\t\timport_lst = 'n'\r\n\r\n\tif select == 5:\r\n\t\t#font = pygame.font.Font(None,40)\r\n\t\tclear()\r\n\t\tprint('Good Bye')#.font.nFont = 16\r\n\t\ttime.sleep(2)\r\n\t\tmainprogram = False\r\n\r\n\t\t#break\r\n\tclear()\r\n\r\n\t'''\r\n\tDetermin if the user wants to add records to the collection\r\n\t'''\r\n\r\n\t#update = input(\"Do you need to add records to the collection? \").lower()\r\n\r\n\twhile select == 1 and update[0] =='y':\r\n\t add_record()\r\n\t clear()\r\n\t update = input(\"Do you need to add another record? \").lower()\r\n\t while update[0].lower() not in ('y','n'):\r\n\t \tupdate = input('You must select Yes or No!')\r\n\t \tsys.stdout.write(up_one)\r\n\t \tsys.stdout.write(erase)\r\n\r\n\tclear()\r\n\r\n\t#delete = input(\"Do you need to remove a record from your collection? \").lower()\r\n\r\n\twhile select == 2 and delete[0] == 'y':\r\n\t\tremove()\r\n\t\tclear()\r\n\t\tdelete = input(\"Do you need to remove another record? \").lower()\r\n\t\twhile delete[0].lower() not in ('y','n'):\r\n\t\t\tdelete = input('You must select Yes or No!')\r\n\t\t\tsys.stdout.write(up_one)\r\n\t\t\tsys.stdout.write(erase)\r\n\r\n\tclear()\r\n\r\n\twhile select == 4 and import_lst[0] == 'y':\r\n\t\timport_rec()\r\n\t\tprint('Your Collection Has Been Imported!')\r\n\t\ttime.sleep(2)\r\n\t\timport_lst = 'n'\r\n\t\tclear()\r\n\r\n\r\n\tclear()\r\n\r\n\r\n\t'''\r\n\tcreate a playlist\r\n\t'''\r\n\twhile select == 3 and playlst[0] == 'y':\r\n\t\tfrom records import * \r\n\r\n\t\trecord_num = 1\r\n\t\trecords = records()\r\n\t\tcounter = int(input(f\"How many records do you want to listen to? Pick 0 - {len(records)-1}: \"))\r\n\t\twhile int(counter) not in range(0,len(records)):\r\n\t\t\tcounter = int(input(f'Please pick a number 0 - {len(records)}: '))\r\n\t\t\tsys.stdout.write(up_one)\r\n\t\t\tsys.stdout.write(erase)\r\n\t\treshuffle = counter\r\n\t\tshuffle = True\r\n\r\n\t\tclear()\r\n\r\n\t\twhile shuffle:\r\n\r\n\t\t\twhile counter > 0:\r\n\t\t\t\trandomtitle = random.randint(0,len(records)-1)\r\n\t\t\t\tpick = records[randomtitle]\r\n\t\t\t\tprint(f'\\nRecord {record_num}:\\nAlbum: {pick[:-1]}\\nSide: {pick[len(pick)-1]}\\n')\r\n\t\t\t\tprint('------------------------------------------------------------')\r\n\t\t\t\tdel records[randomtitle]\r\n\t\t\t\tcounter -= 1\r\n\t\t\t\trecord_num += 1\r\n\t\t \r\n\r\n\t\t\tanswer = input(\"Reshuffle? 
\")\r\n\t\t\twhile answer[0].lower() not in ('n','y'):\r\n\t\t\t\tanswer = input('You must select Yes or No!')\r\n\t\t\t\tsys.stdout.write(up_one)\r\n\t\t\t\tsys.stdout.write(erase)\r\n\t\t\tif answer[0].lower() == 'y':\r\n\t\t\t\tclear()\r\n\t\t\t\t#shuffle = True\r\n\t\t\t\tcounter = reshuffle\r\n\t\t\t\trecord_num = 1\r\n\t\t\telse:\r\n\t\t\t\tshuffle = False\r\n\t\t\t\tplaylst = 'n'\r\n\t\t\t\tclear()\r\n\r\n","sub_path":"VinylTime/Vinyl_Time.py","file_name":"Vinyl_Time.py","file_ext":"py","file_size_in_byte":3817,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"115083226","text":"import pytest\nimport csv\nimport os\nimport json\nfrom src.data_processor import ProcessData\n\nTEST_DATA = os.path.join(\n os.path.dirname(os.path.realpath(__file__)),\n 'test_customers.csv',\n)\n\n\nTEST_DATA_WRONG = os.path.join(\n os.path.dirname(os.path.realpath(__file__)),\n 'test/',\n)\n\nTEST_DATA_EXPORT = os.path.join(\n os.path.dirname(os.path.realpath(__file__)),\n 'test_customers.csv',\n)\n\n\n\nclass TestOs:\n path = TEST_DATA\n ex_path = TEST_DATA_EXPORT\n ex_path_wrong = TEST_DATA_WRONG\n office = 'test'\n distance = 1\n\n @pytest.fixture\n def data_processor(self):\n return ProcessData(self.path,self.ex_path, self.office, self.distance)\n\n @pytest.fixture\n def data_processor_wrong_path(self):\n return ProcessData(self.ex_path_wrong,self.ex_path, self.office, self.distance)\n\n @pytest.fixture\n def expected_data(self):\n data = {'latitude': '52.986375', 'user_id': '12', 'name': 'User1', 'longitude': '-6.043701'}\n return data\n\n def test_csv_reader(self, data_processor, expected_data):\n result = data_processor.read_data()\n assert expected_data == result[0]\n\n def test_csv_reader_file_not_found(self, data_processor_wrong_path, expected_data):\n with pytest.raises(FileNotFoundError):\n result = data_processor_wrong_path.read_data()\n raise FileNotFoundError\n\n\n\n\n\n\n\n\n\n","sub_path":"test/integration/test_os.py","file_name":"test_os.py","file_ext":"py","file_size_in_byte":1386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"631767034","text":"import torch\nimport torch.nn.functional as F\n\n\nclass CNN(torch.nn.Module):\n\n def __init__(self):\n super().__init__()\n self.net = torch.nn.Sequential(\n torch.nn.Conv2d(in_channels=1, out_channels=32, kernel_size=7,padding=2),\n torch.nn.ReLU(),\n torch.nn.MaxPool2d(kernel_size=3, stride=2),\n torch.nn.Conv2d(in_channels=32, out_channels=64, kernel_size=3,padding=2),\n torch.nn.ReLU(),\n torch.nn.MaxPool2d(kernel_size=3, stride=2),\n torch.nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3,padding=2),\n torch.nn.ReLU(),\n\n )\n self.classifier = torch.nn.Linear(8192, 10)\n\n def forward(self, x):\n x = self.net(x)\n x = self.classifier(x.view(-1, 8192))\n x = F.softmax(x)\n return x\n","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":831,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"396995775","text":"\"\"\"\nThis translator.py module is used to convert text from French to English and Vice versa.\n\"\"\"\nimport os\nfrom ibm_watson import LanguageTranslatorV3\nfrom ibm_cloud_sdk_core.authenticators import IAMAuthenticator\nfrom dotenv import load_dotenv\n\nload_dotenv()\n\napikey = os.environ['apikey']\nurl = os.environ['url']\n\n\nauthenticator = IAMAuthenticator(apikey)\nlanguage_translator = LanguageTranslatorV3(\n 
version='2018-05-01',\n    authenticator=authenticator\n)\n\nlanguage_translator.set_service_url(url)\n\ndef english_to_french(english_text):\n    \"\"\"\n    This function converts the English text to French\n    \"\"\"\n    translation = language_translator.translate(\n        text=english_text,\n        source=\"en\",\n        target='fr').get_result()\n    french_text=translation['translations'][0]['translation']\n    return french_text\n\ndef french_to_english(french_text):\n    \"\"\"\n    This function converts the French text to English.\n    \"\"\"\n    translation = language_translator.translate(\n        text=french_text,\n        source=\"fr\",\n        target='en').get_result()\n    english_text=translation['translations'][0]['translation']\n    return english_text\n","sub_path":"final_project/machinetranslation/translator.py","file_name":"translator.py","file_ext":"py","file_size_in_byte":1153,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"84984574","text":"def kth(array, k):\n    pivot_point = len(array) // 2  # integer division: a float index raises TypeError in Python 3\n    pivot_value = array[pivot_point]\n    below = [s for s in array if s < pivot_value]\n    above = [s for s in array if s > pivot_value]\n    a = len(above)\n    b = len(array) - len(below)\n    if k < a:\n        return kth(above, k)\n    elif k > b:\n        return kth(below, k-b)\n    else:\n        return pivot_value\n\nimport random\narray = [random.randint(-100,100) for i in range(32)]\nprint(\"array\", array)\n\nk = 3\nkval = kth(array, k)\nprint(\"k = \",k, \" kval = \",kval)\n\nfrom heap_sort import *\nheap_sort(array)\nprint(\"sorted\", array)\nprint(array[len(array) -k - 1])\n","sub_path":"kth.py","file_name":"kth.py","file_ext":"py","file_size_in_byte":616,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"460074037","text":"from queue import PriorityQueue\n\nclass Dijkstra():\n    # The list which will hold the nodes as we go through.\n    pq = None\n    vistedList = []\n    goalNode = None\n    counter = 0\n    \n    def __init__(self, initNode, endNode):\n        self.pq = PriorityQueue()\n        # Store the same (priority, counter, node) tuple shape that pq.get() unpacks below.\n        self.pq.put((0, 0, initNode))\n        self.goalNode = endNode\n\n    def dijkstraAlgo(self, gameMapInternal, gameMap):\n        self.counter = 0\n\n        while (not self.pq.empty()):\n            # Get the tuple with the smallest g value.\n            priorityItem = self.pq.get()\n            currentNode = priorityItem[2]\n            currentNodeEdges = currentNode.getEgdes()\n            currentNodeNeighbours = currentNode.getNeighbours()\n            dist = currentNode.g\n            \n            # We have found the goal return the gameMap.\n            if (self.goalNode in self.vistedList):\n                updatedGameMap = self.tracePath(self.goalNode, gameMap)\n                return updatedGameMap, self.counter\n\n            for i in range(len(currentNode.neighbours)):\n                if (currentNodeNeighbours[i] not in self.vistedList):\n                    alt = dist + currentNodeEdges[i]\n\n                    if (alt < currentNodeNeighbours[i].g):\n                        \n                        currentNodeNeighbours[i].g = alt\n                        currentNodeNeighbours[i].cameFrom = currentNode\n\n                        # Add it to the priority queue as a tuple so that it can be kept organized.\n                        self.pq.put((currentNodeNeighbours[i].g, self.counter, currentNodeNeighbours[i]))\n                        self.counter+=1\n            \n            # Mark this node as visited\n            self.vistedList.append(currentNode)\n            \n    def tracePath(self, node, gameMap):\n        gameMap[node.i][node.j] = \"x\"\n        prevNode = node.cameFrom\n        while (prevNode != None):\n            # Mark the current spot on the graph as visited.\n            gameMap[prevNode.i][prevNode.j] = \"x\"\n            prevNode = prevNode.cameFrom\n\n        return 
gameMap","sub_path":"dijkstra.py","file_name":"dijkstra.py","file_ext":"py","file_size_in_byte":2048,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"342490675","text":"import requests\nfrom mstrio.utils.helper import response_handler\n\n\ndef server_status(connection, verbose=False):\n \"\"\"\n Args:\n connection: MicroStrategy REST API connection object\n verbose (bool, optional): Verbosity of server responses; defaults to False.\n Returns:\n Complete HTTP response object\n \"\"\"\n\n response = requests.get(url=connection.base_url + '/api/status',\n cookies=connection.cookies,\n verify=connection.ssl_verify)\n\n if verbose:\n print(response.url)\n if not response.ok:\n response_handler(response, \"Failed to check server status\")\n return response\n","sub_path":"mstrio/api/misc.py","file_name":"misc.py","file_ext":"py","file_size_in_byte":677,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"37892243","text":"# -*- coding: utf-8 -*-\n\nimport numpy as np\nimport pandas as pd\n#from scipy.optimize import minimize\n\ndef multi_log_loss(y_pred, y_true):\n epsilon = 1e-15\n y_pred = [y_pred[x] for x in y_true]\n y_pred = np.minimum(y_pred, 1-epsilon)\n y_pred = np.maximum(y_pred, epsilon)\n score = - np.mean(np.log(y_pred))\n return score\n\n\n'''\n通过调整模型线性组合的权重来实现最有的组合方式\ndef get_best_combine_weights(y_preds, y_true):\n model_num = len(y_preds)\n starting_weights = [1.0/model_num] * model_num\n bounds = [(0.0,1.0)]*model_num\n\n res = minimize((lambda x: multi_log_loss(merge_results_by_weights(y_preds, x))), starting_weights, method='SLSQP', bounds=bounds)\n print res['x'], res['func']\n'''\n","sub_path":"OttoGroupProductClassification/code/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":747,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"545809508","text":"from __future__ import absolute_import\n\nimport importlib\nimport os\nimport yaml\nimport gorilla\n\nimport pandas as pd\nimport pickle\n\nimport paddle\nimport paddle.fluid as fluid\n\nfrom mlflow import pyfunc\nfrom mlflow.models import Model\nimport mlflow.tracking\nfrom mlflow.exceptions import MlflowException\nfrom mlflow.tracking.artifact_utils import _download_artifact_from_uri\nfrom mlflow.utils.environment import _mlflow_conda_env\nfrom mlflow.utils.model_utils import _get_flavor_configuration\nfrom mlflow.utils.annotations import experimental\nfrom mlflow.utils.autologging_utils import try_mlflow_log\nimport numpy as np\n\nFLAVOR_NAME = \"paddle\"\n_MODEL_SAVE_PATH = \"model\"\n\n\ndef get_default_conda_env():\n return _mlflow_conda_env(\n additional_conda_deps=[\n \"paddle=={}\".format(paddle.__version__),\n ],\n additional_pip_deps=None, #[\"cloudpickle=={}\".format(cloudpickle.__version__)],\n additional_conda_channels=[\"paddle\",])\n\n\ndef save_model(path, feeded_var_names, target_vars, executor, conda_env=None, mlflow_model=Model(),\n **kwargs): \n # paddle_module = importlib.import_module(paddle_module)\n\n path = os.path.abspath(path)\n if os.path.exists(path):\n raise MlflowException(\"Path '{}' already exists\".format(path))\n #model_subpath = \"data\"\n #model_path = os.path.join(path, model_subpath)\n os.makedirs(path)\n #os.makedirs(model_path)\n #print(\"paddle.py save_model path: \", path)\n fluid.io.save_inference_model(dirname=path, feeded_var_names=feeded_var_names,\n target_vars=target_vars, executor=executor)\n \n 
mlflow_model.add_flavor(FLAVOR_NAME,\n paddle_version=paddle.__version__,)\n #data=path)\n conda_env_subpath = \"conda.yaml\"\n if conda_env is None:\n conda_env = get_default_conda_env()\n elif not isinstance(conda_env, dict):\n with open(conda_env, \"r\") as f:\n conda_env = yaml.safe_load(f)\n with open(os.path.join(path, conda_env_subpath), \"w\") as f:\n yaml.safe_dump(conda_env, stream=f, default_flow_style=False)\n pyfunc.add_to_model(mlflow_model, loader_module=\"mlflow.paddle\",\n env=conda_env_subpath)\n mlflow_model.save(os.path.join(path, \"MLmodel\"))\n\n\ndef log_model(artifact_path, conda_env=None,\n **kwargs):\n Model.log(artifact_path=artifact_path, flavor=mlflow.paddle,\n conda_env=conda_env, **kwargs)\n\n\ndef load_model(model_uri, executor, **kwargs): # require executor\n local_model_path = _download_artifact_from_uri(artifact_uri=model_uri)\n flavor_conf = _get_flavor_configuration(model_path=local_model_path, flavor_name=FLAVOR_NAME)\n paddle_model_artifacts_path = os.path.join(\n local_model_path,\n flavor_conf.get(\"data\", _MODEL_SAVE_PATH))\n return _load_model(paddle_model_artifacts_path, executor) \n\n\ndef _load_model(path, executor, **kwargs): # require executor\n [inference_program, feed_target_names, fetch_targets] = fluid.io.load_inference_model(dirname=path, executor=executor)\n return inference_program, feed_target_names, fetch_targets, executor\n\n\ndef _load_pyfunc(path, **kwargs):\n \"\"\"\n Load PyFunc implementation. Called by ``pyfunc.load_pyfunc``.\n\n :param path: Local filesystem path to the MLflow Model with the ``pytorch`` flavor.\n \"\"\"\n exe = fluid.Executor(fluid.CPUPlace())\n exe.run(fluid.default_startup_program())\n inference_program, feed_target_names, fetch_targets, executor = _load_model(path, executor = exe, **kwargs)\n return _PaddleWrapper(inference_program=inference_program, feed_target_names=feed_target_names, fetch_targets=fetch_targets, executor=executor)\n\n\nclass _PaddleWrapper(object):\n \"\"\"\n Wrapper class that creates a predict function such that\n predict(data: pd.DataFrame) -> model's output as pd.DataFrame (pandas DataFrame)\n \"\"\"\n def __init__(self, inference_program, feed_target_names, fetch_targets, executor):\n self.inference_program = inference_program\n self.feed_target_names = feed_target_names\n self.fetch_targets = fetch_targets\n self.executor = executor\n\n def predict(self, data):\n exe = self.executor\n generated = {column:np.asarray([data[column].values.tolist()]).astype(\"float32\") for column in data.columns}\n results = exe.run(self.inference_program,\n feed=generated,\n fetch_list=self.fetch_targets)\n print(type(results))\n print(results)\n predicted = pd.DataFrame(results[0])\n print(predicted)\n return predicted\n","sub_path":"mlflow/paddle.py","file_name":"paddle.py","file_ext":"py","file_size_in_byte":4609,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"482398008","text":"from constants import Constants\nfrom Config.config import Config\nfrom typing import Optional\nfrom CommandLine.command_line_utilities import require_input, write_to_console\nimport json\n\nhome = str()\n\n\nclass ConfigManager:\n _config: Optional[Config]\n\n def __init__(self):\n self._config = None\n\n try:\n self._read_config()\n except FileNotFoundError:\n self._set_config(*self._require_config_parameters_from_console())\n self._write_config()\n\n def _read_config(self) -> None:\n with open(Constants.CONFIG_PATH) as config_file:\n config_parameters = 
self._translate_json_to_config(config_file.read())\n            self._set_config(*config_parameters)\n\n    @staticmethod\n    def _require_config_parameters_from_console() -> (str, str):\n        write_to_console(\"Config not found - Creating config\")\n        config_parameters = ()\n        for input_message in [\"Enter \" + parameter + \":\" for parameter in [\"username\", \"default board\", \"server\"]]:\n            write_to_console(input_message)\n            # prompt with the message for the current parameter (the original always asked for the username)\n            config_parameters += (require_input(input_message),)\n        return config_parameters\n\n    def _set_config(self, *config_parameters):\n        self._config = Config(*config_parameters)\n\n    def _write_config(self):\n        try:\n            with open(Constants.CONFIG_PATH, \"w+\") as config_file:\n                print(self._translate_config_to_json())\n                config_file.write(self._translate_config_to_json())\n        except Exception as e:\n            print(e)\n            pass\n\n    def _translate_config_to_json(self) -> str:\n        return json.dumps([{\n            'username': self._config.username,\n            'default_board': self._config.default_board,\n            'server': self._config.server\n        }], separators=(',', ':'))\n\n    @staticmethod\n    def _translate_json_to_config(json_string):\n        try:\n            json_dict = json.loads(json_string)[0]\n            return json_dict[\"username\"], json_dict[\"default_board\"], json_dict[\"server\"]\n        except:\n            pass\n\n    @property\n    def username(self):\n        return self._config.username\n\n    @property\n    def default_board(self):\n        return self._config.default_board\n\n    @property\n    def server(self):\n        return self._config.server\n","sub_path":"src/Config/config_manager.py","file_name":"config_manager.py","file_ext":"py","file_size_in_byte":2309,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
{"seq_id":"459830180","text":"#!/usr/bin/env python\n\n\"\"\"\nDesica model: simple plant hydraulics model with mortality.\n\nStem water potential follows Xu et al.\n\nReference:\n==========\n* Xu, X., Medvigy, D., Powers, J. S., Becknell, J. M. and Guan, K.\n  (2016), Diversity in plant hydraulic traits explains seasonal and\n  inter-annual variations of vegetation dynamics in seasonally dry\n  tropical forests. New Phytol, 212: 80–95. 
doi:10.1111/nph.14009.\n\nThat's all folks.\n\"\"\"\n__author__ = \"Martin De Kauwe\"\n__version__ = \"1.0 (23.11.2017)\"\n__email__ = \"mdekauwe@gmail.com\"\n\nimport pandas as pd\nimport sys\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os\nfrom generate_met_data import generate_met_data\nfrom canopy import Canopy, FarquharC3\n\nclass Desica(object):\n\n def __init__(self, plc_dead=88.,soil_depth=1.0, ground_area=1.0,\n met_timestep=30., sf=8., g1=4., Cs=100000., b=6.,\n Cl=10000., kp_sat=3., p50=-4., psi_f=-2., s50=30., gmin=10,\n psi_leaf0=-1., psi_stem0=-0.5, theta_sat=0.5, sw0=0.5, AL=2.5,\n psie=-0.8*1E-03, Ksat=20., Lv=10000., F=None, keep_wet=False,\n stop_dead=True, nruns=1):\n\n self.keep_wet = keep_wet\n self.stop_dead = stop_dead\n self.plc_dead = plc_dead\n self.nruns = nruns\n self.soil_depth = soil_depth\n self.ground_area = ground_area\n self.soil_volume = self.ground_area * self.soil_depth\n self.met_timestep = met_timestep\n self.sf = sf\n self.g1 = g1\n self.Cs = Cs\n self.Cl = Cl\n self.kp_sat = kp_sat\n self.p50 = p50\n self.psi_f = psi_f\n self.s50 = s50\n self.gmin = gmin\n self.psi_leaf0 = psi_leaf0 # initial leaf water potential (MPa)\n self.psi_stem0 = psi_stem0 # initial stem water potential (MPa)\n self.theta_sat = theta_sat\n self.sw0 = sw0 # initial soil volumetric water content (m3 m-3)\n self.AL = AL\n self.lai = AL / self.ground_area\n self.b = b\n self.psie = psie\n self.Ksat = Ksat\n self.Lv = Lv\n self.F = F\n self.timestep_sec = 60. * self.met_timestep / self.nruns\n\n def main(self, met=None):\n\n (n, out) = self.initial_model(met)\n\n for i in range(1, n):\n\n out = self.run_timestep(i, met, out)\n\n # save solutions, use as input for another run,\n # keeping everything else the same\n #if self.run_twice:\n # out2 = out\n # out2.psi_leaf[i-1] = out.psi_leaf[i]\n # out2.psi_stem[i-1] = out.psi_stem[i]\n # out = self.run_timestep(i, met, out2)\n\n # save solutions, use as input for another run,\n # keeping everything else the same\n for j in range(1, self.nruns):\n out_temp = out\n out_temp.psi_leaf[i-1] = out.psi_leaf[i]\n out_temp.psi_stem[i-1] = out.psi_stem[i]\n out = self.run_timestep(i, met, out_temp)\n\n if self.stop_dead:\n # percent loss conductivity (%)\n plc = self.calc_plc(out.kp[i])\n if plc > self.plc_dead:\n break\n\n out[\"plc\"] = self.calc_plc(out.kp)\n # mmol s-1\n out[\"Eplant\"] = self.AL * out.Eleaf\n out[\"t\"] = np.arange(1, n+1)\n\n return (out)\n\n def initial_model(self, met):\n n = len(met)\n\n out = self.setup_out_df(met)\n out.psi_leaf[0] = self.psi_leaf0\n out.psi_stem[0] = self.psi_stem0\n out.sw[0] = self.sw0\n out.psi_soil[0] = self.calc_swp(self.sw0)\n out.Eleaf[0] = 0.0\n\n # soil-to-root hydraulic conductance (mmol m-2 s-1 MPa-1)\n out.ks[0] = self.calc_ksoil(out.psi_soil[0])\n\n return n, out\n\n def setup_out_df(self, met):\n dummy = np.ones(len(met)) * np.nan\n out = pd.DataFrame({'Eleaf':dummy, 'psi_leaf':dummy, 'psi_stem':dummy,\n 'psi_soil':dummy, 'sw':dummy, 'ks':dummy,\n 'kp':dummy, 'Jsl':dummy, 'Jrs':dummy, 'krst':dummy,\n 'kstl':dummy})\n\n return out\n\n def run_timestep(self, i, met, out):\n\n self.calc_conductances(out, i)\n\n mult = (self.g1 / met.Ca[i]) * self.fsig_tuzet(out.psi_leaf[i-1])\n\n # Calculate photosynthesis and stomatal conductance\n gsw = self.F.canopy(met.Ca[i], met.tair[i], met.par[i],\n met.vpd[i], mult)\n\n # Don't add gmin, instead use it as bottom value.\n gsw = max(self.gmin, 1000. 
* gsw)\n\n # Leaf transpiration assuming perfect coupling (mmol m-2 s-1)\n out.Eleaf[i] = gsw * (met.vpd[i] / met.press[i])\n\n out.psi_leaf[i] = self.calc_leaf_water_potential(out.kstl[i],\n out.psi_stem[i-1],\n out.psi_leaf[i-1],\n out.Eleaf[i])\n\n # Flux from stem to leaf (mmol s-1) = change in leaf storage,\n # plus transpiration\n out.Jsl[i] = self.calc_flux_to_leaf(out.psi_leaf[i], out.psi_leaf[i-1],\n out.Eleaf[i])\n\n # Update stem water potential\n out.psi_stem[i] = self.update_stem_wp(out.krst[i], out.psi_soil[i-1],\n out.Jsl[i], out.psi_stem[i-1])\n\n # flux from soil to stem, i.e. root water uptake (mmol s-1) = change in\n # stem storage, plus Jsl\n out.Jrs[i] = self.calc_flux_soil_to_stem(out.psi_stem[i],\n out.psi_stem[i-1], out.Jsl[i])\n\n out.sw[i] = self.update_sw_balance(met.precip[i], out.Jrs[i],\n out.sw[i-1])\n\n # Update soil water potential\n out.psi_soil[i] = self.calc_swp(out.sw[i])\n\n # Update soil-to-root hydraulic conductance (mmol m-2 s-1 MPa-1)\n out.ks[i] = self.calc_ksoil(out.psi_soil[i])\n\n return out\n\n def calc_conductances(self, out, i):\n # Plant hydraulic conductance (mmol m-2 s-1 MPa-1). NB. depends on stem\n # water potential from the previous timestep.\n out.kp[i] = self.kp_sat * self.fsig_hydr(out.psi_stem[i-1])\n\n # Conductance from soil to stem water store (mmol m-2 s-1 MPa-1)\n out.krst[i] = 1.0 / (1.0 / out.ks[i-1] + 1.0 / (2.0 * out.kp[i]))\n\n # Conductance from stem water store to leaf (mmol m-2 s-1 MPa-1)\n out.kstl[i] = 2.0 * out.kp[i]\n\n def calc_swp(self, sw):\n return self.psie * (sw / self.theta_sat)**-self.b\n\n def calc_ksoil(self, psi_soil):\n rroot = 1E-06\n Ks = self.Ksat * (self.psie / psi_soil)**(2. + 3. / self.b)\n if psi_soil == 0.0:\n Ks = self.Ksat\n\n rcyl = 1.0 / np.sqrt(np.pi * self.Lv)\n Rl = self.Lv * self.soil_depth\n Ksoil = (Rl / self.lai) * 2. * np.pi * Ks / np.log(rcyl / rroot)\n\n return Ksoil\n\n def fsig_hydr(self, P):\n X = 50.\n P = np.abs(P)\n PX = np.abs(self.p50)\n V = (X - 100.) * np.log(1.0 - X / 100.)\n p = (P / PX)**((PX * self.s50) / V)\n relk = (1. - X / 100.)**p\n\n return (relk)\n\n def calc_leaf_water_potential(self, kstl, psi_stem_prev, psi_leaf_prev,\n Eleaf):\n # Following Xu et al, see Appendix + code\n #\n # Reference:\n # ==========\n # * Xu, X., Medvigy, D., Powers, J. S., Becknell, J. M. and Guan, K.\n # (2016), Diversity in plant hydraulic traits explains seasonal and\n # inter-annual variations of vegetation dynamics in seasonally dry\n # tropical forests. New Phytol, 212: 80–95. doi:10.1111/nph.14009.\n #\n # Can write the dynamic equation as: dpsi_leaf_dt = b + a*psi_leaf\n # Then it follows (Xu et al. 2016, Appendix, and Code).\n bp = (self.AL * 2.0 * kstl * psi_stem_prev - self.AL * Eleaf) / self.Cl\n ap = -(self.AL * 2.0 * kstl / self.Cl)\n psi_leaf = ((ap * psi_leaf_prev + bp) * \\\n np.exp(ap * self.timestep_sec) - bp) / ap\n\n return psi_leaf\n\n def calc_flux_to_leaf(self, psi_leaf, psi_leaf_prev, Eleaf):\n # Flux from stem to leaf = change in leaf storage, plus transpiration\n Jsl = (psi_leaf - psi_leaf_prev) * \\\n self.Cl / self.timestep_sec + self.AL * Eleaf\n return Jsl\n\n def update_stem_wp(self, krst, psi_soil_prev, Jsl, psi_stem_prev):\n # Following Xu et al, see Appendix + code\n #\n # Reference:\n # ==========\n # * Xu, X., Medvigy, D., Powers, J. S., Becknell, J. M. and Guan, K.\n # (2016), Diversity in plant hydraulic traits explains seasonal and\n # inter-annual variations of vegetation dynamics in seasonally dry\n # tropical forests. New Phytol, 212: 80–95. 
doi:10.1111/nph.14009.\n #\n bp = (self.AL * 2.0 * krst * psi_soil_prev - Jsl) / self.Cs\n ap = -(self.AL * 2.0 * krst / self.Cs)\n psi_stem = ((ap * psi_stem_prev + bp) * \\\n np.exp(ap * self.timestep_sec)-bp) / ap\n\n return psi_stem\n\n def calc_flux_soil_to_stem(self, psi_stem, psi_stem_prev, Jsl):\n return (psi_stem - psi_stem_prev) * self.Cs / self.timestep_sec + Jsl\n\n def update_sw_balance(self, precip, Jrs, sw_prev):\n\n # Soil water increase: precip - transpiration (units kg total tstep-1)\n # (Note: transpiration is part of Jrs).\n conv = 1E-06 * 18.\n water_in = self.ground_area * precip - self.timestep_sec * conv * Jrs\n\n # volumetric soil water content (m3 m-3)\n sw = min(0.5, sw_prev + water_in / (self.soil_volume * 1E03))\n\n return sw\n\n def calc_plc(self, kp):\n return 100.0 * (1.0 - kp / self.kp_sat)\n\n def fsig_tuzet(self, psi_leaf):\n \"\"\" Empirical logistic function to describe the sensitivity of stomata\n to leaf water potential. Function assumes that stomata are insensitive\n to LWP at values close to zero and that stomata rapidly close with\n decreasing LWP.\n\n Reference:\n ----------\n * Tuzet et al. (2003) A coupled model of stomatal conductance,\n photosynthesis and transpiration. Plant, Cell and Environment 26,\n 1097–1116\n\n \"\"\"\n # psi_f is the reference potential\n num = 1.0 + np.exp(self.sf * self.psi_f)\n den = 1.0 + np.exp(self.sf * (self.psi_f - psi_leaf))\n\n fw = num / den\n return fw\n\n\n\ndef make_plot(out, timestep=15):\n\n if timestep == 15:\n ndays = out.t / 96\n elif timestep == 30:\n ndays = out.t / 96 * 2\n elif timestep == 60:\n ndays = out.t / 96 * 4\n\n fig = plt.figure(figsize=(9,6))\n fig.subplots_adjust(hspace=0.3)\n fig.subplots_adjust(wspace=0.2)\n plt.rcParams['text.usetex'] = False\n plt.rcParams['font.family'] = \"sans-serif\"\n plt.rcParams['font.sans-serif'] = \"Helvetica\"\n plt.rcParams['axes.labelsize'] = 12\n plt.rcParams['font.size'] = 12\n plt.rcParams['legend.fontsize'] = 10\n plt.rcParams['xtick.labelsize'] = 12\n plt.rcParams['ytick.labelsize'] = 12\n\n ax1 = fig.add_subplot(111)\n ax2 = ax1.twinx()\n\n ln1 = ax1.plot(ndays, out.psi_leaf, \"k-\", label=\"Leaf\")\n ln2 = ax1.plot(ndays, out.psi_stem, \"r-\", label=\"Stem\")\n ln3 = ax1.plot(ndays, out.psi_soil, \"b-\", label=\"Soil\")\n\n ln4 = ax2.plot(ndays, out.plc, ls='-', color=\"darkgrey\",\n label=\"PLC\")\n\n # added these three lines\n lns = ln1 + ln2 + ln3 + ln4\n labs = [l.get_label() for l in lns]\n ax1.legend(lns, labs, loc=(0.5,0.05), ncol=2)\n\n ax2.set_ylabel(r'PLC (%)')\n\n ax1.set_xlabel(\"Time (days)\")\n ax1.set_ylabel(\"Water potential (MPa)\")\n #ax1.legend(numpoints=1, loc=\"best\")\n fig.savefig(\"test_plot.pdf\", bbox_inches='tight', pad_inches=0.1)\n\ndef plot_swp_sw(out):\n\n fig = plt.figure(figsize=(9,6))\n fig.subplots_adjust(hspace=0.3)\n fig.subplots_adjust(wspace=0.2)\n plt.rcParams['text.usetex'] = False\n plt.rcParams['font.family'] = \"sans-serif\"\n plt.rcParams['font.sans-serif'] = \"Helvetica\"\n plt.rcParams['axes.labelsize'] = 12\n plt.rcParams['font.size'] = 12\n plt.rcParams['legend.fontsize'] = 10\n plt.rcParams['xtick.labelsize'] = 12\n plt.rcParams['ytick.labelsize'] = 12\n\n ax1 = fig.add_subplot(111)\n\n ax1.plot(out.sw, out.psi_soil, \"b.\", label=\"Soil\")\n\n ax1.set_xlabel(\"SW (m3 m-3)\")\n ax1.set_ylabel(\"Soil Water potential (MPa)\")\n #ax1.legend(numpoints=1, loc=\"best\")\n fig.savefig(\"sw_swp.pdf\", bbox_inches='tight', pad_inches=0.1)\n\n\n\nif __name__ == \"__main__\":\n\n time_step = 30\n\n met = 
generate_met_data(Tmin=10, RH=30, ndays=200, time_step=time_step)\n\n    psi_stem0 = 0.\n    AL = 6.          # leaf area (m2)\n    p50 = -4.        # MPa\n    psi_f = -3.      # Reference potential (MPa) for Tuzet model\n    gmin = 10.       # mmol m-2 s-1\n    Cl = 10000.      # Leaf capacitance (mmol MPa-1) (total plant)\n    Cs = 120000.     # Stem capacitance (mmol MPa-1)\n    g1 = 4.0\n\n    F = Canopy(g1=g1)\n    D = Desica(psi_stem0=psi_stem0, AL=AL, p50=p50, psi_f=psi_f, gmin=gmin,\n               Cl=Cl, Cs=Cs, F=F, g1=g1, nruns=3, stop_dead=True)\n    out = D.main(met)\n\n    make_plot(out, time_step)\n    plot_swp_sw(out)\n","sub_path":"python/desica.py","file_name":"desica.py","file_ext":"py","file_size_in_byte":13352,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
{"seq_id":"38599788","text":"from django.shortcuts import render, get_object_or_404\nfrom .models import Section, Comment\nfrom .form import CommentForm\n\ndef sections_view(request):\n    sections = Section.objects.all()\n    template_name = 'resume/resume.html'\n    context = {\"sections\":sections}\n\n    return render(request, template_name, context)\n\n\ndef section_details_view(request, slug):\n    section = get_object_or_404(Section, slug=slug)\n    comments = Comment.objects.filter(post=section)\n\n    if request.method == 'POST':\n        form = CommentForm(request.POST or None)\n        if form.is_valid():\n            print(\"yes\")\n            comment= request.POST.get(\"comment\")\n            name= request.POST.get(\"name\")\n            email= request.POST.get(\"email\")\n\n            new_comment = Comment(\n                post = section,\n                comment =comment,\n                name =name,\n                email =email,\n            )\n\n            new_comment.save()\n        else:\n            # 'error' was undefined here; report the form's validation errors instead\n            print(form.errors)\n\n    template_name = 'resume/resume_details.html'\n    context = {\"section\":section,\n               'comments':comments}\n\n    return render(request, template_name, context)\n","sub_path":"src/resume/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1140,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
{"seq_id":"404988901","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Dec 12 19:11:21 2019\n\n@author: mathe\n\"\"\"\n\n\n\nimport csv\nfrom selenium import webdriver\nimport time\nfrom selenium.webdriver.common.keys import Keys \n\ndef write_csv(pagedata):\n    with open('UK.csv', 'w',encoding='utf-8-sig', newline='') as csvFile:\n        fields = pagedata[0].keys()\n        writer = csv.DictWriter(csvFile, fieldnames=fields)\n        writer.writeheader()\n        for data in pagedata:\n            writer.writerow(data)\n    csvFile.close()\nif __name__ == \"__main__\":\n    options=webdriver.ChromeOptions()\n    options.add_argument('start-maximized')\n    driver = webdriver.Chrome(chrome_options=options,executable_path='C:/Users/mathe/.spyder-py3/chromedriver.exe')\n    start_url = 'https://www.dacia.co.uk/find-a-dealer/find-a-dealer-listing.html'\n    pagedata=[]\n    driver.get(start_url)\n    nextpage= True\n    while nextpage:\n        while len(driver.find_elements_by_css_selector('.DynamicDealerList__Dealer .DynamicDealerList__DealerName'))==0 or len(driver.find_elements_by_css_selector('.LoaderBar.is-visible'))>0:\n            print('Waiting..')\n            time.sleep(0.3)\n        \n        elems=driver.find_elements_by_css_selector('div.DynamicDealerList__Dealer')\n        \n        \n        \n        for elem in elems:\n            \n            carddetail={'name':elem.find_element_by_css_selector('.DynamicDealerList__DealerName').text,\n                        'address':elem.find_element_by_css_selector('.DynamicDealerList__DealerAdress').text,\n                        'city':elem.find_element_by_css_selector('.DynamicDealerList__DealerCity').text,\n                        'phone':elem.find_element_by_css_selector('.DynamicDealerList__DealerPhone').text}\n            \n            \n            
pagedata.append(carddetail)\n            \n        nextpage=len(driver.find_elements_by_css_selector('.rc-pagination-next'))>0 and len(driver.find_elements_by_css_selector('.rc-pagination-disabled.rc-pagination-next'))==0\n        if nextpage:\n            driver.execute_script(\"return arguments[0].scrollIntoView();\", driver.find_element_by_css_selector('.rc-pagination-next'))\n            time.sleep(0.2)\n\n            driver.find_element_by_css_selector('.rc-pagination-next').click()\n            time.sleep(0.2)\n    write_csv(pagedata)\n    driver.quit()\n    \n","sub_path":"Mathew/Dacia_UK.py","file_name":"Dacia_UK.py","file_ext":"py","file_size_in_byte":2347,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
{"seq_id":"137973480","text":"import sys\n\n# third-party imports\nimport requests\nfrom bs4 import BeautifulSoup\nfrom pyvirtualdisplay import Display\nfrom PyQt5.QtWebEngineWidgets import QWebEnginePage\nfrom PyQt5.QtWidgets import QApplication\n\n\n\nclass Render(QWebEnginePage):\n    \"\"\"Render HTML with PyQt5 QtWebEngine.\"\"\"\n\n    def __init__(self, html):\n        self.html = None\n        self.app = QApplication(sys.argv)\n        QWebEnginePage.__init__(self)\n        self.loadFinished.connect(self._loadFinished)\n        self.setHtml(html)\n        self.app.exec_()\n\n    def _loadFinished(self, result):\n        # toHtml() is asynchronous in QtWebEngine, so collect the result via a callback\n        self.toHtml(self._storeHtml)\n\n    def _storeHtml(self, html):\n        self.html = html\n        self.app.quit()\n\n\nurl = 'https://impythonist.wordpress.com/2015/01/06/ultimate-guide-for-scraping-javascript-rendered-web-pages/'\n\n# get the raw HTML\nsource_html = requests.get(url).text\n\n# return the JavaScript rendered HTML\nwith Display(visible=0, size=(800, 600)):\n    rendered_html = Render(source_html).html\n\n# get the BeautifulSoup\nsoup = BeautifulSoup(rendered_html, 'html.parser')\n\nprint('title is %r' % soup.select_one('title').text)","sub_path":"BeautifulSoup/Qt_Backup.py","file_name":"Qt_Backup.py","file_ext":"py","file_size_in_byte":1064,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
{"seq_id":"360810227","text":"from ActiveSplineDiffIOU import ActiveSplineTorch, DiffIOU\nimport random\nrandom.seed(1)\nimport os\nfrom glob import glob\nimport shutil\nimport json\nimport numpy as np\nimport torch\nimport skimage.io as io\nimport matplotlib as mpl\nmpl.use('Agg')\nimport matplotlib.pyplot as plt\n\n\ndef compute_iou_cityscapes(pred, gt):\n    \"\"\"\n    Compute IOU [Cityscapes mode].\n\n    Args:\n        pred: pred mask.\n        gt: gt mask.\n    \"\"\"\n    pred = pred.astype(np.bool)\n    gt = gt.astype(np.bool)\n\n    pred_area = np.count_nonzero(pred)\n    gt_area = np.count_nonzero(gt)\n    print('Ground Truth Area : %d, Predict Area : %d, ratio: %.5f'%(gt_area, pred_area, float(pred_area) / float(gt_area)))\n\n    # true_negatives = np.count_nonzero(np.logical_and(np.logical_not(gt), np.logical_not(pred)))\n    false_negatives = np.count_nonzero(np.logical_and(gt, np.logical_not(pred)))\n    false_positives = np.count_nonzero(np.logical_and(np.logical_not(gt), pred))\n    true_positives = np.count_nonzero(np.logical_and(gt, pred))\n\n    union = float(true_positives + false_positives + false_negatives)\n    intersection = float(true_positives)\n\n    iou = intersection / union if union > 0. 
else 0.\n\n return iou\n\ndef uniformsample(pgtnp_px2, newpnum):\n pnum, cnum = pgtnp_px2.shape\n assert cnum == 2\n\n idxnext_p = (np.arange(pnum, dtype=np.int32) + 1) % pnum\n pgtnext_px2 = pgtnp_px2[idxnext_p]\n edgelen_p = np.sqrt(np.sum((pgtnext_px2 - pgtnp_px2) ** 2, axis=1))\n edgeidxsort_p = np.argsort(edgelen_p)\n\n # two cases\n # we need to remove gt points\n # we simply remove shortest paths\n if pnum > newpnum:\n edgeidxkeep_k = edgeidxsort_p[pnum - newpnum:]\n edgeidxsort_k = np.sort(edgeidxkeep_k)\n pgtnp_kx2 = pgtnp_px2[edgeidxsort_k]\n assert pgtnp_kx2.shape[0] == newpnum\n return pgtnp_kx2\n # we need to add gt points\n # we simply add it uniformly\n else:\n edgenum = np.round(edgelen_p * newpnum / np.sum(edgelen_p)).astype(np.int32)\n for i in range(pnum):\n if edgenum[i] == 0:\n edgenum[i] = 1\n\n # after round, it may has 1 or 2 mismatch\n edgenumsum = np.sum(edgenum)\n if edgenumsum != newpnum:\n\n if edgenumsum > newpnum:\n\n id = -1\n passnum = edgenumsum - newpnum\n while passnum > 0:\n edgeid = edgeidxsort_p[id]\n if edgenum[edgeid] > passnum:\n edgenum[edgeid] -= passnum\n passnum -= passnum\n else:\n passnum -= edgenum[edgeid] - 1\n edgenum[edgeid] -= edgenum[edgeid] - 1\n id -= 1\n else:\n id = -1\n edgeid = edgeidxsort_p[id]\n edgenum[edgeid] += newpnum - edgenumsum\n\n assert np.sum(edgenum) == newpnum\n\n psample = []\n for i in range(pnum):\n pb_1x2 = pgtnp_px2[i:i + 1]\n pe_1x2 = pgtnext_px2[i:i + 1]\n\n pnewnum = edgenum[i]\n wnp_kx1 = np.arange(edgenum[i], dtype=np.float32).reshape(-1, 1) / edgenum[i];\n\n pmids = pb_1x2 * (1 - wnp_kx1) + pe_1x2 * wnp_kx1\n psample.append(pmids)\n\n psamplenp = np.concatenate(psample, axis=0)\n return psamplenp\n\ndef copy_json_for_test():\n root_path = '/ais/gobi6/jungao/polyrnn-pp-chamfer/model_dirs/active-spline-20-cp/prediction/epoch13_step_34000'\n change_path = '/ais/gobi6/jungao/polyrnn-pp-chamfer/model_dirs/active-spline-20-cp/prediction/test_diff_iou'\n print('==> List dir')\n anns = glob(os.path.join(root_path, '*.json'))\n random.shuffle(anns)\n if not os.path.exists(change_path):\n os.makedirs(change_path)\n\n print('==> Copy')\n for i in range(100):\n print(i)\n shutil.copy(anns[i], change_path)\n\ndef vis_image(gt_points, pred_points, save_name):\n fig, ax = plt.subplots()\n ax.plot(gt_points[:,0], gt_points[:,1], '-o')\n ax.plot(pred_points[:, 0], pred_points[:, 1], '-o')\n ax.set_aspect('equal')\n # ax.set_origin('upper')\n plt.gca().invert_yaxis()\n plt.savefig(save_name, origin='upper')\n\ndef get_real_iou(ann, preds_dir):\n gt_mask = io.imread(os.path.join(preds_dir, ann['gt_mask_fname']))\n pred_mask = io.imread(os.path.join(preds_dir, ann['pred_mask_fname']))\n\n # Get IOU\n iou = compute_iou_cityscapes(pred_mask, gt_mask)\n return iou\n\n\ndef check_json_spline(file_name, preds_dir):\n print('==> Get Positions')\n ann= json.load(open(file_name, 'r'))\n spline_pos = ann['spline_pos']\n spline_pos = np.array(spline_pos)\n original_polys = [np.array(comp['poly']) for comp in ann['components']][0]\n print(spline_pos.shape)\n print(original_polys.shape)\n\n print('==> Get real IOU')\n real_iou = get_real_iou(ann, preds_dir)\n print(real_iou)\n\n # vis_image(original_polys, spline_pos, 'original_img.jpg')\n print('==> Normalize Positions')\n original_polys = original_polys.astype(np.float)\n x_min = np.min(original_polys[:,0]) - 10\n y_min = np.min(original_polys[:,1]) - 10\n x_max = np.max(original_polys[:,0]) + 10\n y_max = np.max(original_polys[:,1]) + 10\n scale = max(x_max - x_min, y_max - y_min)\n 
spline_pos[:,0] = (spline_pos[:,0] - x_min) / scale\n    spline_pos[:,1] = (spline_pos[:,1] - y_min) / scale\n    original_polys[:,0] = (original_polys[:,0] - x_min) / scale\n    original_polys[:,1] = (original_polys[:,1] - y_min) / scale\n\n    # print(spline_pos)\n    # print(original_polys)\n    # vis_image(original_polys, spline_pos, 'normalize_img.jpg')\n\n    print('==> Sample Points from the curve and line')\n    active_spline = ActiveSplineTorch(20, 1000)\n    s_ps = active_spline.sample_point(torch.from_numpy(spline_pos).unsqueeze(0))\n    # p_ps = uniformsample(original_polys, 1000)\n    p_ps = original_polys\n    p_ps = torch.from_numpy(p_ps).unsqueeze(0).expand(2, p_ps.shape[0],2).float()\n    s_ps = s_ps.expand(2, 1000, 2).float()\n    # vis_image(p_ps[0].numpy(), s_ps[0].numpy(), 'sampled_points.jpg')\n\n    print('==> match for iou')\n    d_iou = DiffIOU(224)\n    iou = d_iou.get_iou(s_ps, p_ps)\n\n    print('Real IOU: %.4f, Pred IOU: %.4f'%(real_iou, iou[0]))\n    # print(iou)\n\n\n\ndef check_all_json():\n    root_path = '/ais/gobi6/jungao/polyrnn-pp-chamfer/model_dirs/active-spline-20-cp/prediction/test_diff_iou'\n    file_list = glob(os.path.join(root_path, '*.json'))\n    preds_path = '/ais/gobi6/jungao/polyrnn-pp-chamfer/model_dirs/active-spline-20-cp/prediction/epoch13_step_34000'\n    # file_name = 'frankfurt_000001_010830_39_info.json'\n    # file_name = os.path.join(root_path, file_name)\n    for file_name in file_list:\n        # print(file_name)\n        print(file_name)\n        # file_name = '/ais/gobi6/jungao/polyrnn-pp-chamfer/model_dirs/active-spline-20-cp/prediction/test_diff_iou/frankfurt_000000_017228_72_info.json'\n        # file_name = '/ais/gobi6/jungao/polyrnn-pp-chamfer/model_dirs/active-spline-20-cp/prediction/test_diff_iou/frankfurt_000001_017459_64_info.json'\n        check_json_spline(file_name, preds_path)\n        # exit()\n\nif __name__ == '__main__':\n    # copy_json_for_test()\n    # check_json_spline()\n    check_all_json()","sub_path":"configs/baselines/CurveGCN/active_spline/test_spline_diff_iou.py","file_name":"test_spline_diff_iou.py","file_ext":"py","file_size_in_byte":7220,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
{"seq_id":"362872499","text":"import requests  # make network requests\nfrom bs4 import BeautifulSoup  # parse HTML text\nimport pandas as pd  # process data\nimport os\nimport time  # handle timestamps\nimport json\n\n\ndef fetchUrl(url, kw, page):\n    # request headers\n    headers = {\n        \"Accept\": \"application/json, text/plain, */*\",\n        \"Content-Type\": \"application/json;charset=UTF-8\",\n        \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.114 Safari/537.36 Edg/89.0.774.68\",\n        \"Refer\": \"http://search.people.cn/s/?keyword=%E6%96%B0%E5%86%A0&st=0&_=1617851359505\"\n    }\n\n    # request parameters\n    payloads = {\n        \"endTime\": 0,\n        \"hasContent\": True,\n        \"hasTitle\": True,\n        \"isFuzzy\": False,\n        \"key\": \"新冠\",  # search keyword (\"COVID-19\")\n        \"limit\": 10,\n        \"page\": page,\n        \"sortType\": 0,\n        \"startTime\": 0,\n        \"type\": 0,\n    }\n\n    # send the POST request\n    proxies = {\"http\": \"http://165.225.77.44:443\"}\n\n    r = requests.post(url, headers=headers, data=json.dumps(payloads), proxies=proxies)\n    return r.json()\n\n\ndef parseJson(jsonObj):\n    # parse the data\n    records = jsonObj[\"data\"][\"records\"];\n    for item in records:\n        # only a few fields are parsed here as an example; parse the remaining items as needed\n\n        belongsName = item[\"belongsName\"]\n\n        displayTime = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime(item[\"displayTime\"] / 1000))\n\n        title = BeautifulSoup(item[\"title\"], \"html.parser\").text\n        # url = item[\"url\"]\n\n        yield [[title, displayTime, belongsName]]\n\n\ndef 
saveFile(path, filename, data):\n    # create the path if it does not exist\n    if not os.path.exists(path):\n        os.makedirs(path)\n    # save the data\n    dataframe = pd.DataFrame(data)\n    dataframe.to_csv(path + filename + \".csv\", encoding='utf_8_sig', mode='a', index=False, sep=',', header=False)\n\n\nif __name__ == \"__main__\":\n    # start page, end page, and keyword settings\n    start = 6000\n    end = 24000\n    kw = \"test1\"\n\n    # write the header row\n    headline = [[\"Title\", \"Publish Time\", \"Source\"]]\n    saveFile(\"./data/\", kw, headline)\n    # crawl the data\n    for page in range(start, end + 1):\n        url = \"http://search.people.cn/api-search/elasticSearch/search\"\n        html = fetchUrl(url, kw, page)\n        for data in parseJson(html):\n            saveFile(\"./data/\", kw + \"0\", data)\n        print(\"Page {} crawled\".format(page))\n\n    # completion message\n    print(\"Crawling finished! The data has been saved to the path below:\")\n    print(os.getcwd(), \"\\\\data\")\n","sub_path":"xinhua/try.py","file_name":"try.py","file_ext":"py","file_size_in_byte":2577,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
{"seq_id":"1890289","text":"\n# coding: utf-8\n\n# In[ ]:\n\nimport datetime as dt\nimport os\n\nimport pandas as pd\nimport pytz\n\npd.set_option('display.max_columns', None)\n\ncentral_time = dt.datetime.now(pytz.timezone('US/Central'))\nmodified_location = '../output/'\nsplit_location = '../modified/'\nraw_location = '../raw/'\ntableau_location = 'tableau_data_connections/'\nexport_filename = 'interval_data_netspend_'\nimport_filename = 'HourlyReport'\nsplit_filename = 'dimension_split.csv'\n\ndate_format = '%Y%m%d'\nrequired_date_range = pd.Series(\n    pd.date_range(((central_time - pd.Timedelta(days=14))), central_time - pd.Timedelta(days=1)).strftime('%Y-%m-%d'))\n\nexisting_list = []\nfor files in os.listdir(modified_location):\n    if export_filename in files:\n        existing_list.append(files[23:-8])\n\ns = set(existing_list)\nmissing_dates = [x for x in required_date_range if x not in s]\nmissing_dates = pd.to_datetime(missing_dates).strftime(date_format)\n\n\ndef read_concat_file(location, filename, parse_dates=True, encoding='cp1252', dtypes=None, ext='.csv'):\n    df_list = []\n    for file_date in missing_dates:\n        try:\n            read_file = '{}{}{}{}'.format(location, filename, file_date, ext)\n            print('Appending file {}'.format(read_file))\n            df = pd.read_csv(read_file, parse_dates=parse_dates, encoding=encoding, dtype=dtypes)\n            df_list.append(df)\n        except IOError:\n            print('File does not exist:', read_file)\n    return pd.concat(df_list)\n\n\nhourly = read_concat_file(raw_location, import_filename, parse_dates=['Date'])\nsplits = pd.read_csv(split_location + split_filename, dtype={'split': object})\ninterval_dictionary = pd.read_csv(split_location + 'dimension_interval.csv') \n\n\n# In[ ]:\n\nhourly.rename(columns=lambda x: x.strip().lower().replace(\" \", \"_\"), inplace=True)\nhourly.rename(\n    columns={'date': 'call_date' , 'interval(cst)': 'interval_time' , 'skill': 'split' , 'avgabntime': 'avg_abn_time' ,\n             'acd_time': 'talk_time' , 'avg_acw__time': 'avg_acw_time' , 'handled': 'handled_calls' ,\n             'offered': 'offered_calls' , 'forecast': 'forecasted_calls' ,} , inplace=True)\n\n\n# In[ ]:\n\nhourly = hourly[hourly['offered_calls'] + hourly['handled_calls'] + hourly['abandoned_calls'] > 0].copy()\n\n\n# In[ ]:\n\nhourly['call_date'] = hourly['call_date'].dt.strftime('%Y-%m-%d')\nhourly['total_time'] = hourly['talk_time'] + hourly['acw_time'] + hourly['hold_time']\nhourly['queue_time'] = hourly['asa'] * hourly['handled_calls']\nhourly['abandoned_time'] = hourly['avg_abn_time'] * hourly['abandoned_calls']\nhourly = pd.merge(hourly, 
interval_dictionary, how='left', on='interval_time')\n\n\n# In[ ]:\n\nhourly['date_interval'] = pd.to_datetime((hourly['call_date'] + ' ' + hourly['interval_time']))\nhourly['time_zone'] = hourly['date_interval'].dt.tz_localize('US/Central').dt.strftime('%Z')\nhourly['date_interval_utc'] = hourly['date_interval'].dt.tz_localize('US/Central').dt.tz_convert('UTC').dt.strftime(\n    '%Y-%m-%d %H:%M:%S')\nhourly['date_interval'] = hourly['date_interval'].dt.strftime('%Y-%m-%d %H:%M:%S')\n\n\n# In[ ]:\n\nhourly = pd.merge(hourly, splits, on='split', how='left')\nhourly.drop_duplicates(subset=['interval_time', 'split', 'offered_calls', 'handled_calls'], keep='last', inplace=True)\n\nhourly.sort_values(by=['date_interval'], inplace=True)\nhourly.set_index('call_date', drop=False, inplace=True)\n\n\n# In[ ]:\n\nint_cols = ['interval_id', 'forecasted_calls', 'offered_calls', 'handled_calls', 'abandoned_calls',\n            'calls_abandoned_with_in_10_secs', 'calls_abandoned_above_10_secs', 'calls_abandoned_above_55_secs',\n            'calls_handled_w/in_sl', 'talk_time', 'acw_time', 'hold_time', 'total_time']\nhourly[int_cols] = hourly[int_cols].astype(int)\n\n# In[ ]:\n\ncols = ['call_date', 'interval_id', 'date_interval', 'time_zone', 'date_interval_utc', 'split_id', 'forecasted_calls',\n        'offered_calls', 'handled_calls', 'abandoned_calls', 'calls_abandoned_with_in_10_secs',\n        'calls_abandoned_above_10_secs', 'calls_abandoned_above_55_secs', 'calls_handled_w/in_sl', 'talk_time',\n        'acw_time', 'hold_time', 'total_time', 'queue_time', 'abandoned_time', ]\n\n\n# In[ ]:\n\ndef export_file(location , df , column , mode='w' , header=True , encoding='cp1252' , compression='bz2' , columns=None ,\n                index=False , ext='.csv.bz2'):\n    for file_date in df.index.get_level_values(column).unique():\n        try:\n            x = df.loc[file_date]\n            export_file = '{}{}{}{}'.format(location , export_filename , file_date , ext)\n            print('Saving file {}'.format(export_file))\n            x.to_csv(export_file , mode=mode , encoding=encoding , compression=compression , columns=columns ,\n                     index=index , header=header)\n        except TypeError:\n            print('Error')\n\n\n# In[ ]:\n\nexport_file(modified_location, hourly, 'call_date', columns=cols)\n","sub_path":"py_files/25004_interval_daily.py","file_name":"25004_interval_daily.py","file_ext":"py","file_size_in_byte":6392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
{"seq_id":"218898799","text":"import os\n\nimport requests\n\n\nurl = 'http://www.ssckkr.kar.nic.in/'\nfile_name = 'index.html'\n\nresponse = requests.get(url)\n\ndata = response.text.strip()\n\nif not os.path.exists(file_name):\n    with open(file_name, 'w') as fh:\n        fh.write(data)\n\ncache = open(file_name).read().strip()\nindex = data.find('1.\\'\"CLICK')\nd_slice = slice(index, index + 50)\n\nif cache[d_slice] != data[d_slice]:\n    print('website changed')\n","sub_path":"python/site_change_alert.py","file_name":"site_change_alert.py","file_ext":"py","file_size_in_byte":420,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
{"seq_id":"236601510","text":"import sys\n\nimport requests\nimport os\nimport random\nimport 
time\n\n#store folder name so it doesn't create another one.\nfolder = \"\";\nrn = random.randrange(1000,10000);\nfiles = [\"full.jpg\",\"msgbg.xml\",\"mod1.xml\",\"mod2.xml\"];\ndirectory = \"/path/to/dir/\"; #remember to set this path else it won't work. the path needs to be the same path where the file is.\n\ndef download(username):\n    f = open(\"logs.txt\", \"a\");\n    tmp_folder = str(\"%d_%s_tmp\" % (rn,username));\n    folder = tmp_folder \n    os.popen(\"sudo mkdir \" + folder); #create tmp dir\n    for item in files:\n        os.popen(\"sudo wget -q -O \"+ directory + folder + \"/\" + item + \" http://st.chatango.com/profileimg/\" + username[0:1] + \"/\" + username[1:2] + \"/\" + username + \"/\" + item + \"\"); \n    os.popen(\"sudo zip -r \" + folder + \" \" + folder) #zips everything from the tmp folder with the same name.\n    os.popen(\"sudo rm -r \" + folder)\n    print (\"Finished Downloading files for \" + username);\n    \n\ndef check(username):\n    url = requests.get(\"http://fp.chatango.com/profileimg/\" + username[0:1] + \"/\" + username[1:2] + \"/\" + username + \"/mod1.xml\");\n    l = len(url.text)\n    x = url.text[l - 6:] #checking to see if the closing </mod> tag is present indicating a proper file with real data in it.\n    if x == \"</mod>\":\n        download(username);\n    else:\n        print (\"No real data to collect for \" + username)\n    \n\ncheck(sys.argv[1]);\n\n","sub_path":"dl-ch.py","file_name":"dl-ch.py","file_ext":"py","file_size_in_byte":1323,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
{"seq_id":"566957004","text":"import pytest\nfrom app.exceptions import ValidationError\nfrom app.utils import (auto_slugify_edition, validate_path_slug,\n                       validate_product_slug)\n\n\n@pytest.mark.parametrize(\n    'git_refs,expected',\n    [(['tickets/DM-1234'], 'DM-1234'),\n     (['tickets/LCR-758'], 'LCR-758'),\n     (['master'], 'master'),\n     (['u/rowen/r12_patch1'], 'u-rowen-r12-patch1'),\n     (['tickets/DM-1234', 'tickets/DM-5678'],\n      'tickets-DM-1234-tickets-DM-5678')])\ndef test_auto_slugify_edition(git_refs, expected):\n    assert expected == auto_slugify_edition(git_refs)\n    assert validate_path_slug(auto_slugify_edition(git_refs))\n\n\ndef test_validate_product_slug():\n    with pytest.raises(ValidationError):\n        validate_product_slug('DM-1234')\n    with pytest.raises(ValidationError):\n        validate_product_slug('DM_1234')\n    assert validate_product_slug('dm-1234') is True\n","sub_path":"tests/test_utils.py","file_name":"test_utils.py","file_ext":"py","file_size_in_byte":889,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
{"seq_id":"554081929","text":"# Copyright (c) 2015 Yubico AB\n# All rights reserved.\n#\n#   Redistribution and use in source and binary forms, with or\n#   without modification, are permitted provided that the following\n#   conditions are met:\n#\n#    1. Redistributions of source code must retain the above copyright\n#       notice, this list of conditions and the following disclaimer.\n#    2. Redistributions in binary form must reproduce the above\n#       copyright notice, this list of conditions and the following\n#       disclaimer in the documentation and/or other materials provided\n#       with the distribution.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS\n# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE\n# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,\n# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,\n# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT\n# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN\n# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n# POSSIBILITY OF SUCH DAMAGE.\n\nfrom yubikit.core import USB_INTERFACE, ApplicationNotAvailableError\nfrom yubikit.core.fido import FidoConnection\nfrom yubikit.core.smartcard import SmartCardConnection\n\nimport ykman.logging_setup\n\nfrom .. import __version__\nfrom ..scard import list_devices as list_ccid, list_readers\nfrom ..util import Cve201715361VulnerableError\nfrom ..device import (\n read_info,\n get_name,\n list_all_devices,\n scan_devices,\n get_connection_types,\n connect_to_device,\n)\nfrom .util import UpperCaseChoice, YkmanContextObject\nfrom .info import info\nfrom .mode import mode\nfrom .otp import otp\nfrom .opgp import openpgp\nfrom .oath import oath\nfrom .piv import piv\nfrom .fido import fido\nfrom .config import config\nimport click\nimport time\nimport logging\nimport sys\n\n\nlogger = logging.getLogger(__name__)\n\n\nCLICK_CONTEXT_SETTINGS = dict(help_option_names=[\"-h\", \"--help\"], max_content_width=999)\n\n\ndef retrying_connect(serial, interfaces, attempts=10):\n while True:\n try:\n return connect_to_device(serial, get_connection_types(interfaces))\n except Exception as e:\n if attempts:\n attempts -= 1\n logger.error(\"Failed opening connection, retry in 0.5s\", exc_info=e)\n time.sleep(0.5)\n else:\n raise\n\n\ndef print_version(ctx, param, value):\n if not value or ctx.resilient_parsing:\n return\n click.echo(\"YubiKey Manager (ykman) version: {}\".format(__version__))\n ctx.exit()\n\n\ndef _disabled_interface(ctx, interfaces, cmd_name):\n req = \", \".join((t.name for t in USB_INTERFACE if t & interfaces))\n click.echo(\n \"Command '{}' requires one of the following USB interfaces \"\n \"to be enabled: '{}'.\".format(cmd_name, req)\n )\n ctx.fail(\"Use 'ykman config usb' to set the enabled USB interfaces.\")\n\n\ndef _run_cmd_for_serial(ctx, cmd, interfaces, serial):\n try:\n return retrying_connect(serial, interfaces)\n except ValueError:\n try:\n # Serial not found, see if it's among other interfaces in USB enabled:\n conn = retrying_connect(serial, sum(USB_INTERFACE) ^ interfaces)[0]\n conn.close()\n _disabled_interface(ctx, interfaces, cmd)\n except ValueError:\n ctx.fail(\n \"Failed connecting to a YubiKey with serial: {}. 
\"\n \"Make sure the application has the required \"\n \"permissions.\".format(serial)\n )\n\n\ndef _run_cmd_for_single(ctx, cmd, interfaces, reader_name=None):\n # Use a specific CCID reader\n if reader_name:\n if USB_INTERFACE.CCID in interfaces or cmd in (fido.name, otp.name):\n readers = list_ccid(reader_name)\n if len(readers) == 1:\n dev = readers[0]\n try:\n if cmd == fido.name:\n conn = dev.open_connection(FidoConnection)\n else:\n conn = dev.open_connection(SmartCardConnection)\n info = read_info(dev.pid, conn)\n return conn, dev.pid, info\n except Exception as e:\n logger.error(\"Failure connecting to card\", exc_info=e)\n ctx.fail(\"Failed to connect: {}\".format(e))\n elif len(readers) > 1:\n ctx.fail(\"Multiple YubiKeys on external readers detected.\")\n else:\n ctx.fail(\"No YubiKey found on external reader.\")\n else:\n ctx.fail(\"Not a CCID command.\")\n\n # Find all connected devices\n devices, _ = scan_devices()\n n_devs = sum(devices.values())\n if n_devs == 0:\n ctx.fail(\"No YubiKey detected!\")\n if n_devs > 1:\n ctx.fail(\n \"Multiple YubiKeys detected. Use --device SERIAL to specify \"\n \"which one to use.\"\n )\n\n # Only one connected device, check if any needed interfaces are available\n pid = next(iter(devices.keys()))\n if pid.get_interfaces() & interfaces:\n return retrying_connect(None, interfaces)\n _disabled_interface(ctx, interfaces, cmd)\n\n\n@click.group(context_settings=CLICK_CONTEXT_SETTINGS)\n@click.option(\n \"-v\",\n \"--version\",\n is_flag=True,\n callback=print_version,\n expose_value=False,\n is_eager=True,\n)\n@click.option(\"-d\", \"--device\", type=int, metavar=\"SERIAL\")\n@click.option(\n \"-l\",\n \"--log-level\",\n default=None,\n type=UpperCaseChoice(ykman.logging_setup.LOG_LEVEL_NAMES),\n help=\"Enable logging at given verbosity level.\",\n)\n@click.option(\n \"--log-file\",\n default=None,\n type=str,\n metavar=\"FILE\",\n help=\"Write logs to the given FILE instead of standard error; \"\n \"ignored unless --log-level is also set.\",\n)\n@click.option(\n \"-r\",\n \"--reader\",\n help=\"Use an external smart card reader. 
Conflicts with --device and \" \"list.\",\n metavar=\"NAME\",\n default=None,\n)\n@click.pass_context\ndef cli(ctx, device, log_level, log_file, reader):\n \"\"\"\n Configure your YubiKey via the command line.\n\n Examples:\n\n \\b\n List connected YubiKeys, only output serial number:\n $ ykman list --serials\n\n \\b\n Show information about YubiKey with serial number 0123456:\n $ ykman --device 0123456 info\n \"\"\"\n ctx.obj = YkmanContextObject()\n\n if log_level:\n ykman.logging_setup.setup(log_level, log_file=log_file)\n\n if reader and device:\n ctx.fail(\"--reader and --device options can't be combined.\")\n\n subcmd = next(c for c in COMMANDS if c.name == ctx.invoked_subcommand)\n if subcmd == list_keys:\n if reader:\n ctx.fail(\"--reader and list command can't be combined.\")\n return\n\n interfaces = getattr(subcmd, \"interfaces\", USB_INTERFACE(sum(USB_INTERFACE)))\n if interfaces:\n\n def resolve():\n if not getattr(resolve, \"items\", None):\n if device is not None:\n resolve.items = _run_cmd_for_serial(\n ctx, subcmd.name, interfaces, device\n )\n else:\n resolve.items = _run_cmd_for_single(\n ctx, subcmd.name, interfaces, reader\n )\n ctx.call_on_close(resolve.items[0].close)\n return resolve.items\n\n ctx.obj.add_resolver(\"conn\", lambda: resolve()[0])\n ctx.obj.add_resolver(\"pid\", lambda: resolve()[1])\n ctx.obj.add_resolver(\"info\", lambda: resolve()[2])\n\n\n@cli.command(\"list\")\n@click.option(\n \"-s\",\n \"--serials\",\n is_flag=True,\n help=\"Output only serial \"\n \"numbers, one per line (devices without serial will be omitted).\",\n)\n@click.option(\n \"-r\", \"--readers\", is_flag=True, help=\"List available smart card readers.\"\n)\n@click.pass_context\ndef list_keys(ctx, serials, readers):\n \"\"\"\n List connected YubiKeys.\n \"\"\"\n\n if readers:\n for reader in list_readers():\n click.echo(reader.name)\n ctx.exit()\n\n # List all attached devices\n for pid, dev_info in list_all_devices():\n if serials:\n if dev_info.serial:\n click.echo(dev_info.serial)\n else:\n click.echo(\n \"{} ({}) [{}]{}\".format(\n get_name(dev_info, pid.get_type()),\n \"%d.%d.%d\" % dev_info.version if dev_info.version else \"unknown\",\n pid.name.split(\"_\", 1)[1].replace(\"_\", \"+\"),\n \" Serial: {}\".format(dev_info.serial) if dev_info.serial else \"\",\n )\n )\n\n\nCOMMANDS = (list_keys, info, mode, otp, openpgp, oath, piv, fido, config)\n\n\nfor cmd in COMMANDS:\n cli.add_command(cmd)\n\n\ndef main():\n try:\n cli(obj={})\n except ApplicationNotAvailableError as e:\n logger.error(\"Error\", exc_info=e)\n click.echo(\n \"The functionality required for this command is not enabled or not \"\n \"available on this YubiKey.\"\n )\n return 1\n except ValueError as e:\n logger.error(\"Error\", exc_info=e)\n click.echo(\"Error: \" + str(e))\n return 1\n except Cve201715361VulnerableError as err:\n logger.error(\"Error\", exc_info=err)\n click.echo(\"Error: \" + str(err))\n return 2\n\n\nif __name__ == \"__main__\":\n sys.exit(main())\n","sub_path":"ykman/cli/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":9657,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"123327886","text":"#!/usr/bin/python3\n# Solved by Bogdan Trif @ Completed on Wed, 5 Oct 2016, 21:44\n#The Euler Project https://projecteuler.net\n'''\nMaximum path sum II - Problem 67\nBy starting at the top of the triangle below and moving to adjacent numbers on the row below, the maximum total from top to bottom is 23.\n3\n7 4\n2 4 6\n8 5 9 
3\n\nThat is, 3 + 7 + 4 + 9 = 23.\nFind the maximum total from top to bottom in triangle.txt (right click and 'Save Link/Target As...'), a 15K text file\ncontaining a triangle with one-hundred rows.\n\nNOTE: This is a much more difficult version of Problem 18. It is not possible to try every route to solve this problem,\nas there are 2**99 altogether! If you could check one trillion (10**12) routes every second it would take\nover twenty billion years to check them all. There is an efficient algorithm to solve it. ;o)\n'''\nf = open('pb067_triangle.txt', 'r')\ntext = f.read()\nf.close()\n# Initialize and populate triangle\ntriangle = []\n#print(text,' ',type(text),'\\n')\n\nfor row in text.split('\\n'):\n triangle.append(list(map(int, row.split(' ')))) # This maps the strings into ints on the run, SMART TECHNIQUE\n\nfor i in range(len(triangle)-2, -1,-1):\n for j in range(0,i+1):\n triangle[i][j] += max(triangle[i+1][j], triangle[i+1][j+1])\n #print(triangle[i][j])\n #print(max(triangle[i+1][j], triangle[i+1][j+1]))\n #print(triangle[i], end=' \\n ')\n\nprint('\\n And the largest path is : ',triangle[0][0])","sub_path":"Project EULER/pb067 Maximum path sum II.py","file_name":"pb067 Maximum path sum II.py","file_ext":"py","file_size_in_byte":1473,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"364144943","text":"# 0, 56, 42, 8, 13, 79, 5, 2, 18\n# Python program to find smallest and min2nd smallest elements\ndef _2ndmin(Xlist):\n min1st = min2nd = max(Xlist)\n for i in range(len(Xlist)):\n if Xlist[i] < min1st:\n min2nd = min1st\n min1st = Xlist[i]\n elif Xlist[i] < min2nd and Xlist[i] != min1st:\n min2nd = Xlist[i]\n if min2nd == max(Xlist):\n print(\"No min2nd smallest element\")\n else:\n print ('The second smallest element is',min2nd)\n\n# Driver function to test above function\nXlist = [56, 42, 8, 13, 79, 5, 2, 18, 6]\n_2ndmin(Xlist)\n\n","sub_path":"second_sml2.py","file_name":"second_sml2.py","file_ext":"py","file_size_in_byte":596,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"344311189","text":"from sys import exit\ndef gen_triangle_numbers(k):\n for x in range(1, k + 1):\n if x == 1:\n n = 1 \n else:\n n += x\n yield n\n\ndef num_of_divisors(n):\n limit = n\n num = 0\n x = 1\n while x < limit:\n if n % x == 0:\n limit = n / x\n if limit != x:\n num += 1\n num += 1\n x += 1\n return num\n\nx = gen_triangle_numbers(50000)\n\nfor x in x:\n greater = num_of_divisors(x) >= 500\n print(x, greater)\n if greater:\n exit()","sub_path":"problems001to099/Problem12.py","file_name":"Problem12.py","file_ext":"py","file_size_in_byte":539,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"186579466","text":"# -*- coding: utf8 -*-\nimport json\n\nfrom mock import patch\nfrom nose.tools import eq_\n\nfrom addons.models import Addon\nfrom amo.urlresolvers import reverse\nimport amo.tests\n\n\nclass TestPerfViews(amo.tests.TestCase):\n fixtures = ['base/users', 'base/addon_3615']\n\n def setUp(self):\n super(TestPerfViews, self).setUp()\n assert self.client.login(username='del@icio.us', password='password')\n addon = Addon.objects.get(pk=3615)\n self.file = addon.latest_version.files.get()\n self.patches = [patch('waffle.flag_is_active'),\n patch('waffle.helpers.flag_is_active')]\n for p in self.patches:\n p.start().return_value = True\n p = patch('devhub.perf.start_perf_test')\n self.perf_test = p.start()\n self.patches.append(p)\n 
self.perf_calls = None\n\n    def tearDown(self):\n        for p in self.patches:\n            p.stop()\n        super(TestPerfViews, self).tearDown()\n\n    def assert_call(self, expected_call):\n        if not self.perf_calls:\n            self.perf_calls = [tuple(c) for c in\n                               self.perf_test.call_args_list]\n        assert expected_call in self.perf_calls, (\n            'Call was not made: %s' % str(expected_call))\n\n    def start(self):\n        re = self.client.get(reverse('devhub.file_perf_tests_start',\n                             args=[self.file.version.addon.id, self.file.id]),\n                             follow=True)\n        eq_(re.status_code, 200)\n        return json.loads(re.content)\n\n    def test_start_linux(self):\n        self.file.update(platform=amo.PLATFORM_LINUX.id)\n        re = self.start()\n        eq_(re, {'success': True})\n        self.assert_call(((self.file, 'linux', 'firefox3.6'), {}))\n        self.assert_call(((self.file, 'linux', 'firefox6.0'), {}))\n\n    def test_start_all(self):\n        self.file.update(platform=amo.PLATFORM_ALL.id)\n        self.start()\n        self.assert_call(((self.file, 'linux', 'firefox6.0'), {}))\n\n    def test_unsupported_plat(self):\n        self.file.update(platform=amo.PLATFORM_ANDROID.id)\n        eq_(self.start(), {'success': False})\n","sub_path":"apps/devhub/tests/test_views_perf.py","file_name":"test_views_perf.py","file_ext":"py","file_size_in_byte":2146,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
{"seq_id":"72608712","text":"from datetime import datetime\nfrom PIL import Image\nimport numpy as np\nimport matplotlib.pyplot as plt\nplt.switch_backend('agg')\nimport io\nfrom torchvision import transforms as trans\nfrom data.data_pipe import de_preprocess\nimport torch\nfrom model import l2_norm\nimport pdb\nimport cv2\nfrom pathlib import Path\nimport pickle\n\ndef separate_bn_paras(modules):\n    if not isinstance(modules, list):\n        modules = [*modules.modules()]\n    paras_only_bn = []\n    paras_wo_bn = []\n    for layer in modules:\n        if 'model' in str(layer.__class__):\n            continue\n        if 'container' in str(layer.__class__):\n            continue\n        else:\n            if 'batchnorm' in str(layer.__class__):\n                paras_only_bn.extend([*layer.parameters()])\n            else:\n                paras_wo_bn.extend([*layer.parameters()])\n    return paras_only_bn, paras_wo_bn\n\ndef prepare_facebank(conf, imlst, model, mtcnn, tta = True, save = False):\n    model.eval()\n    #embeddings = []\n    ftoid = {}\n    idinfo = []\n    idx = 0\n    embs = []\n    for classnm, files in imlst.items():\n        for f in files:\n            if not Path(f).is_file():\n                print('File {} not found'.format(f))\n                continue\n            else:\n                try:\n                    img = Image.open(f).convert('RGB')\n                except:\n                    # 'imgfn' was undefined here; report the file that actually failed\n                    print('Loading failed for {}'.format(f))\n                    continue\n            try:\n                img = mtcnn.align(img)\n            except:\n                img = img.resize((conf.input_size[0],conf.input_size[1]), Image.ANTIALIAS)\n                #print('mtcnn failed for {}'.format(f))\n                #data = np.array((cv2.cvtColor(np.asarray(img), cv2.COLOR_RGB2GRAY),)*3).T\n                #img = Image.fromarray(data.astype(np.uint8))\n                data = np.asarray(img)\n                img = Image.fromarray(data[:,:,::-1].astype(np.uint8))\n            \n            ftoid[f] = len(embs)\n            idinfo.append((f,classnm))\n            with torch.no_grad():\n                if tta:\n                    mirror = trans.functional.hflip(img)\n                    emb = l2_norm(model(conf.test_transform(img).unsqueeze(0).to(conf.device)))\n                    emb_mirror = l2_norm(model(conf.test_transform(mirror).unsqueeze(0).to(conf.device)))\n                    embs.append(l2_norm(emb + emb_mirror))\n                else: \n                    embs.append(l2_norm(model(conf.test_transform(img).unsqueeze(0).to(conf.device))))\n    #embedding = l2_norm(torch.cat(embs).mean(0,keepdim=True))\n    #embeddings.append(embedding)\n    #names.append(classnm)\n    #embeddings = torch.cat(embeddings)\n    embeddings = torch.cat(embs)\n    if 
    if save:\n        torch.save(embeddings, conf.facebank_path/'facebank.pth')\n        with open(conf.facebank_path/'ftoid.pkl', 'wb') as outfile:\n            pickle.dump(ftoid, outfile, protocol=pickle.HIGHEST_PROTOCOL)\n        np.save(conf.facebank_path/'idinfo', idinfo)\n    return embeddings,ftoid,idinfo\n\ndef load_facebank(conf):\n    embeddings = torch.load(conf.facebank_path/'facebank.pth',map_location=lambda storage, loc: storage)\n    with open(conf.facebank_path/'ftoid.pkl', 'rb') as infile:\n        ftoid = pickle.load(infile)\n    idinfo = np.load(conf.facebank_path/'idinfo.npy')\n    return embeddings, ftoid, idinfo\n\ndef face_reader(conf, conn, flag, boxes_arr, result_arr, learner, mtcnn, targets, tta):\n    while True:\n        try:\n            image = conn.recv()\n        except:\n            continue\n        try: \n            bboxes, faces = mtcnn.align_multi(image, limit=conf.face_limit)\n        except:\n            bboxes, faces = [], []\n        \n        results = learner.infer(conf, faces, targets, tta)\n        \n        if len(bboxes) > 0:\n            print('bboxes in reader : {}'.format(bboxes))\n            bboxes = bboxes[:,:-1] #shape:[10,4],only keep 10 highest possibility faces\n            bboxes = bboxes.astype(int)\n            bboxes = bboxes + [-1,-1,1,1] # personal choice \n            assert bboxes.shape[0] == results.shape[0],'bbox and faces number not same'\n            bboxes = bboxes.reshape([-1])\n            for i in range(len(boxes_arr)):\n                if i < len(bboxes):\n                    boxes_arr[i] = bboxes[i]\n                else:\n                    boxes_arr[i] = 0 \n            for i in range(len(result_arr)):\n                if i < len(results):\n                    result_arr[i] = results[i]\n                else:\n                    result_arr[i] = -1 \n        else:\n            for i in range(len(boxes_arr)):\n                boxes_arr[i] = 0 # by default,it's all 0\n            for i in range(len(result_arr)):\n                result_arr[i] = -1 # by default,it's all -1\n        print('boxes_arr {}'.format(boxes_arr[:4]))\n        print('result_arr {}'.format(result_arr[:4]))\n        flag.value = 0\n\nhflip = trans.Compose([\n            de_preprocess,\n            trans.ToPILImage(),\n            trans.functional.hflip,\n            trans.ToTensor(),\n            trans.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])\n        ])\n\ndef hflip_batch(imgs_tensor):\n    hfliped_imgs = torch.empty_like(imgs_tensor)\n    for i, img_ten in enumerate(imgs_tensor):\n        hfliped_imgs[i] = hflip(img_ten)\n    return hfliped_imgs\n\ndef get_time():\n    return (str(datetime.now())[:-10]).replace(' ','-').replace(':','-')\n\ndef gen_plot(fpr, tpr):\n    \"\"\"Create a pyplot plot and save to buffer.\"\"\"\n    plt.figure()\n    plt.xlabel(\"FPR\", fontsize=14)\n    plt.ylabel(\"TPR\", fontsize=14)\n    plt.title(\"ROC Curve\", fontsize=14)\n    plot = plt.plot(fpr, tpr, linewidth=2)\n    buf = io.BytesIO()\n    plt.savefig(buf, format='jpeg')\n    buf.seek(0)\n    plt.close()\n    return buf\n\ndef draw_box_name(bbox,name,frame):\n    frame = cv2.rectangle(frame,(bbox[0],bbox[1]),(bbox[2],bbox[3]),(0,0,255),6)\n    frame = cv2.putText(frame,\n                    name,\n                    (bbox[0],bbox[1]), \n                    cv2.FONT_HERSHEY_SIMPLEX, \n                    2,\n                    (0,255,0),\n                    3,\n                    cv2.LINE_AA)\n    return frame\n","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":6236,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
{"seq_id":"424355155","text":"# -*- coding:utf-8 -*-\n__author__ = 'LiQiang'\n__date__ = '2018/3/9 9:26'\n\nfrom django.conf.urls import url,include\n\nfrom .views import OrgView, AddUserAskView,OrgHomeView\n\nurlpatterns = [\n    # course organization list page\n    url(r'^list/$', OrgView.as_view(), name=\"org_list\"),\n    url(r'^add_ask/$', AddUserAskView.as_view(), name=\"add_ask\"),\n    url(r'^home/(?P<org_id>\\d+)/$', OrgHomeView.as_view(), name=\"org_home\"),\n\n    # organization favorites\n
name=\"add_ask\"),\n\n]","sub_path":"apps/organization/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":499,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"359669982","text":"__author__ = 'yinyan'\n\"\"\"\nQuestion Description:\nGiven a linked list and a value x, partition it such that all nodes less than x come before nodes greater than or equal to x.\n\nYou should preserve the original relative order of the nodes in each of the two partitions.\n\nFor example,\nGiven 1->4->3->2->5->2 and x = 3,\nreturn 1->2->2->4->3->5.\n\n\"\"\"\n#************************************Using two listNode****************************************************\n#left <3, right>3\n#\n#\n#Time Complexity O()\n#Space Complexity O()\n##################################Using ###########################################################################\n#\n#\n#\n#\n#\n#Time Complexity O()\n#Space Complexity O()\n\"\"\"$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$Pitfall and Failures$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$\"\"\"\n#\n#\n#\n#\n############################################################################################################\nclass ListNode(object):\n def __init__(self, x):\n self.val = x\n self.next = None\n\nclass Solution(object):\n def partition(self, head, x):\n \"\"\"\n :type head: ListNode\n :type x: int\n :rtype: ListNode\n \"\"\"\n head1=p1=ListNode(0)\n head2=p2=ListNode(0)\n while head:\n if head.val>x:\n p2=p2.next=head\n else:\n p1=p1.next=head\n head=head.next\n p2.next=None\n p1.next=head2.next\n return head1.next\n\n\n\n","sub_path":"PartionList_86.py","file_name":"PartionList_86.py","file_ext":"py","file_size_in_byte":1467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"339673577","text":"### Given the SAXS profiles of the target and individual states, calculate the SAXS discrepancy scores\n### @Chuankai Zhao, czhao37@illinois.edu\n\nimport numpy as np\nimport math\nimport glob\n\n### load the target SAXS profiles, including both intensities and errors\nf_saxs_data = np.loadtxt(\"avg_native.dat\")\nf_saxs = np.transpose(f_saxs_data)[1]\nf_saxs_err = np.transpose(f_saxs_data)[2]\n\nN = 51\nscores = []\n\n### calculate the reduced chi^2 SAXS discrepancy scores between each state and the target \nfor i in range(500):\n state_score = []\n for j in range(100):\n s_saxs = np.loadtxt(\"STATE\" + str(i) + \"_\" + str(j) + \".txt\")\n s_saxs = np.transpose(s_saxs)[1]\n #s_saxs = (f_saxs[0]/s_saxs[0])*s_saxs\n sum = 0.\n for k in range(N):\n sum = sum + ( (f_saxs[k] - s_saxs[k])/f_saxs_err[k] )**2\n sum = sum/(N-1)\n state_score.append(sum)\n scores.append([i, np.mean(state_score), np.std(state_score)])\n\n### save the SAXS discrepancy scores \nnp.savetxt(\"ProteinG_discrepancy.txt\", np.array(scores))\n","sub_path":"04-SAXSDiscrepancy/discrepancy.py","file_name":"discrepancy.py","file_ext":"py","file_size_in_byte":1019,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"560987107","text":"# -*- coding: utf-8 -*-\n\"\"\"Episode: 10-2.ヘイトの魔獣\n\"\"\"\n## path\nimport os\nimport sys\nsys.path.append(os.path.join(os.path.dirname(__file__), '../..'))\nsys.path.append('storybuilder')\n## local libs\nfrom storybuilder.builder.world import World\nfrom storybuilder.builder.writer import Writer\n\n\n## define alias\nW = Writer\n_ = W.getWho()\n\n\n## scenes\ndef sc_destroymaju(w: World):\n hero, mako, sol, yula = 
W(w.hero), W(w.mako), W(w.sol), W(w.yula)\n maju = W(w.maju)\n inside, outside = W(w.inside), W(w.outside)\n return w.scene(\"魔獣による街の破壊\",\n hero.be(),\n mako.be(),\n sol.be(),\n yula.be(),\n maju.come(\"巨大な$w_majuが襲っている\"),\n sol.talk(\"おいおいおい\", \"街が壊されてるぞ\"),\n mako.talk(\"全部を無に返すんです\", \"憎悪という感情がこの世界をリセットするんです\"),\n mako.do(\"映像を見せる\"),\n hero.talk(\"酷い……\"),\n hero.do(\"$makoが映し出した映像では、魔物も人間も無差別に$w_majuにより殺されていた\"),\n hero.do(\"追い詰められた教会の屋根に登り、そこから見ている\"),\n )\n\ndef sc_mako_desicion(w: World):\n hero, mako, sol, yula = W(w.hero), W(w.mako), W(w.sol), W(w.yula)\n maju = W(w.maju)\n inside, outside = W(w.inside), W(w.outside)\n return w.scene(\"$makoの決意\",\n mako.talk(\"あの、それでこの$rebuildboxを使えば、あの$majuが生まれない世界に戻せるんです\",\n \"もう二度と生まれない世界にできるんです\"),\n sol.talk(\"それならもうそいつ使ってもらえばいいんじゃね?\",\n \"このままだと魔王どうこうでなく、世界が滅ぶんだろ?\"),\n mako.talk(\"はい\"),\n hero.talk(\"何とか倒せないの? あれって\",\n \"だって$meたち人間が生み出したものなんだろ?\"),\n mako.talk(\"倒せませんよ\",\n \"だって$w_majuはこの世界の外からやってきた存在なんですから\",\n \"この世界での死という概念がありません\"),\n hero.talk(\"そんなのって\"),\n mako.talk(\"だから$meがこれを使いますよ\"),\n sol.talk(\"そうだな、頼むわ\"),\n yula.talk(\"待って\"),\n sol.talk(\"何だよ、$yula\", \"別に元に戻るだけで痛いことも何もないんだからいいだろ?\"),\n yula.talk(\"違うよ\",\n \"$makoが言ってるの聞いたのよ\",\n \"それを使うと命の火が短くなる\", \"あと一回使えばもう$makoは消えてなくなってしまうって\"),\n hero.talk(\"本当なの?\"),\n mako.do(\"頷く\"),\n hero.talk(\"じゃあ駄目だよ!\", \"なんで$makoを犠牲にしなくちゃいけないんだよ\"),\n mako.talk(\"だって$meは、人間の敵で、魔王ですから\"),\n )\n\ndef sc_heros_idea(w: World):\n hero, mako, sol, yula = W(w.hero), W(w.mako), W(w.sol), W(w.yula)\n maju = W(w.maju)\n inside, outside = W(w.inside), W(w.outside)\n return w.scene(\"勇者のアイデア\",\n hero.do(\"黙り込むみんな\"),\n sol.talk(\"あ、潰された\"),\n yula.talk(\"どうやらあの$w_maju、世界中に出てるみたい\",\n \"ほら、他の街も潰されていってる\"),\n hero.do(\"$yulaが$smaphで見ている\"),\n mako.talk(\"それじゃあ$me、使いますね\"),\n hero.talk(\"待って\"),\n sol.talk(\"何だよ$k_hero\"),\n hero.talk(\"別に倒さなくてもいい\"),\n yula.talk(\"何言い出すの!\"),\n hero.talk(\"$meに考えがあるんだ\"),\n hero.do(\"自分の$smaphを見た\"),\n hero.talk(\"$mako、この$smaphって魔族が作ったものだって言ったよね?\",\n \"だったらこれも、魔族で作ってるの?\"),\n hero.do(\"ガチャでひどい目にあったゲーム画面を見せる\"),\n mako.talk(\"そうですよ\", \"うちの会社で製作してます\",\n \"人間たちから欲望を吸い上げるのにすごくいいんです\"),\n hero.talk(\"これを使って$majuを何とかできるかも\"),\n mako.talk(\"え?\"),\n )\n\n## episode\ndef ep_hatedaemon(w: World):\n return w.episode(\"10-2.ヘイトの魔獣\",\n sc_destroymaju(w),\n sc_mako_desicion(w),\n sc_heros_idea(w),\n ## NOTE\n ## - ヘイトの魔獣は大きくなり、次々と街を襲う\n ## - 魔子は世界のリセットを使おうとするが、勇者が待ったをかける\n ## - 勇者は魔子に魔獣にガチャをさせることを提案する\n )\n","sub_path":"src/c10_maoucoming/e2_hatemaju.py","file_name":"e2_hatemaju.py","file_ext":"py","file_size_in_byte":5009,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"344170656","text":"import numpy as np\nfrom numpy import array\nimport collections\nimport matplotlib.pyplot as plt\nimport sys\nimport numpy as np\nfrom numpy import array\nimport collections\nimport matplotlib.pyplot as plt\nimport sys\nfrom sklearn.cluster import KMeans\n\nsys.path.append(\"../tools/\")\nimport pylab as pl\n\n\ndef num(s):\n try:\n return int(s)\n except ValueError:\n return float(s)\n\n\nimport random\n\n# generating random numbers\n\n\nrandom_list1 = random.sample(range(1, 400), 300)\n\nrandom_list = sorted(random_list1)\n\nprint (random_list)\n\nbigX = [] * 10000000\n\nfile_prefix = 'action_'\n\nanswer = [] * 1000000000\n\nlistOne = [] * 10000\n\nlinesOne = [0] * 100\n\n# lines one has the first reference skeleton\n\nspineX = 0\nspineY = 0\nspineZ = 0\n\nfor i in range(0, len(random_list)):\n filename = file_prefix + 
str(random_list[i]) + '.txt'\n print (filename)\n\n lines = open(filename).read().splitlines()\n print (lines[0])\n\n if i == 0:\n def_spine_line = lines[10]\n def_spine_first = def_spine_line.split(',')\n\n spineX = num(def_spine_first[0])\n spineY = num(def_spine_first[1])\n spineZ = num(def_spine_first[2])\n print ('enter the dragon')\n for ii in range(0, 20):\n linesOne[ii] = lines[ii]\n\n flag = 0\n print (len(lines))\n for j in range(10, (len(lines) - 29)):\n spineLineNumber = 30 + flag\n if spineLineNumber > len(lines):\n break\n\n spineLine = lines[spineLineNumber]\n spineLineArray = spineLine.split(',')\n\n spineTempX = num(spineLineArray[0])\n spineTempY = num(spineLineArray[1])\n spineTempZ = num(spineLineArray[2])\n\n diffX = spineTempX - spineX\n diffY = spineTempY - spineY\n diffZ = spineTempZ - spineZ\n\n coordinateX = [] * 1000\n coordinateY = [] * 1000\n coordinateZ = [] * 1000\n\n tempSum = 0\n\n for k in range(0, 20):\n listOne = linesOne[k].split(',')\n\n listTwo = lines[k + 20 + flag].split(',')\n\n tempSum += (((num(listTwo[0]) - num(diffX)) - num(listOne[0])) ** (2) + (\n (num(listTwo[1]) - num(diffY)) - num(listOne[1])) ** (2) + (\n (num(listTwo[2]) - num(diffZ)) - num(listOne[2])) ** (2)) ** (0.5)\n\n # print tempSum\n\n j += 20\n flag += 20\n\n answer.append(tempSum)\n# print (answer)\nprint (linesOne)\n\nnumpyArray = array(answer)\n\nnumArray = numpyArray.reshape(-1, 1)\n\nkmeans = KMeans(n_clusters=100, random_state=0).fit(numArray)\n\nprint (kmeans.labels_)\nlabel = kmeans.labels_\nlabel_list = label.tolist()\nprint(len(label_list))\n\n# here's an interesting information\ncounter = collections.Counter(label_list)\nprint(counter)\n\npred = kmeans.predict(numArray)\npoi = \"poi\"\n\nprint (kmeans.cluster_centers_)\n\ntotal_sum = 0\n\n# walk model\n\nsum_walk = 0\n\nfor p in range(0, len(random_list)):\n if (random_list[p] >= 40):\n break\n\n filename = file_prefix + str(random_list[p]) + '.txt'\n print (filename)\n\n lines = open(filename).read().splitlines()\n print (len(lines))\n sub_length_walk = len(lines)\n sum_walk += sub_length_walk\n\ncounter_walk = sum_walk / 20\n\nwalk_model = [[0] * 100 for i in range(100)]\n\nfor l in range(1, int(counter_walk)):\n walk_model[kmeans.labels_[l]][kmeans.labels_[l - 1]] += 1 / counter_walk\n\nprint (walk_model)\n\ntotal_sum = total_sum + int(counter_walk)\n\n# grab model\n\nsum_grab = 0\n\nfor p in range(0, len(random_list)):\n if (random_list[p] >= 40 and random_list[p] < 80):\n filename = file_prefix + str(random_list[p]) + '.txt'\n print (filename)\n\n lines = open(filename).read().splitlines()\n print (len(lines))\n sub_length_grab = len(lines)\n sum_grab += sub_length_grab\n if (random_list[p] >= 80):\n break\n\ncounter_grab = sum_grab / 20\n\ngrab_model = [[0] * 100 for i in range(100)]\n\nfor l in range(total_sum, total_sum + int(counter_grab)):\n grab_model[kmeans.labels_[l]][kmeans.labels_[l - 1]] += 1 / counter_grab\n\nprint (grab_model)\n\ntotal_sum = total_sum + int(counter_grab)\n\n# watch clock model\n\nsum_watch = 0\n\nfor p in range(0, len(random_list)):\n if (random_list[p] >= 80 and random_list[p] < 120):\n filename = file_prefix + str(random_list[p]) + '.txt'\n print (filename)\n\n lines = open(filename).read().splitlines()\n print (len(lines))\n sub_length_watch = len(lines)\n sum_watch += sub_length_watch\n if (random_list[p] >= 120):\n break\n\ncounter_watch = sum_watch / 20\n\nwatch_model = [[0] * 100 for i in range(100)]\n\nfor l in range(total_sum, total_sum + int(counter_watch)):\n 
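# increments normalized transition counts between consecutive cluster labels, i.e. a simple first-order Markov-style model per action\n    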
watch_model[kmeans.labels_[l]][kmeans.labels_[l - 1]] += 1 / counter_watch\n\nprint (watch_model)\n\ntotal_sum = total_sum + int(counter_watch)\n\n# head model\n\nsum_head = 0\n\nfor p in range(0, len(random_list)):\n if (random_list[p] >= 120 and random_list[p] < 160):\n filename = file_prefix + str(random_list[p]) + '.txt'\n print (filename)\n\n lines = open(filename).read().splitlines()\n print (len(lines))\n sub_length_head = len(lines)\n sum_head += sub_length_head\n if (random_list[p] >= 160):\n break\n\ncounter_head = sum_head / 20\n\nhead_model = [[0] * 100 for i in range(100)]\n\nfor l in range(total_sum, total_sum + int(counter_head)):\n head_model[kmeans.labels_[l]][kmeans.labels_[l - 1]] += 1 / counter_head\n\nprint (head_model)\n\ntotal_sum = total_sum + int(counter_head)\n\n# phone\n\nsum_phone = 0\n\nfor p in range(0, len(random_list)):\n if (random_list[p] >= 160 and random_list[p] < 200):\n filename = file_prefix + str(random_list[p]) + '.txt'\n print (filename)\n\n lines = open(filename).read().splitlines()\n print (len(lines))\n sub_length_phone = len(lines)\n sum_phone += sub_length_phone\n if (random_list[p] >= 200):\n break\n\ncounter_phone = sum_phone / 20\n\nphone_model = [[0] * 100 for i in range(100)]\n\nfor l in range(total_sum, total_sum + int(counter_phone)):\n phone_model[kmeans.labels_[l]][kmeans.labels_[l - 1]] += 1 / counter_phone\n\nprint (phone_model)\n\ntotal_sum = total_sum + int(counter_phone)\n\n# cross arms\n\nsum_arms = 0\n\nfor p in range(0, len(random_list)):\n if (random_list[p] >= 200 and random_list[p] < 240):\n filename = file_prefix + str(random_list[p]) + '.txt'\n print (filename)\n\n lines = open(filename).read().splitlines()\n print (len(lines))\n sub_length_arms = len(lines)\n sum_arms += sub_length_arms\n if (random_list[p] >= 240):\n break\n\ncounter_arms = sum_arms / 20\n\narms_model = [[0] * 100 for i in range(100)]\n\nfor l in range(total_sum, total_sum + int(counter_arms)):\n arms_model[kmeans.labels_[l]][kmeans.labels_[l - 1]] += 1 / counter_arms\n\nprint (arms_model)\n\ntotal_sum = total_sum + int(counter_arms)\n\n# cross seat\n\nsum_seat = 0\n\nfor p in range(0, len(random_list)):\n if (random_list[p] >= 240 and random_list[p] < 280):\n filename = file_prefix + str(random_list[p]) + '.txt'\n print (filename)\n\n lines = open(filename).read().splitlines()\n print (len(lines))\n sub_length_seat = len(lines)\n sum_seat += sub_length_seat\n if (random_list[p] >= 280):\n break\n\ncounter_seat = sum_seat / 20\n\nseat_model = [[0] * 100 for i in range(100)]\n\nfor l in range(total_sum, total_sum + int(counter_seat)):\n seat_model[kmeans.labels_[l]][kmeans.labels_[l - 1]] += 1 / counter_seat\n\nprint (seat_model)\n\ntotal_sum = total_sum + int(counter_seat)\n\n# punch\n\nsum_punch = 0\n\nfor p in range(0, len(random_list)):\n if (random_list[p] >= 280 and random_list[p] < 320):\n filename = file_prefix + str(random_list[p]) + '.txt'\n print (filename)\n\n lines = open(filename).read().splitlines()\n print (len(lines))\n sub_length_punch = len(lines)\n sum_punch += sub_length_punch\n if (random_list[p] >= 320):\n break\n\ncounter_punch = sum_punch / 20\n\npunch_model = [[0] * 100 for i in range(100)]\n\nfor l in range(total_sum, total_sum + int(counter_punch)):\n punch_model[kmeans.labels_[l]][kmeans.labels_[l - 1]] += 1 / counter_punch\n\nprint (punch_model)\n\ntotal_sum = total_sum + int(counter_punch)\n\n# kick\n\nsum_kick = 0\n\nfor p in range(0, len(random_list)):\n if (random_list[p] >= 320 and random_list[p] < 360):\n filename = 
file_prefix + str(random_list[p]) + '.txt'\n        print (filename)\n\n        lines = open(filename).read().splitlines()\n        print (len(lines))\n        sub_length_kick = len(lines)\n        sum_kick += sub_length_kick\n    if (random_list[p] >= 360):\n        break\n\ncounter_kick = sum_kick / 20\n\nkick_model = [[0] * 100 for i in range(100)]\n\nfor l in range(total_sum, total_sum + int(counter_kick)):\n    kick_model[kmeans.labels_[l]][kmeans.labels_[l - 1]] += 1 / counter_kick\n\nprint (kick_model)\n\ntotal_sum = total_sum + int(counter_kick)\n\n# wave\n\nsum_wave = 0\n\nfor p in range(0, len(random_list) - 5):\n    if (random_list[p] >= 360 and random_list[p] < 399):\n        filename = file_prefix + str(random_list[p]) + '.txt'\n        print (filename)\n\n        lines = open(filename).read().splitlines()\n        print (len(lines))\n        sub_length_wave = len(lines)\n        sum_wave += sub_length_wave\n\ncounter_wave = sum_wave / 20\n\nwave_model = [[0] * 100 for i in range(100)]\n\nfor l in range(total_sum, total_sum + int(counter_wave)):\n    wave_model[kmeans.labels_[l]][kmeans.labels_[l - 1]] += 1 / counter_wave\n\nprint (wave_model)\n\ntotal_sum = total_sum + int(counter_wave)\n\n# testing\n# let's take a motion sequence for testing, one from each of the 10 actions\n\n# we try to predict the cluster of each skeleton using the predict function of kmeans in the scikit-learn library\n\ntesting_list_1 = []\ntesting_list_2 = []\ntesting_list_3 = []\ntesting_list_4 = []\ntesting_list_5 = []\ntesting_list_6 = []\ntesting_list_7 = []\ntesting_list_8 = []\ntesting_list_9 = []\ntesting_list_10 = []\n\nfor i in range(1, 401):\n    if i in random_list:\n        continue\n    else:\n        # this is our left-over testing data which we are going to work on\n        if i < 40:\n            testing_list_1.append(i)\n        elif 40 <= i < 80:\n            testing_list_2.append(i)\n        elif 80 <= i < 120:\n            testing_list_3.append(i)\n        elif 120 <= i < 160:\n            testing_list_4.append(i)\n        elif 160 <= i < 200:\n            testing_list_5.append(i)\n        elif 200 <= i < 240:\n            testing_list_6.append(i)\n        elif 240 <= i < 280:\n            testing_list_7.append(i)\n        elif 280 <= i < 320:\n            testing_list_8.append(i)\n        elif 320 <= i < 360:\n            testing_list_9.append(i)\n        else:\n            testing_list_10.append(i)\n\nprint(kmeans.predict(np.array([[1.22]])))  # sanity check: predict expects a 2D array\n\n# walk testing data\n\n# we use the first reference skeleton data which is stored in the variables spineX, spineY, spineZ, linesOne\n\nanswer_testing_1 = []\n\nfor i in range(len(testing_list_1)):\n\n    # only the first held-out sequence is scored here\n    if i == 1:\n        break\n    filename = file_prefix + str(testing_list_1[i]) + '.txt'\n    print (filename)\n\n    lines = open(filename).read().splitlines()\n    print (lines[0])\n\n    flag = 0\n    print (len(lines))\n    for j in range(10, (len(lines) - 29)):\n        spineLineNumber = 30 + flag\n        if spineLineNumber > len(lines):\n            break\n\n        spineLine = lines[spineLineNumber]\n        spineLineArray = spineLine.split(',')\n\n        spineTempX = num(spineLineArray[0])\n        spineTempY = num(spineLineArray[1])\n        spineTempZ = num(spineLineArray[2])\n\n        diffX = spineTempX - spineX\n        diffY = spineTempY - spineY\n        diffZ = spineTempZ - spineZ\n\n        tempSum = 0\n\n        for k in range(0, 20):\n            listOne = linesOne[k].split(',')\n\n            listTwo = lines[k + 20 + flag].split(',')\n\n            tempSum += (((num(listTwo[0]) - num(diffX)) - num(listOne[0])) ** (2) + (\n                (num(listTwo[1]) - num(diffY)) - num(listOne[1])) ** (2) + (\n                           (num(listTwo[2]) - num(diffZ)) - num(listOne[2])) ** (2)) ** (0.5)\n\n        # print tempSum\n\n        j += 20\n        flag += 20\n        answer_testing_1.append(kmeans.predict(np.array([[tempSum]]))[0])\n\nprint (linesOne)\n\nprint (answer_testing_1)\n\n# computing probabilities using all the action model matrices\n\n
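# note: the scores below are additive sums of transition weights rather than products of probabilities; 0.0000000001 acts as a floor for unseen transitions\n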
# the maximum probability gives the desired action\n\ntest_probability_1 = 1\ntest_probability_2 = 1\ntest_probability_3 = 1\ntest_probability_4 = 1\ntest_probability_5 = 1\ntest_probability_6 = 1\ntest_probability_7 = 1\ntest_probability_8 = 1\ntest_probability_9 = 1\ntest_probability_10 = 1\n\nfor l in range(1, len(answer_testing_1)):\n\n    if (walk_model[answer_testing_1[l]][answer_testing_1[l - 1]] != 0.0):\n        test_probability_1 = test_probability_1 + walk_model[answer_testing_1[l]][answer_testing_1[l - 1]]\n        print(test_probability_1)\n\n    else:\n        test_probability_1 = test_probability_1 + (0.0000000001)\n\n    if (grab_model[answer_testing_1[l]][answer_testing_1[l - 1]] != 0.0):\n        test_probability_2 = test_probability_2 + grab_model[answer_testing_1[l]][answer_testing_1[l - 1]]\n    else:\n        test_probability_2 = test_probability_2 + (0.0000000001)\n\n    if (watch_model[answer_testing_1[l]][answer_testing_1[l - 1]] != 0.0):\n        test_probability_3 = test_probability_3 + watch_model[answer_testing_1[l]][answer_testing_1[l - 1]]\n    else:\n        test_probability_3 = test_probability_3 + (0.0000000001)\n\n    if (head_model[answer_testing_1[l]][answer_testing_1[l - 1]] != 0.0):\n        test_probability_4 = test_probability_4 + head_model[answer_testing_1[l]][answer_testing_1[l - 1]]\n    else:\n        test_probability_4 = test_probability_4 + (0.0000000001)\n\n    if (phone_model[answer_testing_1[l]][answer_testing_1[l - 1]] != 0.0):\n        test_probability_5 = test_probability_5 + phone_model[answer_testing_1[l]][answer_testing_1[l - 1]]\n    else:\n        test_probability_5 = test_probability_5 + (0.0000000001)\n\n    if (arms_model[answer_testing_1[l]][answer_testing_1[l - 1]] != 0.0):\n        test_probability_6 = test_probability_6 + arms_model[answer_testing_1[l]][answer_testing_1[l - 1]]\n    else:\n        test_probability_6 = test_probability_6 + (0.0000000001)\n\n    if (seat_model[answer_testing_1[l]][answer_testing_1[l - 1]] != 0.0):\n        test_probability_7 = test_probability_7 + seat_model[answer_testing_1[l]][answer_testing_1[l - 1]]\n    else:\n        test_probability_7 = test_probability_7 + (0.0000000001)\n\n    if (punch_model[answer_testing_1[l]][answer_testing_1[l - 1]] != 0.0):\n        test_probability_8 = test_probability_8 + punch_model[answer_testing_1[l]][answer_testing_1[l - 1]]\n    else:\n        test_probability_8 = test_probability_8 + (0.0000000001)\n\n    if (kick_model[answer_testing_1[l]][answer_testing_1[l - 1]] != 0.0):\n        test_probability_9 = test_probability_9 + kick_model[answer_testing_1[l]][answer_testing_1[l - 1]]\n    else:\n        test_probability_9 = test_probability_9 + (0.0000000001)\n\n    if (wave_model[answer_testing_1[l]][answer_testing_1[l - 1]] != 0.0):\n        test_probability_10 = test_probability_10 + wave_model[answer_testing_1[l]][answer_testing_1[l - 1]]\n    else:\n        test_probability_10 = test_probability_10 + (0.0000000001)\n\nprint('Printing the probabilities')\nprint(test_probability_1)\nprint(test_probability_2)\nprint(test_probability_3)\nprint(test_probability_4)\nprint(test_probability_5)\nprint(test_probability_6)\nprint(test_probability_7)\nprint(test_probability_8)\nprint(test_probability_9)\nprint(test_probability_10)\n","sub_path":"main/code2.py","file_name":"code2.py","file_ext":"py","file_size_in_byte":15552,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
{"seq_id":"557366098","text":"# -*- coding: utf-8 -*-\n\nfrom django.shortcuts import render_to_response, render, get_object_or_404\nfrom django.template import RequestContext\nfrom django.http import 
HttpResponseRedirect, Http404\n\nfrom django.core.urlresolvers import reverse\nfrom django.db import transaction\nfrom modeltranslation.forms import FieldTranslationForm, ImportTranslationsForm\n\nfrom modeltranslation.models import checksum, FieldTranslation, trans_attr, trans_is_fuzzy_attr\n\nfrom django.conf import settings\n\nimport re\nimport hashlib\n\n########################################################################\n########################################################################\n# Administration index\ndef admin(request):\n\treturn render_to_response('modeltranslation/admin/admin.html',{}, RequestContext(request))\n\n\n########################################################################\n########################################################################\n# View all translations\ndef view_all(request, language, filter=None):\n\t\"\"\"\n\tTranslation list view.\n\tShows every translation that exists in the system.\n\t\"\"\"\n\n\t# If a search was submitted via POST,\n\tif request.method == \"POST\":\n\t\tdata = request.POST.dict()\n\n\t\tif not data[\"search\"] or data[\"search\"]==\"\":\n\t\t\treturn HttpResponseRedirect(reverse(\"modeltranslation:view_all_url\",args=(data[\"language\"],data[\"filter\"])))\n\n\t\treturn HttpResponseRedirect(reverse(\"modeltranslation:view_all_url\",args=(data[\"language\"],data[\"filter\"]))+\"?search=\"+data[\"search\"])\n\n\t# Otherwise nothing was sent via POST,\n\t# so everything is taken from the URL\n\tLANGUAGES = dict(lang for lang in settings.LANGUAGES)\n\tif language not in LANGUAGES.keys():\n\t\traise Http404(u\"El idioma {0} no existe\".format(language))\n\t\n\tif language == settings.LANGUAGE_CODE:\n\t\traise Http404(u\"El idioma {0} es el idioma por defecto\".format(language))\n\n\t# Filter for the translations\n\ttrans_filter = {\"lang\":language}\n\tif filter == \"all\" or filter is None:\n\t\tpass\n\telif filter == \"fuzzy\":\n\t\ttrans_filter[\"is_fuzzy\"] = True\n\telif filter == \"completed\":\n\t\ttrans_filter[\"is_fuzzy\"] = False\n\t\n\tsearch_query = \"\"\n\tif request.GET and \"search\" in request.GET and request.GET.get(\"search\")!=\"\":\n\t\tsearch_query = request.GET.get(\"search\")\n\t\ttrans_filter[\"source_text__icontains\"] = search_query\n\n\ttranslations = FieldTranslation.objects.filter(**trans_filter)\n\t\n\t# Refresh the translations in case the structures of the referenced\n\t# models have changed and there are translations for fields that\n\t# the models no longer have\n\tactive_translations = []\n\tfor translation in translations:\n\t\tsource_model = translation.get_source_model()\n\t\tif not translation.field in source_model._meta.translatable_fields:\n\t\t\ttranslation.delete()\n\t\telse:\n\t\t\tactive_translations.append(translation)\n\t\n\treplacements = {\"translations\":active_translations, \"filter\":filter, \"lang\":language, \"language\":LANGUAGES[language], \"search_query\":search_query}\n\treturn render_to_response('modeltranslation/admin/list.html',replacements, RequestContext(request))\n\n\n########################################################################\n########################################################################\n# Edit a specific translation\ndef edit(request, translation):\n\t\"\"\"\n\tEdits a translation.\n\t\"\"\"\n\ttranslation = get_object_or_404(FieldTranslation, id=translation)\n\tif request.method == 'POST':\n\t\tif \"cancel\" in request.POST:\n\t\t\treturn 
HttpResponseRedirect(reverse(\"modeltranslation:view_all_url\",args=(translation.lang,\"all\")))\n\t\telif \"save\" in request.POST:\n\t\t\tform = FieldTranslationForm(request.POST, instance=translation)\n\t\t\tvalid_form = form.is_valid()\n\t\t\tif valid_form:\n\t\t\t\ttranslation = form.save(commit=False)\n\t\t\t\ttranslation.context = u\"Admin. Traducciones\"\n\t\t\t\ttranslation.save()\n\t\t\t\treturn HttpResponseRedirect(reverse(\"modeltranslation:view_all_url\",args=(translation.lang,\"all\")))\n\t\telse:\n\t\t\tform = FieldTranslationForm(instance=translation)\n\telse:\n\t\tform = FieldTranslationForm(instance=translation)\n\n\tLANGUAGES = dict(lang for lang in settings.LANGUAGES)\n\tlanguage = LANGUAGES[translation.lang]\n\t\n\treturn render_to_response('modeltranslation/admin/edit_translation.html',{\"translation\":translation, \"form\":form, \"lang\":translation.lang, \"language\":language}, RequestContext(request))\n\n\n########################################################################\n########################################################################\n## Import translations in .po format\n@transaction.atomic\ndef import_translations(request, language):\n\t\"\"\"\n\tImports translations from a PO file. Note that the PO file must\n\thave been generated by this application, so that the comments can\n\tserve as the translation id (we insert it ourselves during export).\n\t\"\"\"\n\tdef _import_po_file(uploadedfile, lang):\n\t\tlines = []\n\t\tfor line in uploadedfile:\n\t\t\tlines.append(line)\n\t\tnum_lines = len(lines)\n\t\t\n\t\t# context line format: msgctxt \"id--model--object_id--field\"\n\t\tprog_ctxt = re.compile(r\"msgctxt\\s+\\\"(?P<id>\\d+)--(?P<model>\\w+)--(?P<object_id>\\d+)--(?P<field>\\w+)\\\"\")\n\t\tprog_msgid = re.compile(r\"msgid\\s+\\\"(?P<msg>.+)\\\"$\")\n\t\tprog_msgstr = re.compile(r\"msgstr\\s+(?P<trans>.+)\")\n\t\t\n\t\ti = 0\n\t\twhile i < num_lines:\n\t\t\tline = lines[i]\n\t\t\tresult = prog_ctxt.match(line)\n\t\t\tif result:\n\t\t\t\tid = result.group(\"id\")\n\t\t\t\tis_fuzzy = (lines[i-1] == \"#, fuzzy\\n\")\n\n\t\t\t\tsource_text = lines[i+1]\n\t\t\t\ttranslation_line = lines[i+2]\n\n\t\t\t\t# Translation\n\t\t\t\tg = prog_msgstr.match(translation_line)\n\t\t\t\tif g is None:\n\t\t\t\t\ti += 1\n\t\t\t\t\tcontinue\n\t\t\t\ttranslation = g.group(\"trans\").replace(\"msgstr\",\"\")[1:-1].replace(\"\\\\\\\"\",\"\\\"\").replace('\\\\\\'','\\'')\n\t\t\t\t\t\n\t\t\t\t# Look up the translation from its id\n\t\t\t\ttry:\n\t\t\t\t\tfield_trans = FieldTranslation.objects.get(id=id)\n\t\t\t\texcept FieldTranslation.DoesNotExist:\n\t\t\t\t\tsource_text = source_text.replace(\"msgid\",\"\")[1:-1].replace(\"\\\\\\\"\",\"\\\"\").replace('\\\\\\'','\\'')\n\t\t\t\t\tsource_md5 = hashlib.md5(source_text.encode(\"utf-8\")).hexdigest()\n\t\t\t\t\tfield_trans = FieldTranslation(model=result.group(\"model\"), object_id=result.group(\"object_id\"), field=result.group(\"field\"), lang=lang, source_text=source_text, source_md5=source_md5)\n\t\t\t\t\t\n\t\t\t\t# Set the translation and whether it is fuzzy\n\t\t\t\tfield_trans.translation = translation\n\t\t\t\tfield_trans.is_fuzzy = is_fuzzy\n\t\t\t\tfield_trans.save()\n\t\t\t\t#print translation\n\t\t\t\t#print is_fuzzy\n\t\t\t\ti += 4\n\t\t\ti += 1\n\t\n\t# Remove translations that are not linked to any object\n\tFieldTranslation.delete_orphan_translations()\n\t\n\t# Access must be via POST\n\tif request.method != \"POST\":\n\t\treturn HttpResponseRedirect(reverse(\"modeltranslation:admin_url\"))\n\t\n\tform = ImportTranslationsForm(request.POST, request.FILES)\n
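\t# an invalid upload simply falls through to the admin redirect below\n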
\tif form.is_valid():\n\t\t_import_po_file(request.FILES['file'], language)\n\t\t#cache = TransCache.factory()\n\t\t#cache.clear()\n\t\treturn HttpResponseRedirect(reverse(\"modeltranslation:view_all_url\",args=(language,\"all\")))\n\n\treturn HttpResponseRedirect(reverse(\"modeltranslation:admin_url\"))\n\n\n########################################################################\n########################################################################\n## Export translations in .po format\ndef export_translations(request, language):\n\t\"\"\"\n\tTranslation export view.\n\t\"\"\"\n\tFieldTranslation.delete_orphan_translations()\n\ttranslations = FieldTranslation.objects.filter(lang=language)\n\tfor trans in translations:\n\t\ttrans.source_text = trans.source_text.replace(\"'\",\"\\'\").replace(\"\\\"\",\"\\\\\\\"\")\n\t\ttrans.translation = trans.translation.replace(\"'\",\"\\'\").replace(\"\\\"\",\"\\\\\\\"\")\n\treplacements = {\"translations\":translations, \"lang\":language}\n\tif len(settings.ADMINS)>0:\n\t\treplacements[\"last_translator\"] = settings.ADMINS[0][0]\n\t\treplacements[\"last_translator_email\"] = settings.ADMINS[0][1]\n\tif settings.WEBSITE_NAME:\n\t\treplacements[\"website_name\"] = settings.WEBSITE_NAME\n\tresponse = render(request=request, template_name='modeltranslation/admin/export_translations.po', dictionary=replacements, context_instance=RequestContext(request), content_type=\"text/x-gettext-translation\")\n\tresponse['Content-Disposition'] = 'attachment; filename=\"{0}.po\"'.format(language)\n\treturn response\n\n\n########################################################################\n########################################################################\n## Update the translations\ndef update_translations(request):\n\t\"\"\"\n\tUpdates the translations, deleting orphaned ones and generating\n\tempty translations for every object that exists in the database.\n\t\"\"\"\n\tFieldTranslation.delete_orphan_translations()\n\tnum_translations = FieldTranslation.update_translations()\n\treturn render_to_response('modeltranslation/admin/update_translations_ok.html',{\"num_translations\":num_translations}, RequestContext(request))\n\t\n","sub_path":"modeltranslation/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":8751,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
{"seq_id":"606606399","text":"#!/usr/bin/env python3\n\nimport sys\nfrom typing import List, Tuple\n\nWALK_DIFFS = {\n    '>': (0, 1),\n    '<': (0, -1),\n    'v': (1, 0),\n    '^': (-1, 0)\n}\n\nTURNS = {\n    ('>', 'L'): '^',\n    ('>', 'R'): 'v',\n    ('<', 'L'): 'v',\n    ('<', 'R'): '^',\n    ('v', 'L'): '>',\n    ('v', 'R'): '<',\n    ('^', 'L'): '<',\n    ('^', 'R'): '>',\n}\n\nDIRECTION_SCORE = {\n    '>': 0,\n    'v': 1,\n    '<': 2,\n    '^': 3\n}\n\n\ndef parse_path(raw_path: str) -> List[str]:\n    path = []\n\n    curr_number = ''\n\n    for symbol in raw_path:\n        if symbol.isdigit():\n            curr_number += symbol\n        else:\n            path.append(curr_number)\n            path.append(symbol)\n            curr_number = ''\n\n    if curr_number:\n        path.append(curr_number)\n\n    return path\n\n\ndef walk(\n    field: List[str], pos: Tuple[int, int], direction: str, step: int\n) -> Tuple[int, int]:\n\n    diff = WALK_DIFFS[direction]\n\n    for i in range(step):\n        row = (pos[0] + diff[0]) % len(field)\n        column = (pos[1] + diff[1]) % len(field[row])\n\n        if field[row][column] == '#':\n            return pos\n\n        if field[row][column] == '.':\n            pos = (row, column)\n        elif field[row][column] == ' ':\n            new_row, new_column = row, 
column\n while field[new_row][new_column] == ' ':\n new_row = (new_row + diff[0]) % len(field)\n new_column = (new_column + diff[1]) % len(field[new_row])\n if field[new_row][new_column] == '.':\n pos = (new_row, new_column)\n\n return pos\n\n\ndef final_pos_score(data: List[str]) -> int:\n field = [line.rstrip() for line in data if len(line.rstrip())]\n field, path = field[:-1], parse_path(field[-1])\n max_row = max(len(line) for line in field)\n field = [line.ljust(max_row, ' ') for line in field]\n\n pos = (0, field[0].index('.'))\n\n direction = '>'\n\n for item in path:\n if item.isdigit():\n pos = walk(field, pos, direction, int(item))\n else:\n direction = TURNS[(direction, item)]\n\n return 1000 * (pos[0] + 1) + 4 * (pos[1] + 1) + DIRECTION_SCORE[direction]\n\n\ndef test_final_pos_score():\n data = [\n ' ...#',\n ' .#..',\n ' #...',\n ' ....',\n '...#.......#',\n '........#...',\n '..#....#....',\n '..........#.',\n ' ...#....',\n ' .....#..',\n ' .#......',\n ' ......#.',\n '',\n '10R5L5R10L4R5L5'\n ]\n\n assert final_pos_score(data) == 6032\n\n\ndef main():\n data = sys.stdin.readlines()\n result = final_pos_score(data)\n print(result)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"2022/day_22_monkey_map_1.py","file_name":"day_22_monkey_map_1.py","file_ext":"py","file_size_in_byte":2691,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"357642079","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport bitfield.models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('projects', '0030_quote_refuse_reasons'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='quote',\n name='refuse_reasons',\n field=bitfield.models.BitField(((b'not-available', b\"Im' not available\"), (b'buget-unreasonable', b'Budget is unreasonable'), (b'wrong-project-type', b\"This isn't the type of project I like\")), default=0),\n preserve_default=True,\n ),\n ]\n","sub_path":"projects/migrations/0031_auto_20150320_2026.py","file_name":"0031_auto_20150320_2026.py","file_ext":"py","file_size_in_byte":637,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"616762611","text":"\"\"\" This file defines the training environment with multiple assets \"\"\"\n\nimport numpy as np \nimport pandas as pd \nimport matplotlib\nimport matplotlib.pyplot as plt\nimport pickle\nmatplotlib.use('Agg')\n\nimport gym\nfrom gym.utils import seeding\nfrom gym import spaces\n\n\n# Shares normalization factor: 100 Shares per Trade\nHMAX_NORMALIZE = 100\n\n# Inital Balance of $1 million\nINITIAL_ACCOUNT_BALANCE=1000000\n\n# Total Number of Assets that can be bought\nASSET_DIM = 78\n\n# Transaction fees of 1/1000th of trade size\nTRANSACTION_FEE_PERCENT = 0.001\n\n# Reward Scaling\nREWARD_SCALING = 1e-4\n\nclass AssetEnvTrain(gym.Env):\n\t\"\"\"A Trading Environment for OpenAI gym\"\"\"\n\tmetadata = {'render.modes':['human']}\n\n\tdef __init__(self, df, day=0):\n\t\tself.day = day\n\t\tself.df = df\n\n\t\t\"\"\"\n\t\tCreate the action space.\n\t\tMake it normalized between -1 and 1, with STOCK_DIM\n\t\t\"\"\"\n\t\tself.action_space = spaces.Box(low=-1, high=1, shape=(ASSET_DIM,))\n\n\t\t\"\"\"\n\t\tCreate observation space.\n\t\tAgent can see: \n\t\t\tMoney in account [1], prices for contracts [78],\n\t\t\towned shares of each contract [78], macd for assets [78],\n\t\t\trsi for assets [78], cci for assets [78], adx for assets 
[78]\n\n\t\tObservation space dimension = 1 + 78 + 78 + 78 + 78 + 78 + 78\n\t\tObservation space dimension = (469,)\n\t\t\"\"\"\n\t\tself.observation_space = spaces.Box(low=0, high=np.inf, shape=(469,))\n\n\t\t# Load data from pandas dataframe\n\t\tself.unique_dates = self.df['date'].unique()\n\t\tself.data = self.df[self.df['date'] == self.unique_dates[self.day]]\n\t\tself.prices = self.data['close']\n\n\t\t# Terminal is FALSE until the end\n\t\tself.terminal = False\n\n\t\t# Initialize the state\n\t\tself.state = [INITIAL_ACCOUNT_BALANCE] + self.data['close_scaled'].values.tolist() + [0]*ASSET_DIM + self.data['macd'].values.tolist() + self.data['rsi'].values.tolist() + self.data['cci'].values.tolist() + self.data['adx'].values.tolist()\n\n\t\t# Initialize the reward\n\t\tself.reward = 0\n\t\tself.cost = 0\n\n\t\t# Initialize memory\n\t\tself.asset_memory = [INITIAL_ACCOUNT_BALANCE]\n\t\tself.rewards_memory = []\n\t\tself.trades = 0\n\n\t\t# Initialize seed\n\t\tself._seed()\n\n\n\tdef _sell_asset(self, index, action):\n\t\t\"\"\" This function performs a sell action based on the sign of the action \"\"\"\n\t\tif self.state[index+ASSET_DIM+1] > 0:\n\t\t\t# number of shares we can actually sell, computed before mutating state\n\t\t\tsell_amount = min(abs(action), self.state[index + ASSET_DIM + 1])\n\n\t\t\t# Update balance\n\t\t\tself.state[0] += self.prices.iloc[index] * sell_amount * (1 - TRANSACTION_FEE_PERCENT)\n\n\t\t\tself.state[index + ASSET_DIM + 1] -= sell_amount\n\t\t\t# fee is charged on the shares actually sold\n\t\t\tself.cost += self.prices.iloc[index] * sell_amount * TRANSACTION_FEE_PERCENT\n\n\t\t\tself.trades += 1\n\t\telse:\n\t\t\tpass\n\n\tdef _buy_asset(self, index, action):\n\t\t\"\"\" This function performs a buy action based on the sign of the action \"\"\"\n\t\t\n\t\t# How many CAN we buy?\n\t\tavailable_amount = self.state[0] // self.prices.iloc[index]\n\n\t\t# Update balance\n\t\tself.state[0] -= self.prices.iloc[index] * min(available_amount, action) * (1 + TRANSACTION_FEE_PERCENT)\n\n\t\tself.state[index + ASSET_DIM + 1] += min(available_amount, action)\n\n\t\tself.cost += self.prices.iloc[index] * min(available_amount, action) * TRANSACTION_FEE_PERCENT\n\n\t\tself.trades += 1\n\n\tdef step(self, actions):\n\t\t\"\"\" This function moves the agent forward in time \"\"\"\n\t\t\n\t\t# Is this the end?\n\t\tself.terminal = self.day >= (len(self.df['date'].unique())-1)\n\n\t\t# If it is the end, then plot and save figures\n\t\tif self.terminal:\n\t\t\tplt.plot(self.asset_memory, 'r')\n\t\t\tplt.savefig('results/account_value_train.png')\n\t\t\tplt.close()\n\n\t\t\tend_total_asset = self.state[0] + sum(np.array(self.prices)*np.array(self.state[(ASSET_DIM+1):(ASSET_DIM*2+1)]))\n\n\t\t\tdf_total_value = pd.DataFrame(self.asset_memory)\n\t\t\tdf_total_value.to_csv('results/account_value_train.csv')\n\t\t\tdf_total_value.columns = ['account_value']\n\t\t\tdf_total_value['daily_return'] = df_total_value.pct_change(1)\n\t\t\tsharpe = (252**0.5) * df_total_value['daily_return'].mean() / df_total_value['daily_return'].std()\n\n\t\t\tdf_rewards = pd.DataFrame(self.rewards_memory)\n\t\t\tdf_rewards.to_csv('results/rewards_value_train.csv')\n\n\t\t\treturn self.state, self.reward, self.terminal, {}\n\n\t\t# If it isn't the end, then take actions and move forward in time\n\t\telse:\n\n\t\t\t# Get actions\n\t\t\tactions = actions * HMAX_NORMALIZE\n\n\t\t\t# Find starting account value\n\t\t\tbegin_total_asset = self.state[0] + sum(np.array(self.prices) * np.array(self.state[(ASSET_DIM+1):(ASSET_DIM*2+1)]))\n\n\n\t\t\t# Take actions\n
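\t\t\t# most negative entries are the largest sells, most positive the largest buys; sells run first so the freed cash can fund the buys\n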
\t\t\targsort_actions = np.argsort(actions)\n\n\t\t\tsell_index = argsort_actions[:np.where(actions < 0)[0].shape[0]]\n\t\t\tbuy_index = argsort_actions[::-1][:np.where(actions > 0)[0].shape[0]]\n\n\t\t\tfor index in sell_index:\n\t\t\t\tself._sell_asset(index, actions[index])\n\n\t\t\tfor index in buy_index:\n\t\t\t\tself._buy_asset(index, actions[index])\n\n\n\t\t\t# Move forward in time\n\t\t\tself.day += 1\n\t\t\tself.data = self.df[self.df['date'] == self.unique_dates[self.day]]\n\t\t\tself.prices = self.data['close']\n\n\t\t\t# Get next state\n\t\t\tself.state = [self.state[0]] + self.data['close_scaled'].values.tolist() + list(self.state[(ASSET_DIM+1):(ASSET_DIM*2+1)]) + self.data['macd'].values.tolist() + self.data['rsi'].values.tolist() + self.data['cci'].values.tolist() + self.data['adx'].values.tolist()\n\n\t\t\t# Get new account value\n\t\t\tend_total_asset = self.state[0] + sum(np.array(self.prices) * np.array(self.state[(ASSET_DIM+1):(ASSET_DIM*2+1)]))\n\n\t\t\t# Remember new portfolio value\n\t\t\tself.asset_memory.append(end_total_asset)\n\n\t\t\t# Calculate Reward\n\t\t\tself.reward = ((end_total_asset - begin_total_asset) / begin_total_asset)\n\n\t\t\t# Remember reward\n\t\t\tself.rewards_memory.append(self.reward)\n\n\n\t\t\t## Standardize Reward\n\t\t\t#mean_reward = np.mean(self.rewards_memory)\n\t\t\t#if len(self.rewards_memory) > 1:\n\t\t\t#\tstd_reward = np.std(self.rewards_memory)\n\t\t\t#else:\n\t\t\t#\tstd_reward = 1\n\n\t\t\t# NOTE: this offsets the fractional return by -1 before scaling, so near-zero returns map to roughly -100\n\t\t\tself.reward = (self.reward - 1) * 100\n\n\n\t\treturn self.state, self.reward, self.terminal, {}\n\n\n\tdef reset(self):\n\t\tself.asset_memory = [INITIAL_ACCOUNT_BALANCE]\n\t\tself.day = 0\n\t\tself.data = self.df[self.df['date'] == self.unique_dates[self.day]]\n\t\tself.prices = self.data['close']\n\t\tself.cost = 0\n\t\tself.trades = 0\n\t\tself.terminal = False\n\t\tself.rewards_memory = []\n\n\t\tself.state = [INITIAL_ACCOUNT_BALANCE] + self.data['close_scaled'].values.tolist() + [0]*ASSET_DIM + self.data['macd'].values.tolist() + self.data['rsi'].values.tolist() + self.data['cci'].values.tolist() + self.data['adx'].values.tolist()\n\n\t\treturn self.state\n\n\n\tdef render(self, mode='human'):\n\t\treturn self.state\n\n\tdef _seed(self, seed=None):\n\t\tself.np_random, seed = seeding.np_random(seed)\n\t\treturn [seed]\n","sub_path":"Springboard/quant_project/env/EnvMultipleAssets_train.py","file_name":"EnvMultipleAssets_train.py","file_ext":"py","file_size_in_byte":6525,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
{"seq_id":"260326797","text":"\"\"\"\n\nMiscellaneous utility functions for user applications.\n\n\"\"\"\n\nimport base64\nimport json\nimport os\nimport ssl\n\nimport boto3\nfrom pyhive import hive\n\nfrom hops import constants\n\n
# ! Needed for hops library backwards compatibility\nfrom hops.exceptions import UnkownSecretStorageError\n\ntry:\n    import requests\nexcept:\n    pass\n\ntry:\n    import http.client as http\nexcept ImportError:\n    import httplib as http\n\n\ndef project_id():\n    \"\"\"\n    Get the Hopsworks project id from environment variables\n\n    Returns: the Hopsworks project id\n\n    \"\"\"\n    return os.environ[constants.ENV_VARIABLES.HOPSWORKS_PROJECT_ID_ENV_VAR]\n\ndef project_name():\n    \"\"\"\n    Extracts the project name from the environment\n\n    Returns:\n        project name\n    \"\"\"\n    return os.environ[constants.ENV_VARIABLES.HOPSWORKS_PROJECT_NAME_ENV_VAR]\n\ndef _get_hopsworks_rest_endpoint():\n    \"\"\"\n\n    Returns:\n        The hopsworks REST endpoint for making requests to the REST API\n\n    \"\"\"\n    return os.environ[constants.ENV_VARIABLES.REST_ENDPOINT_END_VAR]\n\n\nhopsworks_endpoint = None\ntry:\n    hopsworks_endpoint = _get_hopsworks_rest_endpoint()\nexcept:\n    pass\n\n\ndef _get_host_port_pair():\n    \"\"\"\n    Strips the scheme (\"http\" or \"https\") from the REST endpoint and\n    returns the remainder split into a [host, port] pair\n\n    Returns:\n        a list [host, port]\n    \"\"\"\n    endpoint = _get_hopsworks_rest_endpoint()\n    if 'http' in endpoint:\n        last_index = endpoint.rfind('/')\n        endpoint = endpoint[last_index + 1:]\n    host_port_pair = endpoint.split(':')\n    return host_port_pair\n\n\ndef _get_http_connection(https=False):\n    \"\"\"\n    Opens a HTTP(S) connection to Hopsworks\n\n    Args:\n        https: boolean flag whether to use Secure HTTP or regular HTTP\n\n    Returns:\n        HTTP(S)Connection\n    \"\"\"\n    host_port_pair = _get_host_port_pair()\n    if (https):\n        PROTOCOL = ssl.PROTOCOL_TLSv1_2\n        ssl_context = ssl.SSLContext(PROTOCOL)\n        connection = http.HTTPSConnection(str(host_port_pair[0]), int(host_port_pair[1]), context=ssl_context)\n    else:\n        connection = http.HTTPConnection(str(host_port_pair[0]), int(host_port_pair[1]))\n    return connection\n\n\ndef set_auth_header(headers):\n    headers[constants.HTTP_CONFIG.HTTP_AUTHORIZATION] = \"ApiKey \" + os.environ[constants.ENV_VARIABLES.API_KEY_ENV_VAR]\n\n\ndef send_request(connection, method, resource, body=None, headers=None):\n    \"\"\"\n    Sends a request to Hopsworks. 
In case of Unauthorized response, submit the request once more as jwt might not\n have been read properly from local container.\n\n Args:\n connection: HTTP connection instance to Hopsworks\n method: HTTP(S) method\n resource: Hopsworks resource\n body: HTTP(S) body\n headers: HTTP(S) headers\n\n Returns:\n HTTP(S) response\n \"\"\"\n if headers is None:\n headers = {}\n set_auth_header(headers)\n connection.request(method, resource, body, headers)\n response = connection.getresponse()\n if response.status == constants.HTTP_CONFIG.HTTP_UNAUTHORIZED:\n set_auth_header(headers)\n connection.request(method, resource, body, headers)\n response = connection.getresponse()\n return response\n\ndef _create_hive_connection(featurestore):\n \"\"\"Returns Hive connection\n\n Args:\n :featurestore: featurestore to which connection will be established\n \"\"\"\n # get host without port\n host = os.environ[constants.ENV_VARIABLES.REST_ENDPOINT_END_VAR].split(':')[0]\n hive_conn = hive.Connection(host=host,\n port=9085,\n database=featurestore,\n auth='CERTIFICATES',\n truststore='trustStore.jks',\n keystore='keyStore.jks',\n keystore_password=os.environ[constants.ENV_VARIABLES.CERT_KEY_ENV_VAR])\n\n return hive_conn\n\n\ndef _parse_rest_error(response_dict):\n \"\"\"\n Parses a JSON response from hopsworks after an unsuccessful request\n\n Args:\n response_dict: the JSON response represented as a dict\n\n Returns:\n error_code, error_msg, user_msg\n \"\"\"\n error_code = -1\n error_msg = \"\"\n user_msg = \"\"\n if constants.REST_CONFIG.JSON_ERROR_CODE in response_dict:\n error_code = response_dict[constants.REST_CONFIG.JSON_ERROR_CODE]\n if constants.REST_CONFIG.JSON_ERROR_MSG in response_dict:\n error_msg = response_dict[constants.REST_CONFIG.JSON_ERROR_MSG]\n if constants.REST_CONFIG.JSON_USR_MSG in response_dict:\n user_msg = response_dict[constants.REST_CONFIG.JSON_USR_MSG]\n return error_code, error_msg, user_msg\n\ndef get_secret(project_name, secrets_store, secret_key):\n \"\"\"\n Returns secret value from the AWS Secrets Manager or Parameter Store\n\n Args:\n :project_name (str): name of project\n :secrets_store: the underlying secrets storage to be used, e.g. `secretsmanager` or `parameterstore`\n :secret_type (str): key for the secret value, e.g. 
`api-key`, `cert-key`, `trust-store`, `key-store`\n Returns:\n :str: secret value\n \"\"\"\n if secrets_store == constants.AWS.SECRETS_MANAGER:\n return _query_secrets_manager(project_name, secret_key)\n elif secrets_store == constants.AWS.PARAMETER_STORE:\n return _query_parameter_store(project_name, secret_key)\n else:\n raise UnkownSecretStorageError(\"Secrets storage \" + secrets_store + \" is not supported.\")\n\ndef _assumed_role():\n client = boto3.client('sts')\n response = client.get_caller_identity()\n # arns for assumed roles in SageMaker follow the following schema\n # arn:aws:sts::123456789012:assumed-role/my-role-name/my-role-session-name\n local_identifier = response['Arn'].split(':')[-1].split('/')\n if len(local_identifier) != 3 or local_identifier[0] != 'assumed-role':\n raise Exception('Failed to extract assumed role from arn: ' + response['Arn'])\n return local_identifier[1]\n\ndef _query_secrets_manager(project_name, secret_key):\n secret_name = 'hopsworks/project/' + project_name + '/role/' + _assumed_role()\n\n session = boto3.session.Session()\n if (os.environ[constants.ENV_VARIABLES.REGION_NAME_ENV_VAR] != constants.AWS.DEFAULT_REGION):\n region_name = os.environ[constants.ENV_VARIABLES.REGION_NAME_ENV_VAR]\n else:\n region_name = session.region_name\n\n client = session.client(\n service_name='secretsmanager',\n region_name=region_name\n )\n get_secret_value_response = client.get_secret_value(SecretId=secret_name)\n return json.loads(get_secret_value_response['SecretString'])[secret_key]\n\ndef _query_parameter_store(project_name, secret_key):\n ssm = boto3.client('ssm')\n name = '/hopsworks/project/' + project_name + '/role/' + _assumed_role() + '/type/' + secret_key\n return ssm.get_parameter(Name=name, WithDecryption=True)['Parameter']['Value']\n\ndef write_b64_cert_to_bytes(b64_string, path):\n \"\"\"Converts b64 encoded certificate to bytes file .\n\n Args:\n :b64_string (str): b64 encoded string of certificate\n :path (str): path where file is saved, including file name. e.g. 
/path/key-store.jks\n \"\"\"\n\n with open(path, 'wb') as f:\n cert_b64 = base64.b64decode(b64_string)\n f.write(cert_b64)\n\n\ndef abspath(hdfs_path):\n return hdfs_path\n","sub_path":"hops/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":7401,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"190898687","text":"import os\nimport numpy as np\nfrom tqdm import tqdm\nimport tensorflow as tf\nfrom utils.loss_utils import running_avg_loss\nfrom utils.nn_utils import list_to_multi_hot, list_to_double_multi_hot\nfrom tflearn.data_utils import pad_sequences, to_categorical\nfrom deep_models.base.base_train import BaseTrain\n\n\nclass Trainer(BaseTrain):\n def __init__(self, model, config, gpu_config):\n super(Trainer, self).__init__(model, config)\n self.train_dataset = self.config.DataGenerator('train', self.config.num_epochs, self.config.batch_size, config)\n self.sv = self.build_sv()\n self.sess = self.sv.prepare_or_wait_for_session(config=gpu_config)\n self.summary_writer = tf.summary.FileWriter(os.path.join(self.config.summary_dir, \"train\"), self.sess.graph)\n # self.model.load(self.sess)\n if self.config.use_pretrain_embedding and \\\n self.model.has_ckpt(self.sess) is False and \\\n self.config.pre_train_ckpt_dir == '':\n print(\"load pretrain embedding...\")\n pretrain_embedding = self.config.data_obj.load_pretrain_embedding()\n self.sess.run(self.model.embedding_init, feed_dict={self.model.embedding_placeholder: pretrain_embedding})\n\n def train_epoch(self):\n avg_loss_all, avg_loss_law, avg_loss_accu, avg_loss_impris = 0., 0., 0., 0.\n avg_acc_impris = 0.\n avg_macro_f1_law, avg_micro_f1_law, avg_weighted_f1_law = 0., 0., 0.\n avg_macro_f1_accu, avg_micro_f1_accu, avg_weighted_f1_accu = 0., 0., 0.\n\n while not self.sv.should_stop():\n loss_law, loss_accu, loss_impris, loss_l2, loss_all, \\\n macro_f1_law, micro_f1_law, weighted_f1_law, \\\n macro_f1_accu, micro_f1_accu, weighted_f1_accu, \\\n acc_impris, summaries = self.train_step()\n global_steps = self.model.global_step_tensor.eval(self.sess)\n\n avg_loss_law = running_avg_loss(avg_loss_law, loss_law)\n avg_loss_accu = running_avg_loss(avg_loss_accu, loss_accu)\n avg_loss_impris = running_avg_loss(avg_loss_impris, loss_impris)\n avg_loss_all = running_avg_loss(avg_loss_all, loss_all)\n\n avg_macro_f1_law = running_avg_loss(avg_macro_f1_law, macro_f1_law)\n avg_micro_f1_law = running_avg_loss(avg_micro_f1_law, micro_f1_law)\n avg_weighted_f1_law = running_avg_loss(avg_weighted_f1_law, weighted_f1_law)\n\n avg_macro_f1_accu = running_avg_loss(avg_macro_f1_accu, macro_f1_accu)\n avg_micro_f1_accu = running_avg_loss(avg_micro_f1_accu, micro_f1_accu)\n avg_weighted_f1_accu = running_avg_loss(avg_weighted_f1_accu, weighted_f1_accu)\n\n avg_acc_impris = running_avg_loss(avg_acc_impris, acc_impris)\n self.summary_writer.add_summary(summaries, global_step=global_steps)\n if global_steps % 100 == 0:\n print(\"global_steps:{}\\tloss_all:{:.3f}\\tloss_law:{:.3f}\\tloss_accu:{:.3f}\\tloss_impris:{:.3f}\"\n \"\\tmacro_f1_law:{:.3f}\\tmicro_f1_law:{:.3f}\\tweighted_f1_law:{:.3f}\"\n \"\\tavg_macro_f1_accu:{:.3f}\\tavg_micro_f1_accu:{:.3f}\\tavg_weighted_f1_accu:{:.3f}\\t\"\n \"acc_impris:{}\".\n format(global_steps, avg_loss_all, avg_loss_law, avg_loss_accu, avg_loss_impris,\n avg_macro_f1_law, avg_micro_f1_law, avg_weighted_f1_law,\n avg_macro_f1_accu, avg_micro_f1_accu, avg_weighted_f1_accu,\n avg_acc_impris))\n self.summary_writer.flush()\n self.sv.Stop()\n\n def 
train_step(self):\n        facts_batch, laws_batch, accus_batch, impris_batch, money_batch, death_batch = self.train_dataset.next_batch(self.sess)\n        facts = pad_sequences(facts_batch, maxlen=self.config.max_seq_len,\n                              value=self.config.data_obj.vocab.word_to_id(self.config.PAD_TOKEN))\n        laws = list_to_multi_hot(laws_batch, len(self.config.data_obj.law2id))\n        accus = list_to_multi_hot(accus_batch, len(self.config.data_obj.accu2id))\n        impris = impris_batch\n        money = money_batch\n        death = death_batch\n\n        # if self.config.train_tasks:\n        train_ops = [getattr(self.model, \"train_{}_op\".format(train_task)) for train_task in self.config.train_tasks]\n\n        to_return = [self.model.loss_law, self.model.loss_accu, self.model.loss_impris,\n                     self.model.loss_l2, self.model.loss_all,\n                     self.model.macro_f1_law,\n                     self.model.micro_f1_law,\n                     self.model.weighted_f1_law,\n                     self.model.macro_f1_accu,\n                     self.model.micro_f1_accu,\n                     self.model.weighted_f1_accu,\n                     self.model.accuracy_impris,\n                     self.model.summaries]\n        to_return.extend(train_ops)\n        feed_dict = {self.model.facts: facts,\n                     self.model.laws: laws,\n                     self.model.accusations: accus,\n                     self.model.imprisonments: impris,\n                     self.model.death: death,\n                     self.model.is_training: True,\n                     self.model.keep_prob: self.config.keep_prob}\n        return self.sess.run(to_return, feed_dict=feed_dict)[: len(to_return) - len(train_ops)]\n","sub_path":"deep_models/trainers/amtl_trainer.py","file_name":"amtl_trainer.py","file_ext":"py","file_size_in_byte":5370,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
{"seq_id":"617245491","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nData for grouping the DataFrame applying the 'o' (or) relation\r\n\r\nCreated on Mon Feb 1 14:54:29 2021\r\n\r\n__author__ = Pedro Biel\r\n__version__ = 0.0.0\r\n__email__ = pbiel@taimweser.com\r\n\"\"\"\r\n\r\n\r\nclass AgrupaO:\r\n    \"\"\"Groups the DataFrame applying the 'o' (or) relation.\"\"\"\r\n    \r\n    def __init__(self, df_implantaciones):\r\n        \"\"\"Initializes df_implantaciones.\"\"\"\r\n\r\n        self.df_implantaciones = df_implantaciones\r\n        \r\n        self.cols = ['V', 'Hx', 'Hy', 'Mx', 'My', 'Mz']\r\n        self.idxs = ['Modelo', 'Apoyo', 'Nudo', 'Grupo', 'Caso', 'Relación']\r\n        \r\n    def dataframe_groupby_sum_max_min(self, df):\r\n        \"\"\"\r\n        Groups df applying the sum.\r\n\r\n        Returns\r\n        -------\r\n        df_sum_max_min : pandas DataFrame\r\n        \"\"\"\r\n        \r\n        df_sum_max_min = df.groupby(self.idxs)[self.cols].sum()\r\n        \r\n        return df_sum_max_min\r\n    \r\n    def dataframe_o(self, level, modelo):\r\n        \"\"\"\r\n        Groups a DataFrame by maxima and minima.\r\n\r\n        Parameters\r\n        ----------\r\n        level : list of str ; level of the indexes of the grouped DataFrame\r\n        modelo : bool ; True -> the model is reported in the columns;\r\n                        False -> the model is not reported in the\r\n                        columns.\r\n\r\n        Returns\r\n        -------\r\n        df_o : pandas DataFrame\r\n        \"\"\"\r\n        \r\n        df_sum_max_min = self.dataframe_groupby_sum_max_min(\r\n            self.df_implantaciones\r\n        )\r\n        \r\n        if modelo:\r\n            df_o = df_sum_max_min.groupby(level=level).agg([\r\n                ('max', max),\r\n                ('Modelo_max', lambda x: x.idxmax()[0]),\r\n                ('min', min),\r\n                ('Modelo_min', lambda x: x.idxmin()[0])\r\n            ])\r\n        else:\r\n            df_o = df_sum_max_min.groupby(level=level).agg(['max', 'min'])\r\n        \r\n        return df_o\r\n    ","sub_path":"Hades_1/dlg_implantaciones/datos/datos_agrupao.py","file_name":"datos_agrupao.py","file_ext":"py","file_size_in_byte":1942,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
{"seq_id":"192059711","text":"import tensorflow as 
tf\nimport tqdm\nimport numpy as np\nimport seaborn as sns # for nice looking graphs\nfrom matplotlib import pyplot as plt\n\nfrom tensorflow.examples.tutorials.mnist import input_data\nmnist = input_data.read_data_sets(\"MNIST_data/\", one_hot=True)\n\nnp.random.seed(42)\n\n\ndef create_layer(input_tensor,\n weight,\n name,\n activation,\n training=None):\n w = tf.Variable(weight)\n b = tf.Variable(tf.zeros([weight.shape[-1]]))\n z = tf.add(tf.matmul(input_tensor, w), b, name='layer_input_%s' % name)\n if name == 'output':\n return z, activation(z, name='activation_%s' % name)\n else:\n return activation(z, name='activation_%s' % name)\n\n\ndef create_batch_norm_layer(input_tensor,\n weight,\n name,\n activation,\n training):\n w = tf.Variable(weight)\n linear_output = tf.matmul(input_tensor, w)\n batch_norm_z = tf.layers.batch_normalization(linear_output,\n training=training,\n name='bn_layer_input_%s' % name)\n if name == 'output':\n return batch_norm_z, activation(batch_norm_z, name='bn_activation_%s' % name)\n else:\n return activation(batch_norm_z, name='bn_activation_%s' % name)\n\n\ndef get_tensors(layer_creation_fn,\n inputs,\n labels,\n weights,\n activation,\n learning_rate,\n is_training):\n l1 = layer_creation_fn(inputs, weights[0], '1', activation, training=is_training)\n l2 = layer_creation_fn(l1, weights[1], '2', activation, training=is_training)\n l3 = layer_creation_fn(l2, weights[2], '3', activation, training=is_training)\n l4 = layer_creation_fn(l3, weights[3], '4', activation, training=is_training)\n l5 = layer_creation_fn(l4, weights[4], '5', activation, training=is_training)\n l6 = layer_creation_fn(l5, weights[5], '6', activation, training=is_training)\n l7 = layer_creation_fn(l6, weights[6], '7', activation, training=is_training)\n l8 = layer_creation_fn(l7, weights[7], '8', activation, training=is_training)\n l9 = layer_creation_fn(l8, weights[8], '9', activation, training=is_training)\n logits, output = layer_creation_fn(\n l9, weights[9], 'output', tf.nn.sigmoid, training=is_training)\n\n cross_entropy = tf.reduce_mean(\n tf.nn.softmax_cross_entropy_with_logits(labels=labels, logits=logits))\n\n correct_prediction = tf.equal(tf.argmax(output, 1), tf.argmax(labels, 1))\n accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n\n if layer_creation_fn.__name__ == 'create_batch_norm_layer':\n with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):\n optimizer = tf.train.GradientDescentOptimizer(\n learning_rate).minimize(cross_entropy)\n else:\n optimizer = tf.train.GradientDescentOptimizer(\n learning_rate).minimize(cross_entropy)\n\n return accuracy, optimizer\n\n\ndef train_network(\n learning_rate_val,\n num_batches,\n batch_size,\n activation,\n bad_init=False,\n plot_accuracy=True):\n\n inputs = tf.placeholder(tf.float32, shape=[None, 784], name='inputs')\n labels = tf.placeholder(tf.float32, shape=[None, 10], name='labels')\n learning_rate = tf.placeholder(tf.float32, name='learning_rate')\n is_training = tf.placeholder(tf.bool, name='is_training')\n\n np.random.seed(42)\n\n scale = 1 if bad_init else 0.1\n\n weights = [\n np.random.normal(size=(784, 100), scale=scale).astype(np.float32),\n np.random.normal(size=(100, 100), scale=scale).astype(np.float32),\n np.random.normal(size=(100, 100), scale=scale).astype(np.float32),\n np.random.normal(size=(100, 100), scale=scale).astype(np.float32),\n np.random.normal(size=(100, 100), scale=scale).astype(np.float32),\n np.random.normal(size=(100, 100), scale=scale).astype(np.float32),\n 
np.random.normal(size=(100, 100), scale=scale).astype(np.float32),\n np.random.normal(size=(100, 100), scale=scale).astype(np.float32),\n np.random.normal(size=(100, 100), scale=scale).astype(np.float32),\n np.random.normal(size=(100, 10), scale=scale).astype(np.float32)]\n\n vanilla_accuracy, vanilla_optimizer = get_tensors(\n create_layer,\n inputs,\n labels,\n weights,\n activation,\n learning_rate,\n is_training)\n\n bn_accuracy, bn_optimizer = get_tensors(\n create_batch_norm_layer,\n inputs,\n labels,\n weights,\n activation,\n learning_rate,\n is_training)\n\n vanilla_accuracy_vals = []\n bn_accuracy_vals = []\n\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n\n for i in tqdm.tqdm(list(range(num_batches))):\n batch_xs, batch_ys = mnist.train.next_batch(batch_size)\n\n sess.run([vanilla_optimizer], feed_dict={\n inputs: batch_xs,\n labels: batch_ys,\n learning_rate: learning_rate_val,\n is_training: True})\n\n sess.run([bn_optimizer], feed_dict={\n inputs: batch_xs,\n labels: batch_ys,\n learning_rate: learning_rate_val,\n is_training: True})\n\n if i % batch_size == 0:\n vanilla_acc = sess.run(vanilla_accuracy, feed_dict={\n inputs: mnist.validation.images,\n labels: mnist.validation.labels,\n is_training: False})\n\n bn_acc = sess.run(bn_accuracy, feed_dict={\n inputs: mnist.validation.images,\n labels: mnist.validation.labels,\n is_training: False})\n\n vanilla_accuracy_vals.append(vanilla_acc)\n bn_accuracy_vals.append(bn_acc)\n\n print(\n 'Iteration: %s; ' % i,\n 'Vanilla Accuracy: %2.4f; ' % vanilla_acc,\n 'BN Accuracy: %2.4f' % bn_acc)\n\n if plot_accuracy:\n plt.title('Training Accuracy')\n plt.plot(range(0, len(vanilla_accuracy_vals) * batch_size, batch_size),\n vanilla_accuracy_vals, label='Vanilla network')\n plt.plot(range(0, len(bn_accuracy_vals) * batch_size, batch_size),\n bn_accuracy_vals, label='Batch Normalized network')\n plt.tight_layout()\n plt.legend()\n plt.grid(True)\n plt.show()\n\n\nif __name__ == '__main__':\n # train_network(0.01, 2000, 60, tf.nn.tanh)\n # train_network(0.01, 5000, 60, tf.nn.tanh, bad_init=True)\n # train_network(1, 5000, 60, tf.nn.tanh, bad_init=True)\n # train_network(0.01, 2000, 60, tf.nn.relu)\n train_network(0.01, 5000, 60, tf.nn.relu, bad_init=True)","sub_path":"Batch_Normalization/example_tf_0.2.py","file_name":"example_tf_0.2.py","file_ext":"py","file_size_in_byte":6999,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"58965921","text":"# **Reminder**: The general methodology to build a Neural Network is to:\n# 1. Define the neural network structure ( # of input units, # of hidden units, etc). \n# 2. Initialize the model's parameters\n# 3. Loop:\n# - Implement forward propagation\n# - Compute loss\n# - Implement backward propagation to get the gradients\n# - Update parameters (gradient descent)\n# \n# You often build helper functions to compute steps 1-3 and then merge them into one function \n#we call `nn_model()`. 
Once you've built `nn_model()` and learnt the right parameters, you can make predictions on new data.\n# The helper modules each expose a function named like the module, so import\n# the functions rather than the bare modules (the original imported the modules\n# and then called them directly, which would fail).\nfrom layer_sizes import layer_sizes\nfrom initialize_parameters import initialize_parameters\nfrom update_parameters import update_parameters\nfrom compute_cost import compute_cost\nfrom backward_propagation import backward_propagation\n# forward_propagation and predict are called below but were never imported in\n# the original; sibling modules of the same name are assumed here.\nfrom forward_propagation import forward_propagation\nfrom predict import predict\n\n# np and plt below are expected to be re-exported by planar_utils\nfrom planar_utils import *\n\n\n\ndef nn_model(X, Y, n_h, num_iterations = 10000, print_cost=False):\n \"\"\"\n Arguments:\n X -- dataset of shape (2, number of examples)\n Y -- labels of shape (1, number of examples)\n n_h -- size of the hidden layer\n num_iterations -- Number of iterations in gradient descent loop\n print_cost -- if True, print the cost every 1000 iterations\n \n Returns:\n parameters -- parameters learnt by the model. They can then be used to predict.\n \"\"\"\n\n\n np.random.seed(3)\n n_x = layer_sizes(X, Y)[0]\n n_y = layer_sizes(X, Y)[2]\n \n # Initialize parameters, then retrieve W1, b1, W2, b2. Inputs: \"n_x, n_h, n_y\". Outputs = \"W1, b1, W2, b2, parameters\".\n ### START CODE HERE ### (≈ 5 lines of code)\n parameters = initialize_parameters(n_x, n_h, n_y)\n W1 = parameters[\"W1\"]\n b1 = parameters[\"b1\"]\n W2 = parameters[\"W2\"]\n b2 = parameters[\"b2\"]\n ### END CODE HERE ###\n \n # Loop (gradient descent)\n\n for i in range(0, num_iterations):\n \n ### START CODE HERE ### (≈ 4 lines of code)\n # Forward propagation. Inputs: \"X, parameters\". Outputs: \"A2, cache\".\n A2, cache = forward_propagation(X, parameters)\n \n # Cost function. Inputs: \"A2, Y, parameters\". Outputs: \"cost\".\n cost = compute_cost(A2, Y, parameters)\n \n # Backpropagation. Inputs: \"parameters, cache, X, Y\". Outputs: \"grads\".\n grads = backward_propagation(parameters, cache, X, Y)\n \n # Gradient descent parameter update. Inputs: \"parameters, grads\". Outputs: \"parameters\".\n parameters = update_parameters(parameters, grads)\n \n ### END CODE HERE ###\n \n # Print the cost every 1000 iterations\n if print_cost and i % 1000 == 0:\n print (\"Cost after iteration %i: %f\" %(i, cost))\n\n return parameters\n\nnoisy_circles, noisy_moons, blobs, gaussian_quantiles, no_structure = load_extra_datasets()\n\ndatasets = {\"noisy_circles\": noisy_circles,\n \"noisy_moons\": noisy_moons,\n \"blobs\": blobs,\n \"gaussian_quantiles\": gaussian_quantiles}\n\n### START CODE HERE ### (choose your dataset)\ndataset = \"noisy_moons\"\n### END CODE HERE ###\n\nX, Y = datasets[dataset]\nX, Y = X.T, Y.reshape(1, Y.shape[0])\n\n# make blobs binary\nif dataset == \"blobs\":\n Y = Y%2\n\n# Visualize the data\nplt.scatter(X[0, :], X[1, :], c=Y, s=40, cmap=plt.cm.Spectral);\n\nparameters = nn_model(X, Y, n_h = 4, num_iterations = 10000, print_cost=True)\n\n# Plot the decision boundary\nplot_decision_boundary(lambda x: predict(parameters, x.T), X, Y)\nplt.title(\"Decision Boundary for hidden layer size \" + str(4))","sub_path":"nn_model.py","file_name":"nn_model.py","file_ext":"py","file_size_in_byte":3405,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"265794820","text":"list1=['cat', 'dog','fish','bird']\n\nimport random\nx=random.choice(list1)\n#n=random.randint(1,4)\nnew=list(x)\nrandom.shuffle(new)\nprint(\"\".join(new))\nn = input(\"dap an? 
\")\nif n==x:\n print(\"ye\")\nelse:\n print(\"leu leu\")\n","sub_path":"session7/25.py","file_name":"25.py","file_ext":"py","file_size_in_byte":222,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"57047402","text":"import unittest\n\nfrom app.utils import utils\n\n\nclass TestUtils(unittest.TestCase):\n\n def test_get_domain(self):\n self.assertEqual(utils.get_domain(\"https://www.adobe.com/search_performace/testing\"), \"adobe.com\",\n \"get the domain from url\")\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"Spark/search_performance/test/test_utils.py","file_name":"test_utils.py","file_ext":"py","file_size_in_byte":323,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"440499310","text":"from django.shortcuts import render, redirect, HttpResponse\r\nfrom time import strftime, localtime\r\nimport random, datetime\r\n\r\n\r\ndef index(request):\r\n if 'gold_total' in request.session:\r\n print('gold_total =', request.session['gold_total'])\r\n pass\r\n else:\r\n request.session['gold_total'] = 0\r\n request.session['message'] = []\r\n print('New Player!')\r\n return render(request, \"ninja_app/index.html\")\r\n\r\n\r\ndef process(request):\r\n timestamp = strftime('%#H:%M:%S%p, %B %#d, %Y', localtime()) \r\n if request.POST['building'] == 'farm':\r\n gold = random.randrange (10,21)\r\n print(gold)\r\n request.session['gold_total'] += gold\r\n data = {\r\n \"ledger\" : \"Earned\",\r\n \"status\" : \"earned\",\r\n 'gold' : gold,\r\n 'building' : request.POST['building'],\r\n 'time' : timestamp\r\n }\r\n request.session['message'].append(data)\r\n request.session.modified = True\r\n if request.POST['building'] == 'cave':\r\n gold = random.randrange (5,11)\r\n print(gold)\r\n request.session['gold_total'] += gold\r\n data = {\r\n \"ledger\" : \"Earned\",\r\n \"status\" : \"earned\",\r\n 'gold' : gold,\r\n 'building' : request.POST['building'],\r\n 'time' : timestamp\r\n }\r\n request.session['message'].append(data)\r\n request.session.modified = True\r\n if request.POST['building'] == 'house':\r\n gold = random.randrange (2,6)\r\n print(gold)\r\n request.session['gold_total'] += gold\r\n data = {\r\n \"ledger\" : \"Earned\",\r\n \"status\" : \"earned\",\r\n 'gold' : gold,\r\n 'building' : request.POST['building'],\r\n 'time' : timestamp\r\n }\r\n request.session['message'].append(data)\r\n request.session.modified = True\r\n if request.POST['building'] == 'casino':\r\n gold = random.randrange (-50,51)\r\n print(gold)\r\n request.session['gold_total'] += gold\r\n if gold >= 0:\r\n data = {\r\n \"ledger\" : \"Earned\",\r\n \"status\" : \"earned\",\r\n 'gold' : gold,\r\n 'building' : request.POST['building'],\r\n 'time' : timestamp\r\n }\r\n request.session['message'].append(data)\r\n request.session.modified = True\r\n else:\r\n data = {\r\n \"ledger\" : \"Lost\",\r\n \"status\" : \"lost\",\r\n 'gold' : gold,\r\n 'building' : request.POST['building'],\r\n 'time' : timestamp\r\n }\r\n request.session['message'].append(data)\r\n request.session.modified = True\r\n return redirect ('/')\r\n\r\ndef reset(request):\r\n request.session.clear()\r\n return redirect ('/')","sub_path":"ninja_gold/apps/ninja_app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2858,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"628284727","text":"import os\nfrom typing import List, Union\n\nfrom .compute_node import 
ComputeNode\n\nIntStr = Union[int, str]\n\n\nclass CoriKNLNode(ComputeNode):\n\n cpu_ids: List[IntStr] = list(range(68))\n gpu_ids: List[IntStr] = []\n\n @classmethod\n def get_job_nodelist(cls) -> List[\"CoriKNLNode\"]:\n \"\"\"\n Get all compute nodes allocated in the current job context\n \"\"\"\n nodelist_str = os.environ[\"SLURM_NODELIST\"]\n # string like: nid[02529,02878,03047,03290,03331,03813,11847-11848]\n # or like: nid0[3038-3039,8241-8246]\n # or like: nid00[858-861]\n # or like: nid000[10-11]\n # or like: nid00558\n\n # first remove 'nid', '[' and ']'\n nodelist_str = nodelist_str.replace(\"nid\", \"\").replace(\"[\", \"\").replace(\"]\", \"\")\n # now have something like '02529,02878,03047,03290,03331,03813,11847-11848'\n # split by comma\n node_ranges_str = nodelist_str.split(\",\")\n node_ids = []\n lo: Union[str, int]\n hi: Union[int, List[str]]\n for node_range_str in node_ranges_str:\n lo, *hi = node_range_str.split(\"-\")\n lo = int(lo)\n if hi:\n hi = int(hi[0])\n node_ids.extend(list(range(lo, hi + 1)))\n else:\n node_ids.append(lo)\n\n return [cls(node_id, f\"nid{node_id:05d}\") for node_id in node_ids]\n\n\nif __name__ == \"__main__\":\n if \"SLURM_NODELIST\" not in os.environ:\n os.environ[\"SLURM_NODELIST\"] = \"nid0[3038-3039,8241-8246]\"\n print([str(x) for x in CoriKNLNode.get_job_nodelist()])\n","sub_path":"balsam/platform/compute_node/nersc_coriknl_node.py","file_name":"nersc_coriknl_node.py","file_ext":"py","file_size_in_byte":1579,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"330982451","text":"import matplotlib.pyplot as plt\nimport matplotlib as m\nimport numpy as np \nimport numpy.random\nimport pickle\nimport sys\nimport glob\nimport math\nimport itertools\nimport featureSel\n\nskipDrugList=featureSel.skipDrugList\n\ndef main(argv):\n\n\tuniqID=int(argv[0])\n\n\tfsNum=100\n\tnumTrial=100\n\tcancerTypeNameList=featureSel.cancerTypeNameList\n\tcancerTypeNum=len(cancerTypeNameList)\n\ttotalHeatMapData=np.zeros((10,24,cancerTypeNum,numTrial),dtype=np.float64)\n\t\n\t# make avg heatmap\n\tfor i in range(numTrial):\n\t\teachHeatMapData=pickle.load(open('./heatMapData/bigHeatMapData-{}.pickle{}'.format(fsNum,uniqID+i),'rb'))\n\t\teachHeatMapData=eachHeatMapData[0]\n\t\tfor x in range(10):\n\t\t\tfor y in range(24):\n\t\t\t\tfor z in range(cancerTypeNum):\n\t\t\t\t\ttotalHeatMapData[x][y][z][i]=eachHeatMapData[x][y][z]\n\t\n\tavgHeatMapTmp=np.zeros((10,24,cancerTypeNum),dtype=np.float64)\n\tavgHeatMapTmp=np.sum(totalHeatMapData,axis=3)/float(numTrial)\n\n\tavgHeatMapBestTmp=np.zeros((24,cancerTypeNum),dtype=np.float64)\n\tfor x in range(24):\n\t\tfor y in range(cancerTypeNum):\n\t\t\tbest=0.\n\t\t\tfor z in range(10):\n\t\t\t\tif best<avgHeatMapTmp[z][x][y]:\n\t\t\t\t\tbest=avgHeatMapTmp[z][x][y]\n\t\t\tavgHeatMapBestTmp[x][y]=best\n\n\t# NOTE: the plotting code below was lost to markup stripping in the source;\n\t# it is reconstructed to mirror the commented-out block further down.\n\tplt.figure(1)\n\tmax=np.amax(avgHeatMapBestTmp)\n\tmin=np.amin(avgHeatMapBestTmp)\n\tplt.title('MIN: {:.3f}, MAX: {:.3f}'.format(min,max))\n\thereHeat=plt.pcolor(avgHeatMapBestTmp.T)\n\tcbar=plt.colorbar(hereHeat)\n\n\t# cbar.ax.get_yaxis().set_ticks([])\n\t# for j, lab in enumerate(['$0$','$1$','$2$','$>3$']):\n\t# \tcbar.ax.text(.5, (2 * j + 1) / 8.0, lab, ha='center', va='center')\n\t\n\t# columns = ['{:.2f}'.format(each) for each in threshList]\n\n\tcolumns=['{}'.format(each) for each in sens_threshList]\n\trows=['{}'.format(each) for each in cancerTypeNameList]\n\tplt.xticks(np.arange(0,24)+0.5,columns)\n\tplt.yticks(np.arange(0,26)+0.5,rows)\n\tplt.xlabel('Drug # that starts from 0.',fontsize=15)\n\tplt.ylabel('CancerType # that starts from 0.',fontsize=15)\n\n\n\t# ###\n\t# ###\n\t# ###\n\t# plt.figure(2)\n\t# max=np.amax(heatMapData2)\n\t# min=np.amin(heatMapData2)\n\t# plt.title('F-score -> MIN: {:.3f}, MAX: {:.3f}'.format(min,max))\n\t# hereHeat=plt.pcolor(heatMapData2)\n\t# cbar=plt.colorbar(hereHeat)\n\t# # plt.clim(0,0.4)\n\n\t\n\t# # 
cbar.ax.get_yaxis().set_ticks([])\n\t# # cbar.ax.text(5.0, 0, ' Min: {0}'.format(min), ha='center', va='center')\n\t# # cbar.ax.text(5.0, 1.0, ' Max: {0}'.format(max), ha='center', va='center')\n\n\t# # for j, lab in enumerate(['$0$','$1$','$2$','$>3$']):\n\t# # \tcbar.ax.text(.5, (2 * j + 1) / 8.0, lab, ha='center', va='center')\n\t# columns = ['{:.2f}'.format(each) for each in threshList]\n\t# rows=['{:.2f}'.format(each) for each in sens_threshList]\n\t# plt.xticks(np.arange(0,10)+0.5,columns)\n\t# plt.yticks(np.arange(0,24)+0.5,rows)\n\t# plt.xlabel('Threshold for graph construction.',fontsize=15)\n\t# plt.ylabel('Drug # that starts from 0.',fontsize=15)\n\t# # plt.savefig('ari-{}.png'.format(uniqID))\n\t# ###\n\t# ###\n\t# ###\n\n\tplt.show()\n\n\nif __name__ == \"__main__\":\n\tmain(sys.argv[1:])\n","sub_path":"heatMapDrawPair.py","file_name":"heatMapDrawPair.py","file_ext":"py","file_size_in_byte":3493,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"123110022","text":"from splinter import Browser\nfrom bs4 import BeautifulSoup\nfrom flask import Flask, render_template, redirect\nimport pandas as pd\nimport pymongo\nfrom flask_pymongo import PyMongo\nimport time\n\nexecutable_path = {'executable_path': 'chromedriver.exe'}\n\n\napp = Flask(__name__)\n\napp.config[\"MONGO_URI\"] = \"mongodb://localhost:27017/Mars\"\nmongo = PyMongo(app)\n\n\ndef scrape():\n browser = Browser('chrome', **executable_path, headless=False)\n conn = 'mongodb://localhost:27017'\n client = pymongo.MongoClient(conn)\n db = client.Mars\n Mars = db.Mars.find()\n\n url = 'https://mars.nasa.gov/news/'\n browser.visit(url)\n \t\n time.sleep(3)\n html = browser.html\n soup = BeautifulSoup(html, 'html.parser')\n firstitem = soup.find('ul', class_='item_list').find('li', class_='slide')\n newstitle = firstitem.find('h3').text\n newsp = firstitem.find('div', class_='article_teaser_body').text\n url = 'https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars'\n browser.visit(url)\n time.sleep(3)\n html = browser.html\n soup = BeautifulSoup(html, 'html.parser')\n imageitem = soup.find('div', class_='carousel_items')\n\n featured_image_url = url[0:url.find('/spaceimages')] + imageitem.find('a')['data-fancybox-href']\n\n url = 'https://twitter.com/marswxreport?lang=en'\n browser.visit(url)\n time.sleep(3)\n html = browser.html\n soup = BeautifulSoup(html, 'html.parser')\n mars_weather = soup.find('div',class_='js-tweet-text-container').text.strip()\n\n url = 'http://space-facts.com/mars/'\n tables = pd.read_html(url)\n df =tables[0]\n df.columns = ['description', 'value']\n df.set_index('description', inplace=True)\n myhtml = df.to_html()\n url = 'http://web.archive.org/web/20181114171728/https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars'\n browser.visit(url)\n time.sleep(3)\n\n html = browser.html\n soup = BeautifulSoup(html, 'html.parser')\n sites = soup.find('div',class_='collapsible results').find_all('a')\n myurl=''\n hemisphere_image_urls = []\n for site in sites:\n if (site.find('h3')):\n mydict ={'title':site.find('h3').text}\n myurl = url[0:url.find('/web/')] + site['href']\n browser.visit(myurl)\n html = browser.html\n soup = BeautifulSoup(html, 'html.parser')\n mydict['img_url'] = soup.find('div', class_='downloads').find('ul').find('a', text='Sample')['href']\n hemisphere_image_urls.append(mydict)\n mydata ={'newstitle':newstitle, 'newa_p':newsp, 'featured_image_url':featured_image_url, 'mars_weather':mars_weather, 
'mars_facts':myhtml, 'hemisphere_image_urls':hemisphere_image_urls}\n db.Mars.update({}, mydata, upsert=True)\n\n\n\n\n# create route that renders index.html template\n@app.route(\"/\")\ndef index():\n MarsData = mongo.db.Mars.find_one()\n return render_template(\"index.html\", MarsData=MarsData)\n\n@app.route(\"/scrape\")\ndef scraper():\n scrape()\n return redirect(\"/\", code=302)\n\nif __name__ == \"__main__\":\n app.run(debug=True)\n","sub_path":"12/scrape_mars.py","file_name":"scrape_mars.py","file_ext":"py","file_size_in_byte":3071,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"491183532","text":"# ---------------------------------------------------------------------------\n# NAME: ARD HEA Tools.py\n# Version: 2.0 (ArcGIS 10.2)\n# Author: Research Planning, Inc.\n#\n# Description: Module containing reuseable code for ARD HEA Tool python scripts\n#\n# Notes: Currently the tool is designed to only be run via the ARD HEA Toolbox.\n#\n# Date Created: December 11, 2012\n# Date Modified: September 13, 2013\n#\n# ---------------------------------------------------------------------------\n\ndef version():\n text = \"2.0\"\n return text\n\ndef valueout (wfile, preface, content):\n import arcpy\n if content is not None and content != \" \":\n arcpy.AddMessage(\" \" + str(preface)+ \" \" + str(content))\n wfile.write(\" \" + str(preface)+ \" \" + str(content) + \"\\n\")\n else:\n arcpy.AddMessage(\" \" + str(preface)+ \" UNKNOWN\")\n wfile.write(\" \" + str(preface)+ \" UNKNOWN\\n\")\n\ndef stringout (wfile, preface, content):\n import arcpy\n if content is not None: \n arcpy.AddMessage(\" \" + str(preface)+ \" \" + content)\n wfile.write(\" \" + str(preface)+ \" \" + content + \"\\n\")\n else:\n arcpy.AddMessage(\" \" + str(preface)+ \" UNKNOWN\")\n wfile.write(\" \" + str(preface)+ \" UNKNOWN\\n\") \n\ndef textout (wfile, content): \n import arcpy\n arcpy.AddMessage(str(content))\n wfile.write(str(content) + \"\\n\")\n\ndef get_process_history (directory, inputLayer):\n import arcpy\n xmlDoc = directory + \"\\\\temp.xml\"\n f = open(xmlDoc, \"w\")\n # NOTE: the literal XML strings in this function were stripped from the\n # source; an empty <metadata /> stub and <lineage> markers are assumed.\n f.write(\"<metadata />\")\n f.close()\n arcpy.MetadataImporter_conversion(inputLayer, xmlDoc)\n f = open(xmlDoc, \"r\")\n mdText = f.read()\n f.close()\n start = mdText.find(\"<lineage>\")+10\n stop = mdText.find(\"</lineage>\")\n if start != -1:\n process_history_text = mdText[start:stop]\n else:\n process_history_text = \"\"\n arcpy.Delete_management(xmlDoc)\n return process_history_text\n\ndef set_process_history (directory, inputLayer, process_history_text):\n import arcpy\n import string\n xmlDoc = directory + \"\\\\temp.xml\"\n newxmlDoc = directory + \"\\\\newtemp.xml\"\n f = open(xmlDoc, \"w\")\n # NOTE: same assumed <metadata /> stub and <lineage> markers as above.\n f.write(\"<metadata />\")\n f.close()\n arcpy.MetadataImporter_conversion(inputLayer, xmlDoc)\n f = open(xmlDoc, \"r\")\n mdText = f.read()\n f.close()\n start = mdText.find(\"<lineage>\")+10\n stop = mdText.find(\"</lineage>\")\n old = mdText[start:stop]\n if start != -1:\n newmdText = string.replace(mdText, old, process_history_text, 1)\n else:\n newmdText = mdText\n f = open(newxmlDoc, \"w\")\n f.write(newmdText)\n f.close()\n arcpy.MetadataImporter_conversion(newxmlDoc, inputLayer) \n arcpy.Delete_management(xmlDoc)\n arcpy.Delete_management(newxmlDoc)\n\ndef sanitize (input):\n import string\n whitelist = string.letters + string.digits + \"_\"\n output = ''\n flag = 0\n for char in input:\n if char in whitelist:\n output += char\n if flag == 1:\n flag = 0\n else:\n if flag == 0:\n output += '_'\n flag = 1\n else:\n flag = 1\n if output[0] in string.digits:\n output = \"N\"+output\n return 
output.strip(\"_\")\n\ndef sanitizetext (input):\n import string\n whitelist = string.letters + string.digits + \"_\"\n output = ''\n flag = 0\n for char in input:\n if char in whitelist:\n output += char\n if flag == 1:\n flag = 0\n else:\n if flag == 0:\n output += '_'\n flag = 1\n else:\n flag = 1\n return output.strip(\"_\")\n\n","sub_path":"ARD_HEA_Tools_backup.py","file_name":"ARD_HEA_Tools_backup.py","file_ext":"py","file_size_in_byte":3706,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"452740296","text":"\"\"\" RulesMenu class module.\n\"\"\"\n\nfrom typing import List, Callable\nfrom http.client import HTTPException\nfrom dms2021client.data.rest.exc import BadRequestError, NotFoundError\nfrom dms2021client.data.rest.exc import UnauthorizedError\nfrom dms2021client.data.rest import AuthService, SensorsService\nfrom dms2021client.presentation.sensor_menus.addrulesmenu import AddRulesMenu\nfrom dms2021client.presentation.orderedmenu import OrderedMenu\nfrom colorama import Fore # type: ignore\n\nclass RulesMenu(OrderedMenu):\n \"\"\" Add, Remove, Show or Execute Rules.\n \"\"\"\n\n def __init__(self, session_token: str, username: str,\n authservice: AuthService, sensorservice: SensorsService):\n \"\"\" Constructor method.\n\n Initializes the variables.\n ---\n Parameters:\n - session_token: The session_token of the user string.\n - username: The username string.\n - authservice: REST client to connect to the authentication service.\n - sensorservice: REST client to connect to the sensors service.\n \"\"\"\n self.__session_token: str = session_token\n self.__username: str = username\n self.__authservice: AuthService = authservice\n self.__sensorservice: SensorsService = sensorservice\n\n def set_title(self) -> None:\n \"\"\" Sets the menu title (the string displayed as the menu heading).\n \"\"\"\n self._ordered_title = \"MENÚ REGLAS\"\n\n def set_items(self) -> None:\n \"\"\" Sets the menu items (the strings displayed as the menu options).\n \"\"\"\n items: List[str] = []\n if self.__authservice.has_right(self.__username, \"AdminRules\"):\n items += [\"Ver reglas\", \"Añadir regla\", \"Eliminar regla\"]\n if self.__authservice.has_right(self.__username, \"ViewReports\"):\n items.append(\"Ejecutar regla\")\n if self.__authservice.has_right(self.__username, \"ViewReports\"):\n items.append(\"Ver historial de ejecución\")\n self._ordered_items = items\n\n def set_opt_fuctions(self) -> None:\n \"\"\" Sets the functions that will be executed when an option is selected.\n \"\"\"\n functions: List[Callable] = []\n if self.__authservice.has_right(self.__username, \"AdminRules\"):\n functions += [self.get_rules,\n AddRulesMenu(self.__session_token, self.__username,\n self.__authservice, self.__sensorservice).show_options,\n self.remove_rules]\n if self.__authservice.has_right(self.__username, \"ViewReports\"):\n functions.append(self.run_rule)\n if self.__authservice.has_right(self.__username, \"ViewReports\"):\n functions.append(self.get_log)\n self._ordered_opt_functions = functions\n\n def get_rules(self) -> None:\n \"\"\" Gets the list of rules.\n \"\"\"\n print(\"-\"*20 + \"VER REGLAS\" + \"-\"*20 + \"\\n\")\n try:\n result: List[dict] = self.__sensorservice.get_all_rules(self.__username)\n for 
rule in result:\n for k in rule:\n print(\"[\" + k.upper() + \"] -> \" + str(rule[k]))\n print(\"-\"*50)\n except UnauthorizedError:\n self.print_error(\"Usted no tiene permisos para realizar esta acción.\")\n except HTTPException:\n self.print_error(\"Ha ocurrido un error inesperado.\")\n\n def remove_rules(self) -> None:\n \"\"\" Removes a specified rule.\n \"\"\"\n try:\n print(\"-\"*20 + \"ELIMINAR REGLA\" + \"-\"*20 + \"\\n\")\n rulename: str = input(\"Introduzca el nombre de la regla: \")\n self.__sensorservice.delete_rule(rulename, self.__username)\n print(Fore.GREEN + \"\\n La regla \" + rulename + \" ha sido eliminada correctamente.\"\n + Fore.RESET)\n except BadRequestError:\n self.print_error(\"Se han introducido parámetros incorrectos.\")\n except UnauthorizedError:\n self.print_error(\"Usted no tiene permisos para realizar esta acción.\")\n except NotFoundError:\n self.print_error(\"No existe una regla con ese nombre.\")\n except HTTPException:\n self.print_error(\"Ha ocurrido un error inesperado.\")\n\n def run_rule(self) -> None:\n \"\"\" Runs a specified rule.\n \"\"\"\n try:\n print(\"-\"*20 + \"EJECUTAR REGLA\" + \"-\"*20 + \"\\n\")\n rulename: str = input(\"Introduzca el nombre de la regla: \")\n result: dict = self.__sensorservice.run_rule(rulename, self.__username)\n print(\"Resultado de la ejecución: \")\n print(result[\"result\"])\n except BadRequestError:\n self.print_error(\"Se han introducido parámetros incorrectos.\")\n except UnauthorizedError:\n self.print_error(\"Usted no tiene permisos para realizar esta acción.\")\n except NotFoundError:\n self.print_error(\"No existe una regla con ese nombre.\")\n except HTTPException:\n self.print_error(\"Ha ocurrido un error inesperado.\")\n\n def get_log(self) -> None:\n \"\"\" Gets the log.\n \"\"\"\n try:\n print(\"-\"*20 + \"VER HISTORIAL DE EJECUCIÓN\" + \"-\"*20 + \"\\n\")\n result: List[dict] = self.__sensorservice.get_log(self.__username)\n for rule in result:\n for k in rule:\n print(\"[\" + k.upper() + \"] -> \" + str(rule[k]))\n print(\"-\"*66)\n except UnauthorizedError:\n self.print_error(\"Usted no tiene permisos para realizar esta acción.\")\n except HTTPException:\n self.print_error(\"Ha ocurrido un error inesperado.\")\n","sub_path":"components/dms2021client/dms2021client/presentation/sensor_menus/rulesmenu.py","file_name":"rulesmenu.py","file_ext":"py","file_size_in_byte":5968,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"506150744","text":"\"\"\"\r\nThis is the main file for the generation of the images of the parts from the robot;\r\nthis is the file that you call from the Blender console using the following command\r\nexec(open(\"D:\\\\Documents\\\\PycharmProjects\\\\BioloidPartsDetection\\\\ImageGeneration\\\\mainDistractors.py\").read())\r\n\"\"\"\r\nimport os\r\nimport sys\r\nimport bpy\r\n\r\n# Save the Blender context in the variable C to manage it from the console, and\r\n# configure the images to render with no background\r\nC = bpy.context\r\nC.scene.render.engine = 'CYCLES'\r\nC.scene.render.film_transparent = True\r\n\r\n# If the computer has an Nvidia GPU available, use it\r\ntry:\r\n C.user_preferences.addons['cycles'].preferences.compute_device_type = 'CUDA'\r\n C.user_preferences.addons['cycles'].preferences.devices[0].use = True\r\nexcept(ValueError, Exception):\r\n print(\"Warning: CUDA device not detected, using CPU instead!\", file=sys.stderr)\r\n\r\n# The directory of the module, so it can be added to the path\r\ndirectory = 
'D:\\\\Documents\\\\PycharmProjects\\\\BioloidPartsDetection'\r\n\r\nif not (directory in sys.path):\r\n sys.path.append(directory)\r\n\r\nimport ImageGeneration.RenderInterface as Render\r\n\r\n\"\"\" ************* User parameters for render ************* \"\"\"\r\nnum_images = 100\r\nobj_name = 'Plant'\r\nrender_folder = 'D:\\\\Documents\\\\PycharmProjects\\\\BioloidPartsDetection\\\\Render_Workspace\\\\Renders\\\\FlyingDistractorsPNG'\r\n\r\n# Instantiate the RenderInterface class and call the subject_in_scene method to render a object already in blender\r\n# scene (With it own texture or nodes)\r\nRI = Render.RenderInterface(num_images=num_images)\r\nRI.subject_in_scene(render_folder)\r\n\r\n\"\"\"\r\nRenderInterface has a BlenderRandomScene object that controls random variables responsible for the scene.\r\nThese random variables have distributions associated with them. Here we show how to render with the default \r\ndistributions.\r\n\r\nSetting distribution parameters.\r\nOne can change the distribution parameters of certain attributes in the rendering engine. \r\nThis involves specifying the attribute that needs to be adjusted (as long as the attribute exists) and\r\nthen specifying the parameter to tune.\r\n\r\nFor instance num_lamps is varied according to the continuous uniform distribution U[l,r]. This makes l and r \r\n(the upper and lower bound of the U-distribution) tunable parameters. For lamp energy, this is a truncated \r\nnormal with parameters: {mu: mean, sigmu: sigma/mu, l: lower bound, r: upper bound} and any of these can be tuned.\r\nHere the camera radius is set to be far of the figure, that its for the final part in the picture does not appear \r\ncut off and to be \r\n\"\"\"\r\nRI.set_attribute_distribution_params('num_lamps', 'mid', 6)\r\nRI.set_attribute_distribution_params('lamp_energy', 'mu', 50.0)\r\nRI.set_attribute_distribution_params('lamp_size', 'mu', 5.)\r\nRI.set_attribute_distribution_params('camera_radius', 'mu', 25)\r\nRI.set_attribute_distribution_params('camera_radius', 'l', 20)\r\n\"\"\"\r\nYou could also change the distribution of an attribute entirely, by giving it a distribution name. 
This will \r\nbe one of the distributions specified in ImageGeneration/RandomLib/random_render.py\r\nThe function signature is as follows: RI.set_attribute_distribution(attr_name, dist=dist_name, kwargs)\r\nWhere kwargs is a keyword argument dict of the required parameters for each distribution\r\n\"\"\"\r\nRI.set_attribute_distribution('lamp_energy', {'dist': 'UniformD', 'l': 0.0, 'r': 20.0})\r\nRI.set_attribute_distribution_params('camera_loc', 'normals', 'XYZ')\r\nRI.set_attribute_distribution_params('camera_loc', 'phi_sigma', 10.0)\r\n\r\n# Calling render_all creates all the images in the render_folder\r\nRI.render_all(dump_logs=False, visualize=True)\r\n\r\n# And finally rename the files in the render folder for the name of the object\r\nfiles_directory = os.listdir(render_folder)\r\ni = 0\r\nfor file in files_directory:\r\n os.rename(os.path.join(render_folder, file), os.path.join(render_folder, obj_name + str(i) + '.png'))\r\n i = i + 1\r\n","sub_path":"ImageGeneration/mainDistractors.py","file_name":"mainDistractors.py","file_ext":"py","file_size_in_byte":3944,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"227338330","text":"import hashlib\nfrom contextlib import contextmanager\n\nimport pytest\nfrom twitter.common.contextutil import temporary_file\n\nfrom pex.http import CachedRequestsContext, Context, RequestsContext, StreamFilelike, UrllibContext\nfrom pex.link import Link\n\ntry:\n from responses import RequestsMock\nexcept ImportError:\n RequestsMock = None\n\ntry:\n import requests\nexcept ImportError:\n requests = None\n\nBLOB = b'random blob of data'\nNO_REQUESTS = 'RequestsMock is None or requests is None'\n\n\ndef make_md5(blob):\n md5 = hashlib.md5()\n md5.update(blob)\n return md5.hexdigest()\n\n\n@contextmanager\ndef patch_requests():\n requests_mock = RequestsMock()\n requests_mock.start()\n yield requests_mock\n requests_mock.stop()\n\n\n@contextmanager\ndef make_url(blob, md5_fragment=None):\n url = 'http://pypi.python.org/foo.tar.gz'\n if md5_fragment:\n url += '#md5=%s' % md5_fragment\n\n with patch_requests() as responses:\n responses.add(\n responses.GET,\n url,\n status=200,\n body=blob,\n content_type='application/x-compressed')\n\n yield url\n\n\n@pytest.mark.skipif(NO_REQUESTS)\ndef test_stream_filelike_with_correct_md5():\n with make_url(BLOB, make_md5(BLOB)) as url:\n request = requests.get(url)\n filelike = StreamFilelike(request, Link.wrap(url))\n assert filelike.read() == BLOB\n\n\n@pytest.mark.skipif(NO_REQUESTS)\ndef test_stream_filelike_with_incorrect_md5():\n with make_url(BLOB, 'f' * 32) as url:\n request = requests.get(url)\n filelike = StreamFilelike(request, Link.wrap(url))\n with pytest.raises(Context.Error):\n filelike.read()\n\n\n@pytest.mark.skipif(NO_REQUESTS)\ndef test_stream_filelike_without_md5():\n with make_url(BLOB) as url:\n request = requests.get(url)\n filelike = StreamFilelike(request, Link.wrap(url))\n assert filelike.read() == BLOB\n\n\n@pytest.mark.skipif(NO_REQUESTS)\ndef test_requests_context():\n context = RequestsContext(verify=False)\n\n with make_url(BLOB, make_md5(BLOB)) as url:\n assert context.read(Link.wrap(url)) == BLOB\n\n with make_url(BLOB, make_md5(BLOB)) as url:\n filename = context.fetch(Link.wrap(url))\n with open(filename, 'rb') as fp:\n assert fp.read() == BLOB\n\n # test local reading\n with temporary_file() as tf:\n tf.write(b'goop')\n tf.flush()\n assert context.read(Link.wrap(tf.name)) == 
b'goop'\n","sub_path":"tests/test_http.py","file_name":"test_http.py","file_ext":"py","file_size_in_byte":2320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"525120524","text":"from django.urls import include, path\nfrom . import views\nfrom user_profiles.views import CustomerSignUpView, UserAgentSignUpView\nfrom django.contrib.auth.views import LoginView, LogoutView\n\n\napp_name = 'user_profiles'\nurlpatterns = [\n path('register-customer/', CustomerSignUpView.as_view(), name='register-customer'),\n path('register-agent/', UserAgentSignUpView.as_view(), name='register-agent'), \n path('mylogin/', views.mylogin, name='login'),#Handle login for a particular user\n path('mylogout/', views.mylogout, name='logout'),\n path('accounts/login/', LoginView.as_view()),#Class-based views must be wired up via .as_view()\n path('login_success/', views.login_success, name='login_success'),\n path('create-contact-message', views.contact, name='contact'),\n]\n\n","sub_path":"user_profiles/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":728,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"181279865","text":"import os\nimport settings\nimport kuberfest.commands\nfrom kuberfest.tools.debug import Debug\n\n\ndef run(project, value):\n if not value:\n return True\n\n ip=os.popen('minikube ip').read().strip()\n if ip != '':\n Debug.info(\n \"You can access the app through: {address}\".format(\n address='http://{ip}:{port}/api/values'.format(\n ip=ip, \n port=project.get_variable('API_SERVICE_NODE_PORT')\n )\n )\n )\n else:\n Debug.error(\"Minikube is unavailable\")\n\n return True","sub_path":"kuberfest/commands/minikube_ip.py","file_name":"minikube_ip.py","file_ext":"py","file_size_in_byte":583,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"319287098","text":"import re\n\nfrom .. import config\nfrom ..h import E, outerHTML\nfrom ..messages import die\nfrom . 
import steps\n\n\nclass ElementShorthand:\n def __init__(self):\n self.stage = \"start\"\n self.escapedText = None\n self.linkText = []\n self.bsAutolink = \"\"\n self.linkFor = None\n self.lt = None\n self.linkType = None\n\n def respond(self, match, dom=None):\n if self.stage == \"start\":\n return self.respondStart(match)\n elif self.stage == \"link text\":\n return self.respondLinkText(match, dom)\n elif self.stage == \"end\":\n return self.respondEnd()\n\n def respondStart(self, match):\n self.bsAutolink = match.group(0)\n groupdict = match.groupdict()\n if groupdict[\"escape\"]:\n self.escapedText = match.group(0)[1:]\n if groupdict[\"attr\"] is None and groupdict[\"value\"] is None:\n self.linkType = \"element\"\n self.linkFor = None\n self.lt = groupdict[\"element\"]\n elif groupdict[\"value\"] is None:\n self.linkType = \"element-sub\"\n self.linkFor = groupdict[\"element\"]\n self.lt = groupdict[\"attr\"]\n else:\n self.linkType = \"attr-value\"\n self.linkFor = groupdict[\"element\"] + \"/\" + groupdict[\"attr\"]\n self.lt = groupdict[\"value\"]\n if groupdict[\"linkType\"] is not None:\n self.linkType = groupdict[\"linkType\"]\n\n if groupdict[\"hasLinkText\"]:\n self.stage = \"link text\"\n return steps.NextBody(endRe)\n else:\n self.stage = \"end\"\n return steps.NextLiteral(endRe)\n\n def respondLinkText(self, match, dom): # pylint: disable=unused-argument\n self.linkText = dom\n self.bsAutolink += outerHTML(dom)\n return self.respondEnd()\n\n def respondEnd(self):\n if self.escapedText:\n return steps.Success(\n skips=[\"<\"], nodes=[self.escapedText[1:], *self.linkText, \"}>\"]\n )\n\n self.bsAutolink += \"}>\"\n\n if self.linkType not in config.markupTypes and self.linkType != \"element-sub\":\n die(\n \"Shorthand {0} gives type as '{1}', but only markup types ({2}) are allowed.\",\n self.bsAutolink,\n self.linkType,\n config.englishFromList(config.idlTypes),\n )\n return steps.Success(E.span({}, self.bsAutolink))\n\n if not self.linkText:\n self.linkText = self.lt\n\n attrs = {\n \"data-link-type\": self.linkType,\n \"for\": self.linkFor,\n \"lt\": self.lt,\n \"bs-autolink-syntax\": self.bsAutolink,\n }\n return steps.Success(E.a(attrs, self.linkText))\n\n\n# The named groups below were stripped from the source; the names are\n# restored from the groupdict() keys used in respondStart() above.\nElementShorthand.startRe = re.compile(\n r\"\"\"\n (?P<escape>\\\\)?\n <{\n (?P<element>[\\w*-]+)\n (?:/\n (?P<attr>[\\w*-]+)\n (?:/(?P<value>[^}!|]+))?\n )?\n (?:!!(?P<linkType>[\\w-]+))?\n (?P<hasLinkText>\\|)?\"\"\",\n re.X,\n)\n\nendRe = re.compile(\"}>\")\n","sub_path":"bikeshed/shorthands/element.py","file_name":"element.py","file_ext":"py","file_size_in_byte":3216,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"419429333","text":"from py2neo import Graph, Node, Relationship, authenticate\nimport csv\n\n\n# NOTE: `username` and `password` were undefined names in the original;\n# the values below are placeholders for the Neo4j credentials.\nusername = 'neo4j'\npassword = 'password'\nauthenticate(\"localhost:7474\", username, password)\ngraph = Graph()  # connects to the default http://localhost:7474/db/data/\nfile1 = open('database.csv','r')\nreader = csv.reader(file1)\ndata = list(reader) #stores the entire database as a 2D array\ni = 1\nlength = len(data)\nwhile(i<=(length-2)):\n\ttweet = Node(\"Tweet\",ID=data[i][0],text=data[i][1])\n\tuser = Node(\"Original_User\",ID=data[i][2],name=data[i][3],followers=data[i][4],tweetid=data[i][0])\n\tgraph.create(tweet | user)\n\tgraph.create(Relationship(tweet, \"TWEETED_BY\", user))\n\tj = i\n\tflag = 0\n\twhile((j<length) and (data[j][2] == data[i][2])): #for adding retweeter nodes\n\t\tflag = 1\t\t\t\n\t\tretweeter = Node(\"Retweeter\",ID=data[j][5],name=data[j][6],followers=data[j][7],tweetid=data[j][0])\n\t\tgraph.create(retweeter)\n\t\tif((j<length) and (int(data[j][8]) == 
1)):\n\t\t\tgraph.create(Relationship(user, \"FOLLOWED_BY\",retweeter,level=1))\n\t\telif(int(data[j][8]) == 2): \n\t\t\t#find_one() takes a single key/value pair; the original malformed call\n\t\t\t#also meant to constrain on tweetid\n\t\t\tfirst_level_follower = graph.find_one(\"Retweeter\",\"ID\",data[j][9])\n\t\t\tgraph.create(Relationship(first_level_follower,\"FOLLOWED_BY\",retweeter,level=2))\n\t\telif((j<length) and (int(data[j][8]) == 3)): \n\t\t\tgraph.create(Relationship(tweet, \"RETWEETED_BY\", retweeter))\n\t\tj = j + 1\n\t\t\t\n\tif(flag == 1):\n\t\ti = j\n\telif(flag==0):\n\t\ti = i + 1\n\nfile1.close()\n\n\n\n","sub_path":"insert_into_neo4j.py","file_name":"insert_into_neo4j.py","file_ext":"py","file_size_in_byte":1320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"561191526","text":"#!/usr/bin/env python3\nimport socket\nimport sys\nimport os\nimport subprocess\nimport struct\n\nREPO_PATH = os.getenv(\"REPO_PATH\")\n\nif REPO_PATH == None:\n REPO_PATH = subprocess.check_output(\"pwd\", shell=True).rstrip()\n\nHOST, PORT = \"reds-codecheck-labs.heig-vd.ch\", 51011\nBUF_SIZE = 1000000 # 1 MB\n\nARCHIVE_PATH = \"archive.tar.gz.gpg\"\n\n# This is the only way I find of ignoring Eclipse's metadata\nCMD_DIFF = \"git diff\"\nCMD_DIFF_FIND = \"`find . -type f -not -path '*/.*/*' -not -name '.*' -not -name 'flash'`\"\nCMD_BRANCH = \"git symbolic-ref --short HEAD\"\n\ndata = \" \".join(sys.argv[1:])\n\nsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\nbranch_name = None\n\ndef recvall(count):\n buf = b''\n while count:\n newbuf = sock.recv(count)\n if not newbuf: return None\n buf += newbuf\n count -= len(newbuf)\n\n return buf\n\ndef send_branch_name():\n global repo\n global branch_name\n\n branch_name = subprocess.check_output(CMD_BRANCH, shell=True).rstrip()\n\n length = len(branch_name)\n sock.sendall(struct.pack('!I', length))\n sock.sendall(branch_name)\n\ndef create_patch():\n global branch_name\n\n patch = subprocess.check_output(CMD_DIFF + \" tag_\" + branch_name.decode(\"utf-8\") + \" \" + CMD_DIFF_FIND, shell=True)\n sock.sendall(struct.pack('!I', len(patch)))\n sock.sendall(patch)\n\ndef send_patch():\n print(\"send_patch\")\n\ndef get_archive():\n try:\n lengthbuf = recvall(4)\n length, = struct.unpack('!I', lengthbuf)\n data = recvall(length)\n\n except (ConnectionResetError, TypeError) as e:\n print(\"Connection closed by remote host. \" +\n \"Are you on the correct branch? (git branch to check) \" +\n \"Did you submit an empty patch? (git diff to check)
\" +\n \"If the problem persists, please report to your assistants.\")\n exit(-1)\n\n archive = open(ARCHIVE_PATH, \"wb\")\n archive.write(data)\n\nprint(\"Connecting to \" + HOST + \":\" + str(PORT))\nsock.connect((HOST, PORT))\n\nprint(\"Sending branch name\")\nsend_branch_name()\n\nprint(\"Sending patch\")\ncreate_patch()\n\nprint(\"Retrieving archive, this may take some time ...\")\nget_archive()\n\nprint(\"Signed archive retrieved in \" + ARCHIVE_PATH)\n\nsock.close()\n","sub_path":"client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":2599,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"297914256","text":"#coding=utf-8\n\"\"\"\n完整的神经网络样例程序\n\"\"\"\nimport tensorflow as tf\n\n#numpy是一个科学计算的工具包,这里通过numpy工具包生成模拟数据集\nfrom numpy.random import RandomState\n\n#定义训练数据batch的大小\nbatch_size = 8\n\n#定义神经网络参数,这里沿用之前的设置\nw1 = tf.Variable(tf.random_normal((2,3), stddev=1, seed=1))\nw2 = tf.Variable(tf.random_normal((3,1), stddev=1, seed=1))\n\n#在shape的一个维度上使用None可以方便使用不同的batch大小.\n#在训练需要把数据分成比较小的batch,但是在测试时,可以一次性使用全部数据.\n#当数据集比较小时,这样比较方便测试,但数据集比较大时,将大量数据放入一个batch可能造成内存溢出\nx = tf.placeholder(tf.float32, shape=(None, 2), name='x_input')\ny_ = tf.placeholder(tf.float32, shape=(None, 1), name='y_input')\n\n#定义神经网络前向传播的过程\na = tf.matmul(x, w1)\ny = tf.matmul(a, w2)\n\n#定义损失函数和前向传播算法\n#使用sigmoid函数讲y转换为0~1之间的数值.转换后的y代表预测是正样本的概率,1-y代表预测是负样本的概率\ny = tf.sigmoid(y)\n#定义损失函数来刻画预测值与真实值的差距\ncross_entropy = -tf.reduce_mean(\n y_ * tf.log(tf.clip_by_value(y, 1e-10, 1.0))\n + (1-y)*tf.log(tf.clip_by_value(1-y, 1e-10, 1.0)))\n#定义学习效率\nlearning_rate = 0.001\n#定义反向传播算法来优化神经网络的参数\ntrain_step = \\\n tf.train.AdamOptimizer(learning_rate).minimize(cross_entropy)\n\n#通过随机数生成一个模拟数据集\nrdm = RandomState(1)\ndataset_size = 128\nX = rdm.rand(dataset_size, 2)\n#定义规则来给出样本的标签.这里所有的x1+x2<1都被认为是正样本(比如零件合格),\n#而其他为负样本(比如零件不合格)\n#这里用0表示负样本,用1表示正样本,大部分解决神经网络的问题都会用0和1的表示方法\nY = [[int(x1+x2 < 1)] for (x1, x2) in X]\n\n#创建一个会话来运行tensorflow程序:\nwith tf.Session() as sess:\n init_op = tf.global_variables_initializer()\n #初始化变量\n sess.run(init_op)\n\n print(sess.run(w1))\n print(sess.run(w2))\n\n \"\"\"\n 打印训练之前的神经网络参数值\n \n \"\"\"\n #设定训练的轮数\n STEPS = 5000\n for i in range(STEPS):\n #每次选取batch_size个样本进行训练\n start = (i * batch_size) % dataset_size\n end = min(start+batch_size, dataset_size)\n\n #通过选取的样本训练神经网络并更新参数\n sess.run(train_step,\n feed_dict = {x: X[start:end], y_:Y[start:end]})\n if i%1000 == 0:\n #每隔一段时间计算在所有数据上的交叉熵并输出\n total_cross_entropy = sess.run(\n cross_entropy, feed_dict={x: X, y_: Y})\n print(\"After {} training step(s), cross entropy on all data is {}\"\n .format(i, total_cross_entropy))\n\n print(sess.run(w1))\n print(sess.run(w2))\n\n\n\n\n\n\n\n\n\n","sub_path":"TFdemos-1/tfdemo07.py","file_name":"tfdemo07.py","file_ext":"py","file_size_in_byte":3004,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"122551079","text":"from setuptools import setup, find_packages\nfrom os import path\nfrom io import open\nimport pathlib\n\n# Directory containing this file\nHERE = pathlib.Path(__file__).parent\n\n# Get README for long description\nREADME = (HERE / \"README.md\").read_text()\n\n\n# Load all required modules\nwith open(path.join(HERE, \"requirements.txt\"), encoding=\"utf-8\") as f:\n all_requirements = f.read().split(\"\\n\")\ninstall_requires = [x.strip() for x in all_requirements if ('git+' not in x)\n and (not x.startswith('#')) and (not x.startswith('-'))]\ndependency_links = 
[x.strip().replace('git+', '') for x in all_requirements if 'git+' in x]\n\n# Setup\nsetup(\n author=\"Qyutou\",\n name=\"image_to_ansi\",\n description=\"A simple application to convert images to ansi.\",\n version=\"1.0.0\",\n include_package_data=True,\n install_requires=install_requires,\n packages=find_packages(),\n entry_points=\"\"\"\n [console_scripts]\n image_to_ansi=image_to_ansi.main:main\n \"\"\",\n long_description=README,\n long_description_content_type=\"text/markdown\",\n author_email=\"qyutou@gmail.com\",\n license=\"MIT\",\n url=\"https://github.com/Qyutou/image_to_ansi\"\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1187,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"636717272","text":"__author__ = 'Ryan'\n\nimport socket\nimport time\nimport imp\nimport os\nimport traceback\nimport re\nimport inspect\nfrom threading import Thread\nfrom plugins.BasePlugin import BasePlugin\nfrom util.BaseSettings import Settings\nfrom database.BaseQueryHelper import BaseQueryHelper\nfrom signal import *\nimport threading\nimport logging\n\nclass TwitchBot:\n def __init__(self):\n for sig in (SIGINT, SIGTERM):\n signal(sig, self.kill)\n\n self.ircSock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.irc_host = Settings.irc_host\n self.irc_chan = '#' + Settings.irc_channel.lower()\n self.connected = False\n self._pluginFolder = './plugins/'\n self._mainModule = 'plugin'\n self._plugins = []\n self.commands = []\n self.msgRegister = []\n self.joinPartHandlers = []\n self.loadedPluginNames = []\n self.moddedInList = ['#popethethird', '#joeruu']\n self.ignoredPlugins = ['SpamPlugin']\n self.ignoredUsers = []\n\n self.queryHelper = BaseQueryHelper()\n\n # *args absorbs the (signum, frame) arguments passed by signal handlers\n def kill(self, *args):\n print(\"Twitch Bot Ending...\")\n for p in self._plugins:\n p._kill()\n\n def sendMessage(self, className, chan, message, needMod=True):\n if (needMod and chan in self.moddedInList) or (not needMod):\n print(\"%s: %s\" % (chan, message))\n self.ircSock.send(str('PRIVMSG %s :%s\\n' % (chan, message)).encode('UTF-8'))\n else:\n print(\"Channel %s attempted to use commands without modding bot\" % chan)\n\n def connect(self, port):\n self.ircSock.connect((self.irc_host, port))\n self.ircSock.send(str(\"Pass \" + Settings.irc_oauth + \"\\r\\n\").encode('UTF-8'))\n self.ircSock.send(str(\"NICK \" + Settings.irc_username + \"\\r\\n\").encode('UTF-8'))\n\n for channel in self.queryHelper.getAllChannels():\n if self.queryHelper.channelIsEnabled(channel.channel.lower()):\n self.joinChannel(channel.channel.lower())\n time.sleep(.5)\n\n def joinChannel(self, channel):\n self.ircSock.send(str(\"JOIN \" + channel.lower() + \"\\r\\n\").encode('UTF-8'))\n time.sleep(.5)\n # self.sendMessage(None, channel, \"Hello! I am MiniGameBot. A list of my commands may be found at twitch.tv/PopeTheThird. Please ensure I am modded and allow 30-60 seconds after joining to prevent rate-limiting. 
Enjoy!\", False)\n\n def partChannel(self, channel):\n self.ircSock.send(str(\"PART \" + channel.lower() + \"\\r\\n\").encode('UTF-8'))\n\n def registerCommand(self, className, command, pluginFunction):\n self.commands.append( {'regex': command, 'handler':pluginFunction, 'plugin':className} )\n\n def registerAll(self, className, pluginFunction):\n self.msgRegister.append( {'handler':pluginFunction, 'plugin':className} )\n\n def registerJoinPartNotifications(self, className, pluginFunction):\n self.joinPartHandlers.append( { 'handler':pluginFunction, 'plugin':className } )\n\n def handleIRCMessage(self, ircMessage):\n print(ircMessage)\n nick = ircMessage.split('!')[0][1:]\n\n # Message to a channel\n if ircMessage.find(' PRIVMSG #') != -1:\n chan = ircMessage.split(' ')[2]\n msg = ircMessage.split(' PRIVMSG ' + chan + ' :')[1]\n\n for pluginDict in self.commands:\n if re.search('^' + Settings.irc_trigger + pluginDict['regex'] + '\\\\b', msg, re.IGNORECASE) \\\n and not self.queryHelper.checkPluginDisabled(chan, pluginDict['plugin']):\n\n if not nick in self.ignoredUsers:\n handler = pluginDict['handler']\n args = msg.split(\" \")\n handler(nick, chan, args)\n\n if not (self.queryHelper.isMod(nick, chan) or self.queryHelper.isAdmin(nick)):\n self.ignoredUsers.append(nick)\n threading.Timer(5, self.removeIgnored, args=(nick,)).start()\n else:\n print(\"User %s attempted to use command quickly in %s\" % (nick, chan))\n\n for pluginDict in self.msgRegister:\n if not self.queryHelper.checkPluginDisabled(chan, pluginDict['plugin']):\n handler = pluginDict['handler']\n args = msg.split(\" \")\n handler(nick, chan, args)\n\n elif ircMessage.find('PING ') != -1:\n self.ircSock.send(str(\"PING :pong\\n\").encode('UTF-8'))\n\n # User joined channel\n elif ircMessage.find('JOIN ') != -1:\n nick = ircMessage.split('!')[0][1:]\n chan = ircMessage.split(' ')[2]\n\n print(nick + \" joined \" + chan)\n for handler in self.joinPartHandlers:\n if not self.queryHelper.checkPluginDisabled(chan, handler['plugin']):\n handler['handler'](nick, chan, True)\n\n # User left channel\n elif ircMessage.find('PART ') != -1:\n nick = ircMessage.split('!')[0][1:]\n chan = ircMessage.split(' ')[2]\n\n print(nick + \" left \" + chan)\n for handler in self.joinPartHandlers:\n if not self.queryHelper.checkPluginDisabled(chan, handler['plugin']):\n handler['handler'](nick, chan, False)\n\n # User oped in channel\n elif ircMessage.find('MODE ') != -1:\n nick = ircMessage.split(' ')[-1]\n chan = ircMessage.split(' ')[2]\n op = ircMessage.split(' ')[3]\n\n if nick.lower() == Settings.irc_username.lower():\n if op == \"+o\" and not chan in self.moddedInList:\n self.moddedInList.append(chan)\n print(\"Modded in %s\" % chan)\n elif op == \"-o\" and chan in self.moddedInList:\n self.moddedInList.remove(chan)\n print(\"Unmodded in %s\" % chan)\n else:\n pass\n\n def removeIgnored(self, username):\n if username in self.ignoredUsers:\n self.ignoredUsers.remove(username)\n\n def run(self):\n line_sep_exp = re.compile(b'\\r?\\n')\n socketBuffer = b''\n while True:\n try:\n self.connected = True\n socketBuffer += self.ircSock.recv(1024)\n\n ircMsgs = line_sep_exp.split(socketBuffer)\n\n socketBuffer = ircMsgs.pop()\n\n for ircMsg in ircMsgs:\n msg = ircMsg.decode('utf-8')\n logging.info(msg)\n Thread(target=self.handleIRCMessage, args=(msg,)).start()\n except Exception as e:\n print(traceback.format_exc())\n raise e\n\n def loadPlugins(self):\n potentialPlugins = []\n allPlugins = os.listdir(self._pluginFolder)\n for i in allPlugins:\n 
location = os.path.join(self._pluginFolder, i)\n if not os.path.isdir(location) or not self._mainModule + \".py\" in os.listdir(location):\n continue\n info = imp.find_module(self._mainModule, [location])\n potentialPlugins.append({\"name\": i, \"info\": info})\n\n print(\"Found plugin classes:\")\n for i in potentialPlugins:\n try:\n plugin = imp.load_module(self._mainModule, *i[\"info\"])\n pluginClasses = inspect.getmembers(plugin, inspect.isclass)\n for className, classObj in pluginClasses:\n if className == \"BasePlugin\" or className in self.loadedPluginNames or not issubclass(classObj, BasePlugin) or className in self.ignoredPlugins:\n continue\n print(className)\n pluginInstance = classObj(self)\n self._plugins.append(pluginInstance)\n self.loadedPluginNames.append(className)\n except Exception as e:\n print(\"Error loading plugin.\")\n print(traceback.format_exc())\n\n def reconnect(self):\n try:\n self.connect(6667)\n self.run()\n except Exception as e:\n print(traceback.format_exc())\n\nif __name__ == \"__main__\":\n if not os.path.exists(\"logs\"):\n os.makedirs(\"logs\")\n logging.basicConfig(format='%(asctime)s %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p', filename='logs/irc_log.log', level=logging.DEBUG)\n while True:\n twitchBot = TwitchBot()\n try:\n twitchBot.loadPlugins()\n twitchBot.connect(6667)\n twitchBot.run()\n except Exception as e:\n print(traceback.format_exc())\n logging.error(traceback.format_exc())\n twitchBot.kill()\n time.sleep(5)\n","sub_path":"TwitchBot.py","file_name":"TwitchBot.py","file_ext":"py","file_size_in_byte":8729,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"485056434","text":"from imports import *\nfrom parameters import DATA_PATH, FILENAME\n\n\ndef load_data(data_path, file_name):\n # load the dataset and remove all stopwords\n articles = []\n labels = []\n\n with open(os.path.join(data_path, file_name), \"r\") as csvfile:\n reader = csv.reader(csvfile, delimiter=\",\")\n next(reader)\n for row in reader:\n labels.append(row[0])\n articles.append(row[1])\n\n return articles, labels\n\n\nif __name__ == \"__main__\":\n\n # load dataset\n articles, labels = load_data(\n data_path=DATA_PATH, file_name=FILENAME\n )\n \n print(\"labels length:\", len(labels))\n print(\"articles length:\", len(articles))\n print(\"First News:\", articles[:1])\n print(\"Labels:\", list(set(labels)))\n","sub_path":"data_management.py","file_name":"data_management.py","file_ext":"py","file_size_in_byte":757,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"271050189","text":"\"\"\"\nImplements fitness proportionate mutation --- the fittest chromosomes are less\nlikely to be mutated than unfit chromosomes.\n\"\"\"\nfrom mutator import BaseMutator\nimport numpy.random\n\n\nclass FitnessProportionate(BaseMutator):\n def __init__(self, mutation_rate):\n super(FitnessProportionate, self).__init__(mutation_rate)\n\n def _mutate(self, population):\n mutation_rate = self._mutation_rate\n fitnesses = population.fitnesses\n\n min_fitness = min(fitnesses)\n fitnesses -= min_fitness\n max_fitness = max(fitnesses)\n with numpy.errstate(all=\"ignore\"):\n fitnesses /= max_fitness\n mutate = [(1 - fitness)*mutation_rate for fitness in fitnesses]\n for c_index in xrange(population.size):\n for b_index in xrange(population.chromosome_size):\n if numpy.random.rand() < mutate[c_index]:\n parameters = numpy.copy(\n population.chromosomes[c_index].parameters)\n\n dimension = 
b_index % population.chromosome_shape[1]\n upper = population.chromosome_upper_limits[dimension]\n lower = population.chromosome_lower_limits[dimension]\n parameters[b_index] = (\n ((upper - lower) * numpy.random.random() + lower))\n\n # Use the update method so the chromosome knows it needs\n # its fitness value updated.\n population.chromosomes[c_index].update(parameters)\n","sub_path":"pygenopt/optimisation/mutator/fitness_proportionate.py","file_name":"fitness_proportionate.py","file_ext":"py","file_size_in_byte":1534,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"276305069","text":"from sqlalchemy import Column, Integer, String, ForeignKey, Float\nfrom sqlalchemy.orm import relationship\nfrom src.models.ShopKeepers import ShopKeepers\n\nfrom src.database.db import Base\n\n\nclass Bankaccount(Base):\n __tablename__ = 'bankaccount'\n id = Column(Integer, primary_key=True, autoincrement=True)\n bank_name = Column(String(50), unique=False)\n acount_holder_name = Column(String(250), unique=False)\n acount_no = Column(String(30), unique=True)\n shopkeeper_id = Column(Integer,ForeignKey(\"shopkeepers.id\"))\n shopkeeper_rel = relationship(\"ShopKeepers\")\n\n def __init__(self,\n bank_name=None,\n acount_holder_name=None,\n acount_no= None,\n shopkeeper_id= None):\n self.bank_name = bank_name\n self.acount_holder_name =acount_holder_name\n self.acount_no =acount_no\n self.shopkeeper_id = shopkeeper_id\n\n def toDict(self):\n u = {\"bank_name\":self.bank_name,\"acount_holder_name\":self.acount_holder_name,\"acount_no\":self.acount_no}\n return u\n\n\n def product_in(self):\n prod = credits()\n property = delattr()\n prod = object\n def visit():\n for x in prod:\n if x in y:\n break\n while():\n if(True):\n tuple()","sub_path":"src/models/BankAccounts.py","file_name":"BankAccounts.py","file_ext":"py","file_size_in_byte":1361,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"5484516","text":"from django import forms\nfrom django.db import models\nfrom django.contrib.auth.models import User,auth\nfrom django.db.models import fields\nfrom .models import Studentdata,Checkname\nfrom teacher.models import Subject\nfrom PIL import Image\nfrom django.core.files.uploadedfile import InMemoryUploadedFile\nfrom io import BytesIO\n\nclass Checkform(forms.ModelForm):\n class Meta:\n model = Checkname\n fields= ['status']\n labels = {\n 'status':'สถานะ'\n }\n select = [('มา',''),('ขาด',''),('ลาป่วย',''),('ลากิจ','')]\n widgets = {\n 'status':forms.RadioSelect(choices=select,attrs={'class':'form-control'})\n }\nclass Subjectform(forms.ModelForm):\n class Meta:\n model = Subject\n exclude= ['id','teacher_id']\n labels = {\n 'subject_id':'รหัสวิชา',\n 'name' :'ชื่อวิชา', \n 'group':'กลุ่มสาระ',\n 'unit' :'หน่วยกิต',\n 'classroom' :'ชั้น',\n 'room' :'ห้อง',\n }\n group_name = [('','...เลือก...'),('ภาษาไทย','ภาษาไทย'),('คณิตศาสตร์','คณิตศาสตร์'),('วิทยาศาสตร์และเทคโนโลยี','วิทยาศาสตร์และเทคโนโลยี'),('สุขศึกษา','สุขศึกษา'),('สังคมศึกษา','สังคมศึกษา'),('ศิลปะ','ศิลปะ'),('การงานอาชีพ','การงานอาชีพ'),('ภาษาต่างประเทศ','ภาษาต่างประเทศ'),('other','อื่น ๆ')]\n widgets = {\n 'group':forms.Select(choices=group_name),\n 'unit':forms.NumberInput(attrs={'step':'any'}),\n 'classroom':forms.NumberInput(),\n 'room':forms.NumberInput(),\n 
}","sub_path":"student/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":2033,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"282676339","text":"#!/usr/bin/python3\n'''BaseModel Module'''\n\nimport uuid\nfrom models import storage\nfrom datetime import datetime\n\n\nclass BaseModel():\n '''class BaseModel defines common attributes/methods for other classes'''\n\n __check = {}\n\n def __init__(self, *args, **kwargs):\n if (kwargs is not None and kwargs != {}):\n for keys in kwargs.keys():\n if keys == 'created_at' or keys == 'updated_at':\n dt = kwargs[keys]\n form = '%Y-%m-%dT%H:%M:%S.%f'\n if type(kwargs[keys]) is str:\n dt = datetime.strptime(kwargs[keys], form)\n setattr(self, keys, dt)\n elif keys != \"__class__\":\n setattr(self, keys, kwargs[keys])\n storage.new(self)\n else:\n self.id = str(uuid.uuid4())\n self.created_at = datetime.now()\n self.updated_at = datetime.now()\n storage.new(self)\n\n def save(self):\n '''updates public instance attr updated_at with current datetime'''\n self.updated_at = datetime.utcnow()\n storage.save()\n\n def to_dict(self):\n '''returns a dict containing all keys/values of __dict__ of instance'''\n self_dict = dict(self.__dict__)\n\n for item in self.__dict__:\n if item == 'created_at' or item == 'updated_at':\n dt = self.__dict__[item].strftime('%Y-%m-%dT%H:%M:%S.%f')\n self_dict.update({item: dt})\n else:\n self_dict.update({item: self.__dict__.get(item)})\n self_dict.update({'__class__': self.__class__.__name__})\n return self_dict\n\n def __str__(self):\n '''should print: [] () '''\n return \"[{}] ({}) {}\\\n \".format(self.__class__.__name__, self.id, self.__dict__)\n\n def update_file(self):\n '''Updates the storage if the dictionary changes'''\n storage.new(self)\n self.__check = self.__dict__\n","sub_path":"models/base_model.py","file_name":"base_model.py","file_ext":"py","file_size_in_byte":2022,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"357025329","text":"#!usr/env/python3\r\n#-*-:utf-8 -*-\r\n#Class test\r\n\r\nclass people:\r\n\t#basic attributes\r\n\tname=''\r\n\tage=0\r\n #define private attributes\r\n\t__weight=0\r\n\r\n\tdef __init__(self,name,age,wei):\r\n\t\tself.name=name\r\n\t\tself.age=age\r\n\t\tself.__weight=wei\r\n\r\n\tdef speak(self):\r\n\t\tprint(\"{0} say: I'm {1} now.\".format(self.name,self.age))\r\np=people('Rain',10,100)\r\np.speak()\r\n\r\n#inheritance\r\nclass student(people):\r\n\tgrade=''\r\n\tdef __init__(self,name,age,wei,gra):\r\n\t\tpeople.__init__(self,name,age,wei)\r\n\t\tself.grade=gra\r\n\tdef speak(self):\r\n\t\tprint(\"Wow you {0} say: I'm {1} in {2}th grade now\".format(self.name,self.age,self.grade))\r\n\r\n\r\ns=student('Rain',10,60,4)\r\ns.speak()","sub_path":"pytest_class.py","file_name":"pytest_class.py","file_ext":"py","file_size_in_byte":657,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"440105406","text":"# coding=utf-8\nimport logging\n\nfrom boilerplate.extensions import reqparse\n\n__author__ = 'ThucNC'\n_logger = logging.getLogger(__name__)\n\n\nclass RequestHelper:\n pagination_params = reqparse.RequestParser(bundle_errors=True)\n pagination_params.add_argument(\n 'page',\n type=int,\n help='Page number, starting from 1',\n required=False,\n default=1,\n location='args'\n )\n\n pagination_params.add_argument(\n 'pageSize',\n type=int,\n help='Page size',\n required=False,\n default=10,\n 
location='args'\n )\n","sub_path":"boilerplate/helpers/request_helper.py","file_name":"request_helper.py","file_ext":"py","file_size_in_byte":589,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"120514137","text":"# coding:utf-8\n'''\n@Copyright:LintCode\n@Author: justice_103\n@Problem: http://www.lintcode.com/problem/validate-binary-search-tree\n@Language: Python\n@Datetime: 15-08-19 13:42\n'''\n\n\"\"\"\nDefinition of TreeNode:\nclass TreeNode:\n def __init__(self, val):\n self.val = val\n self.left, self.right = None, None\n\"\"\"\nclass Solution:\n \"\"\"\n @param root: The root of binary tree.\n @return: True if the binary tree is BST, or false\n \"\"\" \n def isValidBST(self, root):\n # write your code here\n if root==None:\n return True\n \n nums=[]\n midTravelBST(root, nums)\n \n last=nums[0]\n for n in nums[1:]:\n if n<=last:\n return False\n last=n\n \n return True\n \ndef midTravelBST(root, nums):\n if root.left!=None:\n midTravelBST(root.left, nums)\n nums.append(root.val)\n if root.right!=None:\n midTravelBST(root.right, nums)\n\n \n","sub_path":"lintcode/95_validate-binary-search-tree/validate-binary-search-tree.py","file_name":"validate-binary-search-tree.py","file_ext":"py","file_size_in_byte":994,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"253712375","text":"\"\"\"\r\n.. module:: bmp180\r\n\r\n*************\r\nBMP180 Module\r\n*************\r\n\r\nThis module contains the driver for BOSCH BMP180 Digital Barometric Pressure Sensor. The ultra-low power, low voltage electronics of the BMP180 is optimized for use in mobile devices and the I2C interface allows for easy\r\nsystem integration with a microcontroller. The BMP180 is based on piezo-resistive technology for EMC robustness, high accuracy and linearity as\r\nwell as long term stability (`datasheet `_).\r\n \"\"\"\r\n\r\n\r\nimport i2c\r\n\r\n# BMP180 default address.\r\nBMP180_I2CADDR = 0x77\r\n\r\n# Operating Modes\r\nBMP180_ULTRALOWPOWER = 0\r\nBMP180_STANDARD = 1\r\nBMP180_HIGHRES = 2\r\nBMP180_ULTRAHIGHRES = 3\r\n\r\n# BMP180 Calibration coefficient contained in the internal eeprom\r\nBMP180_CAL_AC1_MSB = 0xAA\r\nBMP180_CAL_AC1_LSB = 0xAB \r\nBMP180_CAL_AC2_MSB = 0xAC \r\nBMP180_CAL_AC2_LSB = 0xAD \r\nBMP180_CAL_AC3_MSB = 0xAE \r\nBMP180_CAL_AC3_LSB = 0xAF \r\nBMP180_CAL_AC4_MSB = 0xB0\r\nBMP180_CAL_AC4_LSB = 0xB1 \r\nBMP180_CAL_AC5_MSB = 0xB2 \r\nBMP180_CAL_AC5_LSB = 0xB3 \r\nBMP180_CAL_AC6_MSB = 0xB4 \r\nBMP180_CAL_AC6_LSB = 0xB5 \r\nBMP180_CAL_B1_MSB = 0xB6 \r\nBMP180_CAL_B1_LSB = 0xB7 \r\nBMP180_CAL_B2_MSB = 0xB8 \r\nBMP180_CAL_B2_LSB = 0xB9 \r\nBMP180_CAL_MB_MSB = 0xBA \r\nBMP180_CAL_MB_LSB = 0xBB \r\nBMP180_CAL_MC_MSB = 0xBC\r\nBMP180_CAL_MC_LSB = 0xBD\r\nBMP180_CAL_MD_MSB = 0xBE\r\nBMP180_CAL_MD_LSB = 0xBF\r\n\r\n# Register Address\r\nBMP180_CONTROL = 0xF4\r\nBMP180_TEMPDATA = 0xF6\r\nBMP180_PRESSUREDATA = 0xF6\r\n\r\n# Commands\r\nBMP180_READ_T_CMD = 0x2E\r\nBMP180_READ_P_CMD = [0x34, 0x74, 0xB4, 0xF4]\r\n\r\nclass BMP180(i2c.I2C):\r\n \"\"\"\r\n.. class:: BMP180(i2cdrv, addr=0x77, clk=100000)\r\n\r\n Creates an intance of a new BMP180.\r\n\r\n :param i2cdrv: I2C Bus used '( I2C0, ... 
)'\r\n :param addr: Slave address, default 0x77\r\n :param clk: Clock speed, default 100kHz\r\n\r\n Example: ::\r\n\r\n from bosch.bmp180 import bmp180\r\n\r\n ...\r\n\r\n bmp = bmp180.BMP180(I2C0)\r\n bmp.start()\r\n bmp.init()\r\n temp, pres = bmp.get_temp_pres()\r\n\r\n \"\"\"\r\n\r\n #Init\r\n def __init__(self, drvname, addr=BMP180_I2CADDR, clk=400000):\r\n i2c.I2C.__init__(self,drvname,addr,clk)\r\n self._addr = addr\r\n self._oss = 0\r\n try:\r\n self.start()\r\n except PeripheralError as e:\r\n print(e) \r\n\r\n def init(self, oss=0):\r\n \"\"\"\r\n\r\n.. method:: init(oss=0)\r\n\r\n Initialize the BMP180 calibrating the sensor and setting the oss value.\r\n\r\n :param oss: OverSampling Setting value (from 0 to 4), default 0\r\n\r\n \"\"\"\r\n self._calibrate()\r\n self.set_over_sampling_setting(oss) #default oss\r\n\r\n #Write on register\r\n def _write(self, addr, data):\r\n buffer = bytearray(2)\r\n buffer[0] = addr\r\n buffer[1] = data\r\n\r\n self.write(buffer)\r\n\r\n #Read raw pressure\r\n def _read_uint_from_16_to_19(self, reg):\r\n data = self.write_read(reg, 3) #data[0] --> MSB, data[1] --> LSB, data[2] --> XLSB\r\n res = (((data[0] << 16) + (data[1] << 8) + data[2]) >> (8-self._oss))\r\n return res\r\n\r\n #Read raw temperature or uint16 register\r\n def _read_uint16(self, reg):\r\n data = self.write_read(reg, 2) #data[0] --> MSB, data[1] --> LSB\r\n res = ((data[0] << 8 | (data[1])) & 0xFFFF)\r\n return res\r\n\r\n #Read int16 register\r\n def _read_int16(self, reg):\r\n res = self._read_uint16(reg)\r\n if res > 32767:\r\n res -= 65536\r\n return res\r\n\r\n #Calilbrate the sensor\r\n def _calibrate(self):\r\n self.cal_AC1 = self._read_int16(BMP180_CAL_AC1_MSB) # INT16\r\n self.cal_AC2 = self._read_int16(BMP180_CAL_AC2_MSB) # INT16\r\n self.cal_AC3 = self._read_int16(BMP180_CAL_AC3_MSB) # INT16\r\n self.cal_AC4 = self._read_uint16(BMP180_CAL_AC4_MSB) # UINT16\r\n self.cal_AC5 = self._read_uint16(BMP180_CAL_AC5_MSB) # UINT16\r\n self.cal_AC6 = self._read_uint16(BMP180_CAL_AC6_MSB) # UINT16\r\n self.cal_B1 = self._read_int16(BMP180_CAL_B1_MSB) # INT16\r\n self.cal_B2 = self._read_int16(BMP180_CAL_B2_MSB) # INT16\r\n self.cal_MB = self._read_int16(BMP180_CAL_MB_MSB) # INT16\r\n self.cal_MC = self._read_int16(BMP180_CAL_MC_MSB) # INT16\r\n self.cal_MD = self._read_int16(BMP180_CAL_MD_MSB) # INT16\r\n \r\n #Set oversampling parameter\r\n def set_over_sampling_setting(self, oss):\r\n \"\"\"\r\n\r\n.. method:: set_over_sampling_setting(oss)\r\n\r\n Sets the OverSampling Setting value of the BMP180.\r\n\r\n :param oss: OverSampling Setting value (from 0 to 4 allowed)\r\n\r\n.. note:: The OverSampling Setting parameter selects different operating modes according to give the possibility for findind the optimum compromise between power consumption, speed, and resolution; in the table below allowed values are reported with related operating modes.\r\n\r\n========= ===================== ============ =============== ======================\r\nOSS param Operating Mode N of samples Conversion time Avg Current 1 sample/s\r\n========= ===================== ============ =============== ======================\r\n 0 Ultra Low Power 1 4.5 ms 3 uA\r\n 1 Standard 2 7.5 ms 5 uA\r\n 2 High Resolution 4 13.5 ms 7 uA\r\n 3 Ultra High Resolution 8 25.5 ms 12 uA\r\n========= ===================== ============ =============== ======================\r\n \r\n \"\"\"\r\n if oss in range(4):\r\n self._oss = oss\r\n\r\n #Get raw temperature\r\n def get_raw_temp(self):\r\n \"\"\"\r\n\r\n.. 
method:: get_raw_temp()\r\n\r\n Retrieves the current temperature data from the sensor as raw value.\r\n\r\n Returns raw_t\r\n\r\n \"\"\"\r\n self._write(BMP180_CONTROL, BMP180_READ_T_CMD)\r\n sleep(5) # p.21 max conversion time for temp reading\r\n raw_t = self._read_uint16(BMP180_TEMPDATA)\r\n return raw_t\r\n\r\n #Get raw pressure\r\n def get_raw_pres(self):\r\n \"\"\"\r\n\r\n.. method:: get_raw_pres()\r\n\r\n Retrieves the current pressure data from the sensor as raw value; according to the OverSampling Setting value this measure can be\r\n faster but less accurate or more precise but slower. (see :func:`set_over_sampling_setting()`)\r\n\r\n Returns raw_p\r\n\r\n \"\"\"\r\n self._write(BMP180_CONTROL, BMP180_READ_P_CMD[self._oss])\r\n if self._oss == BMP180_ULTRALOWPOWER:\r\n sleep(5)\r\n elif self._oss == BMP180_STANDARD:\r\n sleep(8)\r\n elif self._oss == BMP180_HIGHRES:\r\n sleep(14)\r\n elif self._oss == BMP180_ULTRAHIGHRES:\r\n sleep(26)\r\n raw_p = self._read_uint_from_16_to_19(BMP180_PRESSUREDATA)\r\n return raw_p\r\n\r\n #Get Temperature in °C\r\n def get_temp(self):\r\n \"\"\"\r\n\r\n.. method:: get_temp()\r\n\r\n Retrieves the current temperature data from the sensor as calibrate value in °C.\r\n\r\n Returns temp\r\n\r\n \"\"\"\r\n rt = self.get_raw_temp()\r\n # p.15 datasheet - calculate true temperature\r\n x1 = ((rt - self.cal_AC6) * self.cal_AC5) >> 15\r\n x2 = (self.cal_MC << 11) // (x1 + self.cal_MD)\r\n xx = x1 + x2\r\n temp = ((xx + 8) >> 4) / 10.0\r\n return temp\r\n\r\n #Get pressure in Pa\r\n def get_pres(self):\r\n \"\"\"\r\n\r\n.. method:: get_pres()\r\n\r\n Retrieves the current pressure data from the sensor as calibrate value in Pa; according to the OverSampling Setting value this measure can be\r\n faster but less accurate or more precise but slower. (see :func:`set_over_sampling_setting()`)\r\n\r\n Returns pres\r\n\r\n \"\"\"\r\n rt = self.get_raw_temp()\r\n rp = self.get_raw_pres()\r\n # p.15 datasheet - calculate true pressure\r\n x1 = ((rt - self.cal_AC6) * self.cal_AC5) >> 15\r\n x2 = (self.cal_MC << 11) // (x1 + self.cal_MD)\r\n xx = x1 + x2\r\n \r\n # Pressure Calculations\r\n yy = xx - 4000\r\n x1 = (self.cal_B2 * (yy * yy) >> 12) >> 11\r\n x2 = (self.cal_AC2 * yy) >> 11\r\n x3 = x1 + x2\r\n vv = (((self.cal_AC1 * 4 + x3) << self._oss) + 2) // 4\r\n x1 = (self.cal_AC3 * yy) >> 13\r\n x2 = (self.cal_B1 * ((yy * yy) >> 12)) >> 16\r\n x3 = ((x1 + x2) + 2) >> 2\r\n ww = (self.cal_AC4 * (x3 + 32768)) >> 15\r\n zz = float(rp - vv) * float(50000 >> self._oss)\r\n pres = (zz * 2) // ww\r\n pres = int(pres)\r\n x1 = (pres >> 8) * (pres >> 8)\r\n x1 = (x1 * 3038) >> 16\r\n x2 = (-7357 * pres) >> 16\r\n pres = pres + ((x1 + x2 + 3791) >> 4)\r\n return pres\r\n\r\n #Get both temp and pres\r\n def get_temp_pres(self):\r\n \"\"\"\r\n\r\n.. method:: get_temp_pres()\r\n\r\n Retrieves the current temperature (in °C) and pressure (in Pa) data from the sensor as calibrate values in one call.\r\n\r\n Returns temp\r\n\r\n \"\"\"\r\n t = self.get_temp()\r\n p = self.get_pres()\r\n return t, p\r\n\r\n #Get altitude in m\r\n def get_altitude(self):\r\n \"\"\"\r\n\r\n.. 
method:: get_altitude()\r\n\r\n    Calculates, from measured pressure, the current altitude data as value in meters.\r\n\r\n    Returns altitude\r\n\r\n        \"\"\"\r\n        # p.16 datasheet - Calculating absolute altitude\r\n        pres = float(self.get_pres())\r\n        p0 = 101325.0 # pressure at sea level in Pa\r\n        altitude = 44330.0 * (1.0 - ((pres / p0)**(1.0/5.255)))\r\n        return altitude\r\n\r\n    #Get pressure at sea level in Pa\r\n    def get_sea_level_pres(self, altitude_m=0.0):\r\n        \"\"\"\r\n\r\n.. method:: get_sea_level_pres()\r\n\r\n    Calculates, from measured pressure, the pressure (in Pa) at sea level when given a known altitude in meters.\r\n\r\n    Returns p0 (pressure at sea level)\r\n\r\n        \"\"\"\r\n        # p.17 datasheet - Calculating pressure at sea level\r\n        pres = float(self.get_pres())\r\n        p0 = pres / ((1.0 - altitude_m/44330.0)**(5.255))\r\n        return p0\r\n","sub_path":"bmp180.py","file_name":"bmp180.py","file_ext":"py","file_size_in_byte":10338,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"561396017","text":"from django.shortcuts import render, redirect\nfrom django.contrib.auth.decorators import login_required\n\nfrom bowring.forms.event.newform import NewForm\nfrom bowring.forms.event.showform import ShowForm\nfrom bowring.models.events import Event\nfrom bowring.models.results import Result\n\n\n@login_required\ndef new(request):\n    form = NewForm()\n    return render(request, 'event/new.html', {\"form\": form})\n\n\ndef create(request):\n    print(request.user)\n    form = NewForm(request.POST)\n    print(form.errors)\n    if form.is_valid():\n        event = Event(\n            name=form.data[\"name\"],\n            event_date=form.data[\"event_date\"],\n            user=request.user\n        )\n\n        event.save(user=request.user)\n        result = Result(\n            user=request.user,\n            event=event,\n            total_score=0,\n            base_score=0,\n            total_handicap=0\n        )\n        result.save(user=request.user)\n\n\n        return redirect(\"bowring:home\")\n\n    return render(request, \"event/new.html\", {\"form\": form})\n\n\ndef show(request, event_id):\n    form = ShowForm(event_id)\n    return render(request, 'event/show.html', {\"form\": form})\n\n\ndef edit(request, event_id):\n    form = NewForm.edit(event_id)\n    return render(request, \"event/edit.html\", {\"form\": form})\n\n\ndef update(request):\n    form = NewForm(request.POST)\n    if form.is_valid():\n        print(form.data)\n        event = Event.objects.get(id=form.data[\"id\"])\n        if event and event.user == request.user:\n            event.name = form.data[\"name\"]\n            event.event_date = form.data[\"event_date\"]\n            event.save(request)\n            return redirect(\"bowring:evnet_show\", event_id=event.id)\n\n    return render(request, 'event/new.html', {\"form\": form})\n","sub_path":"bowring/views/eventview.py","file_name":"eventview.py","file_ext":"py","file_size_in_byte":1751,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"566841315","text":"import re\nimport traceback\nfrom datetime import date, timedelta\nfrom collections import OrderedDict\nfrom json import JSONDecodeError\n\nimport requests\nfrom bs4 import BeautifulSoup\nfrom flask import Flask, render_template\n\napp = Flask(__name__)\n\nDATE_MAP = {0: \"maanantai\", 1: \"tiistai\", 2: \"keskiviikko\", 3: \"torstai\", 4: \"perjantai\", 5: \"lauantai\", 6: \"sunnuntai\"}\n\n\ndef get_min():\n    try:\n        t = date.today()\n        month = t.month if t.month > 9 else (\"0\" + str(t.month))\n        day = t.day if t.day > 9 else (\"0\" + str(t.day))\n        base_url = \"https://www.sodexo.fi/ruokalistat/output/daily_json/70\"\n        r = requests.get(f\"{base_url}/{t.year}-{month}-{day}\")\n        data = r.json()\n        lst = []\n        for _, number in data[\"courses\"].items():\n            lst.append(\"{} ({})\".format(number[\"title_fi\"], number[\"properties\"]))\n        return lst\n    except Exception:\n        return None\n\n\ndef get_hiili():\n    try:\n        t = date.today()\n        month = t.month if t.month > 9 else (\"0\" + str(t.month))\n        day = t.day if t.day > 9 else (\"0\" + str(t.day))\n        base_url = \"https://www.sodexo.fi/ruokalistat/output/daily_json/7498\"\n        r = requests.get(f\"{base_url}/{t.year}-{month}-{day}\")\n        data = r.json()\n        lst = []\n        for _, number in data[\"courses\"].items():\n            lst.append(\"{} ({})\".format(number[\"title_fi\"], number[\"properties\"]))\n        return lst\n    except Exception:\n        return None\n\n\ndef crawl_factory():\n    try:\n        r = requests.get(\"https://ravintolafactory.com/lounasravintolat/ravintolat/helsinki-salmisaari/\")\n\n        soup = BeautifulSoup(r.text, \"html.parser\")\n\n        today = DATE_MAP[date.weekday(date.today())].capitalize()\n        h3 = soup.find(\"h3\", string=re.compile(today))\n        if not h3:\n            return None\n\n        next_p = h3.findNext(\"p\")\n        if next_p.find(\"img\"):\n            next_p = next_p.findNext(\"p\")\n\n        return [str(item).strip() for item in next_p.contents if str(item) != \"
\"]\n except Exception:\n return None\n\n\ndef crawl_himasali():\n try:\n r = requests.get(\"https://www.himasali.com/lounaslista/\")\n\n today = DATE_MAP[date.weekday(date.today())].capitalize()\n\n soup = BeautifulSoup(r.text, \"html.parser\")\n for i in range(0, 20):\n lst = soup.find_all(\"p\")[i].text\n if str(today) in lst:\n return (\"\\n\".join(lst.split(\"\\n\")[1:])).splitlines()\n except Exception:\n return None\n\n\ndef crawl_dylanmilk():\n try:\n r = requests.get(\"https://www.dylan.fi/milk\")\n soup = BeautifulSoup(r.text, \"html.parser\")\n for i in range(0, 50):\n row = soup.find_all(\"p\")[i].text\n today = DATE_MAP[date.weekday(date.today())].capitalize()\n\n if str(today) in str(row):\n i += 1\n break\n\n arr = []\n for i in range(i, 50):\n row = soup.find_all(\"p\")[i].text\n if len(str(row)) < 10:\n break\n arr.append(row)\n\n return arr\n except Exception:\n return None\n\n\ndef crawl_garam_page(url):\n def select_text_content(text_contents):\n lookfor = [\"maanantai\", \"tiistai\", \"keskiviikko\", \"torstai\", \"perjantai\"]\n for div in text_contents:\n count = 0\n for word in lookfor:\n if word in div.text.lower():\n count += 1\n if count >= 3:\n # If most of the weekday words appear in the div, it is\n # probably the lunch list. This lets them to typo couple of the\n # words, but they still need to get most of them right.\n return div\n return text_contents[3]\n try:\n r = requests.get(url)\n soup = BeautifulSoup(r.text, \"html.parser\")\n text_contents = soup.find_all(class_=\"b-text-c\")\n wrapper = select_text_content(text_contents)\n if not wrapper or not wrapper.p:\n return None\n\n today = DATE_MAP[date.weekday(date.today())].upper()\n tomorrow = DATE_MAP[date.weekday(date.today() + timedelta(1))].upper()\n\n all_p = wrapper.find_all(\"p\")\n today_index = next(iter([i for i, s in enumerate(all_p) if today in s.text]), None)\n tomorrow_index = next(iter([i for i, s in enumerate(all_p) if tomorrow in s.text]), None)\n\n if not tomorrow_index:\n tomorrow_index = next(iter([i for i, s in enumerate(all_p) if \"Hinnat\" in s.text]), None)\n\n if today_index and tomorrow_index:\n return [p.text for p in all_p[today_index + 1: tomorrow_index]]\n except Exception:\n return None\n\n return None\n\n\ndef crawl_oikeus():\n return crawl_garam_page(\"https://www.cafeteria.fi/ravintola-oikeus/\")\n\n\ndef crawl_silta():\n return crawl_garam_page(\"https://www.cafeteria.fi/silta-itamerentalo/\")\n\n\n@app.route(\"/\")\ndef index():\n data = OrderedDict(\n {\n \"min\": get_min(),\n \"hiili\": get_hiili(),\n \"silta\": crawl_silta(),\n \"oikeus\": crawl_oikeus(),\n \"factory\": crawl_factory(),\n \"hima&Sali\": crawl_himasali(),\n \"Dylan Milk\": crawl_dylanmilk(),\n }\n )\n return render_template(\"index.html\", data=data)\n\n\nif __name__ == '__main__':\n app.run()\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":5328,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"362425010","text":"#!/usr/bin/env python\n\nprint(\"Wombats are great!\")\n\n\nx = 5\ny = 20\nresult = x + y\nprint(result)\n\nx_value = 5\ny_value = 20\n\nprint(type(x))\ncave_man = 'Barney Rubble'\nprint(type(cave_man))\n\ndocument_type = None\n\nclass_for_trip = 'Sophomore'\nbomb_yield = '98 kilotons'\n\nlocation_dir = 'North'\nmax_kumquats = 37\nuser_id = 'jstrickler'\n\n\nx = 10\ny = \"20\"\n\n# result = x + y\n\nprint(str(x) + y)\nprint(x + 
int(y))\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"wombat.py","file_name":"wombat.py","file_ext":"py","file_size_in_byte":419,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"405616415","text":"import kerastuner\nimport tensorflow as tf\nfrom tensorflow.python.util import nest\n\nfrom autokeras import adapters\nfrom autokeras import blocks\nfrom tests import utils\n\n\ndef test_image_build_return_tensor():\n block = blocks.ImageBlock()\n\n outputs = block.build(\n kerastuner.HyperParameters(),\n tf.keras.Input(shape=(32, 32, 3), dtype=tf.float32))\n\n assert len(nest.flatten(outputs)) == 1\n assert isinstance(nest.flatten(outputs)[0], tf.Tensor)\n\n\ndef test_image_deserialize_to_image():\n serialized_block = blocks.serialize(blocks.ImageBlock())\n\n block = blocks.deserialize(serialized_block)\n\n assert isinstance(block, blocks.ImageBlock)\n\n\ndef test_image_get_config_has_all_attributes():\n block = blocks.ImageBlock()\n\n config = block.get_config()\n\n assert utils.get_func_args(\n blocks.ImageBlock.__init__).issubset(config.keys())\n\n\ndef test_text_build_return_tensor():\n block = blocks.TextBlock()\n\n outputs = block.build(\n kerastuner.HyperParameters(),\n tf.keras.Input(shape=(1,), dtype=tf.string))\n\n assert len(nest.flatten(outputs)) == 1\n assert isinstance(nest.flatten(outputs)[0], tf.Tensor)\n\n\ndef test_text_deserialize_to_text():\n serialized_block = blocks.serialize(blocks.TextBlock())\n\n block = blocks.deserialize(serialized_block)\n\n assert isinstance(block, blocks.TextBlock)\n\n\ndef test_text_get_config_has_all_attributes():\n block = blocks.TextBlock()\n\n config = block.get_config()\n\n assert utils.get_func_args(\n blocks.TextBlock.__init__).issubset(config.keys())\n\n\ndef test_structured_build_return_tensor():\n block = blocks.StructuredDataBlock()\n block.column_names = ['0', '1']\n block.column_types = {'0': adapters.NUMERICAL,\n '1': adapters.NUMERICAL}\n\n outputs = block.build(\n kerastuner.HyperParameters(),\n tf.keras.Input(shape=(2,), dtype=tf.string))\n\n assert len(nest.flatten(outputs)) == 1\n assert isinstance(nest.flatten(outputs)[0], tf.Tensor)\n\n\ndef test_structured_deserialize_to_structured():\n serialized_block = blocks.serialize(blocks.StructuredDataBlock())\n\n block = blocks.deserialize(serialized_block)\n\n assert isinstance(block, blocks.StructuredDataBlock)\n\n\ndef test_structured_get_config_has_all_attributes():\n block = blocks.StructuredDataBlock()\n\n config = block.get_config()\n\n assert utils.get_func_args(\n blocks.StructuredDataBlock.__init__).issubset(config.keys())\n\n\ndef test_timeseries_build_return_tensor():\n block = blocks.TimeseriesBlock()\n block.column_names = ['0', '1']\n block.column_types = {'0': adapters.NUMERICAL,\n '1': adapters.NUMERICAL}\n\n outputs = block.build(\n kerastuner.HyperParameters(),\n tf.keras.Input(shape=(32, 2), dtype=tf.float32))\n\n assert len(nest.flatten(outputs)) == 1\n assert isinstance(nest.flatten(outputs)[0], tf.Tensor)\n\n\ndef test_timeseries_deserialize_to_timeseries():\n serialized_block = blocks.serialize(blocks.TimeseriesBlock())\n\n block = blocks.deserialize(serialized_block)\n\n assert isinstance(block, blocks.TimeseriesBlock)\n\n\ndef test_timeseries_get_config_has_all_attributes():\n block = blocks.TimeseriesBlock()\n\n config = block.get_config()\n\n assert utils.get_func_args(\n 
blocks.TimeseriesBlock.__init__).issubset(config.keys())\n","sub_path":"tests/autokeras/blocks/wrapper_test.py","file_name":"wrapper_test.py","file_ext":"py","file_size_in_byte":3368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"630667664","text":"import torch.nn as nn\n\nfrom .build import BACKBONE_REGISTRY\nfrom .backbone import Backbone\n\n\nclass Convolution(nn.Module):\n\n    def __init__(self, c_in, c_out):\n        super().__init__()\n        self.conv = nn.Conv2d(c_in, c_out, 3, stride=1, padding=1)\n        self.relu = nn.ReLU(True)\n\n    def forward(self, x):\n        return self.relu(self.conv(x))\n\n\nclass ConvNet(Backbone):\n\n    def __init__(self, c_in=3, c_hidden=64, nb=4):\n        super().__init__()\n        backbone = []\n        backbone += [Convolution(c_in, c_hidden)]\n        backbone += [nn.MaxPool2d(2)]\n        for i in range(nb - 1):\n            backbone += [Convolution(c_hidden, c_hidden)]\n            backbone += [nn.MaxPool2d(2)]\n        self.backbone = nn.Sequential(*backbone)\n\n        self._out_features = 2**2 * c_hidden\n\n    def _check_input(self, x):\n        H, W = x.shape[2:]\n        assert H == 32 and W == 32, \\\n            'Input to network must be 32x32, ' \\\n            'but got {}x{}'.format(H, W)\n\n    def forward(self, x):\n        self._check_input(x)\n        f = self.backbone(x)\n        return f.view(f.size(0), -1)\n\n\n@BACKBONE_REGISTRY.register()\ndef cnn_digitsdg(**kwargs):\n    \"\"\"\n    This architecture was used for DigitsDG dataset in:\n\n    - Zhou et al. Deep Domain-Adversarial Image Generation\n    for Domain Generalisation. AAAI 2020.\n    \"\"\"\n    return ConvNet(c_hidden=64, nb=4)\n","sub_path":"dassl/modeling/backbone/cnn_digitsdg.py","file_name":"cnn_digitsdg.py","file_ext":"py","file_size_in_byte":1387,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"290620146","text":"import numpy as np\nfrom keras import optimizers\nfrom keras.layers import Dense\nfrom keras.models import Sequential\n\nnp.random.seed(35)\n# seed() fixes the starting integer used by the random number generator; with the same\n# seed value the same random numbers are produced on every run. If it is not set, the\n# system derives the value from the current time, so the numbers differ from run to run.\n\nx_train = [1, 2, 3, 4]\ny_train = [1, 2, 3, 4]\n\nmodel = Sequential()\nmodel.add(Dense(1, input_dim=1))\nmodel.summary()\n# prints summary of the model to the terminal\n\nsgd = optimizers.SGD(lr=0.1) \n#keras.optimizers.SGD(lr=0.01, momentum=0.0, decay=0.0, nesterov=False)\n# Stochastic gradient descent; supports momentum, learning-rate decay and Nesterov momentum\n# lr: float >= 0, learning rate\n# momentum: float >= 0, momentum parameter\n# decay: float >= 0, learning-rate decay applied after each update\n# nesterov: boolean, whether to use Nesterov momentum\nmodel.compile(loss='mse', optimizer=sgd)\n# loss=mean_squared_error or mse\nmodel.fit(x_train, y_train, epochs=200)\n\ny_predict = model.predict(np.array([5]))\n# the model can be called for prediction via predict().\nprint(y_predict)\n","sub_path":"keras/klab-02-1-linear_regression.py","file_name":"klab-02-1-linear_regression.py","file_ext":"py","file_size_in_byte":1238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"294218930","text":"import speech_recognition as sr\nimport os\nfrom os import path\nimport pydub\nfrom pydub import AudioSegment\nfrom pydub.effects import normalize\nfrom pydub.playback import play\nfrom pydub.utils import which\nimport youtube_dl\nimport traceback\nfrom speech_recognition import RequestError\nimport shutil\n\nAudioSegment.converter = which(\"ffmpeg\")\n\ndef speech_to_text(url):\n    text = ''\n    try:\n        if os.path.exists('audio'):\n            shutil.rmtree('audio')\n        \n        os.mkdir('audio')\n\n        # Extract Audio From YouTube\n        os.system('youtube-dl -f bestaudio --extract-audio --audio-format mp3 --output 
audio/test.mp3 --audio-quality 0 '+url)\n\n # convert mp3 file to wav\n sound = AudioSegment.from_file(\"audio/test.mp3\")\n sound.export(\"audio/test.wav\", format=\"wav\")\n\n s = AudioSegment.from_file(\"audio/test.wav\")\n s = s+10\n s = normalize(s)\n s.export(\"audio/test.wav\",format=\"wav\")\n # transcribe audio file\n AUDIO_FILE = \"audio/test.wav\" \n\n # use the audio file as the audio source\n r = sr.Recognizer()\n with sr.AudioFile(AUDIO_FILE) as source:\n audio = r.record(source) # read the entire audio file\n\n text = r.recognize_google(audio)\n\n with open('transcript.txt' , 'w') as f:\n f.write(text)\n\n return text \n \n except RequestError as r:\n print(\"Recognition request failed: Bad Request\")\n \n except PermissionError as p:\n print(p,\"\\nRe-run your program\")\n \n except Exception as e:\n print(\"Error in Google Speech to Text\")\n print(e)\n traceback.print_exc()\n \n finally:\n if os.path.exists('audio'):\n shutil.rmtree('audio')\n return text\n \n \nif __name__ == '__main__':\n url = input()\n text = speech_to_text(url)\n print(text)","sub_path":"backend/google_speech_to_text.py","file_name":"google_speech_to_text.py","file_ext":"py","file_size_in_byte":1927,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"111314030","text":"from sys import platform\n#########################################\n# DIGITAL CONTROLLER PARAMS #\n#########################################\n# Values: R, G, B, STATE_ON_OFF, IS_INITIALIZED\n__DCACHE = [100, 100, 100, 0]\n__NEOPIXEL_OBJ = None\n__PERSISTENT_CACHE = False\n\n\n#########################################\n# DIGITAL rgb WITH 1 \"PWM\" #\n#########################################\n\n\ndef __init_NEOPIXEL(n=24):\n \"\"\"\n Init NeoPixel module\n n - number of led fragments\n n - must be set from code! 
(no persistent object handling in LMs)\n \"\"\"\n global __NEOPIXEL_OBJ\n if __NEOPIXEL_OBJ is None:\n from neopixel import NeoPixel\n from machine import Pin\n from LogicalPins import get_pin_on_platform_by_key\n neopixel_pin = Pin(get_pin_on_platform_by_key('pwm_3')) # Get Neopixel pin from LED PIN pool\n __NEOPIXEL_OBJ = NeoPixel(neopixel_pin, n) # initialize for max 8 segments\n del neopixel_pin\n return __NEOPIXEL_OBJ\n\n\ndef __persistent_cache_manager(mode):\n \"\"\"\n pds - persistent data structure\n modes:\n r - recover, s - save\n \"\"\"\n if not __PERSISTENT_CACHE:\n return\n global __DCACHE\n if mode == 's':\n # SAVE CACHE\n with open('neopixel.pds', 'w') as f:\n f.write(','.join([str(k) for k in __DCACHE]))\n return\n try:\n # RESTORE CACHE\n with open('neopixel.pds', 'r') as f:\n __DCACHE = [int(data) for data in f.read().strip().split(',')]\n except:\n pass\n\n\ndef neopixel_cache_load_n_init(cache=None):\n global __PERSISTENT_CACHE\n if cache is None:\n __PERSISTENT_CACHE = False if platform == 'esp8266' else True\n else:\n __PERSISTENT_CACHE = cache\n __persistent_cache_manager('r') # recover data cache\n if __PERSISTENT_CACHE and __DCACHE[3] == 1:\n neopixel() # Set each LED for the same color\n return \"CACHE: {}\".format(__PERSISTENT_CACHE)\n\n\ndef neopixel(r=None, g=None, b=None):\n \"\"\"\n Simple NeoPixel wrapper\n - Set all led fragments for the same color set\n - Default and cached color scheme\n \"\"\"\n global __DCACHE\n r = __DCACHE[0] if r is None else r\n g = __DCACHE[1] if g is None else g\n b = __DCACHE[2] if b is None else b\n # Set each LED for the same color\n for element in range(0, __init_NEOPIXEL().n): # Iterate over led string elements\n __NEOPIXEL_OBJ[element] = (r, g, b) # Set LED element color\n __NEOPIXEL_OBJ.write() # Send data to device\n # Set cache\n if r > 0 or g > 0 or b > 0:\n __DCACHE = [r, g, b, 1] # Cache colors + state (True-ON)\n else:\n __DCACHE[3] = 0 # State - False - OFF\n __persistent_cache_manager('s') # Save cache - __DCACHE - to file\n return \"NEOPIXEL SET TO R{}G{}B{}\".format(r, g, b)\n\n\ndef segment(r=None, g=None, b=None, s=0):\n r = __DCACHE[0] if r is None else r\n g = __DCACHE[1] if g is None else g\n b = __DCACHE[2] if b is None else b\n if s <= __init_NEOPIXEL().n:\n __NEOPIXEL_OBJ[s] = (r, g, b)\n __NEOPIXEL_OBJ.write()\n return \"NEOPIXEL {} SEGMENT WAS SET R{}G{}B{}\".format(s, r, g, b)\n return \"NEOPIXEL s={} SEGMENT OVERLOAD\".format(s)\n\n\ndef toggle(state=None):\n \"\"\"\n ON - OFF NeoPixel\n \"\"\"\n if state is not None:\n __DCACHE[3] = 0 if state else 1\n if __DCACHE[3] == 1:\n neopixel(r=0, g=0, b=0)\n return \"OFF\"\n neopixel(__DCACHE[0], __DCACHE[1], __DCACHE[2])\n return \"ON\"\n\n#########################################\n# HELP #\n#########################################\n\n\ndef help():\n return 'neopixel(r=<0-255>, g, b, n=24', 'toggle(state=None)', \\\n 'neopixel_cache_load_n_init(cache=None', \\\n 'segment(r, g, b, s=<0-n>', '[!]PersistentStateCacheDisabledOn:esp8266'\n","sub_path":"micrOS/LM_neopixel.py","file_name":"LM_neopixel.py","file_ext":"py","file_size_in_byte":4000,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"646648735","text":"import itertools\r\nimport numpy, scipy, matplotlib.pyplot as plt, pandas, librosa,sklearn\r\n#import config\r\n\r\nfrom sklearn.neighbors import KNeighborsClassifier\r\nfrom sklearn.ensemble import RandomForestClassifier\r\nfrom sklearn.neural_network import MLPClassifier\r\nfrom sklearn.svm import 
SVC\r\n\r\n\r\ndata_set=pandas.read_csv(r'C:\\Users\\Rag9704\\Pictures\\Music-Genre-Classification-master\\data_set.csv',index_col=False)\r\nGENRES=['METAL', 'CLASSICAL', 'HIPHOP', 'BLUES', 'POP', 'REGGAE']\r\n\r\nnumber_of_rows,number_of_cols = data_set.shape\r\ndata_set[:5].style\r\n\r\nfrom sklearn.model_selection import train_test_split\r\n\r\ndata_set_values=numpy.array(data_set)\r\n\r\ntrain, test = train_test_split(data_set_values, test_size = 0.85,random_state=2,\r\n stratify=data_set_values[:,number_of_cols-1])\r\n\r\ntrain_x=train[:,:number_of_cols-1]\r\ntrain_y=train[:,number_of_cols-1]\r\n\r\ntest_x=test[:,:number_of_cols-1]\r\ntest_y=test[:,number_of_cols-1]\r\n\r\nprint(\"Training data size: {}\".format(train.shape))\r\nprint(\"Test data size: {}\".format(test.shape))\r\n\r\n\r\n\r\ndef confusion_matrix(cm, classes,\r\n title='Confusion matrix',\r\n cmap=plt.cm.Blues):\r\n \r\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\r\n plt.title(title)\r\n plt.colorbar()\r\n tick_marks = numpy.arange(len(classes))\r\n plt.xticks(tick_marks, classes, rotation=45)\r\n plt.yticks(tick_marks, classes)\r\n\r\n thresh = cm.max() / 2.\r\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\r\n plt.text(j, i, cm[i, j],\r\n horizontalalignment=\"center\",\r\n color=\"white\" if cm[i, j] > thresh else \"black\")\r\n\r\n plt.tight_layout()\r\n plt.ylabel('True label')\r\n plt.xlabel('Predicted label')\r\n\r\ndef plot_cnf(model,dataset_x,dataset_y,GENRES):\r\n true_y=dataset_y\r\n true_x=dataset_x\r\n pred=model.predict(true_x)\r\n\r\n print(\"---------------PERFORMANCE ANALYSIS FOR THE MODEL----------------\\n\")\r\n\r\n print(\"Real Test dataset labels: \\n{}\\n\".format(true_y))\r\n print(\"Predicted Test dataset labels: \\n{}\".format(pred))\r\n\r\n cnf_matrix=sklearn.metrics.confusion_matrix(true_y,pred)\r\n plt.figure()\r\n a=confusion_matrix(cnf_matrix,classes=GENRES,title='Confusion matrix')\r\n\r\n\r\n\r\nsvm=SVC(C=100,gamma=0.08)\r\nsvm.fit(train_x,train_y)\r\nprint(\"Training Score: {:.3f}\".format(svm.score(train_x,train_y)))\r\nprint(\"Test score: {:.3f}\".format(svm.score(test_x,test_y)))\r\n\r\nplot_cnf(svm,test_x,test_y,GENRES)\r\nplt.show()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n \r\n","sub_path":"ceps_1.py","file_name":"ceps_1.py","file_ext":"py","file_size_in_byte":2576,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"559615852","text":"from math import *\n\nprint(\"Pls enter 3 valeurs of triangle\", end=\" \")\n# string = input()\nstring = [10, 12, 14]\n# string = [5.8, 3, 5]\n\nnn = list(eval(str(string)))\nprint(nn)\n\nBC, AC, AB = nn[0], nn[1], nn[2]\na, b, c = nn[0], nn[1], nn[2]\n\n# Angle A\nangle_A = (-a**2 + b**2 + c**2) / (2 * b * c)\nangle_A = acos(angle_A)\nangle_A = (angle_A * 180)/pi\n\n# Angle B\nangle_B = (a**2 - b**2 + c**2) / (2 * c * a)\nangle_B = acos(angle_B)\nangle_B = (angle_B * 180)/pi\n\n# Angle C\nangle_C = (a**2 + b**2 - c**2) / (2 * a * b)\nangle_C = acos(angle_C)\nangle_C = (angle_C * 180)/pi\n\nallAngles = int(angle_A + angle_B + angle_C)\n\n\nif allAngles == 180:\n try:\n if a == b and b == c and c == b:\n print(\"The triangle is a Triangle Isocseles.\")\n elif a == b or b == c or c == b:\n print(\"The triangle is a Triangle Isocseles rectangle.\")\n elif 89 <= angle_A <= 91 or 89 <= angle_B <= 91 or 89 <= angle_C <= 91:\n print(\"The triangle is a Triangle rectangle.\")\n else:\n 
print(\"The triangle is an unspecified Triangle knowing.\")\n \n except:\n print(\"Oops! That was no valid number. Try again...\")\nelse:\n print(\"Is not a triangle\")\n","sub_path":"Python/Triangles.py","file_name":"Triangles.py","file_ext":"py","file_size_in_byte":1202,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"467736200","text":"\"\"\"\ncitadel.rest_api.point\n\nThis module handles point create, view and delete functions\n\"\"\"\nimport sys\nfrom uuid import uuid4\nfrom copy import deepcopy\nimport pdb\nimport json\n\nfrom flask import request, jsonify\nfrom flask.views import MethodView\n#from ..models import Point\nfrom flask_restplus import Api, Resource, Namespace, fields\nimport arrow\n\nfrom . import api\nfrom . import responses\nfrom .. import timeseriesdb\nfrom ..models import timeseries as ts\nfrom ..models.metadata import Point\nfrom ..schema.converters import schema_converter\nfrom mongoengine import NotUniqueError\n\n# API Models\n# ALL FIELD SHOULD HAVE AN EXAMPLE AT LEAST FOR DOCUMENTATION\n\ndef model_to_json(m):\n d = dict()\n for key,val in m.items():\n d[key] = val.example\n return json.dumps(d)\n\npoint_api = Namespace('point', description='Operations related to points')\n\nm_geo_point = point_api.model('GeoPoint', {\n 'coordinates': fields.List(\n fields.Float, min_items=2, max_items=2, example=[0.1234, -0.1234]),\n 'type': fields.String(example='Point')\n },\n example={'coordinates':[32.881679, -117.233344],'type':'Point'},\n description='Geological point with latitude and longitude')\n\n\"\"\" comment out for possible future purpose \nm_geo_point_list = point_api.model('GeoPointList', {\n 'geo_point_list': fields.List(fields.Nested(m_geo_point)),\n 'geo_type': fields.String()\n },\n example={'geo_point_list':\n [{'geo_point':[32.881679, -117.233344]}], 'geo_type': 'point'\n },\n description='Geological representation which could be either a point, \\\n a line, or a countour'\n )\n\"\"\"\n\n\nm_point_post = point_api.model('PointPost',{\n 'tags': fields.Raw(\n example={'tag1':'value1', 'tag2':'value2'},\n description='key, value pairs in dictionary format'),\n 'name': fields.String(\n description='Unique human readable identifier of point',\n example='point_name_1'),\n 'geometry': fields.Nested(m_geo_point, example=model_to_json(m_geo_point))\n },\n description='Representation of a data point',\n example={\n 'tags': {\n 'tag1': 'value1',\n 'tag2': 'value2'\n },\n 'name': 'user_defined_name1',\n 'geometry': {\n 'coordinates':[32.881679, -117.233344],\n 'type': 'point'\n }\n }\n )\n\nm_point = point_api.inherit('Point', m_point_post, {\n 'uuid': fields.String(\n description='Unique identifier of point',\n example='random_uuid_1')\n },\n )\nm_point.example = dict(list(json.loads(model_to_json(m_point_post)).items())\\\n +[('uuid','random_uuid_1')])\n\nm_point_list = point_api.model('PointList', {\n 'point_list': fields.List(fields.Nested(m_point),\n example=[model_to_json(m_point)])\n })\n\nm_message = point_api.model('Message',{\n 'success': fields.Boolean(example=True),\n 'reason': fields.String(example='reason_string'),\n 'uuid': fields.String(example='random_uuid_1')\n })\n\nm_timeseries = point_api.model('Timeseries',{\n 'success': fields.Boolean(example=True)\n })\n\nm_timeseries_post = point_api.model('TimeseriesPost', {\n 'samples': fields.Raw(\n description='Dictionary where key=timestamp integer \\\n and value=data value',\n example={'timestamp1':'value1', 'timestampe2':'value2'}\n )\n 
})\n\n\npoint_create_success_msg = 'Point created'\npoint_create_fail_msg = 'Failed to create point'\npoint_delete_success_msg = 'Point deleted'\npoint_delete_fail_msg = 'Failed to delete point'\n\ninfluxdb_time_format = \"2009-11-10T23:00:00Z\"\n\npoint_query_parser = point_api.parser()\npoint_query_parser.add_argument('tag_query', type=str, location='args',\n        help=model_to_json(m_point_post)\n        )\npoint_query_parser.add_argument('geo_query', type=str, location='args')\npoint_query_parser.add_argument('name', type=str, location='args')\n# TODO: Can this be more specified to have certain JSON structure in the str?\n\n@point_api.doc()\n@point_api.route('/')\nclass PointGenericAPI(Resource):\n\n#    @point_api.doc(body=m_point)\n    @point_api.expect(point_query_parser)\n    @point_api.response(200, 'Points found', m_point)\n    @point_api.marshal_list_with(m_point_list)\n    def get(self):\n        \"\"\" Query to points \"\"\"\n        args = point_query_parser.parse_args()\n        tag_query_str = args.get('tag_query')\n        point_name = args.get('name')\n\n        if point_name:\n            query_result = Point.objects(name=point_name)\n        else:\n            flattened_tag_query = dict()\n            if tag_query_str:\n                tag_query = json.loads(tag_query_str)\n                for tag, value in tag_query.items():\n                    flattened_tag_query['tags.%s'%tag] = value\n            else:\n                flattened_tag_query = {}\n            \n            geo_query_str = args.get('geo_query')\n            if geo_query_str:\n                geo_query = json.loads(geo_query_str)\n                if geo_query['type']=='bounding_box':\n                    west_south = geo_query['geometry_list'][0]\n                    east_north = geo_query['geometry_list'][1]\n                    query_result = Point.objects(\\\n                        __raw__=flattened_tag_query,\\\n                        geometry__geo_within_box=[west_south, east_north])\n            else:\n                query_result = Point.objects(__raw__=flattened_tag_query)\n\n        return {'point_list': query_result}\n\n    @point_api.doc(body=m_point_post)\n    @point_api.response(201, point_create_success_msg)\n    @point_api.response(409, point_create_fail_msg)\n    @point_api.marshal_with(m_message)\n    def post(self):\n        \"\"\" \n        Creates a point\n        \"\"\"\n        data = request.get_json(force=True)\n        point_name = data['name']\n        tags = data['tags']\n        uuid = str(uuid4())\n\n        try:\n            normalized_tags = schema_converter(tags)\n        except KeyError as err:\n            resp_data = {\n                'success': False,\n                'reason': 'Not matched to the schema: ' + str(err)\n            }\n            status_code = 409\n            return resp_data, 409\n\n        # Currently only geo_point type is supported\n        # TODO: Extend this to include line, shape, etc.\n        if data['geometry']['type'].lower() == 'point':\n            lng = data['geometry']['coordinates'][0]\n            lat = data['geometry']['coordinates'][1]\n        else:\n            raise Exception\n\n        try:\n            res = Point(\n                name=point_name, \n                uuid=uuid, \n                tags=normalized_tags, \n                geometry=[lng, lat]\n            ).save()\n            resp_data = {\n                'success': True,\n                'reason': '',\n                'uuid': uuid,\n            }\n            status_code = 201\n        except Exception as err:\n            resp_data = {\n                'success': False,\n                'msg': point_create_fail_msg\n            }\n            if isinstance(err, NotUniqueError): \n                # TODO: This needs to receive implementation-agnostic Exception\n                # I.e., we need Custom Exception classes\n                resp_data['reason'] = 'Given name already exists'\n                status_code = 409\n            else:\n                resp_data['reason'] = str(err)\n                status_code = 400\n\n        return resp_data, status_code\n\n\n@point_api.route('/<uuid>')\nclass PointAPI(Resource):\n\n    @point_api.marshal_with(m_point)\n    def get(self, uuid):\n        \"\"\" Get metadata of a point with given UUID \"\"\"\n        return Point.objects(uuid=uuid).first()\n\n    @point_api.response(200, point_delete_success_msg)\n    @point_api.response(404, point_delete_fail_msg)\n    
@point_api.marshal_with(m_message)\n    def delete(self, uuid):\n        \"\"\" Deletes a point with given UUID \"\"\"\n        \n        # delete from timeseries db (influxdb for now)\n        try:\n            ts.delete_point(uuid)\n        except:\n            pass\n\n        # delete from metadata db (mongodb for now)\n        point = Point.objects(uuid=uuid)\n        if len(point)==0:\n            resp_data = {\n                'success': False,\n                'reason': 'UUID Not found: {0}'.format(uuid)\n            }\n            status_code = 404\n        else:\n            resp_data = {\n                'success': True\n            }\n            point.get().delete()\n            status_code = 200\n        \n        return resp_data, status_code\n\n@point_api.param('uuid', 'Unique identifier of point')\n@point_api.route('/<uuid>/timeseries')\nclass TimeSeriesAPI(Resource):\n\n    def get(self, uuid):\n        \"\"\"\n        Reads the time series data of a point for the requested range\n\n        Parameters:\n            \"uuid\": \n            \"start_time\": \n            \"end_time\": \n\n        Returns (JSON):\n        {\n            \"data\":{\n                \"name\": \n                \"series\": [\n                    \"columns\": [column definitions]\n                ]\n                \"values\":[list of point values]\n            }\n            \"success\": \n        }\n\n        \"\"\"\n        start_time = request.args.get('start_time')\n        end_time = request.args.get('end_time')\n        points = ts.read_ts_data(uuid, start_time, end_time)\n        response = dict(responses.success_true)\n        response.update({'data': points})\n        return response\n\n    @point_api.doc(body=m_timeseries_post)\n    def post(self, uuid):\n        \"\"\" Posts timeseries data of a point \"\"\"\n        data = request.get_json(force=True)\n        result = ts.write_ts_data(uuid, data)\n        if result:\n            response = dict(responses.success_true)\n        else:\n            response = dict(responses.success_false)\n            response.update({'error': 'Error occurred when writing to InfluxDB'})\n\n        return response\n\n    @point_api.marshal_with(m_message)\n    def delete(self, uuid):\n        \"\"\"\n        Deletes timeseries data of a point in the requested time range.\n        \"\"\"\n        start_time = request.args.get('start_time')\n        end_time = request.args.get('end_time')\n        ts.delete_ts_data(uuid, start_time, end_time)\n        response = dict(responses.success_true)\n        response['uuid'] = uuid\n        return response\n","sub_path":"app/citadel/rest_api/point.py","file_name":"point.py","file_ext":"py","file_size_in_byte":10438,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"225776192","text":"# ~ from sklearn.datasets import load_iris\n# ~ iris = load_iris().data\nimport umap\nreducer=umap.UMAP(n_neighbors=20, n_components=2, metric='euclidean', n_epochs=500, learning_rate=1.0, init='spectral', min_dist=0.1, spread=1.0, set_op_mix_ratio=0.1, local_connectivity=1.0, repulsion_strength=1.0, negative_sample_rate=5, transform_queue_size=4.0, a=None, b=None, random_state=None, metric_kwds=None, angular_rp_forest=False, target_n_neighbors=-1, target_metric='categorical', target_metric_kwds=None, target_weight=0.5, transform_seed=42, verbose=False)\n# ~ reducer.fit_transform(iris)\n\nimport numpy as np\nw=[]\nwith open(\"test.tsv\") as f:\n    next(f)\n    for line in f:\n        w.append([float(x) for x in line.rstrip().split('\\t')])\nreducer.fit_transform(np.array(w).T)\n","sub_path":"07-EPISTEME/src/umap/umap_python/umapTest.py","file_name":"umapTest.py","file_ext":"py","file_size_in_byte":774,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"268204250","text":"import requests\n\n# localtion=input(\"Please enter a location: \")\ni=1\nwhile i<=3:\n    url = 'http://www.kfc.com.cn/kfccda/ashx/GetStoreList.ashx?op=keyword'\n    data={\n        \"cname\":\"\",\n        \"pid\":\"\",\n        \"keyword\": \"北京\",\n        \"pageIndex\": i,\n        \"pageSize\": 10\n    }\n    headers={\n        \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.198 Safari/537.36\"\n    }\n    response=requests.post(url=url,data=data,headers=headers)\n    data_dic=response.text\n    fileNmae=\"北京\"+\".text\"\n    with open(fileNmae,'w',encoding='utf-8') as fp:\n        fp.write(data_dic)\n    i+=1\n    print(\"Crawling finished!\")\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"05_requests实战之肯德基餐厅位置爬取.py","file_name":"05_requests实战之肯德基餐厅位置爬取.py","file_ext":"py","file_size_in_byte":705,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"627701085","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\n    run_coveralls.py\n    ~~~~~~~~~~~~~~~~\n\n    A WSGI compliant REST micro-framework.\n\n    :copyright: (c) 2016 Anomaly Software\n    :license: Apache 2.0, see LICENSE for more details.\n\"\"\"\n\nimport os\nimport sys\nfrom subprocess import Popen, STDOUT, PIPE\nfrom future.utils import PYPY, PY2, PY3, PY27\n\nprint(\"PYPY: {}\".format(PYPY))\nprint(\"PY2: {}\".format(PY2))\nprint(\"PY27: {}\".format(PY27))\nprint(\"PY3: {}\".format(PY3))\n\nif __name__ == '__main__':\n    if 'TRAVIS' in os.environ:\n        coverage_file = os.path.join(os.path.dirname(os.path.abspath(__file__)), os.pardir, '.coveragerc')\n        rc = Popen(['coveralls'], stdout=PIPE, stderr=STDOUT)\n        while True:\n            line = rc.stdout.readline()\n            if not line:\n                break\n            # py2to3 execute only else statement\n            if sys.version_info.major == 2:\n                sys.stdout.write(str(line))\n            else:\n                sys.stdout.write(line.decode('utf-8'))\n            sys.stdout.flush()\n        # py2to3 execute only else statement\n        if sys.version_info.major == 2:\n            exit(rc.wait())\n        else:\n            # noinspection PyArgumentList\n            exit(rc.wait(timeout=5))\n    else:\n        print(\"skipping coverage for python version: {}\".format(\n            os.environ.get('TRAVIS_PYTHON_VERSION',\n                           \".\".join([str(v) for v in sys.version_info]))))\n        raise SystemExit(0)\n","sub_path":"bin/run_coveralls.py","file_name":"run_coveralls.py","file_ext":"py","file_size_in_byte":1479,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"364504340","text":"# Use from Assignment 3.5\n\ndef euklid(a, b):\n    \"\"\"\n    Implements Euklid's algorithm from pseudo-code.\n    :param a: an integer\n    :param b: an integer\n    :return: largest common divisor of a and b\n    \"\"\"\n    if b == 0:\n        return a\n    elif a == 0:\n        return b\n    elif a > b:\n        return euklid(a - b, b)\n    else:\n        return euklid(a, b - a)\n\n\nclass Ratio():\n    def __init__(self, numerator, denominator):\n        assert all([type(numerator) is int, type(denominator) is int, denominator != 0])\n        if (denominator < 0):\n            numerator = -numerator\n            denominator = -denominator\n        self.numerator = numerator\n        self.denominator = denominator\n\n    def __str__(self):\n        if self.denominator == 1:\n            return str(self.numerator)\n        else:\n            return str(self.numerator) + \"/\" + str(self.denominator)\n\n    def __eq__(self, other):\n        assert type(other) is Ratio\n        return str(self) == str(other)\n\n    def expand(self, factor):\n        self.denominator *= factor\n        self.numerator *= factor\n        return self\n\n    def reduce(self):\n        lcd = euklid(self.numerator, self.denominator)\n        self.numerator //= lcd\n        self.denominator //= lcd\n        return self\n\n    def __add__(self, other):\n        assert type(other) is Ratio\n        if self.denominator == other.denominator:\n            return Ratio(self.numerator + other.numerator, self.denominator)\n        else:\n            # lcd (m,n) * scm(m,n) = m * n ==> scm(m,n) = m * [ n / lcd (m,n) ]\n            lcd = euklid(self.denominator, 
other.denominator)\n self_factor = other.denominator//lcd\n other_factor = self.denominator // lcd\n self.expand(self_factor)\n other.expand(other_factor)\n return Ratio(self.numerator + other.numerator, self.denominator)\n\n\n\n","sub_path":"lecture_7/ratio.py","file_name":"ratio.py","file_ext":"py","file_size_in_byte":1872,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"265554984","text":"f = open(\"Tides.txt\", \"rt\")\nelements = []\n\n# Load the records from the file into the elements structure\nfor tides in f:\n tides = tides.rstrip()\n rec = str.split(tides, \",\")\n rec[1] = float(rec[1])\n rec[2] = float(str.rstrip(rec[2], ' meters'))\n elements.append(rec)\n\n# Print the elements\nfor rec in elements:\n print(rec[0] + \": \" + str(rec[1]) + \" meters at lowest and \" + str(rec[2]) + \" meters at highest\")\n\nf.close()\n\n","sub_path":"Week 4/Task 2c.py","file_name":"Task 2c.py","file_ext":"py","file_size_in_byte":439,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"233867684","text":"#!/usr/bin/env python3\n\nimport threading\n\nclass PCQueue:\n\n def __init__ (self):\n self.queue = []\n self.q_lock = threading.Lock()\n self.full = threading.Semaphore(0)\n self.empty = threading.Semaphore(10)\n\n def put(self,item):\n self.empty.acquire()\n self.q_lock.acquire()\n self.queue.append(item)\n self.q_lock.release()\n self.full.release()\n\n def get(self):\n self.full.acquire()\n self.q_lock.acquire()\n item = self.queue.pop(0)\n self.q_lock.release()\n self.empty.release()\n return item\n\n def markEnd(self):\n self.put('end')\n","sub_path":"PCQueue.py","file_name":"PCQueue.py","file_ext":"py","file_size_in_byte":646,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"557218697","text":"# ----------------\nprint(\"\\n-----\\n\")\n# ----------------\n\nprint(\n \"ex_132: \\\nÉcrivez un script qui permette d'obtenir à l'écran les 15 premiers termes des \\\ntables de multiplication par 2, 3, 5, 7, 11, 13, 17, 19 (ces nombres seront \\\nplacés au départ dans une liste) sous la forme d'une table similaire à la \\\nsuivante : \\\n 2 4 6 8 10 12 14 16 18 20 22 24 26 28 30 \\\n 3 6 9 12 15 18 21 24 27 30 33 36 39 42 45 \\\n 5 10 15 20 25 30 35 40 45 50 55 60 65 70 75.\\n\"\n)\n# Affichage de tables de multiplication\nnt = [2, 3, 5, 7, 9, 11, 13, 17, 19]\n\ndef tableMulti(m, n):\n ''' renvoie n termes de la table de multiplication par m '''\n ch = \"\"\n for i in range(n):\n # calcul d'un des termes\n v = m * (i + 1)\n # formatage à 4 caractères\n ch = ch + \"%4d\" % (v)\n return ch\n\n# 15 premiers termes seulement\nfor a in nt:\n print(tableMulti(a, 15))\n\n# ----------------\nprint(\"\\n-----\\n\")\n# ----------------\n\nprint(\n \"ex_133: \\\nSoit la liste suivante : \\\n['Jean-Michel', 'Marc', 'Vanessa', 'Anne', 'Maximilien', 'Alexandre-Benoît', 'Louise']. \\\nÉcrivez un script qui affiche chacun de ces noms avec le nombre de \\\ncaractères correspondant.\\n\"\n)\nlst = ['Jean-Michel', 'Marc', 'Vanessa', 'Anne', 'Maximilien', 'Alexandre-Benoît', 'Louise']\nfor e in lst:\n print(\"%s : %s caractères\" % (e, len(e)))\n\n# ----------------\nprint(\"\\n-----\\n\")\n# ----------------\n\nprint(\n \"ex_134: \\\nVous disposez d'une liste de nombres entiers quelconques, certains d'entre \\\neux étant présents en plusieurs exemplaires. Écrivez un script qui recopie \\\ncette liste dans une autre, en omettant les doublons. 
La liste finale devra \\\nêtre triée.\\n\"\n)\n# Elimination de doublons\nlst = [9, 12, 40, 5, 12, 3, 27, 5, 9, 3, 8, 22, 40, 3, 2, 4, 6, 25]\nlst2 = []\n\nfor el in lst:\n if el not in lst2:\n lst2.append(el)\n\nlst2.sort()\nprint(lst2)\n\n# ----------------\nprint(\"\\n-----\\n\")\n# ----------------\n\nprint(\n \"ex_136: \\\nÉcrivez un script capable d'afficher la liste de tous les jours d'une année \\\nimaginaire, laquelle commencerait un Jeudi. Votre script utilisera lui-même \\\ntrois listes : une liste des noms de jours de la semaine, une liste des noms \\\ndes mois, et une liste des nombres de jours que comportent chacun des mois \\\n(ne pas tenir compte des années bissextiles). \\\nExemple de sortie : \\\nJeudi 1 Janvier Vendredi 2 Janvier Samedi 3 Janvier Dimanche 4 Janvier \\\n... et ainsi de suite jusqu'au Jeudi 31 Décembre.\\n\"\n)\n# Cette variante utilise une liste de listes ##\n# (que l'on pourrait aisément remplacer par deux listes distinctes)\n\n# La liste ci-dessous contient deux éléments qui sont eux-mêmes des listes.\n# l'élément 0 contient les nombres de jours de chaque mois, tandis que\n# l'élément 1 contient les noms des douze mois :\nmois = [[31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31], ['Janvier', 'Février', 'Mars', 'Avril', 'Mai', 'Juin', 'Juillet', 'Août', 'Septembre', 'Octobre', 'Novembre', 'Décembre']]\njour = ['Dimanche', 'Lundi', 'Mardi', 'Mercredi', 'Jeudi', 'Vendredi', 'Samedi']\n\nja, jm, js, m = 0, 0, 0, 0\nwhile ja < 365:\n # ja = jour dans l'année, jm = jour dans le mois\n ja, jm = ja + 1, jm + 1\n # js = jour de la semaine. Le décalage ajouté permet de choisir le jour de départ\n js = (ja + 3) % 7\n # élément m de l'élément 0 de la liste\n if jm > mois[0][m]:\n jm, m = 1, m + 1\n # élément m de l'élément 1 de la liste\n print(jour[js], jm, mois[1][m])\n\n# ----------------\nprint(\"\\n-----\\n\")\n# ----------------\n\nprint(\n \"ex_139: \\\nSoient les listes suivantes : \\\nt1 = [31,28,31,30,31,30,31,31,30,31,30,31] \\\nt2 = ['Janvier','Février','Mars','Avril','Mai','Juin', \\\n'Juillet','Août','Septembre','Octobre','Novembre','Décembre'] \\\nÉcrivez un petit programme qui insère dans la seconde liste tous les éléments \\\nde la première, de telle sorte que chaque nom de mois soit suivi du nombre de \\\njours correspondant : ['Janvier',31,'Février',28,'Mars',31, etc...].\\n\"\n)\n# Insertion de nouveaux éléments dans une liste existante\nt1 = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]\nt2 = ['Janvier','Février','Mars','Avril','Mai','Juin',\n 'Juillet','Août','Septembre','Octobre','Novembre','Décembre']\n\nc, d = 1, 0\nwhile d < 12:\n # ! l'élément inséré doit être une liste\n t2[c:c] = [t1[d]]\n c, d = c + 2, d + 1\n\nprint(t2)\n\n# ----------------\nprint(\"\\n-----\\n\")\n# ----------------\n\nprint(\n \"ex_143: \\\nUn nombre premier est un nombre qui n'est divisible que par un et par lui-même.\\\nÉcrivez un programme qui établisse la liste de tous les nombres premiers \\\ncompris entre 1 et 1000, en utilisant la méthode du crible d'Eratosthène : \\\n- Créez une liste de 1000 éléments, chacun initialisé à la valeur 1. \\\n- Parcourez cette liste à partir de l'élément d'indice 2 : \\\nsi l'élément analysé possède la valeur 1, mettez à zéro tous les autres \\\néléments de la liste, dont les indices sont des multiples entiers de l'indice \\\nauquel vous êtes arrivé. \\\nLorsque vous aurez parcouru ainsi toute la liste, les indices des éléments qui \\\nseront restés à 1 seront les nombres premiers recherchés. 
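# --- editor's aside (illustrative sketch, not part of the original exercise file) ---
# A compact form of the sieve described above; starting the inner loop at i*i
# skips multiples already crossed out by smaller primes (assumes n >= 2):
def primes_under(n):
    flags = [True] * n
    flags[0] = flags[1] = False
    for i in range(2, int(n ** 0.5) + 1):
        if flags[i]:
            for j in range(i * i, n, i):
                flags[j] = False
    return [k for k, ok in enumerate(flags) if ok]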
\\\nEn effet : A partir de l'indice 2, vous annulez tous les éléments d'indices \\\npairs : 4, 6, 8, 10, etc. \\\nAvec l'indice 3, vous annulez les éléments d'indices 6, 9, 12, 15, etc., et \\\nainsi de suite. Seuls resteront à 1 les éléments dont les indices sont \\\neffectivement des nombres premiers.\\n\"\n)\n# Crible d'Eratosthène pour rechercher les nombres premiers de 1 à 999\n# Créer une liste de 1000 éléments 1 (leurs indices vont de 0 à 999) :\nlst = [1] * 1000\n# Parcourir la liste à partir de l'élément d'indice 2:\nfor i in range(2, 1000):\n # Mettre à zéro les éléments suivants dans la liste,\n # dont les indices sont des multiples de i :\n for j in range(i * 2, 1000, i):\n lst[j] = 0\n\n# Afficher les indices des éléments restés à 1 (on ignore l'élément 0) :\nfor i in range(1, 1000):\n if lst[i]:\n print(i)\n\n# ----------------\nprint(\"\\n-----\\n\")\n# ----------------\n\nprint(\n \"ex_146: \\\nVous allez écrire un programme destiné à vérifier le fonctionnement du générateur de \\\nnombres aléatoires de Python en appliquant la théorie exposée ci-dessus. Votre programme \\\ndevra donc : \\\na) Demander à l'utilisateur le nombre de valeurs à tirer au hasard à l'aide de la fonction \\\nrandom(). Il serait intéressant que le programme propose un nombre par défaut (1000 par \\\nexemple). \\\nb)Demander à l'utilisateur en combien de fractions il souhaite partager l'intervalle des valeurs \\\npossibles (c'est-à-dire l'intervalle de 0 à 1). Ici aussi, il faudrait proposer un nombre de par \\\ndéfaut (5 fractions, par exemple). Vous pouvez également limiter le choix de l'utilisateur à \\\nun nombre compris entre 2 et le 1/10e du nombre de valeurs tirées au hasard. \\\nc) Construire une liste de N compteurs (N étant le nombre de fractions souhaitées). Chacun \\\nd'eux sera évidemment initialisé à zéro. \\\nd)Tirer au hasard toutes les valeurs demandées, à l'aide de la fonction random() , et \\\nmémoriser ces valeurs dans une liste. \\\ne) Mettre en œuvre un parcours de la liste des valeurs tirées au hasard (boucle), et effectuer \\\nun test sur chacune d'elles pour déterminer dans quelle fraction de l'intervalle 0-1 elle se \\\nsitue. Incrémenter de une unité le compteur correspondant. \\\nf) Lorsque c'est terminé, afficher l'état de chacun des compteurs.\\n\"\n)\n# tire au hasard un réel entre 0 et 1\nfrom random import random\n\nn = input(\"Nombre de valeurs à tirer au hasard (défaut = 1000) : \")\nif n == \"\":\n nVal = 1000\nelse:\n nVal = int(n)\n\nn = input(\"Nombre de fractions dans l'intervalle 0-1 (entre 2 et \" + str(nVal/10) + \", défaut =5) : \")\nif n == \"\":\n nFra = 5\nelse:\n nFra = int(n)\n\nif nFra < 2:\n nFra = 2\nelif nFra > nVal / 10:\n nFra = nVal / 10\n\nprint(\"Tirage au sort des\", nVal, \"valeurs ...\")\n# créer une liste de zéros\nlistVal = [0] * nVal\n# puis modifier chaque élément\nfor i in range(nVal):\n listVal[i] = random()\n\nprint(\"Comptage des valeurs dans chacune des\", nFra, \"fractions ...\")\n# créer une liste de compteurs\nlistCompt = [0] * nFra\n\n# parcourir la liste des valeurs :\nfor valeur in listVal:\n # trouver l'index de la fraction qui contient la valeur :\n index = int(valeur * nFra)\n # incrémenter le compteur correspondant :\n listCompt[index] = listCompt[index] + 1\n\n# afficher l'état des compteurs :\nfor compt in listCompt:\n print(compt)\n\n# ----------------\nprint(\"\\n-----\\n\")\n# ----------------\n\nprint(\n \"ex_147: \\\nEcrivez un script qui tire au hasard des cartes à jouer. 
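# --- editor's aside (illustrative, not part of the original exercise file) ---
# The bucket index int(valeur * nFra) used in ex_146 maps [0, 1) uniformly
# onto nFra bins; collections.Counter yields the same tallies in one pass:
from collections import Counter
from random import random
bucket_counts = Counter(int(random() * 5) for _ in range(1000))
print([bucket_counts[b] for b in range(5)])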
Le nom de la carte \\\ntirée doit être correctement présenté, « en clair ». Le programme affichera \\\npar exemple : \\\nFrappez pour tirer une carte : \\\nDix de Trèfle \\\nFrappez pour tirer une carte : \\\nAs de Carreau \\\nFrappez pour tirer une carte : \\\nHuit de Pique\\n\"\n)\nfrom random import randrange\n\ncouleurs = ['Pique', 'Trèfle', 'Carreau', 'Coeur']\nvaleurs = [2, 3, 4, 5, 6, 7, 8, 9, 10, 'valet', 'dame', 'roi', 'as']\n\n# Construction de la liste des 52 cartes :\ncarte = []\nfor coul in couleurs:\n for val in valeurs:\n carte.append(\"%s de %s \" % (str(val), coul))\n\n# Tirage au hasard :\nwhile 1:\n k = input(\"Frappez pour tirer une carte, pour terminer : \")\n if k == \"\":\n break\n r = randrange(52)\n print(carte[r])\n\n# ----------------\nprint(\"\\n-----\\n\")\n# ----------------\n\nprint(\n \"ex_148: \\\nCréez un dictionnaire qui contienne les noms d'une série d'élèves, leur âge et \\\nleur taille. Le nom de l'élève servira de clé d'accès. Exprimez l'âge en \\\nannées (nombre entier), et exprimez la taille en mètres (ainsi vous devrez \\\nemployer pour celle-ci une variable de type float !). Écrivez un petit script \\\nqui affiche le contenu de ce dictionnaire en utilisant le formatage des chaînes\\\nde caractères décrit à la page 127.\\n\"\n)\ndico = {'Marinette':(15, 1.65),\n 'Sylvie':(14, 1.58),\n 'Albert':(16, 1.72),\n 'Juliette':(15, 1.59),\n 'Nicolas':(14, 1.62)}\n\nfor cle in dico:\n item = dico[cle]\n age = item[0]\n taille = item[1]\n print(\"Nom = %s - Age = %s ans - Taille = %s m.\" % (cle, age, taille))\n\n# ----------------\nprint(\"\\n-----\\n\")\n# ----------------\n\nprint(\n \"ex_149: \\\nÉcrivez une fonction qui échange les clés et les valeurs d'un dictionnaire (ce\\\nqui permettra par exemple de transformer un dictionnaire anglais/français en \\\nun dictionnaire français/anglais). (On suppose que le dictionnaire ne contient \\\npas plusieurs valeurs identiques).\\n\"\n)\ndef inverse(dico):\n ''' Construction d'un nouveau dico, pas à pas '''\n dic_inv = {}\n for cle in dico:\n item = dico[cle]\n dic_inv[item] = cle\n\n return dic_inv\n\n# programme test :\ndico = {'Computer':'Ordinateur',\n 'Mouse':'Souris',\n 'Keyboard':'Clavier',\n 'Hard disk':'Disque dur',\n 'Screen':'Ecran'}\n\nprint(dico)\nprint(inverse(dico))\n\n# ----------------\nprint(\"\\n-----\\n\")\n# ----------------\n\nprint(\n \"ex_150: \\\nVous avez à votre disposition un fichier texte quelconque (pas trop gros). \\\nÉcrivez un script qui compte les occurrences de chacune des lettres de \\\nl'alphabet dans ce texte (on ne tiendra pas compte du problème des lettres \\\naccentuées).\\n\"\n)\n# histogramme\nnFich = input(\"Nom du fichier : \")\nfi = open(nFich, 'r')\n# conversion du fichier en une chaîne de caractères\ntext = fi.read()\nfi.close()\n\nprint(text)\ndico = {}\nfor c in text:\n # conversion de toutes les lettres en majuscules\n c = c.upper()\n dico[c] = dico.get(c, 0) + 1\n\nliste = dico.items()\nprint(liste)\nliste = sorted(liste)\nprint(liste)\n\n# ----------------\nprint(\"\\n-----\\n\")\n# ----------------\n\nprint(\n \"ex_151: \\\nModifiez le script ci-dessus afin qu'il établisse une table des occurrences de \\\nchaque mot dans le texte. Conseil : dans un texte quelconque, les mots ne sont \\\npas seulement séparés par des espaces, mais également par divers signes de \\\nponctuation. 
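# --- editor's aside (illustrative, not part of the original exercise file) ---
# ex_149's inversion and ex_150's histogram both have idiomatic one-pass
# forms; the inversion still assumes no duplicate values, as the exercise
# states:
from collections import Counter
sample_dico = {'Mouse': 'Souris', 'Screen': 'Ecran'}
print({v: k for k, v in sample_dico.items()})
print(Counter(c.upper() for c in "exemple de texte").most_common(3))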
Pour simplifier le problème, vouspouvez commencer par remplacer \\\ntous les caractères non-alphabétiques par des espaces, et convertir la chaîne \\\nrésultante en une liste de mots à l'aide de la méthode split().\\n\"\n)\nnFich = input(\"Nom du fichier : \")\nfi = open(nFich, 'r')\n# conversion du fichier en une chaîne de caractères\ntext = fi.read()\nfi.close()\nprint(text)\n\n# afin de pouvoir aisément séparer les mots du texte, on commence\n# par convertir tous les caractères non-alphabétiques en espaces :\nalpha = \"abcdefghijklmnopqrstuvwxyzéèàùçâêîôûäëïöü\"\n\n# nouvelle chaîne à construire\nlettres = ''\nfor c in text:\n # conversion de chaque caractère en minuscule\n c = c.lower()\n if c in alpha:\n lettres = lettres + c\n else:\n lettres = lettres + ' '\n\n# conversion de la chaîne résultante en une liste de mots :\nmots = lettres.split()\n\n# construction de l'histogramme :\ndico = {}\nfor m in mots:\n dico[m] = dico.get(m, 0) + 1\n\nliste = dico.items()\n# tri de la liste résultante :\nliste = sorted(liste)\n\n# affichage en clair :\nfor item in liste:\n print(item[0], \" : \", item[1])\n","sub_path":"pyNutz/001_Apprendre_a_programmer_avec_Python/Chapitre 10 - Approfondir les structures de données/approfondir_les_structures_de_donnees_02.py","file_name":"approfondir_les_structures_de_donnees_02.py","file_ext":"py","file_size_in_byte":13163,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"98523270","text":"\n\"\"\" Pobiera metadane z pliku mp3: odczytuje numer ścieżki i zmienia nazwę pliku wg numeru ścieżki.\n Wykorzystuje mutagen id3. TAG 'TIT2' - tytuł, 'TRCK' - numer ścieżki \"\"\"\n\nfrom mutagen.id3 import ID3\nimport os\nimport sys\nimport re\n\n# path = sys.argv[1] # argument wiersza poleceń -- katalog z utworami\n# path = ' '.join(sys.argv[1:]) # Nie trzeba używać \"\"\n\n\ndef create_numbers(path):\n # ending = input('Rodzaj pliku (mp3, FLAC): ')\n # Wszystkie pliki z katalogu z końcówką mp3\n ending = 'mp3'\n all_files = [files for files in os.listdir(path) if files.endswith(ending)]\n\n audio_files = []\n # Utworzenie ścieżki dostępu do każdego pliku mp3\n for elem in all_files:\n audio_files.append(os.path.join(path, elem))\n\n files_metadata_dct = {}\n\n # Wyszukanie tylko cyfr/liczb początkowych\n pattern = re.compile(r\"\"\"^(\\d{1,3})\"\"\") # Jedna, dwie, lub trzy cyfry\n\n # Pobranie numeru ścieżki i utworzenie słownika NUM:TITLE\n strange_symbol_pattern = re.compile(r\"\"\"(\\*)*\"\"\") # Jeśli w metadata pojawią się znaki '*'\n for f in audio_files:\n audio = ID3(f)\n digit = pattern.search(audio['TRCK'].text[0]) # Dopasowanie wzorca\n if int(digit.group()) in files_metadata_dct.keys(): # Jeśli pojawi się ponownie ten sam numer utworu\n print(f'Nadpisanie utworu {digit.group()}.\\nSprawdź poprawność i ilość plików.')\n sys.exit()\n new_audio = strange_symbol_pattern.sub('', audio['TIT2'].text[0]) # Usuń znalezione gwiazdki\n files_metadata_dct.setdefault(int(digit.group()), new_audio)\n # files_metadata_dct[int(digit.group())] = audio['TIT2'].text[0] # Druga możliwość przypisania\n\n tmp = []\n # Lista Track_Num, Title\n for k, v in files_metadata_dct.items():\n if int(k) < 10:\n tmp.append('0' + str(k))\n tmp.append(v)\n else:\n tmp.append(str(k))\n tmp.append(v)\n\n # Utworzenie pełnego tytułu ze składowych listy tmp\n full_title_list = [i + ' ' + j + '.mp3' for i, j in zip(tmp[0::2], tmp[1::2])]\n\n for i, elem in enumerate(audio_files):\n # print(f'{elem}', ' --> ', 
f'{path}\\\\{full_title_list[i]}')\n # os.rename(f'{elem}', f'{path}\\\\{full_title_list[i]}') # old, new\n os.rename(elem, os.path.join(path, full_title_list[i]))\n\n\n","sub_path":"014_gui_mp3_metadata/gui_mp3_metadata.py","file_name":"gui_mp3_metadata.py","file_ext":"py","file_size_in_byte":2396,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"304139210","text":"import cv2\nimport pickle\nfrom keras.models import load_model\nimport vocabulary\n\n\n# пути к моделям\nnum_model = './models/num.model'\nrus_model = './models/rus.model'\nnum_letter_model = './models/num_letter_glyphs_model.model'\ngost_model = './models/gost.model'\n\n# пути к классам\nnum_classes = 'classes/num.pickle'\nrus_classes = 'classes/rus.pickle'\nnum_letter_classes = 'classes/num_letter_glyphs_classes.pickle'\ngost_classes = 'classes/gost.pickle'\n\n\n# для правильной кодировки кириллицы d в пути изображения\ndef predict(letter):\n # загружаем модель и бинаризатор меток\n model = load_model(gost_model)\n lb = pickle.loads(open(gost_classes, 'rb').read())\n\n # меняем его размер на необходимый\n image = cv2.resize(letter, (64, 64))\n\n # масштабируем значения пикселей к диапазону [0, 1]\n image = image.astype('float') / 255.0\n image = image.reshape((1, image.shape[0], image.shape[1], image.shape[2]))\n\n # делаем предсказание на изображении\n preds = model.predict(image)\n # находим индекс метки класса с наибольшей вероятностью соответствия\n i = preds.argmax(axis=1)[0]\n label = lb.classes_[i]\n\n symbol = vocabulary.symbols.get(label)\n percent = f'{preds[0][i] * 100:.2f}%'\n\n return symbol, percent\n","sub_path":"predictor.py","file_name":"predictor.py","file_ext":"py","file_size_in_byte":1501,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"115617147","text":"\"\"\"\nA Kraken Trade Bot which use tensorflow to predict price changes\n\"\"\"\nfrom setuptools import find_packages, setup\nimport re\nimport io\n\ndependencies = ['click', 'peewee', 'matplotlib', 'ta-lib']\n\n__version__ = re.search(\n r'__version__\\s*=\\s*[\\'\"]([^\\'\"]*)[\\'\"]', # It excludes inline comment too\n io.open('Kraken-TF-Trade/_version.py', encoding='utf_8_sig').read()\n).group(1)\n\nsetup(\n name='Kraken-TF-Trade',\n version=__version__,\n url='https://github.com/Marbel/Kraken-TF-Trade',\n license='MIT',\n author='Jan-Andre Moebis',\n author_email='das.marbel@googlemail.com',\n description='A Kraken Trade Bot which use tensorflow to predict price changes',\n long_description=__doc__,\n packages=find_packages(exclude=['tests']),\n include_package_data=True,\n zip_safe=False,\n platforms='any',\n install_requires=dependencies,\n entry_points={\n 'console_scripts': [\n 'Kraken-TF-Trade = Kraken-TF-Trade.cli:cli',\n ],\n },\n classifiers=[\n # As from http://pypi.python.org/pypi?%3Aaction=list_classifiers\n # 'Development Status :: 1 - Planning',\n 'Development Status :: 2 - Pre-Alpha',\n # 'Development Status :: 3 - Alpha',\n # 'Development Status :: 4 - Beta',\n # 'Development Status :: 5 - Production/Stable',\n # 'Development Status :: 6 - Mature',\n # 'Development Status :: 7 - Inactive',\n 'Environment :: Console',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: POSIX',\n 'Operating System :: MacOS',\n 'Operating System :: Unix',\n 'Operating System :: Microsoft :: Windows',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Topic :: 
Software Development :: Libraries :: Python Modules',\n ]\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1871,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"624748951","text":"# Internal imports\nimport time\nimport pickle\n\n# External imports\nimport numpy as np\n\n# ASE\nfrom ase import Atoms\nfrom ase.db import connect\nfrom ase.dft.dos import DOS\nfrom ase.parallel import world\nfrom ase.optimize import BFGS\n\n# GPAW\nfrom gpaw import GPAW, PW\n\n''' \nPerform GPAW DFT calculation of the electron density of states\nfor the nanoparticles with N < 100. \n'''\n# Connect to DB\nstructDB = connect('../CourseGitRepo/HA5_Al-clusters-initial.db')\neosDB = connect('./eos.db', append=False)\n\n# Sort the clusters based on number of atoms\nallClust = list(structDB.select())\nsort = np.argsort([len(clust.numbers) for clust in allClust])\nallClust = np.array(allClust)[sort]\n\nfor clust in allClust:\n # General info\n atoms = clust.toatoms()\n N = len(atoms.positions)\n if(N<100):\n start = time.time()\n # if world.rank == 0:\n print(f'Calculating EOS for Al{N}')\n\n # Define electron calculator (GPAW)\n calc = GPAW(\n mode=PW(300), # Lower for computational efficiency\n txt=f'./gpaw-out/EOS_{N}_1core.txt'\n ) # Use the same calculator as in task6\n atoms.set_calculator(calc)\n pot_e = atoms.get_potential_energy() # Self-constistently optimize the electron density\n # if world.rank == 0:\n print(f'Cluster Al{N} finished potential energy per atom: {pot_e / N:.2f} eV')\n\n # Get the electronic DOS\n dos = DOS(calc, npts=800, width=0.2)\n \n e = dos.get_energies()\n d = dos.get_dos()\n e_f = calc.get_fermi_level() \n e -= e_f # Subtract the Fermi level from the energy \n \n ##### Get the DOS using the same method as in task6\n # print('Electronic band structure calculated')\n # kpts = {'size': (40,40,40)}\n # calc.set(\n # kpts = kpts, \n # fixdensity=True,\n # symmetry='off', \n # )\n # # Fix the potential\n # calc.get_potential_energy()\n # e, dos = calc.get_dos(spin=0, npts=1001, width=0.5) # Get energy and density of states\n # e_f = calc.get_fermi_level() \n\n # Edos = {\n # 'e': e, \n # 'dos': dos,\n # 'fermi': e_f\n # } \n\n # # Save results\n # pickle.dump( Edos, open( f'./dos/Edos_Al{N}_1core.p', \"wb\" ) ) # Save the electronic DOS\n\n end = time.time()\n # if world.rank == 0:\n print(f'Cluster Al{N} finished ---- Time: {(end-start):.2f} s')\n eosDB.write(atoms, data={'energy': e, 'DOS': d, 'fermi': e_f})\n calc.write(f'./calculators/calc{N}.gpw') # Save the calculator\n else:\n # if world.rank == 0:\n print(f'Skipping Al{N}')","sub_path":"computational_materials_and_molecular_physics/HW5/task5/task5.py","file_name":"task5.py","file_ext":"py","file_size_in_byte":2690,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"344497608","text":"from pyA20.gpio import gpio\nfrom pyA20.gpio import port\n\n#import RPi.GPIO as GPIO\nimport dht22\nimport time\nimport datetime\nimport paho.mqtt.publish as publish\nimport os\n\n# initialize GPIO\n#gpio.setwarnings(False)\n#gpio.setmode(GPIO.BCM)\nPIN2 = port.PA6\ngpio.init()\n#gpio.cleanup()\n\n# read data using pin 14\ninstance = dht22.DHT22(pin=PIN2)\n\nTEMP = 0\nHUM = 0\n\ndef pub (topic, val):\n publish.single(\"orangepipc2/\"+topic, val, client_id=\"OrangPI\", hostname=\"%ip%\", auth={'username':\"%username%\",'password':\"%password%\"})\n\ndef readDHT22 ():\n while True:\n result = instance.read()\n 
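        # (editor's note) DHT22 reads fail intermittently over the sensor's
        # one-wire protocol, so this loop polls until a checksum-valid result
        # arrives, sleeping one second between failed attempts.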
if result.is_valid():\n TEMP1 = (\"%.2f\" % result.temperature)\n HUM1 = (\"%.2f\" % result.humidity)\n return TEMP1, HUM1\n time.sleep(1)\n\nwhile True:\n\n val = readDHT22()\n TEMP1 = val[0]\n HUM1 = val[1]\n if TEMP1 != TEMP:\n TEMP = TEMP1\n # print(TEMP)\n pub(\"TEMP\", TEMP)\n if HUM1 != HUM:\n HUM = HUM1\n # print(HUM)\n pub(\"HUM\", HUM)\n time.sleep(120)\n","sub_path":"dht22_mqtt.py","file_name":"dht22_mqtt.py","file_ext":"py","file_size_in_byte":1035,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"51218158","text":"import configparser\nimport pathlib\nimport os\nimport subprocess\n\ndef read_config(reference = \"b37\", conda_env = \"bp\"):\n lib_home = os.path.dirname(os.path.realpath(__file__))\n pipe_home = os.path.normpath(lib_home + \"/..\")\n env_dir = subprocess.check_output(\"conda info -e | grep -w ^{} | awk '{{print $NF}}'\".format(conda_env),\n shell=True, universal_newlines=True).strip()\n config = configparser.ConfigParser()\n config[\"PATH\"] = {\n \"pipe_home\": pipe_home,\n \"env_dir\": env_dir\n }\n\n config.read(pipe_home + (\"/config.hg19.ini\" if reference == \"hg19\" else \"/config.hg38.ini\" if reference == \"hg38\" else \"/config.ini\"))\n for section in [\"TOOLS\", \"RESOURCES\"]:\n for key in config[section]:\n # config[section][key] = pipe_home + \"/\" + config[section][key]\n config[section][key] = config[section][key].format(ENVDIR=env_dir, PIPEHOME=pipe_home)\n\n return config\n\ndef run_info(fname, reference, conda_env = \"bp\"):\n config = read_config(reference, conda_env)\n pathlib.Path(os.path.dirname(fname)).mkdir(parents=True, exist_ok=True)\n with open(fname, \"w\") as run_file:\n run_file.write(\"#PATH\\nPIPE_HOME={}\\nENV_DIR={}\\n\".format(config[\"PATH\"][\"pipe_home\"], config[\"PATH\"][\"env_dir\"]))\n for section in [\"TOOLS\", \"RESOURCES\"]:\n run_file.write(\"\\n#{section}\\n\".format(section=section))\n for key in config[section]:\n run_file.write(\"{key}={val}\\n\".format(\n key=key.upper(), val=config[section][key]))\n\ndef run_info_append(fname, line):\n with open(fname, \"a\") as run_file:\n run_file.write(line + \"\\n\")\n\ndef log_dir(sample):\n log_dir = sample+\"/logs\"\n pathlib.Path(log_dir).mkdir(parents=True, exist_ok=True)\n return log_dir\n\ndef save_hold_jid(fname, jid):\n os.makedirs(os.path.dirname(fname), exist_ok=True)\n with open(fname, 'w') as f:\n print(jid, file=f)\n","sub_path":"library/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1960,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"80978631","text":"\"\"\" Transfers Fifa main script \"\"\"\n\nimport sys\nimport os\n\nclubs = {}\ntransfer_history = []\n\nclass Club:\n\n def __init__(self, name):\n self.name = name\n self.Players_main = {}\n self.Players_sub = {}\n self.mainMoney = 150 # Default\n self.subMoney = 30 # \n self.transfer_history = []\n def add_player(self, playerName, playerPrice, current_transfer):\n if len(self.Players_main) < 11:\n self.Players_main[playerName] = playerPrice\n self.mainMoney = self.mainMoney - playerPrice\n else:\n self.Players_sub[playerName] = playerPrice\n self.subMoney = self.subMoney - playerPrice\n \n self.transfer_history.append(current_transfer)\n\n def remove_player(self, playerName, transferFee):\n pass\n\ndef print_header():\n print(\"\"\"\n###############################################################\n _____ ___ _ _ _ ___ ___ ___ ___ ___ ___ ___ ___ _ \n |_ _| _ \\ /_\\ | \\| / 
__| __| __| _ \\/ __| | __|_ _| __/_\\ \n | | | / / _ \\| .` \\__ \\ _|| _|| /\\__ \\ | _| | || _/ _ \\ \n |_| |_|_\\/_/ \\_\\_|\\_|___/_| |___|_|_\\|___/ |_| |___|_/_/ \\_\n\n###############################################################\n\"\"\"\n )\n\ndef main():\n inp = 0\n while inp != 3:\n os.system('cls' if os.name == \"nt\" else 'clear')\n print_header()\n\n print(\"\"\"\nHOME\n1. Start Tournament\n2. Continue Tournament\n3. Exit\n\"\"\"\n )\n\n inp = int(input(\"Enter option: \"))\n if inp == 1:\n init_tournament()\n elif inp == 2:\n tournament()\n elif inp == 3:\n break\n \ndef init_club(i):\n name = input(\"%d. Name: \" % i)\n clubs[name] = Club(name)\n\ndef init_tournament():\n os.system('cls' if os.name == \"nt\" else 'clear')\n print_header()\n\n clubs = {}\n transfer_history = []\n\n num_clubs = int(input('Input number of clubs: '))\n for i in range(0, num_clubs):\n init_club(i+1)\n os.system('cls' if os.name == \"nt\" else 'clear')\n i=1\n input(\"\\nPress any key to continue...\")\n\n tournament()\n\ndef tournament():\n inp = 0\n while inp != 4:\n os.system('cls' if os.name == 'nt' else 'clear')\n print_header()\n print(\"\"\" \n1. Make Transfer\n2. Club Information\n3. Transfer History\n4. Return\n\"\"\"\n )\n inp = int(input(\"\\nEnter option: \"))\n \n if inp == 1:\n make_transfer()\n elif inp == 2:\n club_information()\n elif inp == 3:\n transfer_History()\n elif inp == 4:\n#save_tournament()\n break;\n elif inp == 5:\n break\n\ndef make_transfer():\n inp = 0\n while inp != 4:\n os.system('cls' if os.name == 'nt' else 'clear')\n print_header() \n print(\"\"\"\n1. Public Auction\n2. Private Transfers\n3. Transfer History\n4. Return\n\"\"\"\n )\n inp = int(input(\"\\nEnter option: \"))\n\n if inp == 1:\n public_transfer()\n elif inp == 2:\n private_transfer()\n elif inp == 3:\n transfer_History()\n elif inp == 4:\n break\n\ndef show_available_clubs():\n print(\"Available Clubs: \")\n i=1\n for club in clubs:\n print(\"%d. \" %i, club)\n i+=1\n\ndef public_transfer():\n os.system('cls' if os.name == 'nt' else 'clear')\n\n print_header() \n show_available_clubs()\n\n playerName = input(\"\\nEnter Player's Name: \")\n playerPrice = float(input(\"\\nEnter Player's Final Bid: \"))\n transferClub = input(\"\\nEnter Club the Player is Transferring to: \")\n \n if transferClub not in clubs:\n print(\"Club does not exist.\")\n input(\"Press Enter to continue...\")\n else:\n current_transfer = \"%s --> %s for %d\" % (playerName, transferClub, playerPrice) \n clubs[transferClub].add_player(playerName, playerPrice, current_transfer) \n transfer_history.append(current_transfer)\n\ndef transfer_History():\n os.system('cls' if os.name == 'nt' else 'clear')\n print_header() \n\n# not exactly pythonic, but bah. i can't figure how to add numbering without a loop and an extra variable.\n i = 1\n for item in transfer_history:\n print(\"%d. 
\" % i, item)\n i += 1\n\n input()\n\ndef club_information():\n os.system('cls' if os.name == 'nt' else 'clear')\n print_header()\n\n print(\"Club Information\")\n show_available_clubs()\n \n clubName = input(\"Enter Club Name: \")\n show_club_information(clubName)\n \ndef show_club_information(clubName):\n os.system('cls' if os.name == 'nt' else 'clear')\n print_header()\n\n print(\"Club Name: %s\" % clubs[clubName].name)\n print(\"Total Money Left (Main XI): %d\" % clubs[clubName].mainMoney)\n print(\"Total Money Left (Subs): %d\" % clubs[clubName].subMoney)\n print(\"\\nMain XI Players: \")\n for player in clubs[clubName].Players_main:\n print(player,\": \", clubs[clubName].Players_main[player])\n print(\"\\nSub Players: \")\n for player in clubs[clubName].Players_sub:\n print(player, \": \", clubs[clubName].Players_sub[player])\n\n print(\"\\nTransfer History: \")\n i=0\n for item in clubs[clubName].transfer_history:\n i += 1\n print(i, \". \", item)\n input()\n\ndef save_tournament():\n pass\n\nmain()\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5262,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"176706642","text":"\"\"\"\nВивести всі слова в рядку в зворотному порядку\n\"\"\"\ninput_array = []\noutput_array = []\n\n\ndef output():\n \"\"\"\n output some data\n :return: none\n \"\"\"\n for i in output_array:\n buff = i\n print(\"Ваш перевернутий рядок виглядає так:\", buff)\n\n\ndef algorithm():\n \"\"\"\n call some function to rob data\n :return: none\n \"\"\"\n txt = input_array[0]\n sep = input_array[1]\n output_array.append(reverse(txt, sep))\n\n\ndef reverse(text, separator):\n \"\"\"\n revers text\n :param text:str,\"some rowe\",\"asdfasf asdasfa asfasf\"\n :param separator: str,\"sep\",\" \"\n :return: str,\"rowe some\",\"asfasf asdasfa asdfasf\"\n \"\"\"\n sent = text.split(separator)[::-1]\n result = separator.join(sent)\n return result\n\n\ndef input_data():\n \"\"\"\n put some data into input array\n :return: none\n \"\"\"\n main_data = input(\"Введіть ваш рядок:\")\n input_array.append(main_data)\n main_data = input(\"Введіть ваш знак розділення:\")\n input_array.append(main_data)\n\n\ndef interface():\n \"\"\"\n print some information\n :return: none\n \"\"\"\n print(\" Варіант №19.Лабораторна робота №3 Завдання 1. Перевернути рядок \"\n \"навпаки\")\n print(\"Рудий О.В. 
КМ-91\")\n\n\ndef waiter():\n \"\"\"\n restart program if answer is Y\n and close program if answer is N\n :return:False or this function\n \"\"\"\n print(\n \"Якщо бажаєте продовжити роботу з програмою натисніть Y\"\n \" якщо бажаєте припинити роботу програми натисніть N\") # restart or stoped\n ans = input()\n while ans != \"N\" and ans != \"Y\":\n print(\"Ви ввели некоректний символ , спробуйте знову\")\n del ans\n ans = input()\n if ans == \"Y\":\n main()\n return waiter()\n elif ans == \"N\":\n print(\"До зустрічі\")\n return False\n\n\ndef start():\n \"\"\"\n start program with started interface\n :return:none\n \"\"\"\n interface()\n print(\"Якщо бажаєте розпочати роботу з програмою натисніть Y ,в іншому випадку натисніть N\") # start or stoped\n ans2 = input()\n while ans2 != \"N\" and ans2 != \"Y\":\n print(\"Ви ввели некоректний символ , спробуйте знову\")\n del ans2\n ans2 = input()\n if ans2 == \"Y\":\n main()\n waiter()\n\n\ndef main():\n \"\"\"\n run all function\n :return:none\n \"\"\"\n input_data()\n algorithm()\n output()\n input_array.clear()\n output_array.clear()\n\n\nstart()\n","sub_path":"laboratory3/task1.py","file_name":"task1.py","file_ext":"py","file_size_in_byte":2819,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"298107744","text":"import numpy as np\nfrom operator import itemgetter\nimport preprocess\nfrom scipy import sparse\n\nclass my_encoding():\n\t\"\"\"encoder used to encode lists of documents (strings) to the input format used in the original paper\n\tthe fit function creates a dictionary of all the occuring letters in the training set.\n\tthe transform function transforms a list of strings using the dictionary\"\"\"\n\tdef __init__(self):\n\t\tself.max_number_of_words_per_document = 128\n\t\tself.max_number_of_coordinates_per_word = 256\n\t\tself.map = {}\n\n\n\tdef fit(self,X):\n\t\t\"\"\"\n\t\tparam X: for now assumed to be a list of document, each document being a string\n\t\treturns: a list containing the matrix representation of each document, as described in the paper\n\t\t\"\"\"\n\n\n\t\t#first we preprocess the data\n\t\tX_new = []\n\t\tfor document in X:\n\t\t\tdocument = preprocess.preprocessDocument(document)\n\t\t\tX_new.append(document)\n\n\n\t\t#first we rank of each character in the corpus\n\t\tranked_characters = self._getRankOfCharacters(X_new)\n\t\tprint(\"Got rank\")\n\t\t#now get mapping from character to compress code\n\t\tself._getMappingCharToCompressCode(ranked_characters)\n\n\n\n\tdef transform(self,X):\n\t\t# first we preprocess the data\n\t\tX_new = []\n\t\tfor document in X:\n\t\t\tdocument = preprocess.preprocessDocument(document)\n\t\t\tX_new.append(document)\n\n\t\tmapping = self.map\n\n\t\t# now we can build the representation of each document\n\t\trepresentations = []\n\t\tfor document in X_new:\n\t\t\tdocument_representation = []\n\t\t\twords = document.split()\n\t\t\tfor word in words:\n\t\t\t\tword_representation = np.array([])\n\t\t\t\tfor char in word:\n\t\t\t\t\ttry:\n\t\t\t\t\t\tword_representation = np.concatenate((word_representation, mapping[char]))\n\t\t\t\t\texcept KeyError:\n\t\t\t\t\t\tword_representation = word_representation\n\n\t\t\t\t\tif len(word_representation) > self.max_number_of_coordinates_per_word:\n\t\t\t\t\t\tbreak\n\t\t\t\tword_representation = self._fixSizeOfWordRepresentation(word_representation)\n\t\t\t\tdocument_representation.append(word_representation)\n\t\t\t\tif len(document_representation) > 
self.max_number_of_words_per_document:\n\t\t\t\t\tbreak\n\t\t\tdocument_representation = self._fixSizeOfDocumentRepresentation(document_representation)\n\t\t\tdocument_representation = np.asarray(document_representation)\n\n\t\t\t###Addition###\n\t\t\t#document_representation = sparse.csr_matrix(document_representation)\n\t\t\trepresentations.append(document_representation)\n\n\t\treturn np.array(representations)\n\n\n\n\tdef _getRankOfCharacters(self,X):\n\t\t\n\t\t#first we get count of each character\n\t\tcount = {}\n\t\tfor document in X:\n\t\t\t\tfor charac in document:\n\t\t\t\t\tif charac in count:\n\t\t\t\t\t\tcount[charac] += 1\n\t\t\t\t\telse:\n\t\t\t\t\t\tcount[charac] = 1\n\n\t\tlist_count = []\n\t\tfor charac in count:\n\t\t\tlist_count.append((charac,count[charac]))\n\n\t\t#sort in decreasing order\n\t\tlist_count.sort(reverse=True, key=itemgetter(1))\n\n\t\tranked_characters = []\n\t\tfor i in range(len(list_count)):\n\t\t\tranked_characters.append(list_count[i][0])\n\n\t\treturn ranked_characters\n\n\n\tdef _getMappingCharToCompressCode(self, ranked_characters):\n\t\t\"\"\"\n\t\tparam ranked_characters: list of characters in decreasing order of frequency in the corpus\n\t\treturns: a dictionnary mapping each character to its compress code\n\t\t\"\"\"\n\n\t\tmapping = {}\n\t\tfor rank in range(len(ranked_characters)):\n\t\t\tmapping[ranked_characters[rank]] = self._createCompressCode(rank)\n\t\tself.map = mapping\n\n\n\tdef _createCompressCode(self,rank):\n\t\t\"\"\"\n\t\tparam rank: rank of the character in the corpus\n\t\treturns: compress code corresponding to that character, as a numpy array\n\t\t\"\"\"\n\n\t\tcompress_code = [1]\n\t\tfor i in range(rank):\n\t\t\tcompress_code.append(0)\n\t\tcompress_code.append(1)\n\t\treturn np.asarray(compress_code)\n\n\n\n\n\tdef _fixSizeOfWordRepresentation(self,word_representation):\n\t\t\"\"\"\n\t\tparam word_representation: numpy array representing the code of a word, either too long or too short\n\t\treturns: the word representation with the good size\n\t\t\"\"\"\n\t\tif len(word_representation) > self.max_number_of_coordinates_per_word:\n\t\t\tword_representation = word_representation[:self.max_number_of_coordinates_per_word]\n\t\t\treturn word_representation\n\t\tif len(word_representation) < self.max_number_of_coordinates_per_word:\n\t\t\tword_representation = np.concatenate((word_representation,np.zeros(self.max_number_of_coordinates_per_word - len(word_representation))))\n\t\treturn word_representation\n\n\n\tdef _fixSizeOfDocumentRepresentation(self,document_representation):\n\t\tif len(document_representation) >= self.max_number_of_words_per_document:\n\t\t\tdocument_representation = document_representation[:self.max_number_of_words_per_document]\n\t\t\treturn document_representation\n\t\telse:\n\t\t\twhile len(document_representation) < self.max_number_of_words_per_document:\n\t\t\t\tzero_line = np.zeros(self.max_number_of_coordinates_per_word)\n\t\t\t\tdocument_representation = np.concatenate((document_representation,[zero_line]))\n\t\t\treturn document_representation\n\n","sub_path":"encode.py","file_name":"encode.py","file_ext":"py","file_size_in_byte":4806,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"367550766","text":"from collections import defaultdict\n\n\n# Definition for a binary tree node.\nclass TreeNode:\n def __init__(self, val=0, left=None, right=None):\n self.val = val\n self.left = left\n self.right = right\n\n\nclass Solution:\n # def 
verticalTraversal(self, root: TreeNode) -> List[List[int]]:\n    def verticalTraversal(self, root: TreeNode):\n        ans = defaultdict(list)\n        que = [(root, 0)]\n\n        while que:\n            new = []\n            dic = defaultdict(list)\n            for node, lvl in que:\n                dic[lvl].append(node.val)\n                if node.left: new.append((node.left, lvl - 1))\n                if node.right: new.append((node.right, lvl + 1))\n\n            que = new\n            for lvl in dic: ans[lvl].extend(sorted(dic[lvl]))\n\n        return [ans[i] for i in sorted(ans)]\n\n","sub_path":"LeetCode/0987_VerticalOrderTraversalOfABinaryTree/Solution.py","file_name":"Solution.py","file_ext":"py","file_size_in_byte":828,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"323175976","text":"#\n# @lc app=leetcode id=300 lang=python3\n#\n# [300] Longest Increasing Subsequence\n#\n# https://leetcode.com/problems/longest-increasing-subsequence/description/\n#\n# algorithms\n# Medium (41.86%)\n# Likes: 3725\n# Dislikes: 84\n# Total Accepted: 315.5K\n# Total Submissions: 752.8K\n# Testcase Example: '[10,9,2,5,3,7,101,18]'\n#\n# Given an unsorted array of integers, find the length of longest increasing\n# subsequence.\n# \n# Example:\n# \n# \n# Input: [10,9,2,5,3,7,101,18]\n# Output: 4 \n# Explanation: The longest increasing subsequence is [2,3,7,101], therefore the\n# length is 4. \n# \n# Note: \n# \n# \n# There may be more than one LIS combination, it is only necessary for you to\n# return the length.\n# Your algorithm should run in O(n^2) complexity.\n# \n# \n# Follow up: Could you improve it to O(n log n) time complexity?\n# \n#\n\n# @lc code=start\n# from bisect import bisect_left\nclass Solution:\n    def lengthOfLIS(self, nums: List[int]) -> int:\n        # patience-style O(n log n): seen[i] holds the smallest possible tail\n        # of an increasing subsequence of length i + 1\n        seen = []\n        for num in nums:\n            nums_lt_count = self.floorSearch(seen, num)+1\n            if nums_lt_count == len(seen):\n                seen.append(num)\n            else:\n                seen[nums_lt_count] = num\n            # print(seen, num, nums_lt_count)\n        return len(seen)\n    \n    def floorSearch(self, arr, x):\n        # binary search: rightmost index with arr[index] < x, or -1 if none\n        start, end = 0, len(arr)-1\n        floor = -1\n        while start <= end:\n            mid = (start + end)//2\n            if arr[mid] < x:\n                floor = mid\n                start = mid + 1\n            else:\n                end = mid - 1\n        return floor\n\n\n\n\n# @lc code=end\n\n","sub_path":"300.longest-increasing-subsequence.py","file_name":"300.longest-increasing-subsequence.py","file_ext":"py","file_size_in_byte":1597,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"650330237","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\n猜数字\n\"\"\"\nimport sys\n\nnum = 10\n\nfor x in range(3):\n\ttry:\n\t\tprint(\"必须输入整数,当前您还有%d机会\" % (3-x))\n\t\tuser_input = int(input(\"有一个隐藏的数字,请你猜猜:\"))\n\texcept (ValueError,SyntaxError):\n\t\tcontinue\n\texcept (KeyboardInterrupt,EOFError):\n\t\tprint()\n\t\tsys.exit(1)\n\tif num == user_input:\n\t\tprint(\"Congratulations! You win!\")\n\t\tsys.exit(0)\n# reached only when all three attempts are used without a correct guess\nprint(\"Oops! You fail!\")\n","sub_path":"py01/02.py","file_name":"02.py","file_ext":"py","file_size_in_byte":536,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"114511095","text":"#! 
python3\n\n\"\"\"This is safeprint module.\n\n\"\"\"\n\ndef safeprint(*ss):\n\t\"\"\"Safe print, skip error decode.\"\"\"\n\t\n\tss = [s if type(s) is str else str(s) or str(type(s)) for s in ss]\n\ts = \" \".join(ss)\n\n\tfor c in s:\n\t\ttry:\n\t\t\tprint(c, end=\"\")\n\t\texcept UnicodeEncodeError:\n\t\t\tprint(\"?\", end=\"\")\n\t\t\t\t\n\tprint(\"\")\n\t\n\tfor f in _callbacklist:\n\t\tf(s)\n\n\t\t\n_callbacklist = []\ndef addcallback(callback):\n\tif callback in _callbacklist:\n\t\treturn\n\tif callable(callback):\n\t\t_callbacklist.append(callback)\n\ndef removecallback(callback):\n\tif callback in _callbacklist:\n\t\t_callbacklist.remove(callback)\n\t\nif __name__ == \"__main__\":\n\tsafeprint(\"Hello World!\", \"你好世界!\", \"ハローワールド\", \"हैलो वर्ल्ड\")\n","sub_path":"safeprint.py","file_name":"safeprint.py","file_ext":"py","file_size_in_byte":712,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"9473034","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Python version: 3.6\n\nimport time\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nimport copy\nimport numpy as np\nfrom torchvision import datasets, transforms\nimport torch\nimport random\nimport csv\n\nfrom utils.sampling import mnist_iid, mnist_noniid, cifar_iid\nfrom utils.options import args_parser\nfrom models.CIFAR_Updates_v2_labelFlipping import LocalUpdate\nfrom models.Nets import MLP, CNNMnist, CNNCifar, customCNNCifar\nfrom models.Fed import FedAvg\n#from models.test import test_img\nfrom models.CIFAR_test_v2_labelFlipping import test_img\nfrom collections import OrderedDict,defaultdict\n\n\nif __name__ == '__main__':\n start = time.time()\n # parse args\n args = args_parser()\n args.device = torch.device('cuda:{}'.format(args.gpu) if torch.cuda.is_available() and args.gpu != -1 else 'cpu')\n\n # load dataset and split users\n if args.dataset == 'mnist':\n trans_mnist = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])\n dataset_train = datasets.MNIST('data/mnist/', train=True, download=True, transform=trans_mnist)\n dataset_test = datasets.MNIST('data/mnist/', train=False, download=True, transform=trans_mnist)\n # sample users\n if args.iid:\n dict_users = mnist_iid(dataset_train, args.num_users)\n else:\n dict_users = mnist_noniid(dataset_train, args.num_users)\n elif args.dataset == 'cifar':\n trans_cifar_train = transforms.Compose([transforms.RandomCrop(32,padding=4),transforms.RandomHorizontalFlip(),transforms.ToTensor(), transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))])\n trans_cifar_test = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))])\n dataset_train = datasets.CIFAR10('data/cifar', train=True, download=True, transform=trans_cifar_train)\n dataset_test = datasets.CIFAR10('data/cifar', train=False, download=True, transform=trans_cifar_test)\n if args.iid:\n dict_users = cifar_iid(dataset_train, args.num_users)\n else:\n exit('Error: only consider IID setting in CIFAR10')\n else:\n exit('Error: unrecognized dataset')\n img_size = dataset_train[0][0].shape\n\n # build model\n if args.model == 'cnn' and args.dataset == 'cifar':\n net_glob = customCNNCifar(args=args).to(args.device)\n net_glob1 = customCNNCifar(args=args).to(args.device)\n net_glob5 = customCNNCifar(args=args).to(args.device)\n net_glob10 = customCNNCifar(args=args).to(args.device)\n net_glob15 = 
customCNNCifar(args=args).to(args.device)\n net_glob20 = customCNNCifar(args=args).to(args.device)\n net_glob25 = customCNNCifar(args=args).to(args.device)\n net_glob30 = customCNNCifar(args=args).to(args.device)\n elif args.model == 'cnn' and args.dataset == 'mnist':\n net_glob = CNNMnist(args=args).to(args.device)\n\n elif args.model == 'mlp':\n len_in = 1\n for x in img_size:\n len_in *= x\n net_glob = MLP(dim_in=len_in, dim_hidden=64, dim_out=args.num_classes).to(args.device)\n else:\n exit('Error: unrecognized model')\n print(net_glob)\n net_glob.train()\n net_glob1.train()\n net_glob5.train()\n net_glob10.train()\n net_glob15.train()\n net_glob20.train()\n net_glob25.train()\n net_glob30.train()\n\n # copy weights\n w_glob = net_glob.state_dict()\n w_glob1 = net_glob1.state_dict()\n w_glob5 = net_glob5.state_dict()\n w_glob10 = net_glob10.state_dict()\n w_glob15 = net_glob15.state_dict()\n w_glob20 = net_glob20.state_dict()\n w_glob25 = net_glob25.state_dict()\n w_glob30 = net_glob30.state_dict()\n\n # training - NO ATTACK\n loss_train = []\n cv_loss, cv_acc = [], []\n val_loss_pre, counter = 0, 0\n net_best = None\n best_loss = None\n val_acc_list, net_list = [], []\n\n #VIVEK constant attack experiment - 1 MALICIOUS\n loss_train_1 = []\n fixed_agent_1 = random.randint(0,31) #random agent between 0 and 31 is fixed\n updates_recorded_1 = False\n fixed_agent_storage_1 = None\n count_array_1 = []\n\n #VIVEK constant attack experiment - 5 MALICIOUS\n loss_train_5 = []\n fixed_agent_5 = random.sample(range(32),5)\n updates_recorded_mapping_5 = defaultdict(bool)\n for i in fixed_agent_5:\n updates_recorded_mapping_5[i] = False #KEY = agent no. & VAL = boolean\n fixed_agent_storage_mapping_5 = {} #KEY = agent no. & VAL = Fixed Updates\n count_array_5 = []\n\n #VIVEK constant attack experiment - 10 MALICIOUS\n loss_train_10 = []\n fixed_agent_10 = random.sample(range(32),10)\n updates_recorded_mapping_10 = defaultdict(bool)\n for i in fixed_agent_10:\n updates_recorded_mapping_10[i] = False\n fixed_agent_storage_mapping_10 = {}\n count_array_10 = []\n\n #VIVEK constant attack experiment - 15 MALICIOUS\n loss_train_15 = []\n fixed_agent_15 = random.sample(range(32),15)\n updates_recorded_mapping_15 = defaultdict(bool)\n for i in fixed_agent_15:\n updates_recorded_mapping_15[i] = False\n fixed_agent_storage_mapping_15 = {}\n count_array_15 = []\n\n #VIVEK constant attack experiment - 20 MALICIOUS\n loss_train_20 = []\n fixed_agent_20 = random.sample(range(32),20)\n updates_recorded_mapping_20 = defaultdict(bool)\n for i in fixed_agent_20:\n updates_recorded_mapping_20[i] = False\n fixed_agent_storage_mapping_20 = {}\n count_array_20 = []\n\n #VIVEK constant attack experiment - 25 MALICIOUS\n loss_train_25 = []\n fixed_agent_25 = random.sample(range(32),25)\n updates_recorded_mapping_25 = defaultdict(bool)\n for i in fixed_agent_25:\n updates_recorded_mapping_25[i] = False\n fixed_agent_storage_mapping_25 = {}\n count_array_25 = []\n\n #VIVEK constant attack experiment - 30 MALICIOUS\n loss_train_30 = []\n fixed_agent_30 = random.sample(range(32),30)\n updates_recorded_mapping_30 = defaultdict(bool)\n for i in fixed_agent_30:\n updates_recorded_mapping_30[i] = False\n fixed_agent_storage_mapping_30 = {}\n count_array_30 = []\n\n malicious_dict = {1:[fixed_agent_1],5:fixed_agent_5,10:fixed_agent_10,15:fixed_agent_15,20:fixed_agent_20,25:fixed_agent_25,30:fixed_agent_30}\n malicious_count ={1:0,5:0,10:0,15:0,20:0,25:0,30:0}\n\n for iter in range(args.epochs):\n #agent_found_count = 0\n w_locals, 
loss_locals = [], [] #w_locals = array of local_weights\n w_locals_1, loss_locals_1 = [],[]\n w_locals_5, loss_locals_5 = [],[]\n w_locals_10, loss_locals_10 = [],[]\n w_locals_15, loss_locals_15 = [],[]\n w_locals_20, loss_locals_20 = [],[]\n w_locals_25, loss_locals_25 = [],[]\n w_locals_30, loss_locals_30 = [],[]\n m = max(int(args.frac * args.num_users), 1) #m = number of users used in one ROUND/EPOCH, check utils.options for more clarity on this\n idxs_users = np.random.choice(range(args.num_users), m, replace=False) #Randomly selecting m users out of 32 users. NEED TO REPLACE THIS WITH OUR SAMPLING MECHANISM\n\n for idx in idxs_users:\n local = LocalUpdate(args=args, dataset=dataset_train, idxs=dict_users[idx])\n local1 = LocalUpdate(args=args, dataset=dataset_train, idxs=dict_users[idx])\n local5 = LocalUpdate(args=args, dataset=dataset_train, idxs=dict_users[idx])\n local10 = LocalUpdate(args=args, dataset=dataset_train, idxs=dict_users[idx])\n local15 = LocalUpdate(args=args, dataset=dataset_train, idxs=dict_users[idx])\n local20 = LocalUpdate(args=args, dataset=dataset_train, idxs=dict_users[idx])\n local25 = LocalUpdate(args=args, dataset=dataset_train, idxs=dict_users[idx])\n local30 = LocalUpdate(args=args, dataset=dataset_train, idxs=dict_users[idx])\n\n w, loss = local.train(net=copy.deepcopy(net_glob).to(args.device))\n\n print(\"***BLAH BLAH BLAH***\")\n\n if idx == fixed_agent_1:\n w1,loss1 = local1.train_flipped(net=copy.deepcopy(net_glob1).to(args.device))\n malicious_count[1]+=1\n\n if idx != fixed_agent_1:\n w1, loss1 = local1.train(net=copy.deepcopy(net_glob1).to(args.device))\n\n if idx in fixed_agent_5:\n w5,loss5 = local5.train_flipped(net=copy.deepcopy(net_glob5).to(args.device))\n malicious_count[5]+=1\n\n if idx not in fixed_agent_5:\n w5, loss5 = local5.train(net=copy.deepcopy(net_glob5).to(args.device))\n\n if idx in fixed_agent_10:\n w10,loss10 = local10.train_flipped(net=copy.deepcopy(net_glob10).to(args.device))\n malicious_count[10]+=1\n\n if idx not in fixed_agent_10:\n w10, loss10 = local10.train(net=copy.deepcopy(net_glob10).to(args.device))\n\n if idx in fixed_agent_15:\n w15,loss15 = local15.train_flipped(net=copy.deepcopy(net_glob15).to(args.device))\n malicious_count[15]+=1\n\n if idx not in fixed_agent_15:\n w15, loss15 = local15.train(net=copy.deepcopy(net_glob15).to(args.device))\n\n if idx in fixed_agent_20:\n w20,loss20 = local20.train_flipped(net=copy.deepcopy(net_glob20).to(args.device))\n malicious_count[20]+=1\n\n if idx not in fixed_agent_20:\n w20, loss20 = local20.train(net=copy.deepcopy(net_glob20).to(args.device))\n\n if idx in fixed_agent_25:\n w25,loss25 = local25.train_flipped(net=copy.deepcopy(net_glob25).to(args.device))\n malicious_count[25]+=1\n\n if idx not in fixed_agent_25:\n w25, loss25 = local25.train(net=copy.deepcopy(net_glob25).to(args.device))\n\n if idx in fixed_agent_30:\n w30,loss30 = local30.train_flipped(net=copy.deepcopy(net_glob30).to(args.device))\n malicious_count[30]+=1\n\n if idx not in fixed_agent_30:\n w30, loss30 = local30.train(net=copy.deepcopy(net_glob30).to(args.device))\n\n\n #NO ATTACK\n w_locals.append(copy.deepcopy(w))\n loss_locals.append(copy.deepcopy(loss))\n\n #1 MALICIOUS\n w_locals_1.append(copy.deepcopy(w1))\n loss_locals_1.append(copy.deepcopy(loss1))\n\n #5 MALICIOUS\n w_locals_5.append(copy.deepcopy(w5))\n loss_locals_5.append(copy.deepcopy(loss5))\n\n #10 MALICIOUS\n w_locals_10.append(copy.deepcopy(w10))\n loss_locals_10.append(copy.deepcopy(loss10))\n\n #15 MALICIOUS\n 
w_locals_15.append(copy.deepcopy(w15))\n loss_locals_15.append(copy.deepcopy(loss15))\n\n #20 MALICIOUS\n w_locals_20.append(copy.deepcopy(w20))\n loss_locals_20.append(copy.deepcopy(loss20))\n\n #25 MALICIOUS\n w_locals_25.append(copy.deepcopy(w25))\n loss_locals_25.append(copy.deepcopy(loss25))\n\n #30 MALICIOUS\n w_locals_30.append(copy.deepcopy(w30))\n loss_locals_30.append(copy.deepcopy(loss30))\n\n # update global weights\n w_glob = FedAvg(w_locals)\n w_glob_1 = FedAvg(w_locals_1)\n w_glob_5 = FedAvg(w_locals_5)\n w_glob_10 = FedAvg(w_locals_10)\n w_glob_15 = FedAvg(w_locals_15)\n w_glob_20 = FedAvg(w_locals_20)\n w_glob_25 = FedAvg(w_locals_25)\n w_glob_30 = FedAvg(w_locals_30)\n\n # copy weight to net_glob\n net_glob.load_state_dict(w_glob)\n net_glob1.load_state_dict(w_glob_1)\n net_glob5.load_state_dict(w_glob_5)\n net_glob10.load_state_dict(w_glob_10)\n net_glob15.load_state_dict(w_glob_15)\n net_glob20.load_state_dict(w_glob_20)\n net_glob25.load_state_dict(w_glob_25)\n net_glob30.load_state_dict(w_glob_30)\n\n # print loss\n loss_avg = sum(loss_locals) / len(loss_locals)\n loss_avg_1 = sum(loss_locals_1) / len(loss_locals_1)\n loss_avg_5 = sum(loss_locals_5) / len(loss_locals_5)\n loss_avg_10 = sum(loss_locals_10) / len(loss_locals_10)\n loss_avg_15 = sum(loss_locals_15) / len(loss_locals_15)\n loss_avg_20 = sum(loss_locals_20) / len(loss_locals_20)\n loss_avg_25 = sum(loss_locals_25) / len(loss_locals_25)\n loss_avg_30 = sum(loss_locals_30) / len(loss_locals_30)\n\n\n print('NO ATTACK ---> Round {:3d}, Average loss {:.3f}'.format(iter, loss_avg))\n print('C1 ATTACK ---> Round {:3d}, Average loss {:.3f}'.format(iter, loss_avg_1))\n print('C5 ATTACK ---> Round {:3d}, Average loss {:.3f}'.format(iter, loss_avg_5))\n print('C10 ATTACK ---> Round {:3d}, Average loss {:.3f}'.format(iter, loss_avg_10))\n print('C15 ATTACK ---> Round {:3d}, Average loss {:.3f}'.format(iter, loss_avg_15))\n print('C20 ATTACK ---> Round {:3d}, Average loss {:.3f}'.format(iter, loss_avg_20))\n print('C25 ATTACK ---> Round {:3d}, Average loss {:.3f}'.format(iter, loss_avg_25))\n print('C30 ATTACK ---> Round {:3d}, Average loss {:.3f}'.format(iter, loss_avg_30))\n #count_array.append(agent_found_count)\n loss_train.append(loss_avg)\n loss_train_1.append(loss_avg_1)\n loss_train_5.append(loss_avg_5)\n loss_train_10.append(loss_avg_10)\n loss_train_15.append(loss_avg_15)\n loss_train_20.append(loss_avg_20)\n loss_train_25.append(loss_avg_25)\n loss_train_30.append(loss_avg_30)\n\n # plot loss curve\n #plt.figure()\n #plt.subplots()\n #attack_no = plt.plot(range(len(loss_train)), loss_train)\n #attack_1 = plt.plot(range(len(loss_train_1)),loss_train_1)\n #plt.ylabel('train_loss')\n #plt.savefig('log/fed_{}_{}_{}_C{}_iid{}.png'.format(args.dataset, args.model, args.epochs, args.frac, args.iid))\n #print(\"COUNT DATA\",str(count_array))\n print(\"NO ATTACK DATA=\",loss_train)\n print(\"1 ATTACK DATA=\",loss_train_1)\n print(\"5 ATTACK DATA=\",loss_train_5)\n print(\"10 ATTACK DATA=\",loss_train_10)\n print(\"15 ATTACK DATA=\",loss_train_15)\n print(\"20 ATTACK DATA=\",loss_train_20)\n print(\"25 ATTACK DATA=\",loss_train_25)\n print(\"30 ATTACK DATA=\",loss_train_30)\n\n print(malicious_dict)\n print(malicious_count)\n\n # testing\n net_glob.eval()\n #print(\"Agent_Found_Count\",agent_found_count)\n acc_train, loss_train, acc_train0,acc_train1,acc_train2,acc_train3,acc_train4,acc_train5,acc_train6,acc_train7,acc_train8,acc_train9,target_train_freq,correct_train_freq = test_img(net_glob, dataset_train, args)\n 
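    # Editor's aside (illustrative): models.Fed is not shown in this file; its
    # FedAvg is assumed to be the usual unweighted coordinate-wise mean of the
    # client state_dicts, roughly equivalent to this sketch:
    def fed_avg_sketch(w_locals):
        w_avg = copy.deepcopy(w_locals[0])  # start from the first client's weights
        for k in w_avg.keys():              # average every parameter tensor
            for w in w_locals[1:]:
                w_avg[k] += w[k]
            w_avg[k] = torch.div(w_avg[k], len(w_locals))
        return w_avg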
acc_test, loss_test, acc_test0,acc_test1,acc_test2,acc_test3,acc_test4,acc_test5,acc_test6,acc_test7,acc_test8,acc_test9,target_test_freq,correct_test_freq = test_img(net_glob, dataset_test, args)\n print(\"Training accuracy (NO ATTACK): {:.2f}\".format(acc_train))\n print(\"Testing accuracy (NO ATTACK): {:.2f}\".format(acc_test))\n print(\"Training accuracy-airplane (NO ATTACK): {:.2f}\".format(acc_train0))\n print(\"Testing accuracy-airplane (NO ATTACK): {:.2f}\".format(acc_test0))\n print(\"\\n\")\n\n net_glob1.eval()\n acc_train_1, loss_train_1, acc_train0_1,acc_train1_1,acc_train2_1,acc_train3_1,acc_train4_1,acc_train5_1,acc_train6_1,acc_train7_1,acc_train8_1,acc_train9_1,target_train_freq_1,correct_train_freq_1 = test_img(net_glob1, dataset_train, args)\n acc_test_1, loss_test_1, acc_test0_1,acc_test1_1,acc_test2_1,acc_test3_1,acc_test4_1,acc_test5_1,acc_test6_1,acc_test7_1,acc_test8_1,acc_test9_1,target_test_freq_1,correct_test_freq_1 = test_img(net_glob1, dataset_test, args)\n print(\"Training accuracy (LABEL FLIPPED 1): {:.2f}\".format(acc_train_1))\n print(\"Testing accuracy (LABEL FLIPPED 1): {:.2f}\".format(acc_test_1))\n print(\"Training accuracy-airplane (LABEL FLIPPED 1): {:.2f}\".format(acc_train0_1))\n print(\"Testing accuracy-airplane (LABEL FLIPPED 1): {:.2f}\".format(acc_test0_1))\n print(\"\\n\")\n\n net_glob5.eval()\n acc_train_5, loss_train_5, acc_train0_5,acc_train1_5,acc_train2_5,acc_train3_5,acc_train4_5,acc_train5_5,acc_train6_5,acc_train7_5,acc_train8_5,acc_train9_5,target_train_freq_5,correct_train_freq_5 = test_img(net_glob5, dataset_train, args)\n acc_test_5, loss_test_5, acc_test0_5,acc_test1_5,acc_test2_5,acc_test3_5,acc_test4_5,acc_test5_5,acc_test6_5,acc_test7_5,acc_test8_5,acc_test9_5,target_test_freq_5,correct_test_freq_5 = test_img(net_glob5, dataset_test, args)\n print(\"Training accuracy (LABEL FLIPPED 5): {:.2f}\".format(acc_train_5))\n print(\"Testing accuracy (LABEL FLIPPED 5): {:.2f}\".format(acc_test_5))\n print(\"Training accuracy-airplane (LABEL FLIPPED 5): {:.2f}\".format(acc_train0_5))\n print(\"Testing accuracy-airplane (LABEL FLIPPED 5): {:.2f}\".format(acc_test0_5))\n print(\"\\n\")\n\n net_glob10.eval()\n acc_train_10, loss_train_10, acc_train0_10,acc_train1_10,acc_train2_10,acc_train3_10,acc_train4_10,acc_train5_10,acc_train6_10,acc_train7_10,acc_train8_10,acc_train9_10,target_train_freq_10,correct_train_freq_10 = test_img(net_glob10, dataset_train, args)\n acc_test_10, loss_test_10, acc_test0_10,acc_test1_10,acc_test2_10,acc_test3_10,acc_test4_10,acc_test5_10,acc_test6_10,acc_test7_10,acc_test8_10,acc_test9_10,target_test_freq_10,correct_test_freq_10 = test_img(net_glob10, dataset_test, args)\n print(\"Training accuracy (LABEL FLIPPED 10): {:.2f}\".format(acc_train_10))\n print(\"Testing accuracy (LABEL FLIPPED 10): {:.2f}\".format(acc_test_10))\n print(\"Training accuracy-airplane (LABEL FLIPPED 10): {:.2f}\".format(acc_train0_10))\n print(\"Testing accuracy-airplane (LABEL FLIPPED 10): {:.2f}\".format(acc_test0_10))\n print(\"\\n\")\n\n net_glob15.eval()\n acc_train_15, loss_train_15, acc_train0_15,acc_train1_15,acc_train2_15,acc_train3_15,acc_train4_15,acc_train5_15,acc_train6_15,acc_train7_15,acc_train8_15,acc_train9_15,target_train_freq_15,correct_train_freq_15 = test_img(net_glob15, dataset_train, args)\n acc_test_15, loss_test_15, acc_test0_15,acc_test1_15,acc_test2_15,acc_test3_15,acc_test4_15,acc_test5_15,acc_test6_15,acc_test7_15,acc_test8_15,acc_test9_15,target_test_freq_15,correct_test_freq_15 = test_img(net_glob15, 
dataset_test, args)\n    print(\"Training accuracy (LABEL FLIPPED 15): {:.2f}\".format(acc_train_15))\n    print(\"Testing accuracy (LABEL FLIPPED 15): {:.2f}\".format(acc_test_15))\n    print(\"Training accuracy-airplane (LABEL FLIPPED 15): {:.2f}\".format(acc_train0_15))\n    print(\"Testing accuracy-airplane (LABEL FLIPPED 15): {:.2f}\".format(acc_test0_15))\n    print(\"\\n\")\n\n    net_glob20.eval()\n    acc_train_20, loss_train_20, acc_train0_20,acc_train1_20,acc_train2_20,acc_train3_20,acc_train4_20,acc_train5_20,acc_train6_20,acc_train7_20,acc_train8_20,acc_train9_20,target_train_freq_20,correct_train_freq_20 = test_img(net_glob20, dataset_train, args)\n    acc_test_20, loss_test_20, acc_test0_20,acc_test1_20,acc_test2_20,acc_test3_20,acc_test4_20,acc_test5_20,acc_test6_20,acc_test7_20,acc_test8_20,acc_test9_20,target_test_freq_20,correct_test_freq_20 = test_img(net_glob20, dataset_test, args)\n    print(\"Training accuracy (LABEL FLIPPED 20): {:.2f}\".format(acc_train_20))\n    print(\"Testing accuracy (LABEL FLIPPED 20): {:.2f}\".format(acc_test_20))\n    print(\"Training accuracy-airplane (LABEL FLIPPED 20): {:.2f}\".format(acc_train0_20))\n    print(\"Testing accuracy-airplane (LABEL FLIPPED 20): {:.2f}\".format(acc_test0_20))\n    print(\"\\n\")\n\n    net_glob25.eval()\n    acc_train_25, loss_train_25, acc_train0_25,acc_train1_25,acc_train2_25,acc_train3_25,acc_train4_25,acc_train5_25,acc_train6_25,acc_train7_25,acc_train8_25,acc_train9_25,target_train_freq_25,correct_train_freq_25 = test_img(net_glob25, dataset_train, args)\n    acc_test_25, loss_test_25, acc_test0_25,acc_test1_25,acc_test2_25,acc_test3_25,acc_test4_25,acc_test5_25,acc_test6_25,acc_test7_25,acc_test8_25,acc_test9_25,target_test_freq_25,correct_test_freq_25 = test_img(net_glob25, dataset_test, args)\n    print(\"Training accuracy (LABEL FLIPPED 25): {:.2f}\".format(acc_train_25))\n    print(\"Testing accuracy (LABEL FLIPPED 25): {:.2f}\".format(acc_test_25))\n    print(\"Training accuracy-airplane (LABEL FLIPPED 25): {:.2f}\".format(acc_train0_25))\n    print(\"Testing accuracy-airplane (LABEL FLIPPED 25): {:.2f}\".format(acc_test0_25))\n    print(\"\\n\")\n\n    net_glob30.eval()\n    acc_train_30, loss_train_30, acc_train0_30,acc_train1_30,acc_train2_30,acc_train3_30,acc_train4_30,acc_train5_30,acc_train6_30,acc_train7_30,acc_train8_30,acc_train9_30,target_train_freq_30,correct_train_freq_30 = test_img(net_glob30, dataset_train, args)\n    acc_test_30, loss_test_30, acc_test0_30,acc_test1_30,acc_test2_30,acc_test3_30,acc_test4_30,acc_test5_30,acc_test6_30,acc_test7_30,acc_test8_30,acc_test9_30,target_test_freq_30,correct_test_freq_30 = test_img(net_glob30, dataset_test, args)\n    print(\"Training accuracy (LABEL FLIPPED 30): {:.2f}\".format(acc_train_30))\n    print(\"Testing accuracy (LABEL FLIPPED 30): {:.2f}\".format(acc_test_30))\n    print(\"Training accuracy-airplane (LABEL FLIPPED 30): {:.2f}\".format(acc_train0_30))\n    print(\"Testing accuracy-airplane (LABEL FLIPPED 30): {:.2f}\".format(acc_test0_30))\n    print(\"\\n\")\n\n    with open(\"label_flipped_final_data.csv\", \"w\") as csv_file:\n        writer = csv.writer(csv_file,delimiter=',')\n        writer.writerow((\"OVERALL_TRAIN_ACCURACY\",\"OVERALL_TEST_ACCURACY\",\"TRAIN_ACC_0\",\"TEST_ACC_0\",\"TRAIN_ACC_1\",\"TEST_ACC_1\",\"TRAIN_ACC_2\",\"TEST_ACC_2\",\"TRAIN_ACC_3\",\"TEST_ACC_3\",\"TRAIN_ACC_4\",\"TEST_ACC_4\",\"TRAIN_ACC_5\",\"TEST_ACC_5\",\"TRAIN_ACC_6\",\"TEST_ACC_6\",\"TRAIN_ACC_7\",\"TEST_ACC_7\",\"TRAIN_ACC_8\",\"TEST_ACC_8\",\"TRAIN_ACC_9\",\"TEST_ACC_9\"))\n
writer.writerow((acc_train.item(),acc_test.item(),acc_train0,acc_test0,acc_train1,acc_test1,acc_train2,acc_test2,acc_train3,acc_test3,acc_train4,acc_test4,acc_train5,acc_test5,acc_train6,acc_test6,acc_train7,acc_test7,acc_train8,acc_test8,acc_train9,acc_test9))\n        writer.writerow((acc_train_1.item(),acc_test_1.item(),acc_train0_1,acc_test0_1,acc_train1_1,acc_test1_1,acc_train2_1,acc_test2_1,acc_train3_1,acc_test3_1,acc_train4_1,acc_test4_1,acc_train5_1,acc_test5_1,acc_train6_1,acc_test6_1,acc_train7_1,acc_test7_1,acc_train8_1,acc_test8_1,acc_train9_1,acc_test9_1))\n        writer.writerow((acc_train_5.item(),acc_test_5.item(),acc_train0_5,acc_test0_5,acc_train1_5,acc_test1_5,acc_train2_5,acc_test2_5,acc_train3_5,acc_test3_5,acc_train4_5,acc_test4_5,acc_train5_5,acc_test5_5,acc_train6_5,acc_test6_5,acc_train7_5,acc_test7_5,acc_train8_5,acc_test8_5,acc_train9_5,acc_test9_5))\n        writer.writerow((acc_train_10.item(),acc_test_10.item(),acc_train0_10,acc_test0_10,acc_train1_10,acc_test1_10,acc_train2_10,acc_test2_10,acc_train3_10,acc_test3_10,acc_train4_10,acc_test4_10,acc_train5_10,acc_test5_10,acc_train6_10,acc_test6_10,acc_train7_10,acc_test7_10,acc_train8_10,acc_test8_10,acc_train9_10,acc_test9_10))\n        writer.writerow((acc_train_15.item(),acc_test_15.item(),acc_train0_15,acc_test0_15,acc_train1_15,acc_test1_15,acc_train2_15,acc_test2_15,acc_train3_15,acc_test3_15,acc_train4_15,acc_test4_15,acc_train5_15,acc_test5_15,acc_train6_15,acc_test6_15,acc_train7_15,acc_test7_15,acc_train8_15,acc_test8_15,acc_train9_15,acc_test9_15))\n        writer.writerow((acc_train_20.item(),acc_test_20.item(),acc_train0_20,acc_test0_20,acc_train1_20,acc_test1_20,acc_train2_20,acc_test2_20,acc_train3_20,acc_test3_20,acc_train4_20,acc_test4_20,acc_train5_20,acc_test5_20,acc_train6_20,acc_test6_20,acc_train7_20,acc_test7_20,acc_train8_20,acc_test8_20,acc_train9_20,acc_test9_20))\n        writer.writerow((acc_train_25.item(),acc_test_25.item(),acc_train0_25,acc_test0_25,acc_train1_25,acc_test1_25,acc_train2_25,acc_test2_25,acc_train3_25,acc_test3_25,acc_train4_25,acc_test4_25,acc_train5_25,acc_test5_25,acc_train6_25,acc_test6_25,acc_train7_25,acc_test7_25,acc_train8_25,acc_test8_25,acc_train9_25,acc_test9_25))\n        writer.writerow((acc_train_30.item(),acc_test_30.item(),acc_train0_30,acc_test0_30,acc_train1_30,acc_test1_30,acc_train2_30,acc_test2_30,acc_train3_30,acc_test3_30,acc_train4_30,acc_test4_30,acc_train5_30,acc_test5_30,acc_train6_30,acc_test6_30,acc_train7_30,acc_test7_30,acc_train8_30,acc_test8_30,acc_train9_30,acc_test9_30))\n\n\n    with open(\"label_flipped_frequency_data.csv\",\"w\") as f:\n        writer = csv.writer(f,delimiter=',')\n        writer.writerow((\"TOTAL_TRAIN_0\",\"CORRECT_TRAIN_0\",\"TOTAL_TEST_0\",\"CORRECT_TEST_0\",\"TOTAL_TRAIN_1\",\"CORRECT_TRAIN_1\",\"TOTAL_TEST_1\",\"CORRECT_TEST_1\",\"TOTAL_TRAIN_2\",\"CORRECT_TRAIN_2\",\"TOTAL_TEST_2\",\"CORRECT_TEST_2\",\"TOTAL_TRAIN_3\",\"CORRECT_TRAIN_3\",\"TOTAL_TEST_3\",\"CORRECT_TEST_3\",\"TOTAL_TRAIN_4\",\"CORRECT_TRAIN_4\",\"TOTAL_TEST_4\",\"CORRECT_TEST_4\",\"TOTAL_TRAIN_5\",\"CORRECT_TRAIN_5\",\"TOTAL_TEST_5\",\"CORRECT_TEST_5\",\"TOTAL_TRAIN_6\",\"CORRECT_TRAIN_6\",\"TOTAL_TEST_6\",\"CORRECT_TEST_6\",\"TOTAL_TRAIN_7\",\"CORRECT_TRAIN_7\",\"TOTAL_TEST_7\",\"CORRECT_TEST_7\",\"TOTAL_TRAIN_8\",\"CORRECT_TRAIN_8\",\"TOTAL_TEST_8\",\"CORRECT_TEST_8\",\"TOTAL_TRAIN_9\",\"CORRECT_TRAIN_9\",\"TOTAL_TEST_9\",\"CORRECT_TEST_9\"))\n
writer.writerow((target_train_freq[0],correct_train_freq[0],target_test_freq[0],correct_test_freq[0],target_train_freq[1],correct_train_freq[1],target_test_freq[1],correct_test_freq[1],target_train_freq[2],correct_train_freq[2],target_test_freq[2],correct_test_freq[2],target_train_freq[3],correct_train_freq[3],target_test_freq[3],correct_test_freq[3],target_train_freq[4],correct_train_freq[4],target_test_freq[4],correct_test_freq[4],target_train_freq[5],correct_train_freq[5],target_test_freq[5],correct_test_freq[5],target_train_freq[6],correct_train_freq[6],target_test_freq[6],correct_test_freq[6],target_train_freq[7],correct_train_freq[7],target_test_freq[7],correct_test_freq[7],target_train_freq[8],correct_train_freq[8],target_test_freq[8],correct_test_freq[8],target_train_freq[9],correct_train_freq[9],target_test_freq[9],correct_test_freq[9]))\n        writer.writerow((target_train_freq_1[0],correct_train_freq_1[0],target_test_freq_1[0],correct_test_freq_1[0],target_train_freq_1[1],correct_train_freq_1[1],target_test_freq_1[1],correct_test_freq_1[1],target_train_freq_1[2],correct_train_freq_1[2],target_test_freq_1[2],correct_test_freq_1[2],target_train_freq_1[3],correct_train_freq_1[3],target_test_freq_1[3],correct_test_freq_1[3],target_train_freq_1[4],correct_train_freq_1[4],target_test_freq_1[4],correct_test_freq_1[4],target_train_freq_1[5],correct_train_freq_1[5],target_test_freq_1[5],correct_test_freq_1[5],target_train_freq_1[6],correct_train_freq_1[6],target_test_freq_1[6],correct_test_freq_1[6],target_train_freq_1[7],correct_train_freq_1[7],target_test_freq_1[7],correct_test_freq_1[7],target_train_freq_1[8],correct_train_freq_1[8],target_test_freq_1[8],correct_test_freq_1[8],target_train_freq_1[9],correct_train_freq_1[9],target_test_freq_1[9],correct_test_freq_1[9]))\n        writer.writerow((target_train_freq_5[0],correct_train_freq_5[0],target_test_freq_5[0],correct_test_freq_5[0],target_train_freq_5[1],correct_train_freq_5[1],target_test_freq_5[1],correct_test_freq_5[1],target_train_freq_5[2],correct_train_freq_5[2],target_test_freq_5[2],correct_test_freq_5[2],target_train_freq_5[3],correct_train_freq_5[3],target_test_freq_5[3],correct_test_freq_5[3],target_train_freq_5[4],correct_train_freq_5[4],target_test_freq_5[4],correct_test_freq_5[4],target_train_freq_5[5],correct_train_freq_5[5],target_test_freq_5[5],correct_test_freq_5[5],target_train_freq_5[6],correct_train_freq_5[6],target_test_freq_5[6],correct_test_freq_5[6],target_train_freq_5[7],correct_train_freq_5[7],target_test_freq_5[7],correct_test_freq_5[7],target_train_freq_5[8],correct_train_freq_5[8],target_test_freq_5[8],correct_test_freq_5[8],target_train_freq_5[9],correct_train_freq_5[9],target_test_freq_5[9],correct_test_freq_5[9]))\n
writer.writerow((target_train_freq_10[0],correct_train_freq_10[0],target_test_freq_10[0],correct_test_freq_10[0],target_train_freq_10[1],correct_train_freq_10[1],target_test_freq_10[1],correct_test_freq_10[1],target_train_freq_10[2],correct_train_freq_10[2],target_test_freq_10[2],correct_test_freq_10[2],target_train_freq_10[3],correct_train_freq_10[3],target_test_freq_10[3],correct_test_freq_10[3],target_train_freq_10[4],correct_train_freq_10[4],target_test_freq_10[4],correct_test_freq_10[4],target_train_freq_10[5],correct_train_freq_10[5],target_test_freq_10[5],correct_test_freq_10[5],target_train_freq_10[6],correct_train_freq_10[6],target_test_freq_10[6],correct_test_freq_10[6],target_train_freq_10[7],correct_train_freq_10[7],target_test_freq_10[7],correct_test_freq_10[7],target_train_freq_10[8],correct_train_freq_10[8],target_test_freq_10[8],correct_test_freq_10[8],target_train_freq_10[9],correct_train_freq_10[9],target_test_freq_10[9],correct_test_freq_10[9]))\n        writer.writerow((target_train_freq_15[0],correct_train_freq_15[0],target_test_freq_15[0],correct_test_freq_15[0],target_train_freq_15[1],correct_train_freq_15[1],target_test_freq_15[1],correct_test_freq_15[1],target_train_freq_15[2],correct_train_freq_15[2],target_test_freq_15[2],correct_test_freq_15[2],target_train_freq_15[3],correct_train_freq_15[3],target_test_freq_15[3],correct_test_freq_15[3],target_train_freq_15[4],correct_train_freq_15[4],target_test_freq_15[4],correct_test_freq_15[4],target_train_freq_15[5],correct_train_freq_15[5],target_test_freq_15[5],correct_test_freq_15[5],target_train_freq_15[6],correct_train_freq_15[6],target_test_freq_15[6],correct_test_freq_15[6],target_train_freq_15[7],correct_train_freq_15[7],target_test_freq_15[7],correct_test_freq_15[7],target_train_freq_15[8],correct_train_freq_15[8],target_test_freq_15[8],correct_test_freq_15[8],target_train_freq_15[9],correct_train_freq_15[9],target_test_freq_15[9],correct_test_freq_15[9]))\n        writer.writerow((target_train_freq_20[0],correct_train_freq_20[0],target_test_freq_20[0],correct_test_freq_20[0],target_train_freq_20[1],correct_train_freq_20[1],target_test_freq_20[1],correct_test_freq_20[1],target_train_freq_20[2],correct_train_freq_20[2],target_test_freq_20[2],correct_test_freq_20[2],target_train_freq_20[3],correct_train_freq_20[3],target_test_freq_20[3],correct_test_freq_20[3],target_train_freq_20[4],correct_train_freq_20[4],target_test_freq_20[4],correct_test_freq_20[4],target_train_freq_20[5],correct_train_freq_20[5],target_test_freq_20[5],correct_test_freq_20[5],target_train_freq_20[6],correct_train_freq_20[6],target_test_freq_20[6],correct_test_freq_20[6],target_train_freq_20[7],correct_train_freq_20[7],target_test_freq_20[7],correct_test_freq_20[7],target_train_freq_20[8],correct_train_freq_20[8],target_test_freq_20[8],correct_test_freq_20[8],target_train_freq_20[9],correct_train_freq_20[9],target_test_freq_20[9],correct_test_freq_20[9]))\n
writer.writerow((target_train_freq_25[0],correct_train_freq_25[0],target_test_freq_25[0],correct_test_freq_25[0],target_train_freq_25[1],correct_train_freq_25[1],target_test_freq_25[1],correct_test_freq_25[1],target_train_freq_25[2],correct_train_freq_25[2],target_test_freq_25[2],correct_test_freq_25[2],target_train_freq_25[3],correct_train_freq_25[3],target_test_freq_25[3],correct_test_freq_25[3],target_train_freq_25[4],correct_train_freq_25[4],target_test_freq_25[4],correct_test_freq_25[4],target_train_freq_25[5],correct_train_freq_25[5],target_test_freq_25[5],correct_test_freq_25[5],target_train_freq_25[6],correct_train_freq_25[6],target_test_freq_25[6],correct_test_freq_25[6],target_train_freq_25[7],correct_train_freq_25[7],target_test_freq_25[7],correct_test_freq_25[7],target_train_freq_25[8],correct_train_freq_25[8],target_test_freq_25[8],correct_test_freq_25[8],target_train_freq_25[9],correct_train_freq_25[9],target_test_freq_25[9],correct_test_freq_25[9]))\n        writer.writerow((target_train_freq_30[0],correct_train_freq_30[0],target_test_freq_30[0],correct_test_freq_30[0],target_train_freq_30[1],correct_train_freq_30[1],target_test_freq_30[1],correct_test_freq_30[1],target_train_freq_30[2],correct_train_freq_30[2],target_test_freq_30[2],correct_test_freq_30[2],target_train_freq_30[3],correct_train_freq_30[3],target_test_freq_30[3],correct_test_freq_30[3],target_train_freq_30[4],correct_train_freq_30[4],target_test_freq_30[4],correct_test_freq_30[4],target_train_freq_30[5],correct_train_freq_30[5],target_test_freq_30[5],correct_test_freq_30[5],target_train_freq_30[6],correct_train_freq_30[6],target_test_freq_30[6],correct_test_freq_30[6],target_train_freq_30[7],correct_train_freq_30[7],target_test_freq_30[7],correct_test_freq_30[7],target_train_freq_30[8],correct_train_freq_30[8],target_test_freq_30[8],correct_test_freq_30[8],target_train_freq_30[9],correct_train_freq_30[9],target_test_freq_30[9],correct_test_freq_30[9]))\n\nend = time.time()\nprint(end-start)\n\n\n \n","sub_path":"federated-learning-master/main_fed_v2_label_flipping_CIFAR.py","file_name":"main_fed_v2_label_flipping_CIFAR.py","file_ext":"py","file_size_in_byte":32822,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"313861802","text":"import pygame\nfrom game.game import Game\nfrom game.config import SCREEN_SIZE\n\n\ndef main():\n\n    running = True\n    playing = True\n\n    # Initialise pygame, the clock and setup the screen\n    pygame.init()\n    pygame.mixer.init()\n    pygame.font.init()\n    screen = pygame.display.set_mode(SCREEN_SIZE)\n    pygame.display.set_caption(\"Isometric World\")\n    clock = pygame.time.Clock()\n    font = pygame.font.SysFont(\"Ubuntu\", 20)\n\n    # Implement menus\n\n    # Implement game\n    game = Game(screen, clock, font)\n\n    while running:\n\n        # Start menu\n\n        while playing:\n            # Game loop\n            game.run()\n\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"pygame/isometric-blocks/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":664,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"188620172","text":"import pandas as pd\nimport plotly.express as px\nimport plotly.graph_objects as go\nimport dash\nimport dash_core_components as dcc\nimport dash_bootstrap_components as dbc\nimport dash_html_components as html\nfrom dash.dependencies import Input, Output\n\n# general dash tutorial --> https://www.youtube.com/watch?v=hSPmj7mK6ng\n\napp = dash.Dash(__name__,
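[Editor's note: illustrative sketch, not part of the dataset records. The label-flipping experiment above repeats identical aggregate/load/report code once per malicious-client count; a dict keyed by scenario removes that duplication. `FedAvg`, the per-scenario nets, and the weight/loss lists are assumed to exist exactly as in that script.]

def aggregate_round(scenarios, fed_avg, rnd):
    """scenarios: {name: {'net': model, 'w_locals': [...], 'losses': [...]}}"""
    for name, s in scenarios.items():
        w_glob = fed_avg(s['w_locals'])       # average the local weights
        s['net'].load_state_dict(w_glob)      # push the global model back
        loss_avg = sum(s['losses']) / len(s['losses'])
        print('{} ---> Round {:3d}, Average loss {:.3f}'.format(name, rnd, loss_avg))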
external_stylesheets=[dbc.themes.BOOTSTRAP])\n\n# read data set\ndff = pd.read_csv('year_data/2012.csv')\n\n# dash components and html goes here\napp.layout = html.Div([\n\n dbc.Row(\n children=[\n dbc.Col(\n html.Img(\n src='https://cdn.dal.ca/content/dam/dalhousie/images/dept/communicationsandmarketing/01%20DAL%20FullMark-Blk.jpg.lt_412f83be03abff99eefef11c3f1ec3a4.res/01%20DAL%20FullMark-Blk.jpg',\n width='240px'),\n className='col-2'\n ),\n dbc.Col(\n [\n html.P(children='Welcome Data Bender!', className='display-4 text-center', style={'font-size': 38}),\n html.P(children='Unleash your data bending powers.', className='lead text-center'),\n ],\n className='col-8'\n ),\n dbc.Col(\n [\n html.P(children='Visual Analytics', className='lead text-center'),\n html.Div(children='CSCI 6612', className='lead text-center'),\n ],\n className='col-2 pt-2'\n ),\n ]\n ),\n html.Hr(),\n # dbc.Row(\n # dbc.Col(\n # children=[\n # html.H1(\"Suicides in India Dashboard\", style={'text-align': 'center'}),\n # ]\n # )\n # ),\n # html.Hr(),\n dbc.Row(\n children=[\n dbc.Col(\n children=[\n html.H1('Data Map'),\n dcc.Graph(id='suicide_map', figure={}),\n html.Br(),\n\n dcc.Slider(\n id='select_year',\n min=2001,\n max=2012,\n step=1,\n value=2012,\n ),\n html.Div(id='output_container', children=[])\n ],\n className='col-6'\n ),\n dbc.Col(\n children=[\n html.P(id='selected_print',\n children={},\n ),\n dcc.Dropdown(id='description_select', value=[], options=[],\n clearable=False, searchable=False\n ),\n dcc.Graph(id='feature_description', figure={}),\n html.P(id='risk_level', children={}),\n html.P(children='Note: if a single state was selected, this data is based on that state from 2001 '\n 'to 2012 data.')\n ],\n className='col-6'\n )\n ]\n ),\n html.Hr(),\n dbc.Row(\n children=[\n dbc.Col(\n children=[\n dcc.Dropdown(id='data_by_select',\n value='profession',\n options=[{'label': 'Data by Profession', 'value': 'profession'},\n {'label': 'Data by Cause', 'value': 'cause'},\n {'label': 'Data by Gender', 'value': 'gender'}, ],\n className='col-3', clearable=False, searchable=False\n )\n ]\n )\n ]\n ),\n dbc.Row(\n children=[\n dcc.Graph(id='data_by_figure', figure={})\n ],\n justify='center',\n align='center'\n ),\n html.Hr(),\n\n])\n\n\n# connect Plotly graphs and Dash components\n@app.callback(\n [Output(component_id='output_container', component_property='children'),\n Output(component_id='suicide_map', component_property='figure')],\n [Input(component_id='select_year', component_property='value')]\n)\ndef update_graph(slider_select):\n container = \"The year chosen by user was: {}\".format(slider_select)\n\n dff = pd.read_csv('year_data/' + str(slider_select) + '.csv')\n\n # Plotly Express\n fig = px.choropleth(\n data_frame=dff,\n # api for map of india\n # found here https://stackoverflow.com/questions/60910962/is-there-any-way-to-draw-india-map-in-plotly\n geojson=\"https://gist.githubusercontent.com/jbrobst/56c13bbbf9d97d187fea01ca62ea5112/raw\"\n \"/e388c4cae20aa53cb5090210a42ebb9b765c0a36/india_states.geojson\",\n featureidkey='properties.ST_NM',\n locations='State',\n color='Total', # z\n # cluster\n hover_data=['State', 'Total', 'Causes_Bankruptcy or Sudden change in Economic Status',\n 'Causes_Cancellation/Non-Settlement of Marriage', 'Causes_Cancer', 'Causes_Causes Not known',\n 'Causes_Death of Dear Person', 'Causes_Divorce', 'Causes_Dowry Dispute'],\n # color_continuous_scale=px.colors.sequential.YlOrRd,\n color_continuous_scale='Reds',\n range_color=[0, 20000],\n labels={'Total': 'Total 
Suicides in India'},\n # template='plotly_dark',\n )\n\n fig.update_geos(fitbounds=\"locations\", visible=False)\n\n return container, fig\n\n\n# for selecting map data\n@app.callback(\n [Output(component_id='selected_print', component_property='children'),\n Output(component_id='description_select', component_property='value'),\n Output(component_id='description_select', component_property='options')],\n [Input(component_id='suicide_map', component_property='clickData'),\n Input(component_id='suicide_map', component_property='selectedData')]\n)\ndef select_data(clicked, selected):\n try:\n if selected is None:\n selected = clicked\n\n locations_str = ['State(s) Selected: ']\n locations = []\n for point in selected['points']:\n for attr, value in point.items():\n if attr == 'location':\n locations_str.append(str(value) + \", \")\n locations.append(str(value))\n\n # combine all select states into one df series\n # this is what the figures will use for data\n selected_states = dff[dff['State'].isin(locations)]\n\n # find all column options and slice them for labels to look better\n feature_description_select = []\n for col in selected_states.columns:\n if 'Professional_Profile_' in col:\n label = col[21:]\n feature_description_select.append({'label': label, 'value': col})\n elif 'Education_Status_' in col:\n label = col[17:]\n feature_description_select.append({'label': label, 'value': col})\n elif 'Social_Status_' in col:\n label = col[14:]\n feature_description_select.append({'label': label, 'value': col})\n\n # set default value\n feature_description_value = feature_description_select[0]['value']\n\n return locations_str, feature_description_value, feature_description_select\n\n # init data\n except TypeError:\n temp = dff[dff['State'] == 'Maharashtra']\n\n # find all column options and slice them for labels to look better\n feature_description_select = []\n for col in temp.columns:\n if 'Professional_Profile_' in col:\n label = col[21:]\n feature_description_select.append({'label': label, 'value': col})\n elif 'Education_Status_' in col:\n label = col[17:]\n feature_description_select.append({'label': label, 'value': col})\n elif 'Social_Status_' in col:\n label = col[14:]\n feature_description_select.append({'label': label, 'value': col})\n\n # set default value\n feature_description_value = feature_description_select[0]['value']\n\n return ['States(s) Selected: Maharashtra'], feature_description_value, feature_description_select\n\n\n@app.callback([Output(component_id='feature_description', component_property='figure'),\n Output(component_id='risk_level', component_property='children')],\n [Input(component_id='description_select', component_property='value'),\n Input(component_id='suicide_map', component_property='clickData'),\n Input(component_id='suicide_map', component_property='selectedData')])\ndef feature_matrix(feature, clicked, selected):\n try:\n if selected is None:\n selected = clicked\n\n locations = []\n for point in selected['points']:\n for attr, value in point.items():\n if attr == 'location':\n locations.append(str(value))\n\n # combine all select states into one df series\n # this is what the figures will use for data\n if len(locations) == 1:\n selected_states = pd.DataFrame()\n for location in locations:\n df = pd.read_csv('state_data/' + str(location) + '.csv')\n selected_states = selected_states.append(df, ignore_index=True)\n else:\n locations = []\n for point in selected['points']:\n for attr, value in point.items():\n if attr == 'location':\n 
locations.append(str(value))\n\n selected_states = dff[dff['State'].isin(locations)]\n\n label = ''\n if 'Professional_Profile_' in feature:\n label = feature[21:]\n elif 'Education_Status_' in feature:\n label = feature[17:]\n elif 'Social_Status_' in feature:\n label = feature[14:]\n\n # find correlation matrix\n corr = selected_states[['Total', feature]].corr(method='pearson')\n # visualize correlation matrix\n corr_map = go.Figure()\n corr_map.add_trace(go.Heatmap(x=['Total', label], y=['Total', label], z=corr, colorscale='reds'))\n corr_map.update_layout(title_text='Correlation Heatmap')\n\n risk = corr.iloc[0, 1]\n\n if risk >= .80:\n risk_rating = 'This group is at high risk.'\n elif (risk < .80) and (risk >= 0.50):\n risk_rating = 'This group is at moderate risk.'\n else:\n risk_rating = 'This group is at a low risk.'\n return corr_map, risk_rating\n\n # init data\n except TypeError:\n selected_states = pd.read_csv('state_data/Maharashtra.csv')\n\n label = ''\n if 'Professional_Profile_' in feature:\n label = feature[21:]\n elif 'Education_Status_' in feature:\n label = feature[17:]\n elif 'Social_Status_' in feature:\n label = feature[14:]\n\n # find correlation matrix\n corr = selected_states[['Total', feature]].corr(method='pearson')\n # visualize correlation matrix\n corr_map = go.Figure()\n corr_map.add_trace(go.Heatmap(x=['Total', label], y=['Total', label], z=corr, colorscale='reds'))\n corr_map.update_layout(title_text='Correlation Heatmap')\n\n risk = corr.iloc[0, 1]\n\n if risk >= .80:\n risk_rating = 'This group is at high risk.'\n elif (risk < .80) and (risk >= 0.50):\n risk_rating = 'This group is at moderate risk.'\n else:\n risk_rating = 'This group is at a low risk.'\n return corr_map, risk_rating\n\n\n@app.callback(Output(component_id='data_by_figure', component_property='figure'),\n [Input(component_id='data_by_select', component_property='value'),\n Input(component_id='suicide_map', component_property='clickData'),\n Input(component_id='suicide_map', component_property='selectedData')])\ndef data_by(figure_select, clicked, selected):\n try:\n if selected is None:\n selected = clicked\n\n locations_str = ['State(s) Selected: ']\n locations = []\n for point in selected['points']:\n for attr, value in point.items():\n if attr == 'location':\n locations_str.append(str(value) + \", \")\n locations.append(str(value))\n\n # combine all select states into one df series\n # this is what the figures will use for data\n selected_states = dff[dff['State'].isin(locations)]\n\n if figure_select == 'profession':\n # get all professions\n professions = []\n for col in selected_states.columns:\n if 'Professional_Profile_' in col:\n professions.append(col)\n\n # slice the profession strings so they just say the professions\n professions_sliced = []\n for profession in professions:\n professions_sliced.append(profession[21:])\n\n # y values for professions\n profession_values = []\n for profession in professions:\n profession_values.append(selected_states[profession].sum())\n\n # chart for profession\n professions_bar = go.Figure()\n professions_bar.add_trace(go.Bar(x=professions_sliced, y=profession_values,\n marker=dict(color=profession_values, colorscale='reds')))\n professions_bar.update_layout(title_text='Data by Profession', width=1200)\n\n return professions_bar\n\n elif figure_select == 'cause':\n # get all causes\n causes = []\n for column in selected_states.columns:\n if 'Causes_' in column:\n causes.append(column)\n\n # slice the causes strings so they just say the 
causes\n causes_sliced = []\n for cause in causes:\n causes_sliced.append(cause[7:])\n\n # y value for causes\n causes_values = []\n for cause in causes:\n causes_values.append(selected_states[cause].sum())\n\n # chart for causes\n causes_bar = go.Figure()\n causes_bar.add_trace(go.Bar(x=causes_sliced, y=causes_values,\n marker=dict(color=causes_values, colorscale='reds')))\n causes_bar.update_layout(title_text='Data by Cause', width=1200)\n\n return causes_bar\n\n else: # gender\n gender_bar = go.Figure()\n gender_bar.add_trace(\n go.Bar(y=['Gender'], x=[selected_states['Female'].sum()], name='Female', orientation='h',\n marker=dict(color=[selected_states['Female'].sum()], colorscale='reds')))\n gender_bar.add_trace(go.Bar(y=['Gender'], x=[selected_states['Male'].sum()], name='Male', orientation='h',\n marker=dict(color=[selected_states['Male'].sum()], colorscale='reds')))\n gender_bar.update_layout(title_text='Data by Gender', width=1200)\n\n return gender_bar\n\n # init figure\n except TypeError:\n temp = dff[dff['State'] == 'Maharashtra']\n\n if figure_select == 'profession':\n # get all professions\n professions = []\n for col in temp.columns:\n if 'Professional_Profile_' in col:\n professions.append(col)\n\n # slice the profession strings so they just say the professions\n professions_sliced = []\n for profession in professions:\n professions_sliced.append(profession[21:])\n\n # y values for professions\n profession_values = []\n for profession in professions:\n profession_values.append(temp[profession].sum())\n\n # chart for profession\n professions_bar = go.Figure()\n professions_bar.add_trace(go.Bar(x=professions_sliced, y=profession_values,\n marker=dict(color=profession_values, colorscale='reds')))\n professions_bar.update_layout(title_text='Data by Profession', width=1200)\n\n return professions_bar\n\n elif figure_select == 'cause':\n # get all causes\n causes = []\n for column in temp.columns:\n if 'Causes_' in column:\n causes.append(column)\n\n # slice the causes strings so they just say the causes\n causes_sliced = []\n for cause in causes:\n causes_sliced.append(cause[7:])\n\n # y value for causes\n causes_values = []\n for cause in causes:\n causes_values.append(temp[cause].sum())\n\n # chart for causes\n causes_bar = go.Figure()\n causes_bar.add_trace(go.Bar(x=causes_sliced, y=causes_values,\n marker=dict(color=causes_values, colorscale='reds')))\n causes_bar.update_layout(title_text='Data by Cause', width=1200)\n\n return causes_bar\n\n else: # gender\n gender_bar = go.Figure()\n gender_bar.add_trace(\n go.Bar(y=['Gender'], x=[temp['Female'].sum()], name='Female', orientation='h',\n marker=dict(color=[temp['Female'].sum()], colorscale='reds')))\n gender_bar.add_trace(go.Bar(y=['Gender'], x=[temp['Male'].sum()], name='Male', orientation='h',\n marker=dict(color=[temp['Male'].sum()], colorscale='reds')))\n gender_bar.update_layout(title_text='Data by Gender', width=1200)\n\n return gender_bar\n\n\nif __name__ == '__main__':\n app.run_server(debug=True)\n","sub_path":"dash_app/old_dashboard.py","file_name":"old_dashboard.py","file_ext":"py","file_size_in_byte":17761,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"388327135","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu May 23 11:50:44 2019\r\n\r\n@author: Shri\r\n\"\"\"\r\n\r\ndef is_palindrome(st):\r\n n = len(st) \r\n st = st.upper()\r\n \r\n if (n == 0) : \r\n return True\r\n \r\n return isPalRec(st, 0, n - 1);\r\n\r\ndef isPalRec(st, s, e) : \r\n 
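[Editor's note: illustrative helper for the dashboard record above; the prefix names are the ones that record uses. The prefix-stripping that builds dropdown options is repeated across several of its callbacks and can live in one function.]

PREFIXES = ('Professional_Profile_', 'Education_Status_', 'Social_Status_')

def feature_options(columns, prefixes=PREFIXES):
    """Build Dash dropdown options from every column carrying a known prefix."""
    options = []
    for col in columns:
        for prefix in prefixes:
            if col.startswith(prefix):
                options.append({'label': col[len(prefix):], 'value': col})
                break
    return options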
\r\n \r\n if (s == e): \r\n return True\r\n \r\n \r\n if (st[s] != st[e]) : \r\n return False \r\n if (s < e + 1) : \r\n return isPalRec(st, s + 1, e - 1); \r\n \r\n return True\r\n \r\nresult=is_palindrome(\"mm\")\r\nif(result):\r\n print(\"The given word is a Palindrome\")\r\nelse:\r\n print(\"The given word is not a Palindrome\")","sub_path":"assignmentt6.py","file_name":"assignmentt6.py","file_ext":"py","file_size_in_byte":623,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"43012309","text":"import random\nimport torch\nimport numpy as np\nimport math\n\ndef set_random_seed(seed, is_cuda):\n \"\"\"Sets the random seed.\"\"\"\n if seed > 0:\n torch.manual_seed(seed)\n # this one is needed for torchtext random call (shuffled iterator)\n # in multi gpu it ensures datasets are read in the same order\n random.seed(seed)\n np.random.seed(seed)\n # some cudnn methods can be random even after fixing the seed\n # unless you tell it to be deterministic\n torch.backends.cudnn.deterministic = True\n\n if is_cuda and seed > 0:\n # These ensure same initialization in multi gpu mode\n torch.cuda.manual_seed(seed)\n\n\ndef time_since(s): # compute time\n m = math.floor(s / 60)\n s -= m * 60\n h = math.floor(m / 60)\n m -= h * 60\n return '%dh %dm %ds' % (h, m, s)\n\n\ndef copy_list(l):\n r = []\n if len(l) == 0:\n return r\n for i in l:\n if type(i) is list:\n r.append(copy_list(i))\n else:\n r.append(i)\n return r\n\n\n# for ape210k\ndef is_equal(a, b):\n \"\"\"\n 比较两个结果是否相等\n \"\"\"\n a = round(float(a), 6)\n b = round(float(b), 6)\n return a == b\n\n\n# for ape210k\ndef remove_bucket(equation):\n \"\"\"去掉冗余的括号\n \"\"\"\n l_buckets, buckets = [], []\n for i, c in enumerate(equation):\n if c == '(':\n l_buckets.append(i)\n elif c == ')':\n buckets.append((l_buckets.pop(), i))\n eval_equation = eval(equation)\n for l, r in buckets:\n new_equation = '%s %s %s' % (\n equation[:l], equation[l + 1:r], equation[r + 1:]\n )\n try:\n if is_equal(eval(new_equation.replace(' ', '')), eval_equation):\n equation = new_equation\n except:\n pass\n return equation.replace(' ', '')","sub_path":"code_bak_v1_508/src/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1835,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"291640314","text":"from random import randint, sample, uniform\nfrom acme import Product\n\nadjectives = ['Awesome', 'Shiny', 'Impressive', 'Portable', 'Improved']\nnouns = ['Anvil', 'Catapult', 'Disguise', 'Mousetrap', '???']\n\n\ndef generate_products(num_products=30):\n \"\"\"\n Generate list of products for Acme Corporation\n \"\"\"\n products = []\n for i in range(0, num_products):\n name = adjectives[randint(0, len(adjectives)-1)] + \\\n \" \" + nouns[randint(0, len(nouns)-1)]\n price = randint(5, 100)\n weight = randint(5, 100)\n flammability = uniform(0.0, 2.5)\n product = Product(name, price, weight, flammability)\n products.append(product)\n return products\n\n\ndef inventory_report(products):\n \"\"\"\n print inventory report of Acme Corporation products\n \"\"\"\n products_name = [i.name for i in products]\n products_price = [i.price for i in products]\n products_weight = [i.weight for i in products]\n products_flammability = [i.flammability for i in products]\n print(\"ACME CORPORATION OFFICIAL INVENTORY REPORT\")\n print(\"Unique product names:\", len(set(products_name)))\n print(\"Average price:\", sum(products_price)/len(products_price))\n print(\"Average 
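[Editor's note: worked examples for the equation utilities above, checked by hand against the function definitions rather than taken from the source; assumes remove_bucket and is_equal are imported from that module.]

assert remove_bucket('(1+2)+3') == '1+2+3'    # redundant brackets are removed
assert remove_bucket('(1+2)*3') == '(1+2)*3'  # needed brackets are kept (7 != 9)
assert is_equal('0.1', 0.1000004)             # equal after rounding to 6 places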
weight:\", sum(products_weight)/len(products_weight))\n    print(\"Average flammability:\",\n          sum(products_flammability)/len(products_flammability))\n\nif __name__ == '__main__':\n    inventory_report(generate_products())\n","sub_path":"acme_report.py","file_name":"acme_report.py","file_ext":"py","file_size_in_byte":1445,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"472974923","text":"# coding:utf-8\nimport sys, os, re\nfrom socket import *\nfrom subprocess import getoutput\nimport struct\nfrom django import get_version\nfrom django.template import RequestContext\nfrom django.shortcuts import render_to_response\nfrom admins.models import UserRandom, SSH\nfrom random import random, randrange\nimport paramiko\nimport traceback\nimport zipfile, tarfile\nimport threading\n\n\n# new thread for compress package\nclass threading_compress(threading.Thread):\n    def __init__(self, cmd):\n        threading.Thread.__init__(self)\n        self.cmd = cmd\n\n    def run(self):\n        os.chmod(self.cmd, 0o755)\n        output = getoutput(self.cmd)\n\n\ndef compress_main(cmd):\n    t = threading_compress(cmd)\n    t.setDaemon(True)\n    t.start()\n\n\ndef check_online(ip):\n    host = ip\n    port = 12777\n    addr = (host, port)\n    sock = socket(AF_INET, SOCK_STREAM)\n    sock.settimeout(1)\n    try:\n        sock.connect(addr)\n    except Exception as e:\n        print('check_online e:%s' % e)\n        return False\n    else:\n        return True\n    finally:\n        sock.close()\n\n\nclass upload(object):\n    def makeself(self, src_dir, dst_filename, package_name, init_script):\n        makeself = getoutput('whereis makeself').split(':')[1].split(' ')[1]\n        if not makeself:\n            return \"MAKESELF can not found!\"\n        cmd = makeself + ' ' + src_dir + ' ' + dst_filename + ' ' + package_name + ' ' + init_script\n        compress_main(cmd)\n\n    def extract_tar_zip(self, f, dst):\n        if tarfile.is_tarfile(f):\n            fd = tarfile.open(f)\n            names = fd.getnames()\n            for name in names:\n                fd.extract(name, path=dst)\n            fd.close()\n            return names\n        elif zipfile.is_zipfile(f):\n            fd = zipfile.ZipFile(f)\n            names = fd.namelist()\n            for filename in names:\n                fd.extract(filename, dst)\n            fd.close()\n            return names\n\n    def upload_handler(self, fileobj, filepath):\n        try:\n            fd = open(filepath, 'wb')\n            for line in fileobj.chunks():\n                fd.write(line)\n            fd.flush()\n            fd.close()\n            # names = self.extract_tar_zip(filename,MEDIA_ROOT+'/'+dst_dir)\n            # return names\n        except:\n            info = sys.exc_info()\n            for filename, lineno, function, text in traceback.extract_tb(info[2]):\n                return (filename, \" line:\", lineno, \" in\", function, ' ', text), info[:2],\n\n\nclass SSHClient:\n    def __init__(self, host, username, password, port=22):\n        self.host = host\n        self.username = username\n        self.password = password\n        self.port = port\n        self.addr = (host, port)\n\n    def exec_command(self, command):\n        ssh = paramiko.SSHClient()\n        ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n        try:\n            ssh.connect(self.host, self.port, self.username, self.password)\n            stdin_x, stdout_x, stderr_x = ssh.exec_command(command)\n        except Exception as e:\n            ret = dict()\n            ret['stderr'] = e\n            return ret\n        else:\n            ret = dict()\n            ret['stdin'] = stdin_x\n            ret['stdout'] = stdout_x.readlines()\n            ret['stderr'] = stderr_x.readlines()\n            ssh.close()\n            return ret\n\n    def putfile(self, local, remote):\n        remote_path = remote\n        local_path = local\n        t = None\n        try:\n            t = paramiko.Transport(self.addr)\n            t.connect(None, self.username, self.password)\n            sftp = paramiko.SFTPClient.from_transport(t)\n            sftp.put(local_path, remote_path)\n        except Exception:
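[Editor's note: hedged usage sketch for the SSHClient wrapper defined above; the host and credentials are placeholders, not values from the source. exec_command returns a dict whose 'stdout'/'stderr' hold readlines() output on success and only 'stderr' on failure, which the check below mirrors.]

def demo_remote_uptime():
    client = SSHClient('192.0.2.10', 'deploy', 'secret')  # placeholder target
    ret = client.exec_command('uptime')
    if 'stdout' in ret:
        print(''.join(ret['stdout']))
    else:
        print('remote call failed: %s' % ret['stderr'])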
print(traceback.print_exc())\n finally:\n if t:\n t.close()\n\n def getfile(self, remote, local):\n t = paramiko.Transport(self.addr)\n t.connect(None, self.username, self.password)\n sftp = paramiko.SFTPClient.from_transport(t)\n remote_path = remote\n local_path = local\n try:\n sftp.get(remote_path, local_path)\n except IOError as e:\n return \"got error while get remote file from %s ! %s\" % (self.addr[0], e)\n sys.exit(1)\n t.close()\n sys.exit(0)\n\n\nclass Ajaxterm(object):\n def __init__(self, request=None, host=None):\n self.request = request\n self.host = host\n\n self.pid = ''\n self.pid_file = ''\n\n def start_daemon(self):\n ajaxterm = os.path.abspath(os.path.join(os.path.dirname(__file__), 'ajaxterm/ajaxterm.py'))\n pex = os.path.abspath(os.path.join(os.path.dirname(__file__), 'ajaxterm/pe.py'))\n self.pid = os.path.abspath(\n os.path.join(os.path.dirname(__file__), 'ajaxterm/var/' + str(self.request.user)))\n self.pid_file = self.pid + '/ajaxterm.pid'\n if not os.path.exists(self.pid):\n os.makedirs(self.pid)\n self.stop_daemon()\n p = SSH.objects.get(host=self.host)\n username = p.username\n password = p.password\n p: UserRandom = UserRandom.objects.get(username=self.request.user)\n random_pass = p.random_password\n random_port = p.random_port\n\n os.system('chmod +x ' + ajaxterm + ' ' + pex)\n\n cmd = ajaxterm + ' --port=' + random_port + ' --command=\\\"' + pex + ' ' + username + ' ' + password + ' ' + \\\n self.host + ' ' + random_pass + '\\\" --daemon'\n output = getoutput(cmd)\n print('Ajaxterm start_daemon output:%s' % output)\n m = re.search('pid:\\s(\\d+)', output)\n pid_ajaxterm = m.group(1)\n print('pid_ajaxterm:', pid_ajaxterm)\n fd = open(self.pid_file, 'wb')\n fd.write(pid_ajaxterm.encode())\n fd.flush()\n fd.close()\n return random_pass, random_port\n\n def stop_daemon(self):\n if self.request.user:\n self.pid = os.path.abspath(\n os.path.join(os.path.dirname(__file__), 'ajaxterm/var/' + str(self.request.user)))\n self.pid_file = self.pid + '/ajaxterm.pid'\n\n if not os.path.exists(self.pid_file):\n return\n\n fd = open(self.pid_file)\n pid = fd.readline()\n if len(pid) > 0:\n try:\n os.kill(int(pid), 9)\n except Exception as e:\n print('Ajaxterm stop_daemon e:%s' % e)\n fd.close()\n\n\ndef init_random_pass(request):\n users = UserRandom.objects.filter(username=request.user)\n if users:\n user_random: UserRandom = users[0]\n else:\n user_random: UserRandom = UserRandom()\n user_random.username = request.user\n\n random_password = str(random())[6:-1]\n random_port = randrange(12778, 12800) # 12778 - 12799\n\n user_random.random_password = random_password\n user_random.random_port = random_port\n user_random.save()\n # random_pass = UserRandom.objects.filter(username=request.user)\n # if not random_pass:\n # p = UserRandom(username=request.user, random_password=str(random())[6:-1], random_port=randrange(50000, 65500))\n # p.save()\n # else:\n # password = str(random())[6:-1]\n # port = randrange(50000, 65500)\n # UserRandom.objects.filter(username=request.user).update(random_password=password, random_port=port)\n\n\nclass get_info():\n def get_python_version(self):\n return sys.version\n\n def get_sys_platform(self):\n return sys.platform\n\n def get_loadavg(self):\n return os.getloadavg()\n\n def get_django_version(self):\n return get_version()\n\n\ndef render(template_name, data=None, request=None):\n info = get_info()\n data['info'] = info\n return render_to_response(template_name, data, context_instance=RequestContext(request))\n\n\nclass 
DeviceInfo(object):\n def __init__(self, ip):\n self.ip = ip\n self.port = 12777\n self.addr = (self.ip, self.port)\n # self.do_connect()\n\n def do_connect(self):\n self.SockClient = socket(AF_INET, SOCK_STREAM)\n self.SockClient.settimeout(2)\n self.SockClient.connect(self.addr)\n\n def command_to_order(self, command):\n commands = {'cpu': 'getcpu', 'mem': 'getmem', 'disk': 'getharddrive', 'partition': 'getparttion',\n 'virtued': 'getifvirtued', 'nic': 'getnic', 'vga': 'getvga', 'sysinfo': 'getsysinfo'}\n if commands.get(command):\n return commands[command]\n\n def do_exec(self, command):\n print('command:', command)\n if not command:\n command = \"help\"\n command = self.command_to_order(command)\n print('command:', command)\n # buff_size = struct.calcsize('!1024s')\n command_net = struct.pack('!1024s', command.encode())\n try:\n self.do_connect()\n self.SockClient.send(command_net)\n except Exception as e:\n print(\"send Error! %s!\" % e)\n return \"Error! %s!\" % e\n try:\n print('recv...')\n data = self.SockClient.recv(1024)\n print('revc data:', data)\n data = data.decode('utf8').replace('\\x00', '')\n except Exception as e:\n print(\"recv Error! %s!\" % e)\n return \"No data received!\"\n else:\n return data\n finally:\n self.do_close()\n\n def do_close(self):\n try:\n self.SockClient.shutdown(SHUT_RDWR)\n self.SockClient.close()\n except Exception as e:\n print('do_close e:%s' % e)\n\n\nclass OsmsGate(object):\n def __init__(self, host):\n self.host = host\n self.port = 12777\n self.addr = (self.host, self.port)\n self.do_connect()\n\n def do_connect(self):\n self.SockClient = socket(AF_INET, SOCK_STREAM)\n self.SockClient.settimeout(3)\n self.SockClient.connect(self.addr)\n\n def file(self, cmd):\n cmd = str(cmd.strip())\n if \"putfile\" in cmd:\n filename = cmd.split(' ')[1]\n if not os.path.exists(filename):\n return \"File not found!\"\n self.do_close()\n cmd = struct.pack('!1024s', cmd)\n self.SockClient.send(cmd)\n try:\n fd = open(filename, 'rb')\n except IOError as e:\n return \"Something wrong happend while open file! %s\" % e\n self.do_close()\n buf = \"\"\n while True:\n data = fd.read(1024)\n if not data:\n break\n # data = data.encode('hex')\n self.SockClient.send(data)\n fd.close()\n self.do_close()\n\n # filepath must be absolute path!\n BUF_SIZE1 = struct.calcsize('!128s')\n if \"getfile\" in cmd:\n cmd = str(cmd.strip())\n filename = cmd.split(' ')[2]\n # filename = cmd.split(' ')[1].split('/')[-1]\n cmd = struct.pack('!1024s', cmd)\n self.SockClient.send(cmd)\n rev = self.SockClient.recv(BUF_SIZE1)\n ret = struct.unpack('!128s', rev)[0].replace('\\x00', '')\n if \"not\" in ret:\n return ret\n self.do_close()\n else:\n try:\n fp = open(filename, 'wb')\n except IOError as e:\n return \"errors happend while open %s! %s\" % (filename, e)\n fp.close()\n self.close_socket()\n while True:\n data = self.SockClient.recv(1024)\n if not data:\n break\n fp.write(data.decode('hex'))\n fp.flush()\n fp.close()\n self.do_close()\n\n def do_exec(self, command):\n if not command:\n command = \"help\"\n BUF_SIZE = struct.calcsize('!1024s')\n command = struct.pack('!1024s', command)\n try:\n self.SockClient.connect(self.addr)\n self.SockClient.send(command)\n except Exception as e:\n return \"Error! 
%s!\" % e\n        try:\n            data = self.SockClient.recv(BUF_SIZE)\n            if not data or len(data) <= 0:\n                return \"No data received!\"\n            else:\n                return data.strip()\n        finally:\n            self.do_close()\n\n    def do_close(self):\n        self.SockClient.shutdown(SHUT_RDWR)\n        self.SockClient.close()\n","sub_path":"admins/tools/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":12388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"567410547","text":"from django.core.exceptions import ObjectDoesNotExist\nfrom django.core.paginator import Paginator, PageNotAnInteger, EmptyPage\nfrom django.core.urlresolvers import reverse\nfrom django.db.models import Q\nfrom django.shortcuts import render, HttpResponseRedirect\nfrom .models import Post, Category\n\n\n# Create your views here.\ndef blog_home(request,cat_slug=None):\n    if cat_slug:\n        try:\n            Posts = Post.objects.filter(category__slug=cat_slug)\n        except ObjectDoesNotExist:\n            return HttpResponseRedirect(reverse('blog:blog_home'))\n    else:\n        Posts = Post.objects.all()\n    cats = Category.objects.all()\n    paginator = Paginator(Posts, 6)\n    page = request.GET.get('page')\n    try:\n        page_content = paginator.page(page)\n    except PageNotAnInteger:\n        page_content = paginator.page(1)\n    except EmptyPage:\n        # If page is out of range (e.g. 9999), deliver last page of results.\n        page_content = paginator.page(paginator.num_pages)\n    context = {\n        'posts': page_content, 'cats': cats\n    }\n    return render(request, template_name='blog_home.html', context=context)\n\n\ndef post_view(request, slug):\n    try:\n        post = Post.objects.get(slug=slug)\n    except ObjectDoesNotExist:\n        return HttpResponseRedirect(reverse('blog:blog_home'))\n    cats = Category.objects.all()\n    context = {'cats': cats, 'post': post}\n    return render(request,template_name='post.html', context=context)\n\n\ndef search_posts(request):\n    try:\n        words = request.GET['q']\n        if not words == \"\":\n            results = Post.objects.filter(Q(title__icontains=words) | Q(content__icontains=words))\n            return render(request, template_name='search.html', context={'results': results})\n        else:\n            return render(request, template_name='search.html')\n    except KeyError:\n        return render(request, template_name='search.html')\n","sub_path":"Blog/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1908,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"513449643","text":"\"\"\"Unique Binary Search Trees\"\"\"\r\n\r\nn = 5\r\n\"\"\"Dynamic Programming: We can create a formula to compute the result.\"\"\"\r\ndp = [0 for _ in range(n + 1)]\r\ndp[0] = 1\r\ndp[1] = 1\r\nfor i in range(2, n + 1):\r\n    for j in range(i):\r\n        dp[i] += dp[j] * dp[i - j - 1]\r\n\r\nprint(dp[-1])\r\n","sub_path":"Problem0096_UniqueBinarySearchTree.py","file_name":"Problem0096_UniqueBinarySearchTree.py","file_ext":"py","file_size_in_byte":278,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"450205764","text":"x=int(raw_input('input the integer number which you want to calculate its square root: '))\nepsilon=0.01\nstep=0.5\nnumGuesses=0\nans=0.0\n\nwhile(abs(ans**2-x))>=epsilon and ans<=x:\n    ans+=step\n    numGuesses +=1\n\nprint('numGuesses= '+ str(numGuesses))\n\nif abs(ans**2-x) >= epsilon:\n    print('Failed on square root of ' + str(x))\nelse:\n    print(str(ans) + ' is close to the square root of the ' + 
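[Editor's note: illustrative cross-check for the unique-BST record above, not from the source. The DP there computes the Catalan numbers, so dp[n] can be verified against the closed form C(2n, n) / (n + 1).]

from math import comb  # Python 3.8+

def catalan(n):
    return comb(2 * n, n) // (n + 1)

# catalan(5) == 42, matching the dp[-1] that the record prints for n = 5.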
str(x))","sub_path":"08.04.2015/canopy3.py","file_name":"canopy3.py","file_ext":"py","file_size_in_byte":403,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"294767059","text":"## This file is part of Scapy\n## \n## Copyright (C) Min Cao \n\n\"\"\"\nNVGRE (Network Virtual GRE).\n\"\"\"\n\nfrom scapy.packet import *\nfrom scapy.fields import *\nfrom scapy.layers.inet import UDP,IP\nfrom scapy.layers.inet6 import IPv6\nfrom scapy.layers.l2 import Ether\nfrom scapy.layers.l2 import GRE\n\nIPPROTO_NVGRE=47\n\nclass NVGRE(Packet):\n    name = \"Network Virtual GRE\"\n    fields_desc = [BitField(\"c\", 0, 1),\n                   BitField(\"r\", 0, 1),\n                   BitField(\"k\", 1, 1),\n                   BitField(\"s\", 0, 1),\n                   BitField(\"reserved0\", 0, 9),\n                   BitField(\"ver\", 0, 3),\n                   XShortField(\"protocoltype\", 0x6558),\n                   X3BytesField(\"TNI\", 1),\n                   ByteField(\"reserved1\", 0)]\n\n    def mysummary(self): \n        return self.sprintf(\"NVGRE (tni=%NVGRE.tni%)\") \n\n\nbind_layers(NVGRE, Ether, protocoltype=0x6558)\n# fix conflict of GRE and NVGRE\nsplit_layers(IP, GRE, frag=0, proto=IPPROTO_NVGRE)\nbind_layers(IP, NVGRE, frag=0, proto=IPPROTO_NVGRE)\n\n","sub_path":"dep/nvgre.py","file_name":"nvgre.py","file_ext":"py","file_size_in_byte":1056,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"91619727","text":"import os\r\nfrom collections import Counter\r\nfrom sklearn.naive_bayes import MultinomialNB\r\nfrom sklearn.model_selection import train_test_split as tts\r\nfrom sklearn.metrics import accuracy_score\r\nimport cPickle as c\r\n\r\ndef load(clf_file):\r\n    with open (clf_file) as fp:\r\n        clf = c.load(fp)\r\n    return (clf)\r\n\r\ndef make_dict ():\r\n    direc = \"emails/\"\r\n    files = os.listdir(direc)\r\n\r\n    emails = [direc + email for email in files]\r\n\r\n    words = []\r\n    c = len(emails)\r\n    for email in emails:\r\n        f = open(email)\r\n        blob = f.read()\r\n        words += blob.split(\" \")\r\n        print (c)\r\n        c-=1\r\n    for i in range (len(words)):\r\n        if not words[i].isalpha():\r\n            words[i] = \"\"\r\n\r\n    dictionary = Counter(words)\r\n    del dictionary [\"\"]\r\n    return (dictionary.most_common(3000))\r\n\r\nclf = load (\"text_classifier.mdl\")\r\nd = make_dict()\r\n\r\nwhile True:\r\n    features = []\r\n\r\n    inp = raw_input(\">\")\r\n    if inp == \"exit\":\r\n        break\r\n    inp = inp.split()\r\n    for word in d:\r\n        features.append(inp.count(word[0]))\r\n\r\n    res = clf.predict([features])\r\n\r\n    print [\"NOT SPAM\", \"SPAM\"][res[0]]\r\n","sub_path":"detector.py","file_name":"detector.py","file_ext":"py","file_size_in_byte":1144,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"43080447","text":"from django.urls import path\nfrom . 
import views\napp_name = 'my_app'\nurlpatterns = [\n    path('upload/', views.UserUpdateView.as_view(), name='upload'),\n    path('', views.ProductDetailView.as_view(), name='details'),\n    path('imprimir_archivo', views.proc_dem_file, name=\"imprimir\"),\n    path('suavizar', views.funcion_dma, name=\"dma\"),\n    path('produc_list', views.ProductListView.as_view(), name='product_list'),\n    path('api/data//', views.get_data, name=\"api-data\"),\n]\n","sub_path":"toy_app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":510,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"381761821","text":"rotate_num = int(input())\n\nanswers = []\nfor i in range(rotate_num):\n    M, N, x, y = list(map(int, input().split() ))\n    if x == M: x = 0\n    if y == N: y = 0\n    answer = -1\n    for i in range(x if x > 0 else M, M*N+1, M):\n        if i % N == y: \n            answer = i\n            break\n    answers.append(answer)\n    \nfor ans in answers:\n    print(ans)","sub_path":"week3_extra/BOJ_6064_탐색_합동식_카잉달력.py","file_name":"BOJ_6064_탐색_합동식_카잉달력.py","file_ext":"py","file_size_in_byte":340,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"23074424","text":"import time\nimport urllib\nimport json\n\nfrom sjsclient import client\nfrom sjsclient import app\nfrom .Config import *\n\nclass AsyncSparkJob():\n    APPLICATION_NAME = 'auger_ml_application'\n    CONTEXT_NAME = 'auger_ml_py_context'\n\n    def __init__(self, method, data_path, options, url=None):\n        self.jobData = {'method': method, 'data_path': data_path, 'options': options}\n\n        self.start_time = None\n        self.result = None\n        self.sjJobId = None\n        if url is None:\n            url = getConfig().jobServerUrl \n        self.sjClient = client.Client(url)\n\n    @classmethod\n    def create_from_id(cls, job_id, url = None):\n        aj = AsyncSparkJob(None, None, None, url)\n        aj.sjJobId = job_id\n        return aj\n\n    def get_id(self):\n        return self.sjJobId\n\n    @classmethod\n    def init_remote_application(cls, num_cores = 0, max_memory = \"\", egg_file_path=None):\n        asyncJob = AsyncSparkJob(None, None, None)\n\n        cls.sjApp = None\n        for app_item in asyncJob.sjClient.apps.list():\n            if app_item.name == cls.APPLICATION_NAME:\n                cls.sjApp = app_item\n                break\n\n        if cls.sjApp is None:\n            if egg_file_path is not None:\n                egg_blob = open(egg_file_path, 'rb').read()\n            else:\n                egg_blob = b'\\x50\\x4b\\x03\\x04\\x04' # Zip header, use pip install for auger_ml\n\n            cls.sjApp = asyncJob.sjClient.apps.create(cls.APPLICATION_NAME, egg_blob, app.AppType.PYTHON)\n\n        cls.sjContext = None\n        for ctx in asyncJob.sjClient.contexts.list():\n            if ctx.name == cls.CONTEXT_NAME:\n                cls.sjContext = ctx\n                break\n\n        # if cls.sjContext is not None:\n        #     asyncJob.sjClient.contexts.delete(cls.sjContext.name)\n        #     cls.sjContext = None\n        \n        if cls.sjContext is None:\n            context_params = {'context-factory': 'spark.jobserver.python.PythonSessionContextFactory'}\n            if num_cores > 0 :\n                context_params['num-cpu-cores']= num_cores\n            if len(max_memory)>0 :\n                context_params['memory-per-node']= max_memory\n            \n            cls.sjContext = asyncJob.sjClient.contexts.create(cls.CONTEXT_NAME, context_params)\n\n    def run(self, job_class='auger_ml.AugerMLJob.AugerMLJob', sync_wait_sec=0):\n        self.start_time = time.time()\n\n        if self.sjClient is None:\n            from importlib import import_module\n\n            module_name, class_name = job_class.rsplit(\".\", 1)\n            job = getattr(import_module(module_name), class_name)()\n            #TODO: pass spark context\n            job.run_job(context, None, self.jobData)\n        else: \n            job = self.sjClient.jobs.create(self.sjApp, 
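[Editor's note: worked check for the Kaing-calendar record above, wrapped in a hypothetical helper for testing; the sample values are chosen for illustration and verified by hand. Stepping by M scans at most N candidates instead of M*N.]

def kaing_year(M, N, x, y):
    # same congruence scan as the record above
    if x == M: x = 0
    if y == N: y = 0
    for i in range(x if x > 0 else M, M * N + 1, M):
        if i % N == y:
            return i
    return -1

assert kaing_year(10, 12, 3, 9) == 33    # scan visits 3, 13, 23, 33
assert kaing_year(13, 11, 5, 6) == 83    # 83 % 13 == 5 and 83 % 11 == 6
assert kaing_year(10, 12, 10, 12) == 60  # <M:N> maps to lcm(M, N), not 0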
job_class, \n                ctx=self.sjContext, conf='input.string=\\\"' + urllib.urlencode(self.jobData) + '\\\"', sync=False)\n            self.sjJobId = job.jobId\n\n        if sync_wait_sec>0:\n            time.sleep(sync_wait_sec)\n            while self.is_alive():\n                time.sleep(sync_wait_sec)\n\n            return self.get_result()\n        \n        return self\n\n    def get_elapsed_time(self):\n        return time.time() - self.start_time\n\n    def is_alive(self):\n        job = self.sjClient.jobs.get(self.sjJobId)\n        return job.status == \"RUNNING\"\n\n    def cancel(self):\n        job = self.sjClient.jobs.get(self.sjJobId)\n        job.delete()\n\n    def get_status(self):\n        job = self.sjClient.jobs.get(self.sjJobId)\n        return job.status\n\n    def _make_result(self, job):\n        if (job.status == 'ERROR'):\n            raise Exception(job.result)\n        else:\n            return job.result\n    \n    def get_result(self):\n        job = self.sjClient.jobs.get(self.sjJobId)\n        return self._make_result(job)\n\n    def get_result_and_isalive(self):\n        job = self.sjClient.jobs.get(self.sjJobId)\n        return self._make_result(job), job.status == \"RUNNING\"\n","sub_path":"auger_ml/AsyncSparkJob.py","file_name":"AsyncSparkJob.py","file_ext":"py","file_size_in_byte":3555,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"61372252","text":"\"\"\"\r\n--------------------------------\r\nPython Web Development Techdegree\r\nProject 2 - Basketball Stats Tool\r\nVersion 1.0\r\n--------------------------------\r\n\"\"\"\r\n\r\n#Import player data, copy module, random module, itertools module\r\nimport constants\r\nimport copy\r\nimport itertools\r\nimport random\r\n\r\n#Copy of tables and create new ones\r\nteams_copy = copy.deepcopy(constants.TEAMS)\r\nplayers_copy = copy.deepcopy(constants.PLAYERS)\r\nTeams = {team: [] for team in teams_copy}\r\n\r\nNUM_PLAYERS_TEAM = len(constants.PLAYERS) / len(constants.TEAMS)\r\nteams_iter = itertools.cycle(teams_copy)\r\n\r\n\r\n#'Height' saved as an integer\r\ndef clean_height():\r\n    for player in players_copy:\r\n        player['height'] = int(player['height'][:2])\r\n\r\n\r\n#'Experience' saved as a boolean value \r\ndef clean_experience():\r\n    for player in players_copy:\r\n        player['experience'] = True if player['experience'].lower() == 'yes' else False\r\n\r\n\r\n#'Guardians' split up string into a list\r\ndef clean_guardians():\r\n    for player in players_copy:\r\n        player['guardians'] = player['guardians'].split(' and ')\r\n\r\n\r\n#Balance the players across the three teams with equal experienced\r\ndef random_player_balance():\r\n    exp = 0\r\n    for player in players_copy:\r\n        if player['experience']:\r\n            exp += 1\r\n    num_exp_players_team = exp / len(constants.TEAMS)\r\n    num_non_exp_players_team = NUM_PLAYERS_TEAM - num_exp_players_team\r\n    \r\n    print(NUM_PLAYERS_TEAM)\r\n    print(num_exp_players_team)\r\n    \r\n    random.shuffle(players_copy)\r\n    count = 0\r\n    while count < len(players_copy):\r\n        Teams[next(teams_iter)].append(players_copy[count])\r\n        count += 1\r\n\r\n\r\n#Display menu to user and continuously loop until user selects 'Quit'\r\ndef start_menu():\r\n    print('\\n BASKETBALL TEAM STATS TOOL\\n','\\n -----MENU-----\\n\\n',' Here are your choices:\\n',' 1) Display Team Stats\\n',' 2) Quit\\n')\r\n    number_selection = input('Enter an option: ')\r\n    next = False\r\n    while next is False:\r\n        if number_selection == \"1\":\r\n            next = True\r\n            stats_menu()\r\n        elif number_selection == '2':\r\n            next = True\r\n            print('\\nGoodbye!\\n')\r\n        else:\r\n            print('The selection was not a valid option.')\r\n            number_selection = input('Enter an option: ')\r\n\r\n\r\n#Display layer 
def stats_menu():\r\n    print('\\n  Here are your choices:\\n','   1) Panthers\\n','   2) Bandits\\n','   3) Warriors\\n','   4) Quit\\n')\r\n    second_number_selection = input('Enter an option: ')\r\n    next = False\r\n    while next is False:\r\n        if second_number_selection == '1':\r\n            next = True\r\n            print('\\n Team: Panthers Stats\\n', '-'*20)\r\n            stats(Teams['Panthers'])\r\n        elif second_number_selection == '2':\r\n            next = True\r\n            print('\\n Team: Bandits Stats\\n', '-'*20)\r\n            stats(Teams['Bandits'])\r\n        elif second_number_selection == '3':\r\n            next = True\r\n            print('\\n Team: Warriors Stats\\n', '-'*20)\r\n            stats(Teams['Warriors'])\r\n        elif second_number_selection == '4':\r\n            next = True\r\n            print('\\nGoodbye!\\n')\r\n        else:\r\n            print('The selection was not a valid option.')\r\n            second_number_selection = input('Enter an option: ')\r\n\r\n\r\n#Print stats for the selected team; called from stats_menu() only\r\ndef stats(input_team):\r\n    total_height = 0\r\n    exp = 0\r\n    players_list = []\r\n    guardians_list = []\r\n    for player in input_team:\r\n        total_height += player['height']\r\n        players_list.append(player['name'])\r\n        guardians_list.extend(player['guardians'])\r\n        if player['experience']:\r\n            exp += 1\r\n    average_height = round(total_height / len(input_team),2)\r\n    \r\n    print('  Total players: {}'.format(len(input_team)))\r\n    print('  Total experienced: {}'.format(exp))\r\n    print('  Total inexperienced: {}'.format(len(input_team) - exp))\r\n    print('  Average height: {}'.format(average_height))   \r\n    print('  \\nPlayers on Team: \\n  ' + ', '.join(players_list))\r\n    print('  \\nGuardians: \\n  ' + ', '.join(guardians_list))   \r\n    input('\\nPress ENTER to continue... ')\r\n    start_menu()  \r\n\r\n\r\nif __name__ == \"__main__\":\r\n    clean_height()\r\n    clean_experience()\r\n    clean_guardians()\r\n    random_player_balance()\r\n    start_menu()\r\n","sub_path":"test_app.py","file_name":"test_app.py","file_ext":"py","file_size_in_byte":4409,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
{"seq_id":"95080374","text":"import time\nfrom turtle import Screen\nfrom player import Player\nfrom car_manager import CarManager\nfrom scoreboard import Scoreboard\n\nscreen = Screen()\nscreen.setup(width=600, height=600)\nscreen.tracer(0)\nplayer = Player()\ncar_manager = CarManager()\nscoreboard = Scoreboard()\nscreen.listen()\nscreen.onkey(player.move,\"Up\")\nscreen.onkey(player.right_p,\"l\")\nscreen.onkey(player.left_p,\"r\")\ngame_is_on = True\nwhile game_is_on:\n    time.sleep(0.1)\n    screen.update()\n    car_manager.create_car()\n    car_manager.start_move()\n    # detect collision with cars\n    for car in car_manager.cars:\n        if car.distance(player) < 20:\n            game_is_on = False\n            scoreboard.game_over()\n    if player.is_at_finish_line():\n        scoreboard.add_score()\n        player.go_to_start()\n\n\nscreen.exitonclick()\n\n\n\n","sub_path":"day25-turtle/pong/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":812,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
{"seq_id":"588412270","text":"# utility constants and hardware specifications for the robot\n\nimport numpy as np\n\nLIDAR_ANGLES = np.arange(-135,135.25, 0.25) * np.pi/180.0\nLIDAR_MIN_JITTER = 0.2\nLIDAR_MAX = 30\n\nP_LIDAR_TO_BODY = [0.29833, 0, 0.51435]\n\ndistance_per_tic = 0.0022\nyaw_deviation_res = 0.02 # 1.14 
degrees\n\n","sub_path":"utils/robot_utils.py","file_name":"robot_utils.py","file_ext":"py","file_size_in_byte":285,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"389034610","text":"import numpy as np\nimport pandas as pd\nimport time as t\nimport os\nimport sys\n\ntrain_read = sys.argv[1]\ntrain_path = os.path.abspath(train_read)\npath_train = os.path.dirname(train_path)\nos.chdir(path_train)\n\ntest_read = sys.argv[2]\ntest_path = os.path.abspath(test_read)\npath_test = os.path.dirname(test_path)\nos.chdir(path_test)\n\nx_train = (pd.read_csv(train_read, header = None, na_filter = False, low_memory = False)).values\nx_test = (pd.read_csv(test_read, header = None, na_filter = False, low_memory = False)).values\ny_train = list(x_train[:,0])\ny_test = list(x_test[:,0])\nx_train = x_train[:,1:]/255.0\nx_test = x_test[:,1:]/255.0\n\nmethod = 'sigmoid'\nbatch = 128\n\ndef f(l, method):\n if(method == 'relu'):\n l[l<0] = 0\n return(l)\n \n elif(method == 'sigmoid'):\n return(1.0/(1+np.exp(-l)))\n \n elif(method == 'tanh'):\n return(np.tanh(l))\n\ndef df(l, method):\n if(method == 'relu'):\n l[l<0] = 0\n l[l>0] = 1\n return(l)\n \n elif(method == 'sigmoid'):\n return(f(l, 'sigmoid')*(1-f(l, 'sigmoid')))\n \n elif(method == 'tanh'):\n return(1 - (np.tanh(l))**2)\n\ndef layers(inputs = 1024, outputs = 46, hidden = [100, 100]):\n w = [None]*(len(hidden))\n z = [None]*(len(hidden)) \n bias = [None]*(len(hidden)-1)\n \n w[0] = np.random.uniform(low = 0, high = 0.1, size = (inputs, hidden[0]))\n z[0] = np.random.rand(inputs,1)\n \n for i in range(1,len(hidden)):\n w[i] = np.random.uniform(low = 0, high = 0.1, size = (hidden[i-1],hidden[i]))\n z[i] = np.zeros((hidden[i-1],1))\n bias[i-1] = np.random.rand(hidden[i-1],1)\n \n w.append(np.random.uniform(low = 0, high = 0.1, size = (hidden[len(hidden)-1], outputs)))\n \n z.append(np.zeros((hidden[len(hidden)-1],1)))\n z.append(np.zeros((outputs,1)))\n \n bias.append(np.random.rand(hidden[len(hidden)-1],1))\n bias.append(np.random.rand(outputs,1))\n \n network = [w, z, bias]\n \n return network\n\ndef forward(network, ex):\n w = network[0]\n z = network[1]\n bias = network[2]\n \n ex = np.array(ex)\n ex.shape = (len(ex),1)\n \n z[0] = ex\n z[1] = (w[0].T).dot(z[0]) + bias[0]\n \n for i in range(1,len(w)-1):\n z[i+1] = (w[i].T).dot(f(z[i], method)) + bias[i]\n \n z[len(z)-1] = (w[len(w)-1].T).dot(f(z[len(z)-2], 'sigmoid')) + bias[len(bias)-1]\n \n return(network)\n\n# data = layers(inputs = 784, outputs = 10, hidden = [100])\n\n# net1 = forward(data, x_train[0])\n\n# out = [0]*10\n# out[y_train[0]] = 1\n# out = np.array(out)\n# out.shape = (len(out),1)\n\n# w = net1[0]\n# z = net1[1]\n# bias = net1[2]\n\n# delta = [None]*(len(z)-1)\n# del_w = [None]*(len(w))\n# del_b = [None]*(len(bias))\n\n# delta[len(delta)-1] = (f(z[len(z)-1], 'sigmoid') - out)*df(z[len(z)-1], 'sigmoid')\n# del_w[len(w)-1] = delta[len(delta)-1]*(f(z[len(z)-2], method).T)\n# del_b[len(bias)-1] = np.copy(delta[len(delta)-1])\n# del_w[0] = delta[0]*f(z[0], method).T\n# del_b[0] = (w[1].dot(delta[1]))*df(z[1], method)\n\ndef backward(network, out):\n w = network[0]\n z = network[1]\n bias = network[2]\n \n delta = [None]*(len(z)-1)\n del_w = [None]*(len(w))\n del_b = [None]*(len(bias))\n \n delta[len(delta)-1] = (f(z[len(z)-1], 'sigmoid') - out)*df(z[len(z)-1], 'sigmoid')\n del_w[len(w)-1] = delta[len(delta)-1]*(f(z[len(z)-2], method).T)\n del_b[len(bias)-1] = np.copy(delta[len(delta)-1])\n \n for i in range(len(delta)-2,-1,-1):\n 
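# walk backwards from the last hidden layer, propagating the error term via the chain rule\n        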
delta[i] = (w[i+1].dot(delta[i+1]))*df(z[i+1], method)\n del_w[i] = delta[i]*f(z[i], method).T\n del_b[i] = (w[i+1].dot(delta[i+1]))*df(z[i+1], method)\n \n del_w = np.array(del_w)\n del_b = np.array(del_b)\n chg = [del_w, del_b]\n \n return(chg)\n\ndef train(network, x_train, y_train, batch, rate):\n t_w = np.copy(network[0])*0\n t_b = np.copy(network[2])*0\n \n w = network[0]\n z = network[1]\n bias = network[2]\n \n for num_iter in range(1):\n for i in range(0,x_train.shape[0], batch):\n for j in range(i, i+batch):\n net = forward([w, z, bias], x_train[j,:])\n \n out = [0]*46\n out[y_train[j]] = 1\n out = np.array(out)\n out.shape = (len(out),1)\n \n delta_w, delta_b = backward(net, out)\n for k in range(len(t_w)):\n t_w[k] += delta_w[k].T\n t_b[k] += delta_b[k]\n \n for k in range(len(t_w)):\n w[k] -= (t_w[k])*(rate/((batch)*(num_iter+1)**0.5))\n bias[k] -= (t_b[k])*(rate/((batch)*(num_iter+1)**0.5))\n \n return(network) \n\n# one = layers(inputs = 1024, outputs = 46, hidden = [100, 100])\n# two = train(one, x_train, y_train, batch, 0.1)\n\n# ans = []\n# for i in range(x_test.shape[0]):\n# c = forward(b, x_test[i])\n# ans.append(np.argmax(f(c[1][2]/max(c[1][2]), 'sigmoid')))\n\nans = list(np.array(y_train)*0)\n\nout_write = sys.argv[3]\npath_out = os.path.abspath(out_write)\nout_path = os.path.dirname(path_out)\nos.chdir(out_path)\nnp.savetxt(out_write, ans)\n\n","sub_path":"assign_2_1c.py","file_name":"assign_2_1c.py","file_ext":"py","file_size_in_byte":5121,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"259755665","text":"from CSIKit.csi import CSIFrame\n\nimport ast\nimport numpy as np\n\nclass ESP32CSIFrame(CSIFrame):\n\n # https://docs.espressif.com/projects/esp-idf/en/latest/esp32/api-reference/network/esp_wifi.html#_CPPv418wifi_pkt_rx_ctrl_t\n # https://github.com/espressif/esp-idf/blob/9d0ca60398481a44861542638cfdc1949bb6f312/components/esp_wifi/include/esp_wifi_types.h#L314\n\n SIGS = {\n 0: \"nonHT\",\n 1: \"HT\",\n 3: \"VHT\"\n }\n\n SECONDARY_CHANNELS = {\n 0: \"none\",\n 1: \"above\",\n 2: \"below\"\n }\n\n __slots__ = [\"type\", \"role\", \"mac\", \"rssi\", \"rate\", \"sig_mode\", \"mcs\", \"bandwidth\", \"smoothing\", \"not_sounding\",\n \"aggregation\", \"stbc\", \"fec_coding\", \"sgi\", \"noise_floor\", \"ampdu_cnt\", \"channel\", \"secondary_channel\",\n \"local_timestamp\", \"ant\", \"sig_len\", \"rx_state\", \"real_time_set\", \"real_timestamp\", \"len\", \"CSI_DATA\"]\n def __init__(self, csv_line: list):\n self.type = csv_line[0]\n self.role = csv_line[1]\n self.mac = csv_line[2]\n\n self.rssi = int(csv_line[3])\n\n # https://en.wikipedia.org/wiki/IEEE_802.11n-2009#Data_rates\n self.rate = int(csv_line[4])\n\n self.sig_mode = self.SIGS[int(csv_line[5])]\n\n # https://en.wikipedia.org/wiki/IEEE_802.11n-2009#Data_rates\n self.mcs = int(csv_line[6])\n\n # MHz\n self.bandwidth = 20 if csv_line[7] == \"0\" else 40\n\n # Supposedly reserved\n self.smoothing = int(csv_line[8])\n self.not_sounding = int(csv_line[9])\n\n self.aggregation = \"MPDU\" if csv_line[10] == \"0\" else \"AMPDU\"\n self.stbc = bool(int(csv_line[11]))\n self.fec_coding = bool(int(csv_line[12]))\n\n self.sgi = \"long\" if csv_line[13] == \"0\" else \"short\"\n\n # Unit: 0.25dBm\n self.noise_floor = int(csv_line[14])\n self.ampdu_cnt = int(csv_line[15])\n self.channel = int(csv_line[16])\n self.secondary_channel = self.SECONDARY_CHANNELS[int(csv_line[17])]\n self.local_timestamp = int(csv_line[18])\n self.ant = int(csv_line[19])\n\n 
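# frame length, receiver state, timestamps, and the raw CSI payload follow\n        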
self.sig_len = int(csv_line[20])\n self.rx_state = int(csv_line[21])\n self.real_time_set = bool(int(csv_line[22]))\n self.real_timestamp = float(csv_line[23])\n self.len = int(csv_line[24])\n\n string_data = csv_line[25]\n self.csi_matrix = ESP32CSIFrame.parse_matrix(string_data)\n\n @staticmethod\n def parse_matrix(string_data, bandwidth=20):\n array_string = string_data.replace(\" \", \", \")\n array_string_asarray = ast.literal_eval(array_string)\n\n if bandwidth == 20 and len(array_string_asarray) < 128:\n ESP32CSIFrame.fill_missing(array_string_asarray, 128)\n elif bandwidth == 40 and len(array_string_asarray) < 256:\n ESP32CSIFrame.fill_missing(array_string_asarray, 256)\n\n int8_matrix = np.array(array_string_asarray)\n int8_matrix = int8_matrix.reshape(-1, 2)\n\n complex_matrix = int8_matrix.astype(np.float32).view(np.complex64)\n return complex_matrix\n\n # Seems some CSI lines are missing a value.\n # Very rare, I assume weird dropped behaviour.\n # Probably not the best way to fill the gap.\n @staticmethod\n def fill_missing(array, expected_length):\n remainder = expected_length - len(array)\n for _ in range(remainder):\n array.append(0)","sub_path":"CSIKit/csi/frames/esp.py","file_name":"esp.py","file_ext":"py","file_size_in_byte":3367,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"3819683","text":"import global_vars\nfrom main.command import command_listener\nimport telegram\nimport logging\nfrom telegram.ext import Filters, CommandHandler\nfrom bot_constant import FORWARD_LIST\nlogger = logging.getLogger(\"CTB.\" + __name__)\n\nlogger.debug(__name__ + \" loading\")\n\n\n@command_listener('show group id', 'id', tg_only=True, description='show current telegram group id')\ndef show_tg_group_id(tg_group_id: int,\n tg_user: telegram.User,\n tg_message_id: int,\n tg_reply_to: telegram.Message):\n msg = 'Telegram group id is: ' + str(tg_group_id)\n global_vars.tg_bot.sendMessage(chat_id=tg_group_id,\n text=msg)\n\n\n@command_listener('show group id', 'id', qq_only=True, description='show current QQ group id')\ndef show_qq_group_id(qq_group_id: int,\n qq_discuss_id: int,\n qq_user: int):\n if qq_group_id:\n msg = 'QQ group id is: ' + str(qq_group_id)\n return {'reply': msg}\n else:\n msg = 'QQ discuss id is: ' + str(qq_discuss_id)\n return {'reply': msg}\ndef get_connected_groups(bot: telegram.Bot,\n update: telegram.Update,\n args: list):\n if update.message.from_user.id != global_vars.admin_list['TG'][0]:\n return\n s = \"\"\n for forward in FORWARD_LIST:\n s += \"QQ: `%d`, TG: `%d`\\n\"%(forward['QQ'], forward['TG'])\n update.message.reply_markdown(text=s)\nglobal_vars.dp.add_handler(CommandHandler(command='get_connected_groups',\n callback=get_connected_groups,\n filters=Filters.private,\n pass_args=True))\n","sub_path":"plugins/show_group_id.py","file_name":"show_group_id.py","file_ext":"py","file_size_in_byte":1753,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"399996452","text":"from django.urls import path, include\nfrom django.contrib.auth import views as auth_views\n\nfrom .import views\n\napp_name = 'users'\n\nurlpatterns = [\n path('login/', views.UserLoginView.as_view(), name='login'),\n path('signup/', views.signup, name='signup'),\n path('confirm-email///',\n views.ConfirmRegistrationView.as_view(), name='confirm-email'),\n path('/profile/', views.UserProfileView.as_view(),\n name='user-profile'),\n path('/profile/update/', 
views.UserProfileUpdateView.as_view(),\n name='user-profile'),\n path('', include('django.contrib.auth.urls')),\n]","sub_path":"users/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":647,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"502731341","text":"import os\nfrom ranger.core.loader import CommandLoader\nfrom ranger.api.commands import *\n\n\nclass tar(Command):\n def execute(self):\n \"\"\" Compress marked files in current buffer \"\"\"\n cwd = self.fm.thisdir\n marked_files = cwd.get_selection() or []\n\n if not marked_files:\n return\n\n def refresh(_):\n cwd = self.fm.get_directory(original_path)\n cwd.load_content()\n\n original_path = cwd.path\n parts = self.line.split()\n args = parts[1:] if parts else \"\"\n\n desc = \"archiving files in: \" + os.path.basename(parts[1])\n obj = CommandLoader(\n args=[\"tar\", \"czf\"]\n + args\n + [os.path.relpath(f.path, cwd.path) for f in marked_files],\n descr=desc,\n read=True,\n )\n\n obj.signal_bind(\"after\", refresh)\n self.fm.loader.add(obj)\n\n self.fm.notify(\"compressed!\")\n\n def tab(self):\n cwd = self.fm.thisdir.path\n return str(os.path.basename(cwd)) + \".tar.gz\"\n\n\nclass shred(Command):\n def execute(self):\n \"\"\" Compress marked files in current buffer \"\"\"\n if self.rest(1):\n self.fm.notify(\n \"Error: shred takes no arguments! It shreds the selected file(s).\",\n bad=True,\n )\n return\n\n cwd = self.fm.thisdir\n cf = self.fm.thisfile\n\n many_files = cwd.marked_items or (\n cf.is_directory and not cf.is_link and len(os.listdir(cf.path)) > 0\n )\n\n marked_files = cwd.get_selection() or cf\n\n if not marked_files:\n self.fm.notify(\"Error: no file selected for shredding!\", bad=True)\n return\n\n self.fm.ui.console.ask(\n \"Confirm shredding of: %s (y/n)\"\n % \", \".join(f.basename for f in marked_files),\n self._question_callback,\n )\n\n def _question_callback(self, answer):\n if answer.lower() != \"y\":\n return\n\n original_path = self.fm.thisdir\n\n def refresh(_):\n cwd = self.fm.get_directory(original_path)\n cwd.load_content()\n\n marked_files = self.fm.thisdir.get_selection() or self.fm.thisfile\n\n args = [\"shred\", \"-u\"] + [os.path.abspath(f.path) for f in marked_files]\n obj = CommandLoader(args=args, read=True, descr=\"shred files\")\n obj.signal_bind(\"after\", refresh)\n\n self.fm.loader.add(obj)\n self.fm.notify(\"shredding!\")\n\n\nclass fzf_select(Command):\n \"\"\"\n :fzf_select\n\n Find a file using fzf.\n\n With a prefix argument select only directories.\n\n See: https://github.com/junegunn/fzf\n \"\"\"\n\n def execute(self):\n import subprocess\n import os.path\n\n if self.quantifier:\n # match only directories\n command = \"find -L . \\( -path '*/\\.*' -o -fstype 'dev' -o -fstype 'proc' \\) -prune \\\n -o -type d -print 2> /dev/null | sed 1d | cut -b3- | fzf +m\"\n else:\n # match files and directories\n command = \"find -L . 
\\( -path '*/\\.*' -o -fstype 'dev' -o -fstype 'proc' \\) -prune \\\n -o -print 2> /dev/null | sed 1d | cut -b3- | fzf +m\"\n fzf = self.fm.execute_command(\n command, universal_newlines=True, stdout=subprocess.PIPE\n )\n stdout, stderr = fzf.communicate()\n if fzf.returncode == 0:\n fzf_file = os.path.abspath(stdout.rstrip(\"\\n\"))\n if os.path.isdir(fzf_file):\n self.fm.cd(fzf_file)\n else:\n self.fm.select_file(fzf_file)\n","sub_path":"ranger/.config/ranger/commands.py","file_name":"commands.py","file_ext":"py","file_size_in_byte":3590,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"624333650","text":"# -*- coding: UTF-8 -*-\n\nimport numpy\nfrom PIL import Image\nimport binascii\nfrom os import listdir\nfrom os.path import isfile, join\n\ndef getMatrixfrom_bin(filename, width):\n with open(filename, 'rb') as f:\n content = f.read()\n hexst = binascii.hexlify(content) #将二进制文件转换为十六进制字符串\n fh = numpy.array([int(hexst[i:i+2], 16) for i in range(0, len(hexst), 2)]) #按字节分割\n rn = len(fh)/width\n fh = numpy.reshape(fh[:rn*width], (-1, width)) #根据设定的宽度生成矩阵\n fh = numpy.uint8(fh)\n return fh\n\nsource_path = \"/home/nick/research/vs/Virus.Win\"\nall_files = [f for f in listdir(source_path) if isfile(join(source_path, f))]\nkey_word = \"Virus.Win32.Bube\"\nparticular_files = [f for f in all_files if key_word in f]\n\nfor f in particular_files:\n filename = join(source_path, f)\n im = Image.fromarray(getMatrixfrom_bin(filename, 64)) #转换为图像\n im.save(\"/home/nick/research/vs-output-images/Virus.Win.Output.Images/{0}.png\".format(f))\n","sub_path":"src/generate_image_from_binary.py","file_name":"generate_image_from_binary.py","file_ext":"py","file_size_in_byte":1021,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"19242165","text":"#!/usr/bin/python\n\nfrom urlparse import urlparse, parse_qs\nfrom BaseHTTPServer import BaseHTTPRequestHandler,HTTPServer\n\nserverPort = 80\n\nclass webServer(BaseHTTPRequestHandler):\n\n def do_GET(self,):\n self.send_response(200)\n self.send_header(\"Content-type\",\"text/html\")\n self.end_headers()\n\n query_components = parse_qs(urlparse(self.path).query)\n if \"result\" in query_components:\n print(query_components[\"result\"][0].decode(\"base64\"))\n self.wfile.write(\"\")\n return\n cmd = raw_input(\"$ \")\n self.wfile.write(\"{}\".format(cmd))\n return\n\n def log_message(self, format, *args):\n return\n\ntry:\n server = HTTPServer((\"\", serverPort), webServer)\n print(\"Server running on port: {}\".format(serverPort))\n server.serve_forever()\nexcept KeyboardInterrupt:\n server.socket.close()\n","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":881,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"33638589","text":"\nimport logging\n\nfrom text_transformer.tt_unchangeable_text_sub_part import TTUnchangeableTextSubPart\nfrom text_transformer.tt_changeable_text_sub_part import TTChangeableTextSubPart\n\n\n\n\n\nclass TTIndivisibleTextPart:\n \"\"\"A TTIndivisibleTextPart is a text part that can be changed only in\nit's complete form. Can not be diviseble. It is usefull for situations\nlike computer programs transformations, where each program token must\nbe changed only in it's complete for. Example. Consider changing the\nfunction name \"f\" to \"g\" in a Python program. 
The \"f\" in the \"def\"\nkeyword should not be changed to \"g\".\n\n    \"\"\"\n\n\n\n\n\n    def __init__(self, text_string):\n\n        self.sub_part = TTUnchangeableTextSubPart(text_string)\n\n        # keys are the substrings in the original string to be\n        # transformed (changeable strings). values are lists with\n        # sub-part occurrence objects\n        self.sub_parts_dictionary = {}\n\n\n\n\n\n    def to_string(self):\n\n        return self.sub_part.to_string()\n\n\n\n\n    \n    # def get_string_sub_parts(self, original_text, changeable_string):\n\n    #     splited_text = original_text.split(changeable_string)\n\n    #     sub_parts_list = []\n\n    #     for element in splited_text[0:-1]:\n\n    #         sub_part = TTUnchangeableTextSubPart(element)\n    #         sub_parts_list.append(sub_part)\n\n    #         sub_part = TTChangeableTextSubPart(changeable_string)\n    #         sub_parts_list.append(sub_part)\n    #         #self.transformer.append_sub_part(changeable_string, sub_part)\n    #         self.sub_parts_dictionary[changeable_string].append(sub_part)\n\n    #     last_element = splited_text[-1]\n    #     sub_part = TTUnchangeableTextSubPart(last_element)\n    #     sub_parts_list.append(sub_part)\n\n    #     return sub_parts_list\n    \n\n\n\n\n    def create_sub_parts(self, changeable_string):\n\n        logging.debug(\"TTIndivisibleTextPart: create_sub_parts original_string = \\\"\"\n                      + changeable_string + \"\\\"\")\n\n        self.sub_parts_dictionary[changeable_string] = []\n\n        if isinstance(self.sub_part, TTUnchangeableTextSubPart):\n\n            if self.get_original_text() == changeable_string:\n\n                new_sub_part = TTChangeableTextSubPart(changeable_string)\n                self.sub_part = new_sub_part\n                self.sub_parts_dictionary[changeable_string].append(new_sub_part)\n\n        elif isinstance(self.sub_part, TTChangeableTextSubPart):\n\n            # already a TTChangeableTextSubPart. Nothing to do.\n            pass\n\n        else:\n            raise Exception(\"TTIndivisibleTextPart: this should be impossible to reach.\")\n\n\n\n\n\n    # def get_sub_parts_list(self):\n\n    #     return self.sub_parts_list\n\n\n\n\n\n    def get_changeable_sub_parts(self, changeable_string):\n\n        return self.sub_parts_dictionary[changeable_string]\n\n\n\n\n\n    def transform(self, original_string, new_string):\n\n        #print('|||||||| part transform')\n\n        if (isinstance(self.sub_part, TTChangeableTextSubPart) and\n            self.sub_part.get_original_text() == original_string):\n\n            #print('|||||||| sub part transform')\n            \n            self.sub_part.transform(new_string)\n            \n\n\n\n\n    def get_original_text(self):\n\n        return self.sub_part.get_original_text()\n    \n","sub_path":"03_Implementação/qom_questions_transformer/text_transformer/tt_indivisible_text_part.py","file_name":"tt_indivisible_text_part.py","file_ext":"py","file_size_in_byte":3267,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
{"seq_id":"232876832","text":"\nimport argparse\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--language\", dest=\"language\", type=str)\nparser.add_argument(\"--load-from\", dest=\"load_from\", type=str)\n\nargs=parser.parse_args()\nprint(args)\n\ni = 0\nimport os.path\nimport subprocess\n\n\n\n\nwhile os.path.isfile(\"/checkpoint/mhahn/wiki-german-nospaces-bugfix-checkpoints_CHECKPOINT\"+str(i)+\".pth.tar\"):\n   subprocess.call([\"python\", \"char-lm-ud-stationary-separate-bidir-with-spaces-probe-baseline-prediction-wiki.py\", \"--language\", \"german\", \"--batchSize\", \"128\", \"--char_embedding_size\", \"100\", \"--hidden_dim\", \"1024\", \"--layer_num\", \"2\", \"--weight_dropout_in\", \"0.1\", \"--weight_dropout_hidden\", \"0.35\", \"--char_dropout_prob\", \"0.0\", \"--char_noise_prob\", \"0.01\", \"--learning_rate\", 
\"0.2\", \"--load-from\", \"wiki-german-nospaces-bugfix-checkpoints_CHECKPOINT\"+str(i)])\n i += 1\n\n","sub_path":"germanGenderTrajectories.py","file_name":"germanGenderTrajectories.py","file_ext":"py","file_size_in_byte":846,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"602819263","text":"# -*- coding:utf-8 -*-\nfrom django.apps import AppConfig\nfrom django.conf import settings\nfrom django.utils.translation import ugettext_lazy as _\n\n\nclass MySQLConfig(AppConfig):\n name = 'django_mysql'\n verbose_name = _('MySQL extensions')\n\n def ready(self):\n self.perform_monkey_patches()\n self.add_lookups()\n\n def perform_monkey_patches(self):\n from django.db.backends.mysql.base import DatabaseWrapper\n from django_mysql import monkey_patches\n\n # Fine to patch straight on since it's a cached_property descriptor\n DatabaseWrapper.is_mariadb = monkey_patches.is_mariadb\n\n # Depends on setting\n if getattr(settings, 'DJANGO_MYSQL_REWRITE_QUERIES', False):\n monkey_patches.patch_CursorWrapper_execute()\n\n def add_lookups(self):\n from django.db.models import CharField, TextField\n from django_mysql.models.lookups import (\n CaseSensitiveExact, Soundex, SoundsLike\n )\n\n CharField.register_lookup(CaseSensitiveExact)\n CharField.register_lookup(SoundsLike)\n CharField.register_lookup(Soundex)\n TextField.register_lookup(CaseSensitiveExact)\n TextField.register_lookup(SoundsLike)\n TextField.register_lookup(Soundex)\n","sub_path":"django_mysql/apps.py","file_name":"apps.py","file_ext":"py","file_size_in_byte":1266,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"400337247","text":"from concurrent.futures import ThreadPoolExecutor\n\nfrom packaging import version\nimport pandas as pd\nimport requests\nfrom requests_futures.sessions import FuturesSession\nfrom tqdm.auto import tqdm\n\nfrom mstrio.api import reports\nimport mstrio.config as config\nfrom mstrio.connection import Connection\nfrom mstrio.utils.filter import Filter\nfrom mstrio.utils.helper import fallback_on_timeout\nimport mstrio.utils.helper as helper\nfrom mstrio.utils.parser import Parser\n\n\nclass Report:\n \"\"\"Access, filter, publish, and extract data from in-memory reports.\n\n Create a Report object to load basic information on a report dataset.\n Specify subset of report to be fetched through `Report.apply_filters()` and\n `Report.clear_filters()`. Fetch dataset through `Report.to_dataframe()`\n method.\n\n Attributes:\n connection: MicroStrategy connection object returned by\n `connection.Connection()`.\n report_id: Identifier of a pre-existing report containing the required\n data.\n instance_id (str): Identifier of an instance if report instance has been\n already initialized, NULL by default.\n parallel (bool, optional): If True (default), utilize optimal number of\n threads to increase the download speed. 
If False, this feature will\n be disabled.\n progress_bar(bool, optional): If True (default), show the download\n progress bar.\n \"\"\"\n\n def __init__(self, connection: \"Connection\", report_id: str, instance_id: str = None,\n parallel: bool = True, progress_bar: bool = True):\n \"\"\"Initialize an instance of a report.\n\n Args:\n connection: MicroStrategy connection object returned by\n `connection.Connection()`.\n report_id (str): Identifier of a pre-existing report containing\n the required data.\n instance_id (str): Identifier of an instance if report instance has\n been already initialized, NULL by default.\n parallel (bool, optional): If True (default), utilize optimal number\n of threads to increase the download speed. If False, this\n feature will be disabled.\n progress_bar(bool, optional): If True (default), show the download\n progress bar.\n \"\"\"\n if not connection.application_id:\n helper.exception_handler(\n (\"Please provide an application id or application name when creating the\"\n \"Connection object.\"), ConnectionError)\n self._connection = connection\n self._report_id = report_id\n self.instance_id = instance_id\n self.parallel = parallel\n self.progress_bar = True if progress_bar and config.progress_bar else False\n\n self._subtotals = None\n self.cross_tab = False\n self.cross_tab_filter = {}\n self._size_limit = 10000000 # this sets desired chunk size in bytes\n self._initial_limit = 1000 # initial limit for the report_instance request\n self._dataframe = None\n self._attr_elements = None\n\n # load report information\n self.__definition()\n self.__filter = Filter(attributes=self.attributes, metrics=self.metrics)\n\n def to_dataframe(self, limit: int = None) -> pd.DataFrame:\n \"\"\"Extract contents of a report instance into a Pandas `DataFrame`.\n\n Args:\n limit (None or int, optional): Used to control data extract\n behavior. By default (None) the limit is calculated\n automatically, based on an optimized physical size of one chunk.\n Setting limit manually will force the number of rows per chunk.\n Depending on system resources, a higher limit (e.g. 
50,000) may\n reduce the total time required to extract the entire dataset.\n\n Returns:\n Pandas Data Frame containing the report contents.\n \"\"\"\n if limit:\n self._initial_limit = limit\n self.instance_id = None\n\n if self.instance_id is None:\n res = self.__initialize_report(self._initial_limit)\n else:\n # try to get first chunk from already initialized instance of report\n # if not possible, initialize new instance\n try:\n res = self.__get_chunk(instance_id=self.instance_id, offset=0,\n limit=self._initial_limit)\n except requests.HTTPError:\n res = self.__initialize_report(self._initial_limit)\n\n # Gets the pagination totals from the response object\n _instance = res.json()\n self.instance_id = _instance['instanceId']\n paging = _instance['data']['paging']\n\n # initialize parser and process first response\n p = Parser(response=_instance, parse_cube=False)\n p.parse(response=_instance)\n\n # If there are more rows to fetch, fetch them\n if paging['current'] != paging['total']:\n if not limit:\n limit = max(1000, int((self._initial_limit * self._size_limit) / len(res.content)))\n # Count the number of additional iterations\n it_total = int((paging['total'] - self._initial_limit) / limit) + \\\n ((paging['total'] - self._initial_limit) % limit != 0)\n\n if self.parallel and it_total > 1:\n threads = helper.get_parallel_number(it_total)\n with FuturesSession(executor=ThreadPoolExecutor(max_workers=threads),\n session=self._connection.session) as session:\n fetch_pbar = tqdm(desc=\"Downloading\", total=it_total + 1,\n disable=(not self.progress_bar))\n future = self.__fetch_chunks_future(session, paging, self.instance_id, limit)\n fetch_pbar.update()\n for i, f in enumerate(future, start=1):\n response = f.result()\n if not response.ok:\n helper.response_handler(response, \"Error getting report contents.\")\n fetch_pbar.update()\n fetch_pbar.set_postfix(\n rows=str(min(self._initial_limit + i * limit, paging['total'])))\n p.parse(response.json())\n fetch_pbar.close()\n else:\n self.__fetch_chunks(p, paging, it_total, self.instance_id, limit)\n\n # return parsed data as a data frame\n self._dataframe = p.dataframe\n\n # filter dataframe if report had crosstabs and filters were applied\n if self.cross_tab_filter != {}:\n if self.cross_tab_filter['metrics'] is not None:\n # drop metrics columns from dataframe\n metr_names = [\n el['name'] for el in list(\n filter(lambda x: x['id'] not in self.cross_tab_filter['metrics'],\n self.metrics))\n ]\n self._dataframe = self._dataframe.drop(metr_names, axis=1)\n\n if self.cross_tab_filter['attr_elements'] is not None:\n # create dict of attributes and elements to iterate through\n attr_dict = {}\n for attribute in self.cross_tab_filter['attr_elements']:\n key = attribute[:32]\n attr_dict.setdefault(key, []).append(attribute[33:])\n # initialize indexes series for filter\n indexes = pd.Series([False] * len(self._dataframe))\n\n # logical OR for filtered attribute elements\n for attribute in attr_dict:\n attr_name = list(filter(lambda x: x['id'] in attribute,\n self.attributes))[0]['name']\n elements = attr_dict[attribute]\n indexes = indexes | self._dataframe[attr_name].isin(elements)\n # select datframe indexes with\n self._dataframe = self._dataframe[indexes]\n\n if self.cross_tab_filter['attributes'] is not None:\n attr_names = [\n el['name'] for el in list(\n filter(lambda x: x['id'] not in self.cross_tab_filter['attributes'],\n self.attributes))\n ]\n # filtering out attribute forms cloumns\n to_be_removed = []\n to_be_added = []\n for 
attr in attr_names:\n forms = [\n column for column in self._dataframe.columns\n if column.startswith(attr + '@')\n ]\n if forms:\n to_be_removed.append(attr)\n to_be_added.extend(forms)\n for elem in to_be_removed:\n attr_names.remove(elem)\n attr_names.extend(to_be_added)\n # drop filtered out columns\n self._dataframe = self._dataframe.drop(attr_names, axis=1)\n return self._dataframe\n\n def __fetch_chunks_future(self, future_session, pagination, instance_id, limit):\n # Fetch add'l rows from this object instance\n return [\n reports.report_instance_id_coroutine(\n future_session,\n connection=self._connection,\n report_id=self._report_id,\n instance_id=instance_id,\n offset=_offset,\n limit=limit,\n ) for _offset in range(self._initial_limit, pagination['total'], limit)\n ]\n\n def __fetch_chunks(self, parser, pagination, it_total, instance_id, limit):\n\n # Fetch add'l rows from this object instance\n with tqdm(desc=\"Downloading\", total=it_total + 1,\n disable=(not self.progress_bar)) as fetch_pbar:\n fetch_pbar.update()\n for _offset in range(self._initial_limit, pagination['total'], limit):\n response = self.__get_chunk(instance_id=instance_id, offset=_offset, limit=limit)\n fetch_pbar.update()\n fetch_pbar.set_postfix(rows=str(min(_offset + limit, pagination['total'])))\n parser.parse(response=response.json())\n\n def __initialize_report(self, limit: int) -> requests.Response:\n inst_pbar = tqdm(desc='Initializing an instance of a report. Please wait...',\n bar_format='{desc}', leave=False, ncols=285,\n disable=(not self.progress_bar))\n\n # Switch off subtotals if I-Server version is higher than 11.2.1\n body = self.__filter._filter_body()\n if version.parse(self._connection.iserver_version) >= version.parse(\"11.2.0100\"):\n self._subtotals[\"visible\"] = False\n body[\"subtotals\"] = {\"visible\": self._subtotals[\"visible\"]}\n\n # Request a new instance, set instance id\n response = reports.report_instance(\n connection=self._connection,\n report_id=self._report_id,\n body=body,\n offset=0,\n limit=self._initial_limit,\n )\n inst_pbar.close()\n return response\n\n def __get_chunk(self, instance_id: str, offset: int, limit: int) -> requests.Response:\n return reports.report_instance_id(\n connection=self._connection,\n report_id=self._report_id,\n instance_id=instance_id,\n offset=offset,\n limit=limit,\n )\n\n def apply_filters(self, attributes: list = None, metrics: list = None,\n attr_elements: list = None, operator: str = 'In') -> None:\n \"\"\"Apply filters on the reports's objects.\n\n Filter by attributes, metrics and attribute elements.\n\n Args:\n attributes (list or None, optional): ids of attributes to be\n included in the filter. If list is empty, no attributes will be\n selected and metric data will be aggregated.\n metrics (list or None, optional): ids of metrics to be included in\n the filter. If list is empty, no metrics will be selected.\n attr_elements (list or None, optional): attribute elements to be\n included in the filter.\n operator (str, optional): a str flag used to specify if the\n attribute elements selected inside the filter should be included\n or excluded. 
Allowed values are: 'In', 'NotIn'.\n \"\"\"\n filtering_is_requested = bool(not all(\n element is None for element in [attributes, metrics, attr_elements]))\n\n if self.cross_tab:\n self.cross_tab_filter = {\n 'attributes': attributes,\n 'metrics': metrics,\n 'attr_elements': attr_elements\n }\n elif filtering_is_requested:\n self.__filter._clear(attributes=attributes, metrics=metrics,\n attr_elements=attr_elements)\n self.__filter.operator = operator\n self._select_attribute_filter_conditionally(attributes)\n self._select_metric_filter_conditionally(metrics)\n self._select_attr_el_filter_conditionally(attr_elements)\n # Clear instance, to generate new with new filters\n self.instance_id = None\n\n def _select_attribute_filter_conditionally(self, attributes_filtered) -> None:\n if attributes_filtered:\n self.__filter._select(object_id=attributes_filtered)\n elif attributes_filtered is not None:\n self.__filter.attr_selected = []\n\n def _select_metric_filter_conditionally(self, metrics_filtered) -> None:\n if metrics_filtered:\n self.__filter._select(object_id=metrics_filtered)\n elif metrics_filtered is not None:\n self.__filter.metr_selected = []\n\n def _select_attr_el_filter_conditionally(self, attr_el_filtered) -> None:\n if attr_el_filtered is not None:\n self.__filter._select_attr_el(element_id=attr_el_filtered)\n\n def clear_filters(self) -> None:\n \"\"\"Clear previously set filters, allowing all attributes, metrics, and\n attribute elements to be retrieved.\"\"\"\n\n self.__filter._clear()\n if self.cross_tab:\n self.__filter._select(object_id=[el['id'] for el in self.attributes])\n self.__filter._select(object_id=[el['id'] for el in self.metrics])\n # Clear instance, to generate new with new filters\n self.instance_id = None\n\n def __definition(self) -> None:\n \"\"\"Get the definition of a report, including attributes and metrics.\n\n Implements GET /v2/reports/.\n \"\"\"\n\n response = reports.report_definition(connection=self._connection,\n report_id=self._report_id).json()\n\n grid = response[\"definition\"][\"grid\"]\n available_objects = response['definition']['availableObjects']\n\n if version.parse(self._connection.iserver_version) >= version.parse(\"11.2.0100\"):\n self._subtotals = grid[\"subtotals\"]\n self._name = response[\"name\"]\n self.cross_tab = grid[\"crossTab\"]\n\n # Check if report have custom groups or consolidations\n if available_objects['customGroups']:\n helper.exception_handler(msg=\"Reports with custom groups are not supported.\",\n exception_type=ImportError)\n if available_objects['consolidations']:\n helper.exception_handler(msg=\"Reports with consolidations are not supported.\",\n exception_type=ImportError)\n\n full_attributes = []\n for row in grid[\"rows\"]:\n if row[\"type\"] == \"attribute\":\n full_attributes.append(row)\n for column in grid[\"columns\"]:\n if column[\"type\"] == \"attribute\":\n full_attributes.append(column)\n self._attributes = [{'name': attr['name'], 'id': attr['id']} for attr in full_attributes]\n\n # Retrieve metrics from the report grid (only selected metrics)\n metrics_position = grid.get(\"metricsPosition\")\n if metrics_position is None:\n self._metrics = []\n else:\n full_metrics = grid[metrics_position[\"axis\"]][metrics_position[\"index\"]][\"elements\"]\n self._metrics = [{'name': metr['name'], 'id': metr['id']} for metr in full_metrics]\n\n def __get_attr_elements(self, limit: int = 50000) -> list:\n \"\"\"Get elements of report attributes synchronously.\n\n Implements GET /reports//attributes//elements.\n 
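Elements are fetched incrementally, in chunks of at most 'limit' rows.\n        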
\"\"\"\n\n def fetch_for_attribute(attribute):\n\n @fallback_on_timeout()\n def fetch_for_attribute_given_limit(limit):\n response = reports.report_single_attribute_elements(\n connection=self._connection,\n report_id=self._report_id,\n attribute_id=attribute['id'],\n offset=0,\n limit=limit,\n )\n # Get total number of rows from headers.\n total = int(response.headers['x-mstr-total-count'])\n # Get attribute elements from the response.\n elements = response.json()\n\n # If total number of elements is bigger than the chunk size\n # (limit), fetch them incrementally.\n for _offset in range(limit, total, limit):\n response = reports.report_single_attribute_elements(\n connection=self._connection,\n report_id=self._report_id,\n attribute_id=attribute['id'],\n offset=_offset,\n limit=limit,\n )\n elements.extend(response.json())\n\n # Return attribute data.\n return {\n \"attribute_name\": attribute['name'],\n \"attribute_id\": attribute['id'],\n \"elements\": elements\n }\n\n return fetch_for_attribute_given_limit(limit)[0]\n\n attr_elements = []\n if self.attributes:\n pbar = tqdm(self.attributes, desc=\"Loading attribute elements\", leave=False,\n disable=(not self.progress_bar))\n attr_elements = [fetch_for_attribute(attribute) for attribute in pbar]\n pbar.close()\n\n return attr_elements\n\n def __get_attr_elements_async(self, limit: int = 50000) -> list:\n \"\"\"Get elements of report attributes asynchronously.\n\n Implements GET /reports//attributes//elements.\n \"\"\"\n\n attr_elements = []\n if self.attributes:\n threads = helper.get_parallel_number(len(self.attributes))\n with FuturesSession(executor=ThreadPoolExecutor(max_workers=threads),\n session=self._connection.session) as session:\n # Fetch first chunk of attribute elements.\n futures = self.__fetch_attribute_elements_chunks(session, limit)\n pbar = tqdm(futures, desc=\"Loading attribute elements\", leave=False,\n disable=(not self.progress_bar))\n for i, future in enumerate(pbar):\n attr = self.attributes[i]\n response = future.result()\n if not response.ok:\n helper.response_handler(\n response, \"Error getting attribute \" + attr['name'] + \" elements\")\n elements = response.json()\n # Get total number of rows from headers.\n total = int(response.headers['x-mstr-total-count'])\n for _offset in range(limit, total, limit):\n response = reports.report_single_attribute_elements(\n connection=self._connection,\n report_id=self._report_id,\n attribute_id=attr[\"id\"],\n offset=_offset,\n limit=limit,\n )\n elements.extend(response.json())\n # Append attribute data to the list of attributes.\n attr_elements.append({\n \"attribute_name\": attr['name'],\n \"attribute_id\": attr['id'],\n \"elements\": elements\n })\n pbar.close()\n\n return attr_elements\n\n def __fetch_attribute_elements_chunks(self, future_session, limit: int) -> list:\n # Fetch add'l rows from this object instance\n return [\n reports.report_single_attribute_elements_coroutine(\n future_session,\n connection=self._connection,\n report_id=self._report_id,\n attribute_id=attribute['id'],\n offset=0,\n limit=limit,\n ) for attribute in self.attributes\n ]\n\n @property\n def name(self):\n return self._name\n\n @property\n def attributes(self):\n return self._attributes\n\n @property\n def metrics(self):\n return self._metrics\n\n @property\n def attr_elements(self):\n if not self._attr_elements:\n if self.parallel is True:\n # TODO: move the fallback inside the function to apply\n # per-attribute, like with non-async version.\n self._attr_elements = 
fallback_on_timeout()(\n self.__get_attr_elements_async)(50000)[0]\n else:\n self._attr_elements = self.__get_attr_elements()\n self.__filter.attr_elem_selected = self._attr_elements\n return self._attr_elements\n\n @property\n def selected_attributes(self):\n return self.__filter.attr_selected\n\n @property\n def selected_metrics(self):\n return self.__filter.metr_selected\n\n @property\n def selected_attr_elements(self):\n return self.__filter.attr_elem_selected\n\n @property\n def dataframe(self):\n if self._dataframe is None:\n helper.exception_handler(\n msg=\"Dataframe not loaded. Retrieve with Report.to_dataframe().\",\n exception_type=Warning)\n return self._dataframe\n","sub_path":"mstrio/application_objects/report.py","file_name":"report.py","file_ext":"py","file_size_in_byte":22623,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"336104644","text":"import asyncio\nimport discord\nimport json\nimport logging\nimport oauth\nimport os\n\nfrom discord.ext import commands, tasks\n\nPATH = \"data/twitch/\"\nFILENAME = \"twitch_data.json\"\nFILE_PATH = PATH + FILENAME\n\nNOTIFICATION_CHANNEL_ID = 329705297390338050\nADMIN_ID = 260862857707520010\nFIRE_ID = 105357121406664704\n\n\n# check\ndef is_admin(ctx):\n \"\"\"\n Check whether the author has permission to add and remove people from the\n watchlist\n \"\"\"\n roles = [role.id for role in ctx.author.roles]\n return ADMIN_ID in roles or FIRE_ID == ctx.author.id\n\n\nclass TwitchCog(commands.Cog):\n \"\"\"\n Cog to handle twitch notifications\n\n Checks for the current status of given twitch steams, and if a channel\n goes live it creates a fancy embed in a predefined channel.\n \"\"\"\n def __init__(self, bot):\n self.bot = bot\n self.logger = logging.getLogger(__name__)\n self.live_channels = []\n self.watching_channels = []\n self.subscriptions = {}\n self.token = {}\n self.update_streamers_idx = 0\n self.update_streamers_task.start()\n self.load_json()\n\n def cog_unload(self):\n self.update_streamers_task.cancel()\n\n @commands.group()\n async def twitch(self, ctx):\n \"\"\"\n Group for twitch commands\n \"\"\"\n if not ctx.invoked_subcommand:\n await ctx.send_help(ctx.command)\n\n @twitch.command(aliases=[\"list\"])\n async def show_streamers(self, ctx):\n \"\"\"\n Show the list of all streamers on the watchlist\n \"\"\"\n await ctx.send(\"```\\n\" + \"\\n\".join(sorted(self.watching_channels))\n + \"```\")\n\n @twitch.command()\n async def subscribe(self, ctx, streamer_name=None):\n \"\"\"\n Subscribe to a streamer on the watchlist\n\n If subscribed, you will get a mention every time the streamer goes\n live.\n\n Parameters\n ----------\n streamer_name : str\n the twitch username of the streamer you want to subscribe to\n\n Example\n -------\n >>> !twitch subscribe Jabbert\n Adds you to the sublist of Jabbert, so every time Jabbert goes live\n you will get mentioned.\n \"\"\"\n if streamer_name is None:\n await ctx.send_help(ctx.command)\n return await ctx.send(\"You need to specify who you want to subscribe to.\")\n if streamer_name in self.subscriptions:\n if str(ctx.author.id) not in self.subscriptions[streamer_name]:\n self.subscriptions[streamer_name].append(str(ctx.author.id))\n self.save_json()\n await ctx.send(f\"You are now subscribed to {streamer_name}\")\n self.logger.debug('%s - %s subscribed to %s' %\n (ctx.author, ctx.author.id, streamer_name)\n )\n else:\n await ctx.send(f\"It looks like {streamer_name} was not worthy \"\n \"enough for the kingdom.\")\n\n 
@twitch.command()\n    async def unsubscribe(self, ctx, streamer_name=None):\n        \"\"\"\n        Unsubscribe from a streamer\n\n        Parameters\n        ----------\n        streamer_name : str\n            the twitch username of the streamer you want to unsubscribe from\n\n        Example\n        -------\n        >>> !twitch unsubscribe Jabbert\n        Removes you from the sublist of Jabbert, and you will no longer get\n        mentioned by the bot when he goes live.\n        \"\"\"\n        if streamer_name is None:\n            await ctx.send_help(ctx.command)\n            return await ctx.send(\"You need to specify who you want to \"\n                                  \"unsubscribe from.\")\n        if streamer_name in self.subscriptions:\n            if str(ctx.author.id) in self.subscriptions[streamer_name]:\n                self.subscriptions[streamer_name].remove(str(ctx.author.id))\n                self.save_json()\n            await ctx.send(f\"You are no longer subscribed to {streamer_name}.\")\n            self.logger.info('%s - %s unsubscribed from %s' % (ctx.author,\n                                                               ctx.author.id,\n                                                               streamer_name))\n        else:\n            await ctx.send(f\"It looks like {streamer_name} was not worthy \"\n                           \"enough for the kingdom.\")\n\n    @twitch.command(aliases=[\"add\"])\n    @commands.check(is_admin)\n    async def add_streamer(self, ctx, streamer_name=None):\n        \"\"\"\n        Add a streamer to the twitch watchlist.\n\n        Parameters\n        ----------\n        streamer_name : str\n            the name of the twitch streamer you want to add\n\n        Example\n        -------\n        >>> !twitch add jabbert\n        Adds the twitch user \"jabbert\" to the watchlist\n        \"\"\"\n        if streamer_name is None:\n            await ctx.send_help(ctx.command)\n            return await ctx.send(\"You need to specify a twitchname to add.\")\n        if streamer_name not in self.watching_channels:\n            self.watching_channels.append(streamer_name)\n            self.subscriptions[streamer_name] = []\n            self.save_json()\n            await ctx.send(f\"Successfully added {streamer_name} to the \"\n                           \"watchlist.\")\n            self.logger.info('%s - %s added %s to the watchlist'\n                             % (ctx.author, ctx.author.id, streamer_name))\n        else:\n            await ctx.send(f\"{streamer_name} is already on the watchlist.\")\n\n    @twitch.command(aliases=[\"remove\", \"rem\", \"del\", \"delete\"])\n    @commands.check(is_admin)\n    async def del_streamer(self, ctx, streamer_name=None):\n        \"\"\"\n        Remove a streamer from the twitch watchlist.\n\n        Parameters\n        ----------\n        streamer_name : str\n            the name of the twitch streamer you want to remove\n\n        Example\n        -------\n        >>> !twitch delete jabbert\n        Removes the twitch user \"jabbert\" from the watchlist\n        \"\"\"\n        if streamer_name is None:\n            await ctx.send_help(ctx.command)\n            return await ctx.send(\"You need to specify a twitchname to remove.\")\n        if streamer_name in self.watching_channels:\n            if streamer_name in self.live_channels:\n                self.live_channels.remove(streamer_name)\n            self.watching_channels.remove(streamer_name)\n            self.subscriptions.pop(streamer_name, None)\n            self.save_json()\n            await ctx.send(f\"Successfully removed {streamer_name} from the \"\n                           \"watchlist.\")\n            self.logger.info('%s - %s removed %s from the watchlist'\n                             % (ctx.author, ctx.author.id, streamer_name))\n        else:\n            await ctx.send(f\"{streamer_name} is not on the watchlist.\")\n\n    @twitch.command(hidden=True)\n    @commands.is_owner()\n    async def manual_update(self, ctx):\n        \"\"\"\n        Manually update the currently live twitch streamers\n        \"\"\"\n        # this should use other functions !!!\n        pass\n\n    async def get_live_status(self, streamer):\n        \"\"\"\n        Query the twitch api for the stream status of a user\n\n        Parameters\n        ----------\n        streamer: str\n            the 
streamer that you want to get the info of\n\n        Returns\n        -------\n        (ret: int, data: dict): tuple\n            ret is the return code, data is the received info about the stream\n            return codes:\n                -1: json load failed\n                -3: querying the api failed (most likely no internet)\n        \"\"\"\n        url = \"https://api.twitch.tv/helix/streams?user_login={}\"\n        ret = 0\n        data = {}\n        expired = True\n        while expired:\n            headers = {\"Authorization\": f\"Bearer {self.token['access_token']}\"}\n            try:\n                async with self.bot.aiohttp_session.get(url.format(streamer),\n                                                        headers=headers) as resp:\n                    if resp.status == 401:\n                        await self.refresh_oauth_token()\n                        continue\n                    else:\n                        expired = False\n                        try:\n                            data = json.loads(await resp.text())[\"data\"]\n                        except Exception as e:\n                            self.logger.warning(\"Couldn't load json data: %s\" % e)\n                            ret = -1\n            except Exception:\n                ret = -3\n                break\n        return (ret, data)\n\n    def is_offline(self, streamer):\n        \"\"\"\n        Remove a streamer from the list of live channels\n\n        Parameters\n        ----------\n        streamer: str\n            the user you want to remove from the list\n        \"\"\"\n        if streamer in self.live_channels:\n            self.logger.info(\"%s is no longer live\" % streamer)\n            self.live_channels.remove(streamer)\n            self.save_json()\n\n    def is_online(self, streamer):\n        \"\"\"\n        Add a streamer to the list of live channels\n\n        Parameters\n        ----------\n        streamer: str\n            the user you want to add to the list\n\n        Returns\n        -------\n        bool\n            True if the streamer just went live, False if he has been live before\n        \"\"\"\n        if streamer not in self.live_channels:\n            self.logger.info(\"%s just went live\" % streamer)\n            self.live_channels.append(streamer)\n            self.save_json()\n            return True\n        return False\n\n    async def build_stream_dict(self, streamer, data):\n        \"\"\"\n        Builds a dictionary with useful info about a stream\n\n        Parameters\n        ----------\n        streamer: str\n            the streamer that you want to get the info about\n\n        data: dict\n            the data about the stream from the twitch \"streams\" endpoint\n\n        Returns\n        -------\n        dict\n            a dictionary containing all the necessary info for the stream embed\n        None\n            returns None if the user of the stream was not found\n        \"\"\"\n        stream = data[0]\n        user_data = await self.query_api(\"users\", stream[\"user_id\"])\n        if len(user_data) == 0:\n            self.logger.critical(\"Could not resolve a streamer HMMM\")\n            return None\n        user = user_data[0]\n        stream_dict = {\n            \"game\": await self.get_game_name(stream[\"game_id\"]),\n            \"viewers\": stream[\"viewer_count\"],\n            \"preview_pic_url\": stream[\"thumbnail_url\"].format(width=1280,\n                                                              height=720),\n            \"stream_title\": stream[\"title\"],\n            \"logo_url\": user[\"profile_image_url\"],\n            \"name\": user[\"login\"],\n            \"display_name\": user[\"display_name\"],\n            \"subscribers\": self.subscriptions[streamer]\n        }\n        return stream_dict\n\n    async def get_game_name(self, game_id):\n        game_data = await self.query_api(\"games\", game_id)\n        return game_data[0][\"name\"] if len(game_data) > 0 else \"\"\n\n    async def check_if_live(self):\n        \"\"\"\n        Check if any of the channels on the watchlist have gone live since\n        last check\n\n        Returns\n        -------\n        list\n            a list with dictionaries in it, containing stream data about new\n            live streamers\n        -1\n            json load failed\n        -2\n            twitch api returned an internal error\n        -3\n            request failed (no internet most likely)\n        \"\"\"\n        new_live_channels = []\n        for streamer in self.watching_channels:\n            ret, data = await self.get_live_status(streamer)\n            if ret != 0:\n                return ret\n            if \"status\" in data:\n                return -2\n            if not data:\n                self.is_offline(streamer)\n            else:\n                if self.is_online(streamer):\n                    
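# the streamer just went live; collect its stream data for the notification embed\n                    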
stream_data = await self.build_stream_dict(streamer, data)\n                    if stream_data is not None:\n                        new_live_channels.append(stream_data)\n        return new_live_channels\n\n    async def refresh_oauth_token(self):\n        \"\"\"Gets a new oauth token from twitch\"\"\"\n        url = (f\"https://id.twitch.tv/oauth2/token?client_id={oauth.TWITCH_ID}\"\n               f\"&client_secret={oauth.TWITCH_SECRET}\"\n               \"&grant_type=client_credentials\")\n        async with self.bot.aiohttp_session.post(url) as resp:\n            try:\n                self.token = json.loads(await resp.text())\n            except Exception:\n                self.logger.warning(\"Couldn't get new token\")\n                return -1\n        self.logger.info(f\"Successfully refreshed oauth token. Expires in: {self.token['expires_in']}\")\n        self.save_json()\n        return self.token\n\n    async def query_api(self, endpoint, identifier):\n        \"\"\"Query the twitch API for an endpoint and an identifier\"\"\"\n        url = f\"https://api.twitch.tv/helix/{endpoint}?id={identifier}\"\n        expired = True\n        while expired:\n            headers = {\"Authorization\": f\"Bearer {self.token['access_token']}\"}\n            async with self.bot.aiohttp_session.get(url, headers=headers) as resp:\n                if resp.status == 401:\n                    await self.refresh_oauth_token()\n                    continue\n                else:\n                    expired = False\n                    ret = json.loads(await resp.text())\n        return ret[\"data\"]\n\n    def create_stream_embed(self, streamer_dict):\n        \"\"\"\n        Create a discord embed for a stream\n\n        Parameters\n        ----------\n        streamer_dict: dict\n            the data about the stream\n\n        Returns\n        discord.Embed()\n            an embed containing all the important stream info\n        \"\"\"\n        if streamer_dict[\"game\"] == \"\":\n            streamer_dict[\"game\"] = \"No game specified.\"\n        if streamer_dict[\"display_name\"] in [\"\", None]:\n            streamer_dict[\"display_name\"] = streamer_dict[\"name\"]\n        embed = discord.Embed(title=streamer_dict[\"stream_title\"],\n                              url=f\"https://twitch.tv/{streamer_dict['name']}\",\n                              colour=discord.Colour(1).dark_magenta())\n        embed.add_field(name=\"**Played Game**\",\n                        value=streamer_dict[\"game\"],\n                        inline=True)\n        embed.add_field(name=\"**Viewers**\",\n                        value=streamer_dict[\"viewers\"],\n                        inline=True)\n        embed.set_image(url=streamer_dict[\"preview_pic_url\"])\n        embed.set_footer(text=\"Twitch.tv\")\n        if streamer_dict[\"logo_url\"] is None:\n            embed.set_author(name=streamer_dict[\"display_name\"],\n                             url=f\"https://twitch.tv/{streamer_dict['name']}\")\n        else:\n            embed.set_author(name=streamer_dict[\"display_name\"],\n                             url=f\"https://twitch.tv/{streamer_dict['name']}\",\n                             icon_url=streamer_dict[\"logo_url\"])\n        embed.set_thumbnail(url=streamer_dict[\"logo_url\"])\n        return embed\n\n    @tasks.loop(seconds=60)\n    async def update_streamers_task(self):\n        \"\"\"\n        Update all twitch channels on the watch list every minute.\n        \"\"\"\n        new_live_channels = await self.check_if_live()\n        if new_live_channels in [-1, -2]:\n            self.logger.error(\"Couldn't reach twitch api\")\n        elif new_live_channels == -3:\n            self.logger.critical(\"#NO INTERNET CONNECTION#\")\n        elif len(new_live_channels) > 0:\n            channel = self.bot.get_channel(NOTIFICATION_CHANNEL_ID)\n            for streamer_dict in new_live_channels:\n                embed = self.create_stream_embed(streamer_dict)\n                msg = (\"Oy citizens of the JabKingdom ! \"\n                       f\"{streamer_dict['name']} is now live on \"\n                       f\"https://www.twitch.tv/{streamer_dict['name']} ! \"\n                       \"Go check it out :wink:!\\n\")\n                for subscriber in streamer_dict[\"subscribers\"]:\n                    msg += f\" <@{subscriber}>\"\n                await channel.send(msg, embed=embed)\n        if self.update_streamers_idx % 60 == 0:\n            self.logger.debug(\"Twitch updater running. 
Online time: %i hours.\"\n % (self.update_streamers_idx // 60))\n self.update_streamers_idx += 1\n\n @update_streamers_task.before_loop\n async def before_update_streamers(self):\n await self.bot.wait_until_ready()\n\n def load_json(self):\n \"\"\"\n Load the json data\n \"\"\"\n if not os.path.isdir(PATH):\n os.makedirs(PATH)\n self.logger.warning(\"Couldn't find %s directory, created new one.\"\n % PATH)\n if os.path.isfile(FILE_PATH):\n with open(FILE_PATH, \"r\") as fp:\n data = json.load(fp)\n self.live_channels = data[\"LIVE\"]\n self.watching_channels = data[\"WATCHING\"]\n self.subscriptions = data[\"SUBS\"]\n self.token = data[\"TOKEN\"]\n else:\n self.save_json()\n self.logger.warning(\"Couldn't find %s, created new one.\"\n % FILE_PATH)\n\n def save_json(self):\n \"\"\"\n Save the json data\n \"\"\"\n data = {\"LIVE\": self.live_channels,\n \"WATCHING\": self.watching_channels,\n \"SUBS\": self.subscriptions,\n \"TOKEN\": self.token}\n with open(FILE_PATH, \"w\") as fp:\n json.dump(data, fp, indent=2)\n return 1\n\n\ndef setup(bot):\n bot.add_cog(TwitchCog(bot))\n","sub_path":"cogs/twitch.py","file_name":"twitch.py","file_ext":"py","file_size_in_byte":17789,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"575612609","text":"# Copyright 2021, Yahoo\n# Licensed under the terms of the Apache 2.0 license. See the LICENSE file in the project root for terms\nimport math\nimport multiprocessing\nfrom datetime import datetime, timedelta\nfrom unittest import TestCase\n\nfrom mockito import unstub, when\n\nfrom ychaos.agents.agent import AgentState\nfrom ychaos.agents.system.cpu import CPUBurn, CPUBurnConfig, _burn\nfrom ychaos.utils.dependency import DependencyUtils\n\n\nclass TestCpuBurn(TestCase):\n def setUp(self) -> None:\n pass\n\n def test_burn_while_loop_exits_when_end_datetime_reached(self):\n start = datetime.now()\n _burn(end=start + timedelta(milliseconds=100))\n end = datetime.now()\n difference = end - start\n self.assertTrue(100000 < difference.microseconds)\n self.assertEqual(difference.days, 0)\n\n def test_cpu_burn_updates_state_when_complete(self):\n when(multiprocessing).cpu_count().thenReturn(\n 1\n ) # Mock that the system has only one CPU\n\n cpu_burn_config = CPUBurnConfig(duration=0.1)\n cpu_burn_agent = CPUBurn(cpu_burn_config)\n\n cpu_burn_agent.setup()\n self.assertEqual(cpu_burn_agent.current_state, AgentState.SETUP)\n\n cpu_burn_agent.start()\n self.assertEqual(cpu_burn_agent.current_state, AgentState.COMPLETED)\n\n cpu_burn_agent.teardown()\n self.assertEqual(cpu_burn_agent.current_state, AgentState.TEARDOWN)\n\n def test_cpu_burn_init_when_psutil_is_not_installed(self):\n when(DependencyUtils).import_module(\"psutil\", raise_error=False).thenReturn(\n None\n )\n cpu_burn_config = CPUBurnConfig(duration=0.1)\n cpu_burn_agent = CPUBurn(cpu_burn_config)\n self.assertIsNone(cpu_burn_agent._psutil)\n\n def test_cpu_burn_when_effective_cpu_count_is_zero(self):\n cpu_burn_config = CPUBurnConfig(duration=0.1, cores_pct=0)\n cpu_burn_agent = CPUBurn(cpu_burn_config)\n\n self.assertEqual(cpu_burn_config.effective_cpu_count(), 0)\n\n cpu_burn_agent.setup()\n self.assertEqual(cpu_burn_agent.current_state, AgentState.SETUP)\n\n cpu_burn_agent.start()\n self.assertEqual(cpu_burn_agent.current_state, AgentState.COMPLETED)\n\n cpu_burn_agent.teardown()\n self.assertEqual(cpu_burn_agent.current_state, AgentState.TEARDOWN)\n\n def test_cpu_burn_monitor(self):\n cpu_burn_config = CPUBurnConfig(duration=0.1, cores_pct=0)\n 
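# cores_pct=0 keeps effective_cpu_count() at 0 (asserted in the test above), so the monitor below reports cpu_count == 0\n 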
cpu_burn_agent = CPUBurn(cpu_burn_config)\n\n monitor_status_queue = cpu_burn_agent.monitor()\n status = monitor_status_queue.get()\n\n self.assertTrue(\"cpu_usage\" in status.data)\n self.assertEqual(status.data[\"cpu_count\"], 0)\n\n def test_cpu_burn_monitor_when_psutil_package_not_installed(self):\n when(DependencyUtils).import_module(\"psutil\", raise_error=False).thenReturn(\n None\n )\n cpu_burn_config = CPUBurnConfig(duration=0.1, cores_pct=0)\n cpu_burn_agent = CPUBurn(cpu_burn_config)\n\n monitor_status_queue = cpu_burn_agent.monitor()\n status = monitor_status_queue.get()\n\n self.assertTrue(\"cpu_usage\" in status.data)\n self.assertTrue(math.isnan(status.data[\"cpu_usage\"]))\n self.assertEqual(status.data[\"cpu_count\"], 0)\n\n def tearDown(self) -> None:\n unstub()\n","sub_path":"tests/agents/system/test_cpu.py","file_name":"test_cpu.py","file_ext":"py","file_size_in_byte":3321,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"125003691","text":"import matplotlib.pyplot as plt\nimport numpy as np\nimport matplotlib.dates as mdates\nimport pickle\n\ndef loadSampleStockData():\n with open('stock_data.pickle', 'rb') as pickle_in:\n return pickle.load(pickle_in)\n\ndef bytespdate2num(fmt, encoding='utf-8'):\n strconverter = mdates.strpdate2num(fmt)\n def bytesconverter(b):\n s = b.decode(encoding)\n return strconverter(s)\n return bytesconverter\n\ndef graph_data(stock_data):\n date, closep, highp, lowp, openp, adj_closep, volume = np.loadtxt(stock_data, delimiter=',', unpack=True, converters={0: bytespdate2num('%Y-%m-%d')})\n\n fig = plt.figure()\n ax1 = plt.subplot2grid((1,1), (0,0))\n ax1.plot_date(date, closep, '-', label='Price')\n for label in ax1.xaxis.get_ticklabels():\n label.set_rotation(45)\n\n ax1.plot([], [], linewidth=5, label='loss', color='r', alpha=0.5)\n ax1.plot([], [], linewidth=5, label='gain', color='g', alpha=0.5)\n ax1.fill_between(date, closep, closep[0], where=(closep > closep[0]), facecolor='g', alpha=0.5)\n ax1.fill_between(date, closep, closep[0], where=(closep < closep[0]), facecolor='r', alpha=0.5)\n\n ax1.grid(True)\n ax1.xaxis.label.set_color('c')\n ax1.yaxis.label.set_color('r')\n ax1.set_yticks([0, 200, 400, 600, 800])\n\n\n plt.xlabel('Date')\n plt.ylabel('Price')\n plt.title('Sample Stock')\n plt.legend()\n plt.subplots_adjust(left=0.09, bottom=0.20, right=0.94, top=0.90, wspace=0.2, hspace=0)\n plt.show()\n\ngraph_data(loadSampleStockData())","sub_path":"12_Colors&Fills.py","file_name":"12_Colors&Fills.py","file_ext":"py","file_size_in_byte":1511,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"461528561","text":"from datetime import datetime\n\nfrom structlog import get_logger\n\nfrom app.questionnaire.questionnaire_schema import DEFAULT_LANGUAGE_CODE\nfrom app.submitter.convert_payload_0_0_1 import convert_answers_to_payload_0_0_1\nfrom app.submitter.convert_payload_0_0_3 import convert_answers_to_payload_0_0_3\n\nlogger = get_logger()\n\n\nclass DataVersionError(Exception):\n def __init__(self, version):\n super().__init__()\n self.version = version\n\n def __str__(self):\n return \"Data version {} not supported\".format(self.version)\n\n\ndef convert_answers(schema, questionnaire_store, routing_path, flushed=False):\n \"\"\"\n Create the JSON answer format for down stream processing in the following format:\n ```\n {\n 'tx_id': '0f534ffc-9442-414c-b39f-a756b4adc6cb',\n 'type' : 'uk.gov.ons.edc.eq:surveyresponse',\n 'version' : 
'0.0.1',\n 'origin' : 'uk.gov.ons.edc.eq',\n 'survey_id': '021',\n 'flushed': true|false\n 'collection':{\n 'exercise_sid': 'hfjdskf',\n 'schema_name': 'yui789',\n 'period': '2016-02-01'\n },\n 'started_at': '2016-03-06T15:28:05Z',\n 'submitted_at': '2016-03-07T15:28:05Z',\n 'launch_language_code': 'en',\n 'channel': 'RH',\n 'metadata': {\n 'user_id': '789473423',\n 'ru_ref': '432423423423'\n },\n 'data': [\n ...\n ],\n }\n ```\n\n Args:\n schema: QuestionnaireSchema instance with populated schema json\n questionnaire_store: EncryptedQuestionnaireStorage instance for accessing current questionnaire data\n routing_path: The full routing path followed by the user when answering the questionnaire\n flushed: True when system submits the users answers, False when submitted by user.\n Returns:\n Data payload\n \"\"\"\n metadata = questionnaire_store.metadata\n response_metadata = questionnaire_store.response_metadata\n answer_store = questionnaire_store.answer_store\n list_store = questionnaire_store.list_store\n\n survey_id = schema.json[\"survey_id\"]\n submitted_at = datetime.utcnow()\n\n payload = {\n \"case_id\": metadata[\"case_id\"],\n \"tx_id\": metadata[\"tx_id\"],\n \"type\": \"uk.gov.ons.edc.eq:surveyresponse\",\n \"version\": schema.json[\"data_version\"],\n \"origin\": \"uk.gov.ons.edc.eq\",\n \"survey_id\": survey_id,\n \"flushed\": flushed,\n \"submitted_at\": submitted_at.isoformat(),\n \"collection\": _build_collection(metadata),\n \"metadata\": _build_metadata(metadata),\n \"launch_language_code\": metadata.get(\"language_code\", DEFAULT_LANGUAGE_CODE),\n }\n\n if metadata.get(\"channel\"):\n payload[\"channel\"] = metadata[\"channel\"]\n if metadata.get(\"case_type\"):\n payload[\"case_type\"] = metadata[\"case_type\"]\n if metadata.get(\"form_type\"):\n payload[\"form_type\"] = metadata[\"form_type\"]\n if metadata.get(\"region_code\"):\n payload[\"region_code\"] = metadata[\"region_code\"]\n if response_metadata.get(\"started_at\"):\n payload[\"started_at\"] = response_metadata[\"started_at\"]\n if metadata.get(\"case_ref\"):\n payload[\"case_ref\"] = metadata[\"case_ref\"]\n\n if schema.json[\"data_version\"] == \"0.0.3\":\n payload[\"data\"] = {\n \"answers\": convert_answers_to_payload_0_0_3(\n answer_store, list_store, schema, routing_path\n ),\n \"lists\": list_store.serialize(),\n }\n elif schema.json[\"data_version\"] == \"0.0.1\":\n payload[\"data\"] = convert_answers_to_payload_0_0_1(\n metadata, answer_store, list_store, schema, routing_path\n )\n else:\n raise DataVersionError(schema.json[\"data_version\"])\n\n logger.info(\"converted answer ready for submission\")\n return payload\n\n\ndef _build_collection(metadata):\n return {\n \"exercise_sid\": metadata[\"collection_exercise_sid\"],\n \"schema_name\": metadata[\"schema_name\"],\n \"period\": metadata[\"period_id\"],\n }\n\n\ndef _build_metadata(metadata):\n downstream_metadata = {\"user_id\": metadata[\"user_id\"], \"ru_ref\": metadata[\"ru_ref\"]}\n\n if metadata.get(\"ref_p_start_date\"):\n downstream_metadata[\"ref_period_start_date\"] = metadata[\"ref_p_start_date\"]\n if metadata.get(\"ref_p_end_date\"):\n downstream_metadata[\"ref_period_end_date\"] = metadata[\"ref_p_end_date\"]\n if metadata.get(\"display_address\"):\n downstream_metadata[\"display_address\"] = metadata[\"display_address\"]\n\n return 
downstream_metadata\n","sub_path":"app/submitter/converter.py","file_name":"converter.py","file_ext":"py","file_size_in_byte":4532,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"117596404","text":"\"\"\"\n Library of EV3 robot functions that are useful in many different applications. For example things\n like arm_up, arm_down, driving around, or doing things with the Pixy camera.\n\n Add commands as needed to support the features you'd like to implement. For organizational\n purposes try to only write methods into this library that are NOT specific to one task, but\n rather methods that would be useful regardless of the activity. For example, don't make\n a connection to the remote control that sends the arm up if the ir remote control up button\n is pressed. That's a specific input --> output task. Maybe some other task would want to use\n the IR remote up button for something different. Instead just make a method called arm_up that\n could be called. That way it's a generic action that could be used in any task.\n\"\"\"\n\nimport ev3dev.ev3 as ev3\nimport math\nimport time\n\n\nclass Snatch3r(object):\n \"\"\"Commands for the Snatch3r robot that might be useful in many different programs.\"\"\"\n def __init__(self): # Initializes the Robot and its components\n self.left_led = ev3.Leds.LEFT\n self.right_led = ev3.Leds.RIGHT\n self.left_motor = ev3.LargeMotor(ev3.OUTPUT_B)\n self.right_motor = ev3.LargeMotor(ev3.OUTPUT_C)\n self.arm_motor = ev3.MediumMotor(ev3.OUTPUT_A)\n self.touch_sensor = ev3.TouchSensor()\n self.running = False\n self.color_sensor = ev3.ColorSensor()\n self.ir_sensor = ev3.InfraredSensor()\n self.beacon_seeker = ev3.BeaconSeeker()\n self.pixy = ev3.Sensor(driver_name=\"pixy-lego\")\n assert self.pixy\n assert self.ir_sensor\n assert self.color_sensor\n assert self.left_motor\n assert self.right_motor\n assert self.arm_motor\n\n def drive_inches(self, dist, speed): # Drives a given distance and speed, determined by the user.\n\n position = dist * 90\n\n self.left_motor.run_to_rel_pos(position_sp=position, speed_sp=speed, stop_action=ev3.Motor.STOP_ACTION_BRAKE)\n self.right_motor.run_to_rel_pos(position_sp=position, speed_sp=speed, stop_action=ev3.Motor.STOP_ACTION_BRAKE)\n self.left_motor.wait_while(ev3.Motor.STATE_RUNNING)\n\n def turn_degrees(self, degrees, speed): # Turns robot by given degrees at given speed\n\n degrees = degrees * 4.6\n\n self.left_motor.run_to_rel_pos(position_sp=degrees, speed_sp=speed, stop_action=ev3.Motor.STOP_ACTION_BRAKE)\n self.right_motor.run_to_rel_pos(position_sp=-degrees, speed_sp=speed, stop_action=ev3.Motor.STOP_ACTION_BRAKE)\n\n self.left_motor.wait_while(ev3.Motor.STATE_RUNNING)\n\n def arm_down(self): # Returns arm to down position from up position.\n max_speed = 900\n self.arm_motor.run_to_abs_pos(position_sp=0, speed_sp=max_speed)\n self.arm_motor.wait_while(ev3.Motor.STATE_RUNNING)\n ev3.Sound.beep()\n\n def arm_up(self): # Moves arm upwards; this causes the Snatch3r to grab whatever is in front of it.\n max_speed = 900\n self.arm_motor.run_forever(speed_sp=max_speed)\n while not self.touch_sensor.is_pressed:\n time.sleep(0.01)\n self.arm_motor.stop(stop_action='brake')\n ev3.Sound.beep()\n\n def arm_calibration(self): # Calibrates the arm by moving it up to its limit\n # and then sends it down the determined distance for its 0 point.\n max_speed = 900\n self.arm_motor.run_forever(speed_sp=max_speed)\n while not self.touch_sensor.is_pressed:\n time.sleep(0.01)\n 
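# touch sensor pressed: the arm has reached its upper limit, so stop it before measuring the way back down\n 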
self.arm_motor.stop(stop_action='brake')\n ev3.Sound.beep()\n\n arm_revolutions_for_full_range = 14.2 * 360\n self.arm_motor.run_to_rel_pos(position_sp=-arm_revolutions_for_full_range)\n self.arm_motor.wait_while(ev3.Motor.STATE_RUNNING)\n ev3.Sound.beep()\n\n self.arm_motor.position = 0 # Calibrate the down position as 0 (this line is correct as is).\n\n def shutdown(self, dc): # Stops robot and breaks it out of its forever loop.\n dc.running = False\n self.running = False\n self.left_motor.stop(stop_action='brake')\n self.right_motor.stop(stop_action='brake')\n\n ev3.Leds.set_color(ev3.Leds.RIGHT, ev3.Leds.GREEN)\n ev3.Leds.set_color(ev3.Leds.LEFT, ev3.Leds.GREEN)\n\n print('Goodbye')\n ev3.Sound.speak('Goodbye').wait()\n\n def loop_forever(self): # Sets the robot in an infinite loop of actions.\n self.running = True\n while self.running:\n time.sleep(.01)\n\n def forward(self, left_speed, right_speed): # Drives forward at given speeds until told to stop.\n self.left_motor.run_forever(speed_sp=left_speed)\n self.right_motor.run_forever(speed_sp=right_speed)\n\n def backward(self, left_speed, right_speed): # Drives backward at given speeds until told to stop.\n self.left_motor.run_forever(speed_sp=-left_speed)\n self.right_motor.run_forever(speed_sp=-right_speed)\n\n def left(self, left_speed, right_speed): # Turns the robot left at given speeds until told to stop or move.\n self.left_motor.run_forever(speed_sp=-left_speed)\n self.right_motor.run_forever(speed_sp=right_speed)\n\n def right(self, left_speed, right_speed): # Turns the robot right at the given speeds until told to stop.\n self.left_motor.run_forever(speed_sp=left_speed)\n self.right_motor.run_forever(speed_sp=-right_speed)\n\n def stop(self): # Stops the robot.\n self.left_motor.stop(stop_action='brake')\n self.right_motor.stop(stop_action='brake')\n\n def shutdown2(self): # Shuts down the robot when in an infinite loop and breaks the loop.\n self.running = False\n self.left_motor.stop(stop_action='brake')\n self.right_motor.stop(stop_action='brake')\n\n ev3.Sound.speak('Goodbye')\n ev3.Leds.set_color(ev3.Leds.RIGHT, ev3.Leds.GREEN)\n ev3.Leds.set_color(ev3.Leds.LEFT, ev3.Leds.GREEN)\n\n def seek_beacon(self, channel, forward_speed, turn_speed): # Using the IR sensor, tracks and picks-up the beacon.\n self.beacon_seeker = ev3.BeaconSeeker(channel=channel)\n while not self.touch_sensor.is_pressed:\n # The touch sensor can be used to abort the attempt (sometimes handy during testing)\n\n # DONE: 3. Use the beacon_seeker object to get the current heading and distance.\n current_heading = self.beacon_seeker.heading # use the beacon_seeker heading\n current_distance = self.beacon_seeker.distance # use the beacon_seeker distance\n if current_distance == -128:\n # If the IR Remote is not found just sit idle for this program until it is moved.\n print(\"IR Remote not found. Distance is -128\")\n self.stop()\n else:\n\n if math.fabs(current_heading) < 2:\n if current_distance == 1:\n time.sleep(1)\n self.stop()\n return True\n if current_distance > 1:\n print(\"On the right heading. 
Distance: \", current_distance)\n self.forward(forward_speed, forward_speed)\n if math.fabs(current_heading) > 2 and math.fabs(current_heading) < 10:\n if current_heading < 0:\n self.left(turn_speed, turn_speed)\n print(\"Adjusting heading: \", current_heading)\n if current_heading > 0:\n self.right(turn_speed, turn_speed)\n print(\"Adjusting heading: \", current_heading)\n if math.fabs(current_heading) > 10:\n self.right(turn_speed, turn_speed)\n print(\"Heading is too far off to fix: \", current_heading)\n\n time.sleep(0.2)\n # The touch_sensor was pressed to abort the attempt if this code runs.\n print(\"Abandon ship!\")\n self.stop()\n return False\n","sub_path":"projects/Carson Meyer/robot_controller.py","file_name":"robot_controller.py","file_ext":"py","file_size_in_byte":7996,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"485395839","text":"\"\"\"\nPython SDK for Copyleaks. More details can be read at https://api.copyleaks.com/Documentation.\nFirst register on Copyleaks and get the API key.\n\"\"\"\n\nimport requests\nimport json\nimport os\nfrom retrying import retry\n\n\nclass CopyLeaks(object):\n def __init__(self, username, api_key, **kwargs):\n \"\"\"\n Creates a Copyleak object which is used to make calls\n :param username: Username used to login\n :param api_key: API key used to make calls\n :param kwargs:\n :return:\n \"\"\"\n self.api_key = api_key\n self.username = username\n self.base_url = kwargs.get('base_url') or 'https://api.copyleaks.com'\n self.headers = kwargs.get('headers') or {'content-type': 'application/json'}\n self.access_token = None\n self.test_mode = kwargs.get('test_mode') or False\n self._login()\n\n @retry\n def _login(self):\n \"\"\"\n Does the login and saves access token for subsequent requests\n :return:\n \"\"\"\n assert self.api_key and self.username\n url = self.base_url + '/v1/account/login'\n data = dict(\n Username=self.username,\n Apikey=self.api_key\n\n )\n data = json.dumps(data)\n response = requests.post(url, headers=self.headers, data=data)\n assert response.status_code == 200\n self.access_token = response.json()['access_token']\n self._set_headers()\n\n def _set_headers(self):\n assert self.access_token\n self.headers['Authorization'] = 'Bearer %s' % self.access_token\n if self.test_mode:\n self.headers['copyleaks-sandbox-mode'] = ''\n\n @retry\n def count_credits(self):\n \"\"\"\n Counds credits and returns a response of the form:\n {\n \"Amount\": 1\n }\n :return:\n \"\"\"\n url = self.base_url + '/v1/account/count-credits'\n response = requests.get(url, headers=self.headers)\n assert 'Amount' in response.json()\n return response.json()\n\n @retry\n def create_process_by_url(self, url_to_process):\n \"\"\"\n Creates a process by scanning a URL and returns a response of the following format:\n {\n \"ProcessId\": \"1074ae9d-ec1c-46ff-adc2-c294abed29f3\",\n \"CreationTimeUTC\": \"01/01/2016 18:36:10\"\n }\n :param url: URL to scan\n :return:\n \"\"\"\n assert url_to_process\n url = self.base_url + '/v1/detector/create-by-url'\n data = dict(\n Url=url_to_process\n )\n data = json.dumps(data)\n response = requests.post(url, headers=self.headers, data=data)\n assert response.status_code == 200\n assert 'ProcessId' in response.json()\n assert 'CreationTimeUTC' in response.json()\n return response.json()\n\n @retry\n def _upload_file(self, url, file_path):\n \"\"\"\n Uploads a file for processing.\n :param url: URL to hit e.g. 
either 'create-by-file' or 'create-by-ocr'\n :param file_path: Valid file path\n :return:\n \"\"\"\n file_name, file_extension = os.path.splitext(file_path)\n valid_extensions = ['.html', '.txt', '.pdf', '.docx', '.doc', '.rtf', '.jpeg']\n assert file_extension in valid_extensions\n if file_extension == '.jpeg':\n files = {'media': (file_path, open(file_path, 'rb').read())}\n else:\n files = {'file': (file_path, open(file_path, 'rb').read())}\n headers = self.headers.copy()\n headers.pop('content-type')\n response = requests.post(url, headers=headers, files=files)\n assert 'CreationTimeUTC' in response.json()\n assert 'ProcessId' in response.json()\n assert response.status_code == 200\n return response.json()\n\n @retry\n def create_process_by_file(self, file_path):\n \"\"\"\n Creates a process by uploading a file to scan. Returns a response of the format:\n {\n \"ProcessId\": \"23af0314-2fb9-4b91-959f-87848572df1f\",\n \"CreationTimeUTC\": \"01/01/2016 18:36:10\"\n }\n :param file_path: Valid file path\n :return:\n \"\"\"\n assert file_path\n assert os.path.isfile(file_path)\n \n url = self.base_url + '/v1/detector/create-by-file'\n return self._upload_file(url, file_path)\n\n @retry\n def create_process_by_ocr(self, file_path, language='English'):\n \"\"\"\n Creates a process by uploading a file to scan. Returns a response of the format:\n {\n \"ProcessId\": \"23af0314-2fb9-4b91-959f-87848572df1f\",\n \"CreationTimeUTC\": \"01/01/2016 18:36:10\"\n }\n :param file_path: Valid file path\n :param language: OCR language codes as given here https://api.copyleaks.com/Documentation/OcrLanguages\n :return:\n \"\"\"\n assert file_path and language\n url = self.base_url + ('/v1/detector/create-by-file-ocr?language=%s' % language)\n return self._upload_file(url, file_path)\n\n @retry\n def get_process_status(self, process_id):\n \"\"\"\n Gets the scan progress and response looks like:\n {\n \"Status\": \"Processing\",\n \"ProgressPercents\": 50\n }\n :param process_id: A valid active process id\n :return:\n \"\"\"\n assert process_id\n url = self.base_url + ('/v1/detector/%s/status' % (process_id))\n response = requests.get(url, headers=self.headers)\n assert response.status_code == 200\n assert 'ProgressPercents' in response.json()\n assert 'Status' in response.json()\n return response.json()\n\n @retry\n def get_process_result(self, process_id):\n \"\"\"\n Gets the scan result and response looks like:\n [\n {\n \"URL\": \"http://site1.com/result\",\n \"Percents\": 99,\n \"NumberOfCopiedWords\": 250\n },\n {\n \"URL\": \"http://site2.com/result\",\n \"Percents\": 50,\n \"NumberOfCopiedWords\": 162\n }\n ]\n :param process_id: A valid active process id\n :return:\n \"\"\"\n assert process_id\n url = self.base_url + ('/v1/detector/%s/result' % (process_id))\n response = requests.get(url, headers=self.headers)\n assert response.status_code == 200\n return response.json()\n\n @retry\n def get_all_processes(self):\n \"\"\"\n Returns a list of all active processes. 
Response looks like:\n [\n {\n \"ProcessId\": \"e4d544af-da8c-4767-8dd0-f044191fa161\",\n \"CreationTimeUTC\": \"01/01/2016 18:36:10\",\n \"Status\": 0,\n \"CustomFields\": {\n \"key 1\": \"sample string 1\",\n \"key 2\": \"sample string 2\"\n }\n },\n {\n \"ProcessId\": \"ai282ns9-1jss-2918-11ks-sj29s2992kk1\",\n \"CreationTimeUTC\": \"02/02/2016 11:31:20\",\n \"Status\": 0,\n \"CustomFields\": {\n \"key 3\": \"sample string 3\",\n \"key 4\": \"sample string 4\"\n }\n }\n ]\n :return:\n \"\"\"\n url = self.base_url + '/v1/detector/list'\n response = requests.get(url, headers=self.headers)\n assert response.status_code == 200\n return response.json()\n\n @retry\n def delete_process(self, process_id):\n \"\"\"\n Deletes a process and returns status code of 200 in case of success.\n :param process_id: A valid active process id\n :return:\n \"\"\"\n assert process_id\n url = self.base_url + ('/v1/detector/%s/delete' % (process_id))\n response = requests.delete(url, headers=self.headers)\n assert response.status_code == 200\n\n","sub_path":"copyleaks_sdk/copyleaks.py","file_name":"copyleaks.py","file_ext":"py","file_size_in_byte":7818,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"502586711","text":"# Copyright (c) 2015 Ultimaker B.V.\n# Uranium is released under the terms of the AGPLv3 or higher.\n\nfrom UM.Event import MouseEvent, KeyEvent\nfrom UM.Tool import Tool\nfrom UM.Application import Application\nfrom UM.Scene.BoxRenderer import BoxRenderer\nfrom UM.Scene.RayRenderer import RayRenderer\nfrom UM.Scene.Selection import Selection\nfrom UM.Scene.Iterator.BreadthFirstIterator import BreadthFirstIterator\n\nfrom PyQt5.QtGui import qAlpha, qRed, qGreen, qBlue\nfrom PyQt5 import QtCore, QtWidgets\n\nclass SelectionTool(Tool):\n PixelSelectionMode = 1\n BoundingBoxSelectionMode = 2\n\n def __init__(self):\n super().__init__()\n\n self._scene = Application.getInstance().getController().getScene()\n self._renderer = Application.getInstance().getRenderer()\n\n self._selection_pass = None\n\n self._selection_mode = self.PixelSelectionMode\n self._ctrl_is_active = None\n \n def checkModifierKeys(self, event):\n modifiers = QtWidgets.QApplication.keyboardModifiers()\n self._ctrl_is_active = modifiers == QtCore.Qt.ControlModifier\n\n def setSelectionMode(self, mode):\n self._selection_mode = mode\n\n def event(self, event):\n if self._selection_pass is None:\n self._selection_pass = self._renderer.getRenderPass(\"selection\")\n\n self.checkModifierKeys(event)\n if event.type == MouseEvent.MousePressEvent and MouseEvent.LeftButton in event.buttons:\n if self._selection_mode == self.PixelSelectionMode:\n self._pixelSelection(event)\n else:\n self._boundingBoxSelection(event)\n\n return False\n\n def _boundingBoxSelection(self, event):\n root = self._scene.getRoot()\n\n ray = self._scene.getActiveCamera().getRay(event.x, event.y)\n\n intersections = []\n for node in BreadthFirstIterator(root):\n if node.isEnabled() and not node.isLocked():\n intersection = node.getBoundingBox().intersectsRay(ray)\n if intersection:\n intersections.append((node, intersection[0], intersection[1]))\n\n if intersections:\n intersections.sort(key=lambda k: k[1])\n\n node = intersections[0][0]\n if not Selection.isSelected(node):\n if not self._ctrl_is_active:\n Selection.clear()\n Selection.add(node)\n else:\n Selection.clear()\n\n def _pixelSelection(self, event):\n item_id = self._selection_pass.getIdAtPosition(event.x, event.y)\n\n if not item_id:\n 
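# the click landed on empty space, so clear any current selection and return\n 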
Selection.clear()\n return\n\n for node in BreadthFirstIterator(self._scene.getRoot()):\n if id(node) == item_id:\n if self._ctrl_is_active:\n if Selection.isSelected(node):\n if node.getParent():\n group_node = node.getParent()\n if not group_node.callDecoration(\"isGroup\"):\n Selection.remove(node)\n else:\n while group_node.getParent().callDecoration(\"isGroup\"):\n group_node = group_node.getParent()\n Selection.remove(group_node)\n else:\n if node.getParent():\n group_node = node.getParent()\n if not group_node.callDecoration(\"isGroup\"):\n Selection.add(node)\n else:\n while group_node.getParent().callDecoration(\"isGroup\"):\n group_node = group_node.getParent()\n Selection.add(group_node)\n else:\n if not Selection.isSelected(node) or Selection.getCount() > 1:\n Selection.clear()\n if node.getParent():\n group_node = node.getParent()\n if not group_node.callDecoration(\"isGroup\"):\n Selection.add(node)\n else:\n while group_node.getParent().callDecoration(\"isGroup\"):\n group_node = group_node.getParent()\n Selection.add(group_node)\n","sub_path":"plugins/Tools/SelectionTool/SelectionTool.py","file_name":"SelectionTool.py","file_ext":"py","file_size_in_byte":4441,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"54143230","text":"import pyttsx3\ntoSpeak = pyttsx3.init()\nstrList = ['Hi Friends How are you', 'Stage 1 Learning python',\n 'Stage 2-Mastering Python', 'Stage 3 Becoming world class programmer']\nfor i in strList:\n toSpeak.say(i)\n\ntoSpeak.runAndWait()\n\n# st=[{'[1,2,3]','3','Hello'}]\n# print(type(st))\n\n# print(\"iNautix Learning Welcomes You\"[:1:-2])\n# st1 ={'[1,2,3]','3','Hello'}\n\n# st.append(st1)\n# print(st)\n\n# i='12344'\n# print(i[-2:])\n","sub_path":"Audio_book.py","file_name":"Audio_book.py","file_ext":"py","file_size_in_byte":433,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"421613276","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Oct 03 09:03:54 2018\n\n@author: avenner\n\"\"\"\n#timestamp = np.genfromtxt('timestamp_' + str(filename), dtype = [('date','|S10'),('time','|S9')])\n#timestamp = np.loadtxt('timestamp_hdc104_cno10.txt', dtype = '|S19', delimiter = ',') \n\n# Programme to average multiple trials for wake, NREM and REM and plot percent time spent in various sleep stages against time\n\nimport os\nimport numpy as np\nimport numpy.ma as ma\nimport matplotlib.mlab as mb\nimport datetime as dt\nimport matplotlib.dates as md\nimport matplotlib.pyplot as plt\nimport scipy.stats as st\n\n\n### VARIABLES WE CAN CHANGE\nmouse = 'vip480_cno'\ncondition1 = 'wake'\ncondition2 = 'sws'\ncondition3 = 'rem'\ntimestamp = np.loadtxt('timestamp_vip461_sal10a.txt', dtype = '|S19', delimiter = ',') \nfig_title = '% sleep stages in ' + mouse + ' after 10am CNO'\npath = 'E:/VMPO Data/vip vmpo m3_sept2019/sleep_wake/results/' # insert the path to the directory of interest\nexport_folder = 'E:/VMPO Data/vip vmpo m3_sept2019/sleep_wake/results/averages/'\nvalue_threshold = -1\n\n\n### MANIPULATING DATA FOR PLOTTING\n\ndirList= os.listdir(path)\nfilename = []\nfilename_data = []\nfile_dict = {}\n\nfor fname in dirList:\n if mouse in fname:\n filename = np.append(filename, fname)\n\nfdata = list(filename)\n \nfor i in np.arange(filename.size): \n fdata[i] = np.loadtxt(str(path)+str(filename[i]))\n \n\nfor i in np.arange(filename.size):\n file_dict[filename[i]] = fdata[i]\n\nindex_keys = np.arange(len(file_dict.keys()))\nnum_files = []\nnum_files1 
= []\nnum_files2= []\n \nfor i in np.arange(index_keys.size):\n if condition1 in file_dict.keys()[i]:\n num_files = np.append(num_files, 1)\n else:\n if condition2 in file_dict.keys()[i] :\n num_files1 = np.append(num_files1, 1)\n else:\n if condition3 in file_dict.keys()[i] :\n num_files2 = np.append(num_files2, 1)\n \narraylen = 0 \nx = 0 \n\nfor i in np.arange(index_keys.size):\n if condition1 in file_dict.keys()[i]:\n x = np.size(file_dict.values()[i])\n if x > arraylen:\n arraylen = np.size(file_dict.values()[i])\n\nnull = -1\nextra = []\napp_values = [] \n \nfor i in np.arange(index_keys.size):\n if arraylen > np.size(file_dict.values()[i]):\n extra = arraylen - np.size(file_dict.values()[i])\n app_values = np.tile(null, extra)\n file_dict[file_dict.keys()[i]] = np.append(file_dict.values()[i], app_values)\n \nselected_values = np.zeros((len(num_files), arraylen))\nselected_keys = range(0, len(num_files)) \n\nselected_values1 = np.zeros((len(num_files1), arraylen))\nselected_keys1 = range(0, len(num_files1))\n\nselected_values2 = np.zeros((len(num_files2), arraylen))\nselected_keys2 = range(0, len(num_files2))\n \nq = -1\np = -1\nr = -1\n \nfor i in np.arange(index_keys.size):\n if condition1 in file_dict.keys()[i]:\n q = q + 1\n selected_keys[q] = file_dict.keys()[i] \n selected_values[q,:] = file_dict.values()[i]\n else:\n if condition2 in file_dict.keys()[i]:\n p = p + 1\n selected_keys1[p] = file_dict.keys()[i] \n selected_values1[p,:] = file_dict.values()[i]\n else:\n if condition3 in file_dict.keys()[i]:\n r = r + 1\n selected_keys2[r] = file_dict.keys()[i] \n selected_values2[r,:] = file_dict.values()[i]\n \nsorted_keys = np.sort(selected_keys)\norder_index = np.arange(len(num_files))\n\nsorted_keys1 = np.sort(selected_keys1)\norder_index1 = np.arange(len(num_files1))\n\nsorted_keys2 = np.sort(selected_keys2)\norder_index2 = np.arange(len(num_files2))\n \nfor i in np.arange(num_files.size):\n order_index[i] = mb.find(sorted_keys == selected_keys[i])\n \nfor i in np.arange(num_files1.size):\n order_index1[i] = mb.find(sorted_keys1 == selected_keys1[i]) \n\nfor i in np.arange(num_files2.size):\n order_index2[i] = mb.find(sorted_keys2 == selected_keys2[i]) \n \nsorted_values = np.zeros((len(selected_keys), arraylen))\nsorted_values1 = np.zeros((len(selected_keys1), arraylen))\nsorted_values2 = np.zeros((len(selected_keys2), arraylen))\n \nfor i in np.arange(num_files.size):\n sorted_values[order_index[i],:] = selected_values[i,:] \n \nfor i in np.arange(num_files1.size):\n sorted_values1[order_index1[i],:] = selected_values1[i,:] \n \nfor i in np.arange(num_files2.size):\n sorted_values2[order_index2[i],:] = selected_values2[i,:] \n \nmasked_values = ma.masked_equal(sorted_values, value_threshold) \nmasked_values1 = ma.masked_equal(sorted_values1, value_threshold) \nmasked_values2 = ma.masked_equal(sorted_values2, value_threshold) \n\n\n## CREATING A STABLE TIMESTAMP (i.e. 
one in which we can specify the day and month) \n \n#==============================================================================\ntimestamp_lis = list(timestamp) # makes timestamp into a list so we can manipulate it\ntimestamp_sep = list() # creates an open list into which we can input values\n \nfor d in np.arange(timestamp.size): # puts each time point into an ndarray of datetime.datetime objects whereupon each component of the timepoint is separated out\n timestamp_sep = np.append(timestamp_sep, dt.datetime.strptime(timestamp_lis[d], \"%m/%d/%Y %H:%M:%S\"))\n \nnew_timestamp = timestamp_sep #creates a new object that is the same size and type as timestamp_sep\ntime_zero = dt.datetime.date(timestamp_sep[0]) # gives the date only of the 1st timepoint of timestamp_sep\n \nfor d in np.arange(timestamp.size): # returns an ndarray of datetime objects which have had their dates changed to the 1st or 2nd day of the month reflecting 1st or 2nd day of recording\n if dt.datetime.date(timestamp_sep[d]) == time_zero:\n new_timestamp[d] = new_timestamp[d].replace(day = 1, month = 1)\n else:\n new_timestamp[d] = new_timestamp[d].replace(day = 2, month = 1)\n \nt_diff = md.date2num(timestamp_sep[1]) - md.date2num(timestamp_sep[0]) # gives the time difference of the first 2 timepoints as a float\nnew_time = md.num2date(md.date2num(timestamp_sep) + t_diff) # adds one time interval to each time, keeps as a datetime object\n \n \n # PLOTTING THE DATA\n#time = dt.datestr2num(timestamp)\n#t = dt.num2date(time)\nhours = md.HourLocator(interval = 1)\nhoursFmt = md.DateFormatter('%H')\n \n#new_time = [-1,0,1,2,3,4,5,6,7,8,9,10] \n \nmean_y1 = np.mean(masked_values, axis = 0)\nmean_y2 = np.mean(masked_values1, axis = 0)\nmean_y3 = np.mean(masked_values2, axis = 0)\n \ncum_y1 = np.cumsum(masked_values, axis = 1)\ncumy_y2 = np.cumsum(masked_values1, axis = 1)\ncumy_y3 = np.cumsum(masked_values2, axis = 1)\n \nplt.hold\nfig = plt.figure()\nax = fig.add_subplot(1,1,1)\nwake, = plt.plot(new_time, mean_y1, 'b-')\nsws, = plt.plot(new_time, mean_y2, 'g-')\nrem, = plt.plot(new_time, mean_y3, 'r-')\nplt.errorbar(new_time, mean_y1, yerr = st.sem(masked_values, axis = 0), fmt = 'b-') \nplt.errorbar(new_time, mean_y2, yerr = st.sem(masked_values1, axis =0), fmt = 'g') \nplt.errorbar(new_time, mean_y3, yerr = st.sem(masked_values2, axis =0), fmt = 'r')\n\n#ax.xaxis.set_major_locator(hours)\n#ax.xaxis.set_major_formatter(hoursFmt)\nplt.legend((wake,sws,rem),(condition1, condition2, condition3))\nplt.xlabel('Time (mins)')\nplt.ylabel('% time spent in stage')\nplt.title(fig_title) \n \nplt.hold\n\nnp.savetxt((str(export_folder) + str(mouse) +'_wakeAVG.txt'), mean_y1)\nnp.savetxt((str(export_folder) + str(mouse) +'_swsAVG.txt'), mean_y2)\nnp.savetxt((str(export_folder) + str(mouse) +'_remAVG.txt'), mean_y3)\n ","sub_path":"averaging.py","file_name":"averaging.py","file_ext":"py","file_size_in_byte":7580,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"208694040","text":"import psutil\nimport pygame\nimport math\n\n\npygame.init()\n\nscreen = pygame.display.set_mode((1000, 500))\n# for disk in psutil.disk_partitions():\n# free = psutil.disk_usage(disk.mountpoint).free/(1024*1024*1024)\n# total = psutil.disk_usage(disk.mountpoint).total/(1024*1024*1024)\n# used = psutil.disk_usage(disk.mountpoint).used/(1024*1024*1024)\n# ft = round(free/total * 100)\n# ut = round(used/total * 100)\n# print(disk.mountpoint)\n# print(f\"Free: {free:.4} gb (\" + str(ft) + \"%)\")\n# 
print(f\"Used: {used:.4} gb (\" + str(ut) + \"%)\")\n# print(f\"Total: {total:.4} gb (\" + str(100) + \"%)\")\n# print()\n\ndisks = psutil.disk_partitions()\niterator = 0\nclicked = False\nwhile True:\n pygame.draw.rect(screen, (0, 0, 0), pygame.Rect(0, 0, 1000, 500))\n font = pygame.font.Font(None, 50)\n free = psutil.disk_usage(disks[iterator].mountpoint).free / (1024 * 1024 * 1024)\n total = psutil.disk_usage(disks[iterator].mountpoint).total / (1024 * 1024 * 1024)\n used = psutil.disk_usage(disks[iterator].mountpoint).used / (1024 * 1024 * 1024)\n ft = round(free / total * 100)\n ut = round(used / total * 100)\n pygame.draw.arc(screen, (50, 50, 50), pygame.Rect(500, 75, 350, 350), 0, math.pi * 4, 175)\n pygame.draw.arc(screen, (255, 255, 255), pygame.Rect(500, 75, 350, 350), 0, math.pi * ft / 100, 175)\n screen.blit(font.render(f\"Free: {free:.2f} gb (\" + str(ft) + \"%)\", False, (255, 255, 255)), (10, 50))\n screen.blit(font.render(f\"Used: {used:.2f} gb (\" + str(ut) + \"%)\", False, (50, 50, 50)), (10, 100))\n screen.blit(font.render(f\"Total: {total:.2f} gb (\" + str(100) + \"%)\", False, (0, 255, 0)), (10, 150))\n screen.blit(font.render(f\"Name disk: {disks[iterator].mountpoint}\", False, (255, 255, 255)), (10, 0))\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n quit()\n if event.type == pygame.MOUSEBUTTONDOWN and not clicked:\n if event.button == 1:\n clicked = True\n mouse_x = event.pos[0]\n mouse_y = event.pos[1]\n if 950 <= mouse_x <= 1000 and 0 <= mouse_y <= 50:\n iterator += 1\n if iterator == len(disks):\n iterator = 0\n if event.type == pygame.MOUSEBUTTONUP and clicked:\n if event.button == 1:\n clicked = False\n disks = psutil.disk_partitions()\n pygame.display.update()\n","sub_path":"start.py","file_name":"start.py","file_ext":"py","file_size_in_byte":2441,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"447443700","text":"import torch\nimport torch.nn as nn\nfrom torch.nn import Sequential as Seq,Linear,ReLU,BatchNorm1d\nfrom torch_scatter import scatter_mean\nimport numpy as np\nimport json\nfrom . 
import copy_tensor,copy_layer\n\n\nclass model_io:\n SPECIAL_LAYERS=[\n \"node_mlp_2.3\",\n \"node_mlp_1.3\",\n \"edge_mlp.3\",\n \"bn\"\n ]\n \n def __init__(self,model,\n model_state_dict,\n activation_dest,):\n self.model=model\n self.model.load_state_dict(model_state_dict)\n self.dest=activation_dest\n\n # declare variables\n self.L=dict() # layers\n self.A=activation_dest # activations\n # self.R=dict() # relevance scores\n \n self._rules=dict() # rules to use for each layer\n self._hook_handles=[] # collection of all hook handles\n \n # extract layers and register hooks\n self._extract_layers(\"\",model,)\n self.n_layers=len(self.L.keys())\n\n # register rules for each layer\n self._register_rules()\n \n # register special layers\n self.special_layers=list()\n for key in model_io.SPECIAL_LAYERS:\n full_key=[layer_name for layer_name in self.L.keys() if key in layer_name][0]\n \n self.special_layers.append(full_key)\n\n\n \"\"\"\n rules functions\n \"\"\"\n def _register_rules(self):\n for layer_name in self.L.keys():\n layer=self.L[layer_name]\n layer_class=layer.__class__.__name__\n if layer_class==\"BatchNorm1d\":\n rule=\"z\"\n else:\n rule=\"eps\"\n self._rules[layer_name]=rule\n\n def get_rule(self,index=None,layer_name=None):\n assert (not index is None) or (not layer_name is None), \"at least one of (index,name) must be provided\" \n if layer_name is None:\n layer_name=self.index2name(index)\n\n if hasattr(self,\"_rules\"):\n return self._rules[layer_name]\n else:\n self._register_rules()\n return self._rules[layer_name]\n\n\n \"\"\"\n layer functions\n \"\"\"\n def _make_forward_hook(self,name):\n def get_activations(module,inputs,outputs):\n assert len(inputs)==1 # something specific about IN\n inputs=inputs[0]\n self.A[name]=copy_tensor(inputs)\n \n return get_activations\n\n def _extract_layers(self,name,model):\n l=list(model.named_children())\n \n if len(l)==0:\n self.L[name]=copy_layer(model)\n hook=self._make_forward_hook(name)\n self._hook_handles.append(model.register_forward_hook(hook))\n else:\n l=list(model.named_children())\n for i in l:\n self._extract_layers(name+\".\"+i[0],i[1])\n\n # self._clear_hooks()\n\n def get_layer(self,index=None,name=None):\n assert (not index is None) or (not name is None), \"at least one of (index,name) must be provided\" \n if name is None:\n name=self.index2name(index)\n\n return self.L[name]\n \n\n \"\"\"\n general getters\n \"\"\"\n def index2name(self,idx:int)->str:\n if not hasattr(self,\"_i2n\"):\n self._i2n=[]\n for i,n in enumerate(self.A.keys()):\n self._i2n.append(n)\n\n return self._i2n[idx]\n\n def name2index(self,name:str)->int:\n if not hasattr(self,\"_i2n\"):\n self._i2n=[]\n for i,n in enumerate(self.A.keys()):\n self._i2n.append(n)\n\n return self._i2n.index(name)\n\n \n \n \"\"\"\n reset and setter functions\n \"\"\"\n def _clear_hooks(self):\n for hook in self._hook_handles:\n hook.remove()\n\n def reset(self):\n \"\"\"\n reset the prepared model\n \"\"\"\n pass\n # self._clear_hooks()\n # self.A=dict()\n # self.R=dict()\n\n def set_dest(self,activation_dest):\n self.A=activation_dest","sub_path":"projects/project_65/src/util/model_io.py","file_name":"model_io.py","file_ext":"py","file_size_in_byte":3977,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"108467322","text":"# *args is an operator we can hand to functions. It gathers the arguments in a tuple. 
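For example, the call sum_all_nums(1, 4, 5) below receives args == (1, 4, 5) and returns 10. 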
\ndef sum_all_nums(*args):\n\ttotal = 0\n\tfor num in args:\n\t\ttotal += num\n\treturn total\n\nprint(sum_all_nums(1, 4, 5))\n\n# **kwargs is another operator that we can hand to functions. It gathers the keyword arguments as a dictionary. \ndef fav_colors(**kwargs):\n\tfor person, color in kwargs.items():\n\t\tprint(f\"{person}'s favorite color is {color}\")\n\nfav_colors(Falon=\"black\", Martha=\"teal\", Marie=\"magenta\")","sub_path":"kwargs.py","file_name":"kwargs.py","file_ext":"py","file_size_in_byte":485,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"456633925","text":"#!/usr/bin/python2.7\n'''\nCreated on Oct 9, 2013\n\n@author: carlos\nCode gathered from trevor's tips\n'''\n\nimport BearMate\ndef parseFile (inputFile):\n try:\n bearList = open(inputFile, 'r')\n except:\n print(\"Input File not found. Exiting\")\n exit(1)\n \n bears = []\n \n while True:\n oneLine = bearList.readline()\n if not oneLine:\n break\n lineSections = oneLine.split(\":\") \n newBear = BearMate.bear(lineSections[0], lineSections[1], lineSections[2], lineSections[3], lineSections[4])\n bears.append(newBear)\n \n return bears","sub_path":"src/BearParse.py","file_name":"BearParse.py","file_ext":"py","file_size_in_byte":617,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"386406085","text":"# -*- coding: utf-8 -*-\nimport itertools\nimport json\nimport os\nimport sys\nimport pytest\nimport time\nfrom urllib import urlencode\n\nfrom responses import (actionIndex_response, actionItems_response, actionPlay_response,\n actionView_seasons_response, actionView_without_seasons_response,\n watching_info_response_with_seasons, watching_info_response_without_seasons)\n\n\ncwd = os.path.dirname(os.path.abspath(__file__))\nhandle = 1\nplugin = \"plugin://video.kino.pub/{}\"\npytestmark = pytest.mark.usefixtures(\"fake_kodi_api\")\nqualities = [\"480p\", \"720p\", \"1080p\"]\nstreams = [\"hls\", \"hls4\", \"http\"]\n\n\nclass FakeAddon(object):\n def __init__(self, id=\"video.kino.pub\"):\n self._id = id\n self._settings = {\n \"access_token_expire\": str(int(time.time() + 1000)),\n \"video_quality\": \"720p\",\n \"stream_type\": \"hls4\"\n }\n\n def getAddonInfo(self, info_id):\n return {\"path\": cwd, \"id\": self._id}.get(info_id)\n\n def getSetting(self, setting_id):\n return self._settings.get(setting_id, \"\")\n\n def setSetting(self, setting_id, value):\n self._settings[setting_id] = value\n\n def getLocalizedString(self, id_):\n return {32000: u\"Привет, мир!\", 32001: u\"Я тебя люблю.\"}.get(id_)\n\n\n@pytest.fixture\ndef main():\n from default import main\n return main\n\n\n@pytest.fixture\ndef xbmcgui():\n from resources.lib.addonworker import xbmcgui\n return xbmcgui\n\n\n@pytest.fixture\ndef xbmcplugin():\n from resources.lib.addonworker import xbmcplugin\n return xbmcplugin\n\n\n@pytest.fixture\ndef settings():\n from resources.lib.data import __settings__\n return __settings__\n\n\n@pytest.fixture\ndef fake_kodi_api(mocker):\n \"\"\"Mock Kodi Python API\"\"\"\n mock_xbmcaddon = mocker.Mock()\n mock_xbmcaddon.Addon.side_effect = FakeAddon\n mocker.patch.dict(\"sys.modules\", xbmcaddon=mock_xbmcaddon, xbmc=mocker.Mock(),\n xbmcplugin=mocker.Mock(), xbmcgui=mocker.Mock())\n mocker.patch(\"resources.lib.addonworker.auth\")\n\n\n@pytest.fixture\ndef actionIndex(mocker):\n\n def side_effect(value):\n if value == \"types\":\n return mocker.Mock(**{\"get.return_value\": actionIndex_response})\n\n 
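# any endpoint other than \"types\" falls through side_effect and returns None\n 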
mock_KinoPubClient = mocker.Mock(side_effect=side_effect)\n mocker.patch(\"resources.lib.addonworker.KinoPubClient\", mock_KinoPubClient)\n mocker.patch.object(sys, \"argv\", [plugin.format(\"\"), handle, \"\"])\n\n\ndef test_actionIndex(mocker, actionIndex, main, xbmcplugin, xbmcgui):\n main()\n c = u\"[COLOR FFFFF000]{}[/COLOR]\"\n expected_results = [\n (handle, plugin.format(\"search\"), c.format(u\"Поиск\"), False),\n (handle, plugin.format(\"items\"), c.format(u\"Последние\"), True),\n (handle, plugin.format(\"items?sort=-rating\"), c.format(u\"Популярные\"), True),\n (handle, plugin.format(\"tv\"), c.format(u\"ТВ\"), True),\n (handle, plugin.format(\"bookmarks\"), c.format(u\"Закладки\"), True),\n (handle, plugin.format(\"watching\"), c.format(u\"Я смотрю\"), True),\n (handle, plugin.format(\"collections\"), c.format(u\"Подборки\"), True),\n (handle, plugin.format(\"index?type=movie\"), u\"Фильмы\", True),\n (handle, plugin.format(\"index?type=serial\"), u\"Сериалы\", True),\n (handle, plugin.format(\"index?type=tvshow\"), u\"ТВ шоу\", True),\n (handle, plugin.format(\"index?type=4k\"), u\"4K\", True),\n (handle, plugin.format(\"index?type=3d\"), u\"3D\", True),\n (handle, plugin.format(\"index?type=concert\"), u\"Концерты\", True),\n (handle, plugin.format(\"index?type=documovie\"), u\"Документальные фильмы\", True),\n (handle, plugin.format(\"index?type=docuserial\"), u\"Документальные сериалы\", True)\n ]\n for result in expected_results:\n handle_, link, title, is_directory = result\n xbmcgui.ListItem.assert_any_call(title.encode(\"utf-8\"))\n li = xbmcgui.ListItem()\n xbmcplugin.addDirectoryItem.assert_any_call(handle_, link, li, is_directory)\n xbmcplugin.endOfDirectory.assert_called_once_with(handle)\n\n\n@pytest.fixture(params=itertools.product(streams, qualities), ids=lambda ids: \"-\".join(ids))\ndef actionPlay(request, mocker, settings):\n settings.setSetting(\"stream_type\", request.param[0])\n settings.setSetting(\"video_quality\", request.param[1])\n id_ = actionPlay_response[\"item\"][\"id\"]\n\n def side_effect(value):\n if value == \"items/{}\".format(id_):\n return mocker.Mock(**{\"get.return_value\": actionPlay_response})\n else:\n return mocker.Mock()\n\n mock_KinoPubClient = mocker.Mock(side_effect=side_effect)\n mocker.patch(\"resources.lib.addonworker.KinoPubClient\", mock_KinoPubClient)\n title = actionPlay_response[\"item\"][\"title\"].encode(\"utf-8\")\n mocker.patch.object(sys, \"argv\", [\n plugin.format(\"play\"),\n handle,\n \"?{}\".format(urlencode({\"title\": title, \"id\": id_}))\n ])\n return request.param\n\n\ndef test_actionPlay(actionPlay, main, xbmcgui, xbmcplugin):\n stream, video_quality = actionPlay\n main()\n title = actionPlay_response[\"item\"][\"title\"].encode(\"utf-8\")\n xbmcgui.ListItem.assert_called_with(title)\n li = xbmcgui.ListItem(title)\n link = \"https://example.com/{}/{}\".format(stream, video_quality.rstrip(\"p\"))\n li.setPath.assert_called_once_with(link)\n xbmcplugin.setResolvedUrl.assert_called_once_with(handle, True, li)\n\n\n@pytest.fixture\ndef actionItems(mocker):\n\n def side_effect(value):\n if value == \"items\":\n return mocker.Mock(**{\"get.return_value\": actionItems_response})\n\n mock_KinoPubClient = mocker.Mock(side_effect=side_effect)\n mocker.patch(\"resources.lib.addonworker.KinoPubClient\", mock_KinoPubClient)\n mocker.patch.object(sys, \"argv\", [plugin.format(\"items\"), handle, \"\"])\n\n\ndef test_actionItems(main, actionItems, xbmcgui, xbmcplugin):\n main()\n s = plugin\n i = [item[\"id\"] for item 
in actionItems_response[\"items\"]]\n t = [item[\"title\"].encode(\"utf-8\") for item in actionItems_response[\"items\"]]\n expected_results = [\n (handle, s.format(\"play?{}\".format(urlencode({\"id\": i[0], \"title\": t[0]}))), t[0], False),\n (handle, s.format(\"play?{}\".format(urlencode({\"id\": i[1], \"title\": t[1]}))), t[1], False),\n (handle, s.format(\"view?id={}\".format(i[2])), t[2], True),\n (handle, s.format(\"view?id={}\".format(i[3])), t[3], True),\n (handle, s.format(\"view?id={}\".format(i[4])), t[4], True)\n ]\n for result in expected_results:\n handle_, link, title, is_directory = result\n xbmcgui.ListItem.assert_any_call(title)\n li = xbmcgui.ListItem()\n xbmcplugin.addDirectoryItem.assert_any_call(handle_, link, li, is_directory)\n xbmcplugin.endOfDirectory.assert_called_once_with(handle)\n\n\n@pytest.fixture\ndef actionView(mocker):\n id_ = actionView_seasons_response[\"item\"][\"id\"]\n\n def side_effect(value):\n if value == \"items/{}\".format(id_):\n return mocker.Mock(**{\"get.return_value\": actionView_seasons_response})\n elif value == \"watching\":\n return mocker.Mock(**{\"get.return_value\": watching_info_response_with_seasons})\n\n mock_KinoPubClient = mocker.Mock(side_effect=side_effect)\n mocker.patch(\"resources.lib.addonworker.KinoPubClient\", mock_KinoPubClient)\n return id_\n\n\n@pytest.fixture\ndef actionView_seasons(mocker, actionView):\n mocker.patch.object(sys, \"argv\", [plugin.format(\"view\"), handle, \"?id={}\".format(actionView)])\n\n\ndef test_actionView_seasons(main, actionView_seasons, xbmcgui, xbmcplugin):\n main()\n i = actionView_seasons_response[\"item\"][\"id\"]\n seasons = actionView_seasons_response[\"item\"][\"seasons\"]\n for season in seasons:\n xbmcgui.ListItem.assert_any_call(\"Сезон {}\".format(season[\"number\"]))\n link = plugin.format(\"view_season_episodes?season={}&id={}\".format(season[\"number\"], i))\n xbmcplugin.addDirectoryItem.assert_any_call(handle, link, xbmcgui.ListItem(), True)\n xbmcplugin.endOfDirectory.assert_called_once_with(handle)\n\n\n@pytest.fixture\ndef actionView_episodes(mocker, actionView):\n mocker.patch.object(sys, \"argv\", [\n plugin.format(\"view_season_episodes\"),\n handle,\n \"?id={}&season={}\".format(actionView, 1)\n ])\n\n\ndef test_actionView_episodes(request, main, actionView_episodes, xbmcgui, xbmcplugin):\n main()\n item = actionView_seasons_response[\"item\"]\n i = item[\"id\"]\n season = item[\"seasons\"][0]\n for episode in season[\"episodes\"]:\n episode_title = \"s{:02d}e{:02d}\".format(season[\"number\"], episode[\"number\"])\n if episode[\"title\"]:\n episode_title = \"{} | {}\".format(\n episode_title, episode[\"title\"].encode(\"utf-8\"))\n link = plugin.format(\"play?{}\".format(urlencode({\n \"id\": i,\n \"title\": episode_title,\n \"season\": season[\"number\"],\n \"number\": episode[\"number\"],\n \"video\": json.dumps(episode)\n })))\n xbmcgui.ListItem.assert_any_call(\n episode_title,\n iconImage=episode[\"thumbnail\"],\n thumbnailImage=episode[\"thumbnail\"]\n )\n li = xbmcgui.ListItem()\n li.setInfo.assert_any_call(\"Video\", {\"playcount\": episode[\"watched\"]})\n li.setArt.assert_called_once_with({\"poster\": item[\"posters\"][\"big\"]})\n li.setProperty.assert_called_once_with(\"IsPlayable\", \"true\")\n xbmcplugin.addDirectoryItem.assert_any_call(handle, link, xbmcgui.ListItem(), False)\n xbmcplugin.setContent.assert_called_once_with(handle, \"episodes\")\n xbmcplugin.endOfDirectory.assert_called_once_with(handle)\n\n\n@pytest.fixture\ndef 
actionView_standalone_episodes(mocker):\n id_ = actionView_without_seasons_response[\"item\"][\"id\"]\n\n def side_effect(value):\n if value == \"items/{}\".format(id_):\n return mocker.Mock(**{\"get.return_value\": actionView_without_seasons_response})\n elif value == \"watching\":\n return mocker.Mock(**{\"get.return_value\": watching_info_response_without_seasons})\n\n mock_KinoPubClient = mocker.Mock(side_effect=side_effect)\n mocker.patch(\"resources.lib.addonworker.KinoPubClient\", mock_KinoPubClient)\n mocker.patch.object(sys, \"argv\", [\n plugin.format(\"view\"),\n handle,\n \"?{}\".format(urlencode({\"id\": id_}))\n ])\n\n\ndef test_actionView_standalone_episodes(request, main, actionView_standalone_episodes, xbmcgui,\n xbmcplugin):\n main()\n item = actionView_without_seasons_response[\"item\"]\n for video in item[\"videos\"]:\n episode_title = \"e{:02d}\".format(video[\"number\"])\n if video[\"title\"]:\n episode_title = \"{} | {}\".format(episode_title, video[\"title\"].encode(\"utf-8\"))\n link = plugin.format(\"play?{}\".format(urlencode({\n \"id\": item[\"id\"],\n \"title\": episode_title,\n \"number\": video[\"number\"],\n \"video\": json.dumps(video)\n })))\n xbmcgui.ListItem.assert_any_call(\n episode_title,\n iconImage=video[\"thumbnail\"],\n thumbnailImage=video[\"thumbnail\"]\n )\n li = xbmcgui.ListItem()\n li.setInfo.assert_any_call(\"Video\", {\"playcount\": video[\"watched\"]})\n li.setArt.assert_any_call({\"poster\": item[\"posters\"][\"big\"]})\n li.setProperty.assert_any_call(\"IsPlayable\", \"true\")\n xbmcplugin.addDirectoryItem.assert_any_call(handle, link, xbmcgui.ListItem(), False)\n xbmcplugin.setContent.assert_called_once_with(handle, \"episodes\")\n xbmcplugin.endOfDirectory.assert_called_once_with(handle)\n","sub_path":"video.kino.pub/tests/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":11690,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"607666651","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n__author__ = 'valerio cosentino'\n\nimport lizard\nimport subprocess\nimport os\nimport util\nimport code2db_extract_commit_file\n\n\nclass CodeQuerier():\n \"\"\"\n This class collects the code function data using the Lizard python library\n \"\"\"\n CLOC_PATH = os.path.dirname(util.__file__) + \"\\cloc\\cloc-1.72.exe\"\n ALLOWED_EXTENSIONS = ['java', 'py', 'php', 'scala', 'js', 'rb', 'cs', 'cpp', 'c']\n FORBIDDEN_EXTENSIONS = ['tar', 'bz2', \"gz\", \"lz\", \"apk\", \"tbz2\", \"lzma\", \"tlz\", \"war\", \"xar\", \"zip\", \"zipx\"]\n\n def __init__(self, logger, tmp_path):\n \"\"\"\n :type logger: Object\n :param logger: logger\n\n :type: tmp_path: str\n :param: tmp_path: tmp file path\n \"\"\"\n try:\n self._logger = logger\n self._tmp_path = tmp_path\n except:\n self._logger.error(\"FunQuerier init failed\")\n raise\n\n def get_comment_info(self, f):\n info = {'blanks': None, 'comments': None, 'loc': None}\n flag = False\n try:\n with open(self._tmp_path, \"w+\") as _write:\n pipe = subprocess.Popen([CodeQuerier.CLOC_PATH, f], stdout=_write)\n pipe.communicate()\n\n with open(self._tmp_path, \"r\") as _read:\n for line in _read:\n if flag:\n if not line.startswith(\"-----\"):\n digested = \" \".join(line.split())\n info_file = digested.split(\" \")\n blank_lines = int(info_file[2])\n commented_lines = int(info_file[3])\n loc = int(info_file[4])\n info = {'blanks': blank_lines, 'comments': commented_lines, 'loc': loc}\n break\n\n if line.lower().startswith(\"language\"):\n 
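# cloc prints a \"Language ...\" header row before the per-file stats, so flag that parsing starts on the following lines\n 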
flag = True\n        except Exception:\n            self._logger.warning(\"something went wrong when extracting comment info from \" + f, exc_info=True)\n\n        return info\n\n    def get_complexity_info(self, f, import_type):\n        funs = []\n        i = lizard.analyze_file(f)\n\n        info_comments = self.get_comment_info(f)\n\n        # if i.nloc != info_comments.get('loc'):\n        #     self._logger.warning(\"CLOC and Lizard report different LOC : \" + str(i.nloc) + \" (Lizard) \"\n        #                          + str(info_comments.get('loc')) + \" (CLOC)\")\n\n        overall = {'ccn': i.CCN,\n                   'avg_ccn': i.average_cyclomatic_complexity,\n                   'avg_loc': i.average_nloc,\n                   'avg_tokens': i.average_token_count,\n                   'funs': len(i.function_list),\n                   'loc': i.nloc,\n                   'tokens': i.token_count,\n                   'comments': info_comments.get('comments'),\n                   'blanks': info_comments.get('blanks')\n                   }\n\n        if import_type == code2db_extract_commit_file.Code2DbCommitFile.FULL_IMPORT_TYPE:\n            for fun in i.function_list:\n                funs.append(\n                    {'ccn': fun.cyclomatic_complexity,\n                     'tokens': fun.token_count,\n                     'loc': fun.nloc,\n                     'lines': fun.length,\n                     'name': fun.name,\n                     'args': fun.parameter_count,\n                     'start': fun.start_line,\n                     'end': fun.end_line\n                     })\n\n        return overall, funs","sub_path":"importers/vcs/git/querier_code.py","file_name":"querier_code.py","file_ext":"py","file_size_in_byte":3519,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
{"seq_id":"234849826","text":"from __future__ import print_function\nimport Pyro4.core\nimport Pyro4.naming\nimport chain\n\nthis_node = \"A\"\nnext_node = \"B\"\n\nservername = \"example.chain.\" + this_node\n\nwith Pyro4.core.Daemon() as daemon:\n    obj = chain.Chain(this_node, next_node)\n    uri = daemon.register(obj)\n    with Pyro4.naming.locateNS() as ns:\n        ns.register(servername, uri)\n\n    # enter the service loop.\n    print(\"Server started %s\" % this_node)\n    daemon.requestLoop()\n","sub_path":"examples/circular/servA.py","file_name":"servA.py","file_ext":"py","file_size_in_byte":453,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
{"seq_id":"471344087","text":"from functools import reduce\nfrom models.custom import BatchNorm2d\nimport torch\nimport torch.nn as nn\n\n\nclass FeatureExtractor(nn.Module):\n\n    def __init__(self, cnn):\n        super(FeatureExtractor, self).__init__()\n        features = cnn.features\n        index = [\n            i for i, layer in enumerate(features.children()) \\\n            if isinstance(layer, nn.MaxPool2d)\n        ][4]\n        self.features = features[:index]\n        mean = torch.Tensor([0.485, 0.456, 0.406])\n        std = torch.Tensor([0.229, 0.224, 0.225])\n        self.register_buffer('mean', mean.view(1,3,1,1))\n        self.register_buffer('std', std.view(1,3,1,1))\n\n    def forward(self, x):\n        # normalize with the ImageNet mean/std buffers and extract features from the normalized tensor\n        z = (x - self.mean) / self.std\n        return self.features(z).squeeze()\n\n\nclass SRResNet(nn.Module):\n\n    def __init__(self, ngb=16, upf=4):\n        super(SRResNet, self).__init__()\n        resnet_blocks = [_resnet_block(64, 64) for _ in range(ngb)]\n        resnet_blocks.append(BNConv2dBlock(64, 64, p=1, n=True))\n        self.network = nn.ModuleList([\n            BNConv2dBlock(3, 64, k=9, p=4, a='PReLU()'),\n            ResidualBlock(nn.Sequential(*resnet_blocks)),\n            *[UpsampleBlock(64, 64) for _ in range(upf // 2)],\n            BNConv2dBlock(64, 3, k=9, p=4),\n        ])\n\n    def forward(self, x, update_stats=True):\n        return reduce(lambda z, f: f(z, update_stats), self.network, x)\n\n\nclass Discriminator_96(nn.Module):\n\n    def __init__(self):\n        super(Discriminator_96, self).__init__()\n        self.network = nn.Sequential(\n            Conv2dBlock(3, 64, s=1, p=1, a='LeakyReLU(0.2, True)'),\n            Conv2dBlock(64, 64, s=2, p=1, n=True, 
a='LeakyReLU(0.2, True)'),\n Conv2dBlock(64, 128, s=1, p=1, n=True, a='LeakyReLU(0.2, True)'),\n Conv2dBlock(128, 128, s=2, p=1, n=True, a='LeakyReLU(0.2, True)'),\n Conv2dBlock(128, 256, s=1, p=1, n=True, a='LeakyReLU(0.2, True)'),\n Conv2dBlock(256, 256, s=2, p=1, n=True, a='LeakyReLU(0.2, True)'),\n Conv2dBlock(256, 512, s=1, p=1, n=True, a='LeakyReLU(0.2, True)'),\n Conv2dBlock(512, 512, s=2, p=1, n=True, a='LeakyReLU(0.2, True)'),\n Conv2dBlock(512, 1024, k=6, a='LeakyReLU(0.2, True)'),\n Conv2dBlock(1024, 1, k=1),\n )\n\n def forward(self, x):\n return self.network(x).squeeze()\n\n\ndef _resnet_block(nc_inp, nc_out):\n return ResidualBlock(nn.ModuleList([\n BNConv2dBlock(nc_inp, nc_out, p=1, n=True, a='PReLU()'),\n BNConv2dBlock(nc_inp, nc_out, p=1, n=True),\n ]))\n\n\nclass ResidualBlock(nn.Module):\n \n def __init__(self, block):\n super(ResidualBlock, self).__init__()\n self.block = block\n\n def forward(self, x, update_stats=True):\n return x + reduce(lambda z, f: f(z, update_stats), self.block, x)\n\n def __repr__(self):\n repr_string = str(self.block).split('\\n')\n repr_string = list(map(lambda x: '\\n│' + x, repr_string))\n repr_string[-1] = repr_string[-1][0] + '▼' + repr_string[-1][2:]\n return self._get_name() + ''.join(repr_string)\n\n\nclass BNConv2dBlock(nn.Module):\n\n def __init__(self, nc_inp, nc_out, k=3, s=1, p=0, n=False, a=None):\n super(BNConv2dBlock, self).__init__()\n self.P = nn.ZeroPad2d(p) if p else lambda x: x\n self.C = nn.Conv2d(nc_inp, nc_out, k, s)\n self.N = BatchNorm2d(nc_out) if n else lambda x, z: x\n self.A = eval(f'nn.{a}') if a else lambda x: x\n self.weight_init()\n\n def weight_init(self):\n a = {\n nn.LeakyReLU: 0.2 ,\n nn.PReLU : 0.25,\n nn.ReLU : 0.0 ,\n }.get(self.A.__class__, 1.0)\n\n nn.init.kaiming_normal_(self.C.weight, a=a, mode='fan_in')\n self.C.bias.data.zero_()\n\n if isinstance(self.N, nn.BatchNorm2d):\n self.N.weight.data.fill_(1)\n self.N.bias.data.zero_()\n\n def forward(self, x, update_stats=True):\n return self.A(self.N(self.C(self.P(x)), update_stats))\n\n\nclass Conv2dBlock(nn.Module):\n\n def __init__(self, nc_inp, nc_out, k=3, s=1, p=0, n=False, a=None):\n super(Conv2dBlock, self).__init__()\n self.P = nn.ZeroPad2d(p) if p else lambda x: x\n self.C = nn.Conv2d(nc_inp, nc_out, k, s)\n self.N = nn.BatchNorm2d(nc_out) if n else lambda x: x\n self.A = eval(f'nn.{a}') if a else lambda x: x\n self.weight_init()\n\n def weight_init(self):\n a = {\n nn.LeakyReLU: 0.2 ,\n nn.PReLU : 0.25,\n nn.ReLU : 0.0 ,\n }.get(self.A.__class__, 1.0)\n\n nn.init.kaiming_normal_(self.C.weight, a=a, mode='fan_in')\n self.C.bias.data.zero_()\n\n if isinstance(self.N, nn.BatchNorm2d):\n self.N.weight.data.fill_(1)\n self.N.bias.data.zero_()\n\n def forward(self, x):\n return self.A(self.N(self.C(self.P(x))))\n\n\nclass UpsampleBlock(nn.Module):\n\n def __init__(self, nc_inp, nc_out, upf=2):\n super(UpsampleBlock, self).__init__()\n self.block = nn.ModuleList([\n BNConv2dBlock(nc_inp, nc_out * (upf ** 2), p=1),\n nn.PixelShuffle(upf),\n nn.PReLU(),\n ])\n\n def forward(self, x, update_stats=True):\n return reduce(lambda z, f: f(z, update_stats) if isinstance(f, BNConv2dBlock) else f(z), self.block, x)\n\n\nif __name__ == '__main__':\n from torchvision.models import vgg19\n F = FeatureExtractor(vgg19(pretrained=True))\n G = SRResNet(16, 4)\n D = Discriminator_96()\n x = torch.randn(64, 3, 24, 24)\n\n print('======================================= FeatureExtractor =======================================')\n print(F)\n print('Input :', 
list(x.size()))\n print('Output:', list(F(x).size()))\n\n print('=========================================== SRResNet ===========================================')\n print(G)\n print('Input :', list(x.size()))\n print('Output:', list(G(x).size()))\n\n print('======================================= Discriminator_96 =======================================')\n print(D)\n print('Input :', list(G(x).size()))\n print('Output:', list(D(G(x)).size()))\n","sub_path":"models/srgan.py","file_name":"srgan.py","file_ext":"py","file_size_in_byte":6141,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"150897760","text":"import datetime\nimport weasyprint\n\nfrom django.contrib.admin import AdminSite\nfrom django.contrib import admin, messages\nfrom django.db.models import Sum\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom django.forms import ValidationError\nfrom django.shortcuts import redirect\nfrom django.template.loader import get_template\nfrom django.utils.formats import date_format\nfrom django.utils.translation import gettext_lazy as _\nfrom django.utils.safestring import mark_safe\n\nfrom . import forms, models, idoklad\n\n\nclass ProductionAdminSite(AdminSite):\n site_header = \"FFpasta - výroba\"\n site_title = \"Vyroba\"\n index_title = \"Vyroba\"\n\n def has_permission(self, request):\n return request.user.is_active and request.user.is_worker\n\n\nproduction_admin = ProductionAdminSite(name='production_admin')\n\n\ndef make_assign_to_price_category(price_category):\n def assign_to_price_category(modeladmin, request, queryset):\n for product in queryset:\n changed = product.assign_to_price_category(price_category)\n if changed:\n messages.info(request, f'Produkt { product.name } zařazen do cenové kategorie { price_category.name }')\n\n assign_to_price_category.short_description = f'Zařadit do cenové kategorie { price_category.name }'\n assign_to_price_category.__name__ = f'assign_to_price_category_{ price_category.pk }'\n\n return assign_to_price_category\n\n\nclass PublishMixin:\n actions = ['publish', 'hide']\n\n def publish(self, request, queryset):\n queryset.update(published=True)\n\n def hide(self, request, queryset):\n queryset.update(published=False)\n\n publish.short_description = 'Publikovat'\n hide.short_description = 'Skrýt'\n\n\nclass ProductMixin:\n actions = ['activate', 'deactivate']\n\n def activate(self, request, queryset):\n queryset.update(active=True)\n\n def deactivate(self, request, queryset):\n queryset.update(active=False)\n\n activate.short_description = 'Zařadit do nabídky'\n deactivate.short_description = 'Vyřadit z nabídky'\n\n def get_actions(self, request):\n actions = super().get_actions(request)\n\n for price_category in list(models.PriceCategory.objects.all()):\n action = make_assign_to_price_category(price_category)\n actions[action.__name__] = (action, action.__name__, action.short_description)\n return actions\n\n\n@admin.register(models.PriceCategory)\nclass PriceCategoryAdmin(admin.ModelAdmin):\n list_display = ['name', 'unit_price']\n\n\n@admin.register(models.Pasta)\nclass PastaAdmin(ProductMixin, PublishMixin, admin.ModelAdmin):\n list_display = ['name', 'length', 'price_category', 'get_unit_price', 'published', 'active', 'in_stock']\n list_filter = ['length', 'price_category']\n\n\n@admin.register(models.Sauce)\nclass SauceAdmin(ProductMixin, PublishMixin, admin.ModelAdmin):\n list_display = ['name', 'sauce_type', 'price_category', 'get_unit_price', 'published', 'active', 
'in_stock']\n list_filter = ['sauce_type']\n\n\n@admin.register(models.Delivery)\nclass DeliveryAdmin(admin.ModelAdmin):\n list_display = ['name', 'description', 'monday', 'tuesday', 'wednesday', 'thursday',\n 'friday', 'saturday', 'sunday']\n\n\n@admin.register(models.Address)\nclass AddressAdmin(admin.ModelAdmin):\n filter_horizontal = ['delivery']\n\n\nclass PriceInline(admin.TabularInline):\n model = models.Price\n form = forms.PriceAdminForm\n extra = 0\n\n\nclass AddressInline(admin.TabularInline):\n model = models.Address\n filter_horizontal = ['delivery']\n extra = 0\n\n\nclass HasIcoFilter(admin.SimpleListFilter):\n title = 'Ičo'\n parameter_name = 'má ičo'\n\n def lookups(self, request, model_admin):\n return (\n ('Yes', 'Ano'),\n ('No', 'Ne'),\n )\n\n def queryset(self, request, queryset):\n value = self.value()\n if value == 'Yes':\n return queryset.filter(ico__isnull=False)\n elif value == 'No':\n return queryset.filter(ico__isnull=True)\n return queryset\n\n\nclass HasIdIDokladFilter(admin.SimpleListFilter):\n title = 'Kontakt iDokladu'\n parameter_name = 'kontakt iDokladu'\n\n def lookups(self, request, model_admin):\n return (\n ('Yes', 'Ano'),\n ('No', 'Ne'),\n )\n\n def queryset(self, request, queryset):\n value = self.value()\n if value == 'Yes':\n return queryset.filter(id_idoklad__isnull=False)\n elif value == 'No':\n return queryset.filter(id_idoklad__isnull=True)\n return queryset\n\n\n@admin.register(models.Customer)\nclass CustomerAdmin(admin.ModelAdmin):\n form = forms.CustomerAdminForm\n list_display = ['__str__', 'ico', 'user', 'has_id_idoklad', 'has_delivery', 'email_is_verified']\n readonly_fields = ['email_is_verified']\n list_filter = [HasIcoFilter, HasIdIDokladFilter]\n actions = ['sync_customers_to_idoklad', 'sync_customers_from_idoklad']\n inlines = [AddressInline, PriceInline]\n fieldsets = (\n (None, {\n 'fields': ('name', 'ico', 'user'),\n }),\n ('Fakturační adresa', {\n 'fields': ('street', 'postal_code', 'city', 'same_delivery_address'),\n }),\n )\n\n def get_readonly_fields(self, request, obj=None):\n return ['user', 'id_idoklad'] if obj else ['id_idoklad']\n\n def has_id_idoklad(self, obj):\n return bool(obj.id_idoklad)\n\n def has_delivery(self, obj):\n return obj.delivery_addresses.all().exists() and not obj.delivery_addresses.filter(delivery=None)\n\n def sync_customers_to_idoklad(self, request, queryset):\n idoklad.sync_customers_to_idoklad(customers=queryset)\n\n def sync_customers_from_idoklad(self, request, queryset):\n idoklad.sync_customers_from_idoklad(customers=queryset)\n\n sync_customers_to_idoklad.short_description = 'synchronizovat kontakty v iDokladu podle zákazníků'\n sync_customers_from_idoklad.short_description = 'synchronizovat zákazníky podle kontaktů v iDokladu'\n has_id_idoklad.short_description = 'kontakt iDokladu'\n has_id_idoklad.boolean = True\n has_delivery.short_description = 'závoz nastaven'\n has_delivery.boolean = True\n\n\nclass ItemInline(admin.TabularInline):\n model = models.Item\n form = forms.ItemAdminForm\n formset = forms.ItemAdminFormSet\n extra = 0\n\n\nclass FutureDateFieldFilter(admin.FieldListFilter):\n def __init__(self, field, request, params, model, model_admin, field_path):\n self.field_generic = '%s__' % field_path\n self.date_params = {k: v for k, v in params.items() if k.startswith(self.field_generic)}\n\n today = datetime.date.today()\n\n self.lookup_kwarg_since = '%s__gte' % field_path\n self.lookup_kwarg_until = '%s__lt' % field_path\n self.links = (\n (_('Today'), {\n self.lookup_kwarg_since: 
str(today),\n self.lookup_kwarg_until: str(today + datetime.timedelta(days=1)),\n }),\n ('následujících 7 dní', {\n self.lookup_kwarg_since: str(today),\n self.lookup_kwarg_until: str(today + datetime.timedelta(days=8)),\n }),\n ('následujících 30 dní', {\n self.lookup_kwarg_since: str(today),\n self.lookup_kwarg_until: str(today + datetime.timedelta(days=31)),\n }),\n (_('Any date'), {}),\n )\n if field.null:\n self.lookup_kwarg_isnull = '%s__isnull' % field_path\n self.links += (\n (_('No date'), {self.field_generic + 'isnull': 'True'}),\n (_('Has date'), {self.field_generic + 'isnull': 'False'}),\n )\n super().__init__(field, request, params, model, model_admin, field_path)\n\n def expected_parameters(self):\n params = [self.lookup_kwarg_since, self.lookup_kwarg_until]\n if self.field.null:\n params.append(self.lookup_kwarg_isnull)\n return params\n\n def choices(self, changelist):\n for title, param_dict in self.links:\n yield {\n 'selected': self.date_params == param_dict,\n 'query_string': changelist.get_query_string(param_dict, [self.field_generic]),\n 'display': title,\n }\n\n\ndef clean_status(self):\n data = self.cleaned_data.get('status')\n if self.instance.invoiced and data < models.Order.CONFIRMED:\n raise ValidationError('Vyfakturovanou objednávku nelze vrátit.')\n return data\n\n\n@admin.register(models.Order)\nclass OrderAdmin(admin.ModelAdmin):\n date_hierarchy = 'date_required'\n list_display = ['__str__', 'customer_note', 'customer', 'short_datetime', 'short_date',\n 'invoiced', 'delivery_note_number', 'status', 'my_note']\n list_filter = (\n ('date_required', admin.DateFieldListFilter),\n ('date_required', FutureDateFieldFilter),\n 'status')\n readonly_fields = ['datetime_ordered', 'invoiced']\n list_editable = ['my_note']\n change_form_template = 'ffpasta/admin/order_change_form.html'\n inlines = [ItemInline]\n actions = ['reject', 'confirm', 'complete', 'create_delivery_note', 'download_delivery_note',\n 'invoice_by_order', 'invoice_by_delivery_notes']\n search_fields = ['customer__name', 'customer__id']\n\n def short_datetime(self, obj):\n return obj.datetime_ordered.strftime(\"%d.%m. 
%H:%M\")\n\n def short_date(self, obj):\n return obj.date_required.strftime(\"%d %m.\")\n\n short_datetime.admin_order_field = 'datetime_ordered'\n short_datetime.short_description = 'Objednáno'\n short_date.admin_order_field = 'date_required'\n short_date.short_description = 'Datum dodání'\n\n def get_queryset(self, request):\n queryset = super().get_queryset(request)\n return queryset.filter(datetime_ordered__isnull=False).order_by('date_required')\n\n def changelist_view(self, request, extra_context=None):\n if not request.META.get('QUERY_STRING') and (\n request.META.get('HTTP_REFERER') is None or\n request.META.get('HTTP_REFERER').split('?')[0] != request.build_absolute_uri('?')):\n return HttpResponseRedirect('?date_required__gte={}&date_required__lt={}'.format(datetime.date.today(), datetime.date.today() + datetime.timedelta(days=8)))\n return super().changelist_view(request, extra_context)\n\n def response_change(self, request, obj):\n if \"invoice\" in request.POST:\n obj.save()\n invoice = obj.invoice()\n if invoice == 0:\n self.message_user(request, \"Vyfakturováno\")\n else:\n self.message_user(request, \"Nepodařilo se vyfakturovat\")\n # TO DO: log invoice fail !!!\n return HttpResponseRedirect(\".\")\n return super().response_change(request, obj)\n\n def has_delete_permission(self, request, obj=None):\n if obj and obj.invoiced:\n return False\n return super().has_delete_permission(request, obj)\n\n def get_readonly_fields(self, request, obj=None):\n readonly_fields = super().get_readonly_fields(request, obj)\n if obj and obj.invoiced:\n return readonly_fields + ['customer']\n return readonly_fields\n\n def get_form(self, request, obj=None, change=False, **kwargs):\n form = super().get_form(request, obj, change, **kwargs)\n form.clean_status = clean_status\n return form\n\n def get_changelist_form(self, request, **kwargs):\n form = super().get_changelist_form(request, **kwargs)\n form.clean_status = clean_status\n return form\n\n def save_model(self, request, obj, form, change):\n if not change:\n obj.datetime_ordered = datetime.datetime.now()\n return super().save_model(request, obj, form, change)\n\n def reject(self, request, queryset):\n for order in queryset:\n err_msg = order.do_reject()\n if err_msg:\n messages.error(request, err_msg)\n else:\n messages.info(request, f'Objednávka č. { order.id } byla odmítnuta')\n\n def confirm(self, request, queryset):\n for order in queryset:\n err_msg = order.do_confirm()\n if err_msg:\n messages.error(request, err_msg)\n else:\n messages.info(request, f'Objednávka č. { order.id } byla potvrzena')\n\n def complete(self, request, queryset):\n for order in queryset:\n err_msg = order.do_complete(user=request.user)\n if err_msg:\n messages.error(request, err_msg)\n else:\n messages.info(request, f'Objednávka č. { order.id } byla dokončena')\n\n def create_delivery_note(self, request, queryset):\n for order in queryset:\n delivery_note_number = order.create_delivery_note()\n if isinstance(delivery_note_number, int):\n messages.success(request, f'Pro objednávku č. { order.id } byl vytvořen dodací list č. 
{ delivery_note_number }')\n            else:\n                messages.warning(request, delivery_note_number)\n\n    def download_delivery_note(self, request, queryset):\n        queryset = queryset.filter(delivery_note_number__isnull=False)\n        if queryset.exists():\n            delivery_note_html = get_template('ffpasta/delivery_notes.html').render(context={'object_list': queryset})\n            pdf_file = weasyprint.HTML(string=delivery_note_html).write_pdf()\n            response = HttpResponse(pdf_file, content_type='application/pdf')\n            response['Content-Disposition'] = 'attachment; filename=\"dodaci_listy.pdf\"'\n            return response\n\n    def invoice_by_order(self, request, queryset):\n        for order in queryset.filter(invoiced=True):\n            messages.info(request, f'Objednávka č. { order.id } již byla dříve vyfakturována.')\n        for order in queryset.filter(invoiced=False):\n            if order.invoice() == 0:\n                messages.success(request, f'Objednávka č. { order.id } byla vyfakturována.')\n            else:\n                messages.error(request, f'Objednávku č. { order.id } se nepodařilo vyfakturovat.')\n\n    def invoice_by_delivery_notes(self, request, queryset):\n        queryset = queryset.filter(invoiced=False, delivery_note_number__isnull=False)\n        if not queryset.exists():\n            messages.warning(request, f'Žádné nevyfakturované dodací listy nebyly vybrány')\n            return None\n        if queryset.exclude(customer=queryset.first().customer).exists():\n            messages.warning(request, f'Vybrané dodací listy jsou pro různé zákazníky')\n            return None\n        response = models.Order.invoice_delivery_notes(queryset)\n        if response:\n            messages.error(request, f'Dodací listy se nepodařilo vyfakturovat')\n        else:\n            messages.info(request, f'Dodací listy byly úspěšně vyfakturovány')\n\n    reject.short_description = 'odmítnout'\n    confirm.short_description = 'potvrdit'\n    complete.short_description = 'dokončit'\n    create_delivery_note.short_description = 'vytvořit dodací listy'\n    download_delivery_note.short_description = 'stáhnout dodací listy'\n    invoice_by_order.short_description = 'fakturovat jednotlivé objednávky'\n    invoice_by_delivery_notes.short_description = 'fakturovat dodací listy'\n\n\nclass TodayDeliveryOrder(models.Order):\n    class Meta:\n        proxy = True\n        verbose_name = 'zásilka'\n        verbose_name_plural = 'dnešní rozvoz'\n\n    def customer_address(self):\n        return self.address\n\n    customer_address.short_description = 'adresa'\n\n\nclass UncommittedOrder(models.Order):\n    class Meta:\n        proxy = True\n        verbose_name = 'neobjednaná objednávka'\n        verbose_name_plural = 'neobjednané objednávky'\n\n\n@admin.register(TodayDeliveryOrder)\nclass TodayDeliveryOrderAdmin(admin.ModelAdmin):\n    list_display = ['__str__', 'customer', 'customer_address', 'customer_note', 'my_note', 'get_total_price']\n    list_filter = []\n\n    def get_queryset(self, request):\n        queryset = super().get_queryset(request)\n        return queryset.filter(datetime_ordered__isnull=False,\n                               date_required=datetime.date.today(),\n                               status__gte=models.Order.CONFIRMED)\n\n    def has_add_permission(self, request):\n        return False\n\n    def has_change_permission(self, request, obj=None):\n        return False\n\n    def has_delete_permission(self, request, obj=None):\n        return False\n\n\nclass ProductionTodayDeliveryOrderAdmin(TodayDeliveryOrderAdmin):\n\n    def has_view_permission(self, request, obj=None):\n        return True\n\n    def has_module_permission(self, request):\n        return True\n\n\n@admin.register(UncommittedOrder)\nclass UncommittedOrderAdmin(admin.ModelAdmin):\n    date_hierarchy = 'date_required'\n    list_display = ['__str__', 'customer', 'date_required']\n    list_filter = (('date_required', FutureDateFieldFilter),)\n    
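# uncommitted orders were created but never submitted, so they have no order timestamp (see get_queryset)\n    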
readonly_fields = ['datetime_ordered']\n\n def get_queryset(self, request):\n queryset = super().get_queryset(request)\n return queryset.filter(datetime_ordered__isnull=True)\n\n def has_add_permission(self, request):\n return False\n\n def has_change_permission(self, request, obj=None):\n return False\n\n\n@admin.register(models.Section)\nclass SectionAdmin(admin.ModelAdmin):\n\n def has_add_permission(self, request):\n return False\n\n def has_delete_permission(self, request, obj=None):\n return False\n\n\n@admin.register(models.Recipe)\nclass RecipeAdmin(PublishMixin, admin.ModelAdmin):\n list_display = ['__str__', 'product', 'published']\n\n\n@admin.register(models.Difference)\nclass DifferenceAdmin(PublishMixin, admin.ModelAdmin):\n list_display = ['__str__', 'ffpasta', 'others', 'published']\n\n\nclass OrderedProduct(models.Product):\n class Meta:\n proxy = True\n verbose_name = 'Objednaný produkt'\n verbose_name_plural = 'Objednané produkty'\n\n\nclass ProductionFutureDateFieldFilter(admin.FieldListFilter):\n def __init__(self, field, request, params, model, model_admin, field_path):\n self.field_generic = '%s__' % field_path\n self.date_params = {k: v for k, v in params.items() if k.startswith(self.field_generic)}\n\n today = datetime.date.today()\n\n self.lookup_kwarg_since = '%s__gte' % field_path\n self.lookup_kwarg_until = '%s__lt' % field_path\n self.links = (\n ('Dnes', {\n self.lookup_kwarg_since: str(today),\n self.lookup_kwarg_until: str(today + datetime.timedelta(days=1)),\n }),\n ('zítra', {\n self.lookup_kwarg_since: str(today + datetime.timedelta(days=1)),\n self.lookup_kwarg_until: str(today + datetime.timedelta(days=2)),\n }),\n ('pozítří', {\n self.lookup_kwarg_since: str(today + datetime.timedelta(days=2)),\n self.lookup_kwarg_until: str(today + datetime.timedelta(days=3)),\n }),\n ('popozítří', {\n self.lookup_kwarg_since: str(today + datetime.timedelta(days=3)),\n self.lookup_kwarg_until: str(today + datetime.timedelta(days=4)),\n }),\n )\n if field.null:\n self.lookup_kwarg_isnull = '%s__isnull' % field_path\n self.links += (\n (_('No date'), {self.field_generic + 'isnull': 'True'}),\n (_('Has date'), {self.field_generic + 'isnull': 'False'}),\n )\n super().__init__(field, request, params, model, model_admin, field_path)\n\n def expected_parameters(self):\n params = [self.lookup_kwarg_since, self.lookup_kwarg_until]\n if self.field.null:\n params.append(self.lookup_kwarg_isnull)\n return params\n\n def choices(self, changelist):\n for title, param_dict in self.links:\n yield {\n 'selected': self.date_params == param_dict,\n 'query_string': changelist.get_query_string(param_dict, [self.field_generic]),\n 'display': title,\n }\n\n\n@admin.register(OrderedProduct)\nclass OrderedProductAdmin(admin.ModelAdmin):\n list_display = ['production_link', 'next_1st_day_to_do', 'next_2nd_day_to_do', 'next_3rd_day_to_do',\n 'next_3_days_to_do', 'in_stock']\n ordering = ['-sauce', 'name']\n list_display_links = None\n list_select_related = True\n\n def next_1st_day_to_do(self, obj):\n return models.Item.objects.filter(\n product=obj,\n order__status__exact=models.Order.CONFIRMED,\n order__date_required__exact=datetime.date.today() + datetime.timedelta(days=1),\n ).aggregate(Sum('quantity'))['quantity__sum']\n\n def next_2nd_day_to_do(self, obj):\n return models.Item.objects.filter(\n product=obj,\n order__status__exact=models.Order.CONFIRMED,\n order__date_required__exact=datetime.date.today() + datetime.timedelta(days=2),\n ).aggregate(Sum('quantity'))['quantity__sum']\n\n def 
next_3rd_day_to_do(self, obj):\n return models.Item.objects.filter(\n product=obj,\n order__status__exact=models.Order.CONFIRMED,\n order__date_required__exact=datetime.date.today() + datetime.timedelta(days=3),\n ).aggregate(Sum('quantity'))['quantity__sum']\n\n def next_3_days_to_do(self, obj):\n return models.Item.objects.filter(\n product=obj,\n order__status__exact=models.Order.CONFIRMED,\n order__date_required__gt=datetime.date.today(),\n order__date_required__lte=datetime.date.today() + datetime.timedelta(days=3),\n ).aggregate(Sum('quantity'))['quantity__sum']\n\n def production_link(self, obj):\n return mark_safe(f'{ obj.name }')\n\n production_link.short_description = 'produkt'\n next_1st_day_to_do.short_description = date_format(\n (datetime.date.today() + datetime.timedelta(days=1)), format='l, j. n.', use_l10n=True)\n next_2nd_day_to_do.short_description = date_format(\n (datetime.date.today() + datetime.timedelta(days=2)), format='l, j. n.', use_l10n=True)\n next_3rd_day_to_do.short_description = date_format(\n (datetime.date.today() + datetime.timedelta(days=3)), format='l, j. n.', use_l10n=True)\n next_3_days_to_do.short_description = 'celkem vyrobit'\n\n def changelist_view(self, request, extra_context=None):\n extra_context = extra_context or {}\n extra_context['title'] = 'Vyberte produkt k výrobě'\n return super().changelist_view(request, extra_context=extra_context)\n\n def has_delete_permission(self, request, obj=None):\n return False\n\n def has_change_permission(self, request, obj=None):\n return False\n\n def has_add_permission(self, request):\n return False\n\n\nclass ProductionOrderedProductAdmin(OrderedProductAdmin):\n\n def production_link(self, obj):\n return mark_safe(f'{ obj.name }')\n\n def has_module_permission(self, request):\n return True\n\n def has_view_permission(self, request, obj=None):\n return True\n\n\n@admin.register(models.StockTransaction)\nclass StockTransactionAdmin(admin.ModelAdmin):\n list_display = ['id', 'datetime', 'committed_by', 'transaction_type', 'product', 'quantity']\n list_filter = ['datetime', 'transaction_type', 'committed_by']\n form = forms.StockTransactionForm\n\n def get_readonly_fields(self, request, obj=None):\n readonly_fields = super().get_readonly_fields(request, obj)\n if obj is not None:\n readonly_fields += ('product', 'transaction_type', 'quantity')\n return readonly_fields\n\n def save_model(self, request, obj, form, change):\n if obj.id is None:\n obj.committed_by = request.user\n obj.save()\n\n\nclass ProductionStockTransactionAdmin(StockTransactionAdmin):\n\n def response_add(self, request, obj, post_url_continue=None):\n return redirect('/produkce/ffpasta/orderedproduct/')\n\n def has_add_permission(self, request):\n return True\n\n def has_delete_permission(self, request, obj=None):\n return False\n\n def has_change_permission(self, request, obj=None):\n return True\n\n def has_view_permission(self, request, obj=None):\n return True\n\n def has_module_permission(self, request):\n return True\n\n\nclass ToDoOrder(models.Order):\n class Meta:\n proxy = True\n verbose_name = 'potvrzená objednávka'\n verbose_name_plural = 'potvrzené objednávky'\n\n\nclass ToDoOrderAdmin(admin.ModelAdmin):\n date_hierarchy = 'date_required'\n list_display = ['__str__', 'customer_note', 'customer', 'short_date', 'my_note', 'is_complete']\n list_filter = [('date_required', ProductionFutureDateFieldFilter)]\n readonly_fields = ['datetime_ordered', 'invoiced']\n list_editable = ['my_note']\n change_form_template = 
'ffpasta/admin/order_change_form.html'\n actions = ['complete']\n\n def short_date(self, obj):\n return obj.date_required.strftime(\"%d %m.\")\n\n def is_complete(self, obj):\n return obj.status == obj.COMPLETED\n\n short_date.admin_order_field = 'date_required'\n short_date.short_description = 'Datum dodání'\n is_complete.admin_order_field = 'status'\n is_complete.short_description = 'Hotovo'\n is_complete.boolean = True\n\n\n def get_queryset(self, request):\n queryset = super().get_queryset(request)\n return queryset.filter(status__in=[models.Order.CONFIRMED, models.Order.COMPLETED],\n date_required__gte=datetime.date.today(),\n date_required__lte=(datetime.date.today() + datetime.timedelta(days=3))\n ).order_by('date_required')\n\n def changelist_view(self, request, extra_context=None):\n if not request.META.get('QUERY_STRING') and (\n request.META.get('HTTP_REFERER') is None or\n request.META.get('HTTP_REFERER').split('?')[0] != request.build_absolute_uri('?')):\n return HttpResponseRedirect('?date_required__gte={}&date_required__lt={}'.format(\n datetime.date.today(), datetime.date.today() + datetime.timedelta(days=8)))\n return super().changelist_view(request, extra_context)\n\n def has_add_permission(self, request):\n return False\n\n def has_delete_permission(self, request, obj=None):\n return False\n\n def has_change_permission(self, request, obj=None):\n return False\n\n def has_view_permission(self, request, obj=None):\n return True\n\n def has_module_permission(self, request):\n return True\n\n def get_form(self, request, obj=None, change=False, **kwargs):\n form = super().get_form(request, obj, change, **kwargs)\n form.clean_status = clean_status\n return form\n\n def get_changelist_form(self, request, **kwargs):\n form = super().get_changelist_form(request, **kwargs)\n form.clean_status = clean_status\n return form\n\n def save_model(self, request, obj, form, change):\n if not change:\n obj.datetime_ordered = datetime.datetime.now()\n return super().save_model(request, obj, form, change)\n\n def complete(self, request, queryset):\n for order in queryset:\n err_msg = order.do_complete(user=request.user)\n if err_msg:\n messages.error(request, err_msg)\n else:\n messages.info(request, f'Objednávka č. 
{ order.id } byla zabalena')\n\n    complete.short_description = 'zabalit'\n\n\nproduction_admin.register(ToDoOrder, ToDoOrderAdmin)\nproduction_admin.register(OrderedProduct, ProductionOrderedProductAdmin)\nproduction_admin.register(models.StockTransaction, ProductionStockTransactionAdmin)\nproduction_admin.register(TodayDeliveryOrder, ProductionTodayDeliveryOrderAdmin)\n","sub_path":"ffpasta/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":28352,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
{"seq_id":"465499406","text":"#coding:utf-8\n\nimport os\nimport time\nimport urllib2\nimport hashlib\n\n\nSECOND = 1\nMINUTE = SECOND * 60\nHOUR = MINUTE * 60\nDAY = HOUR * 24\n\n\nclass FileCache(object):\n    cache_dir = os.path.join(os.path.dirname(__file__), 'dddd')\n    def __init__(self, cache_dir=None, timeout=0):\n        if cache_dir:\n            self.cache_dir = cache_dir\n        if not os.path.isdir(self.cache_dir):\n            os.makedirs(self.cache_dir)\n        self.cache_timeout = timeout\n\n    def set_cache(self, cacheid, data):\n        if self.cache_timeout <= 0:\n            return\n        fpath = os.path.join(self.cache_dir, cacheid)\n        with open(fpath, 'wb') as fp:\n            fp.write(data)\n\n    def get_cache(self, cacheid):\n        fpath = os.path.join(self.cache_dir, cacheid)\n        if not os.path.isfile(fpath):\n            return None\n        stat_result = os.stat(fpath)\n        cache_time = time.time() - stat_result.st_atime\n        if cache_time > self.cache_timeout:\n            os.remove(fpath)\n            return None\n        with open(fpath, 'rb') as fp:\n            return fp.read()\n\n\ndef urlencode(iters):\n    if hasattr(iters, 'items'):\n        iters = iters.items()\n    l = []\n    for k,v in iters:\n        k = k.encode('utf8') if isinstance(k, unicode) else str(k)\n        v = v.encode('utf8') if isinstance(v, unicode) else str(v)\n        if not v:\n            v = ''\n        l.append(urllib2.quote(str(k)) + '=' + urllib2.quote(str(v)))\n    return '&'.join(l)\n\n\nclass UrlRequestHandler(object):\n    def __init__(self, cache_timeout=0):\n        self.cache = FileCache(timeout=cache_timeout)\n        self.opener = urllib2.build_opener()\n        self.body = None\n\n    def set_headers(self, headers):\n        self.opener.addheaders = headers\n\n    def set_body(self, body):\n        if not isinstance(body, basestring):\n            body = urlencode(body)\n        self.body = body\n\n    def make_cache_id(self, url):\n        s = hashlib.md5(url).hexdigest()\n        if self.body:\n            s += hashlib.md5(self.body).hexdigest()\n        return hashlib.md5(s).hexdigest()\n\n    def request(self, url, timeout=SECOND*8, retry=3):\n        cache_id = self.make_cache_id(url)\n        data = self.cache.get_cache(cache_id)\n        if data:\n            return data\n        while retry > 0:\n            try:\n                res = self.opener.open(url, self.body, timeout)\n                ret = res.read()\n                self.cache.set_cache(cache_id, ret)\n                return ret\n            except urllib2.URLError:\n                # count down the remaining attempts instead of looping forever\n                retry -= 1\n                continue\n        raise urllib2.URLError('timeout')\n\n\ndef url_request(url, data=None, timeout=SECOND*8, retry=3, cache_timeout=0):\n    req = UrlRequestHandler(cache_timeout)\n    if data:\n        req.set_body(data)\n    return req.request(url, timeout, retry)\n","sub_path":"datamine/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2817,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
{"seq_id":"333071942","text":"import stk\n\n\ndef func_groups(building_blocks):\n    fgs = (fg for bb in building_blocks for fg in bb.func_groups)\n    yield from stk.dedupe(fgs, key=lambda fg: fg.fg_type.name)\n\n\ndef _test_reaction(\n    reactor,\n    atom_change_per_reaction,\n    bond_change_per_reaction,\n    costruction_bonds_per_reaction,\n    periodic_bonds_per_periodic_reaction,\n    
expected_construction_bond_order\n):\n mol = reactor._mol\n num_start_atoms = len(mol.atoms)\n num_start_bonds = len(mol.bonds)\n edge_clones = reactor._mol._edge_clones\n original_fgs = tuple(mol.func_groups)\n assert len(original_fgs) == 12\n reacted_fgs = []\n deleters = set()\n\n assert len(mol.construction_bonds) == 0\n\n periodicities = [\n (0, 0, 0),\n (1, 0, -1)\n ]\n\n num_periodic_reactions = 0\n for i, edge in enumerate(edge_clones):\n for fg in edge.get_func_groups():\n deleters.update(fg.deleters)\n\n reactor.add_reaction(\n func_groups=edge.get_func_groups(),\n periodicity=periodicities[i % 2]\n )\n num_periodic_reactions += i % 2\n\n reacted_fgs.extend(edge.get_func_groups())\n reactor.finalize()\n assert len(reacted_fgs) == 10\n\n # Make sure that unreacted functional groups are unchanged.\n num_original_atoms = {\n fg.fg_type.name: len(fg.atoms)\n for fg in func_groups(mol.building_block_vertices.keys())\n }\n num_original_bonders = {\n fg.fg_type.name: len(fg.bonders)\n for fg in func_groups(mol.building_block_vertices.keys())\n }\n num_original_deleters = {\n fg.fg_type.name: len(fg.deleters)\n for fg in func_groups(mol.building_block_vertices.keys())\n }\n\n num_unreacted_fgs = 0\n for fg in original_fgs:\n if fg not in reacted_fgs:\n fg_name = fg.fg_type.name\n assert len(fg.atoms) == num_original_atoms[fg_name]\n assert len(fg.bonders) == num_original_bonders[fg_name]\n assert len(fg.deleters) == num_original_deleters[fg_name]\n num_unreacted_fgs += 1\n assert num_unreacted_fgs == 2\n\n # Make sure the deleter atoms got purged from the functional\n # groups.\n for fg in reacted_fgs:\n assert not fg.deleters\n fg_name = fg.fg_type.name\n num_expected_atoms = (\n num_original_atoms[fg_name] -\n num_original_deleters[fg_name]\n )\n assert len(fg.atoms) == num_expected_atoms\n assert all(atom not in deleters for atom in fg.atoms)\n\n assert len(fg.bonders) == num_original_bonders[fg_name]\n assert all(atom not in deleters for atom in fg.bonders)\n\n # Make sure the deleters atoms are not present in the molecule.\n assert all(atom not in deleters for atom in mol.atoms)\n assert (\n len(mol.atoms) == num_start_atoms + atom_change_per_reaction*5\n )\n\n # Make sure the correct number of construction bonds was made.\n assert (\n len(mol.construction_bonds) == costruction_bonds_per_reaction*5\n )\n\n # Make sure all constructed bonds have the correct bond oder.\n for bond in mol.construction_bonds:\n assert bond.order == expected_construction_bond_order(bond)\n\n # Make sure the correct number of bonds is left.\n assert (\n len(mol.bonds) == num_start_bonds + bond_change_per_reaction*5\n )\n\n # Make sure construction bonds are shared with bonds.\n bonds = set(mol.bonds)\n assert all(bond in bonds for bond in mol.construction_bonds)\n\n # Make sure the correct amount of bonds is periodic.\n num_periodic_bonds = 0\n for bond in mol.bonds:\n if bond.is_periodic():\n num_periodic_bonds += 1\n assert bond.periodicity == (1, 0, -1)\n else:\n assert bond.periodicity == (0, 0, 0)\n assert (\n num_periodic_bonds ==\n periodic_bonds_per_periodic_reaction*num_periodic_reactions\n )\n\n\ndef test_react_any_single(make_reactor, amine2):\n reactor = make_reactor(\n building_blocks=[amine2, amine2],\n topology_graph=stk.polymer.Linear('AB', 3)\n )\n _test_reaction(\n reactor=reactor,\n atom_change_per_reaction=-4,\n bond_change_per_reaction=-3,\n costruction_bonds_per_reaction=1,\n periodic_bonds_per_periodic_reaction=1,\n expected_construction_bond_order=lambda bond: 1\n )\n\n\ndef 
test_react_any_double(make_reactor, amine2, aldehyde2):\n reactor = make_reactor(\n building_blocks=[amine2, aldehyde2],\n topology_graph=stk.polymer.Linear('AB', 3)\n )\n _test_reaction(\n reactor=reactor,\n atom_change_per_reaction=-3,\n bond_change_per_reaction=-2,\n costruction_bonds_per_reaction=1,\n periodic_bonds_per_periodic_reaction=1,\n expected_construction_bond_order=lambda bond: 2\n )\n\n\ndef test_react_diol_with_dihalogen(\n make_reactor,\n diol2,\n difluorene_dibromine\n):\n reactor = make_reactor(\n building_blocks=[diol2, difluorene_dibromine],\n topology_graph=stk.polymer.Linear('AB', 3)\n )\n _test_reaction(\n reactor=reactor,\n atom_change_per_reaction=-4,\n bond_change_per_reaction=-2,\n costruction_bonds_per_reaction=2,\n periodic_bonds_per_periodic_reaction=2,\n expected_construction_bond_order=lambda bond: 1\n )\n\n\ndef test_react_boronic_acid_with_diol(\n make_reactor,\n boronic_acid2,\n diol2\n):\n reactor = make_reactor(\n building_blocks=[boronic_acid2, diol2],\n topology_graph=stk.polymer.Linear('AB', 3)\n )\n _test_reaction(\n reactor=reactor,\n atom_change_per_reaction=-6,\n bond_change_per_reaction=-4,\n costruction_bonds_per_reaction=2,\n periodic_bonds_per_periodic_reaction=2,\n expected_construction_bond_order=lambda bond: 1\n )\n\n\ndef test_react_ring_amine_with_ring_amine(make_reactor, ring_amine):\n reactor = make_reactor(\n building_blocks=[ring_amine, ring_amine],\n topology_graph=stk.polymer.Linear('AB', 3)\n )\n _test_reaction(\n reactor=reactor,\n atom_change_per_reaction=3,\n bond_change_per_reaction=6,\n costruction_bonds_per_reaction=12,\n periodic_bonds_per_periodic_reaction=3,\n expected_construction_bond_order=lambda bond: 1\n )\n","sub_path":"tests/molecular/test_reactor.py","file_name":"test_reactor.py","file_ext":"py","file_size_in_byte":6274,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"516818557","text":"import numpy as np\n\ndef root_mean_squared_error(y1, y2):\n if len(y1) != len(y2):\n raise ValueError('different row sizes, {} & {} '.format(len(y1), len(y2)))\n y1 = np.array(y1)\n y2 = np.array(y2)\n rmse = np.sqrt(((y1 - y2) ** 2).sum(0) / len(y1))\n return rmse\n\ndef r2_score(y1, y2):\n if len(y1) != len(y2):\n raise ValueError('different row sizes, {} & {} '.format(len(y1), len(y2)))\n y1 = np.array(y1)\n y2 = np.array(y2) ## predicted value\n mean_y = np.mean(y1)\n ss_t = ((y1 - mean_y) ** 2).sum(0)\n ss_r = ((y1 - y2) ** 2).sum(0)\n \n return 1 - (ss_r/ss_t)\n\ndef accuracy_score(y1, y2):\n y1 = np.array(y1)\n y2 = np.array(y2)\n if y1.shape != y2.shape:\n raise ValueError('different shape, {} & {} '.format(len(y1), len(y2)))\n count_true_pred = 0\n for i, data in enumerate(y1):\n if data == y2[i]:\n count_true_pred += 1\n return count_true_pred / y1.shape[0] * 100\n \n \n \n","sub_path":"pymla/metrics/score.py","file_name":"score.py","file_ext":"py","file_size_in_byte":1032,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"387849497","text":"from datetime import datetime\r\n\r\nclass RegisteredUser:\r\n def __init__(self, Name, FamilyName, Car, Username, Password, BankAccountNumber, controlboard, CarRotationManager,\r\n NumberOfVisits=0,Start=0): # --Intitializing credentials/info--\r\n self.Name = Name\r\n self.FamilyName = FamilyName\r\n self.Car = Car\r\n self.Username = Username\r\n self.Password = Password\r\n self.BankAccountNumber = BankAccountNumber\r\n self.NumberOfVisits = NumberOfVisits\r\n 
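# shared control board and rotation manager objects passed in by the parking system\r\n        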
self.controlboard = controlboard\r\n self.CarRotationManager = CarRotationManager\r\n self.linkedplatform = None # Variable containing the platform where the user's car is parked\r\n self.parked = False # Boolean variable which indicates if user is parked or not\r\n self.Start=Start # This is the time when the user parks\r\n def parked_and_linkedplatform_value(self): # This function checks if the user is parked and sets the values of linkedplatform and parked accordingly\r\n (boolean, linkedplatform) = self.CarRotationManager.check_if_user_parked(self)\r\n if boolean == True:\r\n self.parked = True\r\n self.linkedplatform = linkedplatform\r\n else:\r\n self.parked = False\r\n self.linkedplatform = None\r\n\r\n def check_car_location(\r\n self): # Determines the location of the user's car (Platform name and Platform level that the car sits on)\r\n self.parked_and_linkedplatform_value()\r\n if self.parked == False:\r\n print(\"Your car is not parked!\\n\")\r\n return\r\n name = self.CarRotationManager.get_platform_name(self)\r\n level = self.CarRotationManager.get_platform_level(self)\r\n if level == -1:\r\n print(\"Your Car \" + self.Car.model + \" is not in the Parking\\n\")\r\n else:\r\n print(\"Your Car \" + self.Car.model + \" is on Platform \" + name + \" and it's on level \" + str(level) + \"\\n\")\r\n\r\n def __str__(self): # prints information for a Registered User\r\n return (\r\n \"Full name: \" + self.Name + \" \" + self.FamilyName + \"\\n\" + \"Car Model: \" + self.Car.model + \"\\n\" + \"Username: \" + self.Username + \"\\n\" + \"Number of Visits: \" + str(\r\n self.NumberOfVisits))\r\n\r\n def request_car(self): # Function that releases the car if it is parked\r\n self.parked_and_linkedplatform_value()\r\n if self.parked == False:\r\n print(\"Your car is not parked!\\n\")\r\n return\r\n pos = self.CarRotationManager.get_platform_position(self) # Get the car's current position in the parking\r\n if (pos == -1):\r\n print(\"Your car is not parked!\\n\")\r\n return\r\n self.CarRotationManager.return_platform_to_base(pos) # Move the car to the base position\r\n self.CarRotationManager.release_car(self.linkedplatform) # Release the car\r\n self.parked = False\r\n self.CarRotationManager.occupiedPlatforms = self.CarRotationManager.occupiedPlatforms - 1\r\n print(\"Your \" + self.Car.model + \" has been released.\")\r\n print(\"Have a great day \" + self.Name + \"!\\n\")\r\n self.controlboard.add_start_time(self,'0')\r\n\r\n def park_car(self): # Function that parks the user's car if it's not already parked\r\n self.parked_and_linkedplatform_value()\r\n if self.parked == True:\r\n print(\"Your car is already parked!\\n\")\r\n return\r\n platform = self.CarRotationManager.return_empty_platform() # FOUND CLOSEST EMPTY PLATFORM\r\n if (platform == None):\r\n return -1 # PARKING IS FULL\r\n self.CarRotationManager.return_platform_to_base(platform.Position)\r\n platform.link(self) # NOW USER'S CAR IS PARKED ON BASE PLATFORM\r\n self.linkedplatform = platform\r\n self.parked = True\r\n self.CarRotationManager.occupiedPlatforms = self.CarRotationManager.occupiedPlatforms + 1\r\n self.controlboard.increment_nb_visits(self)\r\n print(\"Your \" + self.Car.model + \" has been parked!\\n\")\r\n now = datetime.now() # Get the current time, i.e when the user parks\r\n array = str(now).split()\r\n string_into_file = array[0] + \"@\" + array[1]\r\n self.controlboard.add_start_time(self,string_into_file) # Add this time (when the car is parked) next to this user's information in the 
file\r\n","sub_path":"RegisteredUser.py","file_name":"RegisteredUser.py","file_ext":"py","file_size_in_byte":4373,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"556953317","text":"from config import Config\n\n\nclass EtlHelper:\n def __init__(self):\n self.conf = Config()\n self.logger = self.conf.get_logger(__package__)\n\n def get_map_reduce_conf(self, etl_name):\n self.logger.info(\"Get map reduce conf\")\n self.logger.debug(\"ETL name: %s\" % etl_name)\n\n # At the moment these values are coming from the config file but we may want to consider\n # getting them from the database instead\n return {\n \"memory\": self.conf.get(\"ETL\", \"mapreduce_memory\"),\n \"reducers\": self.conf.get(\"ETL\", \"mapreduce_reducers\")\n }\n\n def get_spark_conf(self, etl_name):\n self.logger.info(\"Get Spark conf\")\n self.logger.debug(\"ETL name: %s\" % etl_name)\n\n # At the moment these values are coming from the config file but we may want to consider\n # getting them from the database instead\n return {\n \"executors\": self.conf.get(\"ETL\", \"spark_executors\"),\n \"driver_memory\": self.conf.get(\"ETL\", \"spark_driver_memory\"),\n \"executor_memory\": self.conf.get(\"ETL\", \"spark_executor_memory\")\n }\n","sub_path":"Projects/hadoop_import/hadoopimport/etl_helper.py","file_name":"etl_helper.py","file_ext":"py","file_size_in_byte":1134,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"649427576","text":"from rlkit.launchers.launcher_util import run_experiment\nimport rlkit.misc.hyperparameter as hyp\nfrom rlkit.launchers.experiments.murtaza.rfeatures_rl import state_td3bc_experiment\n\nif __name__ == \"__main__\":\n variant = dict(\n env_id='SawyerPushNIPSEasy-v0',\n algo_kwargs=dict(\n batch_size=1024,\n num_epochs=1000,\n num_eval_steps_per_epoch=1000,\n num_expl_steps_per_train_loop=1000,\n num_trains_per_train_loop=1000,\n min_num_steps_before_training=10000,\n max_path_length=50,\n ),\n td3_trainer_kwargs=dict(\n discount=0.99,\n ),\n td3_bc_trainer_kwargs=dict(\n discount=0.99,\n demo_path=None,\n demo_off_policy_path=None,\n bc_num_pretrain_steps=10000,\n q_num_pretrain_steps=10000,\n rl_weight=1.0,\n bc_weight=0,\n reward_scale=1.0,\n target_update_period=2,\n policy_update_period=2,\n ),\n replay_buffer_kwargs=dict(\n max_size=int(1e6),\n fraction_goals_rollout_goals=0.2,\n fraction_goals_env_goals=0.5,\n ),\n qf_kwargs=dict(\n hidden_sizes=[400, 300],\n ),\n policy_kwargs=dict(\n hidden_sizes=[400, 300],\n ),\n save_video=False,\n exploration_noise=.5,\n td3_bc=True,\n )\n\n search_space = {\n }\n sweeper = hyp.DeterministicHyperparameterSweeper(\n search_space, default_parameters=variant,\n )\n\n # n_seeds = 1\n # mode = 'local'\n # exp_name = 'test'\n\n n_seeds = 2\n mode = 'gcp'\n exp_name = 'pusher_state_td3_confirm'\n\n for exp_id, variant in enumerate(sweeper.iterate_hyperparameters()):\n for _ in range(n_seeds):\n run_experiment(\n state_td3bc_experiment,\n exp_name=exp_name,\n mode=mode,\n variant=variant,\n num_exps_per_instance=3,\n skip_wait=False,\n gcp_kwargs=dict(\n preemptible=False,\n )\n )\n","sub_path":"experiments/murtaza/off_policy_ssl/pusher/state.py","file_name":"state.py","file_ext":"py","file_size_in_byte":2149,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"28962217","text":"from lxml import html\nimport requests\nimport re\n\n\ndef text(elt):\n return elt.text_content().replace(u'\\xa0', u' 
')\n\nurl=r'http://www.marinetraffic.com/en/ais/index/positions/all/shipid:306858/mmsi:257458000/shipname:SIEM%20PILOT'\nprint(url)\npage = requests.get(url, headers={'User-Agent': 'Mozilla/5.0'})\n#print(page.content)\ntree = html.fromstring(page.content)\ntabulka=[]\nfor row in tree.xpath('//table[@class=\"table table-hover text-left\"]/tr'):\n radek=[]\n for column in row.xpath('./th[position()>0]/text() |./td[position()=1]/time/text() | ./td[position()>1]/text()'):\n radek.append(column.strip())\n tabulka.append(radek)\n\n\ntable=[]\nfor row in tabulka:\n if len(row)>5:\n zaznam ={\n \"timestamputc\":row[0],\n \"aissource\":row[1],\n \"speed\":row[2],\n \"latitude\":row[3],\n \"longtitude\":row[4],\n \"course\":row[5]\n }\n table.append(zaznam)\nprint(table)","sub_path":"marine.py","file_name":"marine.py","file_ext":"py","file_size_in_byte":952,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"450846781","text":"# Copyright 2015 Carnegie Mellon University\n#\n# Author: Shaunak Shatmanyu \n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\n\nimport ast\nimport falcon\nfrom oslo.config import cfg\nfrom stevedore import driver\n\nfrom monasca.common import es_conn\nfrom monasca.common import namespace\nfrom monasca.common import resource_api\nfrom monasca.openstack.common import log\n\n\ntry:\n import ujson as json\nexcept ImportError:\n import json\n\nalarms_opts = [\n cfg.StrOpt('doc_type', default='alarms',\n help='The doc_type that alarm definitions will be saved to.'),\n cfg.StrOpt('index_strategy', default='timed',\n help='The index strategy used to create index name.'),\n cfg.StrOpt('index_prefix', default='data_',\n help='The index prefix where metrics were saved to.'),\n cfg.IntOpt('size', default=10000,\n help=('The query result limit. 
Any result set more than '\n 'the limit will be discarded.')),\n]\n\ncfg.CONF.register_opts(alarms_opts, group='alarms')\n\nLOG = log.getLogger(__name__)\n\n\nclass AlarmDispatcher(object):\n\n def __init__(self, global_conf):\n LOG.debug('Initializing Alarm V2API!')\n super(AlarmDispatcher, self).__init__()\n self.doc_type = cfg.CONF.alarms.doc_type\n self.index_prefix = cfg.CONF.alarms.index_prefix\n self.size = cfg.CONF.alarms.size\n\n # load index strategy\n if cfg.CONF.alarms.index_strategy:\n self.index_strategy = driver.DriverManager(\n namespace.STRATEGY_NS,\n cfg.CONF.alarms.index_strategy,\n invoke_on_load=True,\n invoke_kwds={}).driver\n LOG.debug(self.index_strategy)\n else:\n self.index_strategy = None\n\n self._es_conn = es_conn.ESConnection(\n self.doc_type, self.index_strategy, self.index_prefix)\n\n def _get_alarms_response(self, res, ele_name='hits'):\n if res and res.status_code == 200:\n obj = res.json()\n if obj:\n return obj.get(ele_name)\n return None\n else:\n return None\n\n @resource_api.Restify('/v2.0/alarms', method='get')\n def do_get_alarms(self, req, res):\n LOG.debug('The alarms GET request is received!')\n\n # Extract the query string frm the request\n query_string = req.query_string\n LOG.debug('Request Query String: %s' % query_string)\n\n # Transform the query string with proper search format\n # params = self._get_alarms_helper(query_string)\n # LOG.debug('Query Data: %s' % params)\n params = ('{\"aggs\": {\"latest_state\": {'\n '\"terms\": {\"field\": \"alarm_definition.name\", \"size\": 0},'\n '\"aggs\": {\"top_state_hits\": {\"top_hits\": {\"sort\": ['\n '{\"updated_timestamp\": {\"order\": \"desc\"}}],'\n '\"_source\": {\"include\": ['\n '\"state\", \"created_timestamp\",\"updated_timestamp\",'\n '\"metrics\",\"sub_alarms\",\"state_updated_timestamp\",'\n '\"id\", \"alarm_definition\"]},\"size\" : 1}}}}}}')\n\n es_res = self._es_conn.get_messages(json.loads(params),\n q_string='search_type=count')\n res.status = getattr(falcon, 'HTTP_%s' % es_res.status_code)\n LOG.debug('Query to ElasticSearch returned Status: %s' %\n es_res.status_code)\n\n es_res = self._get_alarms_response(es_res, ele_name='aggregations')\n LOG.debug('Query to ElasticSearch returned: %s' % es_res)\n\n res.body = ''\n result_elements = []\n try:\n if es_res[\"latest_state\"]:\n res_data = es_res[\"latest_state\"][\"buckets\"]\n res.body = '['\n for bucket in res_data:\n alarm = bucket['top_state_hits']['hits']['hits'][0]\n if alarm and alarm['_source']:\n alarm = alarm['_source']\n result_elements.append({\n \"id\": alarm[\"id\"],\n \"links\": [{\"rel\": \"self\",\n \"href\": req.uri}],\n \"alarm_definition\": alarm[\"alarm_definition\"],\n \"metrics\": alarm[\"metrics\"],\n \"state\": alarm[\"state\"],\n \"sub_alarms\": alarm[\"sub_alarms\"],\n \"state_updated_timestamp\":\n alarm[\"state_updated_timestamp\"],\n \"updated_timestamp\": alarm[\"updated_timestamp\"],\n \"created_timestamp\": alarm[\"created_timestamp\"]})\n res.body = json.dumps({\n \"links\": [{\"rel\": \"self\", \"href\": req.uri}],\n \"elements\": result_elements\n })\n else:\n res.body = ''\n res.content_type = 'application/json;charset=utf-8'\n except Exception:\n res.status = getattr(falcon, 'HTTP_400')\n LOG.exception('Error occurred while handling Alarms Get Request.')\n\n @resource_api.Restify('/v2.0/alarms/{id}', method='get')\n def do_get_alarms_by_id(self, req, res, id):\n LOG.debug('The alarms by id GET request is received!')\n LOG.debug(id)\n\n es_res = self._es_conn.get_message_by_id(id)\n res.status 
= getattr(falcon, 'HTTP_%s' % es_res.status_code)\n        LOG.debug('Query to ElasticSearch returned Status: %s' %\n                  es_res.status_code)\n\n        es_res = self._get_alarms_response(es_res)\n        LOG.debug('Query to ElasticSearch returned: %s' % es_res)\n\n        res.body = ''\n        try:\n            if es_res[\"hits\"]:\n                res_data = es_res[\"hits\"][0]\n                if res_data:\n                    res.body = json.dumps({\n                        \"id\": id,\n                        \"links\": [{\"rel\": \"self\",\n                                   \"href\": req.uri}],\n                        \"metrics\": res_data[\"_source\"][\"metrics\"],\n                        \"state\": res_data[\"_source\"][\"state\"],\n                        \"sub_alarms\": res_data[\"_source\"][\"sub_alarms\"],\n                        \"state_updated_timestamp\":\n                            res_data[\"_source\"][\"state_updated_timestamp\"],\n                        \"updated_timestamp\":\n                            res_data[\"_source\"][\"updated_timestamp\"],\n                        \"created_timestamp\":\n                            res_data[\"_source\"][\"created_timestamp\"]})\n\n                    res.content_type = 'application/json;charset=utf-8'\n                else:\n                    res.body = ''\n        except Exception:\n            res.status = getattr(falcon, 'HTTP_400')\n            LOG.exception('Error occurred while handling Alarm '\n                          'Get By ID Request.')\n\n    @resource_api.Restify('/v2.0/alarms/{id}', method='put')\n    def do_put_alarms(self, req, res, id):\n        LOG.debug(\"Put the alarm with id: %s\" % id)\n        try:\n            msg = req.stream.read()\n            put_msg = ast.literal_eval(msg)\n            es_res = self._es_conn.put_messages(json.dumps(put_msg), id)\n            LOG.debug('Query to ElasticSearch returned Status: %s' %\n                      es_res)\n            res.status = getattr(falcon, 'HTTP_%s' % es_res)\n        except Exception:\n            res.status = getattr(falcon, 'HTTP_400')\n            LOG.exception('Error occurred while handling Alarm Put Request.')\n\n    @resource_api.Restify('/v2.0/alarms/{id}', method='delete')\n    def do_delete_alarms(self, req, res, id):\n        LOG.debug(\"Delete the alarm with id: %s\" % id)\n        try:\n            es_res = self._es_conn.del_messages(id)\n            LOG.debug('Query to ElasticSearch returned Status: %s' %\n                      es_res)\n            res.status = getattr(falcon, 'HTTP_%s' % es_res)\n        except Exception:\n            res.status = getattr(falcon, 'HTTP_400')\n            LOG.exception('Error occurred while handling '\n                          'Alarm Delete Request.')\n","sub_path":"monasca/v2/elasticsearch/alarms.py","file_name":"alarms.py","file_ext":"py","file_size_in_byte":8624,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
{"seq_id":"280214273","text":"from pymongo import MongoClient\nfrom collections import defaultdict\nfrom time import time\nimport json\n\n# connect to MongoDB\nhost = \"localhost\"\nport = \"27017\"\nclient = MongoClient(host, int(port))\ndb = client['lol_matchdata']\ncollection = db['summonerdata']\nsummoners = collection.find()\n\ngame_limit = 50\nsummoner_avg_data = []\ndata = []\n\nfor summoner in summoners:\n    temp = []\n    puid = summoner['puid']\n    temp.append(puid)\n    num_of_games = len(summoner['data'])\n    selected_stats = defaultdict(int)\n    selectors = ['kills', 'deaths', 'assists', 'totalminionskilled', 'neutralminionskilled', 'neutralminionskilledteamjungle', 'neutralminionskilledenemyjungle',\n                 'totaldamagedealt', 'totaldamagedealttochampions', 'totaldamagetaken', 'damagedealttoobjectives', 'totalheal', 'wardsplaced', 'wardskilled', 'visionscore', 'champlevel',\n                 'damagedealttoturrets', 'longesttimespentliving']\n    if num_of_games >= game_limit:\n        for game in summoner['data']:\n            gameduration = (game['gameduration'] // 100) + (game['gameduration'] % 100) / 60\n            stats = game['stats']\n            for selector in selectors:\n                selected_stats[selector] += stats[selector] / gameduration\n        for key in selected_stats.keys():\n            selected_stats[key] = round(selected_stats[key] / num_of_games, 5)\n            
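# selected_stats now holds per-game averages of the per-minute totals\n            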
temp.append(selected_stats[key])  # already averaged in the line above; avoid dividing by num_of_games a second time\n        summoner_avg_data.append({'puid' : puid, 'stats' : selected_stats})\n        data.append(temp)\n\nwith open('summoner_avg_data_' + str(game_limit) + '.json', 'w') as make_file:\n    json.dump(summoner_avg_data, make_file, indent='\\t')\n\nwith open('dataframe_' + str(game_limit) + '.json', 'w') as make_file:\n    json.dump(data, make_file, indent='\\t')\n\n\n","sub_path":"features.py","file_name":"features.py","file_ext":"py","file_size_in_byte":1756,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"598133027","text":"import itertools as it\n\nclass rna_peptide:\n    \n    def __init__(self,sequence,coordinates, a2g=[], c2t=[], cleavage_sites = [], edge = None, C_terminus = 'no_change',N_terminus = 'no_change', cs_within_pep = 'no_change'):\n        self.seq = sequence\n        self.coo = coordinates\n        self.a2g = a2g\n        self.c2t = c2t\n        self.cleavage_sites = cleavage_sites\n        self.edge = edge\n        self.C_terminus = C_terminus\n        self.N_terminus = N_terminus\n        self.cs_within_pep = cs_within_pep\n    \n    def print_peptide_data(self):\n        print(self.seq + '| coordinates: ' + str(self.coo) + ' | edited_sites: ' + str(self.a2g)+';'+str(self.c2t) \n              + ' | cleavage_sites (including boundaries): ' + str(self.cleavage_sites))\n    \n\n    def update_a2g_editing_sites(self, editing_sites):\n        self.a2g = []\n        editing_sites_it = it.chain(editing_sites)\n        for i in editing_sites_it:\n            if i<=self.coo[1]:\n                if i >= self.coo[0]:\n                    self.a2g.append(i)\n            else:\n                break\n\n    def update_c2t_editing_sites(self, editing_sites):\n        self.c2t = []\n        editing_sites_it = it.chain(editing_sites)\n        for i in editing_sites_it:\n            if i<=self.coo[1]:\n                if i >= self.coo[0]:\n                    self.c2t.append(i)\n            else:\n                break\n\n    \n    def find_cleavage_sites(self, digestion_rule, cleave_before_pattern=False,append_zero = False, append_seq_end = False):\n        \n        if append_zero: #peptide is at the beginning of sequence thus appending a pseudo cleavage site - 0\n            initial_list = [0] + [x.end() for x in digestion_rule.finditer(self.seq)]\n        else:\n            initial_list = [x.end() for x in digestion_rule.finditer(self.seq)]\n        initial_set = set(initial_list)\n        \n        #find other overlapping sites\n        if cleave_before_pattern: #cleavage sites are before regex pattern (usually not the case)\n            for site in initial_list:\n                match1 = digestion_rule.match(self.seq,site+1)\n                match2 = digestion_rule.match(self.seq,site+2)\n                if match1:\n                    initial_set.add(match1.start())\n                if match2:\n                    initial_set.add(match2.start())\n        \n        else:\n            for site in initial_list:\n                match1 = digestion_rule.match(self.seq,site-1)\n                match2 = digestion_rule.match(self.seq,site-2)\n                if match1:\n                    initial_set.add(match1.end())\n                if match2:\n                    initial_set.add(match2.end())\n        \n        #return only sites representing in-frame codons\n        cleavage_sites = [x + self.coo[0] for x in initial_set if not x%3 and x!=len(self.seq)]\n        \n        if append_seq_end: #peptide is at the end of sequence thus appending a pseudo cleavage site at the sequence end\n            cleavage_sites.append(len(self.seq)+self.coo[0])\n        cleavage_sites.sort()\n        \n        self.cleavage_sites = cleavage_sites\n","sub_path":"scripts/proteomics_simulator/OLD/20181014/rna_peptide.py","file_name":"rna_peptide.py","file_ext":"py","file_size_in_byte":3140,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
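A small, self-contained illustration of the in-frame filter used by find_cleavage_sites above: regex match ends are candidate cut positions, and only codon-aligned ones (multiples of 3) survive. The digestion pattern and sequence here are made up for the demo:

import re

rule = re.compile(r"AG")                       # hypothetical digestion pattern
seq = "GAGAGTT"
ends = [m.end() for m in rule.finditer(seq)]   # candidate cut positions: [3, 5]
in_frame = [e for e in ends if e % 3 == 0]     # keep codon-aligned cuts: [3]
print(ends, in_frame)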
+{"seq_id":"474590498","text":"from PIL import Image\nimport os\n\nimage = []\n# read the image from folder\nfor f in os.listdir('.'):\n    if f.endswith('png'):\n        image.append(f)\n\nfor i in image:\n    img = Image.open(i)\n    pixels = img.load()\n    for x in range(img.size[0]):    # for every pixel (renamed from i/j so the file-loop variable is not shadowed)\n        for y in range(img.size[1]):\n            if pixels[x, y] != (0, 0, 0):\n                # change to white if the pixel is not pure black\n                pixels[x, y] = (255, 255, 255)\n    name = '/Users/tracy/Desktop/Image_Processing/test/new_' + i  # one output per input instead of overwriting 'new'\n    img.save(name, 'png')\n    #fn, fext = os.path.splitext()\n    # i.thumbnail(size_128)\n    #img.save('/Users/tracy/Desktop/Image_Processing/test/new')\n    #img.show()\n    # img.show()\n# this block can be used to change image pixels and display the result\n    # pixels = img.load()\n    # for i in range(img.size[0]):\n    #     for j in range(img.size[1]):\n    #         if pixels[i, j] != (0, 0, 0):\n    #             pixels[i, j] = (255, 255, 255)\n    # img.save('new/{}{}'.bmp)\n# img.show()\n","sub_path":"pixel_folder.py","file_name":"pixel_folder.py","file_ext":"py","file_size_in_byte":1009,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"197110537","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\nimport numbers\nimport re\n\n__author__ = 'asaskevich'\n__escape_map__ = [('\"', '\\\\\"'), ('\\n', '\\\\n'), ('\\t', '\\\\t')]\n\n\nclass ParseException(BaseException):\n    \"\"\"\n    Exception raised on JSON string parsing errors\n    \"\"\"\n\n    def __init__(self, msg, token, token_group=''):\n        BaseException.__init__(self, msg + ', token: ' + token + ', group: ' + token_group)\n\n\ndef escape_str(string):\n    \"\"\"\n    Escapes special characters in a string\n    :param string: string to escape\n    :return: the escaped string\n    \"\"\"\n    for old, new in __escape_map__:\n        string = string.replace(old, new)\n    return string\n\n\ndef unescape_str(string):\n    \"\"\"\n    Restores escaped special characters\n    :param string: escaped string\n    :return: the original string\n    \"\"\"\n    for old, new in __escape_map__:\n        string = string.replace(new, old)\n    return string\n\n\ndef to_json(obj):\n    \"\"\"\n    Converts an object to a JSON string\n    :param obj: object to convert\n    :return: JSON string\n    \"\"\"\n    # None -> null\n    if obj is None:\n        return 'null'\n    # boolean values\n    elif isinstance(obj, bool):\n        if obj:\n            return \"true\"\n        else:\n            return \"false\"\n    # string\n    elif isinstance(obj, str):\n        return ''.join(['\"', escape_str(obj), '\"'])\n    # number\n    elif isinstance(obj, numbers.Number):\n        return str(obj)\n    # list or tuple\n    elif isinstance(obj, (list, tuple)):\n        return '[' + \\\n               ','.join([to_json(item) for item in obj]) + \\\n               ']'\n    # dictionary\n    elif isinstance(obj, dict):\n        return '{' + \\\n               ','.join([\n                   ''.join(['\"', escape_str(key), '\"', ':', to_json(obj[key])]) for key in obj.keys()\n               ]) + \\\n               '}'\n    # class instance\n    elif isinstance(obj, object) and hasattr(obj, '__dict__'):\n        return to_json(obj.__dict__)\n    # otherwise the type is unsupported\n    else:\n        raise NotImplementedError('type of this object is not supported')\n\n\ndef get_tokens(text):\n    \"\"\"\n    Returns the list of tokens for a JSON string\n    :param text: JSON string\n    :return: list of tokens\n    \"\"\"\n    tokens = []\n    chars = list(text)\n    while len(chars) > 0:\n        ch = chars[0]\n        # structural tokens\n        if ch in ('[', ']', ',', ':', '{', '}'):\n            tokens.append(ch)\n        # start of a string or key\n        elif ch == '\"':\n            token = ch\n            i = 1\n            while i < len(chars):\n                ch = chars[i]\n                token += ch\n                # an unescaped quote ends the string\n                if not chars[i - 1] == '\\\\':\n                    if ch == '\"':\n                        break\n                i += 1\n            tokens.append(token)\n            del chars[0:i + 1]\n            continue\n        # number\n        elif re.match('[0-9\\.\\-e]', ch):\n            token = ch\n            del chars[0]\n            while len(chars) > 0 and re.match('[0-9\\.\\-e]', chars[0]):\n                ch = 
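For comparison, the per-pixel loop in pixel_folder.py above can be done in one vectorized step; a sketch with NumPy, where the file names are placeholders:

import numpy as np
from PIL import Image

img = Image.open("example.png").convert("RGB")   # placeholder input file
arr = np.array(img)
mask = arr.any(axis=-1)        # True wherever the pixel is not pure black
arr[mask] = 255                # turn every non-black pixel white
Image.fromarray(arr).save("example_bw.png")      # placeholder output file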
chars[0]\n token += ch\n del chars[0]\n tokens.append(token)\n continue\n # одно из ключевых слов null / true\n if len(chars) >= 4:\n if ''.join(chars[0:4]) in ('null', 'true'):\n tokens.append(''.join(chars[0:4]))\n del chars[0:4]\n continue\n # ключевое слово false\n if len(chars) >= 5:\n if ''.join(chars[0:5]) == 'false':\n tokens.append(''.join(chars[0:5]))\n del chars[0:5]\n continue\n del chars[0]\n return tokens\n\n\ndef from_json(text):\n \"\"\"\n Возвращает объект, полученный после преобразования json-строки\n :param text: json-строка\n :return: объект\n \"\"\"\n tokens = get_tokens(text)\n if len(tokens) == 0:\n return None\n # на входе один токен\n if len(tokens) == 1:\n token = tokens[0]\n if token == 'false':\n return False\n elif token == 'true':\n return True\n elif token == 'null':\n return None\n elif re.match('^\\-?[0-9]+$', token):\n return int(token)\n elif re.match('^\\-?[0-9]+(\\.[0-9]+)?$', token):\n return float(token)\n elif token[0] == '\"' and token[-1] == '\"':\n return unescape_str(token[1:-1])\n else:\n raise ParseException('unexpected token ', token, text)\n # на входе массив\n elif tokens[0] == '[' and tokens[-1] == ']':\n return_array = []\n tokens = tokens[1:-1]\n i = 0\n while i < len(tokens):\n token = tokens[i]\n # элемент массива - массив\n # ищем конец элемента, парсим\n if token == '[':\n j = i + 1\n depth = 1\n tokens_group = ['[']\n while depth > 0 and j < len(tokens):\n if tokens[j] == '[':\n depth += 1\n tokens_group.append(tokens[j])\n elif tokens[j] == ']':\n depth -= 1\n tokens_group.append(tokens[j])\n if depth == 0:\n return_array.append(from_json(''.join(tokens_group)))\n tokens_group.clear()\n break\n # elif depth < 0:\n # raise ParseException('unexpected token', tokens[j], ''.join(tokens_group))\n else:\n tokens_group.append(tokens[j])\n j += 1\n if depth != 0:\n raise ParseException('unexpected token', token, ''.join(tokens_group))\n i = j + 1\n elif token == '{':\n # элемент массива - объект\n # ищем конец элемента, парсим\n j = i + 1\n depth = 1\n tokens_group = ['{']\n while depth > 0 and j < len(tokens):\n if tokens[j] == '{':\n depth += 1\n tokens_group.append(tokens[j])\n elif tokens[j] == '}':\n depth -= 1\n tokens_group.append(tokens[j])\n if depth == 0:\n return_array.append(from_json(''.join(tokens_group)))\n tokens_group.clear()\n break\n # elif depth < 0:\n # raise ParseException('unexpected token', tokens[j], ''.join(tokens_group))\n else:\n tokens_group.append(tokens[j])\n j += 1\n if depth != 0:\n raise ParseException('unexpected token', token, ''.join(tokens_group))\n i = j + 1\n elif token == ',':\n # разделитель элементов\n i += 1\n continue\n else:\n return_array.append(from_json(token))\n i += 1\n return return_array\n elif tokens[0] == '{' and tokens[-1] == '}':\n # на входе - объект\n tokens = tokens[1:-1]\n i = 0\n return_dict = {}\n while i < len(tokens):\n token = tokens[i]\n next_token = tokens[i + 1] if i + 1 < len(tokens) else None\n if token[0] == '\"' and token[-1] == '\"' and \\\n next_token is not None and next_token == ':':\n # текущий токен - ключ, следующий - \":\"\n key = token\n value_token = tokens[i + 2] if i + 2 < len(tokens) else None\n if value_token == '{':\n # после двоеточия - возможно объект\n # ищем конец объекта - парсим\n j = i + 3\n depth = 1\n tokens_group = ['{']\n while depth > 0 and j < len(tokens):\n if tokens[j] == '{':\n depth += 1\n tokens_group.append(tokens[j])\n elif tokens[j] == '}':\n depth -= 1\n tokens_group.append(tokens[j])\n if depth == 0:\n 
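A quick demonstration of what the get_tokens tokenizer defined above produces; the expected output is shown in the trailing comment:

print(get_tokens('{"a": [1, 2.5], "b": true}'))
# ['{', '"a"', ':', '[', '1', ',', '2.5', ']', ',', '"b"', ':', 'true', '}']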
return_dict[unescape_str(key[1:-1])] = from_json(''.join(tokens_group))\n tokens_group.clear()\n break\n # elif depth < 0:\n # raise ParseException('unexpected token', tokens[j], ''.join(tokens_group))\n else:\n tokens_group.append(tokens[j])\n j += 1\n if depth != 0:\n raise ParseException('unexpected token', token, ''.join(tokens_group))\n i = j + 1\n elif value_token == '[':\n # после двоеточия - возможно массив\n j = i + 3\n depth = 1\n tokens_group = ['[']\n # ищем конец массива, парсим\n while depth > 0 and j < len(tokens):\n if tokens[j] == '[':\n depth += 1\n tokens_group.append(tokens[j])\n elif tokens[j] == ']':\n depth -= 1\n tokens_group.append(tokens[j])\n if depth == 0:\n return_dict[unescape_str(key[1:-1])] = from_json(''.join(tokens_group))\n tokens_group.clear()\n break\n # elif depth < 0:\n # raise ParseException('unexpected token', tokens[j], ''.join(tokens_group))\n else:\n tokens_group.append(tokens[j])\n j += 1\n if depth != 0:\n raise ParseException('unexpected token', token, ''.join(tokens_group))\n i = j + 1\n else:\n return_dict[unescape_str(key[1:-1])] = from_json(value_token)\n i += 3\n continue\n elif token == ',':\n i += 1\n continue\n else:\n raise ParseException('unexpected token', token, ''.join(tokens))\n i += 1\n return return_dict\n else:\n raise ParseException('unexpected token', text)\n","sub_path":"kurs_3/sem_1/IGI/lb/Laboratornaya_2/Лабораторная 2/lab2/json.py","file_name":"json.py","file_ext":"py","file_size_in_byte":11601,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"380626323","text":"# Settings file used for local development.\nfrom __future__ import absolute_import\nfrom datetime import datetime\nfrom .base import *\n\nDEBUG = True\n# JIRA_ISSUE_COLLECTOR = False\nENABLE_VIRUS_SCAN = False\n\nALLOWED_HOSTS = ['*', ]\n\n# EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'\n\nEMAIL_REPLY_TO_DOMAIN = 'localhost'\nEMAIL_FROM_ADDRESS = 'recruitment@localhost'\n\n# URL that this site is hosted on\n# Do not include the trailing slash.\nSERVER_URL = 'http://localhost:8000'\n\n# Absolute filesystem path to the directory that will hold user-uploaded files.\n# Example: \"/home/media/media.lawrence.com/media/\"\n# MEDIA_ROOT = BASE_DIR.child('media')\n\n# URL that handles the media served from MEDIA_ROOT. Make sure to use a\n# trailing slash.\n# Examples: \"http://media.lawrence.com/media/\", \"http://example.com/media/\"\nMEDIA_URL = '/media/'\n\n# Absolute path to the directory static files should be collected to.\n# Don't put anything in this directory yourself; store your static files\n# in apps' \"static/\" subdirectories and in STATICFILES_DIRS.\n# Example: \"/home/media/media.lawrence.com/static/\"\nSTATIC_ROOT = ''\n\n# URL prefix for static files.\n# Example: \"http://media.lawrence.com/static/\"\nSTATIC_URL = '/static/'\n\n# A sample logging configuration. 
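A round-trip sanity check for the to_json/from_json pair defined in json.py above:

obj = {"name": "Alice", "scores": [1, 2.5, None], "active": True}
encoded = to_json(obj)     # '{"name":"Alice","scores":[1,2.5,null],"active":true}'
decoded = from_json(encoded)
assert decoded == obj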
The only tangible logging\n# performed by this configuration is to send an email to\n# the site admins on every HTTP 500 error when DEBUG=False.\n# See http://docs.djangoproject.com/en/dev/topics/logging for\n# more details on how to customize your logging configuration.\n\nlog_file = path.abspath(__name__ + datetime.now().strftime('%Y_%m_%d') + '.log')\n\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'formatters': {\n 'simple': {\n 'format': '%(message)s',\n },\n 'verbose': {\n 'format': '%(levelname)s %(asctime)s %(message)s',\n },\n },\n 'filters': {\n 'require_debug_false': {\n '()': 'django.utils.log.RequireDebugFalse'\n }\n },\n 'handlers': {\n 'console': {\n 'level': 'INFO',\n 'class': 'logging.StreamHandler',\n 'formatter': 'simple',\n },\n 'file': {\n 'level': 'INFO',\n 'class': 'logging.FileHandler',\n 'filename': log_file,\n 'formatter': 'verbose',\n },\n },\n 'loggers': {\n 'django': {\n 'handlers': ['console', 'file'],\n 'propagate': True,\n },\n }\n}\n\n# djang-debug-toolbar doesnt allow wildcards in the INTERNAL_IPS settings which\n# is not very useful when running with vagrant as the host can change IP.\n# The following function adds wildcard support, as taken from:\n# http://dancarroll.org/blog/2011/01/debugging-django-dev-server/\nfrom fnmatch import fnmatch\n\n\nclass glob_list(list):\n def __contains__(self, key):\n for elt in self:\n if fnmatch(key, elt): return True\n return False\n\n\n# Required for django-debug-toolbar. When connecting to the recruitment system\n# using the following IPs the debug-toolbar will be displayed.\nINTERNAL_IPS = glob_list(['127.0.0.1', '10.0.*.*'])\n\nINSTALLED_APPS += (\n # 'debug_toolbar',\n # 'template_timings_panel',\n)\n","sub_path":"sylvan_library/sylvan_library/settings/local.py","file_name":"local.py","file_ext":"py","file_size_in_byte":3199,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"415467560","text":"from django.shortcuts import redirect\nfrom django.template.context_processors import request\nfrom django.urls import reverse_lazy\nfrom django.views.generic import TemplateView, ListView, UpdateView, CreateView, DeleteView\nfrom profissional.models import Profissional\nfrom paciente.models import Paciente\nfrom website.forms import InsereProfissionalForm,InserePacienteForm\n\n\n\n# PÁGINA PRINCIPAL\n# ----------------------------------------------\n\nclass IndexTemplateView(TemplateView):\n template_name = \"website/index.html\"\n\n\n# LISTA DE FUNCIONÁRIOS\n# ----------------------------------------------\n\nclass ProfissionalListView(ListView):\n template_name = \"website/lista_profissional.html\"\n model = Profissional\n context_object_name = \"profissionais\"\n\n\n# CADASTRAMENTO DE PACIENTES\n# ----------------------------------------------\n\nclass ProfissionalCreateView(CreateView):\n template_name = \"website/cria_profissional.html\"\n model = Profissional\n form_class = InsereProfissionalForm\n success_url = reverse_lazy(\"website:lista_profissionais\")\n\n\n# ATUALIZAÇÃO DE FUNCIONÁRIOS\n# ----------------------------------------------\n\nclass ProfissionalUpdateView(UpdateView):\n template_name = \"website/atualiza_profissional.html\"\n model = Profissional\n fields = '__all__'\n context_object_name = 'profissional'\n success_url = reverse_lazy(\"website:lista_profissionais\")\n\n\n# EXCLUSÃO DE FUNCIONÁRIOS\n# ----------------------------------------------\n\nclass ProfissionalDeleteView(DeleteView):\n template_name = 
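A self-contained illustration of the wildcard membership test that INTERNAL_IPS relies on, using a compact variant of the glob_list class above:

from fnmatch import fnmatch

class glob_list(list):
    def __contains__(self, key):
        # membership succeeds if any stored pattern matches the key
        return any(fnmatch(key, pattern) for pattern in self)

ips = glob_list(['127.0.0.1', '10.0.*.*'])
print('10.0.2.15' in ips)    # True  -- matched by the 10.0.*.* pattern
print('192.168.0.1' in ips)  # False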
\"website/exclui_profissional.html\"\n model = Profissional\n context_object_name = 'profissional'\n success_url = reverse_lazy(\"website:lista_profissionais\")\n\n# LISTA DE FUNCIONÁRIOS\n# ----------------------------------------------\n\nclass PacienteListView(ListView):\n template_name = \"website/lista_paciente.html\"\n model = Paciente\n context_object_name = \"pacientes\"\n\n\n# CADASTRAMENTO DE FUNCIONÁRIOS\n# ----------------------------------------------\n\nclass PacienteCreateView(CreateView):\n template_name = \"website/cria_paciente.html\"\n model = Paciente\n form_class = InserePacienteForm\n success_url = reverse_lazy(\"website:lista_pacientes\")\n\n\n# ATUALIZAÇÃO DE FUNCIONÁRIOS\n# ----------------------------------------------\n\nclass PacienteUpdateView(UpdateView):\n template_name = \"website/atualiza_paciente.html\"\n model = Paciente\n fields = '__all__'\n context_object_name = 'paciente'\n success_url = reverse_lazy(\"website:lista_pacientes\")\n\n\n# EXCLUSÃO DE FUNCIONÁRIOS\n# ----------------------------------------------\n\nclass PacienteDeleteView(DeleteView):\n template_name = \"website/exclui_paciente.html\"\n model = Paciente\n context_object_name = 'paciente'\n success_url = reverse_lazy(\"website:lista_pacientes\")\n\n\n","sub_path":"website/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2846,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"110462355","text":"import numpy as np \nb=0\nn=1\n\ndef solveSecretaryProblem(T):\n V=np.zeros((T+1,2)) #value(PCS)\n V[T][b]=1\n V[T][n]=0\n pi=np.zeros(T+1,dtype='int') #policy only when b\n \n for t in reversed(range(0,T)):\n #when action is go\n V[t][b]=1/(t+1)*V[t+1][b]+t/(t+1)*V[t+1][n]\n V[t][n]=1/(t+1)*V[t+1][b]+t/(t+1)*V[t+1][n]\n pi[t]=1\n #when action is stop\n if(V[t][b] 0):\n raise Exception(\"Matrix is not positive definite\")\n\n L = np.zeros_like(M)\n for i in range(len(M)):\n for j in range(i + 1):\n _sum = sum((L[i][k] * L[j][k]) for k in range(j))\n if i == j:\n L[j][j] = d_type(cmath.sqrt(M[j][j] - _sum).real)\n else:\n L[i][j] = d_type(1 / L[j][j] * (M[i][j] - _sum))\n\n return L\n","sub_path":"solvers/utils/decomposition.py","file_name":"decomposition.py","file_ext":"py","file_size_in_byte":883,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"395524917","text":"import hashlib\nimport sys\nimport os\n\ndef md5(fname):\n hash_md5 = hashlib.md5()\n with open(fname, \"rb\") as f:\n fileContent = f.read()\n hash_md5.update(fileContent)\n return hash_md5.hexdigest()\n\n\nprint(sys.argv[0])\nfileName = \"/home/ec2-user/environment/Python/Tests/FileForMD5.txt\"\nprint(md5(fileName))\n\nprint(os.path.dirname(os.path.abspath(sys.argv[0])))\n","sub_path":"Tests/CheckMD5.py","file_name":"CheckMD5.py","file_ext":"py","file_size_in_byte":379,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"587742859","text":"# dictionary = 'key', 'value'\r\n\r\nemployee = {\r\n # key : value\r\n 'nama': 'Andy',\r\n 'usia': 20,\r\n 'married': True,\r\n 'jabatan': 'IT Engineer',\r\n 'kendaraan': ['mobil', 'motor'],\r\n 'address': {\r\n 'street': 'Jalan Mawar',\r\n 'RT': 5,\r\n 'RW': 2,\r\n 'zipcode': 12345,\r\n 'geo': {\r\n 'lat': 12345.621271,\r\n 'long': 1232131.12313\r\n }\r\n }\r\n}\r\n\r\nprint(employee)\r\nprint(\"Value di dalam key 'nama' adalah:\", employee['nama'])\r\nprint(\"Value di dalam key 'kendaraan' adalah:\", 
employee['kendaraan'])\r\nprint(\"Value di dalam key 'kendaraan' di index pertama:\", employee['kendaraan'][0])\r\n# ['mobil', 'motor][0] = 'mobil'\r\nprint(\"Value di dalam key 'address' adalah:\", employee['address'])\r\nprint(\"Value di dalam key 'address' nama jalan saja:\", employee['address']['street'])\r\n# ['mobil', 'motor][0] = 'mobil'\r\n\r\nprint(list(employee.keys()))\r\nprint(list(employee.values()))\r\n\r\n'''\r\nNo. 1\r\nMasukkan hari: Senin \r\noutput: bahasa inggris dari Senin adalah Monday\r\n\r\nNo. 2\r\nMasukkan hari (INA/ENG): senin\r\noutput: bahasa inggris dari senin adalah Monday\r\n\r\nMasukkan hari (INA/ENG): monday\r\noutput: bahasa indonesia dari monday adalah Senin\r\n'''","sub_path":"Modul 1/4/4_dictionary_PakRIdho.py","file_name":"4_dictionary_PakRIdho.py","file_ext":"py","file_size_in_byte":1319,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"497998423","text":"# Copyright (c) 2018- Xilinx, Inc (Alessandro Pappalardo)\n# Copyright (c) 2016- Facebook, Inc (Adam Paszke)\n# Copyright (c) 2014- Facebook, Inc (Soumith Chintala)\n# Copyright (c) 2011-2014 Idiap Research Institute (Ronan Collobert)\n# Copyright (c) 2012-2014 Deepmind Technologies (Koray Kavukcuoglu)\n# Copyright (c) 2011-2012 NEC Laboratories America (Koray Kavukcuoglu)\n# Copyright (c) 2011-2013 NYU (Clement Farabet)\n# Copyright (c) 2006-2010 NEC Laboratories America (Ronan Collobert, Leon Bottou, Iain Melvin, Jason Weston)\n# Copyright (c) 2006 Idiap Research Institute (Samy Bengio)\n# Copyright (c) 2001-2004 Idiap Research Institute (Ronan Collobert, Samy Bengio, Johnny Mariethoz)\n\n# All rights reserved.\n\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n\n# 1. Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n\n# 2. Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n\n# 3. Neither the names of Xilinx, Facebook, Deepmind Technologies, NYU,\n# NEC Laboratories America and IDIAP Research Institute nor the names\n# of its contributors may be used to endorse or promote products derived\n# from this software without specific prior written permission.\n\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n# ARE DISCLAIMED. 
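One possible answer to the exercise sketched at the end of 4_dictionary_PakRIdho.py above. The day table is an assumption, and the prompts keep the wording the exercise specifies:

hari = {"senin": "Monday", "selasa": "Tuesday", "rabu": "Wednesday",
        "kamis": "Thursday", "jumat": "Friday", "sabtu": "Saturday",
        "minggu": "Sunday"}
# reverse mapping for the English -> Indonesian direction
inggris = {v.lower(): k.capitalize() for k, v in hari.items()}

day = input("Masukkan hari (INA/ENG): ").lower()
if day in hari:
    print("bahasa inggris dari", day, "adalah", hari[day])
elif day in inggris:
    print("bahasa indonesia dari", day, "adalah", inggris[day])
else:
    print("hari tidak dikenal")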
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n# POSSIBILITY OF SUCH DAMAGE.\n\nfrom abc import ABCMeta\nfrom typing import Optional, Union, Tuple\n\nfrom torch import nn\nfrom torch.nn import Module\n\nfrom brevitas.core.bit_width import BitWidthParameter, BitWidthImplType\nfrom brevitas.core.function_wrapper import Identity, ConstScalarClamp\nfrom brevitas.core.quant import QuantType, IdentityQuant\nfrom brevitas.core.stats import StatsOp\nfrom brevitas.core.restrict_val import RestrictValueType, FloatToIntImplType\nfrom brevitas.core.scaling import ScalingImplType, StatsInputViewShapeImpl\nfrom brevitas.proxy.runtime_quant import ActivationQuantProxy\nfrom .quant_layer import QuantLayer, SCALING_MIN_VAL\n\n\nclass QuantActivation(QuantLayer, Module):\n __metaclass__ = ABCMeta\n\n def __init__(self, return_quant_tensor):\n QuantLayer.__init__(self,\n compute_output_scale=True,\n compute_output_bit_width=True,\n return_quant_tensor=return_quant_tensor)\n Module.__init__(self)\n\n @property\n def act_quant_proxy(self):\n return self._act_quant_proxy\n\n @act_quant_proxy.setter\n def act_quant_proxy(self, act_quant_proxy):\n self._act_quant_proxy = act_quant_proxy\n\n def quant_act_scale(self):\n if isinstance(self.act_quant_proxy.fused_activation_quant_proxy.tensor_quant, IdentityQuant):\n raise Exception(\"Can't generate scaling factor without quantization enabled\")\n zero_hw_sentinel = self.act_quant_proxy.zero_hw_sentinel\n scaling_impl = self.act_quant_proxy.fused_activation_quant_proxy.tensor_quant.scaling_impl\n current_status = scaling_impl.training\n scaling_impl.eval()\n _, out, _ = self.act_quant_proxy(zero_hw_sentinel)\n scaling_impl.train(current_status)\n return out\n\n def forward(self, input):\n tensor, _, _ = self.unpack_input(input)\n output, output_scale, output_bit_width = self.act_quant_proxy(tensor)\n return self.pack_output(output, output_scale, output_bit_width)\n\n\nclass QuantReLU(QuantActivation):\n\n def __init__(self,\n bit_width: int,\n max_val: float,\n quant_type: QuantType = QuantType.FP,\n float_to_int_impl_type: FloatToIntImplType = FloatToIntImplType.ROUND,\n scaling_impl_type: ScalingImplType = ScalingImplType.PARAMETER,\n scaling_override: Optional[Module] = None,\n scaling_per_channel: bool = False,\n scaling_min_val: Optional[float] = SCALING_MIN_VAL,\n scaling_stats_sigma = 2.0,\n scaling_stats_op = StatsOp.MEAN_LEARN_SIGMA_STD,\n scaling_stats_buffer_momentum = 0.1,\n scaling_stats_permute_dims = (1, 0, 2, 3),\n per_channel_broadcastable_shape: Optional[Tuple[int, ...]] = None,\n min_overall_bit_width: Optional[int] = 2,\n max_overall_bit_width: Optional[int] = None,\n bit_width_impl_override: Union[BitWidthParameter] = None,\n bit_width_impl_type: BitWidthImplType = BitWidthImplType.CONST,\n restrict_bit_width_type: RestrictValueType = RestrictValueType.INT,\n restrict_scaling_type: RestrictValueType = RestrictValueType.LOG_FP,\n override_pretrained_bit_width: bool = False,\n return_quant_tensor: bool = False):\n super(QuantReLU, self).__init__(return_quant_tensor=return_quant_tensor)\n 
activation_impl = nn.ReLU()\n self.act_quant_proxy = ActivationQuantProxy(activation_impl=activation_impl,\n bit_width=bit_width,\n signed=False,\n narrow_range=False,\n scaling_override=scaling_override,\n min_val=0.0,\n max_val=max_val,\n quant_type=quant_type,\n float_to_int_impl_type=float_to_int_impl_type,\n scaling_impl_type=scaling_impl_type,\n scaling_per_channel=scaling_per_channel,\n scaling_min_val=scaling_min_val,\n per_channel_broadcastable_shape=per_channel_broadcastable_shape,\n min_overall_bit_width=min_overall_bit_width,\n max_overall_bit_width=max_overall_bit_width,\n bit_width_impl_override=bit_width_impl_override,\n bit_width_impl_type=bit_width_impl_type,\n restrict_bit_width_type=restrict_bit_width_type,\n restrict_scaling_type=restrict_scaling_type,\n override_pretrained_bit_width=override_pretrained_bit_width,\n scaling_stats_sigma=scaling_stats_sigma,\n scaling_stats_permute_dims=scaling_stats_permute_dims,\n scaling_stats_op=scaling_stats_op,\n scaling_stats_buffer_momentum=scaling_stats_buffer_momentum)\n\n\nclass QuantSigmoid(QuantActivation):\n\n def __init__(self,\n bit_width: int,\n narrow_range: bool = False,\n quant_type: QuantType = QuantType.FP,\n float_to_int_impl_type: FloatToIntImplType = FloatToIntImplType.ROUND,\n min_overall_bit_width: Optional[int] = 2,\n max_overall_bit_width: Optional[int] = None,\n bit_width_impl_override: Union[BitWidthParameter] = None,\n bit_width_impl_type: BitWidthImplType = BitWidthImplType.CONST,\n restrict_bit_width_type: RestrictValueType = RestrictValueType.INT,\n restrict_scaling_type: RestrictValueType = RestrictValueType.LOG_FP,\n scaling_min_val: Optional[float] = SCALING_MIN_VAL,\n override_pretrained_bit_width: bool = False,\n return_quant_tensor = False):\n super(QuantSigmoid, self).__init__(return_quant_tensor=return_quant_tensor)\n activation_impl = nn.Sigmoid()\n self.act_quant_proxy = ActivationQuantProxy(activation_impl=activation_impl,\n bit_width=bit_width,\n signed=False,\n narrow_range=narrow_range,\n scaling_override=None,\n min_val=0.0,\n max_val=1.0,\n quant_type=quant_type,\n float_to_int_impl_type=float_to_int_impl_type,\n scaling_impl_type=ScalingImplType.CONST,\n scaling_per_channel=False,\n scaling_min_val=scaling_min_val,\n per_channel_broadcastable_shape=None,\n min_overall_bit_width=min_overall_bit_width,\n max_overall_bit_width=max_overall_bit_width,\n bit_width_impl_override=bit_width_impl_override,\n bit_width_impl_type=bit_width_impl_type,\n restrict_bit_width_type=restrict_bit_width_type,\n restrict_scaling_type=restrict_scaling_type,\n override_pretrained_bit_width=override_pretrained_bit_width,\n scaling_stats_sigma=None,\n scaling_stats_op=None,\n scaling_stats_buffer_momentum=None,\n scaling_stats_permute_dims=None)\n\n\nclass QuantTanh(QuantActivation):\n\n def __init__(self,\n bit_width: int,\n narrow_range: bool = False,\n quant_type: QuantType = QuantType.FP,\n float_to_int_impl_type: FloatToIntImplType = FloatToIntImplType.ROUND,\n min_overall_bit_width: Optional[int] = 2,\n max_overall_bit_width: Optional[int] = None,\n bit_width_impl_override: Union[BitWidthParameter] = None,\n bit_width_impl_type: BitWidthImplType = BitWidthImplType.CONST,\n restrict_bit_width_type: RestrictValueType = RestrictValueType.INT,\n restrict_scaling_type: RestrictValueType = RestrictValueType.LOG_FP,\n scaling_min_val: Optional[float] = SCALING_MIN_VAL,\n override_pretrained_bit_width: bool = False,\n return_quant_tensor: bool = False):\n super(QuantTanh, 
self).__init__(return_quant_tensor=return_quant_tensor)\n activation_impl = nn.Tanh()\n self.act_quant_proxy = ActivationQuantProxy(activation_impl=activation_impl,\n bit_width=bit_width,\n signed=True,\n narrow_range=narrow_range,\n scaling_override=None,\n min_val=-1.0,\n max_val=1.0,\n quant_type=quant_type,\n float_to_int_impl_type=float_to_int_impl_type,\n scaling_impl_type=ScalingImplType.CONST,\n scaling_per_channel=False,\n scaling_min_val=scaling_min_val,\n per_channel_broadcastable_shape=None,\n min_overall_bit_width=min_overall_bit_width,\n max_overall_bit_width=max_overall_bit_width,\n bit_width_impl_override=bit_width_impl_override,\n bit_width_impl_type=bit_width_impl_type,\n restrict_bit_width_type=restrict_bit_width_type,\n restrict_scaling_type=restrict_scaling_type,\n override_pretrained_bit_width=override_pretrained_bit_width,\n scaling_stats_sigma=None,\n scaling_stats_op=None,\n scaling_stats_buffer_momentum=None,\n scaling_stats_permute_dims=None)\n\n\nclass QuantHardTanh(QuantActivation):\n\n def __init__(self,\n bit_width: int,\n min_val: float = -1.0,\n max_val: float = 1.0,\n narrow_range: bool = False,\n quant_type: QuantType = QuantType.FP,\n float_to_int_impl_type: FloatToIntImplType = FloatToIntImplType.ROUND,\n scaling_impl_type: ScalingImplType = ScalingImplType.PARAMETER,\n scaling_override: Optional[Module] = None,\n scaling_per_channel: bool = False,\n scaling_stats_sigma: float = 3.0,\n scaling_stats_op: StatsOp = StatsOp.MEAN_LEARN_SIGMA_STD,\n scaling_stats_buffer_momentum: float = 0.1,\n scaling_stats_permute_dims: Tuple = (1, 0, 2, 3),\n per_channel_broadcastable_shape: Optional[Tuple[int, ...]] = None,\n min_overall_bit_width: Optional[int] = 2,\n max_overall_bit_width: Optional[int] = None,\n bit_width_impl_override: Union[BitWidthParameter] = None,\n bit_width_impl_type: BitWidthImplType = BitWidthImplType.CONST,\n restrict_bit_width_type: RestrictValueType = RestrictValueType.INT,\n restrict_scaling_type: RestrictValueType = RestrictValueType.LOG_FP,\n scaling_min_val: Optional[float] = SCALING_MIN_VAL,\n override_pretrained_bit_width: bool = False,\n return_quant_tensor: bool = False):\n super(QuantHardTanh, self).__init__(return_quant_tensor=return_quant_tensor)\n if quant_type == QuantType.FP:\n activation_impl = ConstScalarClamp(min_val=min_val, max_val=max_val)\n else:\n activation_impl = Identity()\n self.act_quant_proxy = ActivationQuantProxy(activation_impl=activation_impl,\n bit_width=bit_width,\n signed=True,\n narrow_range=narrow_range,\n scaling_override=scaling_override,\n min_val=min_val,\n max_val=max_val,\n quant_type=quant_type,\n float_to_int_impl_type=float_to_int_impl_type,\n scaling_impl_type=scaling_impl_type,\n scaling_per_channel=scaling_per_channel,\n scaling_min_val=scaling_min_val,\n per_channel_broadcastable_shape=per_channel_broadcastable_shape,\n min_overall_bit_width=min_overall_bit_width,\n max_overall_bit_width=max_overall_bit_width,\n bit_width_impl_override=bit_width_impl_override,\n bit_width_impl_type=bit_width_impl_type,\n restrict_bit_width_type=restrict_bit_width_type,\n restrict_scaling_type=restrict_scaling_type,\n override_pretrained_bit_width=override_pretrained_bit_width,\n scaling_stats_sigma=scaling_stats_sigma,\n scaling_stats_op=scaling_stats_op,\n scaling_stats_buffer_momentum=scaling_stats_buffer_momentum,\n 
scaling_stats_permute_dims=scaling_stats_permute_dims)\n","sub_path":"brevitas/nn/quant_activation.py","file_name":"quant_activation.py","file_ext":"py","file_size_in_byte":18204,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"339710990","text":"import setuptools\n\nwith open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\n\nsetuptools.setup(\n name = \"readify\",\n packages = [\"readify\"],\n long_description = long_description,\n long_description_content_type = \"text/markdown\",\n version = \"1.0.0\",\n description = \"Convert Integer to Human Readable format\",\n author = \"Yoginth\",\n maintainer = \"Yoginth\",\n author_email = \"me@yoginth.com\",\n maintainer_email = \"me@yoginth.com\",\n url = \"https://yoginth.com\",\n keywords = \"Humanize, Readable\",\n license = \"MIT\",\n classifiers=(\n \"Programming Language :: Python\",\n \"Natural Language :: English\",\n \"Environment :: Plugins\",\n \"Operating System :: OS Independent\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: MIT License\",\n ),\n project_urls={\n 'Source': 'https://gitlab.com/yoginth/readify',\n },\n)\n","sub_path":"pypi_install_script/readify-1.0.0.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":919,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"161094555","text":"from .power_method import (\n create_markov_matrix, create_markov_matrix_discrete,\n stationary_distribution,\n)\n\n\ndef degree_centrality_scores(\n similarity_matrix,\n threshold=None,\n increase_power=True,\n):\n if not (\n threshold is None\n or isinstance(threshold, float)\n and 0 <= threshold < 1\n ):\n raise ValueError(\n '\\'threshold\\' should be a floating-point number '\n 'from the interval [0, 1) or None',\n )\n\n if threshold is None:\n markov_matrix = create_markov_matrix(similarity_matrix)\n\n else:\n markov_matrix = create_markov_matrix_discrete(\n similarity_matrix,\n threshold,\n )\n\n scores = stationary_distribution(\n markov_matrix,\n increase_power=increase_power,\n normalized=False,\n )\n\n return scores","sub_path":"src/external/lexrank.py","file_name":"lexrank.py","file_ext":"py","file_size_in_byte":855,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"11167230","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[ ]:\n\n\na=1\nwhile 1==1:\n print(\"1. for convert celsius to fehrenheit\")\n print(\"2. for convert fehrenheit to celsius\")\n num=int(input(\"enter your number\"))\n \nif(num==1):\n c=int(input(\"enter celsius temp. = \"))\n f=(c*9/5)+32\n print(\"fehrenheit temp. =\",f)\n \nif(num==2):\n f=int(input(\"enter fehrenheit temp. =\"))\n c=(f-32)*5/9\n print(\"celsius temp. 
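A minimal usage sketch for the quantized activations defined in this module, assuming the package exposes them as brevitas.nn.QuantReLU; the bit width and clipping range are arbitrary choices:

import torch
from brevitas.core.quant import QuantType
from brevitas.nn import QuantReLU   # assumed import path

act = QuantReLU(bit_width=4, max_val=6.0, quant_type=QuantType.INT)
y = act(torch.randn(2, 8))          # activations quantized to 4 bits in [0, 6.0]
print(act.quant_act_scale())        # scale factor (requires quantization enabled)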
=\",c)\n \nif(num>=3):\n print(\"enter valid number\")\n a=0\n\n","sub_path":"practice1-C.py","file_name":"practice1-C.py","file_ext":"py","file_size_in_byte":485,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"99546675","text":"\"\"\"\nThe `inference` module implements inference of Brick entities from tags\nand other representations of building metadata\n\"\"\"\nimport logging\nimport pkgutil\nimport pickle\nfrom collections import defaultdict\nfrom .namespaces import BRICK, A\nfrom rdflib import Namespace\nfrom .graph import Graph\nimport rdflib\nimport owlrl\nimport io\nimport tarfile\n\n\nclass BrickInferenceSession:\n \"\"\"\n Handles all necessary inference for typical everyday usage of Brick.\n Applies the following stages in this order:\n - OWLRLInferenceSession (handles all normal inference, class -> tag)\n - TagInferenceSession (handles tag -> class)\n \"\"\"\n\n def __init__(self, load_brick=True):\n \"\"\"\n Creates a new Brick Inference session\n\n Args:\n load_brick (bool): if True, load Brick ontology into the graph\n \"\"\"\n self.g = Graph(load_brick=load_brick)\n self._tag_sess = TagInferenceSession(\n load_brick=load_brick, rebuild_tag_lookup=False, approximate=False\n )\n self._owl_sess = OWLRLInferenceSession(load_brick=load_brick)\n\n def expand(self, graph):\n \"\"\"\n Applies Brick reasoning to materialize all implied triples\n\n Args:\n graph (brickschema.graph.Graph): a Graph object containing triples\n\n Returns:\n graph (brickschema.graph.Graph): a Graph object containing the\n inferred triples in addition to the regular graph\n \"\"\"\n _inherit_bindings(graph, self.g)\n for triple in graph:\n self.g.add(triple)\n self.g = self._owl_sess.expand(self.g)\n self.g = self._tag_sess.expand(self.g)\n return _return_correct_type(graph, self.g)\n\n @property\n def triples(self):\n return self.g.triples\n\n\nclass RDFSInferenceSession:\n \"\"\"\n Provides methods and an inferface for producing the deductive closure\n of a graph under RDFS semantics\n \"\"\"\n\n def __init__(self, load_brick=True):\n \"\"\"\n Creates a new RDFS Inference session\n\n Args:\n load_brick (bool): if True, load Brick ontology into the graph\n \"\"\"\n self.g = Graph(load_brick=load_brick)\n\n def expand(self, graph):\n \"\"\"\n Applies RDFS reasoning from the Python owlrl library to the graph\n\n Args:\n graph (brickschema.graph.Graph): a Graph object containing triples\n\n Returns:\n graph (brickschema.graph.Graph): a Graph object containing the\n inferred triples in addition to the regular graph\n \"\"\"\n _inherit_bindings(graph, self.g)\n for triple in graph:\n self.g.add(triple)\n owlrl.DeductiveClosure(owlrl.RDFS_Semantics).expand(self.g.g)\n return _return_correct_type(graph, self.g)\n\n @property\n def triples(self):\n return self.g.triples\n\n\nclass OWLRLInferenceSession:\n \"\"\"\n Common entrypoint to OWL inference that automatically chooses the fastest\n available inference implementation. The priorities are as follows:\n\n 1. reasonable (Linux only for now): pip install brickschema[reasonable]\n 2. Allegro (requires docker): pip install brickschema[allegro]\n 3. 
OWLRL Python package (can be slow)\n \"\"\"\n\n def __init__(self, load_brick=True):\n \"\"\"\n Creates a new OWLRL Inference session\n\n Args:\n load_brick (bool): if True, load Brick ontology into the graph\n \"\"\"\n\n # see __init__.py for logging.basicConfig settings\n self.log = logging.getLogger(\"OWLRLInferenceSession\")\n self.log.setLevel(logging.INFO)\n\n try:\n self.sess = OWLRLReasonableInferenceSession(load_brick=load_brick)\n except ImportError:\n self.log.warning(\"Reasonable not installed; trying Allegro\")\n try:\n self.sess = OWLRLAllegroInferenceSession(load_brick=load_brick)\n except ImportError:\n self.log.warning(\"Allegro not installed; defaulting to OWLRL\")\n self.sess = OWLRLNaiveInferenceSession(load_brick=load_brick)\n\n def expand(self, graph):\n \"\"\"\n Applies OWLRL reasoning from the Python owlrl library to the graph\n\n Args:\n graph (brickschema.graph.Graph): a Graph object containing triples\n\n Returns:\n graph (brickschema.graph.Graph): a Graph object containing the\n inferred triples in addition to the regular graph\n \"\"\"\n return self.sess.expand(graph)\n\n @property\n def triples(self):\n return self.sess.g.triples\n\n\nclass OWLRLNaiveInferenceSession:\n \"\"\"\n Provides methods and an inferface for producing the deductive closure\n of a graph under OWL-RL semantics. WARNING this may take a long time\n \"\"\"\n\n def __init__(self, load_brick=True):\n \"\"\"\n Creates a new OWLRL Inference session\n\n Args:\n load_brick (bool): if True, load Brick ontology into the graph\n \"\"\"\n self.g = Graph(load_brick=load_brick)\n\n def expand(self, graph):\n \"\"\"\n Applies OWLRL reasoning from the Python owlrl library to the graph\n\n Args:\n graph (brickschema.graph.Graph): a Graph object containing triples\n\n Returns:\n graph (brickschema.graph.Graph): a Graph object containing the\n inferred triples in addition to the regular graph\n \"\"\"\n _inherit_bindings(graph, self.g)\n for triple in graph:\n self.g.add(triple)\n owlrl.DeductiveClosure(owlrl.OWLRL_Semantics).expand(self.g.g)\n return _return_correct_type(graph, self.g)\n\n @property\n def triples(self):\n return self.g.triples\n\n\nclass OWLRLReasonableInferenceSession:\n \"\"\"\n Provides methods and an inferface for producing the deductive closure\n of a graph under OWL-RL semantics. WARNING this may take a long time\n \"\"\"\n\n def __init__(self, load_brick=True):\n \"\"\"\n Creates a new OWLRL Inference session\n\n Args:\n load_brick (bool): if True, load Brick ontology into the graph\n \"\"\"\n try:\n from reasonable import PyReasoner\n except ImportError:\n raise ImportError(\n f\"'reasonable' package not found. 
Install\\\nsupport for the reasonable Reasoner with 'pip install brickschema[reasonable].\\\nCurrently only works on Linux\"\n )\n self.r = PyReasoner()\n self.g = Graph(load_brick=load_brick)\n\n def expand(self, graph):\n \"\"\"\n Applies OWLRL reasoning from the Python owlrl library to the graph\n\n Args:\n graph (brickschema.graph.Graph): a Graph object containing triples\n\n Returns:\n graph (brickschema.graph.Graph): a Graph object containing the\n inferred triples in addition to the regular graph\n \"\"\"\n _inherit_bindings(graph, self.g)\n for triple in graph:\n self.g.add(triple)\n self.r.from_graph(self.g.g)\n triples = self.r.reason()\n for t in triples:\n t = tuple(map(self._to_rdflib_ident, t))\n self.g.add(t)\n return _return_correct_type(graph, self.g)\n\n def _to_rdflib_ident(self, s):\n try:\n if s.startswith(\"http\"):\n return rdflib.URIRef(s)\n else:\n return rdflib.BNode(s)\n except Exception:\n return rdflib.Literal(s)\n\n @property\n def triples(self):\n return self.g.triples\n\n\nclass OWLRLAllegroInferenceSession:\n \"\"\"\n Provides methods and an inferface for producing the deductive closure\n of a graph under OWL-RL semantics. WARNING this may take a long time\n\n Uses the Allegrograph reasoning implementation\n \"\"\"\n\n def __init__(self, load_brick=True):\n \"\"\"\n Creates a new OWLRL Inference session backed by the Allegrograph\n reasoner (https://franz.com/agraph/support/documentation/current/materializer.html).\n Requires the docker package to work; recommended method of installing\n is to use the 'allegro' option with pip:\n pip install brickschema[allegro]\n\n Args:\n load_brick (bool): if True, load Brick ontology into the graph\n \"\"\"\n\n try:\n import docker\n except ImportError:\n raise ImportError(\n f\"'docker' package not found. 
Install support \\\nfor Allegro with 'pip install brickschema[allegro]\"\n )\n\n self.g = Graph(load_brick=load_brick)\n\n self._client = docker.from_env()\n containers = self._client.containers.list(all=True)\n print(f\"Checking {len(containers)} containers\")\n for c in containers:\n if c.name != \"agraph\":\n continue\n if c.status == \"running\":\n print(f\"Killing running agraph\")\n c.kill()\n print(f\"Removing old agraph\")\n c.remove(v=True)\n break\n\n def _setup_input(self, g):\n \"\"\"\n Add our serialized graph to an in-memory tar file\n that we can send to Docker\n \"\"\"\n g.g.serialize(\"input.ttl\", format=\"turtle\")\n tarbytes = io.BytesIO()\n tar = tarfile.open(name=\"out.tar\", mode=\"w\", fileobj=tarbytes)\n tar.add(\"input.ttl\", arcname=\"input.ttl\")\n tar.close()\n # seek to beginning so our file is not empty when docker sees it\n tarbytes.seek(0)\n return tarbytes\n\n def expand(self, graph):\n \"\"\"\n Applies OWLRL reasoning from the Python owlrl library to the graph\n\n Args:\n graph (brickschema.graph.Graph): a Graph object containing triples\n\n Returns:\n graph (brickschema.graph.Graph): a Graph object containing the\n inferred triples in addition to the regular graph\n \"\"\"\n _inherit_bindings(graph, self.g)\n\n def check_error(res):\n exit_code, message = res\n if exit_code > 0:\n print(f\"Non-zero exit code {exit_code} with message {message}\")\n\n for triple in graph:\n self.g.add(triple)\n # setup connection to docker\n tar = self._setup_input(self.g)\n # TODO: temporary name so we can have more than one running?\n agraph = self._client.containers.run(\n \"franzinc/agraph:v7.0.0\", name=\"agraph\", detach=True, shm_size=\"1G\"\n )\n if not agraph.put_archive(\"/tmp\", tar):\n print(\"Could not add input.ttl to docker container\")\n check_error(agraph.exec_run(\"chown -R agraph /tmp\", user=\"root\"))\n check_error(\n agraph.exec_run(\n \"/agraph/bin/agraph-control --config /agraph/etc/agraph.cfg start\",\n user=\"agraph\",\n )\n )\n check_error(\n agraph.exec_run(\n \"/agraph/bin/agload test \\\n/tmp/input.ttl\",\n user=\"agraph\",\n )\n )\n check_error(\n agraph.exec_run(\n \"/agraph/bin/agmaterialize test \\\n--rule all\",\n user=\"agraph\",\n )\n )\n check_error(\n agraph.exec_run(\n \"/agraph/bin/agexport -o turtle test\\\n /tmp/output.ttl\",\n user=\"agraph\",\n )\n )\n bits, stat = agraph.get_archive(\"/tmp/output.ttl\")\n with open(\"output.ttl.tar\", \"wb\") as f:\n for chunk in bits:\n f.write(chunk)\n tar = tarfile.open(\"output.ttl.tar\")\n tar.extractall()\n tar.close()\n\n agraph.stop()\n agraph.remove(v=True)\n self.g.load_file(\"output.ttl\")\n return _return_correct_type(graph, self.g)\n\n @property\n def triples(self):\n return self.g.triples\n\n\nclass InverseEdgeInferenceSession:\n \"\"\"\n Provides methods and an inferface for producing the deductive closure\n of a graph that adds all properties implied by owl:inverseOf\n \"\"\"\n\n def __init__(self, load_brick=True):\n \"\"\"\n Creates a new OWLRL Inference session\n\n Args:\n load_brick (bool): if True, load Brick ontology into the graph\n \"\"\"\n self.g = Graph(load_brick=load_brick)\n\n def expand(self, graph):\n \"\"\"\n Adds inverse predicates to the graph that are modeled\n with OWL.inverseOf\n\n Args:\n graph (brickschema.graph.Graph): a Graph object containing triples\n\n Returns:\n graph (brickschema.graph.Graph): a Graph object containing the\n inferred triples in addition to the regular graph\n \"\"\"\n _inherit_bindings(graph, self.g)\n for triple in graph:\n 
self.g.add(triple)\n # inverse relationships\n query = \"\"\"\n INSERT {\n ?o ?invprop ?s\n } WHERE {\n ?s ?prop ?o.\n ?prop owl:inverseOf ?invprop.\n }\n \"\"\"\n self.g.g.update(query)\n return _return_correct_type(graph, self.g)\n\n\nclass TagInferenceSession:\n \"\"\"\n Provides methods and an interface for inferring Brick classes from\n sets of Brick tags. If you want to work with non-Brick tags, you\n will need to use a wrapper class (see HaystackInferenceSession)\n \"\"\"\n\n def __init__(self, load_brick=True, rebuild_tag_lookup=False, approximate=False):\n \"\"\"\n Creates new Tag Inference session\n Args:\n load_brick (bool): if True, load Brick ontology into the graph\n rebuild_tag_lookup (bool): if True, rebuild the dictionary\n used for performing the inference of tags -> classes.\n By default, uses the dictionary for the packaged Brick\n version\n approximate (bool): if True, considers a more permissive set of\n possibly related classes. If False, performs exact tag mapping\n \"\"\"\n self.g = Graph(load_brick=load_brick)\n self._approximate = approximate\n if rebuild_tag_lookup:\n self._make_tag_lookup()\n else:\n # get ontology data from package\n data = pkgutil.get_data(__name__, \"ontologies/taglookup.pickle\")\n # TODO: move on from moving pickle to something more secure?\n self.lookup = pickle.loads(data)\n\n def _make_tag_lookup(self):\n \"\"\"\n Builds taglookup dictionary. You shouldn't need to do this unless\n the taglookup dictionary is out of date\n \"\"\"\n self.lookup = defaultdict(set)\n res = self.g.query(\n \"\"\"SELECT ?class ?tag WHERE {\n ?class rdfs:subClassOf+ brick:Class.\n ?class brick:hasAssociatedTag ?tag .\n ?tag rdf:type brick:Tag\n }\"\"\"\n )\n class2tag = defaultdict(set)\n for (cname, tag) in res:\n cname = cname.split(\"#\")[1]\n tag = tag.split(\"#\")[1]\n class2tag[cname].add(tag)\n for cname, tagset in class2tag.items():\n self.lookup[tuple(sorted(tagset))].add(cname)\n pickle.dump(self.lookup, open(\"taglookup.pickle\", \"wb\"))\n\n def _is_point(self, classname):\n return (\n len(\n self.g.query(\n f\"SELECT ?x WHERE {{ \\\n brick:{classname} rdfs:subClassOf* brick:Point . \\\n brick:{classname} a ?x }}\"\n )\n )\n > 0\n )\n\n def _is_equip(self, classname):\n return (\n len(\n self.g.query(\n f\"SELECT ?x WHERE {{ \\\n brick:{classname} rdfs:subClassOf* brick:Equipment . \\\n brick:{classname} a ?x }}\"\n )\n )\n > 0\n )\n\n def lookup_tagset(self, tagset):\n \"\"\"\n Returns the Brick classes and tagsets that are supersets OR\n subsets of the given tagsets\n\n Args:\n tagset (list of str): a list of tags\n \"\"\"\n s = set(map(_to_tag_case, tagset))\n if self._approximate:\n s.add(\"Point\")\n withpoint = [\n (klass, set(tagset))\n for tagset, klass in self.lookup.items()\n if s.issuperset(set(tagset)) or s.issubset(set(tagset))\n ]\n s.remove(\"Point\")\n s.add(\"Equipment\")\n withequip = [\n (klass, set(tagset))\n for tagset, klass in self.lookup.items()\n if s.issuperset(set(tagset)) or s.issubset(set(tagset))\n ]\n return withpoint + withequip\n\n return [\n (klass, set(tagset))\n for tagset, klass in self.lookup.items()\n if s == set(tagset)\n ]\n\n def most_likely_tagsets(self, orig_s, num=-1):\n \"\"\"\n Returns the list of likely classes for a given set of tags,\n as well as the list of tags that were 'leftover', i.e. 
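The inverse-edge SPARQL update above, exercised in isolation on a toy rdflib graph; the PREFIX line is added here because this standalone graph has no Brick bindings:

import rdflib
from rdflib.namespace import OWL

g = rdflib.Graph()
EX = rdflib.Namespace("http://example.com#")
g.add((EX.feeds, OWL.inverseOf, EX.isFedBy))
g.add((EX.ahu1, EX.feeds, EX.vav1))
g.update("""
    PREFIX owl: <http://www.w3.org/2002/07/owl#>
    INSERT { ?o ?invprop ?s }
    WHERE  { ?s ?prop ?o . ?prop owl:inverseOf ?invprop . }
""")
assert (EX.vav1, EX.isFedBy, EX.ahu1) in g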
not\n used in the inference of a class\n\n Args:\n tagset (list of str): a list of tags\n num (int): number of likely tagsets to be returned; -1 returns all\n\n Returns:\n results (tuple): a 2-element tuple containing (1)\n most_likely_classes (list of str): list of Brick classes\n and (2) leftover (set of str): list of tags not used\n\n \"\"\"\n s = set(map(_to_tag_case, orig_s))\n tagsets = self.lookup_tagset(s)\n if len(tagsets) == 0:\n # no tags\n return [], orig_s\n # find the highest number of tags that overlap\n most_overlap = max(map(lambda x: len(s.intersection(x[1])), tagsets))\n\n # return the class with the fewest tags >= the overlap size\n candidates = list(\n filter(lambda x: len(s.intersection(x[1])) == most_overlap, tagsets)\n )\n\n # When calculating the minimum difference, we calculate it form the\n # perspective of the candidate tagsets because they will have more tags\n # We want to find the tag set(s) who has the fewest tags over what was\n # provided\n min_difference = min(map(lambda x: len(x[1].difference(s)), candidates))\n most_likely = list(\n filter(lambda x: len(x[1].difference(s)) == min_difference, candidates)\n )\n\n leftover = s.difference(most_likely[0][1])\n most_likely_classes = list(set([list(x[0])[0] for x in most_likely]))\n # return most likely classes (list) and leftover tags\n # (what of 'orig_s' wasn't used)\n if num < 0:\n return most_likely_classes, leftover\n else:\n return most_likely_classes[:num], leftover\n\n def expand(self, graph):\n \"\"\"\n Infers the Brick class for entities with tags; tags are indicated\n by the `brick:hasTag` relationship.\n Args:\n graph (brickschema.graph.Graph): a Graph object containing triples\n Returns:\n graph (brickschema.graph.Graph): a Graph object containing the\n inferred triples in addition to the regular graph\n \"\"\"\n _inherit_bindings(graph, self.g)\n for triple in graph:\n self.g.add(triple)\n entity_tags = defaultdict(set)\n res = self.g.query(\n \"\"\"SELECT ?ent ?tag WHERE {\n ?ent brick:hasTag ?tag\n }\"\"\"\n )\n for ent, tag in res:\n entity_tags[ent].add(tag)\n for entity, tagset in entity_tags.items():\n tagset = list(map(lambda x: x.split(\"#\")[-1], tagset))\n lookup = self.lookup_tagset(tagset)\n if len(lookup) == 0:\n continue\n klasses = list(lookup[0][0])\n self.g.add((entity, A, BRICK[klasses[0]]))\n return _return_correct_type(graph, self.g)\n\n\nclass HaystackInferenceSession(TagInferenceSession):\n \"\"\"\n Wraps TagInferenceSession to provide inference of a Brick model\n from a Haystack model. The haystack model is expected to be encoded\n as a dictionary with the keys \"cols\" and \"rows\"; I believe this is\n a standard Haystack JSON export.\n TODO: double check this\n \"\"\"\n\n def __init__(self, namespace):\n \"\"\"\n Creates a new HaystackInferenceSession that infers entities into\n the given namespace\n Args:\n namespace (str): namespace into which the inferred Brick entities\n are deposited. 
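A small sketch of the tag-to-class lookup TagInferenceSession provides; the exact classes returned depend on the packaged Brick version:

sess = TagInferenceSession(load_brick=True)
classes, leftover = sess.most_likely_tagsets(["Air", "Flow", "Sensor"])
print(classes, leftover)   # e.g. ['Air_Flow_Sensor'] and an empty leftover set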
Should be a valid URI\n        \"\"\"\n        super(HaystackInferenceSession, self).__init__(\n            approximate=True, load_brick=True\n        )\n        self._BLDG = Namespace(namespace)\n        self._tagmap = {\n            \"cmd\": \"command\",\n            \"sp\": \"setpoint\",\n            \"temp\": \"temperature\",\n            \"lights\": \"lighting\",\n            \"rtu\": \"RTU\",\n            \"ahu\": \"AHU\",\n            \"freq\": \"frequency\",\n            \"equip\": \"equipment\",\n        }\n        self._filters = [\n            lambda x: not x.startswith(\"his\"),\n            lambda x: not x.endswith(\"Ref\"),\n            lambda x: not x.startswith(\"cur\"),\n            lambda x: x != (\"disMacro\"),\n            lambda x: x != \"navName\",\n            lambda x: x != \"tz\",\n            lambda x: x != \"mod\",\n            lambda x: x != \"id\",\n        ]\n        self._point_tags = [\n            \"point\",\n            \"sensor\",\n            \"command\",\n            \"setpoint\",\n            \"alarm\",\n            \"status\",\n            \"parameter\",\n            \"limit\",\n        ]\n\n    def infer_entity(self, tagset, identifier=None):\n        \"\"\"\n        Produces the Brick triples representing the given Haystack tag set\n\n        Args:\n            tagset (list of str): a list of tags representing a Haystack entity\n\n        Keyword Args:\n            identifier (str): the identifier to use for the entity; required\n                (a missing identifier raises ValueError).\n        \"\"\"\n        triples = []\n        infer_results = []\n        if identifier is None:\n            raise ValueError(\"an entity identifier must be provided\")\n\n        non_point_tags = set(tagset).difference(self._point_tags)\n        non_point_tags.add(\"equip\")\n        inferred_equip_classes, leftover_equip = self.most_likely_tagsets(\n            non_point_tags\n        )\n        inferred_equip_classes = [\n            c for c in inferred_equip_classes if self._is_equip(c)\n        ]\n\n        # choose first class for now\n        equip_entity_id = identifier.replace(\" \", \"_\") + \"_equip\"\n        point_entity_id = identifier.replace(\" \", \"_\") + \"_point\"\n\n        # check if this is a point; if so, infer what it is\n        if set(tagset).intersection(self._point_tags):\n            tagset = set(tagset).difference(set([\"equip\"]))\n            inferred_point_classes, leftover_points = self.most_likely_tagsets(tagset)\n            inferred_point_classes = [\n                c for c in inferred_point_classes if self._is_point(c)\n            ]\n            triples.append(\n                (self._BLDG[point_entity_id], A, BRICK[inferred_point_classes[0]])\n            )\n            infer_results.append((identifier, list(tagset), inferred_point_classes))\n\n        if len(inferred_equip_classes) > 0 and inferred_equip_classes[0] != \"Equipment\":\n            triples.append(\n                (self._BLDG[equip_entity_id], A, BRICK[inferred_equip_classes[0]])\n            )\n            triples.append(\n                (\n                    self._BLDG[equip_entity_id],\n                    BRICK.hasPoint,\n                    self._BLDG[point_entity_id],\n                )\n            )\n            infer_results.append((identifier, list(tagset), inferred_equip_classes))\n        return triples, infer_results\n\n    def infer_model(self, model):\n        \"\"\"\n        Produces the inferred Brick model from the given Haystack model\n        Args:\n            model (dict): a Haystack model\n        \"\"\"\n        entities = model[\"rows\"]\n        # index the entities by their ID field\n        entities = {e[\"id\"].replace('\"', \"\"): {\"tags\": e} for e in entities}\n        brickgraph = Graph(load_brick=True)\n\n        # marker tag pass\n        for entity_id, entity in entities.items():\n            marker_tags = {\n                k for k, v in entity[\"tags\"].items() if v == \"m:\" or v == \"M\"\n            }\n            for f in self._filters:\n                marker_tags = list(filter(f, marker_tags))\n            # translate tags\n            entity_tagset = list(\n                map(\n                    lambda x: self._tagmap[x.lower()] if x in self._tagmap else x,\n                    marker_tags,\n                )\n            )\n            # infer tags for single entity\n            triples, _ = self.infer_entity(entity_tagset, identifier=entity_id)\n            brickgraph.add(*triples)\n\n        # take a pass through for relationships\n        for entity_id, 
{"seq_id":"518128170","text":"import numpy as np\nfrom sklearn import preprocessing\n\ndef Mean_removal():\n    '''Scale every feature column to a similar numeric range, so that each feature column has zero mean'''\n    data=np.array([[3, -1.5, 2, -5.4],\n                   [0, 4,-0.3,2.1],\n                   [1, 3.3, -1.9, -4.3]]) # raw data matrix, shape=(3,4)\n\n    data_standardized=preprocessing.scale(data)\n\n    print(data_standardized.shape)\n    print('Mean={}'.format(data_standardized.mean(axis=0)))\n    print('Mean2={}'.format(np.mean(data_standardized,axis=0)))\n    print('standardized: ')\n    print(data_standardized)\n    print('STD={}'.format(np.std(data_standardized,axis=0)))\n\ndef Scaling():\n    '''Scale the numeric range of each feature column to a reasonable size'''\n    data = np.array([[3, -1.5, 2, -5.4],\n                     [0, 4, -0.3, 2.1],\n                     [1, 3.3, -1.9, -4.3]]) # raw data matrix, shape=(3,4)\n\n    data_scaler = preprocessing.MinMaxScaler(feature_range=(0, 1)) # scale to the range (0,1)\n    data_scaled = data_scaler.fit_transform(data)\n\n    print('scaled matrix: *********************************')\n    print(data_scaled)\n\ndef Normalization():\n    '''Normalization: adjust each feature vector to unit L1 or L2 norm, so that the values of the feature vector sum to 1'''\n    data = np.array([[3, -1.5, 2, -5.4],\n                     [0, 4, -0.3, 2.1],\n                     [1, 3.3, -1.9, -4.3]]) # raw data matrix, shape=(3,4)\n\n    data_L1_normalized = preprocessing.normalize(data, norm='l1')\n    print('L1 normalized matrix: *********************************')\n    print(data_L1_normalized)\n    print('sum of matrix: {}'.format(np.sum(data_L1_normalized)))\n\n    data_L2_normalized = preprocessing.normalize(data) # default: l2\n    print('L2 normalized matrix: *********************************')\n    print(data_L2_normalized)\n    print('sum of matrix: {}'.format(np.sum(data_L2_normalized)))\n\ndef Binarization():\n    '''Binarization: convert a numeric feature vector into a boolean vector.\n    After binarization every data point is 0 or 1: all values greater than threshold become 1,\n    and all values less than or equal to threshold become 0.\n    '''\n    data = np.array([[3, -1.5, 2, -5.4],\n                     [0, 4, -0.3, 2.1],\n                     [1, 3.3, -1.9, -4.3]]) # raw data matrix, shape=(3,4)\n    data_binarized = preprocessing.Binarizer(threshold=1.4).transform(data)\n    print('binarized matrix: *********************************')\n    print(data_binarized)\n\ndef One_Hot_Encoding():\n    '''One-hot encoding; when the values are sparse, it is used to shrink the dimensionality of the feature vectors'''\n    data = np.array([[0, 2, 1, 12],\n                     [1, 3, 5, 3],\n                     [2, 3, 2, 12],\n                     [1, 2, 4, 3]]) # raw data matrix, shape=(4,4)\n    encoder = preprocessing.OneHotEncoder()\n    encoder.fit(data)\n    encoded_vector = encoder.transform([[2, 3, 5, 3]]).toarray()\n    print('one-hot encoded matrix: *********************************')\n    print(encoded_vector.shape)#shape=(1,11)\n    print(encoded_vector)\n    '''\nThe encoding works as follows: an encoder is built from the original dataset data and then used to encode new data. For example, column 0 has three distinct values (0,1,2), so it gets three dimensions,\ni.e. 0=100, 1=010, 2=001; likewise, column 1 has two distinct values (2,3), so it only has two dimensions, i.e. 2=10, 3=01; likewise, column 2 has four distinct values (1,5,2,4),\nso it has four dimensions, i.e. 1=1000, 2=0100, 4=0010, 5=0001; likewise, column 3 has two distinct values (3,12), so it only has two dimensions, i.e. 3=10, 12=01.\nSo for the new data [[2,3,5,3]], the 2 in column 0 maps to 001, the 3 in column 1 maps to 01, the 5 in column 2 maps to 0001, and the 3 in column 3 maps to 10;\nconcatenated, this gives the (1,11) matrix in the output, i.e. the dense matrix after encoding.\nIf the new data contains a value that does not exist in the encoder above, e.g. [[2,3,5,4]], the 4 does not exist in column 3 (which only has the two discrete values 3 and 12), so the output is 00,\nand the concatenation is [[0. 0. 1. 0. 1. 0. 0. 0. 1. 0. 0.]]; note that the second-to-last digit becomes 0\n    '''\n\n\ndef label_encoding():\n    '''Encode labels (category values, etc.)'''\n    # build the encoder\n    encoder = preprocessing.LabelEncoder() # first define an encoder object\n    raw_labels = ['翠花', '张三', '王宝强', '芙蓉姐姐', '凤姐', '王宝强', '凤姐']\n    encoder.fit(raw_labels) # returns an instance of itself\n    print('Encoder classes: {}'.format(encoder.classes_)) # all categories in the encoder, with duplicates removed\n    for index, item in enumerate(encoder.classes_):\n        print('{} --> {}'.format(item, index))#凤姐 ->0,张三 ->1,王宝强 ->2,翠花 ->3,芙蓉姐姐->4\n\n    # use the encoder to encode new sample data\n    need_encode_labels = ['王宝强', '芙蓉姐姐', '翠花']\n    # need_encode_labels=['王宝强','芙蓉姐姐','翠花','无名氏']\n    # encoding raises an error when it meets a label that is not in the encoder, such as '无名氏'; the same goes for decoding\n    encoded_labels = encoder.transform(need_encode_labels)\n    print('\\nLabels before encoding: {}'.format(need_encode_labels))\n    print('Labels after encoding: {}'.format(encoded_labels))\n\n    # use the encoder to decode numeric codes back into the original text labels; note the largest code must not exceed the number of classes in the encoder\n    encoded = [1, 3, 0, 4]\n    # encoded=[1,3,0,4,5] # 5 does not exist in the encoder, so this raises an error\n    decoded_labels = encoder.inverse_transform(encoded)\n    print('\\nEncoded label codes: {}'.format(encoded))\n    print('Decoded labels: {}'.format(decoded_labels))\n\ndef evaluation_index(predict_data,true_ret):\n    '''Use evaluation metrics to assess how good a model is'''\n    import sklearn.metrics as metrics\n    print('Mean absolute error: {}'.format(\n        round(metrics.mean_absolute_error(predict_data, true_ret), 2)))\n    print('Mean squared error (MSE): {}'.format(\n        round(metrics.mean_squared_error(predict_data, true_ret), 2)))\n    print('Median absolute error: {}'.format(\n        round(metrics.median_absolute_error(predict_data, true_ret), 2)))\n    print('Explained variance score: {}'.format(\n        round(metrics.explained_variance_score(predict_data, true_ret), 2)))\n    print('R-squared score: {}'.format(\n        round(metrics.r2_score(predict_data, true_ret), 2)))\n\ndef cal_H(x):\n    '''Compute the information entropy H(x)'''\n    x_value_list = set([x[i] for i in range(x.shape[0])])\n    ent = 0.0\n    for x_value in x_value_list:\n        p = float(x[x == x_value].shape[0]) / x.shape[0]\n        logp = np.log2(p)\n        ent -= p * logp\n    return ent\n\ndef cal_condition_H(x, y):\n    '''Compute the conditional entropy H(y|x)'''\n    x_value_list = set([x[i] for i in range(x.shape[0])])\n    ent = 0.0\n    for x_value in x_value_list:\n        sub_y = y[x == x_value]\n        temp_ent = cal_H(sub_y)\n        ent += (float(sub_y.shape[0]) / y.shape[0]) * temp_ent\n    return ent\n\ndef cal_ent_grap(x,y):\n    '''Compute the information gain'''\n    base_ent = cal_H(y)\n    condition_ent = cal_condition_H(x, y)\n    ent_grap = base_ent - condition_ent\n    return ent_grap\n\ndef cal_MI(labels_true,labels_pred):\n    '''Compute mutual information and normalized mutual information'''\n    from sklearn.metrics import mutual_info_score,normalized_mutual_info_score\n    MI=mutual_info_score(labels_true,labels_pred)\n    NMI=normalized_mutual_info_score(labels_true,labels_pred)\n    return MI,NMI","sub_path":"智能算法/数据处理(标准化、归一化、二值化等).py","file_name":"数据处理(标准化、归一化、二值化等).py","file_ext":"py","file_size_in_byte":7247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
{"seq_id":"160748877","text":"#!/usr/bin/env python\n\nimport psutil\nimport logging\n\nfrom hc.monitor import Monitor\n\n\nlogger = logging.getLogger(\"Monitor.{}\".format(__name__))\n\n\nclass sensors(Monitor):\n\n    def __init__(self, config, **args):\n\n        Monitor.__init__(self, config, **args)\n        # self.mcelog = self.getconfig(\"file\", default=\"/var/log/mcelog\")\n\n    def get_sensor(self, sens):\n\n        if sens == 'temp':\n            sens_data = psutil.sensors_temperatures()\n        elif sens == 'fan':\n            sens_data = psutil.sensors_fans()\n        elif sens == 'battery':\n            sens_data = psutil.sensors_battery()\n            return {'percent': sens_data.percent,\n                    'secsleft': sens_data.secsleft,\n                    'plugged': sens_data.power_plugged}\n\n        data = {}\n        for key, values in sens_data.items():\n            data[key] = {}\n            for item in values:\n\n                if item.label:\n                    data[key][item.label] = {}\n                    tmp_data = data[key][item.label]\n                else:\n                    tmp_data = data[key]\n\n                tmp_data['current'] = item.current\n                try:\n                    tmp_data['high'] = item.high\n                except AttributeError:\n                    pass\n                try:\n                    tmp_data['critical'] = item.critical\n                except AttributeError:\n                    pass\n\n        return data\n\n    def runit(self):\n        status = 0\n        errs = \"\"\n\n        data = {}\n\n        data['temp'] = self.get_sensor('temp')\n        data['fan'] = self.get_sensor('fan')\n        data['battery'] = self.get_sensor('battery')\n\n        return status, data, {}, errs\n","sub_path":"hc/modules/sensors.py","file_name":"sensors.py","file_ext":"py","file_size_in_byte":1692,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
{"seq_id":"338617352","text":"\"\"\"Shared method used in methods.\"\"\"\n\n\ndef get_expected_log_files_dict(base_out):\n    \"\"\"\n    :param base_out: Base path structure for log files. For example, if the expected path for\n    the log is 'work/step.path/log/step.conda_info.txt', the argument should be\n    'work/step.path/log/step'.\n    :type base_out: str\n\n    :return: Returns dictionary with expected path for log files based on the provided input.\n    \"\"\"\n    # Define expected\n    expected = {\n        \"conda_info\": base_out + \".conda_info.txt\",\n        \"conda_info_md5\": base_out + \".conda_info.txt.md5\",\n        \"conda_list\": base_out + \".conda_list.txt\",\n        \"conda_list_md5\": base_out + \".conda_list.txt.md5\",\n        \"log\": base_out + \".log\",\n        \"log_md5\": base_out + \".log.md5\",\n    }\n    # Return\n    return expected\n\n\ndef get_expected_output_vcf_files_dict(base_out):\n    \"\"\"\n    :param base_out: Base path structure for vcf files. For example, if the expected path for\n    the log is 'work/step.path/log/step.vcf.gz', the argument should be\n    'work/step.path/log/step'.\n    :type base_out: str\n\n    :return: Returns dictionary with expected path for vcf related files based on the\n    provided input.\n    \"\"\"\n    # Define expected\n    expected = {\n        \"vcf\": base_out + \".vcf.gz\",\n        \"vcf_md5\": base_out + \".vcf.gz.md5\",\n        \"tbi\": base_out + \".vcf.gz.tbi\",\n        \"tbi_md5\": base_out + \".vcf.gz.tbi.md5\",\n    }\n    # Return\n    return expected\n\n\ndef get_expected_output_bcf_files_dict(base_out):\n    \"\"\"\n    :param base_out: Base path structure for bcf files. 
For example, if the expected path for\n the log is 'work/step.path/log/step.bcf', the argument should be\n 'work/step.path/log/step'.\n :type base_out: str\n\n :return: Returns dictionary with expected path for bcf related files based on the\n provided input.\n \"\"\"\n # Define expected\n expected = {\n \"bcf\": base_out + \".bcf\",\n \"bcf_md5\": base_out + \".bcf.md5\",\n \"csi\": base_out + \".bcf.csi\",\n \"csi_md5\": base_out + \".bcf.csi.md5\",\n }\n # Return\n return expected\n","sub_path":"tests/snappy_pipeline/workflows/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":2077,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"608542124","text":"'''\nCreated on Feb 14, 2018\n\n@author: fangyunzhao\n'''\n\ndef repTrails(numberBlock, LeftRight, numberTask):\n for x in range(1, numberBlock + 1):\n for y in range(3):\n if y < 2:\n for l in LeftRight:\n print (x, \"Mask\", l)\n else:\n for l in LeftRight:\n print (x, \"nonMask\", l)\n\n\nrepTrails(5, ['right', 'left'], 6)","sub_path":"exercise_3_2.py","file_name":"exercise_3_2.py","file_ext":"py","file_size_in_byte":406,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"4750212","text":"#!/apollo/sbin/envroot \"$ENVROOT/python3.6/bin/python3.6\"\n\nimport dxd_tools_dev.modules.pttv2_cli_module as cli_module\nfrom dxd_tools_dev.modules.device import Credential\nfrom dxd_tools_dev.modules import pttv2_gen_pwd\nimport argparse\nimport os\nfrom datetime import datetime\n\n\nDDB_RECORD_SEPERATION = \"&\"\n\nHOME = os.getenv(\"HOME\")\nSLAX_LOG_SAVE_AT = f'{HOME}/pttv2'\n\nclass Color:\n BLUE = '\\033[36m'\n RED = '\\033[31m'\n ORANGE = '\\033[33m'\n BLACK = '\\033[30m'\n GREY = '\\033[37m'\n WHITE = '\\033[0m'\n\n\nclass TypeNotice:\n ALARM = \"ALARM\"\n ERROR = \"ERROR\"\n MESSAGE = \"MESSAGE\"\n\n\ndef notice(type, message):\n sayit = f\"[{type}] {message}\"\n print(sayit)\n\n\ndef convert_float_to_string(timestamp: float):\n return str(timestamp)\n\ndef convert_string_to_float(timestring: str):\n result = 0\n try:\n result = float(timestring)\n except:\n notice(TypeNotice.ERROR, f\"{timestring} is not right timestring\")\n return result\n\nclass TimeDiff:\n def __init__(self):\n self._before = datetime.now()\n def get_difference_in_seconds(self):\n self._after = datetime.now()\n diff = self._after - self._before\n if diff.seconds < 1:\n return 1\n return diff.seconds\n\n\nclass TimeObj:\n def __init__(self, time_string=None, formated_time=None):\n self._datetime = None\n if time_string is not None:\n self.get_time_from_timestring(time_string)\n elif formated_time is not None:\n self.get_time_from_slaxlog(formated_time)\n else:\n self.get_time_now()\n\n def get_time_now(self):\n self._datetime = datetime.now()\n\n def get_time_from_timestring(self, time_string):\n time_float = convert_string_to_float(time_string)\n if time_float > 0:\n self._datetime = datetime.fromtimestamp(time_float)\n else:\n self.get_time_now()\n\n def get_time_from_slaxlog(self, formated_time):\n pass\n\n def __gt__(self, time_object_2):\n return self.datetime > time_object_2.datetime\n\n @property\n def datetime(self):\n return self._datetime\n\n @property\n def time_float(self):\n return self._datetime.timestamp()\n\n @property\n def time_string(self):\n return convert_float_to_string(self.time_float)\n\n @property\n def formated_time(self):\n return\n\ndef prepare_file(folder, file):\n path = os.path.join(folder, file)\n with open(path, 
mode='a+') as f:\n pass\n\ndef test_new_file():\n file = 'created_for_test.txt'\n with open(file, mode='a+') as f:\n pass\n\ndef rename(folder, exist_name, new_name):\n filename = os.path.join(folder, exist_name)\n new_filename = os.path.join(folder, new_name)\n os.rename(filename, new_filename)\n\ndef test_rename_file():\n folder = '/Users/wanwill/PycharmProjects/PTTV2'\n exist_name = 'created_for_test.txt'\n new_name = 'new_name.txt'\n rename(folder, exist_name, new_name)\n\ndef prepare_folder(folder, devices):\n \"\"\"\n steps:\n create folder anyway\n if device file doesnot exist:\n create empty file\n move file name device to device_old\n\n :param folder:\n :return:\n \"\"\"\n os.makedirs(folder, mode=0o777, exist_ok=True)\n for device in devices:\n prepare_file(folder=folder, file=device)\n rename(folder=folder, exist_name=device, new_name=f'{device}_old')\n return\n\n\ndef get_slax_log(project_name, folder, time_string):\n site_type, region, devices = get_type_region_devices_from_project(project_name)\n username = 'porttest'\n password = pttv2_gen_pwd.pwd_of_project(project_name)\n cred = Credential(username=username, password=password)\n flag = False\n if site_type == cli_module.PTT_SITE_TYPE.FUN:\n flag = cli_module.gen_project_record(project_name, time_string, folder)\n elif site_type == cli_module.PTT_SITE_TYPE.PHXV3:\n flag = cli_module.get_slax_cli(devices, time_string, region, folder, cred=cred)\n elif site_type == cli_module.PTT_SITE_TYPE.CENTENNIAL:\n flag = cli_module.get_slax_cli(devices, time_string, region, folder, cred=cred, vendor=cli_module.DeviceType.cisco)\n\n #flag = cli_module.get_cisco_log_cli(devices, time_string, region, folder)\n return flag\n\n\ndef get_type_region_devices_from_project(project):\n return cli_module.get_type_region_devices_from_project(project)\n\n\ndef write_device_record_to_ddb(device, project_name, time_string, folder):\n file = f\"{folder}/{device}\"\n with open(file) as f:\n up_records = []\n for line in f:\n if ',up,' not in line:\n continue\n items_in_a_line = line.split(\",\")\n if len(items_in_a_line) != 5:\n continue\n up_records.append(items_in_a_line)\n\n record_id = DDB_RECORD_SEPERATION.join([project_name, time_string, device])\n record = cli_module.Record(record_id)\n record.update_record(up_records)\n\n\ndef get_slax_for_project(project_name, start_flag=False):\n if cli_module.is_project_complete(project_name):\n message = f\"The '{project_name}' completed port testing task. No need to get log\"\n print(Color.BLUE + message + Color.WHITE)\n return\n\n time_diff = TimeDiff()\n time_string = TimeObj().time_string\n folder = SLAX_LOG_SAVE_AT + '/' + project_name #time_string\n #folders = [folder]\n\n site_type, region, devices = get_type_region_devices_from_project(project_name)\n\n prepare_folder(folder, devices)\n\n get_slax_log(project_name, folder, time_string)\n\n\n if not start_flag:\n for device in devices:\n # if site_type == cli_module.PTT_SITE_TYPE.PHXV3 and cli_module.PTT:\n if site_type == cli_module.PTT_SITE_TYPE.CENTENNIAL:\n pass\n else:\n write_device_record_to_ddb(device, project_name, time_string, folder)\n\n message = f\"the slax log saved in folder '{folder}'\\n uploading to DDB\"\n print(Color.WHITE + message)\n\n else:\n\n message = (f\"history log saved in folder '{folder}'\\n uploading to DDB\"\n f\"the device log is cleared. 
Ready to do porttest.\"\n )\n\n print(Color.WHITE + message)\n\n processing_time_in_second = time_diff.get_difference_in_seconds()\n cli_module.update_record_list(project_name, time_string, processing_time_in_second)\n\ndef test_get_slax_for_project():\n project_name = 'iad.ewr53@deploy745'\n get_slax_for_project(project_name)\n\ndef get_args():\n parser = argparse.ArgumentParser(description='get slax record from device')\n parser.add_argument('-p', '--project',\n help='pls give project name format {reg}.{az}@{sim_id}')\n parser.add_argument('-s', '--start_porttest', action='store_true',\n help='run with -s, before onsite engineer insert fiber')\n\n return parser.parse_args()\n\ndef main():\n\n args = get_args()\n project_name = args.project\n start_flag = args.start_porttest\n\n return get_slax_for_project(project_name, start_flag)\n\ndef do_test():\n project_name = \"pdx.pdx1.8@demo\"\n project = cli_module.Project(project_name)\n\nif __name__ == '__main__':\n main()\n\n","sub_path":"aws/pttv2projlog.py","file_name":"pttv2projlog.py","file_ext":"py","file_size_in_byte":7210,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"341898005","text":"\nimport numpy as np\nimport copy\n\nfrom submissions.formulas.FormulaTree import *\n\n\ndef load_data(n_fit=1000000):\n train_filename = 'chopped_data/train.csv'\n data = pd.read_csv(train_filename).head(100)\n X_df = data.drop(['target'], axis=1)\n y = data[[\"target\"]].values\n removed_column = 0\n X = np.delete(X_df.head(n_fit).values, removed_column, axis=1)\n y = y[0:n_fit]\n return X, y\n\n\ndef generateSingleTree(Nsplit=1):\n verbose = False\n ft = FormulaTree()\n ft.add_node()\n n = 0\n\n while(n < Nsplit):\n maxnode = len(ft.nodes)\n split = False\n while(split is False):\n i = np.random.random_integers(0, maxnode - 1)\n if(verbose):\n print(\"request splitting node : \", i)\n split = ft.split_node(i)\n n += 1\n\n ft.add_coefficients()\n tree = ft.get_formula()\n print(\"Adding feature : \", tree)\n return ft\n\n\ndef generatePopulation(N=100):\n trees = np.array([])\n X, y = load_data()\n\n for i in range(0, N):\n tree = generateSingleTree()\n tree.set_classifier()\n tree.fit_coefficients(X, y)\n trees = np.append(trees, tree)\n\n for tree in trees:\n print(\"Tree Score : \", tree.score)\n\n return trees\n\n\ndef propagate_population(trees):\n children = trees\n for tree in children:\n tree.add_subtrees(trees)\n tree = mutate_tree(tree)\n return children\n\n\ndef cross_population(trees):\n n = len(trees)\n children = copy.deepcopy(trees)\n for i in range(0, n):\n trees[i].add_subtrees(trees)\n ic = -1\n while(ic < 0 | ic == i):\n ic = np.random.random_integers(0, n - 1)\n children[i] = cross_trees(trees[i], trees[ic])\n return children\n\n\ndef cut_population(trees, min_score=0.6):\n survivors = np.array([])\n for tree in trees:\n if(tree.score > min_score):\n survivors = np.append(survivors, tree)\n return survivors\n\n\ndef mutate_tree(tree):\n split = False\n while(split is False):\n inode = np.random.random_integers(0, len(tree.nodes) - 1)\n split = tree.split_node(inode)\n return tree\n\n\ndef cross_trees(tree, subtree):\n crossed = False\n while(crossed is False):\n inode = np.random.random_integers(0, len(tree.nodes) - 1)\n crossed = tree.graft_node(subtree, inode)\n return 
tree\n\n\n","sub_path":"submissions/formulas/GenerateTrees.py","file_name":"GenerateTrees.py","file_ext":"py","file_size_in_byte":2357,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
{"seq_id":"465835163","text":"import requests\n\ndef load_from_storage():\n\ttry:\n\t\twith open('api_key.txt') as api_key:\n\t\t\ttoken = api_key.read().splitlines()\n\t\t\ttoken = [x.strip() for x in token]\n\t\t\ttoken = token.__str__().strip('[]')\n\t\t\ttoken = token.__str__().strip('\\'')\n\t\t\treturn token\n\texcept FileNotFoundError:\n\t\tprint(f'No API key found, continuing without..')\n\t\tpass\n\treturn None\n\n\ntoken = load_from_storage()\n\n# Documentation: https://developers.google.com/speed/docs/insights/v5/get-started\n\n# JSON paths: https://developers.google.com/speed/docs/insights/v4/reference/pagespeedapi/runpagespeed\n\n# Populate 'pagespeed.txt' file with URLs to query against API.\nwith open('pagespeed.txt') as pagespeedurls:\n\tdownload_dir = 'pagespeed-results.csv'\n\tfile = open(download_dir, 'w')\n\tcontent = pagespeedurls.readlines()\n\tcontent = [line.rstrip('\\n') for line in content]\n\n\tcolumnTitleRow = \"URL, First Contentful Paint, First Interactive,\" \\\n\t\t\t\t\t \"Speed Index,First Meaningful Paint, First CPU Idle\\n\"\n\tfile.write(columnTitleRow)\n\n\t# This is the google pagespeed api url structure, using for loop to insert each url in .txt file\n\tfor line in content:\n\t\t# If no \"strategy\" parameter is included, the query by default returns desktop data.\n\t\tif token is not None:\n\t\t\tx = f'https://www.googleapis.com/pagespeedonline/v5/runPagespeed?url={line}&strategy=mobile&key={token}'\n\t\telse:\n\t\t\tx = f'https://www.googleapis.com/pagespeedonline/v5/runPagespeed?url={line}&strategy=mobile'\n\t\tprint(f'Requesting {x}...')\n\t\tr = requests.get(x)\n\t\tfinal = r.json()\n\t\tj = final\n\n\t\ttry:\n\t\t\turlid = final['id']\n\t\t\tsplit = urlid.split(\n\t\t\t\t'?') # This splits the absolute url from the api key parameter\n\t\t\turlid = split[0] # This reassigns urlid to the absolute url\n\t\t\tID = f'URL ~ {urlid}'\n\t\t\tID2 = str(urlid)\n\t\t\turlfcp = final['lighthouseResult']['audits'][\n\t\t\t\t'first-contentful-paint']['displayValue']\n\t\t\tFCP = f'First Contentful Paint ~ {str(urlfcp)}'\n\t\t\tFCP2 = str(urlfcp)\n\t\t\turlfi = final['lighthouseResult']['audits'][\n\t\t\t\t'interactive']['displayValue']\n\t\t\tFI = f'First Interactive ~ {str(urlfi)}'\n\t\t\tFI2 = str(urlfi)\n\t\t\turlsp = final['lighthouseResult']['audits']['speed-index'][\n\t\t\t\t'displayValue']\n\t\t\tSP = f'Speed Index ~ {str(urlsp)}'\n\t\t\tSP2 = str(urlsp)\n\t\t\turlfmp = \\\n\t\t\t\tfinal['lighthouseResult']['audits']['first-meaningful-paint'][\n\t\t\t\t\t'displayValue']\n\t\t\tFMP = f'First Meaningful Paint ~ {str(urlfmp)}'\n\t\t\tFMP2 = str(urlfmp)\n\t\t\turlfci = \\\n\t\t\t\tfinal['lighthouseResult']['audits']['first-cpu-idle'][\n\t\t\t\t\t'displayValue']\n\t\t\tFCI = f'First CPU Idle ~ {str(urlfci)}'\n\t\t\tFCI2 = str(urlfci)\n\t\texcept KeyError:\n\t\t\tprint(f' One or more keys not found {line}.')\n\n\t\ttry:\n\t\t\trow = f'{ID2},{FCP2},{FI2},{SP2},{FMP2},{FCI2}\\n'\n\t\t\tfile.write(row)\n\t\texcept NameError:\n\t\t\tprint(f' Failing because of KeyError {line}.')\n\t\t\tfile.write(\n\t\t\t\tf' & Failing because of nonexistent Key ~ {line}.' 
+ '\\n')\n\n\t\ttry:\n\t\t\tprint(ID)\n\t\t\tprint(FCP)\n\t\t\tprint(FI)\n\t\t\tprint(SP)\n\t\t\tprint(FMP)\n\t\t\tprint(FCI)\n\t\texcept NameError:\n\t\t\tprint(f' Failing because of KeyError {line}.')\n\n\tfile.close()\n","sub_path":"pagespeed-api.py","file_name":"pagespeed-api.py","file_ext":"py","file_size_in_byte":3076,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"313311848","text":"# -*- coding: utf-8 -*-\n\nfrom openerp.osv import osv\nfrom . import spreadsheet\n\n\ndef object_descriptor(obj, cr, uid, fields_get, fields, context):\n f = fields[0]\n if f not in fields_get.keys():\n return (None, None, None)\n field_type = fields_get[f]['type']\n model = fields_get[f].get('relation')\n field_descriptor = fields_get[f]['string']\n if len(fields) == 1 or (len(fields) == 2 and fields[1] == 'id'):\n pass\n else:\n fget = obj.pool.get(model).fields_get(cr, uid, context=context)\n field_type, model, desc = object_descriptor(\n obj, cr, uid, fget, fields[1:], context)\n field_descriptor += ' / ' + desc\n return (field_type, model, field_descriptor)\n\n\nclass IrUiView(osv.Model):\n _inherit = 'ir.ui.view'\n\n def get_export_title(self, cr, uid, doc, obj, **kwargs):\n if hasattr(obj, 'get_export_title'):\n return obj.get_export_title(cr, uid, doc, **kwargs)\n row = doc.AddRow()\n number_column = len(kwargs.get('fields'))\n if kwargs.get('grouped'):\n number_column += 1\n\n row.AddStringCell(kwargs.get('title'), colspan=number_column)\n row = doc.AddRow()\n row = doc.AddRow()\n\n def get_export_criteria_group_by(self, cr, uid, obj, fields_get, groupby):\n if hasattr(obj, 'get_export_criteria_group_by'):\n return obj.get_export_criteria_group_by(\n cr, uid, fields_get, groupby)\n return [fields_get[f]['string'] for f in groupby]\n\n def get_export_criteria_domain(self, cr, uid, obj, fields_get, domain, context):\n if hasattr(obj, 'get_export_criteria_domain'):\n return obj.get_export_criteria_domain(\n cr, uid, fields_get, domain, context)\n filters = []\n while domain:\n x = domain.pop()\n if x == '&':\n filters.append('et')\n elif x == '|':\n filters.append('ou')\n else:\n field, op, val = x\n ftype, fmodel, fdesc = object_descriptor(\n self, cr, uid, fields_get, field.split('.'), context)\n if ftype == 'many2one':\n if val:\n if not isinstance(val, (list, tuple)):\n val = [val]\n for v in self.pool.get(fmodel).name_get(cr, uid, val,\n context=context):\n filters.append('%s %s %s' % (fdesc or field, op, v[1]))\n else:\n filters.append('%s %s %s' % (fdesc, op, val))\n\n else:\n filters.append('%s %s %s' % (fdesc or field, op, val))\n return filters\n\n def get_export_criteria(self, cr, uid, doc, obj, fields_get, **kwargs):\n if hasattr(obj, 'get_export_criteria'):\n return obj.get_export_criteria(cr, uid, doc, **kwargs)\n\n number_column = len(kwargs.get('fields'))\n row = doc.AddRow()\n colspan = 1\n if kwargs.get('grouped'):\n number_column += 1\n colspan = number_column / 2\n row.AddStringCell(u'Filter by ...', colspan=colspan)\n row.AddStringCell(u'Group by ...', colspan=colspan)\n else:\n colspan = number_column\n row.AddStringCell(u'Filter by ...', colspan=colspan)\n\n domain = [] + kwargs.get('domain', [])\n groupby = [] + kwargs.get('groupby', [])\n context = kwargs.get('context', {}).copy()\n\n filters = self.get_export_criteria_domain(\n cr, uid, obj, fields_get, domain, context)\n groupby = self.get_export_criteria_group_by(\n cr, uid, obj, fields_get, groupby)\n while filters or groupby:\n row = doc.AddRow()\n if filters 
and groupby:\n row.AddStringCell(filters.pop(), colspan=colspan)\n row.AddStringCell(groupby.pop(), colspan=colspan)\n elif filters:\n row.AddStringCell(filters.pop(), colspan=colspan)\n else:\n row.AddStringCell('', colspan=colspan)\n row.AddStringCell(groupby.pop(), colspan=colspan)\n\n def get_export_header(self, cr, uid, doc, obj, **kwargs):\n if hasattr(obj, 'get_export_header'):\n return obj.get_export_header(cr, uid, doc, **kwargs)\n row = doc.AddRow()\n for header in kwargs.get('headers'):\n row.AddStringCell(header)\n\n def get_export_row_add_field(self, cr, uid, obj, row, value, fields_get,\n field, context=None):\n if value is None:\n return row.AddStringCell('')\n\n field_descr = fields_get.get(field)\n if field_descr is None:\n if isinstance(value, (int, long, float)):\n return row.AddDoubleCell(value)\n else:\n return row.AddStringCell(unicode(value))\n\n field_type = field_descr['type']\n if field_type == 'char':\n return row.AddStringCell(value)\n elif field_type == 'many2one':\n if value:\n return row.AddStringCell(value[1])\n else:\n return row.AddStringCell('')\n elif field_type in ('integer', 'float'):\n return row.AddDoubleCell(value)\n elif field_type == 'decimal':\n return row.AddDecimalCell(value)\n elif field_type == 'boolean':\n return row.AddBooleanCell(value)\n elif field_type == 'selection':\n selection = dict(fields_get[field]['selection']).get(value, '')\n return row.AddStringCell(unicode(selection))\n elif field_type == 'date':\n return row.AddDateTimeCell(value, date_only=True)\n elif field_type == 'datetime':\n # TODO pass user's timezone\n return row.AddDateTimeCell(value)\n else:\n return row.AddStringCell('Not Implemented Yet: %r' % field_type)\n\n def get_export_row_read(self, cr, uid, doc, obj, domain, fields,\n fields_get, grouped=False, context=None):\n obj_ids = obj.search(cr, uid, domain, context=context) # TODO sort\n rows = []\n for read in obj.read(cr, uid, obj_ids, fields, context=context):\n row = doc.AddRow()\n rows.append(row)\n if grouped:\n row.AddStringCell('')\n for f in fields:\n self.get_export_row_add_field(\n cr, uid, obj, row, read[f], fields_get, f, context=context)\n\n return rows\n\n def get_export_row_group(self, cr, uid, doc, obj, domain, groupby, fields,\n fields_get, level=0, context=None):\n fields2read = list(set(fields).union(set(groupby)))\n rows = []\n n=0\n for g in obj.read_group(cr, uid, domain, fields2read, groupby,\n context=context):\n n+=1\n #print n,\"g==============\",g\n row = doc.AddRow()\n rows.append(row)\n if isinstance(g[groupby[0]], (list, tuple)):\n gstring = g[groupby[0]][1]\n else:\n gstring = g[groupby[0]]\n #print \"gstringgstringxxxxxxxxxxxxxx\",gstring\n try:\n row.AddStringCell('%s%s (%d)' % (\n ' - ' * level, gstring, g[groupby[0] + '_count']))\n except:\n row.AddStringCell('%s%s ' % (\n ' - ' * level, gstring))\n new_groupby = g['__context'].get('group_by')\n new_domain = g['__domain']\n if new_groupby:\n subrows = self.get_export_row_group(\n cr, uid, doc, obj, new_domain, new_groupby, fields,\n fields_get, level=level + 1, context=context)\n else:\n subrows = self.get_export_row_read(\n cr, uid, doc, obj, new_domain, fields, fields_get,\n grouped=True, context=context)\n\n for f in fields:\n if f != groupby[0] and fields_get[f]['type'] in ('integer',\n 'float',\n 'decimal'):\n use_read_group = getattr(\n obj._columns[f], 'visual_export_use_read_group', False)\n if use_read_group:\n self.get_export_row_add_field(cr, uid, obj, row,\n g[f], fields_get, f,\n context=context)\n else:\n 
group_operator = fields_get[f].get(\n 'group_operator', 'sum').upper()\n # It is uggly but by default if it is False or None\n # the group by put a SUM\n\n if group_operator == 'AVG':\n group_operator = 'AVERAGE'\n\n cnumber = row.nextcell\n minsubcell = '%s%d' % (chr(ord('A') + cnumber),\n min([r.number for r in subrows]))\n maxsubcell = '%s%d' % (chr(ord('A') + cnumber),\n max([r.number for r in subrows]))\n val = '=%s(%s:%s)' % (group_operator, minsubcell,\n maxsubcell)\n row.AddFormulaCell(val)\n else:\n row.AddStringCell('')\n\n return rows\n\n def get_export_rows(self, cr, uid, doc, obj, fields_get, **kwargs):\n if hasattr(obj, 'get_export_rows'):\n return obj.get_export_rows(cr, uid, doc, obj, **kwargs)\n domain = [] + kwargs.get('domain', [])\n groupby = [] + kwargs.get('groupby', [])\n context = kwargs.get('context', {}).copy()\n fields = [] + kwargs.get('fields')\n if kwargs.get('grouped'):\n self.get_export_row_group(cr, uid, doc, obj, domain, groupby,\n fields, fields_get, context=context)\n else:\n self.get_export_row_read(\n cr, uid, doc, obj, domain, fields, fields_get, context=context)\n doc.AddRow()\n doc.AddRow()\n\n def get_export_tree_rows(self, cr, uid, doc, obj, fields_get, **kwargs):\n if hasattr(obj, 'get_export_tree_rows'):\n return obj.get_export_tree_rows(cr, uid, doc, obj, **kwargs)\n fields = [] + kwargs.get('fields')\n child_field = kwargs['child_field']\n toread = fields + [child_field]\n\n def getdata(ids, level):\n for r in obj.read(cr, uid, ids, toread, context=context):\n row = doc.AddRow()\n for c in fields:\n if c == fields[0] and r[child_field]:\n val = \" - \" * level + '> ' + unicode(r[c])\n row.AddStringCell(val)\n elif c == fields[0]:\n val = \" - \" * level + unicode(r[c])\n row.AddStringCell(val)\n else:\n self.get_export_row_add_field(\n cr, uid, obj, row, r[c], fields_get, c,\n context=context)\n\n if r[child_field]:\n getdata(r[child_field], level + 1)\n\n domain = [('parent_id', '=', int(kwargs['other_filter']['id']))]\n context = kwargs.get('context', {}).copy()\n obj_ids = obj.search(cr, uid, domain, context=context)\n getdata(obj_ids, 0)\n doc.AddRow()\n doc.AddRow()\n return domain\n\n def get_export(self, cr, uid, **kwargs):\n doc = spreadsheet.get_spreadsheet('ods', kwargs.get('title'))\n obj = self.pool.get(kwargs.get('model'))\n if hasattr(obj, 'get_export_domain_context'):\n kwargs['domain'], kwargs['context'] = obj.get_export_domain_context(\n cr, uid, **kwargs)\n self.get_export_title(cr, uid, doc, obj, **kwargs)\n self.get_export_header(cr, uid, doc, obj, **kwargs)\n fields_get = obj.fields_get(cr, uid, context=kwargs.get('context'))\n if kwargs.get('view_mode') == 'list':\n self.get_export_rows(cr, uid, doc, obj, fields_get, **kwargs)\n elif kwargs.get('view_mode') == 'tree':\n kwargs['domain'] = self.get_export_tree_rows(\n cr, uid, doc, obj, fields_get, **kwargs)\n self.get_export_criteria(cr, uid, doc, obj, fields_get, **kwargs)\n return doc.tofile()\n\n# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:\n","sub_path":"visual_export/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":12797,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"91957078","text":"\"\"\"\r\nAuthor: Sikder Tahsin Al Amin\r\nProblem: \r\nWrite a function that takes an unsigned integer and return the number of '1' bits it has (also known as the Hamming weight).\r\nInput: 00000000000000000000000000001011\r\nOutput: 3\r\n\r\n\"\"\"\r\n\r\n\r\ndef hammingWeight(n):\r\n 
\"\"\"\r\n    :type n: int\r\n    :rtype: int\r\n    \"\"\"\r\n    count =0\r\n    for i in str(n):\r\n        if i=='1':\r\n            count = count +1\r\n    return count\r\n","sub_path":"leetcode-my-solutions/191_number_of_1_bits.py","file_name":"191_number_of_1_bits.py","file_ext":"py","file_size_in_byte":452,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
{"seq_id":"212093449","text":"import pytest\nimport os\nimport sys\nfrom bootstrap.run import run\nfrom bootstrap.lib.options import Options\n\noption_names = [\n    'block',\n    'block_tucker',\n    'cat_mlp',\n    'linear_sum',\n    'mfb',\n    'mfh',\n    'mlb',\n    'mutan',\n    'tucker'\n]\n\ndef reset_options_instance():\n    Options._Options__instance = None\n    sys.argv = [sys.argv[0]] # reset command line args\n\n@pytest.mark.parametrize('option_name', option_names)\ndef test_run_vqa2_options(option_name):\n    reset_options_instance()\n    sys.argv += [\n        '-o', f'block/options/vqa2/{option_name}.yaml',\n        '--exp.dir', f'/tmp/logs/tests/vqa2/{option_name}',\n        '--engine.nb_epochs', '1',\n        '--engine.debug', 'True',\n        '--misc.cuda', 'False',\n    ]\n    try:\n        run()\n    except:\n        print('Unexpected error:', sys.exc_info()[0])\n        assert False\n    assert True\n\n@pytest.mark.mcb\ndef test_run_vqa2_options_mcb():\n    test_run_vqa2_options('mcb')\n\nif __name__ == '__main__':\n    test_run_vqa2_options('block')","sub_path":"tests/test_run_vqa2_options.py","file_name":"test_run_vqa2_options.py","file_ext":"py","file_size_in_byte":1013,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
{"seq_id":"414713677","text":"\"\"\"\nCurrent Limitations:\n    - A member cannot transition from a monthly to a punchcard without losing the remainder of their time\n\n\n\"\"\"\n\n\n\nimport sys\nimport pprint\nimport hashlib\n\nfrom datetime import datetime, date, time, timedelta\nimport time\nfrom tinydb import *\nfrom tinydb.operations import decrement\nimport re\n\nimport config\n\n\ndef init():\n    global appDB\n    appDB = LoginDatabase()\n    \n\nclass LoginDatabase:\n\n    def __init__(self, db_dir=None):\n\n        if db_dir:\n            append_dir = db_dir\n        else:\n            append_dir = \"\"\n\n        self.membersDB = TinyDB(append_dir + \"members.json\")\n        self.logDB = TinyDB(append_dir + \"log.json\")\n\n    \"\"\" add_member: Adds a new member to the database. All parameters but link are required. \n                    Expiration date is automatically set using the member type and current time. Current time \n                    is recorded. A 16 digit numerical ID is generated and assigned based on the date and member's name\n                    and checked against the database. If the ID has already been used, it waits .1 second and \n                    tries again until an unused ID has been generated. Also protects against IDs that are less than 16\n                    digits long, as generated IDs were sometimes too short.\n    \n    Returns: Dictionary of the data entered into the database \"\"\"\n\n    def add_member(self, first_name, last_name, email, phone, birthdate, member_type_str, link=None):\n        join_date = datetime.now()\n\n\n        member_ID_str = first_name + last_name + str(join_date)\n        member_ID = int(hashlib.sha256(member_ID_str.encode('utf-8')).hexdigest(),\n                        16) % 10 ** 16  # Generate a 16 digit ID number :: https://stackoverflow.com/a/42089311\n\n        while self.membersDB.contains(Query().id == member_ID) or len(str(member_ID)) < 16:  # Check for member_id uniqueness\n            time.sleep(0.1)\n            join_date = datetime.now()\n            member_ID_str = first_name + last_name + str(join_date)\n            member_ID = int(hashlib.sha256(member_ID_str.encode('utf-8')).hexdigest(),\n                            16) % 10 ** 16  # Generate a 16 digit ID number :: https://stackoverflow.com/a/42089311\n\n        exp_date = \"-1\"\n        exp_punches = 0\n\n        if member_type_str == \"monthly\" or member_type_str == \"student\":\n            exp_date = str(timedelta(days=31) + join_date)\n        elif member_type_str == \"annual\" or member_type_str == \"student_annual\":\n            exp_date = str(timedelta(days=365) + join_date)\n        elif member_type_str == \"trial\":\n            exp_date = str(timedelta(days=7) + join_date)\n        elif member_type_str == \"punchcard\":\n            exp_punches = 10\n\n\n\n        entry = {\"name_first\": first_name,\n                 \"name_last\": last_name,\n                 \"id\": member_ID,\n                 \"dob\": str(birthdate),\n                 \"email\" : email,\n                 \"phone\": phone,\n                 \"join_date\": str(join_date),\n                 \"member_type\": member_type_str,\n                 \"expiration_date\": exp_date,\n                 \"expiration_punches\": exp_punches,\n                 \"link\": link}\n\n        self.membersDB.insert(entry)\n\n        return entry\n\n    \"\"\" retrieve_member: Upon being passed a member ID, checks if the member exists (and not deleted) and then \n                         returns the member's data. Raises LookupError if the member does not exist.\n                         \n                         Returns: Document type containing data of the selected member in the members database\"\"\"\n\n    def retrieve_member(self, member_id):\n        member_query = Query()\n        if self.membersDB.contains(member_query.id == member_id):\n            member_data = self.membersDB.get(member_query.id == member_id)\n            if member_data.get(\"deleted\", False):\n                raise LookupError(\"The entered user ID could not be found in the database\")\n            return member_data\n        else:\n            raise LookupError(\"The entered user ID could not be found in the database\")\n\n    \"\"\" update_member: Updates a member in the database. With the exception of expiration_punches, expiration_date\n                       and link, all of the member data must be passed in. Currently does not support passing \n                       in just the parameters you want to update. In the future, might consider using **kwargs\n                       as a more flexible update. 
Raises a LookupError if the associated member cannot be found.\n\n Returns: List of documents containing data of the selected member in the members database\"\"\"\n\n def update_member(self, member_id, first_name, last_name, email, phone, birthdate, member_type_str,\n expiration_punches=-1, expiration_date=\"-1\", link=None):\n\n\n member_query = Query()\n\n if self.membersDB.contains(member_query.id == member_id):\n\n\n member_data = self.membersDB.get(member_query.id == member_id)\n member_data[\"name_first\"] = first_name\n member_data[\"name_last\"] = last_name\n member_data[\"dob\"] = str(birthdate)\n member_data[\"email\"] = email\n member_data[\"phone\"] = phone\n member_data[\"member_type\"] = member_type_str\n member_data[\"expiration_date\"] = expiration_date\n member_data[\"expiration_punches\"] = int(expiration_punches)\n member_data[\"link\"] = link\n\n\n self.membersDB.update(member_data, member_query.id == member_id)\n return member_data\n else:\n raise LookupError(\"The entered user ID could not be found in the database\")\n\n \"\"\" log_member: Logs in member by adding their member_id to the log database, along with:\n * Log Date\n * First/Last name\n * Member Type\n * Expiration Date/Punches Remaining\n * Link (if Applicable) \n If a member is a punchcard member, one punch is removed from their account.\n If the member has already logged in during the past day (since midnight), and the debug feature \n \"config.allow_multiple_scans_a_day\" is False, a LookupError will be raised, indicating that the\n member has already logged in today.\n \n Returns: List type of the log entry \"\"\"\n\n def log_member(self, member_id):\n logged_time = datetime.now()\n\n member_query = Query()\n # db.contains(User.name == 'John')\n if self.membersDB.contains(member_query.id == member_id):\n today = date.today()\n visited_today = self.logDB.search((member_query.id == member_id) &\n (member_query.log_time.matches(re.compile(str(today), re.IGNORECASE))))\n\n if visited_today and not config.allow_multiple_scans_a_day:\n raise LookupError(\"Member has already logged in today!\")\n\n else:\n member_data = self.membersDB.get(member_query.id == member_id)\n # print(member_data[\"name_last\"] + \", \" + member_data[\"name_first\"])\n member_type_str = member_data[\"member_type\"]\n\n remaining_time=\"-1\"\n\n if member_type_str == \"punchcard\":\n if member_data[\"expiration_punches\"] <= 0:\n raise RuntimeError(\"The member has used all of their punches!\")\n self.membersDB.update(decrement('expiration_punches'), member_query.id == member_id)\n\n\n\n log_entry = {\"id\": member_id, \"name_first\": member_data[\"name_first\"], \"name_last\": member_data[\"name_last\"],\n \"log_time\": str(logged_time), \"member_type_str\": member_type_str,\n \"remaining_punches\": member_data[\"expiration_punches\"] - 1,\n \"expiration_date\": member_data[\"expiration_date\"],\n \"link\": member_data.get(\"link\", None)}\n\n self.logDB.insert(log_entry)\n\n return log_entry\n else:\n raise LookupError(\"The entered user ID could not be found in the database\")\n\n \"\"\" query_member: Performs a regex search on the database, by member's first name or current day. Ignores members\n marked as deleted. The parameter \"log_date\" can be passed True to use the current day, or a \n date object to specify a certain day. 
If no members match the name, or nobody has logged in\n today, a LookupError is raised.\n\n Returns: List of documents containing data of all the members matching the search in the \n members database \"\"\"\n\n def query_member(self, name_member=\"-1\", log_date=None):\n member = Query()\n\n if name_member != \"-1\":\n compiled_search = re.compile(name_member, re.IGNORECASE)\n results = self.membersDB.search((member.name_first.matches(compiled_search)\n | member.name_last.matches(compiled_search))\n & ~(member.deleted == True))\n\n\n if not results:\n raise LookupError(\"The entered name could not be found in the database!\")\n\n return results\n elif log_date:\n if log_date == True:\n log_date = date.today()\n members_today = self.logDB.search((member.log_time.matches(re.compile(str(log_date), re.IGNORECASE)))\n & ~(member.deleted == True))\n\n results = []\n if not members_today:\n raise LookupError(\"No members logged in today\")\n for memb in members_today:\n member_id = memb[\"id\"]\n results.append(self.membersDB.get(member.id == member_id))\n return results\n else:\n raise LookupError(\"Invalid search query.\")\n\n\n \"\"\" get_member_sign_offs: When given a member ID. this function retrieves the member's sign ins from the member \n database. If the member does not have any sign offs, generates an empty dict containing\n the sign-offs listed in config.sign_off_list. The dict should only contain booleans.\n If the member ID does not exist in the database, a LookupError is raised.\n\n Returns: Dictionary containing the skills a member has been signed off on. If the member\n does not have any recorded sign-offs, generates a dict of all the sign-offs with \n the value False \"\"\"\n\n def get_member_sign_offs(self, member_id):\n member_query = Query()\n if self.membersDB.contains(member_query.id == member_id):\n member_data = self.membersDB.get(member_query.id == member_id)\n sign_offs = member_data.get(\"sign_offs\", None)\n\n if not sign_offs:\n sign_offs_dict = {}\n for activity in config.sign_off_list.keys():\n sign_offs_dict[activity] = False\n sign_offs = sign_offs_dict\n\n return sign_offs\n else:\n raise LookupError(\"The entered user ID could not be found in the database\")\n\n \"\"\" set_member_sign_offs: Updates the sign-offs for a given member. \n\n Returns: Dictionary containing the skills a member has been signed off on. 
If the member\n does not have any recorded sign-offs, generates a dict of all the sign-offs with \n the value False \"\"\"\n\n def set_member_sign_offs(self, member_id, sign_offs):\n member_query = Query()\n if self.membersDB.contains(member_query.id == member_id):\n member_data = self.membersDB.get(member_query.id == member_id)\n\n if not sign_offs:\n for activity in config.sign_off_list.keys():\n sign_offs[activity] = False\n\n member_data[\"sign_offs\"] = sign_offs\n self.membersDB.update(member_data, member_query.id == member_id)\n\n return sign_offs\n else:\n raise LookupError(\"The entered user ID could not be found in the database\")\n\n def delete_member(self, member_id, hard_delete=False):\n member_query = Query()\n if hard_delete:\n raise RuntimeError(\"Not supported\")\n\n if self.membersDB.contains(member_query.id == member_id):\n member_data = self.membersDB.get(member_query.id == member_id)\n member_data[\"deleted\"] = True\n self.membersDB.update(member_data, member_query.id == member_id)\n else:\n raise LookupError(\"The entered user ID could not be found in the database\")\n\n\n\n\n","sub_path":"venv/dbManage.py","file_name":"dbManage.py","file_ext":"py","file_size_in_byte":13073,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"544476336","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.4 (62061)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-i686/egg/lpo/rule.py\n# Compiled at: 2008-07-31 16:08:18\nimport re\nfrom sqlalchemy import orm\nimport tables as ta, expression as ex\nMODEL = 0\nPREM = 1\nCONS = 2\n\nclass Rule(object):\n \"\"\"\n >>> Rule(prems=[1], cons=[2])\n 1\n -->\n 2\n \"\"\"\n __module__ = __name__\n\n def __init__(self, prems=None, cons=None):\n \"\"\"\n \"\"\"\n if prems:\n for prem in prems:\n prem = ex.expr(prem)\n prem.set_inrule(PREM)\n self.sentences.append(prem)\n\n if cons:\n for con in cons:\n con = ex.expr(con)\n con.set_inrule(CONS)\n self.sentences.append(con)\n\n def __repr__(self):\n \"\"\"\n \"\"\"\n prems = self.get_prems()\n prems = map(repr, prems)\n prems.sort()\n cons = self.get_cons()\n cons = map(repr, cons)\n cons.sort()\n return '%s\\n-->\\n%s' % ((';\\n').join(prems), (';\\n').join(cons))\n\n def get_prems(self, exclude=None):\n prems = []\n for s in self.sentences:\n if s.inrule == PREM and repr(s) != repr(exclude):\n prems.append(s)\n\n return prems\n\n def get_cons(self):\n cons = []\n for s in self.sentences:\n if s.inrule == CONS:\n cons.append(s)\n\n return cons\n\n\norm.mapper(Rule, ta.rules, properties={'sentences': orm.relation(ex.Expression, primaryjoin=ta.expressions.c.rule_id == ta.rules.c.rule_id, cascade='all', backref=orm.backref('rule'))})\n\ndef rule(r):\n \"\"\"Create an Expr representing a logic expression by parsing the input\n string. 
Symbols and numbers are automatically converted to Exprs.\n \"\"\"\n if isinstance(r, Rule):\n return r\n r = re.sub('\\\\s+', '', r)\n m = re.match('^(.*)-->(.*)$', r)\n prems = m.group(1).split(';')\n cons = m.group(2).split(';')\n return Rule(prems, cons)","sub_path":"pycfiles/lpo-0.1.2-py2.4/rule.py","file_name":"rule.py","file_ext":"py","file_size_in_byte":2077,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"138739682","text":"import sys, re\r\npattern = r\"\\b[aA]{1,}\\b\"\r\nrep = r\"argh\"\r\nfor line in sys.stdin:\r\n line = line.rstrip()\r\n if len(line) == 0:\r\n break\r\n else:\r\n print (re.sub(pattern, rep, line, 1))\r\n\r\n","sub_path":"patternReplaceAaA.py","file_name":"patternReplaceAaA.py","file_ext":"py","file_size_in_byte":207,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"465858231","text":"from django.urls import path,re_path\n# from django.conf.urls import url\nfrom .views import show_comment,EditComment\n\napp_name = 'comments'\n\nurlpatterns = [ \n path('',show_comment,name=\"com\"), \n path('edit-comment/',EditComment.as_view(),name=\"edit-comment\"), \n # path('edit-comment///',EditComment.as_view(),name=\"edit-comment\"), \n # path('edit-name//',DisplayNameChange.as_view(),name=\"name-edit\"),\n \n]","sub_path":"comments/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":460,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"281854675","text":"\"\"\" Module containing custom layers \"\"\"\n\nimport torch as th\n\n\n# extending Conv2D and Deconv2D layers for equalized learning rate logic\nclass _equalized_conv2d(th.nn.Module):\n \"\"\" conv2d with the concept of equalized learning rate \"\"\"\n\n def __init__(self, c_in, c_out, k_size, stride=1, pad=0, initializer='kaiming', bias=True):\n \"\"\"\n constructor for the class\n :param c_in: input channels\n :param c_out: output channels\n :param k_size: kernel size (h, w) should be a tuple or a single integer\n :param stride: stride for conv\n :param pad: padding\n :param initializer: initializer. 
one of kaiming or xavier\n :param bias: whether to use bias or not\n \"\"\"\n super(_equalized_conv2d, self).__init__()\n self.conv = th.nn.Conv2d(c_in, c_out, k_size, stride, pad, bias=False)\n if initializer == 'kaiming':\n th.nn.init.kaiming_normal_(self.conv.weight, a=th.nn.init.calculate_gain('conv2d'))\n elif initializer == 'xavier':\n th.nn.init.xavier_normal_(self.conv.weight)\n\n self.use_bias = bias\n\n if self.use_bias:\n self.bias = th.nn.Parameter(th.FloatTensor(c_out).fill_(0))\n self.scale = (th.mean(self.conv.weight.data ** 2)) ** 0.5\n self.conv.weight.data.copy_(self.conv.weight.data / self.scale)\n\n def forward(self, x):\n \"\"\"\n forward pass of the network\n :param x: input\n :return: y => output\n \"\"\"\n try:\n dev_scale = self.scale.to(x.get_device())\n except RuntimeError:\n dev_scale = self.scale\n x = self.conv(x.mul(dev_scale))\n if self.use_bias:\n return x + self.bias.view(1, -1, 1, 1).expand_as(x)\n return x\n\n\nclass _equalized_deconv2d(th.nn.Module):\n \"\"\" Transpose convolution using the equalized learning rate \"\"\"\n\n def __init__(self, c_in, c_out, k_size, stride=1, pad=0, initializer='kaiming', bias=True):\n \"\"\"\n constructor for the class\n :param c_in: input channels\n :param c_out: output channels\n :param k_size: kernel size\n :param stride: stride for convolution transpose\n :param pad: padding\n :param initializer: initializer. one of kaiming or xavier\n :param bias: whether to use bias or not\n \"\"\"\n super(_equalized_deconv2d, self).__init__()\n self.deconv = th.nn.ConvTranspose2d(c_in, c_out, k_size, stride, pad, bias=False)\n if initializer == 'kaiming':\n th.nn.init.kaiming_normal_(self.deconv.weight, a=th.nn.init.calculate_gain('conv2d'))\n elif initializer == 'xavier':\n th.nn.init.xavier_normal_(self.deconv.weight)\n\n self.use_bias = bias\n\n if self.use_bias:\n self.bias = th.nn.Parameter(th.FloatTensor(c_out).fill_(0))\n self.scale = (th.mean(self.deconv.weight.data ** 2)) ** 0.5\n self.deconv.weight.data.copy_(self.deconv.weight.data / self.scale)\n\n def forward(self, x):\n \"\"\"\n forward pass of the layer\n :param x: input\n :return: y => output\n \"\"\"\n try:\n dev_scale = self.scale.to(x.get_device())\n except RuntimeError:\n dev_scale = self.scale\n\n x = self.deconv(x.mul(dev_scale))\n if self.use_bias:\n return x + self.bias.view(1, -1, 1, 1).expand_as(x)\n return x\n\n\nclass _equalized_linear(th.nn.Module):\n \"\"\" Linear layer using equalized learning rate \"\"\"\n\n def __init__(self, c_in, c_out, initializer='kaiming', bias=True):\n \"\"\"\n Linear layer from pytorch extended to include equalized learning rate\n :param c_in: number of input channels\n :param c_out: number of output channels\n :param initializer: initializer to be used: one of \"kaiming\" or \"xavier\"\n :param bias: whether to use bias with the linear layer\n \"\"\"\n super(_equalized_linear, self).__init__()\n self.linear = th.nn.Linear(c_in, c_out, bias=False)\n if initializer == 'kaiming':\n th.nn.init.kaiming_normal_(self.linear.weight,\n a=th.nn.init.calculate_gain('linear'))\n elif initializer == 'xavier':\n th.nn.init.xavier_normal_(self.linear.weight)\n\n self.use_bias = bias\n\n if self.use_bias:\n self.bias = th.nn.Parameter(th.FloatTensor(c_out).fill_(0))\n self.scale = (th.mean(self.linear.weight.data ** 2)) ** 0.5\n self.linear.weight.data.copy_(self.linear.weight.data / self.scale)\n\n def forward(self, x):\n \"\"\"\n forward pass of the layer\n :param x: input\n :return: y => output\n \"\"\"\n try:\n dev_scale = 
self.scale.to(x.get_device())\n except RuntimeError:\n dev_scale = self.scale\n x = self.linear(x.mul(dev_scale))\n if self.use_bias:\n return x + self.bias.view(1, -1).expand_as(x)\n return x\n\n\n# ----------------------------------------------------------------------------\n# Pixelwise feature vector normalization.\n# reference: https://github.com/tkarras/progressive_growing_of_gans/blob/master/networks.py#L120\n\nclass PixelwiseNorm(th.nn.Module):\n def __init__(self):\n super(PixelwiseNorm, self).__init__()\n\n def forward(self, x, alpha=1e-8):\n \"\"\"\n forward pass of the module\n :param x: input activations volume\n :param alpha: small number for numerical stability\n :return: y => pixel normalized activations\n \"\"\"\n y = th.mean(x.pow(2.), dim=1, keepdim=True) + alpha # [N1HW]\n return x.div(y.sqrt())\n\n\n# ==========================================================\n# Layers required for Building The generator and\n# discriminator\n# ==========================================================\nclass GenInitialBlock(th.nn.Module):\n \"\"\" Module implementing the initial block of the input \"\"\"\n\n def __init__(self, in_channels, use_eql):\n \"\"\"\n constructor for the inner class\n :param in_channels: number of input channels to the block\n :param use_eql: whether to use equalized learning rate\n \"\"\"\n from torch.nn import LeakyReLU\n\n super(GenInitialBlock, self).__init__()\n\n if use_eql:\n self.conv_1 = _equalized_deconv2d(in_channels, in_channels, (4, 4), bias=True)\n self.conv_2 = _equalized_conv2d(in_channels, in_channels, (3, 3),\n pad=1, bias=True)\n\n else:\n from torch.nn import Conv2d, ConvTranspose2d\n self.conv_1 = ConvTranspose2d(in_channels, in_channels, (4, 4), bias=True)\n self.conv_2 = Conv2d(in_channels, in_channels, (3, 3), padding=1, bias=True)\n\n # Pixelwise feature vector normalization operation\n self.pixNorm = PixelwiseNorm()\n\n # leaky_relu:\n self.lrelu = LeakyReLU(0.2)\n\n def forward(self, x):\n \"\"\"\n forward pass of the block\n :param x: input to the module\n :return: y => output\n \"\"\"\n # convert the tensor shape:\n y = th.unsqueeze(th.unsqueeze(x, -1), -1)\n\n # perform the forward computations:\n y = self.lrelu(self.conv_1(y))\n y = self.lrelu(self.conv_2(y))\n\n # apply pixel norm\n y = self.pixNorm(y)\n\n return y\n\n\nclass GenGeneralConvBlock(th.nn.Module):\n \"\"\" Module implementing a general convolutional block \"\"\"\n\n def __init__(self, in_channels, out_channels, use_eql):\n \"\"\"\n constructor for the class\n :param in_channels: number of input channels to the block\n :param out_channels: number of output channels required\n :param use_eql: whether to use equalized learning rate\n \"\"\"\n from torch.nn import LeakyReLU, Upsample\n\n super(GenGeneralConvBlock, self).__init__()\n\n self.upsample = Upsample(scale_factor=2)\n\n if use_eql:\n self.conv_1 = _equalized_conv2d(in_channels, out_channels, (3, 3),\n pad=1, bias=True)\n self.conv_2 = _equalized_conv2d(out_channels, out_channels, (3, 3),\n pad=1, bias=True)\n else:\n from torch.nn import Conv2d\n self.conv_1 = Conv2d(in_channels, out_channels, (3, 3),\n padding=1, bias=True)\n self.conv_2 = Conv2d(out_channels, out_channels, (3, 3),\n padding=1, bias=True)\n\n # Pixelwise feature vector normalization operation\n self.pixNorm = PixelwiseNorm()\n\n # leaky_relu:\n self.lrelu = LeakyReLU(0.2)\n\n def forward(self, x):\n \"\"\"\n forward pass of the block\n :param x: input\n :return: y => output\n \"\"\"\n y = self.upsample(x)\n y = 
self.pixNorm(self.lrelu(self.conv_1(y)))\n y = self.pixNorm(self.lrelu(self.conv_2(y)))\n\n return y\n\n\n# function to calculate the Exponential moving averages for the Generator weights\n# This function updates the exponential average weights based on the current training\ndef update_average(model_tgt, model_src, beta):\n \"\"\"\n update the model_target using exponential moving averages\n :param model_tgt: target model\n :param model_src: source model\n :param beta: value of decay beta\n :return: None (updates the target model)\n \"\"\"\n\n # utility function for toggling the gradient requirements of the models\n def toggle_grad(model, requires_grad):\n for p in model.parameters():\n p.requires_grad_(requires_grad)\n\n # turn off gradient calculation\n toggle_grad(model_tgt, False)\n toggle_grad(model_src, False)\n\n param_dict_src = dict(model_src.named_parameters())\n\n for p_name, p_tgt in model_tgt.named_parameters():\n p_src = param_dict_src[p_name]\n assert (p_src is not p_tgt)\n p_tgt.copy_(beta * p_tgt + (1. - beta) * p_src)\n\n # turn back on the gradient calculation\n toggle_grad(model_tgt, True)\n toggle_grad(model_src, True)\n\n\nclass MinibatchStdDev(th.nn.Module):\n \"\"\"\n Minibatch standard deviation layer for the discriminator\n \"\"\"\n\n def __init__(self, group_size=None):\n \"\"\"\n derived class constructor\n :param group_size: the size of the group (default None => batch_size)\n note that if the batch_size % group_size != 0\n the group_size defaults to batch_size\n \"\"\"\n super(MinibatchStdDev, self).__init__()\n self.group_size = group_size\n\n def forward(self, x, alpha=1e-8):\n \"\"\"\n forward pass of the layer\n :param x: input activation volume\n :param alpha: small number for numerical stability\n :return: y => x appended with standard deviation constant map\n \"\"\"\n # calculate the g and m values\n g = min(self.group_size, x.size(0)) \\\n if self.group_size is not None and (x.size(0) % self.group_size == 0) else x.size(0)\n m = int(x.size(0) / g)\n\n # [GMCHW] Split minibatch into M groups of size G.\n y = th.reshape(x, (g, m, x.size(1), x.size(2), x.size(3)))\n\n # [GMCHW] Subtract mean over group.\n y = y - th.mean(y, dim=0, keepdim=True)\n\n # [MCHW] Calc variance over group.\n y = th.mean(y.pow(2.), dim=0, keepdim=False)\n\n # [MCHW] Calc stddev over group.\n y = th.sqrt(y + alpha)\n\n # [M111] Take average over fmaps and pixels.\n y = th.mean(y.view(m, -1), dim=1, keepdim=False).view(m, 1, 1, 1)\n\n # [N1HW] Replicate over group and pixels.\n y = y.repeat(g, 1, x.size(2), x.size(3))\n\n # [NCHW] Append as new fmap.\n return th.cat([x, y], 1)\n\n\nclass DisFinalBlock(th.nn.Module):\n \"\"\" Final block for the Discriminator \"\"\"\n\n def __init__(self, in_channels, use_eql):\n \"\"\"\n constructor of the class\n :param in_channels: number of input channels\n :param use_eql: whether to use equalized learning rate\n \"\"\"\n from torch.nn import LeakyReLU\n\n super(DisFinalBlock, self).__init__()\n\n # declare the required modules for forward pass\n self.batch_discriminator = MinibatchStdDev()\n if use_eql:\n self.conv_1 = _equalized_conv2d(in_channels + 1, in_channels, (3, 3), pad=1, bias=True)\n self.conv_2 = _equalized_conv2d(in_channels, in_channels, (4, 4), bias=True)\n # final conv layer emulates a fully connected layer\n self.conv_3 = _equalized_conv2d(in_channels, 1, (1, 1), bias=True)\n else:\n from torch.nn import Conv2d\n self.conv_1 = Conv2d(in_channels + 1, in_channels, (3, 3), padding=1, bias=True)\n self.conv_2 = Conv2d(in_channels, 
in_channels, (4, 4), bias=True)\n # final conv layer emulates a fully connected layer\n self.conv_3 = Conv2d(in_channels, 1, (1, 1), bias=True)\n\n # leaky_relu:\n self.lrelu = LeakyReLU(0.2)\n\n def forward(self, x):\n \"\"\"\n forward pass of the FinalBlock\n :param x: input\n :return: y => output\n \"\"\"\n # minibatch_std_dev layer\n y = self.batch_discriminator(x)\n\n # define the computations\n y = self.lrelu(self.conv_1(y))\n y = self.lrelu(self.conv_2(y))\n\n # fully connected layer\n y = self.conv_3(y) # This layer has linear activation\n\n # flatten the output raw discriminator scores\n return y.view(-1)\n\n\nclass ConDisFinalBlock(th.nn.Module):\n \"\"\" Final block for the Conditional Discriminator \"\"\"\n\n def __init__(self, in_channels, in_latent_size, out_latent_size, use_eql):\n \"\"\"\n constructor of the class\n :param in_channels: number of input channels\n :param in_latent_size: size of the input latent vectors\n :param out_latent_size: size of the transformed latent vectors\n :param use_eql: whether to use equalized learning rate\n \"\"\"\n from torch.nn import LeakyReLU\n\n super(ConDisFinalBlock, self).__init__()\n\n # declare the required modules for forward pass\n self.batch_discriminator = MinibatchStdDev()\n if use_eql:\n self.compressor = _equalized_linear(c_in=in_latent_size, c_out=out_latent_size)\n self.conv_1 = _equalized_conv2d(in_channels + 1, in_channels, (3, 3), pad=1, bias=True)\n self.conv_2 = _equalized_conv2d(in_channels + out_latent_size,\n in_channels, (1, 1), bias=True)\n self.conv_3 = _equalized_conv2d(in_channels, in_channels, (4, 4), bias=True)\n # final conv layer emulates a fully connected layer\n self.conv_4 = _equalized_conv2d(in_channels, 1, (1, 1), bias=True)\n else:\n from torch.nn import Conv2d, Linear\n self.compressor = Linear(in_features=in_latent_size,\n out_features=out_latent_size, bias=True)\n self.conv_1 = Conv2d(in_channels + 1, in_channels, (3, 3), padding=1, bias=True)\n self.conv_2 = Conv2d(in_channels + out_latent_size,\n in_channels, (1, 1), bias=True)\n self.conv_3 = Conv2d(in_channels, in_channels, (4, 4), bias=True)\n # final conv layer emulates a fully connected layer\n self.conv_4 = Conv2d(in_channels, 1, (1, 1), bias=True)\n\n # leaky_relu:\n self.lrelu = LeakyReLU(0.2)\n\n def forward(self, x, latent_vector):\n \"\"\"\n forward pass of the FinalBlock\n :param x: input\n :param latent_vector: latent vector for conditional discrimination\n :return: y => output\n \"\"\"\n # minibatch_std_dev layer\n y = self.batch_discriminator(x)\n\n # define the computations\n y = self.lrelu(self.conv_1(y))\n # apply the latent vector here:\n compressed_latent_vector = self.compressor(latent_vector)\n cat = th.unsqueeze(th.unsqueeze(compressed_latent_vector, -1), -1)\n cat = cat.expand(\n compressed_latent_vector.shape[0],\n compressed_latent_vector.shape[1],\n y.shape[2],\n y.shape[3]\n )\n y = th.cat((y, cat), dim=1)\n\n y = self.lrelu(self.conv_2(y))\n y = self.lrelu(self.conv_3(y))\n\n # fully connected layer\n y = self.conv_4(y) # This layer has linear activation\n\n # flatten the output raw discriminator scores\n return y.view(-1)\n\n\nclass DisGeneralConvBlock(th.nn.Module):\n \"\"\" General block in the discriminator \"\"\"\n\n def __init__(self, in_channels, out_channels, use_eql):\n \"\"\"\n constructor of the class\n :param in_channels: number of input channels\n :param out_channels: number of output channels\n :param use_eql: whether to use equalized learning rate\n \"\"\"\n from torch.nn import AvgPool2d, LeakyReLU\n\n 
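# each block applies two 3x3 convolutions (with LeakyReLU) and then halves the spatial resolution via average pooling\n        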
super(DisGeneralConvBlock, self).__init__()\n\n if use_eql:\n self.conv_1 = _equalized_conv2d(in_channels, in_channels, (3, 3), pad=1, bias=True)\n self.conv_2 = _equalized_conv2d(in_channels, out_channels, (3, 3), pad=1, bias=True)\n else:\n from torch.nn import Conv2d\n self.conv_1 = Conv2d(in_channels, in_channels, (3, 3), padding=1, bias=True)\n self.conv_2 = Conv2d(in_channels, out_channels, (3, 3), padding=1, bias=True)\n\n self.downSampler = AvgPool2d(2)\n\n # leaky_relu:\n self.lrelu = LeakyReLU(0.2)\n\n def forward(self, x):\n \"\"\"\n forward pass of the module\n :param x: input\n :return: y => output\n \"\"\"\n # define the computations\n y = self.lrelu(self.conv_1(x))\n y = self.lrelu(self.conv_2(y))\n y = self.downSampler(y)\n\n return y\n","sub_path":"ext_src/pro_gan_pytorch/pro_gan_pytorch/CustomLayers.py","file_name":"CustomLayers.py","file_ext":"py","file_size_in_byte":17904,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"39366371","text":"import argparse\nimport os\nimport time\nimport random\n\nos.environ[\"DGL_REPO\"] = \"http://data.dgl.ai/\"\nimport sklearn.preprocessing\nfrom dgl.data import register_data_args\nimport torch.distributed as dist\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nimport pickle\nimport matplotlib.pyplot as plt\n\nfrom modules import GCN\nfrom sampler import ClusterIter\nfrom utils import Logger, evaluate, load_data\n\n\ndef broadcast_weight(para, rank_list=None, source=0):\n if rank_list is None:\n group = dist.group.WORLD\n else:\n group = dist.new_group(rank_list)\n dist.broadcast(para, src=source, group=group, async_op=False)\n if rank_list is not None:\n dist.destroy_process_group(group)\n\n\ndef broadcast_module_itr(args, module: torch.nn.Module, source=0):\n group = dist.new_group(list(range(args.num_subnet)))\n for para in module.parameters():\n dist.broadcast(para.data, src=source, group=group, async_op=False)\n dist.destroy_process_group(group)\n\n\ndef all_reduce_weights(args, para):\n group = dist.group.WORLD\n dist.all_reduce(para, op=dist.ReduceOp.SUM, group=group)\n para = para.div_(args.num_subnet)\n\n\ndef all_reduce_module(args, module: torch.nn.Module):\n group = dist.group.WORLD\n for para in module.parameters():\n dist.all_reduce(para.data, op=dist.ReduceOp.SUM, group=group)\n para.data = para.data.div_(args.num_subnet)\n\n\ndef create_partition(num_subnet, size):\n possible_indices = [x for x in range(size)]\n random.shuffle(possible_indices)\n feats_idx_list = [[] for x in range(num_subnet)]\n for i in range(size):\n next_idx = possible_indices[i]\n subnet_idx = i % num_subnet\n feats_idx_list[subnet_idx].append(next_idx)\n sage_feats_idx_list = []\n for idx in feats_idx_list:\n idx = torch.LongTensor(idx)\n lower_idx = idx + size\n full_idx = torch.cat((idx, lower_idx))\n sage_feats_idx_list.append((idx, full_idx))\n return sage_feats_idx_list\n\n\nclass DistributedGNNWrapper(torch.nn.Module):\n # wrapper class to handle full GNN and subnetworks that are created\n\n def __init__(self, args, g, in_feats, n_classes, device):\n super().__init__()\n self.args = args\n self.g = g\n self.in_feats = in_feats\n self.n_classes = n_classes\n self.device = device\n if args.rank == 0:\n # construct a full model to keep on the parameter server\n # this is a graphSAGE-style network\n self.base_model = GCN(\n in_feats, args.n_hidden, n_classes, args.n_layers, F.relu,\n args.dropout, args.use_layernorm, False, False, 1, True)\n # self.base_model = 
self.base_model.to(self.device)\n else:\n self.base_model = None\n self.sub_model = GCN(\n in_feats, args.n_hidden, n_classes, args.n_layers, F.relu,\n args.dropout, args.use_layernorm, False, True, args.num_subnet, True)\n self.sub_model = self.sub_model.to(self.device)\n self.current_partition = None\n\n def sample_partitions(self):\n partition = []\n for i in range(self.args.n_layers):\n partition.append(\n create_partition(self.args.num_subnet, self.args.n_hidden))\n return partition\n\n def sync_model(self):\n with torch.no_grad():\n # run all reduce on the shared weights\n all_reduce_weights(self.args, self.sub_model.layers[-1].linear.bias.data)\n\n # copy weights into the full model on the main node\n if self.args.rank == 0:\n for layer_idx in range(len(self.current_partition) + 1):\n if layer_idx == 0:\n # first layer\n idx_tens, full_idx_tens = self.current_partition[layer_idx][0]\n self.base_model.layers[0].linear.weight.data[idx_tens, :] = (\n self.sub_model.layers[0].linear.weight.data.cpu())\n self.base_model.layers[0].linear.bias.data[idx_tens] = (\n self.sub_model.layers[0].linear.bias.data.cpu())\n elif layer_idx == len(self.current_partition):\n # last layer\n idx_tens, full_idx_tens = self.current_partition[layer_idx - 1][0]\n self.base_model.layers[layer_idx].linear.weight.data[:, full_idx_tens] = (\n self.sub_model.layers[layer_idx].linear.weight.data.cpu())\n\n # copy all of the bias weights -- shared between all networks\n self.base_model.layers[layer_idx].linear.bias.data = (\n self.sub_model.layers[layer_idx].linear.bias.data.cpu())\n else:\n # general case, middle layers\n prev_idx, full_prev_idx = self.current_partition[layer_idx - 1][0]\n next_idx, full_next_idx = self.current_partition[layer_idx][0]\n correct_rows = self.base_model.layers[layer_idx].linear.weight.data[:, full_prev_idx]\n correct_rows[next_idx, :] = (\n self.sub_model.layers[layer_idx].linear.weight.data.cpu())\n self.base_model.layers[layer_idx].linear.weight.data[:, full_prev_idx] = correct_rows\n self.base_model.layers[layer_idx].linear.bias.data[next_idx] = (\n self.sub_model.layers[layer_idx].linear.bias.data.cpu())\n\n # only works properly if there is no overlap within the parameters\n for layer_idx in range(len(self.current_partition) + 1):\n if layer_idx == 0:\n # first layer\n for site_i in range(1, self.args.num_subnet):\n idx_tens, full_idx_tens = self.current_partition[layer_idx][site_i]\n if self.args.rank == 0:\n # communicate weight\n corr_weight = self.base_model.layers[0].linear.weight.data[idx_tens, :]\n corr_weight = corr_weight.to(self.device)\n broadcast_weight(\n corr_weight, rank_list=[0, site_i], source=site_i)\n corr_weight = corr_weight.to('cpu')\n self.base_model.layers[0].linear.weight.data[idx_tens, :] = corr_weight\n\n # communicate bias\n corr_bias = self.base_model.layers[0].linear.bias.data[idx_tens]\n corr_bias = corr_bias.to(self.device)\n broadcast_weight(\n corr_bias, rank_list=[0, site_i], source=site_i)\n corr_bias = corr_bias.to('cpu')\n self.base_model.layers[0].linear.bias.data[idx_tens] = corr_bias\n else:\n broadcast_weight(\n self.sub_model.layers[0].linear.weight.data, rank_list=[0, site_i],\n source=site_i)\n broadcast_weight(\n self.sub_model.layers[0].linear.bias.data, rank_list=[0, site_i],\n source=site_i)\n\n\n elif layer_idx == len(self.current_partition):\n # last layer\n for site_i in range(1, self.args.num_subnet):\n idx_tens, full_idx_tens = self.current_partition[layer_idx - 1][site_i]\n if self.args.rank == 0:\n # do NOT need to copy in 
bias, it is all reduced\n corr_row = self.base_model.layers[layer_idx].linear.weight.data[:, full_idx_tens]\n corr_row = corr_row.to(self.device)\n broadcast_weight(\n corr_row, rank_list=[0, site_i], source=site_i)\n corr_row = corr_row.to('cpu')\n self.base_model.layers[layer_idx].linear.weight.data[:, full_idx_tens] = corr_row\n else:\n broadcast_weight(\n self.sub_model.layers[layer_idx].linear.weight.data,\n rank_list=[0, site_i], source=site_i)\n else:\n # general case, middle layers\n for site_i in range(1, self.args.num_subnet):\n prev_tens, full_prev_tens = self.current_partition[layer_idx - 1][site_i]\n next_tens, full_next_tens = self.current_partition[layer_idx][site_i]\n if self.args.rank == 0:\n # communicate the weights\n crows = self.base_model.layers[layer_idx].linear.weight.data[:, full_prev_tens]\n full_split = crows[next_tens, :]\n full_split = full_split.to(self.device)\n broadcast_weight(\n full_split, rank_list=[0, site_i], source=site_i)\n full_split = full_split.to('cpu')\n crows[next_tens, :] = full_split\n self.base_model.layers[layer_idx].linear.weight.data[:, full_prev_tens] = crows\n\n # communicate the bias\n cbias = self.base_model.layers[layer_idx].linear.bias.data[next_tens]\n cbias = cbias.to(self.device)\n broadcast_weight(\n cbias, rank_list=[0, site_i], source=site_i)\n cbias = cbias.to('cpu')\n self.base_model.layers[layer_idx].linear.bias.data[next_tens] = cbias\n else:\n broadcast_weight(\n self.sub_model.layers[layer_idx].linear.weight.data, rank_list=[0, site_i],\n source=site_i)\n broadcast_weight(\n self.sub_model.layers[layer_idx].linear.bias.data, rank_list=[0, site_i],\n source=site_i)\n\n def ini_sync_dispatch_model(self):\n # perform the partition\n all_indices = self.sample_partitions()\n\n # set weights within central node and broadcast them\n with torch.no_grad():\n # copy in weights for node 0\n if self.args.rank == 0:\n for layer_idx in range(len(all_indices) + 1):\n if layer_idx == 0:\n idx_tens, _ = all_indices[0][0]\n self.sub_model.layers[0].linear.weight.data = (\n self.base_model.layers[0].linear.weight.data[idx_tens, :]\n .to(self.device))\n self.sub_model.layers[0].linear.bias.data = (\n self.base_model.layers[0].linear.bias.data[idx_tens]\n .to(self.device))\n elif layer_idx == len(all_indices):\n idx_tens, full_idx_tens = all_indices[-1][0]\n self.sub_model.layers[layer_idx].linear.weight.data = (\n self.base_model.layers[layer_idx].linear.weight.data[:, full_idx_tens]\n .to(self.device))\n\n # full output bias is copied for the final layer\n self.sub_model.layers[layer_idx].linear.bias.data = (\n self.base_model.layers[layer_idx].linear.bias.data\n .to(self.device))\n else:\n prev_idx, full_prev_idx = all_indices[layer_idx - 1][0]\n next_idx, full_next_idx = all_indices[layer_idx][0]\n correct_cols = self.base_model.layers[layer_idx].linear.weight.data[:, full_prev_idx]\n self.sub_model.layers[layer_idx].linear.weight.data = (\n correct_cols[next_idx, :].to(self.device))\n self.sub_model.layers[layer_idx].linear.bias.data = (\n self.base_model.layers[layer_idx].linear.bias.data[next_idx]\n .to(self.device))\n\n # broadcast all of the shared weights\n broadcast_weight(self.sub_model.layers[-1].linear.bias, None, source=0)\n\n for layer_idx in range(len(all_indices) + 1):\n if layer_idx == 0:\n for site_i in range(1, self.args.num_subnet):\n idx_tens, full_idx_tens = all_indices[0][site_i]\n if self.args.rank == 0:\n # broadcast the weight\n bcast_weight = self.base_model.layers[0].linear.weight.data[idx_tens, :]\n 
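# with the default NCCL backend, broadcasts need CUDA tensors, so the slice is staged on the GPU first\n                            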
bcast_weight = bcast_weight.to(self.device)\n broadcast_weight(\n bcast_weight, rank_list=[0, site_i], source=0)\n bcast_weight = bcast_weight.to('cpu')\n\n # broadcastt the bias\n bcast_bias = self.base_model.layers[0].linear.bias.data[idx_tens]\n bcast_bias = bcast_bias.to(self.device)\n broadcast_weight(\n bcast_bias, rank_list=[0, site_i], source=0)\n bcast_bias = bcast_bias.to('cpu')\n else:\n broadcast_weight(\n self.sub_model.layers[0].linear.weight.data,\n rank_list=[0, site_i], source=0)\n broadcast_weight(\n self.sub_model.layers[0].linear.bias.data,\n rank_list=[0, site_i], source=0)\n\n elif layer_idx == len(all_indices):\n for site_i in range(1, self.args.num_subnet):\n idx_tens, full_idx_tens = all_indices[-1][site_i]\n if self.args.rank == 0:\n # do NOT need to broadcast bias, it is already done above\n bcast_weight = self.base_model.layers[layer_idx].linear.weight.data[:, full_idx_tens]\n bcast_weight = bcast_weight.to(self.device)\n broadcast_weight(\n bcast_weight, rank_list=[0, site_i], source=0)\n bcast_weight = bcast_weight.to('cpu')\n else:\n broadcast_weight(\n self.sub_model.layers[layer_idx].linear.weight.data,\n rank_list=[0, site_i], source=0)\n else:\n for site_i in range(1, self.args.num_subnet):\n prev_tens, full_prev_tens = all_indices[layer_idx - 1][site_i]\n next_tens, full_next_tens = all_indices[layer_idx][site_i]\n if self.args.rank == 0:\n # broadcast the weights\n correct_rows = (\n self.base_model.layers[layer_idx].linear.weight.data[:, full_prev_tens])\n correct_rows = correct_rows[next_tens, :]\n correct_rows = correct_rows.to(self.device)\n broadcast_weight(\n correct_rows, rank_list=[0, site_i],\n source=0)\n correct_rows = correct_rows.to('cpu')\n\n bcast_bias = self.base_model.layers[layer_idx].linear.bias.data[next_tens]\n bcast_bias = bcast_bias.to(self.device)\n broadcast_weight(\n bcast_bias, rank_list=[0, site_i], source=0)\n bcast_bias = bcast_bias.to('cpu')\n else:\n broadcast_weight(\n self.sub_model.layers[layer_idx].linear.weight.data, rank_list=[0, site_i],\n source=0)\n broadcast_weight(\n self.sub_model.layers[layer_idx].linear.bias.data, rank_list=[0, site_i],\n source=0)\n\n self.current_partition = all_indices\n\n def dispatch_model(self):\n # get the subnetwork indices\n all_indices = self.sample_partitions()\n\n with torch.no_grad():\n # copy in weights for node 0\n if self.args.rank == 0:\n for layer_idx in range(len(all_indices) + 1):\n if layer_idx == 0:\n idx_tens, _ = all_indices[0][0]\n self.sub_model.layers[0].linear.weight.data = (\n self.base_model.layers[0].linear.weight.data[idx_tens, :]\n .to(self.device))\n self.sub_model.layers[0].linear.bias.data = (\n self.base_model.layers[0].linear.bias.data[idx_tens]\n .to(self.device))\n elif layer_idx == len(all_indices):\n idx_tens, full_idx_tens = all_indices[-1][0]\n self.sub_model.layers[layer_idx].linear.weight.data = (\n self.base_model.layers[layer_idx].linear.weight.data[:, full_idx_tens]\n .to(self.device))\n\n # full output bias is copied for the final layer\n self.sub_model.layers[layer_idx].linear.bias.data = (\n self.base_model.layers[layer_idx].linear.bias.data\n .to(self.device))\n else:\n prev_idx, full_prev_idx = all_indices[layer_idx - 1][0]\n next_idx, full_next_idx = all_indices[layer_idx][0]\n correct_cols = self.base_model.layers[layer_idx].linear.weight.data[:, full_prev_idx]\n self.sub_model.layers[layer_idx].linear.weight.data = (\n correct_cols[next_idx, :].to(self.device))\n self.sub_model.layers[layer_idx].linear.bias.data = (\n 
self.base_model.layers[layer_idx].linear.bias.data[next_idx]\n .to(self.device))\n\n for layer_idx in range(len(all_indices) + 1):\n if layer_idx == 0:\n for site_i in range(1, self.args.num_subnet):\n idx_tens, full_idx_tens = all_indices[0][site_i]\n if self.args.rank == 0:\n # broadcast the weight\n bcast_weight = self.base_model.layers[0].linear.weight.data[idx_tens, :]\n bcast_weight = bcast_weight.to(self.device)\n broadcast_weight(\n bcast_weight, rank_list=[0, site_i], source=0)\n bcast_weight = bcast_weight.to('cpu')\n\n # broadcastt the bias\n bcast_bias = self.base_model.layers[0].linear.bias.data[idx_tens]\n bcast_bias = bcast_bias.to(self.device)\n broadcast_weight(\n bcast_bias, rank_list=[0, site_i], source=0)\n bcast_bias = bcast_bias.to('cpu')\n else:\n broadcast_weight(\n self.sub_model.layers[0].linear.weight.data,\n rank_list=[0, site_i], source=0)\n broadcast_weight(\n self.sub_model.layers[0].linear.bias.data,\n rank_list=[0, site_i], source=0)\n\n elif layer_idx == len(all_indices):\n for site_i in range(1, self.args.num_subnet):\n idx_tens, full_idx_tens = all_indices[-1][site_i]\n if self.args.rank == 0:\n # do NOT need to broadcast bias, it is already done above\n bcast_weight = self.base_model.layers[layer_idx].linear.weight.data[:, full_idx_tens]\n bcast_weight = bcast_weight.to(self.device)\n broadcast_weight(\n bcast_weight, rank_list=[0, site_i], source=0)\n bcast_weight = bcast_weight.to('cpu')\n else:\n broadcast_weight(\n self.sub_model.layers[layer_idx].linear.weight.data,\n rank_list=[0, site_i], source=0)\n else:\n for site_i in range(1, self.args.num_subnet):\n prev_tens, full_prev_tens = all_indices[layer_idx - 1][site_i]\n next_tens, full_next_tens = all_indices[layer_idx][site_i]\n if self.args.rank == 0:\n # broadcast the weights\n correct_rows = (\n self.base_model.layers[layer_idx].linear.weight.data[:, full_prev_tens])\n correct_rows = correct_rows[next_tens, :]\n correct_rows = correct_rows.to(self.device)\n broadcast_weight(\n correct_rows, rank_list=[0, site_i],\n source=0)\n correct_rows = correct_rows.to('cpu')\n\n bcast_bias = self.base_model.layers[layer_idx].linear.bias.data[next_tens]\n bcast_bias = bcast_bias.to(self.device)\n broadcast_weight(\n bcast_bias, rank_list=[0, site_i], source=0)\n bcast_bias = bcast_bias.to('cpu')\n else:\n broadcast_weight(\n self.sub_model.layers[layer_idx].linear.weight.data, rank_list=[0, site_i],\n source=0)\n broadcast_weight(\n self.sub_model.layers[layer_idx].linear.bias.data, rank_list=[0, site_i],\n source=0)\n\n self.current_partition = all_indices\n\n\ndef train(\n ist_model, args, g, cluster_iterator, labels, train_mask, val_mask,\n test_mask, train_nid, device):\n # track the metrics throughout training\n if args.rank == 0:\n test_accs = []\n val_accs = []\n losses = []\n else:\n test_accs = None\n val_accs = None\n losses = None\n\n # create the loss\n loss_fcn = torch.nn.CrossEntropyLoss()\n local_epochs = args.n_epochs // args.num_subnet\n train_nid = torch.from_numpy(train_nid).to(device)\n\n # training loop\n running_loss = 0.\n curr_iter = 0.\n total_iter = 0.\n total_time = 0.\n start_time = time.time()\n for e in range(local_epochs):\n print(f'{args.rank}: running epoch {e} / {local_epochs}', flush=True)\n lr = args.lr\n run_eval = True # run eval once every epoch\n for j, cluster in enumerate(cluster_iterator):\n # dispatch the model\n if total_iter % args.iter_per_site == 0:\n if e > 0:\n dist.barrier(group=dist.group.WORLD)\n ist_model.dispatch_model()\n 
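# dispatch_model() has just swapped a fresh weight partition into the sub-network\n                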
ist_model.sub_model.train()\n optimizer = torch.optim.Adam(\n ist_model.sub_model.parameters(), lr=lr,\n weight_decay=args.weight_decay)\n optimizer.zero_grad()\n cluster = cluster.to(device)\n pred = ist_model.sub_model(cluster)\n batch_labels = cluster.ndata['label']\n batch_train_mask = cluster.ndata['train_mask']\n loss = loss_fcn(\n pred[batch_train_mask], batch_labels[batch_train_mask])\n loss.backward()\n running_loss += float(loss)\n optimizer.step()\n\n # sync model every \"iter_per_site\" iterations and at end of training\n total_iter += 1\n curr_iter += 1 # used for computing average loss between evaluations\n if (\n (total_iter % args.iter_per_site == 0)\n or ((j == len(cluster_iterator) - 1) and (e == local_epochs - 1))):\n print(f'{args.rank} running sync @ iter #{total_iter}, epoch {e}')\n dist.barrier(group=dist.group.WORLD)\n ist_model.sync_model()\n\n # run eval during each new epoch and at the end of training\n # only run eval after updates are synchronized into global model\n if (\n run_eval or\n ((j == len(cluster_iterator) - 1) and (e == local_epochs - 1))):\n end_time = time.time()\n total_time += (end_time - start_time)\n run_eval = False\n if args.rank == 0:\n ist_model.base_model.eval()\n if args.use_f1:\n val_acc = evaluate(ist_model.base_model, g.cpu(), labels.cpu(), val_mask.cpu(), 'f1')\n test_acc = evaluate(ist_model.base_model, g.cpu(), labels.cpu(), test_mask.cpu(), 'f1')\n else:\n val_acc = evaluate(ist_model.base_model, g.cpu(), labels.cpu(), val_mask.cpu())\n test_acc = evaluate(ist_model.base_model, g.cpu(), labels.cpu(), test_mask.cpu())\n val_accs.append(val_acc)\n test_accs.append(test_acc)\n losses.append((running_loss / curr_iter))\n running_loss = 0.\n curr_iter = 0.\n start_time = time.time()\n\n # make sure training results are the last thing to be printed\n # so that it can be easily grepped with a python script\n dist.barrier(group=dist.group.WORLD)\n if args.rank == 0:\n # save training curve and results\n title = args.fig_name\n os.makedirs(args.fig_dir, exist_ok=True)\n plt.plot(val_accs)\n plt.title(title)\n plt.savefig(os.path.join(args.fig_dir, title + '.png'))\n\n if args.save_results:\n results = {\n 'total_time': total_time,\n 'trn_losses': losses,\n 'val_accs': val_accs,\n 'test_accs': test_accs,\n }\n fn = title + '_result.pckl'\n result_path = os.path.join(args.fig_dir, fn)\n with open(result_path, 'wb') as f:\n pickle.dump(results, f)\n else:\n print(f'Training Time: {total_time:.4f}', flush=True)\n print(f'Last Val: {val_accs[-1]:.4f}', flush=True)\n print(f'Best Val: {max(val_accs):.4f}', flush=True)\n print(f'Last Test: {test_accs[-1]:.4f}', flush=True)\n print(f'Best Test: {max(test_accs):.4f}', flush=True)\n\n\ndef get_data(args, device):\n # load and preprocess dataset\n data = load_data(args)\n g = data.g\n train_mask = g.ndata['train_mask']\n val_mask = g.ndata['val_mask']\n test_mask = g.ndata['test_mask']\n labels = g.ndata['label']\n train_nid = np.nonzero(train_mask.data.numpy())[0].astype(np.int64)\n\n # Normalize features\n if args.normalize:\n feats = g.ndata['feat']\n train_feats = feats[train_mask]\n scaler = sklearn.preprocessing.StandardScaler()\n scaler.fit(train_feats.data.numpy())\n features = scaler.transform(feats.data.numpy())\n g.ndata['feat'] = torch.FloatTensor(features)\n\n in_feats = g.ndata['feat'].shape[1]\n n_classes = data.num_classes\n n_edges = g.number_of_edges()\n g = g.long()\n\n # create the cluster gcn iterator\n cluster_iterator = ClusterIter(\n args.dataset, g, args.psize, 
args.batch_size,\n train_nid, use_pp=args.use_pp)\n\n # set device for dataset tensors\n val_mask = val_mask.to(device)\n test_mask = test_mask.to(device)\n g = g.int().to(device)\n return (\n g, cluster_iterator, train_mask, val_mask, test_mask, labels,\n train_nid, in_feats, n_classes, n_edges)\n\n\ndef main():\n parser = argparse.ArgumentParser(description='GCN')\n register_data_args(parser)\n parser.add_argument(\"--iter_per_site\", type=int, default=5)\n parser.add_argument(\"--num_subnet\", type=int, default=2,\n help=\"number of sub networks\")\n parser.add_argument(\"--dropout\", type=float, default=0.5,\n help=\"dropout probability\")\n parser.add_argument(\"--lr\", type=float, default=0.01,\n help=\"learning rate\")\n parser.add_argument(\"--n-epochs\", type=int, default=20,\n help=\"number of training epochs\")\n parser.add_argument(\"--n-hidden\", type=int, default=16,\n help=\"number of hidden gcn units\")\n parser.add_argument(\"--n-layers\", type=int, default=1,\n help=\"number of hidden gcn layers\")\n parser.add_argument(\"--weight-decay\", type=float, default=5e-4,\n help=\"Weight for L2 loss\")\n parser.add_argument(\"--use_layernorm\", type=bool, default=False,\n help=\"Whether use layernorm (default=False)\")\n parser.add_argument('--dist-backend', type=str, default='nccl', metavar='S',\n help='backend type for distributed PyTorch')\n parser.add_argument('--dist-url', type=str, default='tcp://127.0.0.1:9971', metavar='S',\n help='master ip for distributed PyTorch')\n parser.add_argument('--rank', type=int, default=0, metavar='R',\n help='rank for distributed PyTorch')\n parser.add_argument('--cuda-id', type=int, default=0, metavar='N',\n help='cuda index, if the instance has multiple GPUs.')\n parser.add_argument(\"--batch-size\", type=int, default=20,\n help=\"batch size\")\n parser.add_argument(\"--psize\", type=int, default=1500,\n help=\"partition number\")\n parser.add_argument(\"--test-batch-size\", type=int, default=1000,\n help=\"test batch size\")\n parser.add_argument(\"--rnd-seed\", type=int, default=3,\n help=\"number of epoch of doing inference on validation\")\n parser.add_argument(\"--use-pp\", action='store_true',\n help=\"whether to use precomputation\")\n parser.add_argument(\"--normalize\", action='store_true',\n help=\"whether to use normalized feature\")\n parser.add_argument(\"--save_results\", action='store_true')\n parser.add_argument(\"--fig-dir\", type=str, default='../report/example_pic/')\n parser.add_argument(\"--fig-name\", type=str, default='name')\n parser.add_argument(\"--use-f1\", action='store_true')\n args = parser.parse_args()\n\n assert (args.n_hidden % args.num_subnet) == 0\n\n # set all the random seeds\n print('Setting seeds', flush=True)\n torch.manual_seed(args.rnd_seed)\n np.random.seed(args.rnd_seed)\n random.seed(args.rnd_seed)\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False\n\n # set the proper GPU\n assert args.cuda_id < torch.cuda.device_count()\n device = torch.device(f'cuda:{args.cuda_id}')\n\n # initialize the distributed process group\n print(f'{args.rank} initializing process', flush=True)\n dist.init_process_group(\n backend=args.dist_backend, init_method=args.dist_url, rank=args.rank,\n world_size=args.num_subnet)\n print(f'Process spawned: {args.rank} --> {device}', flush=True)\n\n # get the data and setup the dataset\n dataset = get_data(args, device)\n (g, cluster_iterator, train_mask, val_mask, test_mask, labels,\n train_nid, in_feats, n_classes, n_edges) = dataset\n\n # 
get the main model\n ist_model = DistributedGNNWrapper(args, g, in_feats, n_classes, device)\n print(f'{args.rank}: start initial dispatch', flush=True)\n ist_model.ini_sync_dispatch_model()\n print(f'{args.rank}: finish initial dispatch', flush=True)\n train(\n ist_model, args, g, cluster_iterator, labels, train_mask, val_mask,\n test_mask, train_nid, device)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"cluster_gcn/cluster_gcn_ist_ultra_wide.py","file_name":"cluster_gcn_ist_ultra_wide.py","file_ext":"py","file_size_in_byte":33148,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"39595998","text":"\"\"\"\n * Copyright (c) 2012-2016, Nic McDonald\n * All rights reserved.\n *\n * Redistribution and use in source and binary forms, with or without\n * modification, are permitted provided that the following conditions are met:\n *\n * - Redistributions of source code must retain the above copyright notice, this\n * list of conditions and the following disclaimer.\n *\n * - Redistributions in binary form must reproduce the above copyright notice,\n * this list of conditions and the following disclaimer in the documentation\n * and/or other materials provided with the distribution.\n *\n * - Neither the name of prim nor the names of its\n * contributors may be used to endorse or promote products derived from\n * this software without specific prior written permission.\n *\n * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE\n * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n * POSSIBILITY OF SUCH DAMAGE.\n\"\"\"\n\n# Python 3 compatibility\nfrom __future__ import (absolute_import, division,\n print_function, unicode_literals)\nimport copy\nimport math\nimport numpy\nimport percentile\n\n\ndef maxNoInfinity(v):\n m = float('inf')\n for t in v:\n if not math.isinf(t):\n if math.isinf(m):\n m = t\n else:\n m = max(t, m)\n return m\n\n\n# a class to represent a stats file in a 2d grid in CSV format\nclass GridStats(object):\n\n def __init__(self, filename):\n self.filename = filename\n with open(filename) as fd:\n lines = fd.readlines()\n rows = []\n for line in lines:\n cols = line.split(',')\n cols = [x.strip() for x in cols]\n rows.append(cols)\n\n self._map = {}\n headerRow = None\n for ridx, row in enumerate(rows):\n if ridx == 0:\n headerRow = row\n continue\n rowType = None\n for cidx, col in enumerate(row):\n if cidx == 0:\n rowType = col\n self._map[rowType] = {}\n else:\n try:\n val = int(col)\n except ValueError:\n val = float(col)\n self._map[rowType][headerRow[cidx]] = val\n\n def get(self, row, col):\n try:\n return self._map[row][col]\n except:\n return float('inf')\n\n\n# a class to represent latency stats\nclass LatencyStats(object):\n\n class PlotBounds(object):\n def __init__(self):\n # defaults\n self.spxmin = 0\n self.spxmax = 1\n self.spymin = 0\n self.spymax 
= 1\n self.ppxmin = 0\n self.ppxmax = 1\n self.ppymin = 0\n self.ppymax = 1\n self.cpxmin = 0\n self.cpxmax = 1\n self.cpymin = 0\n self.cpymax = 1\n self.lpxmin = 0\n self.lpxmax = 1\n self.setmid()\n self.default = True\n\n def readFile(self, filename):\n grid = GridStats(filename)\n self.spymin = grid.get('spy', 'min')\n self.spymax = grid.get('spy', 'max')\n self.ppxmin = grid.get('ppx', 'min')\n self.ppxmax = grid.get('ppx', 'max')\n self.ppymin = grid.get('ppy', 'min')\n self.ppymax = grid.get('ppy', 'max')\n self.cpxmin = grid.get('cpx', 'min')\n self.cpxmax = grid.get('cpx', 'max')\n self.cpymin = grid.get('cpy', 'min')\n self.cpymax = grid.get('cpy', 'max')\n self.lpxmin = grid.get('lpx', 'min')\n self.lpxmax = grid.get('lpx', 'max')\n self.setmid()\n self.default = False\n\n def writeFile(self, filename):\n with open(filename, 'w') as fd:\n print('axis,min,max\\n'\n 'spy,{0},{1}\\n'\n 'ppx,{2},{3}\\n'\n 'ppy,{4},{5}\\n'\n 'cpx,{6},{7}\\n'\n 'cpy,{8},{9}\\n'\n 'lpx,{10},{11}\\n'\n .format(self.spymin, self.spymax,\n self.ppxmin, self.ppxmax, self.ppymin, self.ppymax,\n self.cpxmin, self.cpxmax, self.cpymin, self.cpymax,\n self.lpxmin, self.lpxmax),\n file=fd)\n\n def setmid(self):\n self.spxmid = (self.spxmax - self.spxmin) / 2\n self.spymid = (self.spymax - self.spymin) / 2\n self.ppxmid = (self.ppxmax - self.ppxmin) / 2\n self.ppymid = (self.ppymax - self.ppymin) / 2\n self.cpxmid = (self.cpxmax - self.cpxmin) / 2\n self.cpymid = (self.cpymax - self.cpymin) / 2\n self.lpxmid = (self.lpxmax - self.lpxmin) / 2\n self.default = False\n\n def greater(self, other):\n # detect defaults\n if self.default == True and other.default == True:\n return LatencyStats.PlotBounds()\n elif self.default == False and other.default == True:\n return copy.deepcopy(self)\n elif self.default == True and other.default == False:\n return copy.deepcopy(other)\n\n # both are not defaults, do comparison\n new = LatencyStats.PlotBounds()\n new.spymin = min(self.spymin, other.spymin)\n new.spymax = max(self.spymax, other.spymax)\n new.ppxmin = min(self.ppxmin, other.ppxmin)\n new.ppxmax = max(self.ppxmax, other.ppxmax)\n new.ppymin = min(self.ppymin, other.ppymin)\n new.ppymax = max(self.ppymax, other.ppymax)\n new.cpxmin = min(self.cpxmin, other.cpxmin)\n new.cpxmax = max(self.cpxmax, other.cpxmax)\n new.cpymin = min(self.cpymin, other.cpymin)\n new.cpymax = max(self.cpymax, other.cpymax)\n new.lpxmin = min(self.lpxmin, other.lpxmin)\n new.lpxmax = max(self.lpxmax, other.lpxmax)\n new.setmid()\n return new\n\n def __init__(self, filename):\n # read in raw data\n self.times = []\n self.latencies = []\n with open(filename, 'r') as fd:\n while (True):\n line = fd.readline()\n delim = line.find(',')\n if (delim >= 0):\n startTime = int(line[:delim])\n endTime = int(line[delim+1:])\n self.times.append(startTime)\n self.latencies.append(endTime - startTime)\n else:\n break\n self.times = numpy.array(self.times)\n self.latencies = numpy.array(self.latencies)\n\n # size\n self.size = len(self.times)\n if self.size > 0:\n # min and max\n self.tmin = min(self.times)\n self.tmax = max(self.times)\n self.smin = min(self.latencies)\n self.smax = max(self.latencies)\n if self.smin < 0:\n raise Exception('latencies can\\'t be negative!')\n\n # compute the probability density function\n self.pdfBins = 50\n hist, self.pdfx = numpy.histogram(self.latencies, bins=self.pdfBins)\n self.pdfy = hist.astype(float) / hist.sum()\n\n # compute the cumulative distribution function\n self.cdfx = numpy.sort(self.latencies)\n self.cdfy = 
numpy.linspace(1.0 / self.size, 1.0, self.size)\n\n            # find percentiles\n            self.p50 = self.percentile(0.50)\n            self.p90 = self.percentile(0.90)\n            self.p99 = self.percentile(0.99)\n            self.p999 = self.percentile(0.999)\n            self.p9999 = self.percentile(0.9999)\n\n        # set plot boundaries\n        self.bounds = LatencyStats.PlotBounds()\n        if self.size > 0:\n            self.bounds.spxmin = self.tmin\n            self.bounds.spxmax = self.tmax\n            self.bounds.spymin = max(self.smin, 0)\n            self.bounds.spymax = self.smax * 1.01\n            self.bounds.ppxmin = self.smin\n            self.bounds.ppxmax = self.smax\n            self.bounds.ppymin = 0\n            self.bounds.ppymax = max(self.pdfy) * 1.01\n            self.bounds.cpxmin = self.smin\n            self.bounds.cpxmax = self.smax\n            self.bounds.cpymin = 0\n            self.bounds.cpymax = 1\n            self.bounds.lpxmin = self.smin * 0.999\n            self.bounds.lpxmax = self.smax * 1.001\n            self.bounds.setmid()\n\n    def percentile(self, percent):\n        if percent < 0 or percent > 1:\n            raise Exception('percent must be between 0 and 1')\n        # clamp the index so percent == 1.0 (or rounding) cannot run past the end\n        idx = min(int(round(percent * len(self.cdfx))), len(self.cdfx) - 1)\n        return self.cdfx[idx]\n\n    def nines(self):\n        if self.size > 0:\n            nines = int(math.ceil(math.log10(len(self.cdfx))))\n        else:\n            nines = 5\n        return nines\n\n    def emptyPlot(self, axes, x, y):\n        axes.text(x, y, 'Saturated :(', clip_on=False, color='red',\n                  verticalalignment='center',\n                  horizontalalignment='center')\n\n    def scatterPlot(self, axes, showPercentiles=False, randomColors=False):\n        # format axes\n        axes.set_title('Latency scatter')\n        axes.set_xlabel('Time')\n        axes.set_ylabel('Latency')\n        axes.set_xlim(self.bounds.spxmin, self.bounds.spxmax)\n        axes.set_ylim(self.bounds.spymin, self.bounds.spymax)\n        axes.grid(True)\n\n        # detect non-empty data set\n        if self.size > 0:\n            # create plot\n            if randomColors:\n                colors = numpy.random.rand(len(self.times))\n            else:\n                colors = 'b'\n            axes.scatter(self.times, self.latencies, color=colors, s=2)\n            if showPercentiles:\n                l50, = axes.plot([self.tmin, self.tmax], [self.p50, self.p50],\n                                 c='r', linewidth=2)\n                l90, = axes.plot([self.tmin, self.tmax], [self.p90, self.p90],\n                                 c='g', linewidth=2)\n                l99, = axes.plot([self.tmin, self.tmax], [self.p99, self.p99],\n                                 c='c', linewidth=2)\n                l999, = axes.plot([self.tmin, self.tmax], [self.p999, self.p999],\n                                  c='m', linewidth=2)\n                l9999, = axes.plot([self.tmin, self.tmax], [self.p9999, self.p9999],\n                                   c='y', linewidth=2)\n        else:\n            self.emptyPlot(axes, self.bounds.spxmid, self.bounds.spymid)\n\n    def pdfPlot(self, axes, showPercentiles=False):\n        # format axes\n        axes.set_title('Probability density function')\n        axes.set_xlabel('Latency')\n        axes.set_ylabel('Probability')\n        axes.set_xlim(self.bounds.ppxmin, self.bounds.ppxmax)\n        axes.set_ylim(self.bounds.ppymin, self.bounds.ppymax)\n        axes.grid(True)\n\n        # detect non-empty data set\n        if self.size > 0:\n            # create plot\n            axes.plot(self.pdfx[:-1], self.pdfy)\n            if showPercentiles:\n                l50, = axes.plot([self.p50, self.p50], [0, 1], c='r')\n                l90, = axes.plot([self.p90, self.p90], [0, 1], c='g')\n                l99, = axes.plot([self.p99, self.p99], [0, 1], c='c')\n                l999, = axes.plot([self.p999, self.p999], [0, 1], c='m')\n                l9999, = axes.plot([self.p9999, self.p9999], [0, 1], c='y')\n                axes.legend((l50, l90, l99, l999, l9999),\n                            ('50th %ile ({0})'.format(self.p50),\n                             '90th %ile ({0})'.format(self.p90),\n                             '99th %ile ({0})'.format(self.p99),\n                             '99.9th %ile ({0})'.format(self.p999),\n                             '99.99th %ile ({0})'.format(self.p9999)),\n                            fontsize=12)\n        else:\n            self.emptyPlot(axes, self.bounds.ppxmid, self.bounds.ppymid)\n\n    def cdfPlot(self, axes, showPercentiles=False):\n        # format axes\n        axes.set_title('Cumulative distribution 
function')\n axes.set_xlabel('Latency')\n axes.set_ylabel('Probability')\n axes.set_xlim(self.bounds.cpxmin, self.bounds.cpxmax)\n axes.set_ylim(self.bounds.cpymin, self.bounds.cpymax)\n axes.grid(True)\n\n # detect non-empty data set\n if self.size > 0:\n # create plot\n axes.plot(self.cdfx, self.cdfy)\n if showPercentiles:\n axes.plot([self.p50, self.p50], [0, 0.50], c='r')\n axes.plot([self.bounds.cpxmin, self.p50], [0.50, 0.50], c='r')\n axes.plot([self.p90, self.p90], [0, 0.90], c='g')\n axes.plot([self.bounds.cpxmin, self.p90], [0.90, 0.90], c='g')\n axes.plot([self.p99, self.p99], [0, 0.99], c='c')\n axes.plot([self.bounds.cpxmin, self.p99], [0.99, 0.99], c='c')\n axes.plot([self.p999, self.p999], [0, 0.999], c='m')\n axes.plot([self.bounds.cpxmin, self.p999], [0.999, 0.999], c='m')\n axes.plot([self.p9999, self.p9999], [0, 0.9999], c='y')\n axes.plot([self.bounds.cpxmin, self.p9999], [0.9999, 0.9999], c='y')\n else:\n self.emptyPlot(axes, self.bounds.cpxmid, self.bounds.cpymid)\n\n def cdfLogPlot(self, axes, xlog=False):\n # format axes\n axes.set_title('Logarithmic cumulative distribution function')\n axes.set_xlabel('Latency')\n axes.set_ylabel('Percentile')\n axes.set_xlim(self.bounds.lpxmin, self.bounds.lpxmax)\n axes.set_yscale('percentile', nines=self.nines())\n axes.grid(True)\n if xlog:\n axes.set_xscale('log')\n\n # detect non-empty data set\n if self.size > 0:\n # create the plot\n axes.scatter(self.cdfx, self.cdfy, color='b', s=2)\n else:\n self.emptyPlot(axes, self.bounds.lpxmid, 0.999)\n\n def quadPlot(self, plt, filename, title='',\n spxmin=float('Nan'), spxmax=float('NaN'),\n spymin=float('Nan'), spymax=float('NaN'),\n ppxmin=float('Nan'), ppxmax=float('NaN'),\n ppymin=float('Nan'), ppymax=float('NaN'),\n cpxmin=float('Nan'), cpxmax=float('NaN'),\n cpymin=float('Nan'), cpymax=float('NaN'),\n lpxmin=float('Nan'), lpxmax=float('NaN')):\n if not math.isnan(spxmin):\n self.bounds.spxmin = spxmin\n if not math.isnan(spxmax):\n self.bounds.spxmax = spxmax\n if not math.isnan(spymin):\n self.bounds.spymin = spymin\n if not math.isnan(spymax):\n self.bounds.spymax = spymax\n\n if not math.isnan(ppxmin):\n self.bounds.ppxmin = ppxmin\n if not math.isnan(ppxmax):\n self.bounds.ppxmax = ppxmax\n if not math.isnan(ppymin):\n self.bounds.ppymin = ppymin\n if not math.isnan(ppymax):\n self.bounds.ppymax = ppymax\n\n if not math.isnan(cpxmin):\n self.bounds.cpxmin = cpxmin\n if not math.isnan(cpxmax):\n self.bounds.cpxmax = cpxmax\n if not math.isnan(cpymin):\n self.bounds.cpymin = cpymin\n if not math.isnan(cpymax):\n self.bounds.cpymax = cpymax\n\n if not math.isnan(lpxmin):\n self.bounds.lpxmin = lpxmin\n if not math.isnan(lpxmax):\n self.bounds.lpxmax = lpxmax\n\n self.bounds.setmid()\n\n fig = plt.figure(figsize=(16, 10))\n ax1 = fig.add_subplot(2, 2, 1)\n ax2 = fig.add_subplot(2, 2, 2)\n ax3 = fig.add_subplot(2, 2, 3)\n ax4 = fig.add_subplot(2, 2, 4)\n\n self.scatterPlot(ax1, showPercentiles=True, randomColors=False)\n self.pdfPlot(ax2, showPercentiles=True)\n self.cdfPlot(ax3, showPercentiles=True)\n self.cdfLogPlot(ax4, xlog=False)\n\n fig.tight_layout()\n if title:\n fig.suptitle(title, fontsize=20)\n fig.subplots_adjust(top=0.92)\n fig.savefig(filename)\n\n\n # a class to represent load vs. 
latency stats\nclass LoadLatencyStats(object):\n\n    FIELDS = ['Minimum', 'Mean', 'Median', '90th%', '99th%', '99.9th%',\n              '99.99th%', '99.999th%', 'Maximum']\n\n    class PlotBounds(object):\n        def __init__(self):\n            # defaults\n            self.ymin = float('inf')\n            self.ymax = float('inf')\n            self.default = True\n\n        def load(self, ymin=float('inf'), ymax=float('inf')):\n            self.ymin = ymin\n            self.ymax = ymax\n            # bounds are still the defaults only if neither limit was supplied\n            self.default = math.isinf(ymin) and math.isinf(ymax)\n\n        def readFile(self, filename):\n            grid = GridStats(filename)\n            self.ymin = grid.get('y', 'min')\n            self.ymax = grid.get('y', 'max')\n            self.default = False\n\n        def writeFile(self, filename):\n            with open(filename, 'w') as fd:\n                print('axis,min,max\\n'\n                      'y,{0},{1}\\n'\n                      .format(self.ymin, self.ymax),\n                      file=fd)\n\n        def greater(self, other):\n            # detect defaults\n            if self.default == True and other.default == True:\n                return LoadLatencyStats.PlotBounds()\n            elif self.default == False and other.default == True:\n                return copy.deepcopy(self)\n            elif self.default == True and other.default == False:\n                return copy.deepcopy(other)\n\n            # both are not defaults, do comparison\n            new = LoadLatencyStats.PlotBounds()\n            new.ymin = min(self.ymin, other.ymin)\n            assert new.ymin <= self.ymin\n            assert new.ymin <= other.ymin\n            new.ymax = maxNoInfinity([self.ymax, other.ymax])\n            new.default = False\n            return new\n\n\n    def __init__(self, start, stop, step, grids, **kwargs):\n        # create arrays\n        load = numpy.arange(start, stop, step)\n        self.data = {'Load': load}\n        for field in LoadLatencyStats.FIELDS:\n            self.data[field] = numpy.empty(len(load), dtype=float)\n\n        # parse kwargs\n        verbose = kwargs.get('verbose', False)\n        statRow = kwargs.get('row', 'Packet')\n        if verbose:\n            print('load {0}'.format(self.data['Load']))\n            print('analyzing {0}s'.format(statRow))\n\n        assert len(grids) == len(self.data['Load']), \"wrong number of grids\"\n\n        # load data arrays\n        for idx, grid in enumerate(grids):\n            assert type(grid) == GridStats, \"'grid' elements must be GridStats\"\n            if verbose:\n                print('extracting {0}'.format(grid.filename))\n            for key in self.data.keys():\n                if key != 'Load':\n                    s = grid.get(statRow, key)\n                    if verbose:\n                        print('Load {0} {1} is {2}'.format(self.data['Load'][idx], key, s))\n                    self.data[key][idx] = s\n\n        self.bounds = LoadLatencyStats.PlotBounds()\n\n        self.bounds.ymin = min(self.data['Minimum'])\n        self.bounds.ymax = maxNoInfinity(self.data['Maximum'])\n\n    def plotAll(self, plt, filename, title='',\n                ymin=float('Nan'), ymax=float('NaN')):\n        if not math.isnan(ymin):\n            self.bounds.ymin = ymin\n        if not math.isnan(ymax):\n            self.bounds.ymax = ymax\n\n        # create figure\n        fig = plt.figure(figsize=(16, 10))\n        ax1 = fig.add_subplot(1, 1, 1)\n\n        # create a colors list from a colormap\n        lineCount = 9\n        cmap = plt.get_cmap('gist_rainbow')\n        colors = [cmap(idx) for idx in numpy.linspace(0, 1, lineCount)]\n\n        # set axis labels\n        ax1.set_xlabel('Load')\n        ax1.set_ylabel('Latency')\n\n        # plot load vs. 
latency curves\n lines = []\n for idx, field in enumerate(reversed(LoadLatencyStats.FIELDS)):\n lines.append(ax1.plot(self.data['Load'], self.data[field],\n color=colors[idx], lw=1, label=field)[0])\n\n # if given, apply title\n if title:\n ax1.set_title(title, fontsize=20)\n\n # create legend\n labels = [line.get_label() for line in lines]\n ax1.legend(lines, labels, loc='upper left', fancybox=True, shadow=True,\n ncol=1)\n\n # set plot bounds\n ax1.set_xlim(self.data['Load'][0], self.data['Load'][-1]);\n ax1.set_ylim(self.bounds.ymin, self.bounds.ymax);\n ax1.xaxis.grid(True)\n ax1.yaxis.grid(True)\n\n fig.tight_layout()\n fig.savefig(filename)\n\n @staticmethod\n def plotCompare(plt, filename, stats, field='Mean', labels=[], title='',\n ymin=float('NaN'), ymax=float('NaN')):\n # make sure the loads are all the same\n mload = stats[0].data['Load']\n for stat in stats:\n assert len(mload) == len(set(mload).intersection(stat.data['Load'])), \\\n print('{0} != {1}'.format(mload, stat.data['Load']))\n assert len(labels) == 0 or len(labels) == len(stats)\n assert field in LoadLatencyStats.FIELDS\n\n # create figure\n fig = plt.figure(figsize=(16, 10))\n ax1 = fig.add_subplot(1, 1, 1)\n\n # create a colors list from a colormap\n lineCount = len(stats)\n cmap = plt.get_cmap('gist_rainbow')\n colors = [cmap(idx) for idx in numpy.linspace(0, 1, lineCount)]\n\n # set axis labels\n ax1.set_xlabel('Load')\n ax1.set_ylabel('{0} Latency'.format(field))\n\n # plot all lines\n lines = []\n for idx, stat in enumerate(stats):\n label = None\n if len(labels) > 0:\n label = labels[idx]\n line, = ax1.plot(mload, stat.data[field], color=colors[idx], lw=1,\n label=label)\n lines.append(line)\n\n # if given, apply title\n if title:\n ax1.set_title(title, fontsize=20)\n\n # create legend\n if len(labels) > 0:\n labels = [line.get_label() for line in lines]\n ax1.legend(lines, labels, loc='upper left', fancybox=True, shadow=True,\n ncol=1)\n\n # set plot bounds\n ax1.set_xlim(stats[0].data['Load'][0], stats[0].data['Load'][-1]);\n if not math.isnan(ymin) and not math.isnan(ymax):\n ax1.set_ylim(ymin, ymax)\n elif not math.isnan(ymin):\n ax1.set_ylim(bottom=ymin)\n elif not math.isnan(ymax):\n ax1.set_ylim(top=ymax)\n ax1.grid(True)\n\n fig.tight_layout()\n fig.savefig(filename)\n","sub_path":"percentile/ssplot/ssplot/ssplot.py","file_name":"ssplot.py","file_ext":"py","file_size_in_byte":20571,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"444260562","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed May 13 12:59:25 2020\n\n@author: hankui\n\"\"\"\n\n\n#%% attempt 1\ndef superDigit0(n, k):\n \n inp = str(n)*k\n curSum = sumDigits(inp)\n curDig = str(curSum)\n \n while len(curDig) > 1:\n curSum = sumDigits(curDig)\n curDig = str(curSum)\n return int(curDig) \n\ndef sumDigits(inp):\n curSum = 0 # current sum\n for i,v in enumerate(inp):\n curSum += int(v)\n return curSum\n\n\n#%% solution 1\ndef superDigit(n, k):\n \n def add_digits(string):\n if len(string) == 1:\n return int(string)\n result = sum(int(s) for s in string)\n return add_digits(str(result))\n \n #import pdb\n #pdb.set_trace()\n start = sum([int(s) for s in str(n)]) * k\n \n return add_digits(str(start))\n\n\n#%%\nn = 123\nk = 3\n\nsuperDigit(123,3)\n\n","sub_path":"General/NotYetSolved/superDigit.py","file_name":"superDigit.py","file_ext":"py","file_size_in_byte":851,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} 
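# The "superDigit" sample above computes the digital root, which also has a
# constant-time closed form. A minimal sketch (assuming n, k >= 1; the helper
# name is illustrative, not part of the dataset):
#
#     def super_digit_closed_form(n, k):
#         total = sum(int(d) for d in str(n)) * k
#         return 1 + (total - 1) % 9  # digital root via the mod-9 identity
#
#     super_digit_closed_form(123, 3)  # -> 9, matching superDigit(123, 3)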
+{"seq_id":"15103048","text":"from Graph import DirectedGraph\r\nfrom Graph import Stack\r\n\r\ndef DFS(graph, idx, check, stack, timeStack):\r\n stack.add(idx)\r\n edges = graph.getEdge(idx)\r\n check[idx] = True\r\n for val in edges:\r\n if not check[val]:\r\n DFS(graph, val, check, stack, timeStack)\r\n\r\n x = stack.pop()\r\n timeStack.add(x)\r\n\r\n\r\ndef kosaraju(graph):\r\n check = [False] * graph.V\r\n stack = Stack()\r\n timeStack = Stack()\r\n for i in range(graph.V):\r\n if not check[i]:\r\n DFS(graph, i, check, stack, timeStack)\r\n\r\n\r\n\r\n graphT = graph.getT()\r\n check = [False] * graph.V\r\n\r\n scc = []\r\n while(timeStack.size):\r\n x = timeStack.pop()\r\n if not check[x]:\r\n stack = Stack()\r\n saveStack = Stack()\r\n DFS(graph, x, check, stack, saveStack)\r\n scc.append(saveStack.popAll())\r\n return scc\r\n\r\nif __name__ == '__main__':\r\n\r\n print(\"enter count of vertices and edges then enter source and destination of edges:\")\r\n V, E = map(int, input().split())\r\n graph = DirectedGraph(V)\r\n for i in range(E):\r\n s, d = map(int, input().split())\r\n graph.addEdge(s, d)\r\n\r\n scc = kosaraju(graph)\r\n print(scc)\r\n\r\n\"\"\"\r\n4 5\r\n0 1\r\n0 2\r\n1 3\r\n2 3\r\n3 1\r\n\r\n\"\"\"","sub_path":"graph/SCC.py","file_name":"SCC.py","file_ext":"py","file_size_in_byte":1254,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"502320132","text":"def unicode_csv_reader(unicode_csv_data, dialect=csv.excel, **kwargs):\n # csv.py doesn't do Unicode; encode temporarily as UTF-8:\n csv_reader = csv.reader(utf_8_encoder(unicode_csv_data),\n dialect=dialect, **kwargs)\n for row in csv_reader:\n # decode UTF-8 back to Unicode, cell by cell:\n yield [unicode(cell, 'utf-8') for cell in row]\n\ndef utf_8_encoder(unicode_csv_data):\n for line in unicode_csv_data:\n yield line.encode('utf-8')\n\t\nschema = avro.schema.parse(open(\"bq_table_avro.avsc\",\"rb\").read())\nSOURCE_FILE_NAME = os.environ['BQ_SOURCE_FILE_NAME']\nTARGET_FILE_NAME = os.environ['BQ_TARGET_FILE_NAME']\n\nwith codecs.open(SOURCE_FILE_NAME, 'r') as csvfile:\n\treader = unicode_csv_reader(csvfile, delimiter=',')\n\twriter = DataFileWriter(open(TARGET_FILE_NAME, \"wb\"), DatumWriter(), schema, codec='deflate')\n\tfor count, row in enumerate(reader):\n\t\tprint (count)\n\t\ttry:\n\t\t\twriter.append(\n\t\t\t\t{\"ts\": row[0],\n \t\t\t\"device\": row[1],\n \t\t\t\"co\": row[2],\n \t\t\t\"humidity\": row[3],\n \t\t\t\"light\": row[4],\n \t\t\t\"lpg\": row[5],\n \t\t\t\"motion\": row[6],\n \t\t\t\"smoke\": row[7],\n \t\t\t\"temp\": row[8]\n\t\t\t\t}\n\t\t\t)\n\t\texcept IndexError:\n\t\t\tprint (\"Bad record, skip.\")\n\twriter.close()","sub_path":"csv2Avro_schema.py","file_name":"csv2Avro_schema.py","file_ext":"py","file_size_in_byte":1234,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"26134549","text":"# -*- coding: utf-8 -*-\n'''\n Created by hushiwei on 2018/6/26\n Desc : 单个状态的概率.计算gamma值,即给定模型lambda和观测序列Q的时候,时刻t对应状态i的概率值\n'''\n\nimport numpy as np\n\nfrom MachineLearning.HMM.algorithm_implementation import common\nfrom MachineLearning.HMM.algorithm_implementation import forward_probability as forward\nfrom MachineLearning.HMM.algorithm_implementation import backward_probability as backward\n\n\ndef calc_gamma(alpha, beta, gamma):\n '''\n 根据alpha和beta的值计算单个状态的概率gamma值\n :param alpha: 前向概率值\n :param beta: 后向概率值\n :param gamma: 结果存储到gamma中\n :return:\n '''\n T = len(alpha)\n n_range = 
range(alpha.shape[1])\n tmp = np.zeros(T)\n for t in range(T):\n for i in n_range:\n tmp[i] = alpha[t][i] * beta[t][i]\n sum_alpha_beta_of_t = np.sum(tmp)\n\n # 更新gamma值\n for i in n_range:\n gamma[t][i] = tmp[i] / sum_alpha_beta_of_t\n\n\nif __name__ == '__main__':\n # 初始矩阵\n pi = np.array([0.2, 0.5, 0.3])\n # 状态转移矩阵\n A = np.array([[0.5, 0.4, 0.1], [0.2, 0.2, 0.6], [0.2, 0.5, 0.3]])\n # 发射矩阵\n B = np.array([[0.4, 0.6], [0.8, 0.2], [0.5, 0.5]])\n # 观测序列\n Q = \"白黑白白黑\"\n\n # TxN矩阵\n alpha = np.zeros((len(Q), len(A)))\n beta = np.zeros((len(Q), len(A)))\n gamma = np.zeros((len(Q), len(A)))\n\n # 开始计算\n # 1. 计算后向概率beta矩阵\n backward.calc_beta(pi, A, B, Q, beta, common.convert_obs_seq_2_index)\n print(\"后向概率beta矩阵:\\n\", beta)\n tmp = 0\n for i in range(len(A)):\n tmp += pi[i] * B[i][common.convert_obs_seq_2_index(Q, 0)] * beta[0][i]\n print(Q, \"出现的概率,后向算法--->\", tmp)\n\n # 2. 计算前向概率alpha矩阵\n forward.calc_alpha(pi, A, B, Q, alpha, common.convert_obs_seq_2_index)\n print(\"前向概率alpha矩阵:\\n\", alpha)\n print(Q, \"出现的概率,前向算法-->\", np.sum(alpha[-1]))\n\n # 3.计算gamma矩阵\n calc_gamma(alpha, beta, gamma)\n print(\"单个状态概率gamma矩阵:\\n\", gamma)\n\n # 选择每个时刻最大的概率作为预测概率\n print('各个时刻最大概率的盒子为:', end='')\n index = ['盒子1', '盒子2', '盒子3']\n for p in gamma:\n print(index[p.tolist().index(np.max(p))], end=\"\\t\")\n","sub_path":"MachineLearning/HMM/algorithm_implementation/single_state_probability_of_gamma.py","file_name":"single_state_probability_of_gamma.py","file_ext":"py","file_size_in_byte":2389,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"603397864","text":"#! /usr/bin/env python3\n\n\nimport requests, sys, webbrowser, bs4\n\n\nzmienna = (' '.join(sys.argv[1:]))\n\nprint(zmienna)\nzmienna = list(zmienna)\nprint(zmienna)\n\n\n\"\"\"\nhttps://www.amazon.de/s/ref=nb_sb_noss_2?__mk_pl_PL=%C3%85M%C3%85%C5%BD%C3%95%C3%91&url=search-alias%3Daps&field-keywords=korn%27\nhttps://www.amazon.de/s/ref=nb_sb_noss?__mk_pl_PL=%C3%85M%C3%85%C5%BD%C3%95%C3%91&url=search-alias%3Daps&field-keywords=lenovo+note+6&rh=i%3Aaps%2Ck%3Alenovo+note+6\nhttps://www.amazon.de/s/ref=nb_sb_noss_2?__mk_pl_PL=%C3%85M%C3%85%C5%BD%C3%95%C3%91&url=search-alias%3Daps&field-keywords=konsola\n\"\"\"\n\n\nprint('wyszukiwanie www.amazon.de.....')\nres = requests.get('https://www.amazon.de/s/ref=nb_sb_noss_2?__mk_pl_PL=ÅMÅŽÕÑ&url=search-alias%3Daps&field-keywords=' + ' '.join(sys.argv[1:]))\nres.raise_for_status()\n\n\n# print(res.text)\n\nplik = open('plik.html', 'wb')\nfor i in res.iter_content(100000):\n plik.write(i)\n\nprint(plik.closed)\nplik.close()\nprint(plik.closed)\n\nszukane = bs4.BeautifulSoup(res.text, \"html.parser\")\nznalezione = szukane.select('.a-spacing-small a')\n\nprint(len(znalezione))\nprint(type(znalezione))\n\nfor i in range(5):\n print(znalezione[i].get('href'))\n\n# for i in znalezione:\n# print(i)\n\n\n","sub_path":"PYTHON_AUTOMATYZACJA_NUDNYCH_ZADAN/automat_11_pomoc.py","file_name":"automat_11_pomoc.py","file_ext":"py","file_size_in_byte":1214,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"605778872","text":"'''\r\ndados = list()\r\ndados.append('pedro')\r\ndados.append(25)\r\nprint(dados)\r\n\r\npessoas = list()\r\npessoas.append(dados[:])\r\npessoas = [['pedro', 25], ['maria', 19], ['joao', 32]]\r\nprint(pessoas[0][0]) #dentro do 0 vai printar o 0\r\nprint(pessoas[1][1])\r\nprint(pessoas[2][0])\r\nprint(pessoas[1])\r\n'''\r\n\r\n'''\r\nteste = 
list()\r\nteste.append('pedro')\r\nteste.append(40)\r\ngalera=list()\r\ngalera.append(teste[:])\r\nteste[0] = 'maria'\r\nteste[1] = 22\r\ngalera.append(teste[:])\r\nprint(galera)\r\n'''\r\n\r\n'''\r\ngalera = [['joao', 19], ['ana', 33], ['joaquim', 13], ['maria', 45]]\r\nprint(galera[0])\r\nprint(galera[0][0])\r\nprint(galera[2][1])\r\n'''\r\n\r\n'''\r\ngalera = [['joao', 19], ['ana', 33], ['joaquim', 13], ['maria', 45]]\r\nfor p in galera:\r\n print(f'{p[0]} tem {p[1]} anos de idade')\r\n'''\r\n\r\n'''\r\ngalera = list()\r\ndado = list()\r\ntotmai = totmen = 0\r\nfor c in range(0, 3):\r\n dado.append(str(input('nome: ')))\r\n dado.append(int(input('idade: ')))\r\n galera.append(dado[:])\r\n dado.clear()\r\n\r\nfor p in galera:\r\n if p[1] >= 21:\r\n print(f'{p[0]} é maior de idade')\r\n totmai += 1\r\n else:\r\n print(f'{p[0]} é menor de idade]')\r\n totmen += 1\r\nprint(f'temos {totmai} maiores e {totmen} menores')\r\n'''\r\n\r\n########## DESAFIO 84 ##########\r\n'''\r\ngeral = list()\r\nnep = list()\r\nqtd = 0\r\nma = me = 0\r\nwhile True:\r\n nep.append(str(input('digite o nome: ')))\r\n nep.append(int(input('digite o peso: ')))\r\n if len(geral) == 0:\r\n ma = me = nep[1]\r\n else:\r\n if nep[1] > ma:\r\n ma = nep[1]\r\n if nep[1] < me:\r\n me = nep[1]\r\n geral.append(nep[:])\r\n nep.clear()\r\n qtd += 1\r\n o = str(input('quer continuar o cadastro? [s/n] '))\r\n if o in 'Nn':\r\n break\r\n\r\n\r\nprint(f'a quantidade de pessoas cadastradas é: {qtd}')\r\nprint(f'a pessoa mais pesada é {ma} e a mais leve é {me}')\r\n'''\r\n\r\n########## DESAFIO 85 ##########\r\n'''\r\nlista = [[], []]\r\nfor c in range(1, 8):\r\n n = int(input(f'digite o {c} valor: '))\r\n if n % 2 == 0:\r\n lista[0].append(n)\r\n else:\r\n lista[1].append(n)\r\nprint(f'a lista é: {lista}')\r\nlista[0].sort()\r\nlista[1].sort()\r\nprint(f'Par: {lista[0]}')\r\nprint(f'impar: {lista[1]}')\r\n'''\r\n\r\n########## DESAFIO 86 ##########\r\n'''\r\nmatriz = [[0, 0, 0], [0, 0, 0], [0, 0, 0]] #1º, 2º e 3º linha\r\nfor linha in range(0, 3):\r\n for coluna in range(0, 3):\r\n matriz[linha][coluna] = int(input(f'digite um valor para [{linha}][{coluna}]: '))\r\nfor linha in range(0, 3):\r\n for coluna in range(0, 3):\r\n print(f'[{matriz[linha][coluna]:^5}]', end='')\r\n print()\r\n'''\r\n\r\n########## DESAFIO 87 ###########\r\n'''\r\nmatriz = [[0, 0, 0], [0, 0, 0], [0, 0, 0]]\r\nsomapar = 0\r\nsomacol = 0\r\nsomalin = 0\r\n\r\nfor linha in range(0, 3):\r\n for coluna in range(0, 3):\r\n matriz[linha][coluna] = int(input(f'digite um valor para [{linha}][{coluna}]: '))\r\n\r\nfor linha in range(0, 3):\r\n for coluna in range(0, 3):\r\n print(f'[{matriz[linha][coluna]:^5}]', end='')\r\n print()\r\n\r\nfor linha in range(0, 3):\r\n for coluna in range(0, 3):\r\n if matriz[linha][coluna] % 2 == 0:\r\n somapar += matriz[linha][coluna]\r\n\r\nfor linha in range(0, 3):\r\n somacol += matriz[linha][2]\r\n\r\nfor coluna in range(0, 3):\r\n somalin += matriz[1][coluna]\r\n\r\nprint(f'a soma dos pares é: {somapar}')\r\nprint(f'a soma dos elementos da 3 coluna é: {somacol}')\r\nprint(f'a soma dos elementos da 2 coluna é: {somalin}')\r\n'''\r\n\r\n########## DESAFIO 88 ##########\r\n'''\r\nfrom random import randint\r\ntotal = 1\r\nsorteados = []\r\nlista = []\r\njogos = int(input('quantos jogos voce quer? 
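The aula18.py exercises above repeatedly append dado[:] / teste[:] and then clear the scratch list; the slice copy is what keeps earlier entries from being overwritten. A minimal sketch of the aliasing pitfall being avoided (standard library only):

galera = []
dado = ['pedro', 25]
galera.append(dado)        # appends a reference to the same list object
galera.append(dado[:])     # appends an independent shallow copy
dado[0] = 'maria'
print(galera)              # [['maria', 25], ['pedro', 25]] - only the alias changed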
'))\r\nwhile total <= jogos:\r\n cont = 0\r\n while True:\r\n n = randint(1, 60)\r\n if n not in lista:\r\n lista.append(n)\r\n cont += 1\r\n if cont >= 6:\r\n break\r\n lista.sort()\r\n sorteados.append(lista[:])\r\n lista.clear()\r\n total += 1\r\nprint(f'os numeros sorteados foram: {sorteados}')\r\n'''\r\n\r\n########## DESAFIO 89 ##########\r\nficha = []\r\nwhile True:\r\n nome = str(input('nome: '))\r\n nota1 = int(input('nota1: '))\r\n nota2 = int(input('nota2: '))\r\n media = (nota1 + nota2)/2\r\n ficha.append([nome, [nota1, nota2], media])\r\n resp = str(input('quer continuar? '))\r\n if resp in 'Nn':\r\n break\r\nprint(ficha)","sub_path":"aula18.py","file_name":"aula18.py","file_ext":"py","file_size_in_byte":4273,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"116872004","text":"from gym_jobshop.envs.src import environment, global_settings\nimport csv\n\n\ndef get_cost_from_current_period():\n \"\"\"\n\n :return:\n \"\"\"\n temp_wip_cost = 0\n temp_overtime_cost = 0\n temp_fgi_cost = 0\n temp_late_cost = 0\n total_cost_this_period = 0\n ################### Measure cost for shopfloor and overtime (machines + WIP inventories):\n for wip in environment.list_of_all_wip_elements:\n temp_wip_cost += len(wip) * global_settings.cost_per_item_in_shopfloor\n for machine in environment.list_of_all_machines:\n if len(machine.orders_inside_the_machine) > 0:\n temp_wip_cost += len(machine.orders_inside_the_machine) * \\\n global_settings.cost_per_item_in_shopfloor\n # Measure overtime cost on bottleneck machine\n if global_settings.processing_times_multiplier > 1: # only if overtime is active in this period\n if machine.name == environment.bottleneck_machine.name:\n temp_overtime_cost += global_settings.cost_per_overtime_period * global_settings.processing_times_multiplier\n\n ################### Measure cost for finished goods inventory:\n temp_fgi_cost = len(environment.finished_goods_inventory) * global_settings.cost_per_item_in_fgi\n\n ################### Measure cost for late goods (= backorder cost):\n temp_late_cost = global_settings.temp_sum_of_late_orders_this_period * global_settings.cost_per_late_item\n\n # Measure total cost for this period\n total_cost_this_period = temp_wip_cost + temp_overtime_cost + temp_fgi_cost + temp_late_cost\n\n global_settings.temp_sum_of_late_orders_this_period = 0 # reset the count of late orders until the next period's end\n\n return [total_cost_this_period, temp_wip_cost, temp_overtime_cost, temp_fgi_cost, temp_late_cost]\n\n\ndef update_total_cost():\n \"\"\"\n Logic for measuring costs:\n Once at the end of every period (after orders have been released, processed and shipped) we update the cost.\n The cost that incurred in the past period will be added to the sum of the respective cost (e.g. we\n add all cost from FGI inventories from the past period to the sum of all FGI costs and so on)\n The cost is calculated by multiplying a given cost factor (see global_settings.py) with the amount of orders\n in the respective inventory (e.g. 
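Desafio 88 above builds each unique 6-number game with a randint loop plus a membership check. random.sample draws without replacement directly, so the duplicate check disappears; a minimal sketch (function name and defaults are mine):

import random

def sortear_jogos(qtd_jogos, numeros=60, por_jogo=6):
    # random.sample draws without replacement: no "if n not in lista" needed
    return [sorted(random.sample(range(1, numeros + 1), por_jogo))
            for _ in range(qtd_jogos)]

print(sortear_jogos(3))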
we have 5 orders in FGI and the cost factor is 4, then the cost for that period\n is 20)\n :return: return nothing\n \"\"\"\n global_settings.temp_cost_this_period = 0\n\n all_costs_from_this_period = get_cost_from_current_period()\n\n ################### Update total cost for shopfloor (machines + WIP inventories):\n global_settings.sum_shopfloor_cost += all_costs_from_this_period[1]\n\n ################### Update total cost for finished goods inventory:\n global_settings.sum_fgi_cost += all_costs_from_this_period[3]\n\n ################### Update total cost for late goods (= backorder cost) in the last step of simulation:\n global_settings.sum_lateness_cost += all_costs_from_this_period[4]\n\n ################### Update total cost for overtime:\n global_settings.sum_overtime_cost += all_costs_from_this_period[2]\n\n ################### Update total cost:\n global_settings.total_cost += all_costs_from_this_period[0]\n global_settings.temp_cost_this_period = all_costs_from_this_period[0]\n return\n\n\ndef reset_all_costs():\n global_settings.total_cost = 0\n global_settings.sum_shopfloor_cost = 0\n global_settings.sum_fgi_cost = 0\n global_settings.sum_lateness_cost = 0\n global_settings.sum_overtime_cost = 0\n return\n\n\ndef measure_bottleneck_utilization():\n if len(environment.bottleneck_machine.orders_inside_the_machine) > 0:\n global_settings.bottleneck_utilization_per_step += 1\n return\n\n\ndef utilization_per_step(): # this appends to the steps.csv file\n raise NotImplementedError(\"Function utilization_per_step() is not properly implemented\")\n amount_of_active_machines = 0\n for machine in environment.list_of_all_machines:\n if len(machine.orders_inside_the_machine) > 0:\n amount_of_active_machines += 1\n utilization = amount_of_active_machines / 6\n # Append results to CSV file\n with open('../steps.csv', mode='a') as steps_CSV:\n results_writer = csv.writer(steps_CSV, delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL)\n results_writer.writerow([global_settings.current_time,\n len(environment.wip_A), len(environment.wip_B), len(environment.wip_C),\n len(environment.wip_D),\n len(environment.wip_E), len(environment.wip_F),\n len(environment.machine_A.orders_inside_the_machine),\n len(environment.machine_B.orders_inside_the_machine),\n len(environment.machine_C.orders_inside_the_machine),\n len(environment.machine_D.orders_inside_the_machine),\n len(environment.machine_E.orders_inside_the_machine),\n len(environment.machine_F.orders_inside_the_machine), utilization\n ])\n steps_CSV.close()\n return\n\n\ndef measure_order_flow_times():\n raise NotImplementedError(\"Function measure_order_flow_times() is not properly implemented\")\n list_of_earliness_per_order = []\n list_of_flow_time_per_order = []\n # Create CSV file to store results after each iteration\n with open('../orders_' + str(global_settings.random_seed) + '.csv', mode='w') as orders_CSV:\n results_writer = csv.writer(orders_CSV, delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL)\n results_writer.writerow(['OrderID', 'product_type', 'creation_date', 'order_release_date',\n 'arrival_m1', 'arrival_wip_step2', 'arrival_m_step_2',\n 'arrival_wip_step_3', 'arrival_m_step_3', 'finished_production_date',\n 'due_date', 'shipping_date', 'lateness', 'earliness', 'flow_time'])\n for order_element in environment.shipped_orders:\n order_element.arrvival_m1 = order_element.arrival_times_m1m2m3[0]\n order_element.arrival_prodstep_2_m = order_element.arrival_times_m1m2m3[1]\n order_element.arrival_prodstep_3_m = 
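The utilization_per_step function below in this record appends one row per simulation step to steps.csv. A minimal sketch of that append pattern, adding newline='' (which the record omits and which prevents csv.writer from emitting blank lines on Windows); path and columns here are hypothetical:

import csv

def append_step_row(path, step, utilization):
    # newline='' stops csv.writer from double-spacing rows on Windows
    with open(path, mode='a', newline='') as f:
        writer = csv.writer(f, delimiter=',', quotechar='"',
                            quoting=csv.QUOTE_MINIMAL)
        writer.writerow([step, utilization])

append_step_row('steps.csv', 0, 5 / 6)   # hypothetical values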
order_element.arrival_times_m1m2m3[2]\n results_writer.writerow([\n order_element.orderID, order_element.product_type,\n order_element.creation_date, order_element.order_release_date,\n order_element.arrvival_m1, order_element.arrival_prodstep_2_wip,\n order_element.arrival_prodstep_2_m, order_element.arrival_prodstep_3_wip,\n order_element.arrival_prodstep_3_m,\n order_element.finished_production_date, order_element.due_date,\n order_element.shipping_date, order_element.lateness,\n order_element.earliness, order_element.flow_time\n ])\n list_of_earliness_per_order.append(order_element.earliness)\n list_of_flow_time_per_order.append(order_element.flow_time)\n\n # Append average results to the end of the CSV\n # global_settings.average_earliness_of_all_orders = statistics.mean(list_of_earliness_per_order)\n # global_settings.average_flow_time_of_all_orders = statistics.mean(list_of_flow_time_per_order)\n # results_writer.writerow(['avg_earliness','avg_lateness', 'avg_flow_time'])\n # results_writer.writerow([global_settings.average_earliness_of_all_orders,'avg_lateness', global_settings.average_flow_time_of_all_orders])\n orders_CSV.close()\n return\n","sub_path":"gym-jobshop/gym_jobshop/envs/src/performance_measurement.py","file_name":"performance_measurement.py","file_ext":"py","file_size_in_byte":7789,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"645475667","text":"import turtle\nimport random\n\nTURTLE_SPEED = 10\nMOVING_DISTANCE = 20\n\nxEndPoint = 340\nyEndPoint = 320\n\nEAST = 0\nNORTH = 90\nWEST = 180\nSOUTH = 270\n\nremainingX = xEndPoint/MOVING_DISTANCE * 2\nremainingY = yEndPoint/MOVING_DISTANCE * 2\n\nforwardX = 0\nbackwardX = 0\n\nwn = turtle.Screen()\nturtle = turtle.Turtle()\n\nturtle.setpos(-xEndPoint, -yEndPoint)\nturtle.clear()\n\nturtle.shape(\"turtle\")\nturtle.speed(TURTLE_SPEED)\n\ndef moveTurtle(turtle, direction):\n turtle.stamp()\n\n if turtle.heading() != direction:\n turtle.setheading(direction)\n\n\n global remainingY\n global remainingX\n global forwardX\n global backwardX\n \n if direction == NORTH:\n newY = remainingY - 1\n remainingY = newY\n elif direction == EAST:\n newX = remainingX - 1\n remainingX = newX\n newForX = forwardX + 1\n forwardX = newForX\n\n if backwardX > 0:\n newBackX = backwardX - 1\n backwardX = newBackX\n \n \n elif direction == WEST:\n newX = remainingX + 1\n remainingX = newX\n \n newBackX = backwardX + 1\n backwardX = newBackX\n newForX = forwardX - 1\n forwardX = newForX\n \n else:\n newY = remainingY + 1\n remainingY = newY\n\n turtle.forward(MOVING_DISTANCE)\n\n\ndef moveDir(turtle, direction):\n if turtle.heading() != direction:\n turtle.setheading(direction)\n\n turtle.forward(MOVING_DISTANCE)\n global remainingY\n newY = remainingY - 1\n remainingY = newY\n\n \ndef randomDirection():\n if remainingX > 0 and remainingY > 0:\n if turtle.xcor() < 0 and forwardX > backwardX:\n randomInt = random.randrange(0,3)\n else:\n randomInt = random.randrange(0,2)\n\n if randomInt == 0:\n return NORTH\n elif randomInt == 1 and turtle.heading() != WEST:\n return EAST\n elif randomInt == 2 and turtle.heading() != EAST:\n return WEST\n \n\ndef main():\n drawEnd = False\n while drawEnd == False:\n if turtle.xcor() < xEndPoint and turtle.ycor() < yEndPoint:\n randDir = None\n while randDir == None:\n randDir = randomDirection()\n \n moveTurtle(turtle, randDir)\n \n elif turtle.xcor() < xEndPoint:\n moveTurtle(turtle, EAST)\n elif turtle.ycor() < yEndPoint:\n moveTurtle(turtle, NORTH)\n 
else:\n drawEnd = True\n\n \nmain()\n \n","sub_path":"pathCreator.py","file_name":"pathCreator.py","file_ext":"py","file_size_in_byte":3033,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"611193059","text":"\n# coding: utf-8\n\n# # Genetic Algorithm Root Finder Main Notebook\n\n# ## Imports\n\n# In[1]:\n\nimport scipy as sp\nfrom scipy import optimize\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom operator import or_\nimport functools\nfrom prettytable import PrettyTable\nfrom subnb.Evolve import evolve\nget_ipython().magic('matplotlib inline')\n\n\n# ## Define a Test Function\n\n# In[2]:\n\nfunction = lambda x: x**2 - 10\nfunction = lambda x: sp.special.jv(0,x)\n# function = lambda x: np.exp(x) - sp.special.gamma(x+1)\nplt.figure(figsize=(8,4))\nxmax = 4\nxs = np.linspace(-xmax,xmax, 500)\nfs = function(xs)\nplt.plot(xs, fs)\nplt.plot([-xmax, xmax], [0, 0], 'k--', alpha=.5)\nplt.xlabel('x');plt.ylabel('f(x)');plt.title('Plot of function, f(x)')\nplt.show()\n\n\n# ## The Numerical Root Finders\n\n# In[ ]:\n\n\n\n\n# ## The Genetic Algorithms\n\n# ### GA_1\n\n# In[3]:\n\ndef plotting(history, fun, a, b, maxgen):\n plt.rcParams.update({'font.size': 20})\n r, n = None, 0\n while r is None:\n try:\n r = optimize.brentq(function, a, b, maxiter=n)\n except RuntimeError as e:\n n += 1\n plt.figure(figsize=(16,8))\n x, y = zip(*list(history.items()))\n ynew = list(map(lambda l: sum(l)/float(len(l)), y))\n yerr = list(map(lambda l: np.std(l)/float(len(l)), y))\n plt.errorbar(x, ynew, yerr, lw=2, label='GA')\n roots = [optimize.brentq(function, a, b, maxiter=i, disp=False) for i in range(maxgen + 1)]\n root = roots[-1]\n# plt.plot(range(maxgen + 1), roots, c='g', label='Optimizer')\n \n plt.plot([0, maxgen], [root, root], 'g--', lw=1, label=\"Optimizer root = {}\".format(root))\n plt.axvline(n, alpha=.5, c='g', label='Minimum iterations required by Optimizer')\n plt.legend(loc=4);plt.xlabel('Generation/Iteration');plt.ylabel('Root value')\n plt.title('Convergence Comparison of Roots for Function f(x)')\n plt.show()\n# plotting(history, function)\n\n\n# In[67]:\n\ndef run_GA_1(hist=None):\n import subnb.GA_1\n a, b, maxgen, initsize = 0, 4, 20, 50\n ga_runner = subnb.GA_1.GA_1\n history = hist\n if hist is None: history = dict()\n \n best, history = evolve(function, a, b, ga_runner, maxgen, initsize, history)\n best = float(next(iter(best)))\n root = optimize.brentq(function, a, b, maxiter=maxgen, disp=False)\n t = PrettyTable(['Item', 'Value']);t.align['Value'] = \"l\"\n t.add_row(['GA Root', best]); t.add_row(['Optimizer Root', root])\n aerr = abs(best - root)\n t.add_row(['Absolute Error', aerr]);t.add_row(['Relative Error', aerr/root])\n aerrs.append(aerr)\n rerrs.append(aerr/root)\n \n print('Absolute Error STD', np.std(aerrs))\n print('Relative Error STD', np.std(rerrs))\n print('Absolute Error Mean', np.mean(aerrs))\n print('Relative Error Mean', np.mean(rerrs))\n print();print(t)\n print('\\nHistory data\\n',history)\n plotting(history, function, a, b, maxgen)\n return root, history\n\n\n# In[69]:\n\n# aerrs, rerrs = [], []\n\n\n# In[117]:\n\nroot, history = run_GA_1(history);\n\n\n# In[120]:\n\nplt.rcParams.update({'font.size': 10})\nplt.hist([aerrs, rerrs], 15, label=['Abs Err', 'Rel Err'])\nplt.legend()\nplt.show()\n\n\n# ##Testing\n\n# In[1]:\n\ndef tests(a, b):\n pass\n\n\n# In[7]:\n\n\n\n\n# In[ 
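The GA notebook record above benchmarks the genetic algorithm's root against scipy.optimize.brentq on a Bessel function. A standalone sketch of that baseline call on the same test function:

from scipy import optimize, special

f = lambda x: special.jv(0, x)      # same J0 test function as the notebook
root = optimize.brentq(f, 0, 4)     # bracket [0, 4] changes sign, as brentq requires
print(root, f(root))                # ~2.404825..., residual ~0

brentq needs f(a) and f(b) to have opposite signs; J0(0) = 1 and J0(4) is negative, so the first zero of J0 is bracketed.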
]:\n\n\n\n","sub_path":"GA-Root-Finding-Notebook.py","file_name":"GA-Root-Finding-Notebook.py","file_ext":"py","file_size_in_byte":3193,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"25643152","text":"\"\"\"Common functions for all transcoder modules.\"\"\"\nfrom xml.etree import ElementTree as ETree\n\nfrom timecode import timecode_to_seconds\n\ndef _valida_campo(value, rule):\n \"\"\"Validates a specific value, making sure it conforms to a specific rule\n\n Args:\n value (str): the field that needs validation\n rule (str): a description of the condition\n \"\"\"\n\n if rule == 'not':\n return bool(value)\n elif rule == '>t':\n return timecode_to_seconds(value) > 0\n else:\n return False\n\n\ndef valida_dados_fork(metadata, fields):\n \"\"\"Validates the metadata values against the conditions in the fields parameter\n Returns the metadata fields that didn't validate.\n\n Args:\n metadata (dict): {nome_do_campo: valor,}\n fields (dict): {nome_do_campo: (operador, valor),}\n\n Returns:\n dict - {nome_do_campo_invalido: razão_da_invalidação}\n \"\"\"\n campos_saida = {}\n\n for campo, condicao in fields.items():\n if campo in metadata.keys():\n if not _valida_campo(metadata[campo], condicao[0]):\n campos_saida[campo] = 'Campo vazio.'\n else:\n campos_saida[campo] = 'Campo inexistente'\n return campos_saida\n\n\ndef invalid_metadata(filename, fields):\n \"\"\"Wrapper function that validates fork metadata in a xml file.\n\n Args:\n filename (str): filename of the fork xml file\n fields (dict): {nome_do_campo: (operador, valor),}\n\n Returns:\n dict: {nome_do_campo_invalido: razão_da_invalidação}\n \"\"\"\n metadados = _transform_fork_metadata(_get_clip_element(filename))\n return valida_dados_fork(metadados, fields)\n\n\ndef _get_clip_element(xml_file):\n \"\"\"Abre o arquivo xml e retorna o primeiro elemento clip encontrado, que deve conter as informações da mídia do Fork\n\n Args:\n xml_file (str): filename do arquivo XML\n\n Returns:\n xml.etree.ElementTree.Element: Elemento 'clip' do arquivo XML\n \"\"\"\n root = ETree.parse(xml_file).getroot()\n\n if root.tag != 'FORKImport':\n raise ValueError('O arquivo XML não parece ser um arquivo FORK válido')\n root = root.find('clip')\n if root:\n return root\n else:\n raise ValueError('O arquivo XML não parece ser um arquivo FORK válido')\n\n\ndef _transform_fork_metadata(clip_element):\n \"\"\"Transforma o xml do fork em um dict simples no formato {chave: valor}, linear, sem os nós \"properties\"\n\n Args:\n root_element (xml.etree.Element): Elemento clip do xml de metadados do fork?\n\n Returns:\n dict: Fork metadata simplified to one level dict\n \"\"\"\n clip_data = {}\n if clip_element.tag != 'clip':\n raise ValueError('O elemento passado precisa ser um nó \"clip\"')\n else:\n for node in clip_element:\n if node.tag == 'properties':\n clip_data.update(_parse_properties(node))\n else:\n clip_data[node.tag] = node.text\n return clip_data\n\n\ndef get_fork_metadata(filename):\n \"\"\"Transforms the metadata inside a fork xml file into python dict\n\n Args:\n filename (str): The filename for the Forkr metadata file\n\n Returns:\n dict: the Fork metadata turned into a dict\n \"\"\"\n return _transform_fork_metadata(_get_clip_element(filename))\n\n\ndef _parse_properties(node):\n \"\"\"Função que transforma o esquema de properties do XML do Fork em um par {chave: valor}\n\n Args:\n node (xml.etree.ElementTree.Element): Fork XML properties node\n\n Returns:\n dict: decoded node into python 
dict {nome: valor}\n \"\"\"\n if isinstance(node, ETree.Element):\n if node.tag != 'properties':\n raise ValueError('O elemento passado precisa ser um nó \"properties\"')\n elif node.tag:\n data = {}\n for i in range(len(node)):\n data[node[i][0].text] = node[i][1].text\n return data\n else:\n raise ValueError('O parâmetro precisa ser um Elemento do XML')\n\n","sub_path":"transcoder/fork.py","file_name":"fork.py","file_ext":"py","file_size_in_byte":3984,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"404913596","text":"'''\nCreated on 2010-8-23\n\n@author: pear\n'''\nfrom logconfdb import LogConfDb\nfrom logthread import LogThread\nfrom abspath import AbsPath\nfrom wxevtmastframe import EMFrame\nimport wx, os, sys\nfrom BaseHTTPServer import HTTPServer\nfrom CGIHTTPServer import CGIHTTPRequestHandler\n#import time, syslog\n\nWEB_PORT = 8000\n\nclass ControlMgmt():\n def __init__(self):\n# syslog.openlog(\"huhu\")\n# syslog.syslog(\"start test2\") \n self.logMonThList = {}\n self.buildListFromConf()\n# time.sleep(20)\n# syslog.syslog(\"stop test2\") \n# self.logMonThList['test2'].stop()\n \n def runService(self):\n self.startLogMonitor()\n self.startSysAdmin()\n \n def startLogMonitor(self):\n for eachLogMon in self.logMonThList:\n self.logMonThList[eachLogMon].start()\n \n def startSysAdmin(self):\n app = wx.PySimpleApp()\n emFrame = EMFrame(size=(640, 480))\n emFrame.Centre()\n# emFrame.Show()\n app.MainLoop()\n \n def buildListFromConf(self):\n logConfDb = LogConfDb()\n devicesConf = logConfDb.getConf()\n for eachDevice in devicesConf:\n logThread = LogThread(logName = eachDevice, logPath = devicesConf[eachDevice]['logPath'], hostName = devicesConf[eachDevice]['hostName'], port = devicesConf[eachDevice]['port'])\n self.logMonThList[eachDevice] = logThread\n\nif __name__ == '__main__':\n controlMgmt = ControlMgmt()\n controlMgmt.runService()","sub_path":"project/python/dcux-evtMastClient/src/libs/controlmgmt.py","file_name":"controlmgmt.py","file_ext":"py","file_size_in_byte":1494,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"260715316","text":"from bs4 import BeautifulSoup\nimport requests\nimport warnings\nimport json\nimport re\n\nwarnings.filterwarnings(\"ignore\", category=UserWarning, module='bs4')\n\nf = open(\"result.json\", \"w\")\njsonlocations = []\n\nurl = (\"https://apps.studentaffairs.cmu.edu/dining/conceptinfo/?page=conceptDetails&conceptId=\")\n\n# ---------------------- Looping through all the places -----------------------\n#70-140\nfor placeid in range(70, 140):\n\n #print(placeid)\n\n # ---------------------- Create the URL -----------------------------------\n\n url2 = url + str(placeid)\n r = requests.get(url2)\n #print(r.status_code)\n # ---------------------- Page not found -----------------------------------\n if r.status_code != 200:\n print(\"SKIP\")\n continue\n\n # ---------------------- Making the beautiful soup ------------------------\n data = r.text\n soup = BeautifulSoup(data)\n\n # ---------------------- Obtaining name of the place, skip if empty -------\n h1s = soup.find('h1')\n if h1s == None:\n continue\n\n place = str(h1s.text.encode('utf-8'))[2:-1]\n if place == \"\":\n continue\n\n # ---------------------- Obtaining location of the place ------------------\n loc = soup.find('div', {'class': 'location'})\n location = str(loc.a.text.encode('utf-8'))[2:-1].strip()\n #print(location)\n\n # ---------------------- Obtaining coordinates of 
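The dining-hall scraper in the record below pulls the venue name from a div.location anchor and parses latitude/longitude out of the @lat,lng segment of a Google Maps href. A minimal sketch of that extraction on a hypothetical snippet shaped like the page the scraper expects:

from bs4 import BeautifulSoup

html = ('<div class="location">'
        '<a href="https://maps.example/@40.4433,-79.9436,17z">Resnik House</a>'
        '</div>')   # hypothetical markup
soup = BeautifulSoup(html, 'html.parser')
loc = soup.find('div', {'class': 'location'})
lat, lng = loc.a['href'].split('@')[1].split(',')[:2]
print(loc.a.text.strip(), float(lat), float(lng))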
the place ---------------\n location_url = loc.a['href']\n\n at_index = location_url.index('@')\n location_url = location_url[at_index+1:]\n\n comma_index = location_url.index(',')\n x_coord = location_url[0: comma_index]\n location_url = location_url[comma_index+1:]\n\n comma_index = location_url.index(',')\n y_coord = location_url[0: comma_index]\n\n x_coord = float(x_coord)\n y_coord = float(y_coord)\n\n # ---------------------- Obtaining description of the place ---------------\n desc = soup.find('div', {'class': 'description'}).find(text=True,\n recursive=False)\n description = str(desc.encode('utf-8'))[3:-1]\n\n # ---------------------- Initializing variable for timings ----------------\n tim = soup.find('ul', {'class': 'schedule'})\n timingstrings = []\n\n # ---------------------- Obtaining the timing strings ---------------------\n #THE FOLLOWING SCRIPT SHOULD ONLY BE RUN ON SUNDAY\n\n remove = ['\\\\xc2','\\\\xa0','\\\\r','\\\\n']\n for child in tim.children:\n #print(child)\n tmp = 0\n for grandchild in child:\n #print(\"___\")\n if tmp == 2:\n all_times = str(grandchild.encode('utf-8'))[3:-1]\n for character in remove:\n all_times = all_times.replace(character,\"\")\n days = all_times.split(\",\")\n for i in range(len(days)):\n days[i] = days[i].replace(\" \",\"\")\n \n #print(days)\n timingstrings.append(days[1::])\n tmp += 1\n #print(timingstrings)\n # ---------------------- Parsing timings into JSON ------------------------\n jsontime = []\n for i in range(len(timingstrings)):\n for times in timingstrings[i]:\n #print(times)\n if times == \"CLOSED\":\n continue\n if times == \"24hours\":\n tmpjson = {\n \"start\":\n {\n \"day\": (i)%7,\n \"hour\": 0,\n \"min\": 0\n },\n \"end\":\n {\n \"day\": (i)%7,\n \"hour\": 23,\n \"min\": 59\n }\n }\n jsontime.append(tmpjson)\n continue\n\n time_split = times.split(\"-\")\n start_time = time_split[0]\n end_time = time_split[1]\n\n start_pm_flag = False\n start_time_json = []\n end_time_json = []\n\n if start_time[-2::] == \"AM\":\n hour_min = start_time.replace(\"AM\",\"\").split(\":\")\n start_time_json.append((i)%7)\n start_time_json.append(int(hour_min[0]))\n start_time_json.append(int(hour_min[1]))\n if start_time[-2::] == \"PM\":\n start_pm_flag = True\n hour_min = start_time.replace(\"PM\",\"\").split(\":\")\n start_time_json.append((i)%7)\n start_time_json.append(int(hour_min[0]) + 12)\n start_time_json.append(int(hour_min[1]))\n if end_time[-2::] == \"AM\":\n hour_min = end_time.replace(\"AM\",\"\").split(\":\")\n if start_pm_flag:\n end_time_json.append((i+1)%7)\n else:\n end_time_json.append((i)%7)\n end_time_json.append(int(hour_min[0]))\n end_time_json.append(int(hour_min[1]))\n if end_time[-2::] == \"PM\":\n hour_min = end_time.replace(\"PM\",\"\").split(\":\")\n end_time_json.append((i+2)%7)\n end_time_json.append(int(hour_min[0]) + 12)\n end_time_json.append(int(hour_min[1]))\n \n #print(start_time_json)\n #print(end_time_json)\n \n tmpjson = {\n \"start\":\n {\n \"day\": start_time_json[0],\n \"hour\": start_time_json[1],\n \"min\": start_time_json[2]\n },\n \"end\":\n {\n \"day\": end_time_json[0],\n \"hour\": end_time_json[1],\n \"min\": end_time_json[2]\n }\n }\n jsontime.append(tmpjson)\n\n #print(jsontime)\n # ---------------------- Parsing one place into JSON ----------------------\n jsonplace = {\n \"name\": place,\n \"description\": description,\n \"keywords\": [\"\"],\n \"location\": location,\n \"coordinates\": {\n \"lat\": x_coord,\n \"lng\": y_coord\n },\n \"times\": jsontime\n }\n\n # ---------------------- 
Add to locations JSON ----------------------------\n jsonlocations.append(jsonplace)\n\n\njsondata = {\n \"locations\": jsonlocations\n}\nprint(jsondata)\njsonfinal = json.dump(jsondata, f)\n#print(jsonfinal)\n","sub_path":"updated_dining_parser.py","file_name":"updated_dining_parser.py","file_ext":"py","file_size_in_byte":6377,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"372929469","text":"# @author Jinal Shah\n\n# This is a sms test/template to send texts/sms:\n# Twilio is a python package that can allow you to send text messages using code\nfrom twilio.rest import Client\n\n# Your Account SID from twilio.com/console\naccount_sid = \"Your Account SID goes here\"\n# Your Auth Token from twilio.com/console\nauth_token = \"Your Auth Token Goes here\"\n\nclient = Client(account_sid, auth_token)\n\nmessage = client.messages.create(\n to=\"Number you have applied in twilio(should be your own cell phone number)\",\n from_=\"twilio number\",\n body=\"Hello from Python!\")\n\nprint(message.sid)\n","sub_path":"Backend/Sending_sms_template.py","file_name":"Sending_sms_template.py","file_ext":"py","file_size_in_byte":590,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"569379339","text":"import os\nimport table_parser\nimport urllib3\n\n\nJOBS_DB = '..\\\\output\\\\jobs.tab'\nRAW_HTML_FILES_DIR = '..\\\\downloads\\\\'\nJOBS_TABLE_HTML_FILE_PREFIX = 'out'\nINDIVIDUAL_JOB_HTML_FILE_PREFIX = 'job_'\nWEBSITE_PREFIX = 'http://jobs.intel.com'\nSEPARATER = '\\t'\n\n\ndef download_jobs_list(save_file_addr, raw_html_files):\n jobs_db = open(save_file_addr, 'w')\n ignores_list = ['http://jobs.intel.com/ListJobs/All/sortasc-jobtitle/']\n parser = table_parser.JobsTableParser(ignores_list)\n\n files_list = os.listdir(raw_html_files)\n for file in files_list:\n if file.startswith(JOBS_TABLE_HTML_FILE_PREFIX):\n f = open(raw_html_files + file)\n htmlTxt = f.read()\n parser.feed(htmlTxt)\n\n jobs_db.writelines(parser.get_txt_dump(SEPARATER))\n jobs_db.flush()\n jobs_db.close()\n\n\ndef load_jobs_list(load_file_addr):\n f = open(load_file_addr)\n lines = f.readlines()\n jobs = []\n\n for line in lines:\n job = ()\n for element in line.split(SEPARATER):\n job += element,\n jobs += [job]\n\n return jobs\n\n\ndef download_all_individual_jobs(jobs_db):\n http = urllib3.PoolManager()\n for job in jobs_db:\n job_id = job[2].split(' - ')[0]\n if not os.path.exists(RAW_HTML_FILES_DIR +\n INDIVIDUAL_JOB_HTML_FILE_PREFIX +\n job_id + '.html'):\n url_split = job[1].split('/')\n url = (WEBSITE_PREFIX + url_split[0] + '/' + url_split[1] +\n '/' + url_split[2] + '/' + url_split[3])\n print('Requesting: ' + url)\n try:\n req = http.request('GET', url)\n f = open(RAW_HTML_FILES_DIR + INDIVIDUAL_JOB_HTML_FILE_PREFIX +\n job_id + '.html', 'w')\n f.write(str(req.data))\n f.flush()\n f.close()\n except:\n print('Job download failed: ' + WEBSITE_PREFIX + job[1])\n else:\n print('Duplicate job ID: ' + job_id)\n # req = [http.request('GET', WEBSITE_PREFIX + job[1]) for job in jobs_db]\n # page_text = [str(r.data) for r in req]\n\n\nif __name__ == '__main__':\n jobs_db = []\n\n # First establish a raw jobs list DB.\n if os.path.exists(JOBS_DB):\n jobs_db = load_jobs_list(JOBS_DB)\n else:\n download_jobs_list(JOBS_DB, RAW_HTML_FILES_DIR)\n\n # Once simple jobs DB is established, download individual jobs description\n # pages, and parse it.\n download_all_individual_jobs(jobs_db)\n # print(len(jobs_db))\n # 
print(jobs_db[10])\n # print(jobs_db[10][1].split('/'))\n","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2629,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"620923979","text":"# -*- coding: utf-8 -*-\nfrom django import forms\nfrom .models import WateringLog, TemperatureLog, HumidityLog, PictureLog, ColorLog\n# from .models import Friend\n\nclass WateringForm(forms.ModelForm):\n class Meta:\n model = WateringLog\n fields = ['history']\n\nclass TemperatureForm(forms.Form):\n try:\n items = TemperatureLog.objects.count()\n except:\n items = 0\n dataNum = forms.IntegerField(\n label='グラフ対象数',\n min_value=0,\n max_value=items\n )\n\nclass HumidityForm(forms.Form):\n try:\n items = HumidityLog.objects.count()\n except:\n items = 0\n dataNum = forms.IntegerField(\n label='グラフ対象数',\n min_value=0,\n max_value=items\n )\n\nclass PictureForm(forms.Form):\n try:\n items = PictureLog.objects.count()\n except:\n items = 0\n dataNum = forms.IntegerField(\n label='表示対象数',\n min_value=0,\n max_value=items\n )\n\nclass GrowForm(forms.Form):\n try:\n items = ColorLog.objects.count()\n except:\n items = 0\n dataNum = forms.IntegerField(\n label='グラフ対象数',\n min_value=0,\n max_value=items\n )\n\nclass SettingForm(forms.Form):\n photoTime = forms.IntegerField(\n label='写真撮影間隔(ms)',\n min_value=0,\n max_value=99999\n )\n\n plantManagemntTime = forms.IntegerField(\n label='水やり間隔(ms)',\n min_value=0,\n max_value=99999\n )\n\n notifyTime = forms.IntegerField(\n label='LINE通知間隔(ms)',\n min_value=0,\n max_value=99999\n )\n\n temperatureSaveTime = forms.IntegerField(\n label='温度記憶間隔(ms)',\n min_value=0,\n max_value=99999\n )\n\n warteringTime = forms.FloatField(\n label='水量(ms)',\n min_value=0,\n max_value=10\n )\n\n lineToken = forms.CharField(\n label='LINE-TOKEN'\n )","sub_path":"django_app/Hydroponic/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1993,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"48752815","text":"from typing import Tuple, Union\n\nimport torch\nfrom torch.distributions import Uniform\n\n\ndef _transform_input(input: torch.Tensor) -> torch.Tensor:\n r\"\"\"Reshape an input tensor to be (*, C, H, W). Accept either (H, W), (C, H, W) or (*, C, H, W).\n Args:\n input: torch.Tensor\n\n Returns:\n torch.Tensor\n \"\"\"\n if not torch.is_tensor(input):\n raise TypeError(f\"Input type is not a torch.Tensor. Got {type(input)}\")\n\n if len(input.shape) not in [2, 3, 4]:\n raise ValueError(\n f\"Input size must have a shape of either (H, W), (C, H, W) or (*, C, H, W). Got {input.shape}\")\n\n if len(input.shape) == 2:\n input = input.unsqueeze(0)\n\n if len(input.shape) == 3:\n input = input.unsqueeze(0)\n\n return input\n\n\ndef _validate_input_shape(input: torch.Tensor, channel_index: int, number: int) -> bool:\n r\"\"\"Validate if an input has the right shape. e.g. to check if an input is channel first.\n If channel first, the second channel of an RGB input shall be fixed to 3. 
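The kornia-style _adapted_uniform in the record below repeats a single draw across the batch via .repeat(shape[0]). Note that Tensor.repeat needs one factor per tensor dimension, so a sketch of the same idea that also handles multi-dimensional shapes (this is my adjustment for illustration, not the kornia source):

import torch
from torch.distributions import Uniform

def adapted_uniform(shape, low, high, same_on_batch=False):
    dist = Uniform(torch.tensor(float(low)), torch.tensor(float(high)))
    if same_on_batch:
        # draw once, then tile along the batch axis only
        sample = dist.rsample((1, *shape[1:]))
        return sample.repeat(shape[0], *([1] * (len(shape) - 1)))
    return dist.rsample(shape)

print(adapted_uniform((4, 3), 0.0, 1.0, same_on_batch=True))  # 4 identical rows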
To verify using:\n _validate_input_shape(input, 2, 3)\n Args:\n input: torch.Tensor\n channel_index: int\n number: int\n Returns:\n bool\n \"\"\"\n return input.shape[channel_index] == number\n\n\ndef _adapted_uniform(shape: Union[Tuple, torch.Size], low, high, same_on_batch=False):\n r\"\"\" The uniform function that accepts 'same_on_batch'.\n If same_on_batch is True, all values generated will be exactly same given a batch_size (shape[0]).\n By default, same_on_batch is set to False.\n \"\"\"\n dist = Uniform(low, high)\n if same_on_batch:\n return dist.rsample((1, *shape[1:])).repeat(shape[0])\n else:\n return dist.rsample(shape)\n","sub_path":"kornia/augmentation/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1727,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"343493903","text":"from questions import get_questions\nimport pytest\n\n\ndef test_questions_for_salesman(create_salesman_object):\n salesman_obj = create_salesman_object\n func = get_questions(salesman_obj)\n assert callable(func) and func.__name__ == \"salesman_questions\"\n\n\ndef test_no_questions_for_manager(create_manager_object):\n manager_obj = create_manager_object\n with pytest.raises(NotImplementedError):\n get_questions(manager_obj)\n","sub_path":"tests/unittests/test_getting_questions.py","file_name":"test_getting_questions.py","file_ext":"py","file_size_in_byte":438,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"237381336","text":"# 2015/08/06 edit by lego\n# \t--BIG CHANGE OF FORM.PY--\n# \tPlease reference to the MODEL.PY\n#\n# 2015/06/23 edit by lego\n# \tAppealForm add user and department field\n#\n# 2015/04/19 Create by lego\n\nfrom django import forms\nfrom base.refrence import GradeChoice, DepartChoice\nfrom appeal.models import Appeal\nclass AppealGuestForm(forms.Form):\n \"\"\" For guest \"\"\"\n title = forms.CharField(\n label='申訴標題',\n \tmax_length=50)\n context = forms.CharField(\n label='申訴內容',\n \twidget=forms.Textarea) \n name = forms.CharField(\n label='名子',\n \tmax_length=50)\n sid = forms.CharField(\n label='學號',\n max_length=15)\n department = forms.ChoiceField(\n label='系所',\n \tchoices=DepartChoice)\n grade = forms.ChoiceField(\n label='年級',\n \tchoices=GradeChoice)\n \n is_public = forms.BooleanField(\n \tlabel='是否願意讓此成為公開議題?',\n initial=True,\n required=False)\n is_public.widget.attrs['checked'] = True\n\nclass AppealAuthForm(forms.Form):\n \"\"\" For Auth \"\"\"\n title = forms.CharField(\n label='申訴標題',\n max_length=50)\n context = forms.CharField(\n label='申訴內容',\n widget=forms.Textarea)\n is_public = forms.BooleanField(\n label='是否願意讓此成為公開議題?',\n initial=True,\n required=False)\n is_public.widget.attrs['checked'] = True\n \nclass ReplyForm(forms.Form):\n\tcontext = forms.CharField(\n label='回覆',\n widget=forms.Textarea(\n attrs={\n 'rows':2,\n })\n )\n\nclass AppealPowerForm(forms.ModelForm):\n class Meta:\n model = Appeal\n fields = (\n 'process_status',\n )\n\n","sub_path":"appeal/form.py","file_name":"form.py","file_ext":"py","file_size_in_byte":1774,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"179812434","text":"'''\ntitle: monai bootcamp - post transform\nauthor: Sun Yeop Lee\n'''\n'''\nOverview\nThis notebook introduces you to the MONAI APIs for:\n\nsliding window inference\npost-processing transforms\n'''\n\nimport torch\nimport matplotlib.pyplot as plt\n\nimport 
monai\nmonai.config.print_config()\nfrom monai.inferers import sliding_window_inference\n\n\n# -- sliding window inference\n\n## a toy model for inference\nclass ToyModel:\n # A simple model generates the output by adding an integer `pred` to input.\n # each call of this instance increases the integer by 1.\n pred = 0\n def __call__(self, input):\n self.pred = self.pred + 1\n return input + self.pred\n\n## run the inference using sliding window\n\ninput_tensor = torch.zeros(1, 1, 200, 200)\noutput_tensor = sliding_window_inference(\n inputs=input_tensor, \n predictor=ToyModel(), \n roi_size=(40, 40), \n sw_batch_size=1, \n overlap=0.5, \n mode=\"constant\")\nplt.imshow(output_tensor[0, 0])\nplt.show()\n\n\n## Gaussian weighted windows\n'''\nFor a given input image window, the convolutional neural networks often predict the central regions more accurately than the border regions, usually due to the stacked convolutions' receptive field.\n\nTherefore, it is worth considering a \"Gaussian weighted\" prediction to emphasize the central region predictions when we stitch the windows into a complete inference output.\n\nThe following is an example of a 40x40-pixel Gaussian window map constructed using GaussianFilter from MONAI. \nThis is also integrated into the sliding window module.\n'''\nwin_size = (40, 40)\ngaussian = torch.zeros(win_size, device=\"cpu\")\ncenter_coords = [i // 2 for i in win_size]\nsigmas = [i * 0.125 for i in win_size]\ngaussian[tuple(center_coords)] = 1\npt_gaussian = monai.networks.layers.GaussianFilter(len(win_size), sigma=sigmas).to(device=\"cpu\", dtype=torch.float)\ngaussian = pt_gaussian(gaussian.unsqueeze(0).unsqueeze(0)).squeeze(0).squeeze(0)\nprint(gaussian.shape)\nplt.imshow(gaussian)\nplt.show()\n\n\n\ninput_tensor = torch.zeros(1, 1, 200, 200)\noutput_tensor_1 = sliding_window_inference(\n inputs=input_tensor, \n predictor=ToyModel(), \n roi_size=(40, 40), \n sw_batch_size=1, \n overlap=0.5, \n mode=\"gaussian\")\nplt.imshow(output_tensor_1[0, 0])\nplt.show()\n\nplt.subplots(1, 2)\nplt.subplot(1, 2, 1); plt.imshow(output_tensor[0, 0])\nplt.subplot(1, 2, 2); plt.imshow(output_tensor_1[0, 0])\nplt.show()\n\n## -- post-processing transforms\n'''\nThis section will set up and load a SegResNet model, run sliding window inference, and post-process the model output volumes:\n\nArgmax to get a discrete prediction map\nRemove small isolated predicted regions\nConvert the segmentation regions into contours\nWe'll start by importing all of our dependencies.\n'''\nimport os\nimport glob\n\nfrom monai.apps import download_and_extract\nfrom monai.utils import set_determinism\nfrom monai.data import CacheDataset, DataLoader\nfrom monai.networks.nets import SegResNet\nfrom monai.transforms import (\n AddChanneld,\n AsDiscrete,\n Compose,\n CropForegroundd,\n KeepLargestConnectedComponent,\n LabelToContour,\n LoadNiftid,\n Orientationd,\n ScaleIntensityRanged,\n Spacingd,\n ToTensord,\n)\n\nresource = \"https://msd-for-monai.s3-us-west-2.amazonaws.com/Task09_Spleen.tar\"\nmd5 = \"410d4a301da4e5b2f6f86ec3ddba524e\"\n\nroot_dir = r'C:\\Users\\sunyp\\Desktop\\딥노이드\\Python\\github repository\\deepnoid-practices\\monai-tutorials\\data'\ncompressed_file = os.path.join(root_dir, \"Task09_Spleen.tar\")\ndata_dir = os.path.join(root_dir, \"Task09_Spleen\")\ndownload_and_extract(resource, compressed_file, root_dir, md5)\n\n## set up the validation data, preprocessing transforms, and data loader\nimages = sorted(glob.glob(os.path.join(data_dir, \"imagesTr\", \"*.nii.gz\")))\nlabels = 
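The bootcamp script below builds its Gaussian importance map by pushing an impulse image through monai's GaussianFilter. An equivalent sketch with scipy.ndimage, using the same sigma rule (scipy here is my substitution for illustration, not MONAI's implementation):

import numpy as np
from scipy.ndimage import gaussian_filter

win_size = (40, 40)
sigmas = [s * 0.125 for s in win_size]          # same sigma rule as the script
impulse = np.zeros(win_size)
impulse[win_size[0] // 2, win_size[1] // 2] = 1.0
gaussian_map = gaussian_filter(impulse, sigma=sigmas)
print(gaussian_map.shape, gaussian_map.max())   # weight peaks at the window centre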
sorted(glob.glob(os.path.join(data_dir, \"labelsTr\", \"*.nii.gz\")))\ndata_dicts = [\n {\"image\": image_name, \"label\": label_name}\n for image_name, label_name in zip(images, labels)\n]\nval_files = data_dicts[-9:]\n\nval_transforms = Compose(\n [\n LoadNiftid(keys=[\"image\", \"label\"]),\n AddChanneld(keys=[\"image\", \"label\"]),\n Spacingd(keys=[\"image\", \"label\"], pixdim=(1.5, 1.5, 2.0), mode=(\"bilinear\", \"nearest\")),\n Orientationd(keys=[\"image\", \"label\"], axcodes=\"RAS\"),\n ScaleIntensityRanged(\n keys=[\"image\"], a_min=-57, a_max=164, b_min=0.0, b_max=1.0, clip=True,\n ),\n CropForegroundd(keys=[\"image\", \"label\"], source_key=\"image\"),\n ToTensord(keys=[\"image\", \"label\"]),\n ]\n)\nval_ds = CacheDataset(data=val_files, transform=val_transforms, cache_rate=1.0, num_workers=4)\nval_loader = DataLoader(val_ds, batch_size=1, num_workers=4)\n\n## set up the model\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu:0\")\nmodel = SegResNet(\n spatial_dims=3,\n in_channels=1,\n out_channels=2,\n).to(device)\n\nmodel_path = os.path.join(root_dir, \"segresnet_model_epoch30.pth\")\nmodel.load_state_dict(torch.load(model_path))\nmodel.eval()\nprint(f\"model from {model_path}.\")\n\n## run the sliding window inferenc\nval_data = next(iter(val_loader))\nval_data = val_data[\"image\"].to(device)\n\nroi_size = (160, 160, 160)\nsw_batch_size = 4\nwith torch.no_grad():\n val_output = sliding_window_inference(val_data, roi_size, sw_batch_size, model)\nprint(val_output.shape, val_output.device)\nslice_idx = 80\nplt.title(f\"image -- slice {slice_idx}\")\nplt.imshow(val_output.detach().cpu()[0, 1, :, :, 80], cmap=\"gray\")\n\n\nroi_size = (88, 88, 88)\nsw_batch_size = 1\nwith torch.no_grad():\n val_output = sliding_window_inference(\n val_data, roi_size, sw_batch_size=sw_batch_size, predictor=model, mode=\"gaussian\", overlap=0.2)\nprint(val_output.shape, val_output.device)\n\nslice_idx = 80\nplt.title(f\"image -- slice {slice_idx}\")\nplt.imshow(val_output.detach().cpu()[0, 1, :, :, 80], cmap=\"gray\")\n\n## post processing: argmax over the output probabilities into a discrete map\nargmax = AsDiscrete(argmax=True)(val_output)\nprint(argmax.shape)\n\nslice_idx = 80\nplt.subplots(1, 2)\nplt.subplot(1, 2, 1)\nplt.title(f\"image -- slice {slice_idx}\")\nplt.imshow(val_data.detach().cpu()[0, 0, :, :, 80], cmap=\"gray\")\n\nplt.subplot(1, 2, 2)\nplt.title(f\"argmax -- slice {slice_idx}\")\nplt.imshow(argmax.detach().cpu()[0, 0, :, :, 80])\n\n## post processing: connected component analysis to select the largest segmentation region\n\nlargest = KeepLargestConnectedComponent(applied_labels=[1])(argmax)\nprint(largest.shape)\n\nslice_idx = 80\nplt.subplots(1, 2)\nplt.subplot(1, 2, 1)\nplt.title(f\"image -- slice {slice_idx}\")\nplt.imshow(val_data.detach().cpu()[0, 0, :, :, 80], cmap=\"gray\")\n\nplt.subplot(1, 2, 2)\nplt.title(f\"largest component -- slice {slice_idx}\")\nplt.imshow(largest.detach().cpu()[0, 0, :, :, 80])\n\n## post-processing: convert the region into a contour map\ncontour = LabelToContour()(largest)\nprint(contour.shape)\n\nslice_idx = 80\nplt.subplots(1, 2)\nplt.subplot(1, 2, 1)\nplt.title(f\"image -- slice {slice_idx}\")\nplt.imshow(val_data.detach().cpu()[0, 0, :, :, 80], cmap=\"gray\")\n\nplt.subplot(1, 2, 2)\nplt.title(f\"contour -- slice {slice_idx}\")\nplt.imshow(contour.detach().cpu()[0, 0, :, :, 80], cmap=\"Greens\")\n\nmap_image = contour + val_data\n\nslice_idx = 80\nplt.subplots(1, 2)\nplt.subplot(1, 2, 1)\nplt.title(f\"image -- 
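KeepLargestConnectedComponent in the record below prunes small isolated predicted regions after argmax. A sketch of the same post-processing step done with scipy.ndimage.label instead of MONAI (again a substitution for illustration):

import numpy as np
from scipy import ndimage

def keep_largest_component(mask):
    # mask: binary array; keep only its largest connected component
    labeled, num = ndimage.label(mask)
    if num == 0:
        return mask
    sizes = ndimage.sum(mask, labeled, range(1, num + 1))
    return (labeled == (np.argmax(sizes) + 1)).astype(mask.dtype)

toy = np.array([[1, 1, 0, 0],
                [1, 0, 0, 1],
                [0, 0, 0, 0],
                [0, 1, 0, 0]])
print(keep_largest_component(toy))   # only the 3-pixel top-left blob survives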
slice {slice_idx}\")\nplt.imshow(val_data.detach().cpu()[0, 0, :, :, 80], cmap=\"gray\")\n\nplt.subplot(1, 2, 2)\nplt.title(f\"contour -- slice {slice_idx}\")\nplt.imshow(map_image.detach().cpu()[0, 0, :, :, 80], cmap=\"gray\")\n\nfrom monai.visualize import plot_2d_or_3d_image\nfrom torch.utils.tensorboard import SummaryWriter\n\nwith SummaryWriter(log_dir=root_dir) as writer:\n plot_2d_or_3d_image(map_image, step=0, writer=writer, tag=\"segmentation\")\n plot_2d_or_3d_image(val_output, step=0, max_channels=2, writer=writer, tag=\"Probability\")\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"monai-tutorials/monai bootcamp - post transform.py","file_name":"monai bootcamp - post transform.py","file_ext":"py","file_size_in_byte":7768,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"57177362","text":"# -*- coding: utf-8 -*-\n\nfrom django import forms\nfrom decimal import Decimal\nfrom django.forms import ModelForm\nfrom estacionamientos.models import *\nfrom django.core.validators import RegexValidator\nfrom django.template.defaultfilters import default\nfrom django.forms.widgets import Widget, DateInput\nfrom cProfile import label\n\n\nclass EstacionamientoForm(forms.Form):\n phone_validator = RegexValidator(\n regex = '^((0212)|(0412)|(0416)|(0414)|(0424)|(0426))-?\\d{7}',\n message = 'Debe introducir un formato válido.'\n )\n\n # nombre del dueno (no se permiten digitos)\n propietario = forms.CharField(\n required = True,\n label = \"Propietario\",\n validators = [\n RegexValidator(\n regex = '^[a-zA-ZäëïöüáéíóúñÄËÏÖÜÑÁÉÍÓÚ ]+$',\n message = 'Sólo debe contener letras.'\n )\n ],\n help_text=\"Nombre del propietario del estacionamiento.\"\n )\n\n nombre = forms.CharField(required = True, label = \"Nombre\",\n help_text=\"Nombre del estacionamiento\")\n\n direccion = forms.CharField(required = True,\n help_text=\"Direccion del estacionamiento\")\n\n telefono_1 = forms.CharField(required = False, validators = [phone_validator],\n help_text=\"Ejemplo : 0426-0009911\")\n telefono_2 = forms.CharField(required = False, validators = [phone_validator],\n help_text=\"Ejemplo : 0426-0009911\")\n telefono_3 = forms.CharField(required = False, validators = [phone_validator],\n help_text=\"Ejemplo : 0426-0009911\")\n\n email_1 = forms.EmailField(required = False,\n help_text=\"Ejemplo : octacorde@gmail.com\")\n email_2 = forms.EmailField(required = False,\n help_text=\"Ejemplo : octacorde@gmail.com\")\n\n rif = forms.CharField(\n required = True,\n label = \"RIF\",\n validators = [\n RegexValidator(\n regex = '^[JVD]-\\d{8}-?\\d$',\n message = 'Introduzca un RIF con un formato válido.'\n )\n ],\n help_text=\"Ejemplo : J-00000000-0\"\n )\n\nclass EstacionamientoExtendedForm(ModelForm):\n class Meta:\n model = Estacionamiento\n exclude = ['Propietario','Nombre','Direccion', 'Telefono_1', 'Telefono_2','Telefono_3',\n 'Email_1','Email_2','Rif', 'esquema_tarifario', 'content_type', 'object_id']\n \n def clean(self):\n try:\n self.cleaned_data['NroPuesto']\n except:\n return\n if self.cleaned_data['NroPuesto'] <= 0:\n raise forms.ValidationError({'NroPuesto': [\"El valor debe ser positivo.\",]})\n super(EstacionamientoExtendedForm, self).clean()\n\nclass EsquemaTarifarioForm(forms.Form):\n esquema_tarifario = forms.ChoiceField(\n choices = EsquemaTarifario.LISTA_ESQUEMAS\n )\n \n monto = forms.DecimalField(max_digits = 10, decimal_places=2, label = 'Monto', required=True, help_text = \"Monto del esquema\")\n monto_pico = 
forms.DecimalField(max_digits = 10, decimal_places=2, label = 'Monto pico', required=False, help_text = \"Monto para las horas pico\")\n monto_fin_semana = forms.DecimalField(max_digits = 10, decimal_places=2, label = 'Monto fin de semana', required=False, help_text = \"Monto para los fines de semana\")\n hora_pico_ini = forms.TimeField(label = 'Hora de inicio de la hora Pico',\\\n help_text=\"Hora inicio. Ejem 13:01\", required=False)\n hora_pico_fin = forms.TimeField(label = 'Horario final de la hora Pico',\\\n help_text=\"Hora final. Ejem 14:01\", required=False)\n \n def clean(self):\n \n dict_errores = {}\n try:\n self.cleaned_data['monto']\n if isinstance(self.cleaned_data['monto'], Decimal):\n if self.cleaned_data['monto'] <= 0.0 :\n dict_errores['monto'] = [\"El valor debe ser positivo.\",]\n except :\n pass\n \n try:\n esquema_tarifario = self.cleaned_data['esquema_tarifario']\n except KeyError:\n esquema_tarifario = \"\"\n \n if esquema_tarifario == \"Pago diferenciado por horas\":\n \n #Verificamos que el monto pico existe \n try:\n \n # Verificacion monto_pico vacio\n self.cleaned_data['monto_pico']\n if self.cleaned_data['monto_pico'] is None:\n dict_errores['monto_pico'] = [\"El monto pico no debe ser vacio.\",]\n \n #Si monto pico existe y es valido, procedemos a verificar los otros campos\n if isinstance(self.cleaned_data['monto_pico'], Decimal):\n if self.cleaned_data['monto_pico'] <= 0.0 :\n dict_errores['monto_pico'] = [\"El valor debe ser positivo.\",]\n \n # Validacion hora inicio pico\n try:\n self.cleaned_data['hora_pico_ini']\n if self.cleaned_data['hora_pico_ini'] is None:\n dict_errores['hora_pico_ini'] = [\"La hora de inicio no debe ser vacia.\",]\n except :\n dict_errores['hora_pico_ini'] = [\"La hora de inicio debe ser valida\",]\n \n # Validacion hora fin pico\n try:\n self.cleaned_data['hora_pico_fin']\n if self.cleaned_data['hora_pico_fin'] is None:\n dict_errores['hora_pico_fin'] = [\"La hora final no debe ser vacia.\",]\n except :\n dict_errores['hora_pico_fin'] = [\"La hora final debe ser valida\",]\n \n # Validacion hora pico ini < hora pico fin\n if self.cleaned_data['hora_pico_ini'] >= self.cleaned_data['hora_pico_fin']:\n dict_errores['hora_pico_ini'] = [\"La hora pico de inicio debe ser menor que la hora pico final.\"]\n \n # Validacion monto < monto_pico\n if self.cleaned_data['monto'] >= self.cleaned_data['monto_pico']:\n dict_errores['monto_pico'] = [\"La tarifa en hora pico debe ser mayor a la tarifa valle\"]\n \n # Si monto pico no es valido \n except:\n pass\n \n elif esquema_tarifario == \"Pago por Fraccion Fin Semana\":\n \n try:\n # Verificacion monto_fin_semana vacio\n self.cleaned_data['monto_fin_semana']\n if self.cleaned_data['monto_fin_semana'] is None:\n dict_errores['monto_fin_semana'] = [\"El monto del fin de semana no debe ser vacio.\",]\n \n #Si monto pico existe y es valido, procedemos a verificar los otros campos\n if isinstance(self.cleaned_data['monto_fin_semana'], Decimal):\n if self.cleaned_data['monto_fin_semana'] <= 0.0 :\n dict_errores['monto_fin_semana'] = [\"El valor debe ser positivo.\",]\n \n except:\n pass\n \n if dict_errores:\n raise forms.ValidationError(dict_errores)\n \n super(EsquemaTarifarioForm, self).clean()\n \nclass EstacionamientoReserva(forms.Form):\n fecha_inicio_reserva = forms.DateField(label='Fecha inicio',\\\n help_text=\"Formato:dd/mm/aa o dd-mm-aa\",\\\n input_formats=['%d/%m/%Y','%d/%m/%y','%d-%m-%y','%d-%m-%Y'])\n \n hora_inicio_reserva = forms.TimeField(label = 'Horario de inicio',\\\n 
help_text=\"Hora de inicio. Ejem 13:01\")\n \n fecha_final_reserva = forms.DateField(label='Fecha final',\\\n help_text=\"Formato: dd/mm/aa o dd-mm-aa\",\\\n input_formats=['%d/%m/%Y','%d/%m/%y','%d-%m-%y','%d-%m-%Y'])\n \n hora_final_reserva = forms.TimeField(label = 'Horario final',\\\n help_text=\"Hora final. Ejem : 14:01\")\n \nclass EstacionamientoReporte(forms.Form):\n fecha_inicio_reserva = forms.DateField(label='Fecha inicio',\\\n help_text=\"Formato:dd/mm/aa o dd-mm-aa\",\\\n input_formats=['%d/%m/%Y','%d/%m/%y','%d-%m-%y','%d-%m-%Y'])\n \n fecha_final_reserva = forms.DateField(label='Fecha final',\\\n help_text=\"Formato: dd/mm/aa o dd-mm-aa\",\\\n input_formats=['%d/%m/%Y','%d/%m/%y','%d-%m-%y','%d-%m-%Y'])\n \n \nclass EstacionamientoPagarReservaForm(forms.Form):\n text_validator_no_letras = RegexValidator(\n regex = '^[a-zA-ZäëïöüáéíóúñÄËÏÖÜÑÁÉÍÓÚ ]+$',\n message = 'Sólo debe contener letras.'\n )\n \n text_validator_comienza_blanco = RegexValidator(\n regex = '^[a-zA-ZäëïöüáéíóúñÄËÏÖÜÑÁÉÍÓÚ0123456789][a-zA-ZäëïöüáéíóúñÄËÏÖÜÑÁÉÍÓÚ0123456789]*',\n message = 'Debe comenzar con una letra.'\n )\n text_validator_tarjeta_credito = RegexValidator(\n regex = '^(\\d{4})-?(\\d{4})-?(\\d{4})-?(\\d{4})$',\n message = 'Debe introducir un número de tarjeta válido' \n )\n \n text_validator_numero_cedula = RegexValidator(\n regex = '^[1-9][0-9]{0,8}$', \n message = 'Introduzca un numero de cedula válido' \n ) \n opcionesTarjetas = (('vista', 'Vista'),\n ('mister', 'Mister'),\n ('xpres', 'Xpres'),\n )\n \n opcionesCedula = (('v', 'V'),\n ('e', 'E'),\n )\n \n nombres = forms.CharField(required = True, label = \"Nombres\", max_length=100, validators = [text_validator_no_letras,\n text_validator_comienza_blanco], \n help_text=\"Ejemplo: Juan Antonio\")\n apellidos = forms.CharField(required = True, label = \"Apellidos\", max_length=100, validators = [text_validator_no_letras,\n text_validator_comienza_blanco], \n help_text=\"Ejemplo: Díaz López\")\n tipoCedula = forms.ChoiceField(choices = opcionesCedula, required= True)\n numeroCedula = forms.IntegerField(required = True, label = \"numeroCedula\", validators = [text_validator_numero_cedula], help_text=\"Ejemplo: 1000000\")\n tipoTarjeta = forms.ChoiceField(choices = opcionesTarjetas, required= True)\n numeroTarjeta = forms.CharField(required = True, label = \"numeroTarjeta\", help_text=\"16 Digitos Ej: 4532-4801-5474-0685\", validators = [text_validator_tarjeta_credito]) \n\nclass clienteCedula(forms.Form):\n text_validator_numero_cedula = RegexValidator(\n regex = '^[1-9][0-9]{0,8}$', \n message = 'Introduzca un numero de cedula válido' \n )\n opcionesCedula = (('v', 'V'),\n ('e', 'E'),\n )\n tipoCedula = forms.ChoiceField(choices = opcionesCedula, required= True)\n numeroCedula = forms.IntegerField(required = True, label = \"numeroCedula\", \n validators = [text_validator_numero_cedula], help_text=\"Ejemplo: 1000000\")\n \nclass estacionamientoRif(forms.Form):\n rif = forms.CharField(\n required = True,\n label = \"RIF\",\n validators = [\n RegexValidator(\n regex = '^[JVD]-\\d{8}-?\\d$',\n message = 'Introduzca un RIF con un formato válido.'\n )\n ],\n help_text=\"Ejemplo : J-00000000-0\"\n )\n","sub_path":"SAGEOctAcorde/estacionamientos/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":12828,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"363614432","text":"import os\nimport config\nfrom game import 
*\n\nos.system('clear')\nprint(\"\\033[0;0H\")\nprint(\"\\033[?25l\")\n\nfor level in range(1, 4):\n if level == 3:\n config.BOSS_LEVEL = True\n my_game = Game(level)\n my_game.play()\n\n# my_game = Game(3)\n# my_game.play()\n \nprint(\"\\033[?25h\")","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":286,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"533431876","text":"from django.contrib import admin\n\nfrom ordersapp.models import Order, OrderItem\n\nclass OrderItemInline(admin.TabularInline):\n model = OrderItem\n\n\nclass OrderModelAdmin(admin.ModelAdmin):\n list_display = [\n 'user',\n 'created',\n 'is_active',\n 'status',\n ]\n\n inlines = [\n OrderItemInline\n ]\n\nadmin.site.register(Order, OrderModelAdmin)\nadmin.site.register(OrderItem)\n\n\n","sub_path":"myshop/ordersapp/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":416,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"171426486","text":"from db.run_sql import run_sql\nfrom models.artist import Artist \n\ndef save(artist):\n sql = \"INSERT INTO artists (name) VALUES (%s) RETURNING *\"\n values = [artist.name]\n results = run_sql(sql, values)\n id = results[0]['id']\n artist.id = id\n return artist\n\ndef delete_all():\n sql = \"DELETE FROM artist\"\n run_sql(sql)\n\ndef find_artist(id):\n artist = None\n sql = \"SELECT * FROM artist WHERE id = %s\" \n values = [id] \n result = run_sql(sql, values)[0]\n if result is not None:\n artist = Artist(result['title'], result['id'])\n return artist\n\ndef select_all():\n artists = [] \n sql = \"SELECT * FROM artists\"\n results = run_sql(sql)\n for row in results:\n artist = Artist(result['title'], result['id'])\n artists.append(artist)\n return artists \n\ndef update(id):\n sql = \"UPDATE artists SET (name, id) = (%s, %s) WHERE id = %s\"\n values = [task.name, task.id]\n run_sql(sql, values) \n\n\ndef delete(id):\n sql = \"DELETE FROM albums WHERE id = %s\" \n values = [id]\n run_sql(sql, values)","sub_path":"repositories/artist_repository.py","file_name":"artist_repository.py","file_ext":"py","file_size_in_byte":1062,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"103576938","text":"# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. 
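artist_repository.py above has a few slips: select_all loops over `row` but reads `result`, find_artist reads a 'title' key the artists table does not have, and delete_all targets the wrong table name. A corrected sketch of the two read functions, assuming the record's run_sql returns dict-like rows keyed by 'name' and 'id':

# Corrected sketch (assumes run_sql returns dict-like rows with 'name'/'id')
def find_artist(id):
    sql = "SELECT * FROM artists WHERE id = %s"
    results = run_sql(sql, [id])
    if results:
        row = results[0]
        return Artist(row['name'], row['id'])
    return None

def select_all():
    artists = []
    for row in run_sql("SELECT * FROM artists"):
        artists.append(Artist(row['name'], row['id']))
    return artists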
See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\nimport re\nimport json\nimport pandas as pd\n\nimport six\n\nfrom pyarrow.compat import PY2\n\n\nINDEX_LEVEL_NAME_REGEX = re.compile(r'^__index_level_\\d+__$')\n\n\ndef is_unnamed_index_level(name):\n return INDEX_LEVEL_NAME_REGEX.match(name) is not None\n\n\ndef infer_dtype(column):\n try:\n return pd.api.types.infer_dtype(column)\n except AttributeError:\n return pd.lib.infer_dtype(column)\n\n\ndef get_column_metadata(column, name):\n inferred_dtype = infer_dtype(column)\n dtype = column.dtype\n\n if hasattr(dtype, 'categories'):\n extra_metadata = {\n 'num_categories': len(column.cat.categories),\n 'ordered': column.cat.ordered,\n }\n elif hasattr(dtype, 'tz'):\n extra_metadata = {'timezone': str(dtype.tz)}\n else:\n extra_metadata = None\n\n if not isinstance(name, six.string_types):\n raise TypeError(\n 'Column name must be a string. Got column {} of type {}'.format(\n name, type(name).__name__\n )\n )\n\n return {\n 'name': name,\n 'pandas_type': {\n 'string': 'bytes' if PY2 else 'unicode',\n 'datetime64': (\n 'datetimetz' if hasattr(dtype, 'tz')\n else 'datetime'\n ),\n 'integer': str(dtype),\n 'floating': str(dtype),\n }.get(inferred_dtype, inferred_dtype),\n 'numpy_type': str(dtype),\n 'metadata': extra_metadata,\n }\n\n\ndef index_level_name(index, i):\n return index.name or '__index_level_{:d}__'.format(i)\n\n\ndef construct_metadata(df, index_levels, preserve_index):\n return {\n b'pandas': json.dumps(\n {\n 'index_columns': [\n index_level_name(level, i)\n for i, level in enumerate(index_levels)\n ] if preserve_index else [],\n 'columns': [\n get_column_metadata(df[name], name=name)\n for name in df.columns\n ] + (\n [\n get_column_metadata(\n level, name=index_level_name(level, i)\n )\n for i, level in enumerate(index_levels)\n ] if preserve_index else []\n ),\n 'pandas_version': pd.__version__,\n }\n ).encode('utf8')\n }\n","sub_path":"python/pyarrow/pandas_compat.py","file_name":"pandas_compat.py","file_ext":"py","file_size_in_byte":3180,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"312246533","text":"# -*- coding: utf-8 -*-\n\nANALYSES = ('scout', 'fastq', 'scout+fastq', 'custom')\nAPPLICATION_TAGS = ('EXOSXTR100', 'EXOSXTR075', 'EXOSXTR060', 'EXOSXTR050',\n 'EXOSXTR025', 'EXOSLIR100', 'EXOSLIR075', 'EXOSLIR060',\n 'EXOSLIR030', 'EXOSLIR020', 'EXOSFFR100', 'EXOSFFR075',\n 'EXOSFFR060', 'EXOSFFR030', 'EXOSFFR025', 'WGSPCFC060',\n 'WGSPCFC030', 'WGSACUR600', 'EXXCUSR100', 'WGXCUSC030')\nSEXES = ('F', 'M', 'Unknown')\nCUSTOMERS = ('cust001', 'cust002', 'cust003', 'cust004', 'cust005', 'cust006',\n 'cust010', 'cust013', 'cust019', 'cust020')\nREFERENCE_GENOMES = ('hg19', 'hg38')\nBOOLEANS = ('Ja', 'Nej')\nORIGINS = ('Blod', 'Saliv', 'Vävnad', 'Tumör (fresh frozen)', 'Tumör (FFPE)',\n 'Cell-linje', 'Annat')\nPRIORITIES = ('standard', 'prioriterad', 'express', 'akut')\nSTATUSES = ('Affected', 'Unaffected')\nCAPTURE_KITS = ('Agilent_SureSelectCRE.V1', 'Agilent_SureSelect.V5',\n 'Agilent_SureSelect.V4')\nCONTAINER_TYPES = ('96 well plate', 'Tube')\n# for WGS, how many reads needed to cover genome 1x\nREADS_PERX = (435 * 1000000) / 30\n","sub_path":"orderform/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":1141,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"168356366","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Oct 1 
08:28:37 2020\r\n\r\n@author: Kwaku\r\n\"\"\"\r\nimport random\r\nimport math\r\nimport statistics as st\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom scipy.stats import norm\r\nimport seaborn as sns\r\n\r\n\r\n\r\n\r\n# settings for seaborn plotting style\r\nsns.set(color_codes=True)\r\n# settings for seaborn plot sizes\r\nsns.set(rc={'figure.figsize':(5,5)})\r\nnp.random.seed(20)  # just to add a little bit of repeatability in the randomness\r\n#np.random.seed()\r\ndef theorwhichman(size):\r\n    rand=[]\r\n    for i in range(0,size):\r\n        rand.append(random.uniform(0, 1))\r\n    return rand\r\nsize=100\r\n\r\na=theorwhichman(size)\r\n\r\nmu = st.mean(a)\r\nsigma = st.stdev(a)\r\n\r\nx = np.linspace(-1, 1, size)\r\na.sort()\r\n\r\nprint(\"Sigma:\", sigma)\r\nprint(\"Mu:\", mu)\r\n\r\n\r\n#Creating bits to be transmitted\r\n# _____________________________________________________________________________\r\n# Bit generator\r\n# _____________________________________________________________________________\r\n# random bits generator\r\ndef bits_gen(values): # function takes a list of values between 0 and 1\r\n    data = []\r\n\r\n    for i in values:\r\n        #if the value is less than 0.5 append a 0\r\n        if i < 0.5:\r\n            data.append(0)\r\n        else:\r\n            #if the value found in the list is greater than 0.5 append 1\r\n            data.append(1)\r\n\r\n    return data\r\n\r\n# ________________________________________________________________________________\r\n# mapping of bits to symbol using constellation maps\r\n# ______________________________________________________________________________\r\ndef BPSK(bits):\r\n    bpsk = []\r\n    for k in bits:\r\n        if k == 1:\r\n            bpsk.append(1)\r\n        else:\r\n            bpsk.append(-1)\r\n    return bpsk\r\n\r\ndef fourQAM(bits):\r\n    FQAM = []\r\n    M = 2\r\n    subList = [bits[n:n + M] for n in range(0, len(bits), M)]\r\n    for k in subList:\r\n        if k == [0, 0]:\r\n            FQAM.append(complex(1 / np.sqrt(2), 1 / np.sqrt(2)))\r\n        elif k == [0, 1]:\r\n            FQAM.append(complex(-1 / np.sqrt(2), 1 / np.sqrt(2)))\r\n        elif k == [1, 1]:\r\n            FQAM.append(complex(-1 / np.sqrt(2), -1 / np.sqrt(2)))\r\n        # elif(k==[1,0]):\r\n        elif k == [1, 0]:\r\n            FQAM.append(complex(1 / np.sqrt(2), -1 / np.sqrt(2)))\r\n    return FQAM\r\n\r\ndef eight_PSK(bits):\r\n    EPSK = []\r\n    M = 3\r\n    subList = [bits[n:n + M] for n in range(0, len(bits), M)]\r\n    for k in subList:\r\n        if k == [0, 0, 0]:\r\n            EPSK.append(complex(1, 0))\r\n        elif k == [0, 0, 1]:\r\n            EPSK.append((1+1j)/np.sqrt(2))\r\n        elif k == [0, 1, 1]:\r\n            EPSK.append(1j)\r\n        elif k == [0, 1, 0]:\r\n            EPSK.append((-1+1j)/np.sqrt(2))\r\n        elif k == [1, 1, 0]:\r\n            EPSK.append(-1)\r\n        elif k == [1, 1, 1]:\r\n            EPSK.append((-1-1j)/np.sqrt(2))\r\n        elif k == [1, 0, 1]:\r\n            EPSK.append(-1j)\r\n        elif k == [1, 0, 0]:\r\n            EPSK.append((1-1j)/np.sqrt(2))\r\n    return EPSK\r\n\r\n#________________________________________________________________\r\n# Sigma calculation\r\n#__________________________________________________________________\r\n\r\ndef sigma(domain,M):# M is the number of symbols\r\n    sigma=[]\r\n    for i in domain:\r\n        sigma.append(1/np.sqrt(math.pow(10, (i/ 10)) * 2 * math.log2(M)))\r\n    return sigma\r\n\r\n\r\n#______________________________________________________________________________\r\n# create noise\r\n#______________________________________________________________________________\r\n\r\ndef noise(size,sigma):\r\n    noiseList = np.random.normal(0,sigma,size)\r\n    return 
noiseList\r\n\r\n#__________________________________________________________________________\r\n# add noise \r\n#______________________________________________________________________________\r\n\r\ndef addnoise(transmitted,channels,L,m,snr, sigma_):# assuming transmited comes with the memory symbols padded\r\n recieved=[]\r\n M=m\r\n k=snr\r\n \r\n \r\n #print(sigma_)\r\n \r\n \r\n for i in range(L-1,len(transmitted)):\r\n sample=np.random.normal(0,1,1)[0]\r\n recieved.append(transmitted[i]*channels[0]+transmitted[i-1]*channels[0+1]\r\n +transmitted[i-2]*channels[2]+sigma_*(sample+(sample)*1j))\r\n return recieved #without the padding\r\n\r\n#__________________________________________________________________________\r\n# DFE function \r\n#______________________________________________________________________________\r\n\r\ndef DFE(recieved,channels,L,options,memory):\r\n Options =options# [1,-1]# these are the option available for bpsk\r\n symbols = memory#[1]*(L-1) #the first 1 is the memory symbols\r\n s=0 # symbol mover\r\n for i in range(0,len(recieved)):\r\n guess=[]\r\n n=len(channels)-1# length of the chanel L-1\r\n #calculating the product but from second position\r\n sumof=0\r\n for j in range(1,n):\r\n sumof+= symbols[n-1+s]*channels[j]\r\n n-=1\r\n \r\n for k in Options:\r\n guess.append(np.abs(recieved[i]-((k)*channels[0]+sumof))**2)\r\n estimate=Options[guess.index(min(guess))]\r\n symbols.append(estimate)\r\n s+=1\r\n return symbols[L-1:] #final \r\n#__________________________________________________________________________\r\n# Options and memory generator\r\n#______________________________________________________________________________\r\ndef OptMemGen(i,L):\r\n #BPSK=1 #4QAM=2 8PSK=3\r\n if i==1:\r\n Options = [1,-1]# these are the option available for bpsk\r\n memory= [1]*(L-1) #the first 1 is the memory symbols\r\n return Options,memory\r\n elif(i==2):\r\n Options = [(1+1j)/np.sqrt(2), (-1+1j)/np.sqrt(2), (-1-1j)/np.sqrt(2), (1-1j)/np.sqrt(2)]\r\n memory=[(1+1j)/np.sqrt(2)]*(L-1)\r\n return Options,memory\r\n elif(i==3):\r\n Options=[1, (1+1j)/np.sqrt(2), 1j, (-1+1j)/np.sqrt(2), -1, (-1-1j)/np.sqrt(2), -1j, (1-1j)/np.sqrt(2)]\r\n memory=[(1+1j)/np.sqrt(2)]*(L-1)\r\n return Options,memory \r\n\r\n#__________________________________________________________________________\r\n# Bit Error calculation\r\n#______________________________________________________________________________\r\n\r\ndef bit_errors(sent, recieved):\r\n error = 0\r\n for k in range(0,len(recieved)):\r\n if sent[k] != recieved[k]:\r\n error += 1\r\n BER = error / len(recieved)*100\r\n return BER\r\n\r\n#transmitted=[1,1,1,-1,1,-1,1]\r\n#channels = [0.89+0.92j,0.42-0.37j,0.19+0.12j] \r\n#Recieved=addnoise(transmitted,channels,3)#[1.5,1.2,1,-1.2,-1.5,0.2]\r\n#print(Recieved)\r\n\r\n\r\n###############################################################################\r\n\"\"\"\r\n\r\nLefa's graph function\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\"\"\"\r\n###############################################################################\r\n\r\n\r\n\r\nsqrt2 = np.sqrt(2)\r\nCIR = [0.89 + 0.92j, 0.42 - 0.37j, 0.19 + 0.12j]\r\nN = 200\r\n\r\n\r\ndef Reverse(x):\r\n return x [::-1]\r\n \r\n# generate deltas for BPSK \r\ndef findDeltaBPSK(recieved, Symbols =[[]],i=0, c = CIR):\r\n Options = [-1,1]\r\n delta = []\r\n for s in Symbols:\r\n for j in Options:\r\n delta.append(np.abs(recieved[i] - (j*c[0] + s[1]*c[1] + s[0]*c[2]) )**2)\r\n return delta\r\n\r\n# generate deltas for 4QAM\r\ndef 
findDelta4QAM(recieved, Symbols =[[]],i=0, c = CIR):\r\n Options = [(1+1j)/sqrt2, (-1+1j)/sqrt2, (-1-1j)/sqrt2, (1-1j)/sqrt2]\r\n delta = []\r\n for s in Symbols:\r\n for j in Options:\r\n delta.append(np.abs(recieved[i] - (j*c[0] + s[1]*c[1] + s[0]*c[2]) )**2)\r\n \r\n return delta\r\n\r\n# generate deltas for 8PSK\r\ndef findDelta8PSK(recieved, Symbols =[[]],i=0, c = CIR):\r\n Options = [1, (1+1j)/sqrt2, 1j, (-1+1j)/sqrt2, -1, (-1-1j)/sqrt2, -1j, (1-1j)/sqrt2]\r\n delta = []\r\n for s in Symbols:\r\n for j in Options:\r\n delta.append(np.abs(recieved[i] - (j*c[0] + s[1]*c[1] + s[0]*c[2]) )**2)\r\n return delta\r\n\r\n\r\ndef BPSK_MLSE(recieved, N, c):\r\n # Generate the bits, then get their symbols from the constellation map \r\n Bits = []\r\n for i in range(8):\r\n Bits.append(format(8+i, 'b')) \r\n Symbols = []\r\n # print(Bits)\r\n for i in range(8):\r\n Sym = []\r\n for j in range(3):\r\n if Bits[i][j+1] == \"1\":\r\n Sym.append(1)\r\n else:\r\n Sym.append(-1)\r\n Symbols.append(Sym)\r\n # print(Symbols) \r\n # Get all the deltas\r\n deltas = []\r\n for i in range(N):\r\n deltas.append(findDeltaBPSK(recieved, Symbols ,i, c))\r\n\r\n # Using the deltas, work backwards and determine the transmitted sequence\r\n transmitted = []\r\n for i in range(N):\r\n cost = min(deltas[N-1-i])\r\n bit = deltas[N-1-i].index(cost)\r\n \r\n # print(bit)\r\n if bit % 2 == 0:\r\n transmitted.append(-1)\r\n else:\r\n transmitted.append(1) \r\n transmitted = Reverse(transmitted)\r\n return transmitted\r\n\r\ndef MLSE_4QAM(recieved, N, c):\r\n # Generate the bits, then get their symbols from the constellation map\r\n Bits = []\r\n for i in range(64):\r\n Bits.append(format(64+i, 'b')) \r\n Symbols = []\r\n i = 0\r\n while i < 64:\r\n Sym = []\r\n for j in range(6):\r\n if Bits[i][j+1:j+3] == \"00\":\r\n Sym.append((1+1j)/sqrt2)\r\n elif Bits[i][j+1:j+3] == \"01\":\r\n Sym.append((-1+1j)/sqrt2)\r\n elif Bits[i][j+1:j+3] == \"11\":\r\n Sym.append((-1-1j)/sqrt2)\r\n elif Bits[i][j+1:j+3] == \"10\":\r\n Sym.append((1-1j)/sqrt2)\r\n Symbols.append(Sym)\r\n i += 2\r\n \r\n # Get all the deltas\r\n deltas = []\r\n for i in range(N):\r\n deltas.append(findDelta4QAM(recieved, Symbols ,i, c))\r\n \r\n # print(deltas)\r\n # Using the deltas, work backwards and determine the transmitted sequence\r\n transmitted = []\r\n for i in range(N):\r\n cost = min(deltas[N-1-i])\r\n bit = deltas[N-1-i].index(cost)\r\n \r\n if bit % 4 == 0: #recieved (1+1j)/sqrt2\r\n transmitted.append((1+1j)/sqrt2)\r\n elif bit % 4 == 1: #recieved (-1+1j)/sqrt2\r\n transmitted.append((-1+1j)/sqrt2)\r\n elif bit % 4 == 2: #recieved (-1-1j)/sqrt2\r\n transmitted.append((-1-1j)/sqrt2)\r\n elif bit % 4 == 3: #recieved (1-1j)/sqrt2\r\n transmitted.append((1-1j)/sqrt2)\r\n \r\n transmitted = Reverse(transmitted)\r\n \r\n return transmitted\r\n\r\ndef MLSE_8PSK(recieved, N, c):\r\n # Generate the bits, then get their symbols from the constellation map \r\n L=3\r\n Bits = []\r\n \r\n for i in range(512):\r\n Bits.append(format(512+i, 'b')) \r\n Symbols = []\r\n i = 0\r\n Sym = []\r\n while i < 8**L:\r\n Sym = []\r\n for j in range(9):\r\n if Bits[i][j+1:j+4] == \"111\":\r\n Sym.append(1)\r\n elif Bits[i][j+1:j+4] == \"110\":\r\n Sym.append((1+1j)/sqrt2)\r\n elif Bits[i][j+1:j+4] == \"010\":\r\n Sym.append(1j)\r\n elif Bits[i][j+1:j+4] == \"011\":\r\n Sym.append((-1+1j)/sqrt2)\r\n elif Bits[i][j+1:j+4] == \"001\":\r\n Sym.append(-1)\r\n elif Bits[i][j+1:j+4] == \"000\":\r\n Sym.append((-1-1j)/sqrt2)\r\n elif Bits[i][j+1:j+4] == \"100\":\r\n 
Sym.append(-1j)\r\n elif Bits[i][j+1:j+4] == \"101\":\r\n Sym.append((1-1j)/sqrt2)\r\n Symbols.append(Sym)\r\n i += 3\r\n \r\n \r\n # Get all the deltas\r\n deltas = []\r\n for i in range(N):\r\n deltas.append(findDelta8PSK(recieved, Symbols ,i, c))\r\n \r\n # print(deltas)\r\n # Using the deltas, work backwards and determine the transmitted sequence\r\n transmitted = []\r\n for i in range(N):\r\n cost = min(deltas[N-1-i])\r\n bit = deltas[N-1-i].index(cost) \r\n \r\n if bit % 8 == 0:\r\n transmitted.append(1)\r\n elif bit % 8 == 1:\r\n transmitted.append((1+1j)/sqrt2)\r\n elif bit % 8 == 2:\r\n transmitted.append(1j)\r\n elif bit % 8 == 3:\r\n transmitted.append((-1+1j)/sqrt2)\r\n elif bit % 8 == 4:\r\n transmitted.append(-1)\r\n elif bit % 8 == 5:\r\n transmitted.append((-1-1j)/sqrt2)\r\n elif bit % 8 == 6:\r\n transmitted.append(-1j)\r\n elif bit % 8 == 7:\r\n transmitted.append((1-1j)/sqrt2)\r\n \r\n transmitted = Reverse(transmitted) \r\n return transmitted\r\n\r\ndef Graph():\r\n size=N\r\n size2=5\r\n randomValues= theorwhichman(size)\r\n bits=bits_gen(randomValues)\r\n BPSK_bits=BPSK(bits)\r\n FourQAM_bits=fourQAM(bits)\r\n EBPSK_bits=eight_PSK(bits)\r\n channels = [0.89+0.92j,0.42-0.37j,0.19+0.12j]\r\n transmitted=[]\r\n xValues = np.linspace(-4, 15, 38*size2)\r\n yvalues=[]\r\n \"\"\"#bpsk\r\n \r\n L=3\r\n M=2\r\n a,transmitted=OptMemGen(1,3)\r\n transmitted.extend(BPSK_bits)\r\n k=-4\r\n while (k<15):\r\n \r\n for i in range(size2):\r\n sigma_=1/np.sqrt(math.pow(10, (k/ 10)) * 2 * math.log2(M))\r\n c = [(random.gauss(0,sigma_)+random.gauss(0,sigma_)*1j)/np.sqrt(6),\r\n (random.gauss(0,sigma_)+random.gauss(0,sigma_)*1j)/np.sqrt(6),\r\n (random.gauss(0,sigma_)+random.gauss(0,sigma_)*1j)/np.sqrt(6)]\r\n Recieved=addnoise(transmitted,channels,L,M,k,sigma_)#[1.5,1.2,1,-1.2,-1.5,0.2]\r\n Options,memory= OptMemGen(1,L)#bpsk 1, 4Qam,8psk\r\n Detected=BPSK_MLSE(Recieved,size,c)\r\n yvalues.append(bit_errors(transmitted[L-1:],Detected))\r\n k+= 0.5\r\n plt.semilogy(xValues,yvalues, label=\"BPSK\")\r\n plt.ylabel('BER')\r\n plt.xlabel('SNR')\r\n \r\n \r\n yvalues=[]\r\n \r\n #4Qam\r\n L=3\r\n M=4\r\n a,transmitted=OptMemGen(2,3)\r\n transmitted.extend(FourQAM_bits)\r\n k=-4\r\n while (k<15):\r\n for i in range(size2):\r\n sigma_=1/np.sqrt(math.pow(10, (k/ 10)) * 2 * math.log2(M))\r\n c = [(random.gauss(0,sigma_)+random.gauss(0,sigma_)*1j)/np.sqrt(6),\r\n (random.gauss(0,sigma_)+random.gauss(0,sigma_)*1j)/np.sqrt(6),\r\n (random.gauss(0,sigma_)+random.gauss(0,sigma_)*1j)/np.sqrt(6)]\r\n Recieved=addnoise(transmitted,channels,L,M,k,sigma_)#[1.5,1.2,1,-1.2,-1.5,0.2]\r\n Options,memory= OptMemGen(2,L)#bpsk 1, 4Qam,8psk\r\n Detected=MLSE_4QAM(Recieved,int(size/2),c)\r\n yvalues.append(bit_errors(transmitted[L-1:],Detected))\r\n k+=0.5\r\n \r\n \r\n plt.semilogy(xValues,yvalues, label=\"4QAM\")\r\n plt.ylabel('BER')\r\n plt.xlabel('SNR')\r\n yvalues=[]\r\n \"\"\"\r\n L=3\r\n M=8 #BPSK=2 4Qam=4 8psk=8\r\n #8psk\r\n a,transmitted=OptMemGen(3,3)\r\n transmitted.extend(EBPSK_bits)\r\n \r\n k=-4\r\n while (k<15):\r\n for i in range(size2):\r\n sigma_=1/np.sqrt(math.pow(10, (k/ 10)) * 2 * math.log2(M))\r\n c = [(random.gauss(0,sigma_)+random.gauss(0,sigma_)*1j)/np.sqrt(6),\r\n (random.gauss(0,sigma_)+random.gauss(0,sigma_)*1j)/np.sqrt(6),\r\n (random.gauss(0,sigma_)+random.gauss(0,sigma_)*1j)/np.sqrt(6)]\r\n Recieved=addnoise(transmitted,channels,L,M,k, sigma_)#[1.5,1.2,1,-1.2,-1.5,0.2]\r\n Options,memory= OptMemGen(3,L)#bpsk 1, 4Qam,8psk\r\n Detected=MLSE_8PSK(Recieved,int(size/3),c)\r\n 
yvalues.append(bit_errors(transmitted[L-1:],Detected))\r\n k+=0.5\r\n \r\n plt.semilogy(xValues,yvalues, label=\"8PSK\")\r\n plt.ylabel('BER')\r\n plt.xlabel('SNR')\r\n plt.title(\" BER vs SNR\")\r\n plt.legend()\r\n# print(BPSK_MLSE(r,N))\r\n#\r\n#print(MLSE_4QAM(r,N))\r\n#\r\nGraph()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"MLSE_Dynamic.py","file_name":"MLSE_Dynamic.py","file_ext":"py","file_size_in_byte":16002,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"45296329","text":"import random\n\nprint(\"=\"*20 + \"Random number Game\\n\") #_________Random number Game____________#\n\nhighest = 10\nanswer = random.randint(1, highest) #Returns random int between the given range including the end numbers\n\nprint(\"Guess a number between 1 and {} or press 0 to quit: \".format(highest))\nguess = 0\n# if guess != answer:\n# if guess < answer:\n# print(\"Guess a little higher\")\n# else:\n# print(\"Guess a little lower\")\n# guess = int(input())\n# if guess == answer:\n# print(\"You win\")\n# else:\n# print(\"You have not guessed correctly\")\n# else:\n# print(\"Good!, you guessed it right the first time.\")\n\nwhile guess != answer:\n guess = int(input())\n if guess == 0:\n print(\"by, by!\")\n break\n if guess < answer:\n print(\"Please guess a little higher\")\n elif guess > answer:\n print(\"Please guess a little lower\")\n else:\n print(\"Good! you guessed it right\")","sub_path":"learning_curve/Basics/Control statements/whileLoops challenge.py","file_name":"whileLoops challenge.py","file_ext":"py","file_size_in_byte":953,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"362530522","text":"\"\"\"Input and output helpers to load in data.\n(This file will not be graded.)\n\"\"\"\n\nimport numpy as np\nimport skimage\nimport os\nfrom skimage import io\n\n\ndef read_dataset(data_txt_file, image_data_path):\n \"\"\"Read data into a Python dictionary.\n\n Args:\n data_txt_file(str): path to the data txt file.\n image_data_path(str): path to the image directory.\n\n Returns:\n data(dict): A Python dictionary with keys 'image' and 'label'.\n The value of dict['image'] is a numpy array of dimension (N,8,8,3)\n containing the loaded images.\n\n The value of dict['label'] is a numpy array of dimension (N,1)\n containing the loaded label.\n\n N is the number of examples in the data split, the exampels should\n be stored in the same order as in the txt file.\n \"\"\"\n data = {}\n data['image'] = []\n data['label'] = []\n\n indexFile = open(data_txt_file, 'r')\n for sample in indexFile:\n sample = sample.split(',')\n\n _id = sample[0]\n label = int(sample[1])\n imageData = io.imread(image_data_path+_id+'.jpg')\n\n data['label'].append(label)\n data['image'].append(imageData)\n\n data['image'] = np.array(data['image'])\n data['label'] = np.array(data['label'])\n\n return data\n","sub_path":"assignment4/mp4/utils/io_tools.py","file_name":"io_tools.py","file_ext":"py","file_size_in_byte":1303,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"61101911","text":"from rest_framework import viewsets, filters\nfrom rest_framework.response import Response\nfrom apicook.cookie.models import ShoppingRecipeList, Article, ShoppingIngredientList\nfrom apicook.cookie.serializers import ShopListSerializer\nfrom 
rest_framework.views import APIView\nfrom django.contrib.auth.models import User\nfrom rest_framework.authentication import TokenAuthentication\nfrom rest_framework.permissions import IsAuthenticated\n\n\n\"\"\"\n    Get ingredient list\n\"\"\"\nclass ShoppingListRecipe(APIView):\n    authentication_classes = [TokenAuthentication]\n    permission_classes = [IsAuthenticated]\n\n    def get(self, request, shop_id=None):\n        # Resolve the requesting user once, so both branches can use it\n        asker_user = User.objects.get(pk=request.user.id)\n        if shop_id:\n            shop = ShoppingRecipeList.objects.get(pk=shop_id, contributor=asker_user)\n\n            return self._get_list_to_buy(shop)\n        else:\n            # Get all shopping lists this user contributes to\n            formated_shops = []\n            shops = ShoppingRecipeList.objects.filter(contributor=asker_user).order_by('-created_at')\n            for shop in shops:\n                formated_shop = {\n                    'id': shop.id,\n                    'created_at': shop.created_at\n                }\n                formated_shops.append(formated_shop)\n            return Response(\n                formated_shops\n            )\n\n    def _get_list_to_buy(self, shop):\n\n        shop_list = shop.list_content.all()\n\n        formated_ingredients = []\n        for ingredient in shop_list:\n            formated_ingredient = {\n                'id': ingredient.id,\n                'name': ingredient.article.name,\n                'bought_value': ingredient.bought_value,\n                'bought_status': ingredient.bought_status,\n                'measure_type': ingredient.measure_type,\n                'quantity': ingredient.total_quantity,\n            }\n            formated_ingredients.append(formated_ingredient)\n\n        return Response(\n            formated_ingredients\n        )\n\nclass ShoppingListViewSet(viewsets.ModelViewSet):\n    \"\"\"\n    API endpoint that allows users to be viewed or edited.\n    \"\"\"\n    queryset = ShoppingIngredientList.objects.all()\n    serializer_class = ShopListSerializer\n","sub_path":"apicook/cookie/views/shoppinglist.py","file_name":"shoppinglist.py","file_ext":"py","file_size_in_byte":2228,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"557388884","text":"from django.shortcuts import render\nfrom .models import ObjectViewed\nfrom django.views.generic import View\n\nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom rest_framework import authentication, permissions\n\nfrom .utils import check_analytics_rights\n\n# Create your views here.\n\n''''''\n\n\nclass AnalyticsView(View):\n    template_name = 'geba_analytics/analytics.html'\n\n    def get(self, request, *args, **kwargs):\n        request = check_analytics_rights(request)\n\n        return render(request, self.template_name)\n\n\nclass AnalyticsData(APIView):\n\n    authentication_classes = (authentication.SessionAuthentication,)\n    permission_classes = (permissions.IsAdminUser,)\n\n    def get(self, request, format=None):\n        viewed_today, anonymous_daily_views = ObjectViewed.objects.today()\n\n        monthly_view_labels, monthly_views, monthly_anonymous_views = ObjectViewed.objects.monthly()\n\n        daily_view_labels, daily_views, daily_anonymous_views = ObjectViewed.objects.daily()\n\n        data = {\n            'today_labels': ['Viewed Today', 'Anonymous Views'],\n            'today_data': [viewed_today, anonymous_daily_views],\n\n            'monthly_labels': monthly_view_labels,\n            'monthly_data': monthly_views,\n            'monthly_anon_data': monthly_anonymous_views,\n\n            'daily_labels': daily_view_labels,\n            'daily_data': daily_views,\n            'daily_anon_data': daily_anonymous_views,\n        }\n\n        return Response(data)\n\n\n\n","sub_path":"geba_website/apps/geba_analytics/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1509,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"567177682","text":"import 
math\nimport torch.nn as nn\nfrom models.networks.utils import UnetConv3, UnetUp3_CT\nimport torch.nn.functional as F\nfrom models.networks_other import init_weights\nimport torch\nclass unet_3D(nn.Module):\n\n def __init__(self, feature_scale=4, n_classes=21, is_deconv=True, in_channels=3, is_batchnorm=True, im_dim = None):\n super(unet_3D, self).__init__()\n self.is_deconv = is_deconv\n self.in_channels = in_channels\n self.is_batchnorm = is_batchnorm\n self.feature_scale = feature_scale\n\n filters = [64, 128, 256, 512, 1024]\n filters = [int(x / self.feature_scale) for x in filters]\n\n self.im_dim = im_dim\n\n # downsampling\n self.conv1 = UnetConv3(self.in_channels, filters[0], self.is_batchnorm, kernel_size=(3,3,3), padding_size=(1,1,1))\n self.maxpool1 = nn.MaxPool3d(kernel_size=(2, 2, 2))\n\n self.conv2 = UnetConv3(filters[0], filters[1], self.is_batchnorm, kernel_size=(3,3,3), padding_size=(1,1,1))\n self.maxpool2 = nn.MaxPool3d(kernel_size=(2, 2, 2))\n\n self.conv3 = UnetConv3(filters[1], filters[2], self.is_batchnorm, kernel_size=(3,3,3), padding_size=(1,1,1))\n self.maxpool3 = nn.MaxPool3d(kernel_size=(2, 2, 2))\n\n self.conv4 = UnetConv3(filters[2], filters[3], self.is_batchnorm, kernel_size=(3,3,3), padding_size=(1,1,1))\n self.maxpool4 = nn.MaxPool3d(kernel_size=(2, 2, 2))\n\n self.center = UnetConv3(filters[3], filters[4], self.is_batchnorm, kernel_size=(3,3,3), padding_size=(1,1,1))\n\n # upsampling\n self.up_concat4 = UnetUp3_CT(filters[4], filters[3], is_batchnorm)\n self.up_concat3 = UnetUp3_CT(filters[3], filters[2], is_batchnorm)\n self.up_concat2 = UnetUp3_CT(filters[2], filters[1], is_batchnorm)\n self.up_concat1 = UnetUp3_CT(filters[1], filters[0], is_batchnorm)\n\n # final conv (without any concat)\n self.final = nn.Conv3d(filters[0], n_classes, 1)\n\n # interpolation\n self.interpolation = nn.Upsample(size = (512,512,256), mode = \"trilinear\")\n\n\n # initialise weights\n for m in self.modules():\n if isinstance(m, nn.Conv3d):\n init_weights(m, init_type='kaiming')\n elif isinstance(m, nn.BatchNorm3d):\n init_weights(m, init_type='kaiming')\n\n def forward(self, X):\n # if self.im_dim != None:\n # with torch.no_grad():\n # inputs = nn.functional.interpolate(X, self.im_dim, mode='trilinear')\n # print(\"|||| INPUT SHAPE\", inputs.shape)\n\n # print(\"||interpolate|| memory :\",convert_bytes(torch.cuda.max_memory_allocated()))\n # print(\"||interpolate|| cur memory :\", convert_bytes(torch.cuda.memory_allocated()))\n # print(\"|||| inputs size :\", convert_bytes(inputs.element_size() * inputs.nelement()))\n # del X\n # print(\"||del|| memory :\",convert_bytes(torch.cuda.max_memory_allocated()))\n # print(\"||del|| cur memory :\", convert_bytes(torch.cuda.memory_allocated()))\n\n conv1 = self.conv1(X)\n del X\n maxpool1 = self.maxpool1(conv1)\n\n conv2 = self.conv2(maxpool1)\n maxpool2 = self.maxpool2(conv2)\n\n conv3 = self.conv3(maxpool2)\n maxpool3 = self.maxpool3(conv3)\n\n conv4 = self.conv4(maxpool3)\n maxpool4 = self.maxpool4(conv4)\n\n center = self.center(maxpool4)\n up4 = self.up_concat4(conv4, center)\n up3 = self.up_concat3(conv3, up4)\n up2 = self.up_concat2(conv2, up3)\n up1 = self.up_concat1(conv1, up2)\n# \n # print(\"||down/up|| memory :\",convert_bytes(torch.cuda.max_memory_allocated()))\n # print(\"||down/up|| cur memory :\", convert_bytes(torch.cuda.memory_allocated()))\n\n del maxpool1, maxpool2, maxpool3, maxpool4, center\n del conv1,conv2,conv3,conv4\n del up4, up3, up2\n # print(\"||del maxpool|| memory 
:\",convert_bytes(torch.cuda.max_memory_allocated()))\n # print(\"||del maxpool|| cur memory :\", convert_bytes(torch.cuda.memory_allocated()))\n\n Y = self.final(up1)\n del up1\n # print(\"||final|| memory :\",convert_bytes(torch.cuda.max_memory_allocated()))\n # print(\"||final|| cur memory :\", convert_bytes(torch.cuda.memory_allocated()))\n final = self.interpolation(Y)\n del Y\n # print(\"||interpolation|| memory :\",convert_bytes(torch.cuda.max_memory_allocated()))\n # print(\"||interpolation|| cur memory :\", convert_bytes(torch.cuda.memory_allocated()))\n # exit(0)\n return final\n\n @staticmethod\n def apply_argmax_softmax(pred):\n log_p = F.softmax(pred, dim=1)\n\n return log_p\n\n\n\ndef convert_bytes(size):\n for x in ['bytes', 'KB', 'MB', 'GB', 'TB']:\n if size < 1024.0:\n return \"%3.2f %s\" % (size, x)\n size /= 1024.0\n\n return size\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"models/same_dim/unet_3D.py","file_name":"unet_3D.py","file_ext":"py","file_size_in_byte":4864,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"143202300","text":"from django.contrib import messages\nfrom django.contrib.auth.decorators import login_required\nfrom django.http import HttpResponseRedirect\nfrom django.shortcuts import get_object_or_404\nfrom django.utils.decorators import method_decorator\nfrom django.views.generic import ListView, DetailView\nfrom django.views.generic.edit import CreateView, UpdateView, DeleteView\nfrom django.contrib.messages.views import SuccessMessageMixin\nfrom django.urls import reverse_lazy\nfrom hotel.b2b.models import B2B\nfrom hotel.child_supplement_policy.forms import ChildSupplementPolicyForm\nfrom hotel.child_supplement_policy.models import ChildSupplementPolicy\nfrom hotel.inventory.models import HotelInventory\nfrom hotel.models import Hotels\n\n\nclass childSupplementListView(ListView):\n model = ChildSupplementPolicy\n template_name = 'childSupplementPolicy/index.html'\n context_object_name = 'all_items'\n\n def get_context_data(self, **kwargs):\n context = super(childSupplementListView, self).get_context_data(**kwargs)\n hotel_id = self.kwargs.get('item_id')\n all_items = ChildSupplementPolicy.objects.filter(hotel=hotel_id, hotelInventory=None)\n context.update({'hotel_id': hotel_id})\n context.update({'all_items': all_items})\n return context\n\n\n@method_decorator([login_required], name='dispatch')\nclass childSupplementCreate(SuccessMessageMixin, CreateView):\n template_name = 'childSupplementPolicy/create.html'\n model = ChildSupplementPolicy\n form_class = ChildSupplementPolicyForm\n success_message = 'Information Added Successfully'\n\n def form_valid(self, form):\n self.form = form\n form.save(commit=False)\n hotel = form.data.get('hotel')\n age_category = form.data.getlist('age_category')\n age_start = form.data.getlist('age_start')\n age_end = form.data.getlist('age_end')\n cost_status = form.data.getlist('cost_status')\n cost = form.data.getlist('cost')\n unit = form.data.getlist('unit')\n season_start_date = form.data.getlist('season_start_date')\n season_end_date = form.data.getlist('season_end_date')\n day = form.data.getlist('day')\n DayCount = form.data.getlist('DayCount')\n seperator = \",\"\n for index, age_cat in enumerate(age_category):\n instance = ChildSupplementPolicy()\n instance.hotel = Hotels.objects.get(pk=hotel)\n instance.age_category = age_cat\n instance.age_start = age_start[index]\n instance.age_end = age_end[index]\n instance.cost_status = cost_status[index]\n 
instance.cost = cost[index] or 0.0\n instance.unit = None if cost_status[index] == 'Free' else unit[index]\n instance.change_status = 'new'\n instance.season_start_date = season_start_date[index] or None\n instance.season_end_date = season_end_date[index] or None\n a = seperator.join(day[:int(DayCount[index])])\n del day[0:int(DayCount[index])]\n instance.day = a or None\n instance.save()\n\n inventories = HotelInventory.objects.filter(hotel_id=hotel)\n for inventory in inventories:\n cancel = ChildSupplementPolicy()\n cancel.hotelInventory = HotelInventory.objects.get(id=inventory.pk)\n cancel.parent = instance\n cancel.hotel = Hotels.objects.get(id=HotelInventory.objects.get(id=inventory.pk).hotel_id)\n cancel.change_status = 'assigned'\n cancel.save()\n\n return HttpResponseRedirect(self.get_success_url())\n\n def form_invalid(self, form):\n messages.warning(self.request, form.errors)\n print(form.errors)\n return self.render_to_response(self.get_context_data(object=form.data))\n\n def get_context_data(self, **kwargs):\n context = super(childSupplementCreate, self).get_context_data(**kwargs)\n return context\n\n def get_success_url(self):\n\n if self.form.data['register'] == 'Save and Exit':\n url = reverse_lazy('hotel:hotelindex', kwargs={'hotel_id': self.kwargs.get('item_id')})\n else:\n url = reverse_lazy('hotel:extraBedPolicy-create', kwargs={'item_id': self.kwargs.get('item_id')})\n return url\n # return reverse_lazy('hotel:childSupplement', kwargs={'item_id': self.kwargs.get('item_id')})\n\n def get_form_kwargs(self):\n kwargs = super(childSupplementCreate, self).get_form_kwargs()\n hotel_id = self.kwargs.get('item_id')\n kwargs['action'] = 'create'\n kwargs['hotel'] = hotel_id\n return kwargs\n\n\n@method_decorator([login_required], name='dispatch')\nclass childSupplementUpdate(SuccessMessageMixin, UpdateView):\n template_name = 'childSupplementPolicy/create.html'\n model = ChildSupplementPolicy\n form_class = ChildSupplementPolicyForm\n success_message = 'Information Updated Successfully'\n queryset = ChildSupplementPolicy.objects.all()\n\n def form_valid(self, form):\n self.hotel = form.data.get('hotel')\n form.save(commit=False)\n data = form.cleaned_data\n hotel = data.get('hotel')\n instance = ChildSupplementPolicy.objects.get(pk=self.kwargs['pk'])\n # instance.hotel = Hotels.objects.get(pk=hotel)\n instance.age_category = data.get('age_category')\n instance.age_start = data.get('age_start')\n instance.age_end = data.get('age_end')\n instance.cost_status = data.get('cost_status')\n instance.cost = data.get('cost') or 0.0\n instance.unit = None if data.get('cost_status') == 'Free' else data.get('unit')\n instance.change_status = 'new'\n instance.season_start_date = data.get('season_start_date') or None\n instance.season_end_date = data.get('season_end_date') or None\n temp = \"\"\n for d in data.get('day'):\n if temp:\n temp = temp + ',' + d\n else:\n temp = d\n instance.day = temp or None\n instance.save()\n return HttpResponseRedirect(self.get_success_url())\n\n def form_invalid(self, form):\n messages.warning(self.request, form.errors)\n return self.render_to_response(self.get_context_data(object=form.data))\n\n def get_context_data(self, **kwargs):\n context = super(childSupplementUpdate, self).get_context_data(**kwargs)\n arrayDay = self.model.objects.get(pk=self.kwargs['pk']).day\n daysOfWeekList = ['Sunday', 'Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday']\n day = {}\n objList = []\n if arrayDay:\n spilted = arrayDay.split(',')\n # spilted.pop(0)\n 
context['day'] = spilted\n\n for d in daysOfWeekList:\n day.update({'day': d})\n if d in spilted:\n day.update({'status': 'checked'})\n else:\n day.update({'status': 'unchecked'})\n objList.append(day)\n day = {}\n context['dayObj'] = objList\n return context\n\n def get_form_kwargs(self):\n kwargs = super(childSupplementUpdate, self).get_form_kwargs()\n pk = self.kwargs.get('pk')\n hotel_id = ChildSupplementPolicy.objects.get(pk=pk).hotel_id\n kwargs['hotel'] = hotel_id\n kwargs['action'] = 'edit'\n return kwargs\n\n def get_success_url(self):\n hotel_id = self.hotel\n return reverse_lazy('hotel:childSupplement', kwargs={'item_id': hotel_id})\n\n\n@method_decorator([login_required], name='dispatch')\nclass childSupplementUpdateInv(SuccessMessageMixin, UpdateView):\n template_name = 'childSupplementPolicy/create.html'\n model = ChildSupplementPolicy\n form_class = ChildSupplementPolicyForm\n success_message = 'Information Updated Successfully'\n queryset = ChildSupplementPolicy.objects.all()\n\n def form_valid(self, form):\n self.hotel = form.data.get('hotel')\n temp = form.save(commit=False)\n hotel_id = self.model.objects.get(id=self.kwargs.get('pk')).hotel_id\n previous = self.model.objects.get(id=self.kwargs.get('pk'))\n previous.save()\n data = form.cleaned_data\n instance = ChildSupplementPolicy()\n hotel = get_object_or_404(Hotels, pk=hotel_id)\n instance.hotel = hotel\n instance.hotelInventory = HotelInventory.objects.get(id=self.kwargs.get('inventory_id'))\n instance.age_category = data.get('age_category')\n instance.age_start = data.get('age_start')\n instance.age_end = data.get('age_end')\n instance.cost_status = data.get('cost_status')\n instance.cost = data.get('cost') or 0.0\n instance.unit = None if data.get('cost_status') == 'Free' else data.get('unit')\n instance.season_start_date = data.get('season_start_date') or None\n instance.season_end_date = data.get('season_end_date') or None\n temp = \"\"\n for d in data.get('day'):\n if temp:\n temp = temp + ',' + d\n else:\n temp = d\n instance.day = temp or None\n instance.change_status = 'copied'\n instance.save()\n return HttpResponseRedirect(self.get_success_url())\n\n def form_invalid(self, form):\n messages.warning(self.request, form.errors)\n return self.render_to_response(self.get_context_data(object=form.data))\n\n def get_context_data(self, **kwargs):\n context = super(childSupplementUpdateInv, self).get_context_data(**kwargs)\n parent_id = self.model.objects.get(pk=self.kwargs.get('pk')).parent_id\n if not parent_id:\n parent_id = self.model.objects.get(pk=self.kwargs.get('pk')).pk\n arrayDay = self.model.objects.get(pk=parent_id).day\n daysOfWeekList = ['Sunday', 'Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday']\n day = {}\n objList = []\n if arrayDay:\n spilted = arrayDay.split(',')\n # spilted.pop(0)\n context['day'] = spilted\n\n for d in daysOfWeekList:\n day.update({'day': d})\n if d in spilted:\n day.update({'status': 'checked'})\n else:\n day.update({'status': 'unchecked'})\n objList.append(day)\n day = {}\n context['dayObj'] = objList\n return context\n\n def get_form_kwargs(self):\n kwargs = super(childSupplementUpdateInv, self).get_form_kwargs()\n pk = self.kwargs.get('pk')\n hotel_id = ChildSupplementPolicy.objects.get(pk=pk).hotel_id\n kwargs['hotel'] = hotel_id\n kwargs['action'] = 'edit'\n return kwargs\n\n def get_success_url(self):\n return reverse_lazy('hotel:inventoryPolicies-create',\n kwargs={'model': 'childsupplement', 'operation': 'list', 'id': self.hotel,\n 'inv_id': 
self.kwargs.get('inventory_id')})\n\n def get_object(self, queryset=None):\n parent_id = self.model.objects.get(pk=self.kwargs.get('pk')).parent_id\n\n if not parent_id:\n parent_id = self.model.objects.get(pk=self.kwargs.get('pk')).pk\n\n return self.model.objects.get(pk=parent_id)\n\n\n@method_decorator([login_required], name='dispatch')\nclass childSupplementDelete(SuccessMessageMixin, DeleteView):\n model = ChildSupplementPolicy\n\n def get(self, request, *args, **kwargs):\n messages.warning(self.request, \"Successfully Deleted.\")\n return self.post(request, *args, **kwargs)\n\n def get_success_url(self):\n item_id = self.object.id\n hotel_id = ChildSupplementPolicy.objects.get(id=item_id).hotel_id\n return reverse_lazy('hotel:childSupplement', kwargs={'item_id': hotel_id})\n\n def delete(self, *args, **kwargs):\n self.object = self.get_object()\n success_url = self.get_success_url()\n self.object.delete()\n return HttpResponseRedirect(success_url)\n\n\n@method_decorator([login_required], name='dispatch')\nclass childSupplementDeleteInv(SuccessMessageMixin, DeleteView):\n model = ChildSupplementPolicy\n\n def get(self, request, *args, **kwargs):\n messages.warning(self.request, \"Successfully Deleted.\")\n return self.post(request, *args, **kwargs)\n\n def get_success_url(self):\n item_id = self.object.id\n hotel = ChildSupplementPolicy.objects.get(id=item_id).hotel_id\n inventory = ChildSupplementPolicy.objects.get(id=item_id).hotelInventory_id\n return reverse_lazy('hotel:inventoryPolicies-create',\n kwargs={'model': 'childsupplement', 'operation': 'list', 'id': hotel, 'inv_id': inventory})\n\n def delete(self, *args, **kwargs):\n self.object = self.get_object()\n success_url = self.get_success_url()\n self.object.delete()\n return HttpResponseRedirect(success_url)\n","sub_path":"hotel/child_supplement_policy/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":12934,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"633090707","text":"import io\nimport os\n\nimport flask\n\nfrom web import app\nfrom player.grab_lyrics import *\nfrom player.constant.tag import *\n\n\nWEB_CONTROLLER = None\n\ndef setup_web_controller(lib, x_accel_enabled):\n WEB_CONTROLLER = PlayerController(lib, x_accel_enabled)\n\n app.add_url_rule(\"/test\", view_func=WEB_CONTROLLER.test, methods=[\"GET\"])\n app.add_url_rule(\"/library\", view_func=WEB_CONTROLLER.get_library, methods=[\"GET\"])\n app.add_url_rule(\"//artwork\", view_func=WEB_CONTROLLER.get_art_bytes, methods=[\"GET\"])\n app.add_url_rule(\"/song//stream\", view_func=WEB_CONTROLLER.get_track_bytes, methods=[\"GET\"])\n app.add_url_rule(\"/song//lyrics\", view_func=WEB_CONTROLLER.get_track_lyric, methods=[\"GET\"])\n\nclass PlayerController:\n def __init__(self, lib, x_accel_enabled):\n self.lib = lib\n self.x_accel_enabled = x_accel_enabled\n\n # --------------------------- APIs\n\n def test(self):\n print(self.lib)\n return \"test\"\n\n def get_library(self):\n return self.lib.get_library_json()\n\n def get_art_bytes(self, track_id):\n artwork = io.BytesIO(self.lib.get_artwork_bytes_with_id(track_id))\n return flask.send_file(artwork, mimetype=\"image/jpeg\")\n\n def get_track_bytes(self, track_id):\n track = self.lib.get_track(track_id)\n if track is None:\n flask.abort(404)\n\n if self.x_accel_enabled:\n prefix = os.path.commonprefix([self.lib.library_path, track.get_path()])\n path = \"/stream\" + track.get_path()[len(prefix):]\n\n resp = flask.make_response()\n 
resp.headers[\"Content-Type\"] = track.get_mimetype()\n resp.headers[\"X-Accel-Redirect\"] = path.encode(\"utf-8\")\n return resp\n else:\n dirname = os.path.dirname(track.get_path())\n basename = os.path.basename(track.get_path())\n return flask.send_from_directory(dirname, basename)\n\n def get_track_lyric(self, track_id):\n track = self.lib.get_track(track_id)\n if track is None:\n flask.abort(404)\n tag = track.get_tag()\n return json.dumps(grab_lyric(tag[TAG_ARTIST], tag[TAG_TITLE]))\n","sub_path":"web/controller.py","file_name":"controller.py","file_ext":"py","file_size_in_byte":2190,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"13076200","text":"#!/usr/bin/env python\n\"\"\"\nScript Header\n\n$Id: US27819\n\nCopyright (c) 2016-2017 Cisco Systems, Inc.\n\nName:\n cmCC27819_3pcc_BS_IOT_Interop_IPv6_286_Network_Based_Three_Way_Call.py\n\nPurpose:\n This test case verifies the DUT interoperability with BroadWorks\n for Network-based Three-Way Call with IPv6 only mode.\n\nAuthor:\n Vishnu Prasad B(vishnpra@cisco.com)\n\nReferences:\n BW-SIPPhone-InteropTestPlan-R22.0\n\nDescription:\n Originate a call from BroadWorks User A to the DUT.Answer the call.\n From the DUT, initiate a second call to BroadWorks User B.\n Answer the call.Initiate a conference from the DUT to create a\n three-way call.Release the calls, with IPv6 only mode.\n\nTopology:\n 3 3pcc phones\n\nPass/Fail Criteria:\n Pass - DUT sends an INVITE request to the conferenceURI provisioned\n on BroadWorks.\n\nTest Steps:\n 1. Originate a call from BroadWorks User A to the DUT\n 2. DUT answer the call\n 3. From the DUT, initiate a second call to BroadWorks User B\n 4. BroadWorks User B answer the call\n 5. Initiate a conference from the DUT to create a three-way call\n 6. Release the calls\n Verify:\n 1. BroadWorks User A dials the DUT\n 1. DUT is alerted.\n 2.BroadWorks User A receives audible ringback.\n 2. DUT answers the call.\n 1. Two-way voice path is established.\n 3. DUT dials BroadWorks User B.\n 1. BroadWorks User B is alerted.\n 2. DUT receives audible ring back.\n 4. BroadWorks User B answers the call.\n 1. Two-way voice path is established between the DUT and\n BroadWorks User B.\n 5. DUT conferences all parties.\n 1. Each of the three parties can hear the other two parties\n in the conference.\n 6. The DUT hangs up.\n 1. All calls are released.\n 7. Verify the SIP signaling from the DUT.\n 1. DUT sends an INVITE request to the conferenceURI provisioned\n on BroadWorks.\n 2. 
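The web/controller.py record above offloads media delivery to nginx via an X-Accel-Redirect header when x_accel_enabled is set. A minimal standalone sketch of that pattern follows; the route, MIME type, and the internal /stream/ location are hypothetical illustrations, not taken from the record, and nginx must be configured to map that internal location to the media root:

    import flask

    app = flask.Flask(__name__)

    @app.route("/song/<name>/stream")
    def stream(name):
        # Return an empty-bodied response; nginx intercepts the
        # X-Accel-Redirect header and serves the file itself from
        # its internal /stream/ location.
        resp = flask.make_response()
        resp.headers["Content-Type"] = "audio/mpeg"
        resp.headers["X-Accel-Redirect"] = "/stream/" + name
        return resp

The design choice here is that the application only performs authorization and path mapping, while the byte transfer is handled by nginx's sendfile path, which is why the Flask response body stays empty.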
REFER request to the user: The conference creator sends the\n REFER request inside the dialog associated with the creators call\n with the user.The Refer-To header must have the conference ID as\n its URI.\n\n Notes:\n\n Known Bugs:\n\"\"\"\n\nimport tng\nimport logging\nfrom tng.api import concurrent\nfrom tng_sl.device.endpoint.synergylite.synergylite_3pcc_extended\\\n import wait_for_ccapi_call_states\nfrom tng_sl.contrib.mpp.phone_line_reg_helper import PhoneLineRegHelper\nfrom tng_sl.contrib.mpp.phone_line_reg_helper import PhoneConfigHelper\nfrom tng_sl.contrib.mpp.tshark_helper import TsharkHelper\nfrom tng_sl.contrib.setup_helper import SetupHelpersTestCase\n\nlog = logging.getLogger('IPv6_Network_Based_Three_Way_Call')\n\n\nclass IPv6NetworkBasedThreeWayCall(SetupHelpersTestCase, tng.api.TestCase):\n\n helpers = (\n PhoneConfigHelper, PhoneLineRegHelper, TsharkHelper)\n helper_num_devices = 3\n\n def setUp(self):\n log.info(\"Start of setUp\")\n\n self.conference_url = self.toolkit.get_test_env_info(\n section='phone',\n parameter_name=\"conference_url\")\n\n self.serverproxy = self.toolkit.get_test_env_info(\n section='bsoft', parameter_name=\"as_ip_addr6\")\n concurrent([\n self.oPhone1.ui.set_param_value,\n self.oPhone2.ui.set_param_value,\n self.oPhone3.ui.set_param_value],\n {'IP Mode': 'IPv6 Only', 'SIP IP Preference': 'IPv6'})\n self.domainproxy = self.phone_data['proxy']\n\n log.info(\"End of setUp\")\n\n def test_ipv6_network_based_three_way_call(self):\n log.info(\"Start of test_duts_network_based_three_way_call\")\n\n log.info('Set and verify Conference Bridge URL on DUT')\n # Set Conference Bridge URL in Call Feature settings\n self.oPhone2.ui.set_web_parameter_http(\n Call_Feature_Settings=[\n 'Ext 1', 'Conference Bridge URL',\n str(self.conference_url)])\n\n # verify Conference Bridge URL in Call Feature settings\n call_feature = self.oPhone2.ui.get_web_parameter_http(\n 'Ext 1', 'Conference Bridge URL')\n self.assertEqual(call_feature, self.conference_url)\n\n # Set Refer-To Target Contact in SIP settings\n self.oPhone2.ui.set_web_parameter_http(\n SIP_Settings=['Ext 1', 'Refer-To Target Contact', 1])\n\n # verify Refer-To Target Contact is set in SIP Settings\n refer_to_target = self.oPhone2.ui.get_web_parameter_http(\n 'Ext 1', 'Refer-To Target Contact')\n self.assertEqual('1', refer_to_target[0])\n\n log.info('Start tshark on linux')\n dut_ip = self.oPhone2.ui.get_param_value(\"Current IP_IPV6\")\n filter_cmd = 'port sip and host {}'.format(dut_ip)\n capture_file = self.tshark.tshark_start(filter_cmd)\n\n log.info(\"Phone1 dial Phone2: {}\".format(self.user_id2))\n self.oPhone1.ccapi.dial('null', self.user_id2, '', 1, 0, 1)\n # check phone1 ringout status and Phone2 ringing status\n wait_for_ccapi_call_states(\n (self.oPhone1, self.oPhone2), (\"PROCEEDING\", \"RINGING\"),\n timeout=30)\n\n log.info(\"Phone2 receive the call\")\n self.oPhone2.ccapi.accept(\"0000\")\n # check two phones are in connected status\n wait_for_ccapi_call_states(\n (self.oPhone1, self.oPhone2), (\"CONNECTED\", \"CONNECTED\"))\n\n # Conference call from Phone2\n log.info(\"Phone2 press conference\")\n self.oPhone2.ccapi.conference(\"0000\")\n\n log.info(\"Phone2 dial Phone3's number: {}\".format(self.user_id3))\n self.oPhone2.dial_digits('0001', self.user_id3)\n wait_for_ccapi_call_states(\n (self.oPhone2, self.oPhone2, self.oPhone3),\n (\"PROCEEDING\", \"HOLD\", \"RINGING\"), (1, 1, 1), (1, 0, 0),\n timeout=30)\n\n log.info(\"Phone3 receive the call\")\n 
self.oPhone3.ccapi.accept(\"0000\")\n wait_for_ccapi_call_states(\n (self.oPhone2, self.oPhone2, self.oPhone3),\n (\"CONNECTED\", \"HOLD\", \"CONNECTED\"), (1, 1, 1), (1, 0, 0),\n timeout=30)\n\n log.info(\"Phone2 press conference and all three are connected\")\n self.oPhone2.ccapi.conference(\"0001\")\n wait_for_ccapi_call_states(\n self.devices, (\"CONNECTED\", \"CONNECTED\", \"CONNECTED\"), timeout=30)\n\n log.info(\"Disconnect call\")\n self.oPhone2.ccapi.hangUp('0000')\n # check phone1 and phone2's and phone3's status are idle\n wait_for_ccapi_call_states(\n self.devices, (\"IDLE\", \"IDLE\", \"IDLE\"))\n\n log.info('Stop tshark on linux')\n self.tshark.tshark_stop()\n\n # analyse tshark capture\n log.info('Start tshark analysis on linux')\n received_msgs = self.tshark.tshark_read(\n file=capture_file, protocol='sip')\n expected_msgs = dict()\n expected_msgs['frame_src'] = [dut_ip]\n expected_msgs['frame_dst'] = [self.serverproxy]\n expected_msgs['frame_proto'] = ['SIP/SDP']\n expected_msgs['frame_data'] = ['Request: INVITE sip:{}'.format(\n self.conference_url)]\n result_src = self.tshark.tshark_call_flow(\n expected=expected_msgs, received=received_msgs)\n self.assertTrue(result_src, 'Basic Call flow not match')\n\n # DUT sends an INVITE request to the conferenceURI provisioned\n # on BroadWorks.\n cseq, call_id = self.tshark.tshark_get_string_cseq_call_id(\n capture_file, dut_ip, self.serverproxy,\n search_string=self.conference_url, method='Request: INVITE',\n protocol='sip', header='To')\n\n # The conference ID is supplied in the contact header of the\n # 200 OK to the INVITE to the conference URI.\n log.info(\n \"Verify the conference ID in the contact header of the \"\n \"200 OK to the INVITE to the conference URI.\")\n self.tshark.tshark_check_string_in_message(\n capture_file, '200 OK', 'sip:{}'.format(self.conference_url),\n self.serverproxy, dut_ip, call_id=call_id, header='Contact')\n\n # DUT REFERs both parties to the conference\n log.info(\n \"The conference creator sends the REFER request inside\"\n \"the dialog associated with the creators call with the\"\n \"Broadworks UserA.\")\n cseq, call_id = self.tshark.tshark_get_method_cseq_call_id(\n capture_file, dut_ip, self.serverproxy,\n protocol='sip', method='Request: REFER', call_number=1)\n\n self.tshark.tshark_check_string_in_message(\n capture_file, 'Request: REFER', \"{}\".format(self.user_id1[-4:]),\n dut_ip, self.serverproxy, call_id=call_id, header='To')\n\n log.info(\"The Refer-To header must have the conference ID as its URI\")\n self.tshark.tshark_check_string_in_message(\n capture_file, 'Request: REFER', 'sip:conference@{}'.format(\n self.domainproxy), dut_ip, self.serverproxy, call_id=call_id,\n header='Refer-To')\n\n log.info(\n \"The conference creator sends the REFER request inside\"\n \"the dialog associated with the creators call with the\"\n \"Broadworks UserB.\")\n cseq, call_id = self.tshark.tshark_get_method_cseq_call_id(\n capture_file, dut_ip, self.serverproxy, method='Request: REFER',\n protocol='sip', call_number=2)\n\n self.tshark.tshark_check_string_in_message(\n capture_file, 'Request: REFER', \"{}\".format(self.user_id3[-4:]),\n dut_ip, self.serverproxy, call_id=call_id, header='To')\n\n log.info(\"The Refer-To header must have the conference ID as its URI\")\n self.tshark.tshark_check_string_in_message(\n capture_file, 'Request: REFER', 'sip:conference@{}'.format(\n self.domainproxy), dut_ip, self.serverproxy, call_id=call_id,\n header='Refer-To')\n\n log.info(\n 'Tshark analysis 
successful'\n 'for test_ipv6_network_based_three_way_call')\n log.info(\"End of test_ipv6_network_based_three_way_call\")\n\n\n# this is called by 'tng run'\ndef main():\n tng.api.runner()\n\nif __name__ == '__main__':\n tng.run(main)\n","sub_path":"common/IOT/Broadsoft_Interop/section_14/cmCC27819_3pcc_BS_IOT_Interop_IPv6_286_Network_Based_Three_Way_Call.py","file_name":"cmCC27819_3pcc_BS_IOT_Interop_IPv6_286_Network_Based_Three_Way_Call.py","file_ext":"py","file_size_in_byte":10171,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"539259696","text":"# Plot scatterplots and histograms in a loop and\n# save them into PNG files.\n#\n#\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\n\ndef pltpair(nm1,nm2):\n fig = plt.figure(figsize=(15, 15/1.618))\n ax = fig.add_subplot(111)\n ax.set_xlabel(nm1)\n ax.set_ylabel(nm2)\n ax.plot(data[nm1][1:n], data[nm2][1:n], 'o')\n plt.savefig('./img/splt_' + nm1 + '_vs_' + nm2 + '.PNG')\n\n\ndef plthist(num, name, df, maintext=None, fprefix=None):\n fig = plt.figure(num, figsize=(15, 15/1.618))\n ax = fig.add_subplot(111)\n if maintext is None:\n ax.set_title(name)\n else:\n ax.set_title(maintext)\n ax.set_xlabel('values')\n ax.set_ylabel('frequency')\n n, bins, patches = plt.hist(df[name], 50, normed=1, facecolor='green', alpha=0.75)\n if fprefix is None:\n fname = \"./img/hist_\" + name + \".PNG\"\n else:\n fname = \"./img/\" + fprefix + \"_\" + name + \".PNG\"\n plt.savefig(fname)\n plt.close()\n","sub_path":"Python/DataAnalysis/fn.plot.scatter.hist-20170309.py","file_name":"fn.plot.scatter.hist-20170309.py","file_ext":"py","file_size_in_byte":942,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"34066560","text":"# coding=UTF-8\nfrom flask import Flask, jsonify\nfrom flask import make_response\nfrom flask import request\nfrom show.com_parse import _comService\nfrom show.const import const\nfrom show.hanlp import segment ,demo_dependency_parser\nimport json\nimport logging\n\nfrom show.nlp_instance import nlp_pattern_all, segment_instance, ner_instance, dp_instance\n\napp = Flask(__name__)\napp.config['JSON_AS_ASCII'] = False\napp.config\n'''\n接口入口\n'''\n\n#条款名称成分识别\n@app.route('/api/v1/product_analysis', methods=['GET','POST'])\ndef product_analysis():\n if request.method == 'POST' :\n try:\n logging.info (request.data)\n dict = json.loads(str(request.data,'utf-8'))\n except ValueError:\n return jsonify({'app': {'error': const.ERROR_PARAM }, \"code\": \"0x1400\"}), 200\n\n text = dict.get(\"text\")\n if not text.strip():\n return jsonify({'app': {'result':'[]'}, \"code\":\"0x1200\"}), 200\n\n comNerTerms = _comService(text)\n comNerTermsStr = []\n typeFlag = True\n for comNerTerm in comNerTerms:\n type = comNerTerm.typeStr\n if type == 'D':\n typeFlag = False\n comNerTermsStr.append({'entity':comNerTerm.word,'type':comNerTerm.typeStr})\n if typeFlag:\n comNerTermsStr.append({'entity': const.ENTITY_DES , 'type': 'D'})\n return json.dumps({'app': {'result':comNerTermsStr}, \"code\":\"0x1200\"},ensure_ascii=False), 200, {'Content-Type': 'text/json charset=utf-8'}\n\n else:\n return json.dumps({'app':{'error': const.ERROR_REQ_TYPE}, \"code\":\"0x1100\"},ensure_ascii = False), 200\n\n# 中文分词\n@app.route('/api/v1/segment', methods=['GET','POST'])\ndef segment():\n if request.method == 'POST' :\n try:\n logging.info (request.data)\n dict = json.loads(str(request.data,'utf-8'))\n except ValueError:\n return jsonify({'app': {'error': 
const.ERROR_PARAM }, \"code\": \"0x1400\"}), 200\n\n text = dict.get(\"text\")\n pos_whether = dict.get(\"pos\")\n if not text.strip():\n return jsonify({'app': {'result':'[]'}, \"code\":\"0x1200\"}), 200\n\n segs = segment_instance(text,pos_whether)\n ret_json = json.dumps({'app': {'result':segs}, \"code\":\"0x1200\"},ensure_ascii=False)\n return ret_json, 200\n\n else:\n return json.dumps({'app':{'error': const.ERROR_REQ_TYPE}, \"code\":\"0x1100\"},ensure_ascii = False), 200\n\n# 依存句法分析\n@app.route('/api/v1/dep', methods=['GET','POST'])\ndef dependency():\n if request.method == 'POST' :\n try:\n logging.info (request.data)\n dict = json.loads(str(request.data,'utf-8'))\n except ValueError:\n return jsonify({'app': {'error': const.ERROR_PARAM }, \"code\": \"0x1400\"}), 200\n\n text = dict.get(\"text\")\n if not text.strip():\n return jsonify({'app': {'result':'[]'}, \"code\":\"0x1200\"}), 200\n\n dep = dp_instance(text)\n return json.dumps({'app': {'result':dep}, \"code\":\"0x1200\"},ensure_ascii=False), 200\n\n else:\n return json.dumps({'app':{'error': const.ERROR_REQ_TYPE}, \"code\":\"0x1100\"},ensure_ascii = False), 200\n\n# 实体识别\n@app.route('/api/v1/ner', methods=['GET', 'POST'])\ndef ner():\n if request.method == 'POST':\n try:\n logging.info(request.data)\n dict = json.loads(str(request.data, 'utf-8'))\n except ValueError:\n return jsonify({'app': {'error': const.ERROR_PARAM}, \"code\": \"0x1400\"}), 200\n\n text = dict.get(\"text\")\n if not text.strip():\n return jsonify({'app': {'result': '[]'}, \"code\": \"0x1200\"}), 200\n\n ners = ner_instance(text)\n return json.dumps({'app': {'result': ners}, \"code\": \"0x1200\"}, ensure_ascii=False), 200\n\n else:\n return json.dumps({'app': {'error': const.ERROR_REQ_TYPE}, \"code\": \"0x1100\"}, ensure_ascii=False), 200\n\n# 总体调度\n@app.route('/api/v1/nlp', methods=['GET','POST'])\ndef nlp():\n if request.method == 'POST':\n try:\n logging.info (request.data)\n dict = json.loads(str(request.data,'utf-8'))\n except ValueError:\n return json.dumps({'app': {'error': const.ERROR_PARAM }, \"code\": \"0x1400\"}, ensure_ascii=False), 200\n\n text = dict.get(\"text\")\n if not text.strip():\n return jsonify({'app': {'result':'[]'}, \"code\":\"0x1200\"}), 200\n\n ret_arr = nlp_pattern_all(text)\n return json.dumps({'app': {'result':ret_arr}, \"code\":\"0x1200\"}, ensure_ascii=False), 200\n\n else:\n return json.dumps({'app':{'error': const.ERROR_REQ_TYPE}, \"code\":\"0x1100\"}, ensure_ascii = False), 200\n\n@app.errorhandler(404)\ndef not_found(error):\n return make_response(jsonify({'app':{'error': const.ERROR_NOT_FOUND }, \"code\":\"0x1400\"}), 404)\n\nif __name__ == '__main__':\n url = '0.0.0.0'\n port = 5000\n app.run(url, port=port, debug=False)","sub_path":"product_analysis/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4915,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"535531181","text":"\n\nimport sys\n\n\ndef input(): return sys.stdin.readline().rstrip()\n\n\nN = float(input())\nrnd = -1\ne = 1\nwhile True:\n if N < 10:\n break\n # 96\n if N > (10**e):\n N = round(N+0.1, rnd)\n e += 1\n rnd -= 1\n else:\n break\nprint(int(N))\n","sub_path":"BOJ_Bronze/2033.py","file_name":"2033.py","file_ext":"py","file_size_in_byte":271,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} 
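The product_analysis Flask service above parses the raw request body with json.loads and wraps every result in an app/code envelope. A client-side sketch of calling its /api/v1/segment endpoint, assuming the service is reachable at the host and port from the record's app.run('0.0.0.0', port=5000) call and that the requests library is available (an assumption, since the service itself does not use it):

    import json
    import requests  # assumed dependency for this client sketch

    payload = {"text": "some text to segment", "pos": True}
    resp = requests.post("http://127.0.0.1:5000/api/v1/segment",
                         data=json.dumps(payload).encode("utf-8"))
    # Expected success shape per the service code:
    # {"app": {"result": [...]}, "code": "0x1200"}
    print(resp.json())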
+{"seq_id":"569138302","text":"##############################################################################\n##############################################################################\n##\n## Program : Craps Statistics\n##\n## Author : Richard E. Pattis\n## Computer Science Department\n## University of California, Irvine\n## Irvine, CA 92617-3435\n## e-mail: pattis@ics.uci.edu\n##\n## Maintainer : Author\n##\n##\n## Description:\n##\n## Craps prompts the user to enter the number of games to play. It then\n## plays (simulates) that many games of craps, keeping the win/loss\n## information. At the end, it displays these statistics.\n##\n## Craps is a dice game. The thrower loses if he/she immediately rolls a 2\n## (snake eyes), 3, or 12 (box cars). The thrower wins if he/she immediately\n## rolls a 7 or 11. If the thrower does not immediately win or lose, the number\n## thrown becomes the 'point'. Afterwards, the thrower tries to make his/her\n## point by rolling that same value again (and winning) before rolling a 7\n## (and losing). When trying to make his/her point, the thrower keeps rolling\n## if he/she rolls any number other than the point or 7.\n##\n##\n## Known Bugs : None\n##\n## Future Plans : None\n##\n## Program History:\n## 8/ 8/00: R. Pattis - Operational in C++\n## 5/15/01: R. Pattis - Translated to Java\n## 5/16/01: R. Pattis - Changed identifiers to conform to Java style\n## 3/ 6/13: R. Pattis - Converted to Python\n##\n##############################################################################\n##############################################################################\n\n\n\nfrom goody import irange\nfrom dice import Dice\nfrom stopwatch import Stopwatch\nimport prompt\nimport predicate\n\n \nwin_count = 0 #Win/Lose/Dice Statistics\nlose_count = 0\n\ndice = Dice([6,6])\ngame_timer = Stopwatch()\n\ngames_to_play = prompt.for_int('Enter # of games to play', is_legal=predicate.is_positive, error_message='an int, but not > 0')\n\ngame_timer.start()\ndice.standard_rolls_for_debugging()\nfor game in irange(1, games_to_play): #Each iteration plays one game\n first_roll = dice.roll().pip_sum() #Roll the dice and record their pip sum\n\n #Based on firstRoll, decide how to continue:\n # immediate win/loss or trying to make point\n if first_roll == 7 or first_roll == 11:\n win_count += 1 #Win on the first roll with 7 or 11\n\n elif first_roll == 2 or first_roll == 3 or first_roll == 12:\n lose_count += 1 #Lose on the first roll with 2, 3, or 12\n\n else: #Try to make the point as the game continues\n point = first_roll #point will never store 7, 11, 2, 3, or 12\n\n while(True): #Roll until roll point (win) or 7 (lose)\n roll = dice.roll().pip_sum()\n\n if roll == point: #If made the point first\n win_count += 1 #...win and this game is over\n break\n elif roll == 7: #If roll a 7 first\n lose_count+= 1 #...lose and this game is over\n break\ngame_timer.stop();\n\n\n##Display Statistics\n\nprint(' Raw Wins/Lose =', '{:,}'.format(win_count), '/', '{:,}'.format(lose_count))\nprint(' % Wins/Lose =', 100.0*win_count/(win_count+lose_count), '/', 100.0*lose_count/(win_count+lose_count))\nprint()\n\nprint(' Dice Thrown =', '{:,}'.format(dice.rolls()))\nprint(' Avg Dice/game =', dice.rolls()/games_to_play)\nprint('dice rolls', dice.rolls())\n\nprint(' Elapsed Time =' , game_timer.read(), 'seconds')\nprint(' Speed =', '{:,}'.format(int(games_to_play/game_timer.read())), 
'games/second')\n","sub_path":"ICS33/project0c/craps/craps.py","file_name":"craps.py","file_ext":"py","file_size_in_byte":3849,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"214884791","text":"import json\nimport psycopg2\n\nfrom data.config import yaposhkaDbName, mainDbName, dbUserName, dbPassword, HOSTDB\nfrom service.functions.logger import logger\nfrom service.functions.currency_function import getIndicatedCurrency\n\n\ndef create_connection():\n    return Database()\n\n\ndef create_yaposhka():\n    return Database(yaposhkaDbName)\n\n\nclass Database:\n\n    def __init__(self, dbName=mainDbName):\n        self.connection = psycopg2.connect(\n            database=dbName,\n            user=dbUserName,\n            password=dbPassword,\n            host=HOSTDB,\n            port='5432')\n        self.cursor = self.connection.cursor()\n        if HOSTDB == '127.0.0.1':\n            self.cursor.execute(\"set LC_TIME = 'ru_RU.KOI8-R'\")\n        logger.info(\"Connected to database\")\n\n    def getFromDB(self, tableName, what, where, join='', orderBy='', groupBy=''):\n        self.cursor.execute(\n            f\"SELECT {what.upper()} \"\n            f\"FROM {tableName.upper()} \"\n            f\"{join} \"\n            f\"WHERE {where} \"\n            f\"{groupBy} \"\n            f\"{orderBy}\"\n        )\n        return self.cursor.fetchall()\n\n    def getColumnsNames(self, tableName: str):\n        self.cursor.execute(f\"select column_name from information_schema.columns \"\n                            f\"where table_name='{tableName.lower()}'\")\n        header = ''\n        for elem in self.cursor.fetchall():\n            header = f\"{header},{elem[0]}\"\n        return header[1:]\n\n    def insert(self, tableName: str, values: str):\n        headers = self.getColumnsNames(tableName)\n        quantity = headers.split(',').__len__()\n        if values.split(',').__len__() == quantity:\n            self.cursor.execute(f\"INSERT INTO {tableName} ({headers}) VALUES ({values})\")\n            logger.info(f\"Data {values} successfully inserted to {tableName}\")\n        else:\n            logger.info(f\"Total column count ({quantity}) \"\n                        f\"isn't equal to the number of inserted values ({values.split(',').__len__()}). \"\n                        f\"Data aren't inserted\")\n\n    def update(self, tableName, what, how, condition):\n        self.cursor.execute(f\"UPDATE {tableName} SET {what} = {how} WHERE {condition}\")\n        self.connection.commit()\n\n    def isExist(self, field, table, condition, like=False):\n        if like:\n            eq = \"LIKE\"\n        else:\n            eq = \"=\"\n        self.cursor.execute(f\"SELECT EXISTS(SELECT {field} FROM {table} WHERE {field} {eq} {condition})\")\n        result = self.cursor.fetchall()[0]\n        return result[0]\n\n    def isTableExist(self, tableName: str):\n        self.cursor.execute(f\"SELECT count(table_name) FROM INFORMATION_SCHEMA.TABLES \"\n                            f\"WHERE TABLE_NAME = '{tableName.lower()}'\")\n        result = self.cursor.fetchall()[0][0]\n        if result > 0:\n            return True\n        else:\n            return False\n\n    def createTable(self, tableName: str, header: str):\n        self.cursor.execute(f\"CREATE TABLE {tableName} ({header})\")\n        self.connection.commit()\n        logger.info(f\"Table {tableName} was successfully created\")\n\n    def getCurrencyList(self, onlyName=False):\n        if onlyName:\n            what = \"name\"\n        else:\n            what = '*'\n        self.cursor.execute(f\"SELECT {what} FROM CURRENCY WHERE 1=1\")\n        result = self.cursor.fetchall()\n        if onlyName:\n            data = []\n        else:\n            data = {}\n        for i in range(result.__len__()):\n            if onlyName:\n                data.append(result[i][0])\n            else:\n                data[result[i][0]] = {\n                    \"FullName\": result[i][1],\n                    \"Emoji\": result[i][2]\n                }\n        return data\n\n    def deleteUser(self, userID):\n        self.cursor.execute(f\"DELETE FROM USERS WHERE ID={userID}\")\n        self.connection.commit()\n        logger.info(f\"User with id {userID} was deleted\")\n\n    def addUser(self, 
idUser, currency='UAH',\n                mainCurrency=['EUR', 'USD'], location='NULL', currencyPriority=None, dayForShow=10, notify=None):\n        if currencyPriority is None:\n            currencyPriority = getIndicatedCurrency()\n        if notify is None:\n            notify = 'null'\n        query = f\"INSERT INTO USERS (ID, LOCATION, CURRENCY, CURRENCYPRIORITY, MAINCURRENCY, \" \\\n                f\"DAYFORSHOW, NOTIFY) VALUES ({idUser}, {location}, '{currency}', \" \\\n                f\"'{json.dumps(currencyPriority)}'::json, ARRAY{mainCurrency},{dayForShow}, {notify})\"\n        self.cursor.execute(query)\n        self.connection.commit()\n\n        logger.info(f\"User with ID {idUser} was created\")\n        if notify != 'null':\n            notify = f\"{notify.hour}:{notify.minute}\"\n        data = {\n            'id': idUser,\n            'location': location,\n            'currency': currency,\n            'currencyPriority': currencyPriority,\n            'mainCurrency': mainCurrency,\n            'dayForShow': dayForShow,\n            'notify': notify\n        }\n        return data\n","sub_path":"classes/sql.py","file_name":"sql.py","file_ext":"py","file_size_in_byte":4999,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"443244972","text":"def words(input_string):\n    \"\"\"Counts the words in a string and the number of occurrences of each.\n    :param input_string:\n    :return: dictionary\n    \"\"\"\n    dictionary = {}\n    string = input_string.split()\n    for word in string:\n        # store digit-only words under their int value, everything else under the str\n        if word.isdigit():\n            word = int(word)\n        if word not in dictionary:\n            dictionary[word] = 1\n        else:\n            dictionary[word] += 1\n    return dictionary\n\n# print(words(\"check this 123 and me\"))\n","sub_path":"boot-camp-18-day-3/word_count.py","file_name":"word_count.py","file_ext":"py","file_size_in_byte":566,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"583162157","text":"#!/usr/bin/env python\n\nimport subprocess\nimport os\nimport time\n\n'''\nMonitors and closes any incoming ssh attempts. It will preserve any that are opened before this is run.\nShould be easy enough to add in ftp and mysql and make this an overall defence. Though it isn't really effective against someone that knows what they are doing.\nTURN THIS INTO A SERVICE\n'''\n\n\n#The three main lists.\nprocesses = []\nprotected = []\ntokill = []\n\n#Gets the open ssh processes. 
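Runs 'ps aux' and keeps only the rows for sshd sessions, skipping the grep process itself.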
Should refactor this to include removeEmpty.\ndef gatherInfo(listin):\n    getprocesses = subprocess.Popen(\"ps aux | grep sshd:\", shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n    for line in getprocesses.stdout.readlines():\n        if \"grep\" not in line:\n            listin.append(line.rstrip('\\n').split(' '))\n\n\n#gatherInfo generates some nasty stuff so this gets rid of that crap.\ndef removeEmpty(listin):\n    i = 0\n    while i < len(listin):\n        if listin[i] == '':\n            del listin[i]\n        else:\n            i += 1\n    return listin\n\n#Goes through the stuff gatherInfo spits out and adds it to the list that is given as a parameter.\ndef triageProcesses(listin):\n    for process in processes:\n        process = removeEmpty(process)\n        if process[1] in protected:\n            pass\n        else:\n            listin.append(process[1])\n\n#Kill all processes in the list and then empty it so it doesn't try to kill non-existent processes.\ndef killProcesses():\n    for process in tokill:\n        os.system(\"sudo kill \" + process)\n    del tokill[:]\n    \n#Protect the connections that were initially open.\ngatherInfo(processes)\ntriageProcesses(protected)\n\n\n#Close all other connections forever.\nwhile True:\n    gatherInfo(processes)\n    triageProcesses(tokill)\n    killProcesses()\n    time.sleep(.5)","sub_path":"Defence/BlockSSH.py","file_name":"BlockSSH.py","file_ext":"py","file_size_in_byte":1787,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"612215021","text":"import functools\n\nclass Solution1:\n    def longestCommonSubsequence(self, text1: str, text2: str) -> int:\n        if not text1 or not text2:return 0\n        m,n,t1,t2=len(text1),len(text2),list(text1),list(text2)\n        dp=[[0]*n for i in range(m)]\n        dp[0][0]=1 if t1[0]==t2[0] else 0\n        for j in range(1,n):\n            dp[0][j]=dp[0][j-1] if dp[0][j-1] else (1 if t1[0]==t2[j] else 0)\n        for i in range(m):\n            tmp=1 if t1[i]==t2[0] else 0\n            dp[i][0]=dp[i-1][0] if dp[i-1][0] else (1 if t1[i]==t2[0] else 0)\n        for i in range(1,m):\n            for j in range(1,n):\n                if t1[i]==t2[j]:\n                    dp[i][j] =dp[i-1][j-1] + 1\n                else:\n                    dp[i][j]=dp[i][j-1] if dp[i][j-1]>dp[i-1][j] else dp[i-1][j]\n        return dp[m-1][n-1]\n    \nclass Solution0:\n    def longestCommonSubsequence0(self, text1: str, text2: str) -> int:\n        if not text1 or not text2:return 0\n        m,n=len(text1),len(text2)\n        dp=[[0]*(n+1) for i in range(m+1)]\n        for i in range(1,m+1):\n            for j in range(1,n+1):\n                if text1[i-1]==text2[j-1]:\n                    dp[i][j] =dp[i-1][j-1] + 1\n                else:\n                    dp[i][j]=max(dp[i][j-1],dp[i-1][j])\n        return dp[m][n]\n    def longestCommonSubsequence0(self, text1: str, text2: str) -> int:\n        if not text1 or not text2:return 0\n        m,n=len(text1),len(text2)\n        dp=[[0]*(n+1) for i in range(m+1)]\n        for i in range(m):\n            for j in range(n):\n                if text1[i]==text2[j]:\n                    dp[i][j] =dp[i-1][j-1] + 1\n                else:\n                    dp[i][j]=max(dp[i][j-1],dp[i-1][j])\n        return dp[m-1][n-1]\n    def longestCommonSubsequence1(self, text1: str, text2: str) -> int:\n        if not text1 or not text2:return 0\n        m,n,k=len(text1),len(text2),1\n        if m<n:\n            text1,text2,m,n=text2,text1,n,m\n        dp=[[0]*(n+1) for i in range(2)]\n        for i in range(1,m+1):\n            k=1-k\n            for j in range(1,n+1):\n                if text1[i-1]==text2[j-1]:\n                    dp[k][j]=dp[1-k][j-1]+1\n                else:\n                    dp[k][j]=max(dp[k][j-1],dp[1-k][j])\n        return dp[k][n]\n    def longestCommonSubsequence2(self, text1: str, text2: str) -> int:\n        if not text1 or not text2:return 0\n        m,n=len(text1),len(text2)\n        if m<n:\n            text1,text2,m,n=text2,text1,n,m\n        dp=[0]*(n+1)\n        for i in range(1,m+1):\n            prev=0\n            for j in range(1,n+1):\n                cur=dp[j]\n                if text1[i-1]==text2[j-1]:\n                    dp[j]=prev+1\n                else:\n                    dp[j]=max(dp[j],dp[j-1])\n                prev=cur\n        return dp[n]\n    def longestCommonSubsequence3(self, text1: str, text2: str) -> int:\n        @functools.lru_cache(None)\n        def helper(i,j):\n            if i<0 or j<0:\n                return 0\n            if text1[i]==text2[j]:\n                return helper(i-1,j-1)+1\n            return max(helper(i-1,j),helper(i,j-1))\n        return helper(len(text1)-1,len(text2)-1)\n","sub_path":"Week_06/longest_common_subsequence.py","file_name":"longest_common_subsequence.py","file_ext":"py","file_size_in_byte":3093,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"354101603","text":"\"\"\"\nReturns the final_pos_x, final_pos_y, 
and coins_collected of a pacman.\n\"\"\"\n\n__author__ = \"Dan McAteer\"\n\ndef read_input(text_file):\n\n    input_file = open(text_file)\n    lines = input_file.readlines()\n\n    return lines\n\ndef get_grid_size(input_arr):\n\n    line_1 = input_arr[0]\n    x_coord, y_coord = map(int, line_1.split(\" \"))\n\n    return x_coord, y_coord\n\ndef get_grid_perim(input_arr):\n\n    grid_size = get_grid_size(input_arr)\n    perim = []\n\n    for index in range(0, grid_size[0]):\n        x_perim_lower = (index, 0)\n        x_perim_upper = (index, grid_size[1])\n        y_perim_left = (0, index)\n        y_perim_right = (grid_size[0], index)\n\n        perim += [x_perim_lower, x_perim_upper, y_perim_left, y_perim_right]\n\n    return perim\n\ndef get_start_pos(input_arr):\n\n    line_2 = input_arr[1]\n    x_coord, y_coord = map(int, line_2.split(\" \"))\n\n    return x_coord, y_coord\n\ndef get_walls(input_arr):\n\n    walls = []\n    \n    for index in range(3, len(input_arr)):\n        \n        line = input_arr[index]\n        x_coord, y_coord = map(int, line.split(\" \"))\n        coords = x_coord, y_coord\n        walls.append(coords)\n    \n    return walls\n\ndef get_move_value(direction):\n\n    direction_dict = {\"N\": 1, \"E\": 1, \"S\": -1, \"W\": -1}\n\n    return direction_dict[direction]\n\ndef make_move(move, pos):\n    x_moves = [\"E\", \"W\"]\n    \n    if move in x_moves:\n        pos = (pos[0] + get_move_value(move)), pos[1]\n    else:\n        pos = pos[0], (pos[1] + get_move_value(move))\n    \n    return pos\n\ndef stop_move(move, pos):\n    x_moves = [\"E\", \"W\"]\n\n    if move in x_moves:\n        pos = (pos[0] - get_move_value(move)), pos[1]\n    else:\n        pos = pos[0], (pos[1] - get_move_value(move))\n\n    return pos\n\ndef is_edge_case(start_pos, grid_size, walls, moves):\n\n    if start_pos > grid_size:\n        return True\n    elif start_pos in walls:\n        return True\n    elif not all(elem in ['N', 'S', 'E', 'W'] for elem in moves):\n        return True\n    else:\n        return False\n    \ndef pacman(input_file):\n    \"\"\" Use this function to format your input/output arguments. Be sure not to change the order of the output arguments. \n    Remember that code organization is very important to us, so we encourage the use of helper functions and classes as you see fit.\n    \n    Input:\n        1. input_file (String) = contains the name of a text file you need to read that is in the same directory, includes the \".txt\" extension\n        (ie. \"input.txt\")\n    Outputs:\n        1. final_pos_x (int) = final x location of Pacman\n        2. final_pos_y (int) = final y location of Pacman\n        3. 
coins_collected (int) = the number of coins that have been collected by Pacman across all movements\n \"\"\"\n input_arr = read_input(input_file)\n grid_size = get_grid_size(input_arr)\n grid_perim = get_grid_perim(input_arr)\n start_pos = get_start_pos(input_arr)\n walls = get_walls(input_arr)\n moves = input_arr[2].strip()\n coins_collected = 0\n traversed_points = [start_pos]\n\n if is_edge_case(start_pos, grid_size, walls, moves):\n return (-1, -1, 0)\n\n for move in moves:\n\n start_pos = make_move(move, start_pos)\n\n if start_pos in walls or start_pos in grid_perim:\n start_pos = stop_move(move, start_pos)\n elif start_pos in traversed_points:\n continue\n else:\n coins_collected += 1\n\n traversed_points.append(start_pos)\n\n final_pos_x = start_pos[0]\n final_pos_y = start_pos[1]\n return final_pos_x, final_pos_y, coins_collected","sub_path":"test_files/py_test/pacman.py","file_name":"pacman.py","file_ext":"py","file_size_in_byte":3530,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"501875604","text":"from datetime import date\ndataAtual = date.today()\n\ndef obterLimite():\n nomeVendedor = 'Daniel Sampaio Magalhães'\n print ('\\n\\nOlá, está é a Nemesis Store! Seja bem-vindo! Me chamo', nomeVendedor, 'e vou te ajudar hoje.')\n print ('Vamos precisar de alguns dados seu para realizar sua análise de crédito, tudo bem?')\n cargoAtual = input ('\\nPara comerçarmos, qual seu cargo na empresa em que trabalha atualmente? ')\n salarioAtual = float(input ('Humm, certo! e qual seu salário? '))\n anoNascimento = int(input ('E por último, qual o ano em que você nasceu? (ex: 1900): '))\n print ('\\nVamos ver se está tudo certo, ok?')\n print ('\\nSeu cargo atual é: ', cargoAtual, '\\nSeu salário é de: ', salarioAtual, '\\nVocê nasceu em: ', anoNascimento)\n anoAtual = dataAtual.year\n idadeCliente = anoAtual - int(anoNascimento)\n print ('Sua idade é:', idadeCliente, 'anos')\n limiteCliente = (salarioAtual * (idadeCliente / 1000)) + 100\n print ('\\nSegundo nosso sistema, você poderá gastar em nossa loja R$', limiteCliente)\n \n return limiteCliente, nomeVendedor, idadeCliente\n\ndef verificarProduto(obterLimite):\n\n itens = int(input('Quantos produtos gostaria de comprar? '))\n precoTotal = 0\n for quantidadeProduto in range(itens):\n\n print('\\n=== Produto {} ==='.format(quantidadeProduto + 1))\n produto = input('Qual o produto que você quer comprar? ')\n preco = float(input('E qual seu valor do {}? 
'.format(produto)))\n\n precoTotal = precoTotal + preco\n\n print ('Resta R$', limiteCliente - precoTotal, 'do seu limte.')\n\n print()\n \n if precoTotal <= limiteCliente * 0.60:\n print('Liberado!')\n elif precoTotal > limiteCliente * 0.60 and precoTotal <= limiteCliente * 0.90:\n print('Liberado ao parcelar em até 2 vezes.')\n elif precoTotal > limiteCliente * 0.90 and precoTotal <= limiteCliente * 100:\n print('Liberado ao parcelar em 3 ou mais vezes.')\n else:\n print('Bloqueado, escolha um produto de menor valor.')\n \n quantidadeLetrasNome = len(nomeVendedor)\n if precoTotal <= quantidadeLetrasNome or precoTotal <= idadeCliente:\n print('\\nParabéns, você ganhou um desconto de 6%!')\n print('O valor a ser pago já com o desconto é: R$', precoTotal - precoTotal * 0.06)\n print('\\n\\nObrigado, volte sempre!\\n\\n')\n else:\n print('\\n\\nObrigado, volte sempre!\\n\\n')\n\n return precoTotal\n\n\nlimiteCliente, nomeVendedor, idadeCliente = obterLimite()\n\nprecoTotal = verificarProduto(obterLimite)","sub_path":"ATP.py","file_name":"ATP.py","file_ext":"py","file_size_in_byte":2561,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"54846437","text":"from urllib.request import urlopen\nimport random\nimport telebot\n#from goto import goto, label \n\n\nbot = telebot.TeleBot(\"1113715967:AAEf5eBctEv0Oe2SNBMVLEur5-vOAF3r7hE\")\n\nwhile True:\n @bot.message_handler(content_types=['text'])\n def send_welcome(message):\n\n ganr = message.text\n html = urlopen(\"https://zaycev.net/genres/\" + ganr + \"/index.html\").read().decode('utf-8')\n html_search = \"\"\n\n music = [\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\"]\n e = 0\n\n\n while e < 10:\n rand_page = random.randint(1,5)\n if rand_page == 1:\n html = urlopen(\"https://zaycev.net/genres/\" + ganr + \"/index.html\").read().decode('utf-8')\n else:\n html = urlopen(\"https://zaycev.net/genres/\" + ganr + \"/index_\" + str(rand_page) + \".html\").read().decode('utf-8')\n\n load_cnt = False\n music[e] = \"https://zaycev.net\"\n\n track_cnt = 0\n get_cnt = 0\n for i in html:\n rand_music = random.randint(1,20)\n if (\"musicset-track__track-name\" in html_search) and rand_music > 10:\n track_cnt+=1\n html_search = \"\"\n elif (\"musicset-track__track-name\" in html_search) and rand_music <= 10:\n html_search = \"\"\n if (\"href=\\\"\" in html_search) and track_cnt > 0 and i != \"\\\"\":\n music[e]+=i\n get_cnt+=1\n if get_cnt > 0:\n if i == \"\\\"\":\n q = 0\n dub_cnt = 0\n while q < 10:\n if music[e] == music[q]:\n dub_cnt +=1\n if dub_cnt == 2:\n music[e] = \"\"\n e-=1\n load_cnt = True\n q+=1\n break\n html_search += i\n e+=1\n if load_cnt == False:\n print(str(e) + \"`s - music founded!\") \n\n for i in music:\n bot.send_message(message.chat.id, i)\n bot.polling( none_stop = True )\n","sub_path":"pars_bot.py","file_name":"pars_bot.py","file_ext":"py","file_size_in_byte":2232,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"127712068","text":"\"\"\"\r\nPROBLEM STATEMENT\r\nOur Chef works in an operation to find number of covid affected patients. The covid affected patients increase day after day . The Corona Virus spreads to many people in a day. Our Chef needs to know the number of covid affected on a given day.\r\n\r\nThe Chef had to answer N queries each at a time\r\nThe corona virus spread from one affected person to X healthy persons and they also get affected\r\nThere is no treatment for corona virus . 
If a person is affected for Y or more days, he/she dies\r\nFor a given day find out number of covid affected patients who are alive\r\nDay 1 has only 1 corona affected patient\r\nThe output could be a large number (may be 1000 digits), so do the calculation accurately as much as possible\r\nThe Chef needs to determine the following:\r\n\r\nFor every query:\r\n\r\nNumber of covid affected patients on that day who are alive\r\nAs a programmer, help Chef to accomplish his operation.\r\n\r\nINPUT\r\nFirst line: N Y X\r\n\r\nN : number of queries\r\n\r\nY : Days to be alive for the corona patient\r\n\r\nX : Number of people the virus spreads from one affected to healthy persons\r\n\r\nNext N lines: Di\r\nDi - Day number\r\n\r\nOUTPUT\r\nOnly one Line : Number of covid affected patients on that day who are alive\r\nRepeat the above process for every query\r\n\r\n\r\nCONSTRAINTS:\r\n1≤N≤10^3\r\n1≤Di≤10^9\r\n\r\nSUBTASK:\r\nSubtask 1 - 5 points : T≤4\r\nSubtask 2 - 5 points : T = 10\r\nSubtask 3 - 90 points : T = 100\r\n\r\nSAMPLE INPUT 1\r\n4 2 4\r\n1\r\n2\r\n3\r\n4\r\n\r\nSAMPLE OUTPUT 1\r\n1\r\n5\r\n24\r\n116\r\n\r\n\r\nEXPLANATION :\r\nFirst query : On day 1: only 1 covid patient always (let's name him person A)\r\nSecond query : On day 2 : person A affects 4 other persons B,C,D,E . Total affected persons : A,B,C,D,E = 5 affected\r\nThird query : On day 3 : every person of A,B,C,D,E affects 4 new persons , So 5 * 4 = 20 new persons affected .\r\ntotal affected : 20 new people + A,B,C,D,E = 25 covid affected patients . But person A dies as he lived for 2 days after being affected. So number of affected people : 25 - 1 = 24\r\nFourth query : every one of the 24 people affects 4 new people : so 24*4 = 96 new people affected.\r\nTotally affected = 24(old) + 96(new) = 120. But B,C,D,E die as it is their 3rd day of infection , so total alive with covid is 120 - 4 = 116\r\n\"\"\"\r\n\r\ndef main():\r\n\tN, Y, X = map(int, input().split())\r\n\taff = 1\r\n\tnewaff = 1\r\n\toldaff = 1\r\n\tfor i in range(N):\r\n\t\tDi = int(input())\r\n\t\tif (Di == 1):\r\n\t\t\tprint(newaff)\r\n\t\tif (Di > 1) & (Di <= Y):\r\n\t\t\taff = newaff\r\n\t\t\tnewaff = (aff * X) + aff\r\n\t\t\toldaff = newaff - aff\r\n\t\t\tprint(newaff, aff, oldaff)\r\n\t\tif (Di > Y):\r\n\t\t\tspd = ((newaff * X) + newaff) - oldaff\r\n\t\t\taff = newaff\t\t\t\r\n\t\t\tnewaff = spd\r\n\t\t\toldaff = newaff - aff\r\n\t\t\tprint(spd, aff, newaff, oldaff)\r\n\t\t\r\n\r\nif __name__ == \"__main__\":\r\n\tmain()\r\n\r\n\r\n\"\"\"\r\ndef main():\r\n\tN, Y, X = map(int, input().split())\r\n\taff = 1\r\n\tfor i in range(N):\r\n\t\tDi = int(input())\r\n\t\tif (Di == 1):\r\n\t\t\tprint(aff)\r\n\t\tif (Di > 1 & Di < Y):\r\n\t\t\tspd = (aff * 4) + aff\r\n\t\t\tnewaff = aff\r\n\t\t\taff = spd\r\n\t\t\tprint(spd, aff, newaff)\r\n\t\tif (Di >= Y):\r\n\t\t\tspd = ((aff * 4) + aff) - newaff\r\n\t\t\tnewaff = aff\r\n\t\t\taff = spd\r\n\t\t\tprint(spd, aff , newaff)\r\n\r\n\"\"\"","sub_path":"Codechef/Real Life Problems/dangviru.py","file_name":"dangviru.py","file_ext":"py","file_size_in_byte":3121,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"268336233","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Jun  1 23:22:31 2016\r\n\r\nInputs:\r\n1. data (np array): stock price, oldest TimePoints first\r\n2. TimePoints (int/int list): number of TimePoints for moving average\r\n\r\nFunctions:\r\n1. sma (np array): simple moving average, oldest TP first\r\n2. 
ema (np array): Exponential Moving Average, oldest TP first\r\n3. rsi (np array): Relative Strength Index, oldest TP first\r\n4. macd (np array): Moving Average Convergence/Divergence Oscillator, oldest TP first\r\n5. psar (np array): Parabolic stop and reverse, oldest TP first\r\n\r\nDONE:\r\n1. make TimePoints a var in each func\r\n2. input TimePoints check, initialize output with minimum TimePoints\r\n3. def __int2list to convert single integer TP to list\r\n4. add PSAR\r\n\r\nTODO:\r\n1. check if all elements in TimePoints are int\r\n\r\n@author: Hugh\r\n\"\"\"\r\n\r\n# Import Library\r\n# ================================================================== #\r\nimport numpy as np\r\n#import collections\r\n# ================================================================== #\r\n\r\n# private: int to list\r\ndef __int2list(data, TP):\r\n    # if Time point is single integer, convert to list\r\n    if isinstance(TP,int):\r\n        TP = [TP]\r\n    # if time point is larger than data length, error \r\n    for i in TP:\r\n        if i >= len(data):\r\n            raise ValueError('Time Point is larger than actual data length!')\r\n    \r\n    return TP\r\n\r\n\r\n# SMA\r\n# http://stockcharts.com/school/doku.php?id=chart_school:technical_indicators:moving_averages\r\ndef calcSMA(data, TimePoints):\r\n    # check Timepoints\r\n    TimePoints = __int2list(data, TimePoints)\r\n\r\n    # initialize SMA\r\n    sma = np.zeros([len(data),len(TimePoints)])\r\n    # calculate SMA\r\n    for i in range(len(TimePoints)):\r\n        tmp_sma = list()\r\n        for j in range(len(data)-TimePoints[i]+1):\r\n            tmp_sma.append(np.mean(data[j:j+TimePoints[i]]))\r\n        sma[:,i] = np.lib.pad(np.array(tmp_sma),(TimePoints[i]-1,0),\r\n                              'constant',constant_values=0.0)\r\n    return sma\r\n\r\n# EMA\r\ndef calcEMA(data, TimePoints):\r\n    # check TP\r\n    TimePoints = __int2list(data, TimePoints)\r\n\r\n    # initialize EMA\r\n    ema = np.zeros([len(data),len(TimePoints)])\r\n    tmp_sma = calcSMA(data, TimePoints)\r\n\r\n    # calculate EMA\r\n    for i in range(len(TimePoints)):\r\n        tmp_ema = list()\r\n        ema_coeff = 2./(TimePoints[i]+1)\r\n        ema_ini = tmp_sma[TimePoints[i]-1,i]\r\n        tmp_ema.append(ema_ini)\r\n        for j in range(TimePoints[i],len(data)):\r\n            ema_previous_TimePoints = tmp_ema[-1]\r\n            ema_new_TimePoints = (data[j] - ema_previous_TimePoints) * ema_coeff + ema_previous_TimePoints\r\n            tmp_ema.append(ema_new_TimePoints)\r\n        ema[:,i] = np.lib.pad(np.array(tmp_ema),(TimePoints[i]-1,0),\r\n                              'constant',constant_values=0.0)\r\n    return ema\r\n\r\n# RSI\r\n# http://stockcharts.com/school/doku.php?id=chart_school:technical_indicators:relative_strength_index_rsi\r\ndef calcRSI(data, TimePoints):\r\n    # check TP\r\n    TimePoints = __int2list(data, TimePoints)\r\n\r\n    # calculate absolute gain and loss\r\n    gain_loss = np.diff(data)\r\n    gain = np.copy(gain_loss)\r\n    loss = np.copy(gain_loss)\r\n\r\n    for i in range(len(gain_loss)):\r\n        if gain_loss[i] < 0:\r\n            gain[i] = 0\r\n        else:\r\n            loss[i] = 0\r\n\r\n    loss = np.absolute(loss)\r\n\r\n    # initialize RSI\r\n    rsi = np.zeros([len(data),len(TimePoints)])\r\n\r\n    # calculate RSI\r\n    for i in range(len(TimePoints)):\r\n        avg_gain = list()\r\n        avg_gain.append(np.mean(gain[0:TimePoints[i]]))\r\n\r\n        avg_loss = list()\r\n        avg_loss.append(np.mean(loss[0:TimePoints[i]]))\r\n\r\n        for j in range(len(gain_loss)-TimePoints[i]):\r\n            avg_gain.append((avg_gain[-1]*(TimePoints[i]-1) + gain[j+TimePoints[i]])/TimePoints[i])\r\n            avg_loss.append((avg_loss[-1]*(TimePoints[i]-1) + loss[j+TimePoints[i]])/TimePoints[i])\r\n\r\n        rs = np.true_divide(avg_gain, avg_loss)\r\n        tmp_rsi = 100 - 
100./(1+rs)\r\n rsi[:,i] = np.lib.pad(np.array(tmp_rsi),(TimePoints[i],0),\r\n 'constant',constant_values=0.0)\r\n return rsi\r\n\r\n# MACD\r\n# http://stockcharts.com/school/doku.php?id=chart_school:technical_indicators:moving_average_convergence_divergence_macd\r\ndef calcMACD(data, TP1, TP2, TP3):\r\n # check TP length\r\n TP1 = __int2list(data, TP1)\r\n TP2 = __int2list(data, TP2)\r\n TP3 = __int2list(data, TP3)\r\n \r\n if len(TP1)==len(TP2)==len(TP3):\r\n macd = np.zeros([len(data),len(TP1)])\r\n \r\n for i in range(len(TP1)): \r\n emaTP1 = calcEMA(data,TP1[i])\r\n emaTP2 = calcEMA(data,TP2[i])\r\n maxlen = max(TP1[i],TP2[i])\r\n MACDline = emaTP1-emaTP2\r\n MACDline = MACDline[maxlen-1:]\r\n Signalline = calcEMA(MACDline,TP3[i])\r\n minlen = TP3[i]-1\r\n tmp_macd = MACDline[minlen:] - Signalline[minlen:]\r\n tmp_macd = tmp_macd.ravel() # flatten the dimension\r\n macd[:,i] = np.lib.pad(np.array(tmp_macd), (len(data)-len(tmp_macd),0),\r\n 'constant',constant_values=0.0)\r\n else:\r\n print('TimePoints are not in the same length')\r\n macd = 0 \r\n return macd\r\n\r\n\r\n\r\n#==============================================================================\r\n# PSAR\r\n# assume rising SAR first, initial SAR is the lowest of low_val within timepoints\r\n# for more info about reverse: https://en.wikipedia.org/wiki/Parabolic_SAR:\r\n# and http://stockcharts.com/school/doku.php?id=chart_school:technical_indicators:parabolic_sar\r\n# Inputs: data_high (float): daily high price\r\n# data_low (float): daily low price\r\n# TimePoints (int): using first few days to initialize the SAR/EP\r\n# Outputs: sar (np array): Parabolic SAR values\r\n#==============================================================================\r\ndef calcPSAR(data_high, data_low, TimePoints):\r\n # initialize parameters\r\n AF_step = 0.02\r\n AF_max = 0.2\r\n trend = True # trend: True for rising, False for falling\r\n \r\n af = list()\r\n sar = list()\r\n ep = list()\r\n af.append(AF_step) # initialize AF as AF_step\r\n sar.append(min(data_low[0:TimePoints])) # initialize SAR as lowest low\r\n ep.append(max(data_high[0:TimePoints])) # initialize EP as highest high\r\n \r\n for i in range(TimePoints,len(data_high)): \r\n current_low = data_low[i]\r\n current_high = data_high[i]\r\n yesterday_low = data_low[i-1]\r\n yesterday_high = data_high[i-1]\r\n \r\n # for the last element there's no tomorrow data, set to Inf\r\n try:\r\n tomorrow_low = data_low[i+1]\r\n except IndexError:\r\n tomorrow_low = float('-Inf')\r\n \r\n try: \r\n tomorrow_high = data_high[i+1]\r\n except IndexError:\r\n tomorrow_high = float('Inf')\r\n \r\n # calculate SAR and update parameters\r\n if trend==True: # rising SAR\r\n current_sar = sar[-1] + af[-1]*(ep[-1] - sar[-1])\r\n # update AF and EP\r\n if current_high > ep[-1]: # if find new extreme point, append it\r\n ep.append(current_high)\r\n if af[-1] < AF_max: # if current AF not reaching AF_max, add AF_step\r\n af.append(af[-1] + AF_step)\r\n else: # otherwise append AF_max\r\n af.append(AF_max)\r\n else: # if no extreme point found, append current AF/EP for next period\r\n ep.append(ep[-1])\r\n af.append(af[-1])\r\n \r\n # update SAR and trend\r\n if current_sar >= tomorrow_high: # if current SAR is higher than next period\r\n trend = False # then reverse the trend to falling\r\n current_sar = ep[-1] # SAR for the new trend is the last EP\r\n af[-1] = AF_step # re-initialize AF as AF_step, which is usually 0.02\r\n ep[-1] = min(current_low, yesterday_low) # re-initialize EP as lowest of 
current period\r\n elif current_sar > min(current_low, yesterday_low): # else if current SAR higher than the lowest of current period\r\n current_sar = min(current_low, yesterday_low)\r\n \r\n elif trend==False: # falling SAR\r\n current_sar = sar[-1] - af[-1]*(sar[-1] - ep[-1])\r\n # update AF and EP\r\n if current_low < ep[-1]: # if find new lowest point, append it\r\n ep.append(current_low)\r\n if af[-1] < AF_max: # if current AF not reaching AF_max, add AF_step\r\n af.append(af[-1] + AF_step)\r\n else: # otherwise append AF_max\r\n af.append(AF_max)\r\n else: # if no extreme point found, append current AF/EP for next period\r\n ep.append(ep[-1])\r\n af.append(af[-1]) \r\n \r\n # update SAR and trend\r\n if current_sar <= tomorrow_low: # if current SAR is lower than next period\r\n trend = True # reverse the trend to rising\r\n current_sar = ep[-1] # SAR for the new trend is the latest EP\r\n af[-1] = AF_step # re-initialize AF as AF_step, which is usually 0.02\r\n ep[-1] = max(current_high, yesterday_high) # re-initialize EP as highest of current period\r\n elif current_sar < max(current_high, yesterday_high):\r\n current_sar = max(current_high, yesterday_high)\r\n # append current sar to the output list\r\n sar.append(current_sar)\r\n # convert sar from list to np array and padding with zeros \r\n sar = np.lib.pad(np.array(sar), (TimePoints-1,0), 'constant',constant_values=sar[0])\r\n return sar\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"indicator.py","file_name":"indicator.py","file_ext":"py","file_size_in_byte":9869,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"584210436","text":"import argparse\n\nimport numpy as np\nfrom sklearn.model_selection import train_test_split\n\nfrom preprocessor import *\nfrom sizeChecker import get_file_size\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"-i\", \"--input\", help=\"Input csv with file names and types\", type=str)\nparser.add_argument(\"-d\", \"--data\", help=\"Path to data files\", type =str)\n\ndef map_files(csv_data):\n\tmapped_data = []\n\tprint(\"[*Reading in data files*]\")\n\tfor i in tqdm(csv_data):\n\t\t#print(base_path+i[0])\n\t\twith open(i[0], 'r') as mfile:\n\t\t\tdata = mfile.read()\n\t\t\tif i[1] != \"Energetic Bear\":\n\t\t\t\tprint(i[1])\n\t\t\t\tmapped_data.append((data, i[1], i[0]))\n\treturn mapped_data\n\ndef split_train_test(full_data):\n\ty = [(i[1], i[2]) for i in full_data]\n\tx = [i[0] for i in full_data]\n\tx_train, x_test, y_train, y_test = train_test_split(x,y, test_size=0.33, shuffle=True)\n\n\treturn (x_train, y_train),(x_test, y_test)\n\ndef open_csv(fname, base_path):\n\tfin = []\n\twith open(fname, 'r') as f:\n\t\tdata = f.readlines()\n\n\tcsv_data = [i.strip('\\n').split(',') for i in data]\n\tfor i in csv_data:\n\t\tif 100 < get_file_size(base_path+i[0]) < 500000:\n\t\t\tfin.append((base_path+i[0],i[1]))\n\treturn fin\n\ndef compress_list(lists):\n\tfin = []\n\tfor i in tqdm(lists):\n\t\tfor a in i:\n\t\t\tfin.append(a)\n\treturn fin\n\ndef np_list(lists):\n\tfin = []\n\tfor i in tqdm(lists):\n\t\tfin.append(np.array(i))\n\n\treturn np.array(fin)\n\ndef to_config(filename, train_len, test_len, max_len):\n\ta ={\n\t\"train_len\": train_len,\n\t\"test_len\": test_len,\n\t\"max_len\": max_len\n\t}\n\twith open(filename, 'w+') as f:\n\t\tf.write(json.dumps(a))\n\ndef split_to_file(filename, data):\n\twith open(filename, 'w+') as f:\n\t\tfor i in data:\n\t\t\tf.write(\"{}\\n\".format(i))\n\nif __name__ == 
\"__main__\":\n\n\ttokenizer = Decomp_tokenizer()\n\targs = parser.parse_args()\n\tfull_data = map_files(open_csv(args.input,args.data))\n\n\ttrain_data, test_data = split_train_test(full_data)\n\n\ttrain_malware = train_data[0]\n\ttrain_keys = [i[0] for i in train_data[1]]\n\ttest_malware = test_data[0]\n\ttest_keys = [i[0] for i in test_data[1]]\n\n\ttrain_files = [i[1] for i in train_data[1]]\n\ttest_files = [i[1] for i in test_data[1]]\n\n\tsplit_to_file(\"test_set.csv\", test_files)\n\tsplit_to_file(\"train_set.csv\", train_files)\n\n\n\tprint(\"[*Fitting the tokenizer*]\")\n\ttokenizer.fit_args(tqdm(train_malware))\n\ttokenizer.fit_instr(tqdm(train_malware))\n\ttokenizer.fit_label(train_keys)\n\ttokenizer.save_status()\n\n\t#tokenizer.recover_status()\n\t#tokenizer.save_status()\n\n\tprint(\"[*Tokenizing data and writing to file*]\")\n\ttrain_malware_args = tokenizer.tokenize_args(train_malware)\n\ttrain_malware_instr = tokenizer.tokenize_instr(train_malware)\n\tmax_len_args = len(max(train_malware_args, key=len))\n\tmax_len_inst = len(max(train_malware_instr, key=len))\n\ttrain_keys = tokenizer.tokenizeLabels(tqdm(train_keys))\n\ttrain_data_args = zip(train_malware_args, train_keys)\n\ttrain_data_instr = zip(train_malware_instr, train_keys)\n\n\tprint(\"[*Train data fitted and ready for writing to file*]\")\n\ttest_malware_args = tokenizer.tokenize_args(test_malware)\n\ttest_malware_instr = tokenizer.tokenize_instr(test_malware)\n\ttest_keys = tokenizer.tokenizeLabels(tqdm(test_keys))\n\ttest_data_args = zip(test_malware_args, test_keys)\n\ttest_data_instr = zip(test_malware_instr, test_keys)\n\n\ttrain_len = len(train_malware_args)\n\ttest_len = len(test_malware_args)\n\n\tto_config(\"tokenized_with_args.config\", train_len,test_len,max_len_args)\n\tto_config(\"tokenized_with_instructions.config\", train_len, test_len,max_len_inst)\n\n\tprint(\"[* ARG TRAIN DATA TO FILE *]\")\n\tfor index, i in enumerate(tqdm(train_data_args)):\n\t\t\twrite_to_data_file(\"args\",i,str(index),test=False,max_len=max_len_args)\n\tprint(\"[* ARG TEST DATA TO FILE *]\")\n\tfor index, i in enumerate(tqdm(test_data_args)):\n\t\twrite_to_data_file(\"args\",i,str(index), test=True,max_len=max_len_args)\n\n\tprint(\"[* INSTR TRAIN DATA TO FILE *]\")\n\tfor index, i in enumerate(tqdm(train_data_instr)):\n\t\t\twrite_to_data_file(\"instr\",i,str(index),test=False,max_len=max_len_inst)\n\tprint(\"[* INSTR TEST DATA TO FILE *]\")\n\tfor index, i in enumerate(tqdm(test_data_instr)):\n\t\twrite_to_data_file(\"instr\",i,str(index), test=True,max_len=max_len_inst)\n\n\n\n\n\n\n\n\n\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4134,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"590279679","text":"#Prime Factorization - Have the user enter a\n# number and find all Prime Factors (if there are any) and display them.\n\n\n\n\n__author__ = 'etienne'\na = 2\nutile = 2\nmot = \" \"\nliste_nb_premier = []\n\ndef est_premier(nombre):\n\n liste = range(nombre)\n for i in range(2 , len(liste)):\n if nombre % liste[i] == 0 :\n\n break\n else :\n True\n else :\n liste_nb_premier.append(nombre)\n\nk = 0\nwhile True:\n try:\n k = int(input(\"Entrez votre nombre: \"))\n except ValueError:\n print(\"Not an integer!\")\n continue\n else:\n print(\"Yes an integer!\")\n break\nk = int(k)\ninitial = k\n\nwhile utile <= k:\n est_premier(utile)\n utile += 1\n\n\n\n\nwhile a != 1:\n if k > 1:\n for i in liste_nb_premier:\n if k % i == 0 :\n k = 
k/i\n mot += (str(i)+ \"*\")\n\n else:\n True\n else :\n\n a = 1\n\nprint(\"The list of prime factor of %s is : %s\" %(initial,mot))\n\n\n\n\n\n\n\n\n\n","sub_path":"prime_factorization.py","file_name":"prime_factorization.py","file_ext":"py","file_size_in_byte":981,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"131019785","text":"from flask import Flask, jsonify, request\nfrom data.dao.sensordao import SensorDAO\nfrom data.dao.ruledao import RuleDAO\nfrom notification.texter import Texter\nfrom model.temperaturepoint import TemperaturePoint\nfrom model.rule import Rule\n\napp = Flask(__name__)\nsensordao = SensorDAO()\nruledao = RuleDAO()\n\n\n@app.route('/sensors', methods=['GET'])\ndef sensors():\n try:\n return jsonify(message='OK', data=[sensor.serialize() for sensor in sensordao.get_sensors()])\n except Exception as e:\n return jsonify(message='Fail', data=str(e))\n\n\n@app.route('/rules', methods=['GET', 'POST'])\ndef rules():\n try:\n if request.method == 'POST':\n req = request.get_json()\n rule = Rule(**req)\n sensor = sensordao.get_sensor(rule.sensor_id)\n if sensor:\n count = ruledao.add_rule(rule)\n data = {\"sensor\": sensor.serialize(), \"rule\": req, \"records_inserted\": count}\n return jsonify(message=\"OK\", data=data)\n else:\n return jsonify(message=\"OK\", data=[rule.serialize() for rule in ruledao.get_rules()])\n except Exception as e:\n print(e)\n return jsonify(message='Fail', data=str(e))\n\n\n@app.route('/notify', methods=['POST'])\ndef rule_checker():\n try:\n req = request.get_json()\n temperaturepoint = TemperaturePoint(**req)\n if ruledao.check_rule(temperaturepoint):\n with Texter('Rule has been activated'):\n print('Sent!')\n return jsonify(message=\"OK\", data=\"Notification has been sent\")\n else:\n return jsonify(message=\"OK\", data=\"Rule not met\")\n except Exception as e:\n return jsonify(message='Fail', data=str(e))\n\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', port=11011, debug=True)\n","sub_path":"rulesapi/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":1813,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"363869660","text":"''' Module that provided Finance Indicators '''\nimport pandas as pd\nimport numpy as np\nfrom sklearn import cluster\nfrom uAI_typedef import *\n\n# FINANCE CLASS\nclass Finance():\n\n ''' FINANCE CLASS '''\n def __init__(self, data):\n self.data = data\n self.set_RSI()\n self.set_BollingerBands()\n self.set_BBP()\n self.set_S_RSI()\n \n print (\"\\nFinance data set.\")\n\n # Calculate EMA\n def EMA(self, days, type='df'):\n ''' \n - Calculates EMA (Exponential Moving Average)\\n\n Parameters:\n ----------\n [in] days = interval \\n\n [in] type = 'df' (default) or 'mat' \\n\n [out] df['EMA']\n '''\n data = self.data[CLOSE]\n ema = pd.DataFrame()\n ema[EMA] = data.ewm(span=days, adjust=False).mean() \n return self.returnValue(ema, type)\n\n # Set EMA \n def set_EMA(self, days): \n ''' \n - Set EMA (Exponential Moving Average) \\n\n Parameters:\n ----------\n [in] days = interval \n '''\n ema = pd.DataFrame(columns=[EMA+str(days)])\n ema = self.EMA(days)\n self.data.join(ema)\n\n # Calculate SMA \n def SMA(self, days, type='df'):\n ''' \n - Calculates SMA (Simple Moving Average) \\n \n Parameters:\n ---------- \n [in] days = interval \\n\n [in] type = 'df' (default) or 'mat' \\n\n [out] df['SMA'] \n '''\n data = self.data[CLOSE]\n sma = pd.DataFrame()\n sma[SMA] = 
data.rolling(days).mean()\n return self.returnValue(sma, type)\n\n # Set SMA \n def set_SMA(self, days):\n ''' \n - Set SMA (Simple Moving Average) \\n\n Parameters:\n ----------\n [in] days = interval\n '''\n sma = pd.DataFrame(columns=[SMA+str(days)])\n sma = self.SMA(days)\n self.data.join(sma)\n\n # Calculate MACD EMA\n def MACD_EMA(self, macd):\n ''' \n - Calculates MACD EMA \n '''\n ema = macd.ewm(span=9, adjust=False).mean()\n return ema\n\n # Calculate MACD \n def MACD(self, type='df'):\n ''' \n - Calculates MACD \\n\n Parameters:\n ----------\n [in] type = 'df' (default) or 'mat' \\n\n [out] df['MACD', 'SIGNAL', 'HIST'] \\n\n '''\n macd = self.EMA(12) - self.EMA(26)\n signal = self.MACD_EMA(macd)\n macd_hist = macd.as_matrix() - signal.as_matrix()\n \n macd_out = pd.DataFrame(columns = [MACD, MACD_S, MACD_H])\n macd_out[MACD] = macd\n macd_out[MACD_S] = signal\n macd_out[MACD_H] = macd_hist\n return self.returnValue(macd_out, type)\n\n # Set MACD \n def set_MACD(self):\n ''' \n - Set MACD \n '''\n self.macd = self.MACD()\n\n # Calculate RSI \n def RSI(self, period=14, type='df'): \n ''' \n - Calculates RSI (Relative Strenght Index)\\n\n Parameters:\n ----------\n [in] type = 'df' (default) or 'mat' \\n\n [out] df['RSI'] \n ''' \n data = self.data\n \n UpI = [0] \n DoI = [0] \n \n for i in range ((data[CLOSE].size)-1):\n change = data.get_value(i+1, CLOSE) - data.get_value(i, CLOSE) \n if change >=0: UpD = change\n else: UpD = 0.0\n UpI.append(UpD) \n if change < 0 : DoD = change*(-1) \n else: DoD = 0.0 \n DoI.append(DoD) \n\n UpI = pd.DataFrame(UpI) \n DoI = pd.DataFrame(DoI) \n alpha = 1/period\n avgGain = UpI.ewm(alpha=alpha, adjust=False).mean()\n avgLoss = DoI.ewm(alpha=alpha, adjust=False).mean()\n rs = avgGain / avgLoss\n\n rsi = np.array(100 - (100 / (1 + rs)))\n rsi_out = pd.DataFrame(rsi[:,0], columns=[RSI])\n rsi_out = rsi_out.replace(np.NaN, 0)\n \n return self.returnValue(rsi_out, type)\n\n # Set RSI \n def set_RSI(self, period=14): \n ''' \n - Set RSI (Relative Strenght Index)\n '''\n self.rsi = self.RSI()\n\n # Calculate BB (Bollinger Bands)\n def BollingerBands(self, period=20, type='df'):\n ''' \n - Calculates BB (Bollinger Bands) \\n\n Parameters:\n ----------\n [in] type = 'df' (default) or 'mat' \\n\n [out] df['BB_UP', 'BB_MED', 'BB_LOW']\n '''\n data = self.data[CLOSE]\n sma = data.rolling(window=period, min_periods=period - 1).mean()\n std = data.rolling(window=period, min_periods=period - 1).std()\n upper = (sma + (std * 2)).to_frame(BB_UP)\n lower = (sma - (std * 2)).to_frame(BB_LOW)\n medium = sma.to_frame(BB_MED)\n bb = pd.concat([lower, medium, upper], axis=1)\n return self.returnValue(bb, type)\n\n # Set BB (Bollinger Bands)\n def set_BollingerBands(self):\n '''\n - Set BB (Bollinger Bands) \n '''\n self.bb = self.BollingerBands()\n\n def BBP(self, type='df'):\n '''\n - Calculate Bollinger Band %\n '''\n bbp = pd.DataFrame()\n\n bb = self.bb\n bbp['BBP'] = ((self.data[CLOSE] - bb[BB_LOW]) / (bb[BB_UP] - bb[BB_LOW]))\n return self.returnValue(bbp, type)\n\n def set_BBP(self):\n self.bbp = self.BBP()\n \n # Calculate Stochastic RSI (STOCH RSI)\n def S_RSI(self, period=14, type='df'):\n ''' \n - Calculates Stochastic RSI (STOCH RSI) \\n\n Parameters:\n ----------\n [in] type = 'df' (default) or 'mat' \\n\n [out] df['S_RSI_K', 'S_RSI_D']\n '''\n rsi = self.RSI() \n rsi_aux = pd.DataFrame(rsi)\n high = rsi_aux.rolling(window=period).max()\n low = rsi_aux.rolling(window=period).min()\n\n stochRSI = pd.DataFrame()\n stochRSI[S_RSI_K] = ((rsi_aux - low) / (high 
- low))*100\n stochRSI[S_RSI_K] = stochRSI.rolling(window=3).mean()\n stochRSI[S_RSI_D] = stochRSI[S_RSI_K].rolling(window=3).mean()\n stochRSI = stochRSI.replace(np.NaN, 0)\n \n return self.returnValue(stochRSI, type)\n\n # Set Stochastic RSI (STOCH RSI)\n def set_S_RSI(self):\n '''\n - Set S_RSI (Stochastic RSI)\n '''\n self.s_rsi = self.S_RSI()\n pass\n\n def returnValue(self, value, type='df'):\n if type=='df': return value\n elif type=='mat':\n value = value.as_matrix()\n shape = value.shape\n if shape[1] == 1: return value[:,-1]\n else: return value\n\n # Calculate k-Means for a given number of Clusters\n def KMeans(self, n_clusters, data):\n ''' \n - Calculates k-Means for a given number of Clusters \n '''\n k_means = cluster.KMeans(n_clusters=n_clusters, random_state=0).fit(data)\n return k_means\n\n def saveData(self):\n ''' Save data into a CSV file ''' \n print(\"\\nSaving .csv file...\")\n fileName = str('Finance.csv')\n with open(fileName, 'w') as file:\n file.write(\"INDEX,TIME,OPEN,HIGH,LOW,CLOSE,VOL,RSI,BBP\\n\")\n\n rsi = pd.DataFrame(self.rsi).fillna(0).as_matrix()\n bbp = pd.DataFrame(self.bbp).fillna(0).as_matrix()\n\n for i,data in self.data.iterrows(): \n file.write(str(i) + ',' +\n str(data[TIME])+ ',' +\n str(data[OPEN]) + ',' +\n str(data[HIGH]) + ',' +\n str(data[LOW]) + ',' +\n str(data[CLOSE]) + ',' +\n str(data[VOL]) + ',' +\n str(rsi[i,0]) + ',' +\n str(bbp[i,0]) + '\\n')\n\n print(\"\\nFile saved: \"+ fileName+ \"\\n\")\n","sub_path":"uAI_Finance.py","file_name":"uAI_Finance.py","file_ext":"py","file_size_in_byte":6855,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"413531001","text":"#! /bin/python\n# refer - https://www.algoexpert.io/questions/River%20Sizes\nfrom Queue import Queue\n\n\ndef riverSizes(matrix):\n \"\"\"\n :param matrix: two-dimensional array containing only 0s and 1s(1 represents part of a river)\n :return: an array of the sizes of all rivers represented in the input matrix\n \"\"\"\n # visited map - k: (i,j) of all 1s v: bool\n # queue - (i,j) of all 1s\n # return 0-size filtered one\n res = []\n visited = get_visited_map(matrix)\n q = get_queue(matrix)\n while q.empty() is False:\n current = q.get()\n if not visited[current]:\n res.append(explore(current, visited))\n return filter(lambda x: x > 0, res)\n\n\ndef explore(start, visited):\n found = 0\n q = Queue()\n q.put(start)\n while q.empty() is False:\n current = q.get()\n if not visited[current]:\n visited[current] = True\n found = found + 1\n map(q.put, get_unvisited_neighbors(current, visited))\n\n return found\n\n\ndef get_unvisited_neighbors(current, visited):\n for nxt in get_neighbor_candidates(current):\n if nxt in visited and not visited[nxt]:\n yield nxt\n\n\ndef get_neighbor_candidates(current):\n i, j = current\n return [(i - 1, j), (i + 1, j), (i, j - 1), (i, j + 1)]\n\n\ndef get_queue(matrix):\n q = Queue()\n for i, j, cell in get_coord_and_cell(matrix):\n if cell == 1:\n q.put((i, j))\n return q\n\n\ndef get_visited_map(matrix):\n return {(i, j): False\n for i, j, cell\n in get_coord_and_cell(matrix)\n if cell == 1}\n\n\ndef get_coord_and_cell(matrix):\n for i, row in zip(xrange(len(matrix)), matrix):\n for j, cell in zip(xrange(len(row)), row):\n yield (i, j, cell)\n","sub_path":"graph-medium-river-sizes/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":1757,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"139664192","text":"import 
cgi\nimport webapp2\nfrom google.appengine.ext import ndb\nfrom google.appengine.ext.webapp import template\nfrom google.appengine.api import users\nfrom google.appengine.api import images\nfrom webapp2_extras import sessions, auth\nfrom models import User\nfrom models import Comment\nfrom models import Message\nfrom models import ConnectionRequest\nfrom models import ForumPost\nfrom models import Skill\nfrom models import Forum\nfrom models import Conversation\nfrom models import NotificationBox\nfrom models import NotifiedMessage\n\nimport logging\nimport random\nimport string\nimport datetime\nfrom BaseHandler import SessionHandler\nfrom BaseHandler import login_required\n\nclass ManageConversations(SessionHandler):\n @login_required\n def get(self):\n logging.info(\"hi\")\n user = self.user_model\n user.msg_count = 0\n msg_count = user.msg_count\n user.put()\n conversations = Conversation.query(ndb.OR(\n user.username == Conversation.sender, \n user.username == Conversation.recipient))\n\n notifications = NotifiedMessage.query(NotifiedMessage.person_reference == user.username).fetch(10)\n notis_count = len(notifications)\n \n\n\n\n self.response.out.write(template.render('views/messages.html', {'viewer': user, \n 'conversations': conversations,\n 'notifications': notifications,\n 'notis_count': notis_count,\n 'msg_count': msg_count\n }))\n\n def post(self):\n\n recipient = cgi.escape(self.request.get('recipient'))\n text = cgi.escape(self.request.get('text'))\n sender = cgi.escape(self.request.get('sender'))\n\n\n #create msg and put\n\n #is this conversation already existed?\n #Yes\n #attach message\n #No create a new one\n #owner, sender, msg\n\n #put\n\n #There's a new coming msg to owner\n mesg = Message()\n mesg.time = datetime.datetime.now() - datetime.timedelta(hours=8)\n mesg.sender = sender\n mesg.recipient = recipient\n mesg.text = text\n mesg.put()\n \n owners_temp = []\n owners_temp.append(sender)\n owners_temp.append(recipient)\n\n temp1 = \" \".join((sender, recipient))\n temp2 = \" \".join((recipient, sender))\n\n recipient_user = User.query(User.username == recipient).get()\n if recipient_user:\n recipient_user.msg_count += 1\n recipient_user.put()\n\n\n\n \n conversation_qry = Conversation.query(ndb.OR(Conversation.owners == temp1, Conversation.owners == temp2)).get()\n \n if conversation_qry:\n logging.info(\"yes\")\n #logging.info(conversation_qry.owners)\n conversation_qry.children.append(mesg.key)\n conversation_qry.msg_count = conversation_qry.msg_count + 1\n conversation_qry.most_recent_msg = mesg.text\n conversation_qry.most_recent_date = str(mesg.time)\n\n\n conversation_qry.put()\n \n else:\n logging.info(\"noooooooooo\")\n #logging.info(conversation_qry)\n conv = Conversation()\n conv.sender = sender\n conv.recipient = recipient\n conv.owners = \" \".join((sender, recipient))\n conv.children.append(mesg.key)\n conv.msg_count = conv.msg_count + 1\n conv.most_recent_msg = mesg.text\n conv.most_recent_date = str(mesg.time)\n conv.put()\n\n\n\nclass OneConversationDetail(SessionHandler):\n @login_required\n def get(self, conv_reference):\n user = self.user_model\n #parse string\n #get messages\n temp_query = conv_reference.strip()\n temp = temp_query.split('&')\n\n msg_count = user.msg_count\n\n\n notifications = NotifiedMessage.query(NotifiedMessage.person_reference == user.username).fetch(10)\n notis_count = len(notifications)\n\n string1 = str(temp[0])\n string1 = str(string1).replace('[','').replace(']','')\n string1 = 
str(string1).replace(\"'\",'').replace(\"'\",'')\n string1 = str(string1).replace(\" \",\"\")\n string1 = str(string1).replace(\",\",\" \")\n\n string2 = str(temp[1:2])\n string2 = str(string2).replace('[','').replace(']','')\n string2 = str(string2).replace(\"'\",'').replace(\"'\",'')\n string2 = str(string2).replace(\" \",\"\")\n string2 = str(string2).replace(\",\",\" \")\n\n\n #logging.info(string2 + \" \" + string1)\n #logging.info(string1 + \" \" + string2)\n\n temp1 = \" \".join((string1, string2)) #conv_ref\n temp2 = \" \".join((string2, string1)) #conv_ref_inverse\n\n if user.username != string1 and user.username != string2:\n self.response.out.write(template.render('views/404notfound.html',{}))\n\n else:\n\n\n\n conversation_qry = Conversation.query(ndb.OR(Conversation.owners == temp1, Conversation.owners == temp2)).get()\n\n messages = []\n\n if conversation_qry:\n for oneMessage in conversation_qry.children:\n logging.info(oneMessage.get())\n messages.append(oneMessage.get())\n\n else:\n logging.info(\"not found\")\n\n\n #self.response.out.write(template.render('views/404notfound.html',{})) #prevent people other than tutor and tutee to see this transaction\n self.response.out.write(template.render('views/readMessage.html', {'messages': messages, \n 'user': user, \n 'temp2': temp2, \n 'temp1': temp1,\n 'notifications': notifications,\n 'notis_count': notis_count,\n 'msg_count': msg_count\n }))\n\napp = webapp2.WSGIApplication([('/conversations', ManageConversations),\n ('/conversations/(\\w+)', OneConversationDetail)\n ], debug=True)","sub_path":"Conversation.py","file_name":"Conversation.py","file_ext":"py","file_size_in_byte":6246,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"362329819","text":"import os, sys\nimport ConfigParser\n\n\"\"\"\n @author Vikash Pareek\n @version 1.0\n Perform tasks for checkpoint #1 given in assignment.\n 1. Load the data into Spark DataFrame.\n\"\"\"\n\n## Getting home directory for the project\nhome = os.environ['XBA_HOME']\n\n## Adding XBA_HOME/bin to PYTHONPATH\nsys.path.append(home + '/bin/')\n\n## Importing assignment.py\nimport assignment\n\n## Reading aadhaar.ini for getting data file \ncfg = ConfigParser.ConfigParser()\ncfg.read(home + '/conf/aadhaar.ini')\n\n## Getting data file path\ndata = cfg.get('data', 'path')\npartition = int(cfg.get('spark', 'partition'))\n\nif __name__ == '__main__':\n ## Get DF for given data file\n asgn = assignment.Assignment()\n aadhaar_df = asgn.create_df(data, partition)\n\n ## Displaying top 25 records of dataframe\n aadhaar_df.show(25, truncate=False)\n","sub_path":"assignments/checkpoint1/dataframe/loadDF.py","file_name":"loadDF.py","file_ext":"py","file_size_in_byte":829,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"389647934","text":"import argparse\r\nimport glob\r\nimport logging\r\nimport os\r\nimport librosa\r\nimport numpy as np\r\nimport pyworld as pw\r\nimport soundfile as sf\r\n\r\nfrom functools import partial\r\nfrom multiprocessing import Pool\r\nfrom tqdm import tqdm\r\nfrom tensorflow_tts.utils import remove_outlier\r\n\r\ndef generate(data):\r\n \r\n tid = data[\"tid\"]\r\n audio = data[\"audio\"]\r\n mels = data[\"mels\"]\r\n\r\n # If the fft size is 2048, audio with 22050Hz sample rate is processed in 93 millisecond increments. 
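(Each analysis window covers n_fft / sample_rate seconds; 2048 / 22050 is about 0.093 s.)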
\r\n    # If it is 512, it is processed in 23 millisecond increments.\r\n    \r\n    # lpcnet spec\r\n    fft_size = 320 # or 640; at 16000Hz this gives a 20ms or 40ms window\r\n    hop_size = 160\r\n    samplerate = 16000\r\n    \r\n    # check audio properties\r\n    assert len(audio.shape) == 1, f\"{tid} seems to be a multi-channel signal.\"\r\n    assert np.abs(audio).max() <= 1.0, f\"{tid} does not look like normalized 16 bit PCM.\"\r\n    \r\n    # get spectrogram\r\n    D = librosa.stft(\r\n        audio,\r\n        n_fft=fft_size,\r\n        hop_length=hop_size, # default: win_length // 4\r\n        win_length=None, # default: win_length = n_fft \r\n        window='hann', # default: cosine window ('hann')\r\n        pad_mode=\"reflect\"\r\n    )\r\n    \r\n    S, _ = librosa.magphase(D) # (#bins, #frames)\r\n    \r\n    # check audio and feature length\r\n    audio = np.pad(audio, (0, 3200), mode=\"edge\")\r\n    audio = audio[: mels * hop_size]\r\n    assert mels * hop_size == len(audio)\r\n    \r\n    # extract raw pitch\r\n    _f0, t = pw.dio(audio.astype(np.double),fs=samplerate,f0_ceil=7600,frame_period=1000 * hop_size / samplerate)\r\n    f0 = pw.stonemask(audio.astype(np.double), _f0, t, samplerate)\r\n    if len(f0) >= mels:\r\n        f0 = f0[: mels]\r\n    else:\r\n        f0 = np.pad(f0, (0, mels - len(f0)))\r\n    \r\n    # extract energy\r\n    energy = np.sqrt(np.sum(S ** 2, axis=0))\r\n    if len(energy) >= mels:\r\n        energy = energy[: mels]\r\n    else:\r\n        energy = np.pad(energy, (0, mels - len(energy)))\r\n    assert mels == len(f0) == len(energy)\r\n    \r\n    # remove outlier f0/energy\r\n    f0 = remove_outlier(f0)\r\n    energy = remove_outlier(energy)\r\n    \r\n    item = {}\r\n    item[\"tid\"] = tid\r\n    item[\"f0\"] = f0\r\n    item[\"energy\"] = energy\r\n    return item\r\n    \r\ndef main():\r\n    parser = argparse.ArgumentParser(description=\"Dump F0 & Energy\")\r\n    parser.add_argument(\"--outdir\", default=\"./datasets/jsut/basic\", type=str, help=\"directory to save f0 or energy file.\")\r\n    parser.add_argument(\"--rootdir\", default=\"./datasets/jsut/basic\", type=str, help=\"dataset directory root\")\r\n    args = parser.parse_args()\r\n    rootdir = args.rootdir\r\n    \r\n    datasets = []\r\n    with open(os.path.join(rootdir, \"metadata.csv\"), encoding=\"utf-8\") as f:\r\n        for line in f:\r\n            tid, _ = line.strip().split(\"|\")\r\n            pcm_path = os.path.join(rootdir, \"pcm\", f\"{tid}.s16\")\r\n            feat_path = os.path.join(rootdir, \"feats\", f\"{tid}.f32\")\r\n            \r\n            audio, rate = sf.read(pcm_path, samplerate=16000, channels=1, format='RAW', subtype='PCM_16')\r\n            data = {}\r\n            data[\"tid\"] = tid\r\n            data[\"audio\"] = audio\r\n            data[\"mels\"] = os.stat(feat_path).st_size // 4 // 20\r\n            datasets.append(data)\r\n    \r\n    if not os.path.exists(args.outdir):\r\n        os.makedirs(args.outdir)\r\n    \r\n    partial_fn = partial(generate)\r\n    \r\n    p = Pool(4)\r\n    items = p.imap_unordered(partial_fn,tqdm(datasets, total=len(datasets), desc=\"[Preprocessing]\"),chunksize=10)\r\n    \r\n    f0_path = os.path.join(rootdir, \"f0\")\r\n    if not os.path.exists(f0_path):\r\n        os.makedirs(f0_path)\r\n    \r\n    energy_path = os.path.join(rootdir, \"energies\") \r\n    if not os.path.exists(energy_path):\r\n        os.makedirs(energy_path)\r\n    \r\n    for item in items:\r\n        tid = item[\"tid\"]\r\n        f0 = item[\"f0\"]\r\n        energy = item[\"energy\"]\r\n        f0.astype(np.float32).tofile(os.path.join(f0_path, f\"{tid}.f0\"))\r\n        energy.astype(np.float32).tofile(os.path.join(energy_path, f\"{tid}.e\"))\r\n        \r\nif __name__ == \"__main__\":\r\n    main()","sub_path":"bin/dump_f0_energy.py","file_name":"dump_f0_energy.py","file_ext":"py","file_size_in_byte":4086,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} 
+{"seq_id":"199856760","text":"#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n# author: bigfoolliu\n\n\n\"\"\"\n二叉树的所有路径\n\"\"\"\n\n\nclass Node(object):\n \"\"\"一般二叉树的节点\"\"\"\n\n def __init__(self, data, left=None, right=None):\n self.data = data\n self.left = left\n self.right = right\n\n\ndef binary_tree_paths(root):\n \"\"\"\n 使用深度优先搜索\n\n - 如果当前节点不是叶子节点,则在当前的路径末尾添加该节点,并继续递归遍历该节点的每一个孩子节点。\n - 如果当前节点是叶子节点,则在当前路径末尾添加该节点后我们就得到了一条从根节点到叶子节点的路径,将该路径加入到答案即可\n\n 1\n 2 3\n 4 5\n\n 输出:['1->2->4', '1->3', '1->2->5']\n \"\"\"\n\n def construct_path(root, path):\n if not root:\n return\n\n path += str(root.val)\n # 如果是叶子节点,将路径加入结果\n if not root.left and not root.right:\n paths.append(path)\n # 如果不是叶子节点,则将当前的路径传入,然后将左右子节点参与构建\n else:\n path += '->'\n construct_path(root.left, path)\n construct_path(root.right, path)\n\n paths = []\n construct_path(root, '')\n return paths\n","sub_path":"dataStructure/binaryTree/binary_tree_path.py","file_name":"binary_tree_path.py","file_ext":"py","file_size_in_byte":1314,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"96615661","text":"from rest_framework import serializers\nfrom apps.info.serializers import (\n TaskTypesGetSerializer,\n LearningRangeGetSerializer,\n LearningSubjectsGetSerializer,\n LessonThemeGetSerializer,\n LessonTypesGetSerializer,\n EducationProcessesGetSerializer,\n AttachmentGetSerializer,\n)\nfrom apps.users.serializers import (\n UserGetSerializer\n)\nfrom apps.users.models import (\n User,\n Subscription,\n)\nfrom .models import (\n Card,\n PurchasedCard,\n Group,\n Plan,\n Collection,\n PlanCard,\n)\nfrom django.contrib.auth.models import AnonymousUser\nfrom django.utils import timezone\nfrom datetime import datetime, timedelta\n\n\nclass CardGetSerializer(serializers.ModelSerializer):\n\n card_type = TaskTypesGetSerializer()\n learning_range = LearningRangeGetSerializer(required=False, many=True)\n learning_subjects = LearningSubjectsGetSerializer(\n required=False, many=True)\n learning_themes = LessonThemeGetSerializer(required=False, many=True)\n learning_types = LessonTypesGetSerializer(required=False, many=True)\n education_process = EducationProcessesGetSerializer(\n required=False, many=True)\n images = AttachmentGetSerializer(required=False, many=True)\n bought = serializers.SerializerMethodField(required=False)\n favourite = serializers.SerializerMethodField(required=False)\n hosting_url = serializers.URLField(required=False)\n\n class Meta:\n model = Card\n fields = '__all__'\n\n def get_favourite(self, card: Card):\n request = self.context.get('request')\n user = None\n if request.user:\n user = request.user\n if user in card.favourites.all():\n return True\n return False\n\n def get_bought(self, card: Card):\n request = self.context.get('request')\n user = None\n if request.user:\n user = request.user\n if user:\n if card.accessory_level == 1:\n purchased_card = PurchasedCard.objects.filter(\n user=user,\n card=card\n )\n if purchased_card.exists():\n return True\n\n\n elif card.accessory_level == 2:\n subs = Subscription.objects.filter(\n user=user,\n end_date__gte=(timezone.now())\n )\n if subs.exists():\n return True\n return False\n return False\n\n def get_preview(self, attachemnt):\n request = self.context.get('request')\n if attachemnt.preview and getattr(attachemnt.preview, 'url'):\n file_url = attachemnt.preview.url\n return request.build_absolute_uri(file_url)\n return None\n\n def get_attachment(self, attachemnt):\n request = self.context.get('request')\n 
if attachemnt.attachment and getattr(attachemnt.attachment, 'url'):\n file_url = attachemnt.attachment.url\n return request.build_absolute_uri(file_url)\n return None\n\n def get_video(self, attachemnt):\n request = self.context.get('request')\n if attachemnt.video and getattr(attachemnt.video, 'url'):\n file_url = attachemnt.video.url\n return request.build_absolute_uri(file_url)\n return None\n\n\nclass PurchasedCardGetSerializer(serializers.ModelSerializer):\n\n user = UserGetSerializer()\n card = CardGetSerializer()\n\n class Meta:\n model = PurchasedCard\n fields = '__all__'\n\n\nclass PurchasedCardCreateSerializer(serializers.ModelSerializer):\n\n class Meta:\n model = PurchasedCard\n exclude = ('user', )\n\n def validate(self, attrs):\n request = self.context.get('request')\n user = request.user\n attrs['user'] = user\n return attrs\n\n\nclass GroupGetSerializer(serializers.ModelSerializer):\n\n user = UserGetSerializer()\n\n class Meta:\n model = Group\n fields = '__all__'\n\n\nclass GroupCreateSerializer(serializers.ModelSerializer):\n\n class Meta:\n model = Group\n fields = '__all__'\n\n def validate(self, attrs):\n request = self.context.get('request')\n user = request.user\n attrs['user'] = user\n return attrs\n\n\nclass GroupPartialSerializer(serializers.ModelSerializer):\n name = serializers.CharField(required=False)\n user = serializers.PrimaryKeyRelatedField(\n queryset=User.objects.all(), required=False)\n duration = serializers.CharField(required=False)\n age = serializers.CharField(required=False)\n\n class Meta:\n model = Group\n fields = '__all__'\n\n def validate(self, attrs):\n request = self.context.get('request')\n user = request.user\n attrs['user'] = user\n return attrs\n\n##\n\n\nclass PlanCardRetrieveSerializer(serializers.ModelSerializer):\n\n class Meta:\n model = PlanCard\n fields = '__all__'\n\n\nclass PlanCardCreateSerializer(serializers.ModelSerializer):\n\n class Meta:\n model = PlanCard\n fields = '__all__'\n\n##\n\n\nclass PlanGetSerializer(serializers.ModelSerializer):\n\n user = UserGetSerializer()\n cards = CardGetSerializer(many=True)\n groups = GroupGetSerializer(many=True)\n case_to_cards = PlanCardRetrieveSerializer(many=True)\n\n class Meta:\n model = Plan\n fields = (\n 'pk',\n 'name',\n 'user',\n 'cards',\n 'case_to_cards',\n 'groups'\n )\n\n\nclass PlanCreateSerializer(serializers.ModelSerializer):\n\n class Meta:\n model = Plan\n fields = '__all__'\n\n def validate(self, attrs):\n request = self.context.get('request')\n user = request.user\n attrs['user'] = user\n return attrs\n\n\nclass PlanPartialSerializer(serializers.ModelSerializer):\n name = serializers.CharField(required=False)\n user = serializers.PrimaryKeyRelatedField(\n required=False, queryset=User.objects.all())\n cards = serializers.PrimaryKeyRelatedField(\n required=False, queryset=Card.objects.all(), many=True)\n groups = serializers.PrimaryKeyRelatedField(\n required=False, queryset=Group.objects.all(), many=True)\n\n class Meta:\n model = Plan\n fields = '__all__'\n\n def validate(self, attrs):\n request = self.context.get('request')\n user = request.user\n attrs['user'] = user\n return attrs\n##\n\n\nclass PlanCardGetSerializer(serializers.ModelSerializer):\n\n plan = PlanGetSerializer()\n card = CardGetSerializer()\n\n class Meta:\n model = PlanCard\n fields = '__all__'\n##\n\n\nclass CollectionGetSerializer(serializers.ModelSerializer):\n\n user = UserGetSerializer()\n cards = CardGetSerializer(many=True)\n\n class Meta:\n model = Collection\n fields = 
'__all__'\n\n\nclass CollectionCreateSerializer(serializers.ModelSerializer):\n user = serializers.PrimaryKeyRelatedField(\n required=True, queryset=User.objects.all())\n cards = serializers.PrimaryKeyRelatedField(\n required=False, queryset=Card.objects.all(), many=True)\n class Meta:\n model = Collection\n fields = '__all__'\n\n def validate(self, attrs):\n request = self.context.get('request')\n user = request.user\n attrs['user'] = user\n return attrs\n\n\nclass CollectionPartialSerializer(serializers.ModelSerializer):\n name = serializers.CharField(required=False)\n user = serializers.PrimaryKeyRelatedField(\n required=False, queryset=User.objects.all())\n cards = serializers.PrimaryKeyRelatedField(\n required=False, queryset=Card.objects.all(), many=True)\n\n class Meta:\n model = Collection\n fields = '__all__'\n\n def validate(self, attrs):\n request = self.context.get('request')\n user = request.user\n attrs['user'] = user\n return attrs\n","sub_path":"apps/lessons/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":7887,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"85560183","text":"import math\nfrom typing import List, Tuple\n\nfrom agent import Agent\nfrom board import Board\nfrom . import heuristic\n\nimport LessardPhilippe.alpha_beta_agent\n\n# from . import Heuristic\n\ndebug_level=0\nthreshold=9000\n\n\n###########################\n# Alpha-Beta Search Agent #\n###########################\nclass AlphaBetaAgent(Agent):\n \"\"\"Agent that uses alpha-beta search\"\"\"\n\n # Class constructor.\n #\n # PARAM [string] name: the name of this player\n # PARAM [int] max_depth: the maximum search depth\n def __init__(self, name, max_depth):\n super().__init__(name)\n # Max search depth\n self.max_depth = max_depth\n\n # Pick a column.\n #\n # PARAM [board.Board] brd: the current board state\n # RETURN [int]: the column where the token must be added\n #\n # NOTE: make sure the column is legal, or you'll lose the game.\n def go(self, brd): # main routine invoked by game simulator\n \"\"\"Search for the best move (choice of column for the token)\"\"\"\n # Your code here\n\n # create heuristic at specified depth for the current board\n player = brd.player # get our player number\n top_node = heuristic.Heuristic((brd, -1), player) # create top node of tree\n hu = self.create_heuristic(top_node, self.max_depth, player) # recursively build tree\n\n (score, path) = self.alpha_beta(hu, self.max_depth, -math.inf, math.inf, True) # run alpha beta on huristic\n\n\n if(debug_level>=2):\n\n print(\"\\033[1;37;40m \\033[2;37:40m ---SELECTING PATH--------------------------------------------------------\")\n\n print(\"PATH[]\", path)\n return path[1] # return the path(colum choice) of one layer deep for the high score result\n\n # Get the successors of the given board.\n #\n # PARAM [board.Board] brd: the board state\n # RETURN [list of (board.Board, int)]: a list of the successor boards,\n # along with the column where the last\n # token was added in it\n\n def alpha_beta(self, node: heuristic.Heuristic, depth: int, a: float, b: float, maximizing_player: int) -> Tuple[\n float, List[int]]:\n \"\"\"alpha beta alg to search heuristic tree\"\"\"\n\n '''\n originally called on the root node, with true.\n Which means, that it recurses exactly once, with FALSE\n returning node.score of play after one move (since the root node, though having encoded a col value,\n means effectively nothing and should be ignored).\n \n 
'''\n\n # If we've reached end of exploration, returns this nodes individual score and just the move made to get here\n if depth == 0 or (not node.has_children()):\n return node.score, [node.col] # return tuple of score and path array\n\n # Whether we are doing minimizing or maximizing of score\n if maximizing_player:\n value = -math.inf\n value_path = []\n for child in node.children:\n # Recursively call\n child_score, child_path = self.alpha_beta(child, depth - 1, a, b, False)\n\n # If we find a better score, save its path as the current best\n if child_score > value:\n value = child_score\n value_path = child_path\n\n # Update alpha\n a = max(a, value)\n\n # Do alpha beta pruning\n if a >= b:\n break # beta cutoff - return early\n # if abs(child.calc_move_score(child.board,child.col))>threshold*(depth-depth+1):\n # print(\"THING\")\n # break#break early if game is won\n\n return value, [node.col] + value_path\n else:\n value = math.inf\n value_path = []\n for child in node.children:\n # Recursively call\n child_score, child_path = self.alpha_beta(child, depth - 1, a, b, True)\n\n # If we find a better (IE worse) score, save its path as the current best\n if child_score < value:\n value = child_score\n value_path = child_path\n\n # Update beta\n b = min(b, value)\n\n # Do alpha beta cut off\n if a >= b:\n break # Alpha cutoff - return early\n # if abs(child.calc_move_score(child.board,child.col))>threshold*(depth-depth+1):\n # print(\"THING\")\n # break#break early if game is won\n\n return value, [node.col] + value_path\n\n def create_heuristic(self, node: heuristic.Heuristic, depth: int, root: int) -> heuristic.Heuristic:\n \"\"\"recursively builds tree to specified depth\"\"\"\n if depth > 0:\n # If we're calling this we want to force (re)generation\n node.children = []\n successors = self.get_successors(node.board)\n for board_and_col in successors:\n # Make a new heuristic containing the new board, the col dropped to form it, and the player1\n child = heuristic.Heuristic(board_and_col, root, node.score)\n node.add_child(child) # , root)\n self.create_heuristic(child, depth - 1, root)\n\n # Regardless, return the node\n return node # if depth zero is reached return the tree that was input\n\n def get_successors(self, brd: Board) -> List[Tuple[Board, int]]:\n \"\"\"Returns the reachable boards from the given board brd. 
The return value is a tuple (new board state, column number where last token was added).\"\"\"\n # Get possible actions\n freecols = brd.free_cols()\n # Are there legal actions left?\n if not freecols:\n return []\n # Make a list of the new boards along with the corresponding actions\n succ = []\n for col in freecols:\n # Clone the original board\n nb = brd.copy()\n # Add a token to the new board\n # (This internally changes nb.player, check the method definition!)\n nb.add_token(col)\n # Add board to list of successors\n succ.append((nb, col))\n return succ\n\n\nTHE_AGENT = AlphaBetaAgent(\"GamarraNikolas3\", 4)\n","sub_path":"ConnectN/GamarraNikolas3/alpha_beta_agent.py","file_name":"alpha_beta_agent.py","file_ext":"py","file_size_in_byte":6343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"407120647","text":"import time\nfrom threading import Thread\n\n\ndef show_help():\n print(\"\"\"\n Поддерживаемые комманды:\n help - это меню\n users - общий список пользователей\n conn - пользователи онлайн\n lh - история входов пользователя\n exit - завершение работы сервера\n \"\"\")\n\n\ndef interface_func(database):\n session = database.create_session()\n show_help()\n while True:\n command = input('Введите комманду: ')\n if command == 'help':\n show_help()\n elif command == 'exit':\n break\n elif command == 'users':\n for user in sorted(session.users_list()):\n print({user[0]})\n elif command == 'conn':\n for user in sorted(session.active_users_list()):\n print(f'Пользователь {user[0]}, подключен: {user[1]}:{user[2]}, время установки соединения: {user[3]}')\n elif command == 'lh':\n name = input('Введите имя конкретного пользователя. Для вывода всей истории, просто нажмите Enter: ')\n for user in session.login_history(name):\n print(f'Пользователь: {user[0]} время входа: {user[1]}. 
Вход с: {user[2]}:{user[3]}')\n else:\n print('Команда не распознана.')\n\n\ndef run_server_cli(server_thread, database):\n # интерфейс - другим\n interface_thread = Thread(target=interface_func, args=(database,))\n interface_thread.daemon = True\n interface_thread.start()\n\n while True:\n time.sleep(1)\n if server_thread.is_alive() and interface_thread.is_alive():\n continue\n break\n","sub_path":"Lesson_8_Grishechkina/server_dist/server/server_part/server_cli.py","file_name":"server_cli.py","file_ext":"py","file_size_in_byte":1924,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"254926193","text":"from __future__ import print_function\nimport numpy as np\nimport tensorflow as tf\n\nimport argparse\nimport time\nimport os\nfrom six.moves import cPickle\nfrom subprocess import call\n\nfrom utils import TextLoader\nfrom model import Model\n\nfrom six import text_type\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('--save_dir', type=str, default='save',\n help='model directory to store checkpointed models')\n parser.add_argument('--sample', type=int, default=1,\n help='0 to use max at each timestep, 1 to sample at each timestep, 2 to sample on bar lines')\n parser.add_argument('-T', type=str, default=\"A Tune\",\n help=\"Name of the tune\")\n parser.add_argument('-R', type=str, default=\"reel\",\n help=\"Type of the tune (reel, air, jig, polka, etc.)\")\n parser.add_argument('-M', type=str, default=\"4/4\",\n help=\"Time signature\")\n parser.add_argument('-L', type=str, default=\"1/8\",\n help=\"Length of beat (This should nearly always be 1/8)\")\n parser.add_argument('-K', type=str, default=\"Dmaj\",\n help=\"Key (Gmaj and Dmaj work best)\")\n parser.add_argument('--tune_dir', type=str, default=\".\",\n help='Where to save the svg of the generated tune')\n\n args = parser.parse_args()\n tune = sample(args)\n with open(os.path.join(args.tune_dir + \"/out.abc\"), \"w\") as text_file:\n text_file.write(tune)\n os.chdir(args.tune_dir)\n print(os.getcwd())\n call(\"pwd\")\n call(\"abcm2ps -g -O = out.abc\");\n\t\t\n\t\t \ndef sample(args):\n with open(os.path.join(args.save_dir, 'config.pkl'), 'rb') as f:\n saved_args = cPickle.load(f)\n with open(os.path.join(args.save_dir, 'chars_vocab.pkl'), 'rb') as f:\n chars, vocab = cPickle.load(f)\n model = Model(saved_args, True)\n with tf.Session() as sess:\n tf.initialize_all_variables().run()\n saver = tf.train.Saver(tf.all_variables())\n ckpt = tf.train.get_checkpoint_state(args.save_dir)\n if ckpt and ckpt.model_checkpoint_path:\n saver.restore(sess, ckpt.model_checkpoint_path)\n tune = (model.sample_tune(\n sess, chars, vocab, args.T, args.R,\n int(args.M[0]), int(args.M[2]),\n int(args.L[0]), int(args.L[2]),\n args.K, args.sample))\n tune = tune[:-1]\n print(tune)\n return tune\nif __name__ == '__main__':\n main()\n\n","sub_path":"sample_music.py","file_name":"sample_music.py","file_ext":"py","file_size_in_byte":2461,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"185340927","text":"#!/usr/bin/python3\n\"\"\" This script lists all state object from database hbtn_0e_6_usa\"\"\"\n\nimport sys\nfrom model_state import Base, State\nfrom sqlalchemy.sql import select\nfrom sqlalchemy import (create_engine)\n\nif __name__ == '__main__':\n a = sys.argv[1]\n b = sys.argv[2]\n c = sys.argv[3]\n engine = create_engine('mysql+mysqldb://{}:{}@localhost/{}'.format(a,\n b,\n c),\n pool_pre_ping=True)\n conn = 
engine.connect()\n sql = select([State]).where(State.name.like('%a%'))\n res = conn.execute(sql)\n\n for row in res:\n print(\"{}: {}\".format(row[0], row[1]))\n","sub_path":"0x0F-python-object_relational_mapping/9-model_state_filter_a.py","file_name":"9-model_state_filter_a.py","file_ext":"py","file_size_in_byte":748,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"93143361","text":"import find_mxnet\nimport mxnet as mx\nimport logging\nimport os\nimport pickle\nimport socket\nimport sys\n\nclass gdSGD(mx.optimizer.Optimizer):\n \"\"\"A very simple SGD optimizer with momentum and weight regularization.\n It sends the weights to a server located at a higher level in the server\n tree, and pulls back updated weights from the server.\n\n Parameters\n ----------\n learning_rate : float, optional\n learning_rate of SGD\n\n momentum : float, optional\n momentum value\n\n wd : float, optional\n L2 regularization coefficient add to all the weights\n\n rescale_grad : float, optional\n rescaling factor of gradient.\n\n clip_gradient : float, optional\n clip gradient in range [-clip_gradient, clip_gradient]\n\n param_idx2name : dict of string/int to float, optional\n special treat weight decay in parameter ends with bias, gamma, and beta\n \"\"\"\n def __init__(self, momentum=0.0, **kwargs):\n super(gdSGD, self).__init__(**kwargs)\n self.momentum = momentum\n self.hierarchical = True\n self.sock = None\n\n def create_state(self, index, weight):\n \"\"\"Create additional optimizer state such as momentum.\n\n Parameters\n ----------\n weight : NDArray\n The weight data\n\n \"\"\"\n if self.momentum == 0.0:\n return None\n else:\n return mx.ndarray.zeros(weight.shape, weight.context, dtype=weight.dtype)\n\n def update(self, index, weight, grad, state):\n \"\"\"Update the parameters and send up the tree\n\n Parameters\n ----------\n index : int\n An unique integer key used to index the parameters\n\n weight : NDArray\n weight ndarray\n\n grad : NDArray\n grad ndarray\n\n state : NDArray or other objects returned by init_state\n The auxiliary state used in optimization.\n \"\"\"\n assert(isinstance(weight, mx.ndarray.NDArray))\n assert(isinstance(grad, mx.ndarray.NDArray))\n lr = self._get_lr(index)\n wd = self._get_wd(index)\n self._update_count(index)\n\n grad = grad * self.rescale_grad\n if self.clip_gradient is not None:\n grad = mx.ndarray.clip(grad, -self.clip_gradient, self.clip_gradient)\n\n if state:\n mom = state\n mom[:] *= self.momentum\n mom[:] += -lr * (grad + wd * weight)\n weight[:] += mom\n else:\n assert self.momentum == 0.0\n weight[:] += -lr * (grad + wd * weight)\n\n # now push the weight to root\n if self.sock is not None:\n try:\n logging.info('send')\n self.sock.send(pickle.dumps((index, weight)))\n logging.info('send ok')\n # zmq documentation says this recv is necessary.\n # I have not tested without this recv.\n # TODO(smihir): try to see if the recv can be removed.\n try:\n new = pickle.loads(self.sock.recv())\n weight[:] = new\n except Exception as ee:\n logging.info('cannot load new weights {}'.format(str(ee)))\n\n except Exception as e:\n logging.info('cannot send model upstream: {}'.format(str(e)))\n pass\n\n\ndef fit(args, network, data_loader, batch_end_callback=None):\n # kvstore\n kv = mx.kvstore.create(args.kv_store)\n kv.set_optimizer(mx.optimizer.Test())\n mx.optimizer.Optimizer.register(gdSGD)\n\n # logging\n head = '%(asctime)-15s Node[' + str(kv.rank) + '] %(message)s'\n if 'log_file' in args and args.log_file is not 
None:\n log_file = args.log_file\n log_dir = args.log_dir\n log_file_full_name = os.path.join(log_dir, log_file)\n if not os.path.exists(log_dir):\n os.mkdir(log_dir)\n logger = logging.getLogger()\n handler = logging.FileHandler(log_file_full_name)\n formatter = logging.Formatter(head)\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n logger.setLevel(logging.DEBUG)\n logger.info('start with arguments %s', args)\n else:\n logging.basicConfig(level=logging.DEBUG, format=head)\n logging.info('start with arguments %s', args)\n\n # load model\n model_prefix = args.model_prefix\n if model_prefix is not None:\n model_prefix += \"-%d\" % (kv.rank)\n model_args = {}\n if args.load_epoch is not None:\n assert model_prefix is not None\n tmp = mx.model.FeedForward.load(model_prefix, args.load_epoch)\n model_args = {'arg_params' : tmp.arg_params,\n 'aux_params' : tmp.aux_params,\n 'begin_epoch' : args.load_epoch}\n # TODO: check epoch_size for 'dist_sync'\n epoch_size = args.num_examples / args.batch_size\n model_args['begin_num_update'] = epoch_size * args.load_epoch\n\n # save model\n save_model_prefix = args.save_model_prefix\n if save_model_prefix is None:\n save_model_prefix = model_prefix\n checkpoint = None if save_model_prefix is None else mx.callback.do_checkpoint(save_model_prefix)\n\n # data\n (train, val) = data_loader(args, kv)\n\n # train\n devs = mx.cpu() if args.gpus is None else [\n mx.gpu(int(i)) for i in args.gpus.split(',')]\n\n epoch_size = args.num_examples / args.batch_size\n\n if args.kv_store == 'dist_sync':\n epoch_size /= kv.num_workers\n model_args['epoch_size'] = epoch_size\n\n if 'lr_factor' in args and args.lr_factor < 1:\n model_args['lr_scheduler'] = mx.lr_scheduler.FactorScheduler(\n step = max(int(epoch_size * args.lr_factor_epoch), 1),\n factor = args.lr_factor)\n\n if 'clip_gradient' in args and args.clip_gradient is not None:\n model_args['clip_gradient'] = args.clip_gradient\n\n # disable kvstore for single device\n if 'local' in kv.type and (\n args.gpus is None or len(args.gpus.split(',')) == 1):\n kv = None\n\n model = mx.model.FeedForward(\n ctx = devs,\n symbol = network,\n num_epoch = args.num_epochs,\n learning_rate = args.lr,\n momentum = 0.9,\n wd = 0.00001,\n initializer = mx.init.Xavier(factor_type=\"in\", magnitude=2.34),\n optimizer = 'gdSGD',\n **model_args)\n\n eval_metrics = ['accuracy']\n # TopKAccuracy only allows top_k > 1\n for top_k in [5, 10, 20]:\n eval_metrics.append(mx.metric.create('top_k_accuracy', top_k = top_k))\n\n if batch_end_callback is not None:\n if not isinstance(batch_end_callback, list):\n batch_end_callback = [batch_end_callback]\n else:\n batch_end_callback = []\n batch_end_callback.append(mx.callback.Speedometer(args.batch_size, 50))\n\n model.fit(\n X = train,\n eval_data = val,\n eval_metric = eval_metrics,\n kvstore = kv,\n batch_end_callback = batch_end_callback,\n epoch_end_callback = checkpoint)\n","sub_path":"example/gdmnist/train_model.py","file_name":"train_model.py","file_ext":"py","file_size_in_byte":7137,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"552958070","text":"import argparse, os, shutil, time, warnings\r\nfrom datetime import datetime\r\nfrom pathlib import Path\r\nimport numpy as np\r\nimport sys\r\nimport math\r\n\r\nimport torch\r\nfrom torch.autograd import Variable\r\nfrom torch.nn.parameter import Parameter\r\nimport torch.nn as nn\r\nimport torch.nn.parallel\r\nimport torch.backends.cudnn as cudnn\r\nimport torch.distributed as dist\r\nimport 
torch.optim\nimport torch.utils.data\nimport torch.utils.data.distributed\n\n# import models\nfrom fp16util import *\nimport gc\n\nimport resnet\n\nimport dataloader\n\ndef get_parser():\n parser = argparse.ArgumentParser(description='PyTorch ImageNet Training')\n parser.add_argument('data', metavar='DIR', help='path to dataset')\n parser.add_argument('--arch', '-a', metavar='ARCH', default='resnet50')\n parser.add_argument('--save-dir', type=str, default=Path.cwd(), help='Directory to save logs and models.')\n parser.add_argument('-j', '--workers', default=8, type=int, metavar='N',\n help='number of data loading workers (default: 4)')\n parser.add_argument('--epochs', default=45, type=int, metavar='N',\n help='number of total epochs to run')\n parser.add_argument('--start-epoch', default=0, type=int, metavar='N',\n help='manual epoch number (useful on restarts)')\n parser.add_argument('-b', '--batch-size', default=256, type=int,\n metavar='N', help='mini-batch size (default: 256)')\n parser.add_argument('--lr', '--learning-rate', default=0.1, type=float,\n metavar='LR', help='initial learning rate')\n parser.add_argument('--momentum', default=0.9, type=float, metavar='M', help='momentum')\n parser.add_argument('--weight-decay', '--wd', default=1e-4, type=float,\n metavar='W', help='weight decay (default: 1e-4)')\n parser.add_argument('--resize-sched', default='0.4,0.92', type=str,\n help='Scheduler to resize from 128 -> 224 -> 288')\n parser.add_argument('--lr-sched', default='0.1,0.47,0.78,0.95', type=str,\n help='Learning rate scheduler warmup -> lr -> lr/10 -> lr/100 -> lr/1000')\n parser.add_argument('--init-bn0', action='store_true', help='Intialize running batch norm mean to 0')\n parser.add_argument('--print-freq', '-p', default=10, type=int,\n metavar='N', help='print frequency (default: 10)')\n parser.add_argument('--resume', default='', type=str, metavar='PATH',\n help='path to latest checkpoint (default: none)')\n parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true',\n help='evaluate model on validation set')\n parser.add_argument('--pretrained', dest='pretrained', action='store_true', help='use pre-trained model')\n parser.add_argument('--fp16', action='store_true', help='Run model fp16 mode.')\n parser.add_argument('--loss-scale', type=float, default=1,\n help='Loss scaling, positive power of 2 values can improve fp16 convergence.')\n parser.add_argument('--prof', dest='prof', action='store_true', help='Only run a few iters for profiling.')\n parser.add_argument('--val-ar', action='store_true', help='Do final validation by nearest aspect ratio')\n parser.add_argument('--distributed', action='store_true', help='Run distributed training')\n parser.add_argument('--world-size', default=-1, type=int, \n help='Number of gpus per machine. Param only needed for single machine training when using (faster) file sync')\n parser.add_argument('--dist-url', default='env://', type=str,\n help='url used to set up distributed training')\n parser.add_argument('--dist-backend', default='nccl', type=str, help='distributed backend')\n parser.add_argument('--local_rank', default=0, type=int,\n help='Used for multi-process training. 
Can either be manually set ' +\n 'or automatically set by using \\'python -m multiproc\\'.')\n return parser\n\ncudnn.benchmark = True\nargs = get_parser().parse_args()\nif args.local_rank > 0: sys.stdout = open(f'{args.save_dir}/GPU_{args.local_rank}.log', 'w')\n\n\nclass DataManager():\n def __init__(self, resize_sched=[0.4, 0.92]):\n self.resize_sched = resize_sched\n self.load_data('-sz/160', 256, 128)\n \n def set_epoch(self, epoch):\n if epoch==2:\n self.load_data('-sz/320', 256, 224) # lower validation accuracy when enabled for some reason\n if epoch==4:\n self.load_data('', 256, 224)\n if epoch==6:\n self.load_data('-sz/320', 256, 256) \n if epoch==8:\n self.load_data('', 256, 288, min_scale=0.5, use_ar=args.val_ar)\n\n if hasattr(self.trn_smp, 'set_epoch'): self.trn_smp.set_epoch(epoch)\n if hasattr(self.val_smp, 'set_epoch'): self.val_smp.set_epoch(epoch)\n \n # For val_ar faster scheduler - [0.35,0.88]\n\n def get_trn_iter(self):\n self.trn_iter = iter(self.trn_dl)\n return self.trn_iter\n\n def get_val_iter(self):\n self.val_iter = iter(self.val_dl)\n return self.val_iter\n \n def load_data(self, dir_prefix, batch_size, image_size, **kwargs):\n print(f'Dataset changing. \\nImage size: {image_size}. \\nBatch size: {batch_size} \\nDirectory: {dir_prefix}\\n')\n estart = time.time()\n loaders = dataloader.get_loaders(args.data+dir_prefix, bs=batch_size, sz=image_size, workers=args.workers, distributed=args.distributed, **kwargs)\n self.trn_dl,self.val_dl,self.trn_smp,self.val_smp = loaders\n self.trn_dl = dataloader.fetcher(self.trn_dl)\n self.val_dl = dataloader.DataPrefetcher(self.val_dl, prefetch=False)\n self.trn_len = len(self.trn_dl)\n self.val_len = len(self.val_dl)\n # clear memory\n gc.collect()\n torch.cuda.empty_cache()\n endtime = time.time() - estart\n print(f'Time took to load data: {endtime}')\n\n\n # def dali_load_data(self, dir_prefix, batch_size, image_size, **kwargs):\n # print(f'Dali Dataset changing. \\nImage size: {image_size}. 
\\nBatch size: {batch_size} \\nDirectory: {dir_prefix}\\n')\n # traindir = args.data+dir_prefix+'/train'\n # valdir = args.data+dir_prefix+'/validation'\n\n # pipe = HybridPipe(batch_size=batch_size, num_threads=args.workers, device_id=args.local_rank, data_dir=traindir)\n # pipe.build()\n # test_run = pipe.run()\n # from nvidia.dali.plugin.pytorch import DALIClassificationIterator\n # train_loader = DALIClassificationIterator(pipe, size=int(1281167 / args.world_size) )\n\n\n # pipe = HybridPipe(batch_size=batch_size, num_threads=args.workers, device_id = args.local_rank, data_dir=valdir)\n # pipe.build()\n # test_run = pipe.run()\n # from nvidia.dali.plugin.pytorch import DALIClassificationIterator\n # val_loader = DALIClassificationIterator(pipe, size=int(50000 / args.world_size) )\n\n # self.trn_dl = train_loader\n # self.val_dl = val_loader\n # self.trn_len = len(self.trn_dl)\n # self.val_len = len(self.val_dl)\n # # clear memory\n # gc.collect()\n # torch.cuda.empty_cache()\n\ndef main():\n # need to index validation directory before we start counting the time\n if args.val_ar: dataloader.sort_ar(args.data+'/validation')\n\n start_time = datetime.now()\n\n if args.distributed:\n print('Distributed: initializing process group')\n torch.cuda.set_device(args.local_rank)\n dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url, world_size=args.world_size)\n assert(args.world_size == dist.get_world_size())\n\n if args.fp16: assert torch.backends.cudnn.enabled, \"fp16 mode requires cudnn backend to be enabled.\"\n\n model = resnet.resnet50(pretrained=args.pretrained)\n print(\"Loaded model\")\n\n model = model.cuda()\n n_dev = torch.cuda.device_count()\n if args.fp16: model = network_to_half(model)\n if args.distributed:\n model = nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank], output_device=args.local_rank)\n\n global model_params, master_params\n if args.fp16: model_params, master_params = prep_param_lists(model)\n else: master_params = list(model.parameters())\n\n criterion = nn.CrossEntropyLoss().cuda()\n\n print(\"Creating data loaders\")\n dm = DataManager(str_to_num_array(args.resize_sched))\n\n if args.evaluate: return validate(dm.get_val_iter(), len(dm.val_dl), model, criterion, 0, start_time)\n\n print(\"Begin training\")\n for epoch in range(10):\n estart = time.time()\n dm.set_epoch(epoch)\n\n train_start = time.time()\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\", category=UserWarning)\n train(dm.get_trn_iter(), len(dm.trn_dl), model, criterion, epoch)\n train_end = (time.time() - train_start)\n num_images = 1281167\n img_per_sec = num_images / train_end\n print(f'Training processed {img_per_sec} images per second.\\n')\n\n\n val_start = time.time()\n validate(dm.get_val_iter(), len(dm.val_dl), model, criterion, epoch, start_time)\n val_end = (time.time() - val_start)\n num_images = 50000\n img_per_sec = num_images / train_end\n print(f'Validation processed {img_per_sec} images per second.\\n')\n\n num_images = 1281167+50000\n end_time = (time.time() - estart)\n img_per_sec = num_images / end_time\n print(f'Total images processed per second: {img_per_sec}\\n')\n\ndef str_to_num_array(argstr):\n return [float(s) for s in argstr.split(',')]\n\n# item() is a recent addition, so this helps with backward compatibility.\ndef to_python_float(t):\n if hasattr(t, 'item'):\n return t.item()\n else:\n return t[0]\n\ndef train(trn_iter, trn_len, model, criterion, epoch):\n batch_time = AverageMeter()\n data_time = 
AverageMeter()\n losses = AverageMeter()\n top1 = AverageMeter()\n top5 = AverageMeter()\n\n # switch to train mode\n model.train()\n end = time.time()\n\n st = time.time()\n # print('Begin training loop:', st)\n for i,data in enumerate(trn_iter):\n # if i == 0: print('Received input:', time.time()-st)\n\n # measure data loading time\n data_time.update(time.time() - end)\n\n\n # compute output\n with torch.no_grad():\t\n fake_input = torch.zeros([1,3,64,64]).cuda()\t\n if args.fp16: fake_input = fake_input.half()\t\n _ = model(fake_input)\n\n # compute gradient and do SGD step\n # if i == 0: print('Evaluate and loss:', time.time()-st)\n\n torch.cuda.synchronize()\n\n # if i == 0: print('Backward step:', time.time()-st)\n # measure elapsed time\n batch_time.update(time.time() - end)\n\n end = time.time()\n\n should_print = ((i+1) % args.print_freq == 0) or (i+1 == trn_len)\n if args.local_rank == 0 and should_print:\n output = ('Epoch: [{0}][{1}/{2}]\\t' \\\n + 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\\t' \\\n + 'Data {data_time.val:.3f} ({data_time.avg:.3f})\\t' \\\n + 'Loss {loss.val:.4f} ({loss.avg:.4f})\\t' \\\n + 'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\\t' \\\n + 'Prec@5 {top5.val:.3f} ({top5.avg:.3f})').format(\n epoch, i+1, trn_len, batch_time=batch_time,\n data_time=data_time, loss=losses, top1=top1, top5=top5)\n print(output)\n with open(f'{args.save_dir}/full.log', 'a') as f:\n f.write(output + '\\n')\n \ndef validate(val_iter, val_len, model, criterion, epoch, start_time):\n batch_time = AverageMeter()\n losses = AverageMeter()\n top1 = AverageMeter()\n top5 = AverageMeter()\n\n model.eval()\n end = time.time()\n\n for i,data in enumerate(val_iter):\n \n # compute output\n with torch.no_grad():\t\n fake_input = torch.zeros([1,3,64,64]).cuda()\t\n if args.fp16: fake_input = fake_input.half()\t\n _ = model(fake_input)\n\n # measure elapsed time\n batch_time.update(time.time() - end)\n end = time.time()\n\n should_print = ((i+1) % args.print_freq == 0) or (i+1 == val_len)\n if args.local_rank == 0 and should_print:\n output = ('Test: [{0}/{1}]\\t' \\\n + 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\\t' \\\n + 'Loss {loss.val:.4f} ({loss.avg:.4f})\\t' \\\n + 'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\\t' \\\n + 'Prec@5 {top5.val:.3f} ({top5.avg:.3f})').format(\n i+1, val_len, batch_time=batch_time, loss=losses,\n top1=top1, top5=top5)\n print(output)\n with open(f'{args.save_dir}/full.log', 'a') as f:\n f.write(output + '\\n')\n\n time_diff = datetime.now()-start_time\n print(f'~~{epoch}\\t{float(time_diff.total_seconds() / 3600.0)}\\t')\n\n return top5.avg\n\nclass AverageMeter(object):\n \"\"\"Computes and stores the average and current value\"\"\"\n def __init__(self):\n self.reset()\n\n def reset(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n\n def update(self, val, n=1):\n self.val = val\n self.sum += val * n\n self.count += n\n self.avg = self.sum / self.count\n\ndef sum_tensor(tensor):\n rt = tensor.clone()\n dist.all_reduce(rt, op=dist.reduce_op.SUM)\n return rt\n\ndef reduce_tensor(tensor):\n rt = tensor.clone()\n dist.all_reduce(rt, op=dist.reduce_op.SUM)\n rt /= args.world_size\n return rt\n\nif __name__ == '__main__': main()\n\n","sub_path":"pytorch/training/dataloader_performance.py","file_name":"dataloader_performance.py","file_ext":"py","file_size_in_byte":13802,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"497101208","text":"\"\"\"\nBased on 
https://github.com/NVlabs/SPADE\n\nOriginal copyright:\n\nCopyright (C) 2019 NVIDIA Corporation. All rights reserved.\nLicensed under the CC BY-NC-SA 4.0 license (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode).\n\"\"\"\n\nimport time\nfrom pathlib import Path\nimport os\nimport argparse\nfrom math import floor, ceil\n\nimport torch\n\n\ndef get_split(N, ratios):\n\n if sum(ratios) != 1.0:\n raise ValueError(\"Rations must sum to one.\")\n\n rts = list(ratios)\n maxrt = max(rts)\n rts.remove(maxrt)\n\n split = [ceil(N * maxrt)] + [floor(N * rt) for rt in rts]\n return split\n\n\n\n\ndef copyconf(default_opt, **kwargs):\n \"\"\"\n returns a configuration for creating a generator\n |default_opt| should be the opt of the current experiment\n |**kwargs|: if any configuration should be overriden, it can be specified here\n \"\"\"\n conf = argparse.Namespace(**vars(default_opt))\n for key in kwargs:\n print(key, kwargs[key])\n setattr(conf, key, kwargs[key])\n return conf\n\n\ndef now():\n \"\"\"\n :return: date and time as YYYYmmddhhMM\n \"\"\"\n return time.strftime(\"%Y-%m-%d-%H-%M\")\n\n\ndef str2bool(v):\n if v.lower() in ('yes', 'true', 't', 'y', '1'):\n return True\n elif v.lower() in ('no', 'false', 'f', 'n', '0'):\n return False\n else:\n raise argparse.ArgumentTypeError('Boolean value expected.')\n\ndef save_network(net, label, epoch, opt):\n save_filename = '%s_net_%s.pth' % (epoch, label)\n save_path = os.path.join(opt.checkpoints_dir, opt.name, save_filename)\n torch.save(net.cpu().state_dict(), save_path)\n if len(opt.gpu_ids) and torch.cuda.is_available():\n net.cuda()\n\n\ndef load_network(net, label, epoch, opt):\n save_filename = '%s_net_%s.pth' % (epoch, label)\n save_dir = os.path.join(opt.checkpoints_dir, opt.name)\n save_path = os.path.join(save_dir, save_filename)\n weights = torch.load(save_path)\n net.load_state_dict(weights)\n return net\n\n\ndef ensured_path(path: Path, isdir=False):\n if isdir:\n path.mkdir(parents=True, exist_ok=True)\n else:\n path.parent.mkdir(parents=True, exist_ok=True)\n return path","sub_path":"util/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":2153,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"566334140","text":"import dash\nimport dash_core_components as dcc\nimport dash_html_components as html\nfrom dash.dependencies import Input, Output\n\nimport quandl\nimport pandas as pd\nimport plotly.express as px\n\nquandl.ApiConfig.api_key = \"zGyHeGj9fVzHDHqLdBrU\"\nexternal_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']\n\napp = dash.Dash(__name__, external_stylesheets=external_stylesheets)\n\n# CHRIS/CME_NN1 - Henry Hub\n# CHRIS/CME_CL1 - Crude Oil\n\nhhData = quandl.get(\"CHRIS/CME_NN1\", returns=\"numpy\")\nhhDates = hhData[\"Date\"]\nhhPrices = hhData[\"Settle\"]\nhhDf = pd.DataFrame({\"Dates\": hhDates, \"Prices\": hhPrices})\n\ncoData = quandl.get(\"CHRIS/CME_CL1\", returns=\"numpy\")\ncoDates = coData[\"Date\"]\ncoPrices = coData[\"Settle\"]\ncoDf = pd.DataFrame({\"Dates\": coDates, \"Prices\": coPrices})\n\nhhFig = px.line(data_frame=hhDf, x=\"Dates\", y=\"Prices\")\ncoFig = px.line(data_frame=coDf, x=\"Dates\", y=\"Prices\")\n\napp.layout = html.Div(\n\n children=[\n\n html.H1(children='Collect your data from Quandl!'),\n html.Div(children=\"Dash: A web application framework for Python.\"),\n html.Br(),\n html.Button(\"Henry Hub\", id=\"btn-henryHub\", n_clicks=0),\n html.Button(\"Crude Oil\", id=\"btn-crudeOil\", n_clicks=0),\n 
dcc.Graph(id='the-graph', figure=hhFig)\n ]\n)\n\n# Whenever and input is changed - this section of the code is called back, and I believe\n# the code beneath it also activated\n@app.callback(\n Output(\"the-graph\", \"figure\"),\n [Input(\"btn-henryHub\", \"n_clicks\"),\n Input(\"btn-crudeOil\", \"n_clicks\")])\n\ndef updateGraph(btn1, btn2):\n changed_id = [p['prop_id'] for p in dash.callback_context.triggered][0]\n if \"btn-henryHub\" in changed_id:\n figOut = hhFig\n elif \"btn-crudeOil\" in changed_id:\n figOut = coFig\n else:\n figOut = hhFig\n return figOut\n\nif __name__ == '__main__':\n #app.run_server(\"0.0.0.0\", 80, debug=True)\n #app.run_server(debug=True)\n app.run_server(host=\"0.0.0.0\")\n\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1945,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"518879471","text":"from pages.login.LoginPage import LoginPage\nfrom tests.SuperTest import SuperTest\n\n\nclass BaseTestClassMethods(SuperTest):\n createFail = None\n\n @classmethod\n def setUpClass(cls):\n cls.driver = super().run_driver()\n LoginPage(cls.driver).login_as_super_admin()\n\n def setUp(self):\n if self.createFail is not None:\n self.fail(self.createFail)\n\n @classmethod\n def tearDownClass(cls):\n cls.driver = super().run_driver()\n LoginPage(cls.driver).login_as_super_admin()\n super().delete_users(cls.driver, cls.employees)\n super().delete_company(cls.driver, cls.ico)\n cls.driver.quit()\n","sub_path":"tests/BaseTestClassMethods.py","file_name":"BaseTestClassMethods.py","file_ext":"py","file_size_in_byte":659,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"502273352","text":"import unittest\n\n\nclass TestQueue(unittest.TestCase):\n def setUp(self):\n from mock import MagicMock\n from tilequeue.queue import SqsQueue\n\n self.message = None\n self.mockQueue = MagicMock()\n self.mockQueue.write = self.fake_write\n self.mockQueue.write_batch = self.fake_write_batch\n self.sqs = SqsQueue(self.mockQueue)\n self.values = []\n self.key_name = None\n self.coords = None\n\n def fake_write(self, message):\n self.message = message\n\n def fake_write_batch(self, message_tuples):\n self.message_tuples = message_tuples\n\n def test_enqueue_batch_adds_tiles(self):\n from mock import MagicMock\n coords = ['1/1/1', '2/2/2']\n mock = MagicMock()\n mock.side_effect = [False, False]\n self.sqs.enqueue_batch(coords)\n self.assertEqual(2, len(self.message_tuples))\n self.assertEqual(self.message_tuples[0][1], \"1/1/1\")\n self.assertEqual(self.message_tuples[1][1], \"2/2/2\")\n\n def test_enqueue_should_write_message_to_queue(self):\n self.sqs.enqueue('1/1/1')\n self.assertIsNotNone(self.message)\n self.assertEqual('1/1/1', self.message.get_body())\n","sub_path":"tests/queue/test_sqs.py","file_name":"test_sqs.py","file_ext":"py","file_size_in_byte":1209,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"250556015","text":"import matplotlib as mpl\n#mpl.use('Agg')\nimport matplotlib.pyplot as plt\nfrom matplotlib import cm, colors\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom matplotlib import animation\nimport numpy as np\nfrom scipy.special import sph_harm\nimport sys\n\n#importing file\ncms = sys.argv[1]\nparameters = sys.argv[2]\n\nRe_list = []\nIm_list = []\nparameters_list = []\n\n\nfor line in open(cms):\n\tlist = line.split()\n\ttry:\n\t\tRe = float(list[0])\n\t\tIm = 
float(list[1])\n\texcept TypeError:\n\t\tprint(\"Type Error!\")\n\t\tcontinue\n\tRe_list.append(Re)\n\tIm_list.append(Im)\n\nfor line in open(parameters):\n\tlist = line.split()\n\ttry:\n\t\tvalue = str(list[0])\n\t\tparameter = float(list[1])\n\t\n\texcept TypeError:\n\t\tprint(\"Type Error!\")\n\t\tcontinue\n\tparameters_list.append(parameter)\n\n\tif len(parameters_list)>=4:\n\t\tbreak\n\n\na = open(parameters)\nlines = a.readlines()\nif lines:\t\n\tlast_line=lines[-1]\nH_value = last_line.split()[1]\n\t\nparameters_list.append(H_value)\n\n\nphi = np.linspace(0, np.pi, 150)\ntheta = np.linspace(0, 2*np.pi, 150)\ntheta_2 = np.linspace(np.pi, 3*np.pi, 150)\n\nphi, theta = np.meshgrid(phi, theta)\n\n# The Cartesian coordinates of the unit sphere\nx = np.sin(phi) * np.cos(theta)\ny = np.sin(phi) * np.sin(theta)\nz = np.cos(phi)\n\nx_2 = np.sin(phi) * np.cos(theta_2)\ny_2 = np.sin(phi) * np.sin(theta_2)\n\nl = len(Re_list)-1\n# Calculate the spherical harmonic Y(l,m) and normalize to [0,1]\nfcolors = (Re_list[0]*sph_harm(0,l,theta,phi)).real\nfor m in range(1,l+1):\n\tfcolors += ((complex(Re_list[m],Im_list[m])*sph_harm(m,l,theta,phi)).real) + ((complex(Re_list[m],Im_list[m]).conjugate()*pow(-1,m)*sph_harm(-m,l,theta,phi)).real) \t\n\nfcolors_2 = (Re_list[0]*sph_harm(0,l,theta_2,phi)).real\nfor m in range(1,l+1):\n\tfcolors_2 += ((complex(Re_list[m],Im_list[m])*sph_harm(m,l,theta_2,phi)).real) + ((complex(Re_list[m],Im_list[m]).conjugate()*pow(-1,m)*sph_harm(-m,l,theta_2,phi)).real) \t\n\nfmax, fmin = fcolors.max(), fcolors.min()\n#fcolors = (fcolors)\n#fcolors = fcolors/(fmax-fmin)\nfcolors = (fcolors - fmin)/(fmax - fmin)\n\nfmax_2, fmin_2 = fcolors_2.max(), fcolors_2.min()\n#fcolors = (fcolors)\n#fcolors = fcolors/(fmax-fmin)\nfcolors_2 = (fcolors_2 - fmin_2)/(fmax_2 - fmin_2)\n\n# Set the aspect ratio to 1 so our sphere looks spherical\n#fig = plt.figure(figsize=plt.figaspect(1.))\nfig = plt.figure(figsize=(10,5))\nax = fig.add_subplot(1,2,1, projection='3d')\nax.plot_surface(x, y, z, rstride=1, cstride=1, facecolors=cm.seismic(fcolors)) \nax.view_init(45,0)\nax.set_axis_off()\nax = fig.add_subplot(1,2,2, projection='3d')\nax.plot_surface(x, y, z, rstride=1, cstride=1, facecolors=cm.seismic(fcolors)) \nax.view_init(45,180)\nax.set_axis_off()\n\nplt.title('Parameters are: el_not=%s, tau=%s, lambda3=%s, lambda4=%s\\nMinimum Hamiltonian value: %s'%(parameters_list[0], parameters_list[1], parameters_list[2], parameters_list[3], parameters_list[4]), fontsize=8)\n#plt.savefig(\"surface_pattern.png\")\nplt.show()\n","sub_path":"PhaseDiagramCalculations/GradientDescent_1l/surface_pattern_1l.py","file_name":"surface_pattern_1l.py","file_ext":"py","file_size_in_byte":2873,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"538795630","text":"\nresultado = 0\n\ndef convertir(lista):\n lista2 = lista.split(\",\")\n b= [int(x) for x in lista2]\n print(\"b es: \", b)\n #operacion(b)\n resultado = operacion(b)\n return resultado\n #igual(resultado)\n \n print(\"el resultado es: \",resultado)\n\ndef operacion(lista):\n \"\"\"\"\"\"\n #print(type(lista))\n return lista[0] + operacion(lista[1:]) if lista else 0","sub_path":"sistema.py","file_name":"sistema.py","file_ext":"py","file_size_in_byte":375,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"530391813","text":"import re\n\nn = int(input())\nfnm = []\nlnm=[]\n\nif(n<=2 or n>=100):\n print(\"NA\")\nelse:\n for _ in range(n):\n a = 
input().split(\" \")\n\n\n fnm.append(a[0])\n lnm.append(a[1])\n\n for x in range(n):\n if fnm.count(fnm[x]) > 1:\n print(fnm[x]+\" \"+lnm[x])\n else:\n print(fnm[x])\n\n\n\n\n\n\n\n","sub_path":"views/prog.py","file_name":"prog.py","file_ext":"py","file_size_in_byte":335,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"74521440","text":"import os\nimport sys\nimport re\nfrom collections import defaultdict\n\n# from_ih02_130_180.bed\n# mcf7.bed\n# from /beevol/home/satyanarr/softlinks/ctcf_len_dist/heatmap_one_system_to_other\nquery_fp = open(sys.argv[1])\nsubject_fp = open(sys.argv[2])\n\n# example line qery_fp\n# chr10\t100239910\t100242028\tchr10:100240692-100241087|peak8269_CTGTCTCTAGGGGGAGCAC%Carroll^6\n# example line subject_fp \n# chr10\t100018957\t100021075\tchr10:100019755-100020234|CTCF`159918_GAGACTGCAGGGGGAGGCA%ENCODE^6\n\nsubject_dict = defaultdict(lambda : \"NA\")\ncount_dict = defaultdict(lambda : 0)\nfor line in subject_fp:\n d_loc = [m.start() for m in re.finditer(\"\\t\", line)]\n cl_id = line[d_loc[-1]: len(line) - 1].split(\"^\")[-1] \n subject_dict[line[0:d_loc[2]]] = cl_id\n count_dict[cl_id] +=1\n\nreflection_dict = defaultdict(lambda : defaultdict (lambda : 0))\nfor line in query_fp:\n d_loc = [m.start() for m in re.finditer(\"\\t\", line)]\n cl_id = line[d_loc[-1]: len(line) - 1].split(\"^\")[-1]\n sub_cl = subject_dict[line[0:d_loc[2]]]\n if sub_cl != \"NA\":\n reflection_dict[cl_id][sub_cl] +=1\n\n #print (line[0:d_loc[2]] + \"\\t\" + cl_id + \"\\t\" + subject_dict[line[0:d_loc[2]]])\norder = list (map(str, range(1,7))) \nlast_line = [] \nfor k1 in order:\n print (k1, end = \"\\t\")\n total_count = 0 \n for k2 in order:\n print (str(reflection_dict[k1][k2]), end = \"\\t\")\n total_count += reflection_dict[k2][k1]\n last_line.append(count_dict[k1] - total_count)\n print ()\nprint (\"\\t\".join([str(len(order) + 1), \"\\t\".join(map(str, last_line))]))\n","sub_path":"scripts/table_find_tfbs_overlap.py","file_name":"table_find_tfbs_overlap.py","file_ext":"py","file_size_in_byte":1557,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"141939400","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Apr 19 22:33:21 2018\n\n@author: Aeolitus\n\"\"\"\nimport Fertigkeiten\nfrom Hilfsmethoden import Hilfsmethoden, VoraussetzungException\nimport DatenbankEditManoever\nfrom PyQt5 import QtWidgets, QtCore\n\nclass DatenbankEditManoeverWrapper(object):\n def __init__(self, datenbank, man=None, readonly=False):\n super().__init__()\n self.datenbank = datenbank\n if man is None:\n man = Fertigkeiten.Manoever()\n self.manöverPicked = man\n self.nameValid = True\n self.readonly = readonly\n self.voraussetzungenValid = True\n manDialog = QtWidgets.QDialog()\n self.ui = DatenbankEditManoever.Ui_manDialog()\n self.ui.setupUi(manDialog)\n\n if not man.isUserAdded:\n if readonly:\n self.ui.warning.setText(\"Gelöschte Elemente können nicht verändert werden.\")\n self.ui.warning.setVisible(True)\n\n manDialog.setWindowFlags(\n QtCore.Qt.Window |\n QtCore.Qt.CustomizeWindowHint |\n QtCore.Qt.WindowTitleHint |\n QtCore.Qt.WindowCloseButtonHint)\n \n self.ui.nameEdit.setText(man.name)\n self.ui.nameEdit.textChanged.connect(self.nameChanged)\n self.nameChanged()\n self.ui.probeEdit.setText(man.probe)\n self.ui.gegenEdit.setText(man.gegenprobe)\n self.ui.comboTyp.setCurrentIndex(man.typ)\n \n self.ui.voraussetzungenEdit.setPlainText(Hilfsmethoden.VorArray2Str(man.voraussetzungen, 
None))\n self.ui.voraussetzungenEdit.textChanged.connect(self.voraussetzungenTextChanged)\n\n self.ui.textEdit.setPlainText(man.text)\n manDialog.show()\n ret = manDialog.exec_()\n if ret == QtWidgets.QDialog.Accepted:\n self.man = Fertigkeiten.Manoever()\n self.man.name = self.ui.nameEdit.text()\n self.man.probe = self.ui.probeEdit.text()\n self.man.gegenprobe = self.ui.gegenEdit.text()\n self.man.typ = self.ui.comboTyp.currentIndex()\n self.man.voraussetzungen = Hilfsmethoden.VorStr2Array(self.ui.voraussetzungenEdit.toPlainText(), datenbank)\n self.man.text = self.ui.textEdit.toPlainText()\n\n self.man.isUserAdded = False\n if self.man == self.manöverPicked:\n self.man = None\n else:\n self.man.isUserAdded = True\n else:\n self.man = None\n\n def nameChanged(self):\n name = self.ui.nameEdit.text()\n if name == \"\":\n self.ui.nameEdit.setToolTip(\"Name darf nicht leer sein.\")\n self.ui.nameEdit.setStyleSheet(\"border: 1px solid red;\")\n self.nameValid = False\n elif name != self.manöverPicked.name and name in self.datenbank.manöver:\n self.ui.nameEdit.setToolTip(\"Name existiert bereits.\")\n self.ui.nameEdit.setStyleSheet(\"border: 1px solid red;\")\n self.nameValid = False\n else:\n self.ui.nameEdit.setToolTip(\"\")\n self.ui.nameEdit.setStyleSheet(\"\")\n self.nameValid = True\n self.updateSaveButtonState()\n\n def voraussetzungenTextChanged(self):\n try:\n Hilfsmethoden.VorStr2Array(self.ui.voraussetzungenEdit.toPlainText(), self.datenbank)\n self.ui.voraussetzungenEdit.setStyleSheet(\"\")\n self.ui.voraussetzungenEdit.setToolTip(\"\")\n self.voraussetzungenValid = True\n except VoraussetzungException as e:\n self.ui.voraussetzungenEdit.setStyleSheet(\"border: 1px solid red;\")\n self.ui.voraussetzungenEdit.setToolTip(str(e))\n self.voraussetzungenValid = False\n self.updateSaveButtonState()\n\n def updateSaveButtonState(self):\n self.ui.buttonBox.button(QtWidgets.QDialogButtonBox.Save).setEnabled(not self.readonly and self.nameValid and self.voraussetzungenValid)","sub_path":"DatenbankEditManoeverWrapper.py","file_name":"DatenbankEditManoeverWrapper.py","file_ext":"py","file_size_in_byte":3919,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"205701207","text":"# -*- encoding: utf-8 -*-\nfrom model_utils.models import TimeStampedModel\nimport uuid as uuid_lib\nfrom django.db import models\nfrom django.utils.translation import gettext_lazy as _\nfrom catalog.models import Product, BaseProductModel\nfrom django.dispatch import receiver\nfrom django.db.models.signals import pre_save\n\nclass ConsumerManager(models.Manager):\n use_for_related_fields = True\n\n def fill_name_by_fields(self, instance, full_name):\n shared_name = full_name.split(' ')\n length = len(shared_name)\n\n if length < 2:\n return False\n\n setattr(instance, 'last_name', shared_name[0])\n setattr(instance, 'first_name', shared_name[1])\n\n if length == 3:\n setattr(instance, 'middle_name', shared_name[2] or \"\")\n\n return instance\n\n def is_consumer(self, first_name, last_name, **kwargs):\n return self.filter(first_name=first_name, last_name=last_name, **kwargs)\n\n def get_full_name(self, consumer):\n return '%s %s %s' % (getattr(consumer, \"last_name\", \"\"),\n getattr(consumer, \"first_name\", \"\"),\n getattr(consumer, \"middle_name\", \"\"))\n\nclass Consumer(TimeStampedModel):\n last_name = models.CharField(_('Фамилия'), max_length=36)\n first_name = models.CharField(_('Имя'), max_length=32)\n middle_name = models.CharField(_('Отчество'), 
max_length=32, blank=True, null=True)\n phone_number = models.CharField(\n _('Номер телефона'),\n max_length=26,\n blank=True,\n null=True\n )\n email = models.EmailField(\n _('Email'),\n max_length=150,\n blank=True,\n null=True\n )\n requisites = models.CharField(\n _('Реквизиты'), max_length=100,\n blank=True,\n null=True\n )\n uuid = models.UUIDField(\n _('Идентификатор'),\n db_index=True,\n default=uuid_lib.uuid4,\n editable=True\n )\n\n objects = ConsumerManager()\n\n def __str__(self):\n return '%s %s' % (self.last_name, self.first_name)\n class Meta:\n db_table = 'site_consumers'\n verbose_name = _('Заказчик')\n verbose_name_plural = _('Заказчики')\n\n\nclass OrderedProduct(BaseProductModel):\n combustibility = models.ForeignKey(\n \"home.combustibility\",\n verbose_name=_(\"Горючесть\"),\n default=None,\n null=True,\n blank=True\n )\n acoustics = models.ForeignKey(\n \"home.acoustics\",\n verbose_name=_(\"Акустика\"),\n default=None,\n null=True,\n blank=True\n )\n lightning = models.ForeignKey(\n \"home.lightning\",\n verbose_name=_(\"Освящение\"),\n default=None,\n null=True,\n blank=True\n )\n edges = models.ForeignKey(\n \"home.edge\",\n verbose_name=_(\"Кромки\"),\n default=None,\n null=True,\n blank=True\n )\n proportions = models.ForeignKey(\n \"home.proportion\",\n verbose_name=_(\"Пропорции\"),\n default=None,\n null=True,\n blank=True\n )\n material = models.ForeignKey(\n \"home.material\",\n verbose_name=_(\"Материал\"),\n default = None,\n null=True,\n blank=True\n )\n colors = models.ForeignKey(\n \"home.color\",\n verbose_name=_(\"Цвет\"),\n default=None,\n null=True,\n blank=True\n )\n product = models.ForeignKey(\n Product,\n related_name=\"ordered_product\",\n verbose_name=_('Продукт')\n )\n\n quantity = models.IntegerField(\n _('Количество'),\n default=1\n )\n\n full_price = models.DecimalField(\n _('Общая цена'),\n editable=True,\n null=True,\n blank=True,\n max_digits=18,\n decimal_places=2,\n help_text=_('Расчитывается автоматически. 
Зависит от установленной цены продукта и оформленного количества.')\n )\n uuid = models.UUIDField(\n _('Идентификатор'),\n db_index=True,\n default=uuid_lib.uuid4,\n editable=True\n )\n\n def __str__(self):\n return self.product.name\n\n class Meta:\n db_table = 'consumers_ordered_products'\n verbose_name = _('Оформленный продукт')\n verbose_name_plural = _('Оформленные продукты')\n\n\n@receiver(pre_save, sender=OrderedProduct)\ndef count_whole_price_of_ordered_product(sender, instance, **kwargs):\n price = getattr(instance.product, 'price', None)\n\n if price:\n instance.full_price = price * instance.quantity\n\n\n return True\n","sub_path":"ceiling/personal_data/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":4788,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"2778818","text":"__author__ = 'Arno'\r\n\r\ndef word_count(line):\r\n start_char = 0\r\n end_char = 0\r\n words = {}\r\n for i in range(len(line)):\r\n if line[i].isalnum() and (i == 0 or not line[i-1].isalnum()):\r\n start_char = i\r\n for j in range(i, len(line)):\r\n if line[j].isalnum() and(j == len(line)-1 or not line[j+1].isalnum()):\r\n end_char = j\r\n word = line[i:j+1]\r\n if word in words:\r\n words[word] += 1\r\n else:\r\n words[word] = 1\r\n i = j\r\n break\r\n return words\r\n\r\n","sub_path":"all_data/exercism_data/python/word-count/79c9779b3c71498eb6d9e599874bc433.py","file_name":"79c9779b3c71498eb6d9e599874bc433.py","file_ext":"py","file_size_in_byte":659,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"499549953","text":"import torch\nimport pdb\n\ndef sinkhorn_stabilized(a, b, M, reg, numItermax=100, tau=1e3, stopThr=1e-3, print_period=20):\n\n n_hists = 0\n\n # init data\n dim_a = len(a)\n dim_b = len(b)\n\n # we assume that no distances are null except those of the diagonal of\n # distances\n alpha, beta = torch.zeros(dim_a), torch.zeros(dim_b)\n\n u, v = torch.ones(dim_a) / dim_a, torch.ones(dim_b) / dim_b\n\n def get_K(alpha, beta):\n \"\"\"log space computation\"\"\"\n return torch.exp(-(M - alpha.reshape((dim_a, 1)) - beta.reshape((1, dim_b))) / reg)\n\n def get_Gamma(alpha, beta, u, v):\n \"\"\"log space gamma computation\"\"\"\n return torch.exp(-(M - alpha.reshape((dim_a, 1)) - beta.reshape((1, dim_b))) / reg + torch.log(1e-6 +\n u.reshape((dim_a, 1))) + torch.log(1e-6 + v.reshape((1, dim_b))))\n\n # print(torch.min(K))\n\n K = get_K(alpha, beta)\n transp = K\n loop = 1\n cpt = 0\n err = 1\n\n while loop:\n\n uprev = u\n vprev = v\n\n # sinkhorn update\n\n v = b / (torch.tensordot(K.T, u, dims=([0], [0])) + 1e-6)\n u = a / (torch.tensordot(K, v, dims=([1], [0])) + 1e-6)\n # remove numerical problems and store them in K\n if torch.abs(u).max() > tau or torch.abs(v).max() > tau:\n if n_hists:\n alpha, beta = alpha + reg * \\\n torch.max(torch.log(u), 1), beta + reg * torch.max(torch.log(1e-6 + v))\n else:\n alpha, beta = alpha + reg * torch.log(u), beta + reg * torch.log(1e-6 + v)\n if n_hists:\n u, v = torch.ones((dim_a, n_hists)) / dim_a, torch.ones((dim_b, n_hists)) / dim_b\n else:\n u, v = torch.ones(dim_a) / dim_a, torch.ones(dim_b) / dim_b\n K = get_K(alpha, beta)\n\n if cpt % print_period == 0:\n # we can speed up the process by checking for the error only all\n # the 10th iterations\n transp = get_Gamma(alpha, beta, u, v)\n err = torch.norm((torch.sum(transp, dim=0) - b))\n\n\n if err <= stopThr:\n loop = False\n\n if cpt >= numItermax:\n loop = False\n\n if 
torch.any(torch.isnan(u)) or torch.any(torch.isnan(v)):\n # we have reached the machine precision\n # come back to previous solution and quit loop\n print('Warning: numerical errors at iteration', cpt)\n u = uprev\n v = vprev\n break\n\n cpt = cpt + 1\n\n return get_Gamma(alpha, beta, u, v)\n\n","sub_path":"superglue/sinkhorn.py","file_name":"sinkhorn.py","file_ext":"py","file_size_in_byte":2550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"388497846","text":"import os\nimport sys\nBASE_DIR = os.path.dirname(__file__)\nsys.path.append(BASE_DIR)\nsys.path.append(os.path.join(BASE_DIR, '../utils'))\nimport tensorflow as tf\nimport numpy as np\nimport tf_util\nfrom pointnet_util import pointnet_sa_module, pointnet_fp_module\n\ndef placeholder_inputs(batch_size, points_in, num_feat):\n pointclouds_pl = tf.placeholder(tf.float32, shape=(batch_size, points_in, num_feat), name='pointcloud_in')\n labels_pl = tf.placeholder(tf.int32, shape=(batch_size, points_in), name='labels')\n #smpws_pl = tf.placeholder(tf.float32, shape=(batch_size, num_point))\n return pointclouds_pl, labels_pl\n\n\ndef get_model(point_cloud, is_training, num_class, bn_decay=None):\n \"\"\" Semantic segmentation PointNet, input is BxNx3, output Bxnum_class \"\"\"\n batch_size = point_cloud.get_shape()[0].value\n num_point = point_cloud.get_shape()[1].value\n end_points = {}\n l0_xyz = tf.slice(point_cloud, [0,0,0], [-1,-1,3]) # point coordinates\n l0_points = tf.slice(point_cloud, [0,0,3], [-1,-1,-1]) #point attributes\n end_points['l0_xyz'] = l0_xyz\n\n # Set Abstraction layers\n l1_xyz, l1_points, l1_indices = pointnet_sa_module(l0_xyz, l0_points, npoint=50000, radius=1, nsample=32, mlp=[32,32,64], mlp2=None, group_all=False, is_training=is_training, bn_decay=bn_decay, scope='layer1')\n l2_xyz, l2_points, l2_indices = pointnet_sa_module(l1_xyz, l1_points, npoint=10000, radius=2, nsample=32, mlp=[64,64,128], mlp2=None, group_all=False, is_training=is_training, bn_decay=bn_decay, scope='layer2')\n l3_xyz, l3_points, l3_indices = pointnet_sa_module(l2_xyz, l2_points, npoint=2000, radius=4, nsample=32, mlp=[128,128,256], mlp2=None, group_all=False, is_training=is_training, bn_decay=bn_decay, scope='layer3')\n l4_xyz, l4_points, l4_indices = pointnet_sa_module(l3_xyz, l3_points, npoint=500, radius=9, nsample=32, mlp=[256,256,512], mlp2=None, group_all=False, is_training=is_training, bn_decay=bn_decay, scope='layer4')\n l5_xyz, l5_points, l5_indices = pointnet_sa_module(l4_xyz, l4_points, npoint=100, radius=25, nsample=32, mlp=[512,512,1024], mlp2=None, group_all=False, is_training=is_training, bn_decay=bn_decay, scope='layer5')\n # debug line:\n # l4_points = tf.Print(l1_points, [l0_xyz, l0_points, l1_xyz, l1_points], 'ln-points', -1, 12)\n end_points['l1_xyz'] = l1_xyz\n end_points['l2_xyz'] = l2_xyz\n end_points['l3_xyz'] = l3_xyz\n end_points['l4_xyz'] = l4_xyz\n end_points['l5_xyz'] = l5_xyz\n\n # Feature Propagation layers\n l4_points = pointnet_fp_module(l4_xyz, l5_xyz, l4_points, l5_points, [512,512], is_training, bn_decay, scope='fa_layer0')\n l3_points = pointnet_fp_module(l3_xyz, l4_xyz, l3_points, l4_points, [256,256], is_training, bn_decay, scope='fa_layer1')\n l2_points = pointnet_fp_module(l2_xyz, l3_xyz, l2_points, l3_points, [256,256], is_training, bn_decay, scope='fa_layer2')\n l1_points = pointnet_fp_module(l1_xyz, l2_xyz, l1_points, l2_points, [256,128], is_training, bn_decay, scope='fa_layer3')\n l0_points = pointnet_fp_module(l0_xyz, l1_xyz, 
l0_points, l1_points, [128,128,128], is_training, bn_decay, scope='fa_layer4')\n\n # FC layers\n net = tf_util.conv1d(l0_points, 128, 1, padding='VALID', bn=True, is_training=is_training, scope='fc1', bn_decay=bn_decay)\n end_points['feats'] = net\n net = tf_util.dropout(net, keep_prob=0.7, is_training=is_training, scope='dp1')\n net = tf_util.conv1d(net, num_class, 1, padding='VALID', activation_fn=None, scope='fc2', name='net')\n\n return net, end_points\n\n\ndef get_loss(pred, label):\n \"\"\" pred: BxNxC,\n label: BxN,\n smpw: BxN \"\"\"\n #classify_loss = tf.losses.sparse_softmax_cross_entropy(labels=label, logits=pred, scope='loss')\n classify_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=label, logits=pred, name='loss')\n tf.summary.scalar('classify_loss', classify_loss)\n return classify_loss\n","sub_path":"alsNet/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":3914,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"241404444","text":"from kafka.admin import KafkaAdminClient, NewTopic\nfrom kafka import KafkaProducer, KafkaConsumer\nfrom kafka import KafkaConsumer\nfrom json import dumps, loads\n\n\nclass KafkaClient():\n def __init__(self, id: str, servers: list) -> None:\n try:\n self.kafka_servers = servers\n self.id = id\n self.consumer, self.producer = None, None\n self.admin_client = KafkaAdminClient(\n bootstrap_servers=servers,\n client_id=self.id\n )\n\n except Exception as e:\n print(e)\n print('failed')\n\n def get_topics(self) -> list:\n try:\n return self.admin_client.list_topics()\n except Exception as e:\n print(e)\n\n def create_topic_type(self, name: str, partitions: int = 1, replication_factor: int = 1) -> NewTopic:\n return NewTopic(name=name, num_partitions=partitions, replication_factor=replication_factor)\n\n def create_topics(self, topics: list) -> None:\n try:\n result = self.admin_client.create_topics(new_topics=topics)\n print(result)\n except Exception as e:\n print(e)\n\n def delete_topics(self, topic_names: list) -> None:\n try:\n self.admin_client.delete_topics(topics=topic_names)\n except Exception as e:\n print(e)\n\n def create_producer(self, key_serializer=None, value_serializer=None, acks: int = 1, retries: int = 0, max_in_flight_requests_per_connection: int = 5):\n try:\n self.producer = KafkaProducer(\n client_id=self.id,\n bootstrap_servers=self.kafka_servers,\n key_serializer=key_serializer,\n value_serializer=value_serializer,\n acks=acks,\n retries=retries,\n max_in_flight_requests_per_connection=max_in_flight_requests_per_connection\n )\n except Exception as e:\n print(e)\n\n def send_data(self, topic_name: str, data_list: list, func=lambda x: x):\n if(self.producer != None):\n try:\n for data in data_list:\n pass_data = func(data)\n self.producer.send(topic_name, value=pass_data)\n\n except Exception as e:\n print(e)\n else:\n print('Define a Producer First Using Create Consumer Method')\n\n def create_consumer(self, topics: str, group_id: str, auto_commit: bool = True, offset: str = 'earliest', key_deserializer=None, value_deserializer=None):\n try:\n self.consumer = KafkaConsumer(\n topics,\n client_id=self.id,\n bootstrap_servers=self.kafka_servers,\n auto_offset_reset=offset,\n enable_auto_commit=auto_commit,\n group_id=group_id,\n key_deserializer=key_deserializer,\n value_deserializer=value_deserializer\n )\n\n except Exception as e:\n print(e)\n\n def get_data(self, amount: int = 100):\n if(self.consumer != None):\n return_data = []\n try:\n 
for index, message in enumerate(self.consumer):\n                    # stop once the requested number of messages has been read\n                    if index >= amount:\n                        break\n                    value = message.value\n                    print(index, value)\n                    return_data.append(f'{index}-{value}')\n\n                return return_data\n\n            except Exception as e:\n                print(e)\n        else:\n            print('Define a Consumer First Using Create Consumer Method')\n\n    def get_json_serializer(self):\n        return lambda x: dumps(x).encode('utf-8')\n\n    def get_json_deserializer(self):\n        return lambda x: loads(x.decode('utf-8'))\n\n\nif __name__ == \"__main__\":\n\n    kf_client = KafkaClient(\n        'milkyb',\n        [\n            'b-1.demo-cluster-1.9q7lp7.c1.kafka.eu-west-1.amazonaws.com:9092',\n            'b-2.demo-cluster-1.9q7lp7.c1.kafka.eu-west-1.amazonaws.com:9092'\n        ]\n    )\n\n    print(kf_client.get_topics())\n\n    kf_client.create_producer(\n        value_serializer=kf_client.get_json_serializer())\n\n    kf_client.create_consumer(\n        topics='Reiten-Text-Corpus',\n        offset='earliest',\n        auto_commit=True,\n        group_id='text-corpus-reader',\n        value_deserializer=kf_client.get_json_deserializer())\n\n    kf_client.send_data(topic_name='Reiten-Text-Corpus',\n                        data_list=[{'number': 1}, {'number': 2}, {'number': 3}, {'number': 4}, {'number': 5}, {'number': 6}, {'number': 7}, {'number': 8}, {'number': 9}, {'number': 10}, {'number': 11}, {'number': 12}, {'number': 13}, {'number': 14}, {'number': 15}])\n\n    print(kf_client.get_data(10))\n","sub_path":"scripts/KafkaClient.py","file_name":"KafkaClient.py","file_ext":"py","file_size_in_byte":4745,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} {"seq_id":"143714649","text":"from items import *\nfrom random import randint\nfrom colorama import Fore, Style\nfrom ui import UI\nui = UI()\n\ndef combat(enemy, player):\n    while enemy.alive() and player.alive():\n        ui.print_combat_status_table(player, enemy)\n        print()\n        ui.print_combat_menu(enemy)\n        raw_input = input()\n        if raw_input == \"1\":\n            ui.clear()\n            ui.print_combat_status_table(player, enemy)\n            print()\n            player.attack(enemy)\n            if not enemy.alive():\n                print(\"The {} is dead.\".format(type(enemy).__name__.lower()))\n        elif raw_input == \"2\":\n            pass\n        else:\n            print(\"Invalid input: {}. {} has a chance to attack!\".format(raw_input, type(enemy).__name__.lower()))\n\n        if enemy.alive():\n            enemy.attack(player)\n            if not player.alive():\n                print(\"The {} defeats you. 
Who will save your brother now?\".format(type(enemy).__name__.lower()))\n                input(\"Press\" + Fore.GREEN + \" Enter\" + Style.RESET_ALL + \" to continue...\")\n                ui.clear()\n                ui.game_over_screen()\n    \n    input(\"Press\" + Fore.GREEN + \" Enter\" + Style.RESET_ALL + \" to continue...\")\n    \n    # clear the screen\n    ui.clear()","sub_path":"combat_engine.py","file_name":"combat_engine.py","file_ext":"py","file_size_in_byte":1275,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} {"seq_id":"507817211","text":"from time import time\n\nimport mysql\nimport requests\nimport RPi.GPIO as GPIO\nfrom mfrc522 import SimpleMFRC522\n\nreader = SimpleMFRC522()\ntry:\n    card_id = None\n    mysql.connect()\n    while True:\n        card_read_id, _ = reader.read()\n        if card_read_id != card_id or time() - start_time > 1:\n            start_time = time()\n            card_id = card_read_id\n\n            student_id = mysql.find_id_user(card_id)\n            my_data = { 'action': 'get_status'}\n            status_result = requests.post('http://192.168.1.20:5000/api', data = my_data)\n            status_temp = None\n            if eval(status_result.text)['status'] == 'shutdown':\n                # print(student_id)\n                if student_id != False:\n                    status_temp = 'boot'\n                    my_data = { 'action': 'debug',\n                                'key': 'insert_status',\n                                'student_id': str(student_id),\n                                'status': status_temp}\n                    r = requests.post('http://192.168.1.20:5000/api', data = my_data)\n                    print(f'status={status_temp}')\n                    print(r.text)\n                else:\n                    mysql.insert_unregistered_status(card_id, '')\n                    print(f'card_id={card_id}')\n            elif eval(status_result.text)['status'] == 'boot':\n                if student_id == eval(status_result.text)['studio_id']:\n                    status_temp = 'shutdown'\n                    my_data = { 'action': 'debug',\n                                'key': 'insert_status',\n                                'student_id': str(student_id),\n                                'status': status_temp}\n                    r = requests.post('http://192.168.1.20:5000/api', data = my_data)\n                    print(f'status={status_temp}')\n                    print(r.text)\n                else:\n                    mysql.insert_unregistered_status(card_id, '')\n                    print(f'card_id={card_id}')\n\n            # if eval(status_result.text)['studio_id'] == student_id:\n            #     status_temp = None\n            #     if eval(status_result.text)['status'] == 'boot':\n            #         status_temp = 'shutdown'\n            #     else:\n            #         status_temp = 'boot'\n            # else:\n            #     mysql.insert_unregistered_status(card_id, '')\n            #     print(f'card_id={card_id}')\n    \nfinally:\n    pass\n    mysql.release()\n    GPIO.cleanup()\n","sub_path":"rc522.py","file_name":"rc522.py","file_ext":"py","file_size_in_byte":2508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} {"seq_id":"264617728","text":"import tqdm\nimport time\nimport sys\nfrom time import sleep\nfrom tqdm import tqdm\nfrom tqdm import trange\n'''\nfor i in tqdm(range(1000)):\n    sleep(1)\n'''\n\n\n'''\nfor i in trange(100):\n    sleep(1)\n'''\n\npbar = tqdm(['a','b','c','d'])\nfor char in pbar:\n    pbar.set_description_str(\"Processing %s\" % char)\n\nclass ShowProcess():\n    \"\"\"\n    A small helper class that renders a text progress bar.\n    Call show_process() once per step to update the bar.\n    \"\"\"\n    i = 0  # current step\n    max_steps = 0  # total number of steps to process\n    max_arrow = 50  # length of the progress bar\n    infoDone = 'done'\n\n    def __init__(self, max_steps, infoDone = \"Done\"):\n        self.max_steps = max_steps\n        self.i = 0\n        self.infoDone = infoDone\n    # Display function: renders the bar for the current step i.\n    # Output looks like [>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>]100.00%\n\n    def show_process(self, i=None):\n        if i is not None:\n            self.i = i\n        else:\n            self.i += 1\n        num_arrow = int(self.i * self.max_arrow / self.max_steps)  # number of '>' to draw\n        num_line = self.max_arrow - num_arrow  # number of '-' to draw\n        percent = self.i * 100.0 / self.max_steps  # completion percentage, formatted as xx.xx%\n        process_bar = '[' + '>' * num_arrow + '-' * num_line + ']'\\\n                      + '%.2f' % percent + '%' + '\\r'  # '\\r' returns the cursor to the start of the line without a newline\n        sys.stdout.write(process_bar)  # write the bar to the terminal\n        sys.stdout.flush()\n        if self.i >= self.max_steps:\n            self.close()\n\n    def close(self):\n        print(\" \")\n        print(self.infoDone)\n        self.i = 0\n\nif __name__ =='__main__':\n    max_steps = 100\n    process_bar = ShowProcess(max_steps, 'OK')\n\n    for i in range(max_steps):\n        process_bar.show_process()\n        time.sleep(0.01)\n\n\n\n","sub_path":"utils/tqdm_usage.py","file_name":"tqdm_usage.py","file_ext":"py","file_size_in_byte":1801,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} {"seq_id":"520788704","text":"class CountList:\n    def __init__(self, *args):\n        self.values = [x for x in args]\n        self.count = {}.fromkeys(range(len(self.values)),0)\n    def __len__(self):\n        return len(self.values)\n    def __getitem__(self, key):\n        self.count[key] += 1\n        return self.values[key]\n\nc1 = CountList(1,3,5,7,9)\nc2 = CountList(2,4,6,8,10)\nprint(\"c1.values:\", c1.values)\nprint(\"c2.values:\", c2.values)\nprint(\"c1[1]:\", c1[1])\nprint(\"c2[1]:\", c2[1])\nprint(\"c1[1]+c2[1]:\", c1[1]+c2[1])\nprint(\"c1.count:\", c1.count)\nprint(\"c2.count:\", c2.count)\n# print(\"c1.count:\", c1.count)\n# print(\"c2.count:\", c2.count)\n# # usage:\n# c1[1] ## 3\n# c2[1] ## 4\n# c1[1] + c2[1] \t## 7\n# c1.count ## {0:0,1:2,2:0,3:0,4:0}\n# c2.count ## {0:0,1:2,2:0,3:0,4:0}\t\n","sub_path":"hw1/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":749,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} {"seq_id":"269633774","text":"import pygame\nimport sys\nfrom modules.constants import *\n\nclass EventHandler(object):\n    # Handles user input\n\n    def __init__(self):\n        self.display_pos = [0, 0]\n        self.rmb_down = False\n\n    def handle_events(self):\n        # Responds to in-game events\n\n        for event in pygame.event.get():\n\n            if event.type == pygame.QUIT:\n                pygame.quit()\n                sys.exit()\n\n            if event.type == pygame.MOUSEBUTTONDOWN:\n                if event.button == RIGHT_MOUSE_BUTTON:\n                    self.rmb_down = True\n\n            if event.type == pygame.MOUSEBUTTONUP:\n                if event.button == RIGHT_MOUSE_BUTTON:\n                    self.rmb_down = False","sub_path":"modules/event_handler.py","file_name":"event_handler.py","file_ext":"py","file_size_in_byte":707,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} {"seq_id":"491118651","text":"from http import HTTPStatus\nfrom typing import Mapping\n\nimport flask\nfrom celery.canvas import chain\nfrom celery.result import AsyncResult\nfrom flask import Response, redirect\nfrom flask.globals import request\nfrom flask.helpers import url_for\nfrom flask.templating import render_template\nfrom flask.views import MethodView\nfrom marshmallow import EXCLUDE\n\nfrom plugins.costume_loader_pkg import COSTUME_LOADER_BLP, CostumeLoader\nfrom plugins.costume_loader_pkg.schemas import (\n    TaskResponseSchema,\n    InputParametersSchema,\n    InputParameters,\n)\nfrom plugins.costume_loader_pkg.tasks import loading_task\nfrom qhana_plugin_runner.api.plugin_schemas import PluginMetadataSchema\nfrom qhana_plugin_runner.db.models.tasks import ProcessingTask\nfrom qhana_plugin_runner.tasks import save_task_error, save_task_result\n\n\n@COSTUME_LOADER_BLP.route(\"/\")\nclass PluginsView(MethodView):\n    \"\"\"Plugins collection resource.\"\"\"\n\n    @COSTUME_LOADER_BLP.response(HTTPStatus.OK, PluginMetadataSchema)\n    @COSTUME_LOADER_BLP.require_auth(\"basicAuth\", optional=False)\n    def get(self):\n        \"\"\"Plugin loader endpoint returning the plugin metadata.\"\"\"\n        
return {\n \"name\": CostumeLoader.instance.name,\n \"version\": CostumeLoader.instance.version,\n \"identifier\": CostumeLoader.instance.identifier,\n \"root_href\": url_for(f\"{COSTUME_LOADER_BLP.name}.PluginsView\"),\n \"title\": \"Costume loader\",\n \"description\": \"Loads all the costumes or base elements from the MUSE database.\",\n \"plugin_type\": \"data-loader\",\n \"tags\": [\"data:loading\"],\n \"processing_resource_metadata\": {\n \"href\": url_for(f\"{COSTUME_LOADER_BLP.name}.LoadingView\"),\n \"ui_href\": url_for(f\"{COSTUME_LOADER_BLP.name}.MicroFrontend\"),\n \"inputs\": [],\n \"outputs\": [\n [\n {\n \"output_type\": \"raw\",\n \"content_type\": \"application/json\",\n \"name\": \"Raw costume data\",\n },\n {\n \"output_type\": \"attribute-metadata\",\n \"content_type\": \"application/json\",\n \"name\": \"Attribute metadata for the costume data\",\n },\n {\n \"output_type\": \"graphs\",\n \"content_type\": \"application/zip\",\n \"name\": \"Taxonomies\",\n },\n ]\n ],\n },\n }\n\n\n@COSTUME_LOADER_BLP.route(\"/ui/\")\nclass MicroFrontend(MethodView):\n \"\"\"Micro frontend for the costume loader plugin.\"\"\"\n\n @COSTUME_LOADER_BLP.html_response(\n HTTPStatus.OK, description=\"Micro frontend of the costume loader plugin.\"\n )\n @COSTUME_LOADER_BLP.arguments(\n InputParametersSchema(\n partial=True, unknown=EXCLUDE, validate_errors_as_result=True\n ),\n location=\"query\",\n required=False,\n )\n @COSTUME_LOADER_BLP.require_auth(\"basicAuth\", optional=False)\n def get(self, errors):\n \"\"\"Return the micro frontend.\"\"\"\n return self.render(request.args, errors)\n\n @COSTUME_LOADER_BLP.html_response(\n HTTPStatus.OK, description=\"Micro frontend of the costume loader plugin.\"\n )\n @COSTUME_LOADER_BLP.arguments(\n InputParametersSchema(\n partial=True, unknown=EXCLUDE, validate_errors_as_result=True\n ),\n location=\"form\",\n required=False,\n )\n @COSTUME_LOADER_BLP.require_auth(\"basicAuth\", optional=False)\n def post(self, errors):\n \"\"\"Return the micro frontend with prerendered inputs.\"\"\"\n return self.render(request.form, errors)\n\n def render(self, data: Mapping, errors: dict):\n data_dict = dict(data)\n app = flask.current_app\n fields = InputParametersSchema().fields\n\n # define default values\n default_values = {\n fields[\"db_host\"].data_key: app.config.get(\"COSTUME_LOADER_DB_HOST\"),\n fields[\"db_user\"].data_key: app.config.get(\"COSTUME_LOADER_DB_USER\"),\n fields[\"db_password\"].data_key: app.config.get(\"COSTUME_LOADER_DB_PASSWORD\"),\n fields[\"db_database\"].data_key: app.config.get(\"COSTUME_LOADER_DB_DATABASE\"),\n }\n\n # overwrite default values with other values if possible\n default_values.update(data_dict)\n data_dict = default_values\n\n return Response(\n render_template(\n \"costume_loader_template.html\",\n name=CostumeLoader.instance.name,\n version=CostumeLoader.instance.version,\n schema=InputParametersSchema(),\n values=data_dict,\n errors=errors,\n process=url_for(f\"{COSTUME_LOADER_BLP.name}.LoadingView\"),\n )\n )\n\n\n@COSTUME_LOADER_BLP.route(\"/load_costumes_and_taxonomies/\")\nclass LoadingView(MethodView):\n \"\"\"Start a long running processing task.\"\"\"\n\n @COSTUME_LOADER_BLP.arguments(InputParametersSchema(unknown=EXCLUDE), location=\"form\")\n @COSTUME_LOADER_BLP.response(HTTPStatus.OK, TaskResponseSchema())\n @COSTUME_LOADER_BLP.require_auth(\"basicAuth\", optional=False)\n def post(self, input_params: InputParameters):\n \"\"\"Start the costume loading task.\"\"\"\n db_task = ProcessingTask(\n 
task_name=loading_task.name,\n parameters=InputParametersSchema().dumps(input_params),\n )\n db_task.save(commit=True)\n\n # all tasks need to know about db id to load the db entry\n task: chain = loading_task.s(db_id=db_task.id) | save_task_result.s(\n db_id=db_task.id\n )\n # save errors to db\n task.link_error(save_task_error.s(db_id=db_task.id))\n result: AsyncResult = task.apply_async()\n\n db_task.task_id = result.id\n db_task.save(commit=True)\n\n return redirect(\n url_for(\"tasks-api.TaskView\", task_id=str(result.id)), HTTPStatus.SEE_OTHER\n )\n","sub_path":"plugins/costume_loader_pkg/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":6161,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"83763807","text":"from Domain.cheltuieli import creeaza_cheltuiala, get_str\nfrom Logic.crud import adaugare, stergere\n\n\ndef show_menu_in_line():\n print('Puteti adauga cheltuieli in lista de cheltuieli, cu functia \"add\" si introducand valori adecvate')\n print('Puteti afisa toate cheltuielile din lista de cheltuieli, cu functia \"showall\"')\n print('Puteti sterge cheltuieli in lista de cheltuieli, scriind \"delete\" si introducand un id al unei cheltuieli existente')\n print('Toate comenzile trebuie apelate pe o singura linie si separate prin \";\" iar campurile prin \",\", fara alti separatori!!!')\n print('')\n\ndef add(lst_cheltuieli, id_ap, nr_ap, suma, data, tip):\n try:\n id = int(id_ap)\n except ValueError as ve:\n print('Eroare: ', ve)\n return lst_cheltuieli\n try:\n nr_ap = int(nr_ap)\n except ValueError as ve:\n print('Eroare: ', ve)\n return lst_cheltuieli\n try:\n suma = int(suma)\n except ValueError as ve:\n print('Eroare: ', ve)\n return lst_cheltuieli\n try:\n id = int(id_ap)\n new_cheltuiala = creeaza_cheltuiala(id, nr_ap, suma, data, tip)\n lst_cheltuieli = adaugare(lst_cheltuieli, id, nr_ap, suma, data, tip, [], [])\n except ValueError as ve:\n print('Eroare:', ve)\n else:\n print('Cheltuiala s-a aduagat cu succes!')\n return lst_cheltuieli\n\n\ndef delete(lst_cheltuieli, id_ap):\n try:\n id_ap = int(id_ap)\n except ValueError as ve:\n print('Eroare: ', ve)\n return lst_cheltuieli\n try:\n lst_cheltuieli = stergere(lst_cheltuieli, id_ap, [], [])\n except ValueError as ve:\n print('Eroare:', ve)\n return lst_cheltuieli\n print('Cheltuiala s-a sters cu succes!')\n return lst_cheltuieli\n\n\ndef showall(lst_cheltuieli):\n for cheltuiela in lst_cheltuieli:\n print(get_str(cheltuiela))\n\ndef run_in_line_console(lst_cheltuieli):\n while True:\n show_menu_in_line()\n optiune = input('Introduceti comenzile:')\n optiuni = optiune.split(';')\n for comenzi in optiuni:\n sir_optiune = comenzi.split(',')\n if sir_optiune[0] == 'add':\n id_ap = sir_optiune[1]\n nr_ap = sir_optiune[2]\n suma = sir_optiune[3]\n data = sir_optiune[4]\n tip = sir_optiune[5]\n lst_cheltuieli = add(lst_cheltuieli, id_ap, nr_ap, suma, data, tip)\n if sir_optiune[0] == 'delete':\n id_ap = sir_optiune[1]\n lst_cheltuieli = delete(lst_cheltuieli, id_ap)\n if sir_optiune[0] == 'showall':\n showall(lst_cheltuieli)","sub_path":"User_interface/coomand_line_console.py","file_name":"coomand_line_console.py","file_ext":"py","file_size_in_byte":2659,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"579636671","text":"import numpy as np\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\nfrom keras.models import Sequential\nfrom keras import layers\nfrom 
keras.layers.core import Dropout\n\ndef build_model():\n # Neural networks model building with Keras and TensorFlow\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=110)\n model = Sequential()\n input_shape = X_train.shape[1]\n model.add(layers.Dense(1024, input_shape=(input_shape,), activation=\"relu\"))\n\n # Dropout for avoiding overfitting\n model.add(Dropout(0.2))\n model.add(layers.Dense(1024, activation=\"relu\"))\n model.add(Dropout(0.2))\n model.add(layers.Dense(1, activation=\"sigmoid\"))\n model.compile(loss=\"binary_crossentropy\", optimizer=\"adam\", metrics=[\"accuracy\"])\n\n model.summary()\n fit_keras = model.fit(X_train, y_train, epochs=100, verbose=True, validation_data=(X_test, y_test), batch_size=30)\n return model\n\ndef model_performance(model):\n accuracy = model.evaluate(X_train, y_train, verbose=False)\n print(\"Training score: {:.5f}\".format(accuracy[0]))\n print(\"Training accuracy: {:.5f}\\n\".format(accuracy[1]))\n\n accuracy = model.evaluate(X_test, y_test, verbose=False)\n print(\"Testing score: {:.5f}\".format(accuracy[0]))\n print(\"Testing accuracy: {:.5f}\\n\".format(accuracy[1]))\n\nif __name__ == '__main__':\n model = build_model()\n model_performance(model)\n","sub_path":"models/neural_networks_keras.py","file_name":"neural_networks_keras.py","file_ext":"py","file_size_in_byte":1427,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"616104390","text":"# Definition for singly-linked list.\n# class ListNode(object):\n# def __init__(self, x):\n# self.val = x\n# self.next = None\n\nclass Solution(object):\n def swapPairs(self, head):\n \"\"\"\n :type head: ListNode\n :rtype: ListNode\n \"\"\"\n if not head or not head.next:\n return head\n \n dummy_head = ListNode(0)\n dummy_head.next = head\n p_prev = dummy_head\n p_1 = head\n p_2 = head.next\n \n while p_2:\n p_temp = p_2.next\n p_prev.next = p_2\n p_2.next = p_1\n p_1.next = p_temp\n \n if p_temp == None:\n break\n \n p_prev = p_1\n p_1 = p_temp\n p_2 = p_temp.next\n \n return dummy_head.next\n","sub_path":"linkedlist-medium/24.Swap-Nodes-in-Pairs.py","file_name":"24.Swap-Nodes-in-Pairs.py","file_ext":"py","file_size_in_byte":826,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"391081485","text":"import pulumi\nimport pulumi_aws as aws\n\n#Define # of Swarm leader instances\nno_of_leaders=3\n\n#Define size of Swarm leader & worker instances\nleader_size = 't3.medium'\n\n#SSH Key to provision\nkey_name = \"aws-poc\"\n#read in leader data, to provision the instance\nwith open('init_script.swarm-leader.txt', 'r') as init_script:\n leader_user_data = init_script.read()\n print(leader_user_data)\n\n#Prep AMI to use : aws ubuntu 20:https://cloud-images.ubuntu.com/locator/ec2/\n\nami = aws.get_ami(most_recent=True,\n filters=[\n aws.GetAmiFilterArgs(\n name=\"name\",\n values=[\"ubuntu/images/hvm-ssd/ubuntu-focal-20.04-amd64-server-*\"],\n ),\n aws.GetAmiFilterArgs(\n name=\"virtualization-type\",\n values=[\"hvm\"],\n ),\n ],\n owners=[\"099720109477\"]) \n\n#Create leader nodes\nfor i in range(1,1+no_of_leaders):\n instance_name='k3s'+str(i)\n server = aws.ec2.Instance(instance_name,\n instance_type=leader_size,\n #vpc_security_group_ids=[\"sg-0263ccb5f77e20721\"],\n vpc_security_group_ids=[\"sg-07d81e788234bfa40\"],\n user_data=leader_user_data,\n ami=ami.id,\n key_name=key_name)\n ip= 'k3s_public_ip'+str(i)\n dns= 'k3s_public_dns'+str(i)\n pulumi.export(ip, 
server.public_ip)\n pulumi.export(dns, server.public_dns)\n","sub_path":"__main.py","file_name":"__main.py","file_ext":"py","file_size_in_byte":1326,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"442077315","text":"import serial\nfrom serial import SerialException\nfrom datetime import date, datetime\nimport time\nfrom ChargeController import ChargeController\nfrom BatteryController import BatteryController\nimport psycopg2\nfrom random import randint\n\n# Make sure to change this depending on your system, this one is for mac\n# linux will be something like \"ttyAMA0/ACM0\"\n# windows will be something like \"COM1\"\n# the baud rate should be 19200\n\n#ser = serial.Serial(\"/dev/tty.usbmodem1421\", 19200)\n\ndt = datetime.now()\nchargeID=randint(1, 1000000)\nbatteryID=randint(1, 10000000)\n\n# main loop\n\n\ndef insertBattery():\n\tquery = \"\"\"INSERT INTO home_bdata (id, current_soc, current_kw, current_kvar, state, capacity, roundtrip_eff,\n\tmin_soc, max_charging_rate, max_discharging_rate, charging_eff, discharging_eff, required_reserve, rated_voltage,\n\tphase, current_voltage, b_id_id, timestamp) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s);\"\"\"\n\treturn query\ndef insertCharge():\n\tquery = \"\"\"INSERT INTO home_sdata (id, output_voltage, current, real_power, s_id_id, timestamp) VALUES (%s, %s, %s, %s, %s, %s);\"\"\"\n\treturn query\n\ndef createNodeController():\n\tquery = \"\"\"INSERT INTO home_nodecontroller (id, object_id, model, manufacturer, content_type_id) VALUES (%s, %s, %s, %s, %s, %s);\"\"\"\n\treturn query\n\ndef createSolar():\n\tquery = \"\"\"INSERT INTO home_solar (id, manufracturer, dimension, weight, short_circuit_current, open_circuit_voltage, capacity, azimuth, slope, nc_id_id, model) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s);\"\"\"\n\treturn query\n\ndef createBattery():\n\tquery = \"\"\"INSERT INTO home_battery (id, manufacturer, dimension, weight, R, capacity, rountrip_efficiency, min_soc, max_charging_rate, max_discharing_rate, charging_efficiency, discharging_efficiency, required_reserve, rated_voltage, phase, current_voltage, nc_id_id, model) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s);\"\"\"\n\treturn query\n\ntry:\n\tconn = psycopg2.connect(\"dbname='CYB_PHYS_CAPSTONE_DB' user='admin' host='localhost' password='abc123' port='3306'\")\n\tconn.autocommit = True\nexcept:\n\tprint(\"I am unable to connect to the database\")\n\n\ncursor = conn.cursor()\n\n# for getting nformation on the tables, and their columns\n#\n#\n#\n# cursor.execute(\"\"\"SELECT table_name FROM information_schema.tables WHERE table_schema = 'public'\"\"\")\n# for table in cursor.fetchall():\n# \tprint(table)\ncursor.execute(\"select column_name from information_schema.columns where table_name='home_bdata'\")\ncolumn_names = [row[0] for row in cursor]\nprint (column_names)\ncursor.execute(\"select column_name from information_schema.columns where table_name='home_battery'\")\ncolumn_names = [row[0] for row in cursor]\nprint (column_names)\ncursor.execute(\"\"\"SELECT * FROM home_battery\"\"\")\nprint(cursor.fetchall())\nprint (dt)\n\ntry:\n\tcursor.execute(createNodeController(), (1, 1, \"pi\",\"raspberrypi\", 9))\nexcept:\n\tprint(\"error trying to create node_controller\")\n\ntry:\n\tcursor.execute(createNodeController(), (2, 1, 'pi',\"raspberrypi\", 12))\nexcept:\n\tprint(\"error trying to create node_controller\")\n\n# try:\n# 
\tcursor.execute(createBattery(), (2,'outback', 34.0, 12, 0.0, 24.0, 90.0, 0.0, 1.0, 1.0, 90.0, 90.0, 20.0, 20.0, 1, 12.0, 1, 'Battery'))\n# except:\n# \tprint(\"Failed to create a new battery object\")\n#\n# try:\n# \tcursor.execute(createSolar(), (2, 'outback', 34, 12, 5.0, 0.0, 24.0, 0.0, 33.45, 2, 'Solar'))\n# except:\n# \tprint(\"Failed to create a new solar object\")\n\ndef get_timestamp():\n\t# get full date currently\n\tnow = datetime.now()\n\t# strip out the microseconds\n\tnow_str = str(now).split(\".\")[0]\n\t# recreate date\n\td = datetime.strptime(now_str, '%Y-%m-%d %H:%M:%S')\n\treturn d\n\n\noutput =\"00,4,0000,0126,0000,02,00023,287,099,001,00,33,062\"\nwhile 1:\n\tdt = get_timestamp()\n\tprint(str(dt).split(\".\")[0])\n\t#output =ser.readline()\n\tif(len(output)>3):\n\t\tif(output[3]=='5'):\n\t\t\tdeviceType=\"Inverter\"\n\t\t\tprint(output)\n\t\t\t#inverterData(deviceType, output)\n\t\tif(output[3]=='3'):\n\t\t\tchargeID+=1\n\t\t\tprint(output)\n\t\t\tdeviceType=\"Charge Controller\"\n\t\t\tcc=ChargeController()\n\t\t\tcc.chargeControl(array=output,deviceType=deviceType)\n\t\t\tcursor.execute(insertCharge(), (chargeID,cc.pvVoltage, cc.chargeCurrent, 0.0, 1, dt))\n\t\t\tconn.commit()\n\t\tif(output[3]=='4'):\n\t\t\tbatteryID+=1\n\t\t\tprint(output)\n\t\t\tdeviceType=\"DC Battery Monitor\"\n\t\t\tbc= BatteryController()\n\t\t\tbc.batteryDC(deviceType=deviceType, array=output)\n\t\t\tprint(bc.shuntAKillo)\n\t\t\tcursor.execute(insertBattery(),(batteryID, bc.stateOfCharge, bc.shuntAKillo, 12.0, 2.0, 24.0, 90.0, bc.minSOC, 1.0, 1.0, bc.netInputAH, bc.netOutputAH, 20.0, 24.0, 1, bc.batteryVoltage, 1, dt))\n\t\t\tconn.commit()\n\t\t\t#print(output, deviceType)\n\t\ttime.sleep(5)\n\n\n# this is for testing purposes (do not remove)\n# output = \"00,3,00,08,06,034,031,00,05,000,02,262,000,000,045\"\n# output = \"00,4,0000,0126,0000,02,00023,287,099,001,00,33,062\"\n# chargeController(output1)\n# batteryDC(output2)\n","sub_path":"outbackSerial.py","file_name":"outbackSerial.py","file_ext":"py","file_size_in_byte":4899,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"68982111","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n# Author:\n\n\nimport os\nimport sys\nsys.path.insert(0, os.getcwd() + \"/.\")\nimport time\nimport cv2\nimport torch\nfrom evaluation.evaluatingOfmAp import *\nfrom data_loader.imageDetectValDataLoader import ImageDetectValDataLoader\nfrom data_loader.trainDataProcess import TrainDataProcess\nfrom model.modelResultProcess import ModelResultProcess\nfrom torch_utility.torchModelProcess import TorchModelProcess\nfrom base_algorithm.nms import non_max_suppression\nfrom config import detectConfig\nfrom helper.arguments_parse import ArgumentsParse\n\n\nclass DetectionTest():\n\n def __init__(self, cfg_path, gpu_id):\n self.trainDataProcess = TrainDataProcess()\n self.torchModelProcess = TorchModelProcess()\n self.modelResultProcess = ModelResultProcess()\n self.model = self.torchModelProcess.initModel(cfg_path, gpu_id)\n self.device = self.torchModelProcess.getDevice()\n\n def load_weights(self, weights_path):\n self.torchModelProcess.loadLatestModelWeight(weights_path, self.model)\n self.torchModelProcess.modelTestInit(self.model)\n\n def test(self, val_Path):\n os.system('rm -rf ' + 'results')\n os.makedirs('results', exist_ok=True)\n dataloader = ImageDetectValDataLoader(val_Path, batch_size=1, img_size=detectConfig.imgSize)\n evaluator = MeanApEvaluating(val_Path, detectConfig.className)\n\n prev_time 
= time.time()\n        for i, (img_path, img) in enumerate(dataloader):\n            print('%g/%g' % (i + 1, len(dataloader)), end=' ')\n\n            # Get detections\n            with torch.no_grad():\n                output = self.model(img.to(self.device))\n                pred = self.modelResultProcess.detectResult(self.model, output, 5e-3)\n                # print(pred)\n                detections = non_max_suppression(pred, 5e-3, detectConfig.nmsThresh)\n\n            print('Batch %d... Done. (%.3fs)' % (i, time.time() - prev_time))\n            prev_time = time.time()\n\n            path, fileNameAndPost = os.path.split(img_path)\n            fileName, post = os.path.splitext(fileNameAndPost)\n            img = cv2.imread(img_path)\n\n            detectObjects = self.trainDataProcess.resizeDetectObjects(img, detectConfig.imgSize, detections,\n                                                                      detectConfig.className)\n\n            for object in detectObjects:\n                confidence = object.classConfidence * object.objectConfidence\n                x1 = object.min_corner.x\n                y1 = object.min_corner.y\n                x2 = object.max_corner.x\n                y2 = object.max_corner.y\n                with open(\"./results/comp4_det_test_\" + object.name + \".txt\", 'a') as file:\n                    file.write(\n                        \"{} {} {} {} {} {}\\n\".format(fileName, confidence, x1, y1, x2, y2))\n\n        mAP, aps = evaluator.do_python_eval(\"./results/\", \"./results/comp4_det_test_\")\n\n        return mAP, aps\n\n    def save_test_result(self, epoch, mAP, aps):\n        # Write epoch results\n        with open('results.txt', 'a') as file:\n            # file.write('%11.3g' * 2 % (mAP, aps[0]) + '\\n')\n            file.write(\"Epoch: {} | mAP: {:.3f} | \".format(epoch, mAP))\n            for i, ap in enumerate(aps):\n                file.write(detectConfig.className[i] + \": {:.3f} \".format(ap))\n            file.write(\"\\n\")\n\n\ndef main():\n    print(\"process start...\")\n    options = ArgumentsParse.test_input_parse()\n    detect_test = DetectionTest(options.cfg, 0)\n    detect_test.load_weights(options.weights)\n    detect_test.test(options.valPath)\n    print(\"process end!\")\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"runner/detectTest.py","file_name":"detectTest.py","file_ext":"py","file_size_in_byte":3667,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} {"seq_id":"611248834","text":"from typing import Optional\n\n\nclass Node:\n\n    def __init__(self, data: int, next=None):\n        self._data = data\n        self._next = next\n\n\n# Reverse a singly linked list.\ndef reserve(head: Node):\n    pre = None\n    p = head\n    while p:\n        next = p._next\n        p._next = pre\n        pre = p\n        p = next\n    return pre\n\n\n# Detect whether the list contains a cycle.\ndef has_cycle(head: Node):\n    slow, fast = head, head\n    while fast and fast._next:\n        slow = slow._next\n        fast = fast._next._next\n        if slow == fast:\n            return True\n    return False\n\n\n# Merge two sorted linked lists.\ndef merge_sorted_list(l1: Node, l2: Node) -> Optional[Node]:\n    sentinel = Node(None)\n    p = sentinel\n    while l1 and l2:\n        if l1._data < l2._data:\n            p._next = l1\n            l1 = l1._next\n        else:\n            p._next = l2\n            l2 = l2._next\n        p = p._next\n    p._next = l1 if l1 else l2\n    return sentinel._next\n\n\n# Remove the n-th node from the end.\ndef remove_nth_from_end(head: Node, n: int) -> Optional[Node]:\n    fast = head\n    count = 0\n    while fast and count < n:\n        fast = fast._next\n        count += 1\n    if count < n:\n        return head  # fewer than n nodes, nothing to remove\n    if fast is None:\n        return head._next  # the head itself is the n-th node from the end\n    slow = head\n    while fast._next:\n        slow, fast = slow._next, fast._next\n    slow._next = slow._next._next\n    return head\n\n\ndef find_middle_node(head: Node) -> Optional[Node]:\n    slow, fast = head, head\n    while fast._next and fast._next._next:\n        slow, fast = slow._next, fast._next._next\n    return slow\n\n\ndef is_palindrome(head: Node) -> bool:\n    slow, fast = head, head\n    while fast._next and fast._next._next:\n        slow, fast = slow._next, 
fast._next._next\n reserve_node = reserve(slow)\n p = head\n flag = True\n while reserve_node and p:\n if reserve_node._data != p._data:\n flag = False\n break\n reserve_node, p = reserve_node._next, p._next\n return flag\n\n","sub_path":"linkedlist/linked_list_algo.py","file_name":"linked_list_algo.py","file_ext":"py","file_size_in_byte":2071,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"633012013","text":"import numpy as np\nimport time\n# a = np.array([1, 2, 3, 4, 5])\na = np.random.rand(1000000)\nb = np.random.rand(1000000)\nprint(a)\ntic = time.time()\nc = np.dot(a, b)\ntoc = time.time()\nprint(c)\nprint('time:'+str(1000*(toc-tic)))\nc = 0\ntic = time.time()\nfor i in range(1000000):\n c += a[i]*b[i]\ntoc = time.time()\nprint(c)\nprint('time:'+str(1000*(toc-tic)))\n\na = np.array([1,2,3,4,5])\nb = np.array(range(30)).reshape((5,6))\nprint(b)\nprint(np.dot(a,b))\nprint(b.sum(axis=0))\nprint(b.sum(axis=1))","sub_path":"pythondemo/demo01.py","file_name":"demo01.py","file_ext":"py","file_size_in_byte":490,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"287259499","text":"\"\"\"\nDownload hooks for Pooch.fetch\n\"\"\"\nfrom __future__ import print_function\n\nimport requests\n\n\nclass HTTPDownloader: # pylint: disable=too-few-public-methods\n \"\"\"\n Download manager for fetching files over HTTP/HTTPS.\n\n When called, downloads the given file URL into the specified local file. Uses the\n :mod:`requests` library to manage downloads.\n\n Use with :meth:`pooch.Pooch.fetch` to customize the download of files (for example,\n to use authentication).\n\n Parameters\n ----------\n **kwargs\n All keyword arguments given when creating an instance of this class will be\n passed to :func:`requests.get`.\n\n Examples\n --------\n\n Download one of the data files from the Pooch repository:\n\n >>> import os\n >>> from pooch import version, check_version\n >>> url = \"https://github.com/fatiando/pooch/raw/{}/data/tiny-data.txt\".format(\n ... check_version(version.full_version))\n >>> downloader = HTTPDownloader()\n >>> # Not using with Pooch.fetch so no need to pass an instance of Pooch\n >>> downloader(url=url, output_file=\"tiny-data.txt\", pooch=None)\n >>> os.path.exists(\"tiny-data.txt\")\n True\n >>> with open(\"tiny-data.txt\") as f:\n ... print(f.read().strip())\n # A tiny data file for test purposes only\n 1 2 3 4 5 6\n >>> os.remove(\"tiny-data.txt\")\n\n Authentication can be handled by passing a user name and password to\n :func:`requests.get`. All arguments provided when creating an instance of the class\n are forwarded to :func:`requests.get`. We'll use ``auth=(username, password)`` to\n use basic HTTPS authentication. The https://httpbin.org website allows us to make a\n fake a login request using whatever username and password we provide to it:\n\n >>> user = \"doggo\"\n >>> password = \"goodboy\"\n >>> # httpbin will ask for the user and password we provide in the URL\n >>> url = \"https://httpbin.org/basic-auth/{}/{}\".format(user, password)\n >>> # Trying without the login credentials causes an error\n >>> downloader = HTTPDownloader()\n >>> try:\n ... downloader(url=url, output_file=\"tiny-data.txt\", pooch=None)\n ... except Exception:\n ... 
print(\"There was an error!\")\n There was an error!\n >>> # Pass in the credentials to HTTPDownloader and it will forward to requests.get\n >>> downloader = HTTPDownloader(auth=(user, password))\n >>> downloader(url=url, output_file=\"tiny-data.txt\", pooch=None)\n >>> with open(\"tiny-data.txt\") as f:\n ... for line in f:\n ... print(line.rstrip())\n {\n \"authenticated\": true,\n \"user\": \"doggo\"\n }\n >>> os.remove(\"tiny-data.txt\")\n\n \"\"\"\n\n def __init__(self, **kwargs):\n self.kwargs = kwargs\n\n def __call__(self, url, output_file, pooch):\n \"\"\"\n Download the given URL over HTTP to the given output file.\n\n Uses :func:`requests.get`.\n\n Parameters\n ----------\n url : str\n The URL to the file you want to download.\n output_file : str or file-like object\n Path (and file name) to which the file will be downloaded.\n pooch : :class:`~pooch.Pooch`\n The instance of :class:`~pooch.Pooch` that is calling this method.\n\n \"\"\"\n kwargs = self.kwargs.copy()\n kwargs.setdefault(\"stream\", True)\n ispath = not hasattr(output_file, \"write\")\n if ispath:\n output_file = open(output_file, \"w+b\")\n try:\n response = requests.get(url, **kwargs)\n response.raise_for_status()\n for chunk in response.iter_content(chunk_size=1024):\n if chunk:\n output_file.write(chunk)\n finally:\n if ispath:\n output_file.close()\n","sub_path":"pooch/downloaders.py","file_name":"downloaders.py","file_ext":"py","file_size_in_byte":3777,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"456135687","text":"from django import template\nimport math\nfrom forum.models import CategoryPostThreads\n\nregister = template.Library()\n\n@register.simple_tag\ndef comment_page(thread_id, comment_id):\n thread = CategoryPostThreads.objects.filter(id=thread_id).first()\n comment_count = 1\n for comment in thread.comments.all():\n if comment.id != comment_id:\n comment_count = comment_count + 1\n else:\n return math.ceil(comment_count / 5)","sub_path":"lethality/moderation/templatetags/foo_functions.py","file_name":"foo_functions.py","file_ext":"py","file_size_in_byte":457,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"142423999","text":"import os\nfrom flask import Flask\nfrom flask_sqlalchemy import SQLAlchemy\nfrom anyway.core import config, utils\nfrom webassets import Environment as AssetsEnvironment\nfrom flask_babel import Babel, gettext\nfrom flask_assets import Environment\nfrom flask_cors import CORS\nfrom flask_compress import Compress\n\n_PROJECT_ROOT = os.path.join(os.path.dirname(__file__))\n\n\"\"\"\ninitializes a Flask instance with default values\n\"\"\"\napp = Flask(\n \"anyway\",\n template_folder=os.path.join(config._PROJECT_ROOT, 'templates'),\n static_folder=os.path.join(config._PROJECT_ROOT, 'static'))\napp.config.from_object(config)\napp.config['BABEL_TRANSLATION_DIRECTORIES'] = os.path.join(config._PROJECT_ROOT, 'translations')\napp.config['SECURITY_REGISTERABLE'] = False\napp.config['SECURITY_USER_IDENTITY_ATTRIBUTES'] = 'username'\napp.config['BABEL_DEFAULT_LOCALE'] = 'he'\napp.config['OAUTH_CREDENTIALS'] = {\n 'facebook': {\n 'id': os.environ.get('FACEBOOK_KEY'),\n 'secret': os.environ.get('FACEBOOK_SECRET')\n },\n 'google': {\n 'id': os.environ.get('GOOGLE_LOGIN_CLIENT_ID'),\n 'secret': os.environ.get('GOOGLE_LOGIN_CLIENT_SECRET')\n }\n}\napp.config['RESTPLUS_MASK_SWAGGER'] = False\n\ndb = SQLAlchemy(app)\n\nfrom anyway.common.models import cbs_models, \\\n 
news_flash_models, \\\n schools_models, \\\n mobile_app_models, \\\n waze_models\n\nassets = Environment()\nassets.init_app(app)\nassets_env = AssetsEnvironment(os.path.join(config._PROJECT_ROOT, 'static'), '/static')\n\nCORS(app, resources={r\"/location-subscription\": {\"origins\": \"*\"}, r\"/report-problem\": {\"origins\": \"*\"}})\n\n# sg = SendGridAPIClient(app.config['SENDGRID_API_KEY'])\n\nbabel = Babel(app)\n\nCompress(app)\n\n\nfrom anyway.apis import api\napi.init_app(app)\n","sub_path":"anyway/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1857,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"606592121","text":"from solariat_bottle.api.base import BaseAPIView, api_request\n\n\nclass DemoManagementAPIView(BaseAPIView):\n\n endpoint = 'demo'\n commands = ['reset', \"reload\", 'save']\n\n @classmethod\n def register(cls, app):\n \"\"\" Chat API allows for extra commands, like 'summary' and 'session' \"\"\"\n view_func = cls.as_view(cls.endpoint)\n\n url = cls.get_api_url('')\n app.add_url_rule(url, view_func=view_func, methods=[\"POST\", ])\n\n def post(self, command=None, *args, **kwargs):\n if command in self.commands:\n meth = getattr(self, '_' + command)\n return meth(*args, **kwargs)\n return super(DemoManagementAPIView, self).post(*args, **kwargs)\n\n @api_request\n def _reset(self, user, *args, **kwargs):\n from solariat_bottle.db.predictors.abc_predictor import ABCPredictor\n for predictor in ABCPredictor.objects():\n predictor.packed_clf = None\n predictor.save()\n\n from solariat_bottle.db.predictors.base_predictor import BasePredictor\n for predictor in BasePredictor.objects.find(name__in=['Agent Matching Predictor',\n 'Alert Supervisor Decision',\n 'Chat Engagement Decision']):\n predictor.reset_fresh()\n # This would be feedback loop\n return dict(ok=True)\n\n @api_request\n def _reload(self, user, *args, **kwargs):\n from solariat_bottle.scripts.data_load.demo_helpers.save_load_db import load_db\n load_db()\n return dict(ok=True)\n\n @api_request\n def _save(self, user, *args, **kwargs):\n from solariat_bottle.scripts.data_load.demo_helpers.save_load_db import save_db\n from solariat_bottle.app import app\n app.logger.info(\"Saving DB State...\")\n save_db()\n app.logger.info(\"Save Completed Successfully...\")\n return dict(ok=True)\n","sub_path":"api/demo_management.py","file_name":"demo_management.py","file_ext":"py","file_size_in_byte":1956,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"452961913","text":"import pytest\n\nfrom ..context import dnsimple\nfrom ..request_helper import RequestHelper, request\n\nfrom dnsimple.models import Domain\nfrom dnsimple.collections import RecordCollection\n\n@pytest.fixture\ndef subject(request):\n return Domain(request, {'id': 1, 'name':'example.com'})\n\nclass TestDomain(RequestHelper, object):\n\n def setup_method(self, method):\n self.original_find = dnsimple.collections.RecordCollection.find\n\n def teardown_method(self, method):\n dnsimple.collections.RecordCollection.find = self.original_find\n\n def test_assign_assigns_attributes(self, subject):\n subject.assign({'name': 'foo.com'})\n\n assert subject.id == 1\n assert subject.name == 'foo.com'\n\n def test_to_dict_returns_attributes(self, request):\n subject = Domain(request, {\n \"id\" : 1,\n \"account_id\" : 2,\n \"auto_renew\" : False,\n \"created_at\" : \"2016-08-01T00:00:00:000Z\",\n \"expires_on\" : None,\n \"lockable\" : 
True,\n \"name\" : \"foo.com\",\n \"record_count\" : 5,\n \"registrant_id\" : 300,\n \"service_count\" : 0,\n \"state\" : \"hosted\",\n \"token\" : \"token\",\n \"unicode_name\" : \"example.com\",\n \"updated_at\" : \"2016-08-01T00:00:00:000Z\",\n \"user_id\" : 400,\n \"whois_protected\": False\n })\n\n assert subject.to_dict() == {\n \"id\" : 1,\n \"account_id\" : 2,\n \"auto_renew\" : False,\n \"created_at\" : \"2016-08-01T00:00:00:000Z\",\n \"expires_on\" : None,\n \"lockable\" : True,\n \"name\" : \"foo.com\",\n \"record_count\" : 5,\n \"registrant_id\" : 300,\n \"service_count\" : 0,\n \"state\" : \"hosted\",\n \"token\" : \"token\",\n \"unicode_name\" : \"example.com\",\n \"updated_at\" : \"2016-08-01T00:00:00:000Z\",\n \"user_id\" : 400,\n \"whois_protected\": False\n }\n\n def test_records_creates_instance_of_collection(self, request, subject):\n collection = subject.records()\n\n assert isinstance(collection, RecordCollection)\n\n assert collection.request == request\n assert collection.domain == subject\n\n assert collection.name is None\n assert collection.type is None\n\n def test_records_filters_on_name(self, subject):\n collection = subject.records('www')\n\n assert collection.name == 'www'\n assert collection.type is None\n\n def test_records_filters_on_blank_name(self, subject):\n collection = subject.records('')\n\n assert collection.name == ''\n assert collection.type is None\n\n def test_records_filters_on_type(self, subject):\n collection = subject.records(type = 'A')\n\n assert collection.name is None\n assert collection.type == 'A'\n\n def test_record_invokes_finder_with_name(self, mocker, subject):\n finder = mocker.stub()\n finder.return_value = 'record'\n\n dnsimple.collections.RecordCollection.find = finder\n\n assert subject.record('www') == 'record'\n\n finder.assert_called_once_with('www', None)\n\n def test_record_invokes_finder_with_name_and_on_type(self, mocker, subject):\n finder = mocker.stub()\n\n dnsimple.collections.RecordCollection.find = finder\n\n subject.record('', type = 'A')\n\n finder.assert_called_once_with('', 'A')\n\n def test_delete_removes_domain(self, mocker, request):\n method = self.stub_request(mocker, request, method_name = 'delete')\n subject = Domain(request, {'name':'example.com'})\n\n assert subject.delete() is True\n\n method.assert_called_once_with('domains/example.com')\n\n def test_delete_returns_false_when_removal_fails(self, mocker, request):\n method = self.stub_request(mocker, request, method_name = 'delete', success = False)\n subject = Domain(request, {'name':'example.com'})\n\n assert subject.delete() is False\n\n def test_not_equal_when_no_ids(self, request):\n a = Domain(request, {})\n b = Domain(request, {})\n\n assert a != b\n\n def test_not_equal_when_only_one_id(self, request):\n a = Domain(request, {'id': 1})\n b = Domain(request, {})\n\n assert a != b\n\n def test_not_equal_when_ids_differ(self, request):\n a = Domain(request, {'id': 1})\n b = Domain(request, {'id': 2})\n\n assert a != b\n\n def test_equal_when_ids_are_the_same(self, request):\n a = Domain(request, {'id': 1})\n b = Domain(request, {'id': 1})\n\n assert a == b\n","sub_path":"tests/unit/test_domain.py","file_name":"test_domain.py","file_ext":"py","file_size_in_byte":4808,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"83179224","text":"#!/usr/bin/python3\n# global packagess\nimport os\nimport cv2\nimport rospy\nimport argparse\nimport time\nimport datetime\nimport numpy as np\nfrom cv_bridge 
import CvBridge, CvBridgeError\nfrom openpose import pyopenpose as op\nfrom sensor_msgs.msg import Image\nfrom std_msgs.msg import String\n\n# local packages\nimport utils\nfrom lifting.prob_model import Prob3dPose\nfrom multi_proc_backend.msg import ImageInfo\nfrom tools import dbg, cfg\n\n\nclass SingleProc():\n def __init__(self, args):\n self.args = args\n\n # init openpose and 3d lifting model\n self.opWrapper = op.WrapperPython()\n params = dict(model_folder=\"/openpose/models/\")\n params['face'] = cfg.face\n params['hand'] = False\n params['model_pose'] = cfg.model_pose\n params['num_gpu'] = 1\n params['num_gpu_start'] = self.args.pid % cfg.ngpu\n self.opWrapper.configure(params)\n self.opWrapper.start()\n self.poseLifting = Prob3dPose()\n\n # cv bridge\n self.bridge = CvBridge()\n\n # subscriber and publisher\n self.sub = rospy.Subscriber(\n \"/multi_proc_backend/info_{}\".format(self.args.pid), ImageInfo,\n self.callback)\n self.pub = rospy.Publisher(\"/multi_proc_backend/result\",\n String,\n tcp_nodelay=True,\n queue_size=1)\n\n def callback(self, data):\n try:\n # get image\n img = self.bridge.imgmsg_to_cv2(data.img, \"8UC3\")\n ind = data.frame\n height, width = img.shape[:2]\n\n # print(f'Proc {self.args.pid} Get frame {ind} | Time {time.time()}')\n\n # use openpose\n datum = op.Datum()\n datum.cvInputData = img\n self.opWrapper.emplaceAndPop([datum])\n all_pose_2d = datum.poseKeypoints[..., :2].astype(np.int)\n all_pose_2d = utils.body25_to_mpi(all_pose_2d)\n\n # target main person\n main_person_id = utils.get_main_person(all_pose_2d, width, height)\n all_pose_2d = np.expand_dims(all_pose_2d[main_person_id], axis=0)\n\n # 3d lifting\n all_pose_2d, visibilities = utils.prepare_3d(all_pose_2d)\n transformed_pose_2d, weights = self.poseLifting.transform_joints(\n all_pose_2d, visibilities)\n all_pose_3d = self.poseLifting.compute_3d(transformed_pose_2d,\n weights)\n\n # send to reducer\n framed_pose = f'{ind} ' + str(all_pose_3d[0].tostring())\n self.pub.publish(framed_pose)\n\n #print(f'[{time.time()}] Proc {self.args.pid} inference frame {ind}')\n\n except BaseException as e:\n # dbg.logt(f\"Exception : _end_task enframeered exception : {e.__class__}\")\n pass\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--pid\", type=int, default=0, help=\"process id\")\n args = parser.parse_args()\n\n rospy.init_node('inference_video_{}'.format(args.pid), anonymous=True)\n proc = SingleProc(args)\n rospy.spin()\n","sub_path":"scripts/python/single_proc.py","file_name":"single_proc.py","file_ext":"py","file_size_in_byte":3167,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"98444914","text":"import nltk\nfrom gensim.models import Word2Vec\nfrom nltk.corpus import stopwords\nimport re\nparagraph = \"Yasin even made friends with a boy who lived next door, called Andrew. All summer long, Andrew and Yasin played in the park or went to the zoo with Andrew’s mum. 
Andrew shared his toys and his comics with Yasin and told him all about his favourite superheroes.They even built a camp in Yasin’s back garden where they would hide from the grownups.\"\n#Preprocessing the data\ntext = re.sub(r'\\[[0-9]*\\]', ' ', paragraph)\ntext = re.sub(r'\\s+', ' ', text)\ntext = text.lower()\ntext = re.sub(r'\\d', ' ', text)\ntext = re.sub(r'\\s+', ' ', text)\n\n# Preparing the dataset\nsentences = nltk.sent_tokenize(text)\n\nsentences = [nltk.word_tokenize(sentence) for sentence in sentences]\n\nfor i in range(len(sentences)):\n sentences[i] = [word for word in sentences[i] if word not in stopwords.words('english')]\n\n# Training the Word2Vec model\nmodel = Word2Vec(sentences, min_count=1) # here min_count=1 means if the word is present less than 1 ignore it,usually people take count=2 in large dataset\n\nwords = model.wv.key_to_index\n\n# Finding Word Vectors\nvector = model.wv['yasin']\n\n# Most similar words\nsimilar = model.wv.most_similar('friends')\nprint(similar)","sub_path":"word-2-vect.py","file_name":"word-2-vect.py","file_ext":"py","file_size_in_byte":1249,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"342232270","text":"import csv\n\narchi = open(\"Todas_las_carreras.csv\", \"r\")\n\ncsvreader = csv.reader(archi)\n\ncont = 0\nfor fila in csvreader:\n\n if (fila[2]==\"Universidad Nacional de La Plata\"):\n cont = cont + (0 if fila[19]=='' else int(fila[19]))\n print('Año: {} Facultad: {} -- Egresadas: {}'.format(fila[0], fila[3], fila[19]))\n\nprint('\\nTotal egresadas de la UNLP: {}'.format(cont))\n\n","sub_path":"Practica_5/clase7/ejemplos/mujeres_programadoras.py","file_name":"mujeres_programadoras.py","file_ext":"py","file_size_in_byte":384,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"319368429","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jun 21 14:11:17 2018\n\n@author: PCCC\n\"\"\"\nimport unittest\nfrom selenium import webdriver\nimport selenium.webdriver.support.ui as ui\nfrom selenium.webdriver.support.ui import Select\nimport time\nimport os\nimport re\nfrom public_package.pubilc_package import url,login_name,login_name_test,login_password,login_password_test\nfrom public_package.pubilc_package import sheet_setting, search, reset, currMenupath, page_title, goback, saveBtn,sheet_menu,sheet_prompt_message,work_space\nfrom public_package.pubilc_package import TESTCASE\nimport HTMLTestRunner\nimport xlrd\n'''\n用例名称:\n用例编号:\n用例场景:\n用例作者:\n'''\n\nxlsfile=work_space+r'\\\\'+sheet_menu.col_values(6,36,37)[0]\nexcel = xlrd.open_workbook(xlsfile)\nglobal sheet\nsheet = excel.sheet_by_name('二线站车辆过往记录')\n\nclass TESTCAST_ERXIANZHANCHELIANG(TESTCASE):\n def setUp(self):\n self.dr = webdriver.Chrome()\n self.dr.maximize_window()\n\n def tearDown(self):\n # print(\"脚本执行完成\")\n self.dr.quit()\n\n def login(self, username, password):\n self.dr.get(url)\n self.dr.find_element_by_id('vv').send_keys(username)\n self.dr.find_element_by_xpath('//*[@id=\"login_ff\"]/div[2]/input').send_keys(password)\n self.dr.find_element_by_xpath('//*[@id=\"login_ff\"]/a').click()\n\n def erxianzhancheliang_search(self):\n self.login(login_name, login_password)\n self.dr.find_element_by_xpath(sheet_menu.col_values(1,36,37)[0]).click()\n time.sleep(2)\n self.assertEqual('车辆管理',self.dr.find_element_by_xpath(currMenupath).text,'校验车辆管理菜单')\n self.dr.find_element_by_xpath(sheet_menu.col_values(3,36,37)[0]).click()\n 
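In the Word2Vec script above, tokenization, lowercasing, and stopword removal decide which tokens reach the model, so a direct `model.wv['yasin']` lookup raises `KeyError` whenever a token was filtered out or never occurred. A small guarded lookup for gensim 4.x, which the script's use of `key_to_index` indicates:

```python
# Guarded vector lookup for the gensim 4.x Word2Vec model built above;
# returns None for out-of-vocabulary tokens instead of raising KeyError.
def safe_vector(model, token):
    token = token.lower()
    if token in model.wv.key_to_index:
        return model.wv[token]
    return None
```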
self.dr.find_element_by_xpath(sheet_menu.col_values(5,36,37)[0]).click()\n self.dr.switch_to.frame('iframeb')\n time.sleep(3)\n self.assertEqual('二线站过往记录列表',self.dr.find_element_by_xpath(page_title).text,'二线站车辆过往记录')\n\n def test1_erxianzhancheliang_search_cphm(self):\n self.erxianzhancheliang_search()\n time.sleep(30)\n search_vale_cphm=sheet.col_values(1,0,1)[0]\n self.dr.find_element_by_xpath('//*[@id=\"form\"]/div[1]/div/input').send_keys(search_vale_cphm)\n self.dr.find_element_by_xpath(search).click()\n self.dr.switch_to.default_content()\n time.sleep(10)\n self.dr.switch_to.frame('iframeb')\n paginal_number = self.dr.find_element_by_xpath('/html/body/div[3]/div[2]/div/div[4]/div[1]/span[1]').text\n column = 2\n self.pagination_num(paginal_number, search_vale_cphm, column)\n print('车辆管理-二线站过往车辆:车牌号码条件查询功能正常')\n\n def test2_erxianzhancheliang_jcz(self):\n self.erxianzhancheliang_search()\n time.sleep(30)\n option_chioce=Select(self.dr.find_element_by_xpath('//*[@id=\"form\"]/div[2]/div/select'))\n for i in range(0, 8):\n if i == 0:\n print('查询全部数据时不校验查询结果')\n else:\n option_chioce.select_by_index(i)\n search_value=option_chioce.first_selected_option.text\n self.dr.find_element_by_xpath('//*[@id=\"search\"]').click()\n time.sleep(2)\n # self.dr.switch_to.default_content()\n # time.sleep(3)\n # self.dr.switch_to.frame('iframeb')\n # paginal_number = self.dr.find_element_by_xpath(\n # '/html/body/div[3]/div[2]/div/div[4]/div[1]/span[1]').text\n # column = 3\n # try:\n # # self.assertIsNone(self.dr.find_element_by_xpath('//*[@id=\"list\"]/tbody/tr/td').text,'校验是否有数据')\n # self.pagination_num(paginal_number, search_value, column)\n # except IOError:\n # print('查询数据为空')\n print('车辆管理-二线站过往车辆:检查站名称条件查询功能正常')\n\n def test3_erxianzhancheliang_search_date(self):\n self.erxianzhancheliang_search()\n time.sleep(30)\n search_vale_date='2018-04-02'\n self.dr.find_element_by_xpath('//*[@id=\"startDate\"]').clear()\n self.dr.find_element_by_xpath('//*[@id=\"endDate\"]').clear()\n self.dr.find_element_by_xpath('//*[@id=\"startDate\"]').send_keys(search_vale_date)\n self.dr.find_element_by_xpath('//*[@id=\"endDate\"]').send_keys(search_vale_date)\n self.dr.find_element_by_xpath('//*[@id=\"form\"]/div[1]/div/input').click()\n time.sleep(1)\n self.dr.find_element_by_xpath('//*[@id=\"search\"]').click()\n self.dr.switch_to.default_content()\n time.sleep(3)\n self.dr.switch_to.frame('iframeb')\n paginal_number = self.dr.find_element_by_xpath('/html/body/div[3]/div[2]/div/div[4]/div[1]/span[1]').text\n column = 4\n self.pagination_num(paginal_number, search_vale_date, column)\n print('车辆管理-二线站过往车辆:时间条件查询功能正常')\n\nif __name__ == '__main__':\n unittest.main()","sub_path":"Test_case/CLGL/test二线站车辆过往记录.py","file_name":"test二线站车辆过往记录.py","file_ext":"py","file_size_in_byte":5143,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"288411129","text":"## Python packages\nfrom datetime import datetime\nimport json\nimport re\nfrom binascii import a2b_base64\nimport os\n\n## Django Packages\nfrom django.shortcuts import get_object_or_404, render\nfrom django.urls import reverse\nfrom django.shortcuts import redirect\nfrom django.utils import timezone\nfrom django.http import (\n Http404, HttpResponse, JsonResponse, HttpResponsePermanentRedirect, HttpResponseRedirect,\n)\nfrom django.core import serializers\nfrom django.contrib.auth.decorators import login_required\nfrom django.conf import settings\nfrom django.contrib import messages\nfrom django.template import 
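The Selenium suite above delegates result verification to a `pagination_num` helper inherited from the shared `TESTCASE` base class, which is not shown in this file. A hypothetical sketch of the kind of per-column check such a helper performs (the table XPath is an assumption, not taken from the suite):

```python
# Hypothetical column check in the spirit of the pagination_num calls
# above; the //*[@id="list"] XPath is illustrative only.
def check_column_contains(driver, column, search_value):
    rows = driver.find_elements_by_xpath('//*[@id="list"]/tbody/tr')
    for row in rows:
        cell = row.find_element_by_xpath('./td[%d]' % column)
        assert search_value in cell.text
```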
RequestContext\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\nfrom django.template.loader import render_to_string\nfrom django.contrib.gis.geos import Point\n\n\n## Custom Libs ##\nfrom lib.functions import *\n\n## Project packages\nfrom accounts.models import CustomUser\n\n## App packages\n\n# That includes from .models import *\nfrom .forms import * \n\n############################################################################\n\nMAIN_PAGE_DESCRIPTION = 'Create a guidebook of street-level photos to show people around a location. View others people have created to help plan your next adventure.'\n\n\n############################################################################\n\ndef home(request):\n return redirect('guidebook.guidebook_list', page=1)\n\ndef guidebook_list(request, page):\n guidebooks = None\n if request.method == \"GET\":\n form = GuidebookSearchForm(request.GET)\n if form.is_valid():\n name = form.cleaned_data['name']\n category = form.cleaned_data['category']\n tags = form.cleaned_data['tag']\n username = form.cleaned_data['username']\n image_key = form.cleaned_data['image_key']\n\n guidebooks = Guidebook.objects.all().filter(\n is_published=True,\n is_approved=True\n )\n if name:\n guidebooks = guidebooks.filter(name__contains=name)\n if category:\n guidebooks = guidebooks.filter(category_id=category)\n if username and username != '':\n users = CustomUser.objects.filter(username__contains=username)\n guidebooks = guidebooks.filter(user__in=users)\n if len(tags) > 0:\n for tag in tags:\n guidebooks = guidebooks.filter(tag=tag)\n\n if image_key:\n scenes = Scene.objects.filter(image_key__contains=image_key)\n guidebook_id_ary = []\n if scenes.count() > 0:\n for s in scenes:\n guidebook_id_ary.append(s.guidebook_id)\n guidebooks = guidebooks.filter(pk__in=guidebook_id_ary)\n\n if guidebooks == None:\n guidebooks = Guidebook.objects.all().filter(is_published=True, is_approved=True)\n form = GuidebookSearchForm()\n\n paginator = Paginator(guidebooks.order_by('-created_at'), 5)\n\n try:\n pGuidebooks = paginator.page(page)\n except PageNotAnInteger:\n pGuidebooks = paginator.page(1)\n except EmptyPage:\n pGuidebooks = paginator.page(paginator.num_pages)\n\n first_num = 1\n last_num = paginator.num_pages\n if paginator.num_pages > 7:\n if pGuidebooks.number < 4:\n first_num = 1\n last_num = 7\n elif pGuidebooks.number > paginator.num_pages - 3:\n first_num = paginator.num_pages - 6\n last_num = paginator.num_pages\n else:\n first_num = pGuidebooks.number - 3\n last_num = pGuidebooks.number + 3\n pGuidebooks.paginator.pages = range(first_num, last_num + 1)\n pGuidebooks.count = len(pGuidebooks)\n content = {\n 'guidebooks': pGuidebooks,\n 'form': form,\n 'pageName': 'Guidebooks',\n 'pageTitle': 'Guidebooks',\n 'pageDescription': MAIN_PAGE_DESCRIPTION\n }\n return render(request, 'guidebook/guidebook_list.html', content)\n\n@my_login_required\ndef my_guidebook_list(request, page):\n guidebooks = None\n if request.method == \"GET\":\n form = GuidebookSearchForm(request.GET)\n if form.is_valid():\n name = form.cleaned_data['name']\n category = form.cleaned_data['category']\n tags = form.cleaned_data['tag']\n image_key = form.cleaned_data['image_key']\n\n guidebooks = Guidebook.objects.all().filter(\n user=request.user\n )\n if name:\n guidebooks = guidebooks.filter(name__contains=name)\n if category:\n guidebooks = guidebooks.filter(category_id=category)\n\n if len(tags) > 0:\n for tag in tags:\n guidebooks = guidebooks.filter(tag=tag)\n\n if 
image_key:\n scenes = Scene.objects.filter(image_key__contains=image_key)\n guidebook_id_ary = []\n if scenes.count() > 0:\n for s in scenes:\n guidebook_id_ary.append(s.guidebook_id)\n guidebooks = guidebooks.filter(pk__in=guidebook_id_ary)\n\n if guidebooks == None:\n guidebooks = Guidebook.objects.all().filter(\n user=request.user\n )\n form = GuidebookSearchForm()\n\n paginator = Paginator(guidebooks.order_by('-created_at'), 5)\n\n try:\n pGuidebooks = paginator.page(page)\n except PageNotAnInteger:\n pGuidebooks = paginator.page(1)\n except EmptyPage:\n pGuidebooks = paginator.page(paginator.num_pages)\n\n first_num = 1\n last_num = paginator.num_pages\n if paginator.num_pages > 7:\n if pGuidebooks.number < 4:\n first_num = 1\n last_num = 7\n elif pGuidebooks.number > paginator.num_pages - 3:\n first_num = paginator.num_pages - 6\n last_num = paginator.num_pages\n else:\n first_num = pGuidebooks.number - 3\n last_num = pGuidebooks.number + 3\n pGuidebooks.paginator.pages = range(first_num, last_num + 1)\n pGuidebooks.count = len(pGuidebooks)\n form._my(request.user.username)\n content = {\n 'guidebooks': pGuidebooks,\n 'form': form,\n 'pageName': 'My Guidebooks',\n 'pageTitle': 'My Guidebooks',\n 'pageDescription': MAIN_PAGE_DESCRIPTION\n }\n return render(request, 'guidebook/guidebook_list.html', content)\n\ndef guidebook_detail(request, unique_id):\n guidebook = get_object_or_404(Guidebook, unique_id=unique_id)\n if request.user.is_authenticated:\n guidebook_like = GuidebookLike.objects.filter(guidebook=guidebook, user=request.user)\n if guidebook_like and guidebook_like.count() > 0:\n is_liked = True\n else:\n is_liked = False\n else:\n is_liked = False\n\n\n\n\n\n\n\n form = SceneForm()\n poi_form = PointOfInterestForm()\n content = {\n 'guidebook': guidebook,\n 'is_liked': is_liked,\n 'form': form,\n 'poi_form': poi_form,\n 'pageTitle': guidebook.name + ' - ' + 'Guidebook',\n 'pageDescription': guidebook.description,\n 'pageName': 'Guidebook Detail'\n }\n return render(request, 'guidebook/guidebook_detail.html', content)\n\n@my_login_required\ndef guidebook_create(request, unique_id=None):\n if request.method == \"POST\":\n form = GuidebookForm(request.POST, request.FILES)\n\n if form.is_valid():\n if unique_id is None:\n guidebook = form.save(commit=False)\n guidebook.user = request.user\n guidebook.save()\n if form.cleaned_data['tag'].count() > 0:\n for tag in form.cleaned_data['tag']:\n guidebook.tag.add(tag)\n for tag in guidebook.tag.all():\n if not tag in form.cleaned_data['tag']:\n guidebook.tag.remove(tag)\n try:\n # send email to creator\n subject = 'Your guidebook post is under review'\n html_message = render_to_string(\n 'emails/guidebook/guidebook/create.html',\n {'subject': subject, 'guidebook': guidebook},\n request\n )\n send_mail_with_html(subject, html_message, request.user.email)\n # send email to admin\n staffs = CustomUser.objects.filter(is_staff=True, is_active=True)\n staff_emails = []\n for staff in staffs:\n staff_emails.append(staff.email)\n if len(staff_emails) > 0:\n subject = 'Guidebook post needs to be approved'\n html_message = render_to_string(\n 'emails/guidebook/guidebook/create_admin.html',\n {'subject': subject, 'guidebook': guidebook},\n request\n )\n send_mail_with_html(subject, html_message, staff_emails)\n except:\n print('email sending error!')\n else:\n guidebook = get_object_or_404(Guidebook, unique_id=unique_id)\n guidebook.name = form.cleaned_data['name']\n guidebook.description = form.cleaned_data['description']\n guidebook.cover_image 
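Both list views above duplicate the same seven-page pagination window: centered on the current page and clamped at either end. Factored out, the logic is this small pure function:

```python
# The seven-page window both list views compute inline, as a pure
# function; width=7 mirrors the hard-coded bounds above.
def page_window(current, num_pages, width=7):
    if num_pages <= width:
        return range(1, num_pages + 1)
    half = width // 2
    if current < half + 1:
        return range(1, width + 1)
    if current > num_pages - half:
        return range(num_pages - width + 1, num_pages + 1)
    return range(current - half, current + half + 1)
```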
= form.cleaned_data['cover_image']\n guidebook.category = form.cleaned_data['category']\n\n if form.cleaned_data['tag'].count() > 0:\n for tag in form.cleaned_data['tag']:\n guidebook.tag.add(tag)\n for tag in guidebook.tag.all():\n if not tag in form.cleaned_data['tag']:\n guidebook.tag.remove(tag)\n guidebook.save()\n\n messages.success(request, 'A guidebook was created successfully.')\n return redirect('guidebook.add_scene', unique_id=guidebook.unique_id)\n else:\n errors = []\n for field in form:\n for error in field.errors:\n errors.append(field.name + ': ' + error)\n return JsonResponse({\n 'status': 'failed',\n 'message': '
'.join(errors)\n })\n\n else:\n if unique_id:\n guidebook = get_object_or_404(Guidebook, unique_id=unique_id)\n form = GuidebookForm(instance=guidebook)\n else:\n form = GuidebookForm()\n content = {\n 'form': form,\n 'pageName': 'Create Guidebook',\n 'pageTitle': 'Create Guidebook',\n 'pageDescription': MAIN_PAGE_DESCRIPTION\n }\n return render(request, 'guidebook/create_main.html', content)\n\n@my_login_required\ndef ajax_guidebook_update(request, unique_id = None):\n if request.method == \"POST\":\n form = GuidebookForm(request.POST)\n\n if form.is_valid():\n guidebook = Guidebook.objects.get(unique_id=unique_id)\n if not guidebook:\n return JsonResponse({\n 'status': 'failed',\n 'message': 'The Guidebook does not exist or has no access.'\n })\n\n guidebook.name = form.cleaned_data['name']\n guidebook.description = form.cleaned_data['description']\n guidebook.category = form.cleaned_data['category']\n if form.cleaned_data['tag'].count() > 0:\n for tag in form.cleaned_data['tag']:\n guidebook.tag.add(tag)\n for tag in guidebook.tag.all():\n if not tag in form.cleaned_data['tag']:\n guidebook.tag.remove(tag)\n guidebook.save()\n return JsonResponse({\n 'status': 'success',\n 'message': 'Guidebook was uploaded successfully.',\n 'guidebook': {\n 'title': guidebook.name,\n 'description': guidebook.description,\n 'category': guidebook.category.name,\n 'tag': guidebook.getTags()\n }\n })\n else:\n errors = []\n for field in form:\n for error in field.errors:\n errors.append(field.name + ': ' + error)\n return JsonResponse({\n 'status': 'failed',\n 'message': '
'.join(errors)\n })\n\n return JsonResponse({\n 'status': 'failed',\n 'message': 'The Guidebook does not exist or has no access.'\n })\n\n@my_login_required\ndef add_scene(request, unique_id):\n guidebook = get_object_or_404(Guidebook, unique_id=unique_id)\n if guidebook.user != request.user:\n messages.error(request, \"You can't access the page.\")\n return redirect('/')\n form = SceneForm()\n poi_form = PointOfInterestForm()\n g_form = GuidebookForm(instance=guidebook)\n content = {\n 'guidebook': guidebook,\n 'g_form': g_form,\n 'form': form,\n 'poi_form': poi_form,\n 'pageName': 'Edit Guidebook',\n 'pageTitle': 'Edit Guidebook',\n 'pageDescription': MAIN_PAGE_DESCRIPTION\n }\n return render(request, 'guidebook/add_scene.html', content)\n\n@my_login_required\ndef ajax_upload_file(request, unique_id):\n guidebook = Guidebook.objects.get(unique_id=unique_id)\n if not guidebook:\n return JsonResponse({\n 'status': 'failed',\n 'message': 'The Guidebook does not exist or has no access.'\n })\n\n if request.method == \"POST\":\n form = GuidebookImageForm(request.POST, request.FILES)\n if form.is_valid():\n new_guidebook = form.save(commit=False)\n guidebook.cover_image = new_guidebook.cover_image\n guidebook.save()\n return JsonResponse({\n 'status': 'success',\n 'message': 'A cover image is uploaded successfully.'\n })\n else:\n errors = []\n for field in form:\n for error in field.errors:\n errors.append(field.name + ': ' + error)\n return JsonResponse({\n 'status': 'failed',\n 'message': '
'.join(errors)\n })\n\n return JsonResponse({\n 'status': 'failed',\n 'message': 'The Guidebook does not exist or has no access.'\n })\n@my_login_required\ndef ajax_add_scene(request, unique_id):\n guidebook = Guidebook.objects.get(unique_id=unique_id)\n if not guidebook:\n return JsonResponse({\n 'status': 'failed',\n 'message': 'The Guidebook does not exist or has no access.'\n })\n\n if request.method == \"POST\":\n form = SceneForm(request.POST)\n if form.is_valid():\n image_key = form.cleaned_data['image_key']\n title = form.cleaned_data['title']\n description = form.cleaned_data['description']\n scene = Scene.objects.filter(image_key=image_key, guidebook=guidebook)\n lat = float(form.cleaned_data['lat'])\n lng = float(form.cleaned_data['lng'])\n\n if scene:\n old_scene = scene[0]\n old_scene.title = title\n old_scene.description = description\n old_scene.lat = lat\n old_scene.lng = lng\n old_scene.save()\n return JsonResponse({\n 'type': 'update',\n 'scene_id': old_scene.pk,\n 'title': old_scene.title,\n 'description': old_scene.description,\n 'status': 'success',\n 'message': 'Scene is updated successfully.'\n })\n else:\n new_scene = Scene()\n new_scene.guidebook = guidebook\n new_scene.image_key = image_key\n new_scene.title = title\n new_scene.description = description\n new_scene.lat = lat\n new_scene.lng = lng\n scenes = guidebook.getScenes()\n max_sort = 0\n for s in scenes:\n if s.sort > max_sort:\n max_sort = s.sort\n new_scene.sort = max_sort + 1\n new_scene.save()\n scene_box_html = render_to_string(\n 'guidebook/scene_edit_box.html',\n {'guidebook': guidebook, 'scene': new_scene},\n request\n )\n scenes = Scene.objects.filter(guidebook=guidebook)\n if scenes and scenes.count() > 0:\n scene_count = scenes.count()\n else:\n scene_count = 0\n return JsonResponse({\n 'type': 'new',\n 'scene_id': new_scene.pk,\n 'scene_box_html': scene_box_html,\n 'scene_count': scene_count,\n 'status': 'success',\n 'message': 'A new Scene is added successfully.'\n })\n else:\n errors = []\n for field in form:\n for error in field.errors:\n errors.append(field.name + ': ' + error)\n return JsonResponse({\n 'status': 'failed',\n 'message': '
'.join(errors)\n })\n return JsonResponse({\n 'status': 'failed',\n 'message': 'It failed to save Scene!'\n })\n\n@my_login_required\ndef ajax_order_scene(request, unique_id):\n guidebook = Guidebook.objects.get(unique_id=unique_id)\n if not guidebook:\n return JsonResponse({\n 'status': 'failed',\n 'message': 'The Guidebook does not exist or has no access.'\n })\n\n if request.method == \"POST\":\n order_str = request.POST.get('order_list')\n order_list = order_str.split(',')\n scene_list = []\n for i in range(len(order_list)):\n scene = Scene.objects.get(pk=int(order_list[i]))\n if scene is None or scene.guidebook != guidebook:\n return JsonResponse({\n 'status': 'failed',\n 'message': 'The Scene does not exist or has no access.'\n })\n scene_list.append(scene)\n for i in range(len(order_list)):\n scene_list[i].sort = i\n scene_list[i].save()\n\n return JsonResponse({\n 'status': 'success',\n 'message': 'Scenes are ordered successfully.'\n })\n\n return JsonResponse({\n 'status': 'failed',\n 'message': 'It failed to save Scene!'\n })\n\n@my_login_required\ndef ajax_save_poi(request, unique_id, pk):\n guidebook = Guidebook.objects.get(unique_id=unique_id)\n if not guidebook:\n return JsonResponse({\n 'status': 'failed',\n 'message': 'The Guidebook does not exist or has no access.'\n })\n scene = Scene.objects.get(pk=pk)\n if not scene or scene.guidebook.user != request.user:\n return JsonResponse({\n 'status': 'failed',\n 'message': 'The Scene does not exist or has no access.'\n })\n if request.method == \"POST\":\n if request.POST.get('type') == 'new':\n poi = PointOfInterest()\n poi.title = ''\n poi.description = ''\n categories = POICategory.objects.all()\n poi.category_id = categories[0].pk\n poi.position_x = request.POST.get('position_x')\n poi.position_y = request.POST.get('position_y')\n poi.scene = scene\n poi.save()\n message = 'A new Point of Interest is created successfully.'\n\n elif request.POST.get('type') == 'move':\n poi = PointOfInterest.objects.get(pk=request.POST.get('poi_id'))\n if not poi:\n return JsonResponse({\n 'status': 'failed',\n 'message': 'The Point of Interest does not exist or has no access.'\n })\n poi.position_x = request.POST.get('position_x')\n poi.position_y = request.POST.get('position_y')\n poi.save()\n message = 'Position is updated successfully.'\n else:\n poi = PointOfInterest.objects.get(pk=request.POST.get('poi_id'))\n if not poi:\n return JsonResponse({\n 'status': 'failed',\n 'message': 'The Point of Interest does not exist or has no access.'\n })\n if request.POST.get('title') == '':\n return JsonResponse({\n 'status': 'failed',\n 'message': 'Title is required!'\n })\n poi.title = request.POST.get('title')\n poi.category_id = request.POST.get('category_id')\n poi.description = request.POST.get('description')\n poi.save()\n message = 'Point of Interest is saved successfully.'\n\n serialized_obj = serializers.serialize('json', [poi, ])\n json_poi = json.loads(serialized_obj)\n poi_form = PointOfInterestForm(instance=poi)\n poi_box_html = render_to_string(\n 'guidebook/poi_edit_box.html',\n {'poi': poi, 'poi_form': poi_form},\n request\n )\n return JsonResponse({\n 'status': 'success',\n 'poi': json_poi,\n 'poi_box_html': poi_box_html,\n 'poi_count': scene.getPOICount(),\n 'message': message\n })\n\n return JsonResponse({\n 'status': 'failed',\n 'message': 'It failed to save Point of Interest!'\n })\n\n@my_login_required\ndef ajax_delete_poi(request, unique_id, pk):\n guidebook = Guidebook.objects.get(unique_id=unique_id)\n if not guidebook:\n 
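One detail worth flagging in these ajax views: `Guidebook.objects.get(...)` raises `Guidebook.DoesNotExist` when nothing matches, so the `if not guidebook:` guards that follow it can never fire; `.filter(...).first()` returns `None` and makes the guard effective. A hypothetical decorator (not part of the app) factoring that lookup-or-fail pattern out:

```python
# Hypothetical refactor of the repeated lookup-or-fail guard; assumes
# the app's Guidebook model is importable as in views.py.
from functools import wraps
from django.http import JsonResponse

def with_guidebook(view):
    @wraps(view)
    def wrapper(request, unique_id, *args, **kwargs):
        # .filter().first() returns None instead of raising DoesNotExist.
        guidebook = Guidebook.objects.filter(unique_id=unique_id).first()
        if guidebook is None:
            return JsonResponse({
                'status': 'failed',
                'message': 'The Guidebook does not exist or has no access.'
            })
        return view(request, guidebook, *args, **kwargs)
    return wrapper
```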
return JsonResponse({\n 'status': 'failed',\n 'message': 'The Guidebook does not exist or has no access.'\n })\n scene = Scene.objects.get(pk=pk)\n if not scene or scene.guidebook.user != request.user:\n return JsonResponse({\n 'status': 'failed',\n 'message': 'The Scene does not exist or has no access.'\n })\n if request.method == \"POST\":\n poi = PointOfInterest.objects.get(pk=request.POST.get('poi_id'))\n if not poi:\n return JsonResponse({\n 'status': 'failed',\n 'message': 'The Point of Interest does not exist or has no access.'\n })\n poi.delete()\n message = 'Point of Interest is deleted successfully.'\n\n return JsonResponse({\n 'status': 'success',\n 'message': message,\n 'poi_count': scene.getPOICount()\n })\n\n return JsonResponse({\n 'status': 'failed',\n 'message': 'It failed to delete Point of Interest!'\n })\n\ndef ajax_get_scene(request, unique_id):\n image_key = request.GET['image_key']\n guidebook = Guidebook.objects.get(unique_id=unique_id)\n if not guidebook:\n return JsonResponse({\n 'status': 'failed',\n 'message': 'The Guidebook does not exist or has no access.'\n })\n\n scene = Scene.objects.filter(guidebook=guidebook, image_key=image_key)\n if not scene or scene.count() == 0:\n return JsonResponse({\n 'status': 'failed',\n 'message': 'The scene does not exist or has no access.'\n })\n else:\n pois = PointOfInterest.objects.filter(scene=scene[0])\n poi_list = []\n for poi in pois:\n poi_box_html = render_to_string(\n 'guidebook/poi_box.html',\n {'poi': poi},\n request\n )\n poi_json = serializers.serialize('json', [poi, ])\n json_poi = json.loads(poi_json)\n poi_list.append({\n 'poi': json_poi,\n 'poi_box_html': poi_box_html\n })\n\n scene_first = scene[0]\n scene_first.poi_list = poi_list\n\n scene_json = serializers.serialize('json', [scene_first, ])\n json_scene = json.loads(scene_json)\n\n return JsonResponse({\n 'status': 'success',\n 'scene': json_scene,\n 'poi_list': poi_list\n })\n\n@my_login_required\ndef ajax_get_edit_scene(request, unique_id):\n image_key = request.GET['image_key']\n guidebook = Guidebook.objects.get(unique_id=unique_id)\n if not guidebook:\n return JsonResponse({\n 'status': 'failed',\n 'message': 'The Guidebook does not exist or has no access.'\n })\n\n scene = Scene.objects.filter(guidebook=guidebook, image_key=image_key)\n if not scene or scene.count() == 0:\n return JsonResponse({\n 'status': 'failed',\n 'message': 'The scene does not exist or has no access.'\n })\n else:\n pois = PointOfInterest.objects.filter(scene=scene[0])\n poi_list = []\n for poi in pois:\n poi_form = PointOfInterestForm(instance=poi)\n poi_box_html = render_to_string(\n 'guidebook/poi_edit_box.html',\n {'poi': poi, 'poi_form': poi_form},\n request\n )\n poi_json = serializers.serialize('json', [poi, ])\n json_poi = json.loads(poi_json)\n poi_list.append({\n 'poi': json_poi,\n 'poi_box_html': poi_box_html\n })\n\n scene_first = scene[0]\n scene_first.poi_list = poi_list\n\n scene_json = serializers.serialize('json', [scene_first, ])\n json_scene = json.loads(scene_json)\n\n return JsonResponse({\n 'status': 'success',\n 'scene': json_scene,\n 'poi_list': poi_list\n })\n\n@my_login_required\ndef ajax_get_scene_list(request, unique_id):\n guidebook = Guidebook.objects.get(unique_id=unique_id)\n if not guidebook:\n return JsonResponse({\n 'status': 'failed',\n 'message': 'The Guidebook does not exist or has no access.'\n })\n\n scenes = Scene.objects.filter(guidebook=guidebook)\n scenes_json = []\n for scene in scenes:\n scene_json = serializers.serialize('json', 
[scene, ])\n json_scene = json.loads(scene_json)\n scenes_json.append(json_scene)\n\n return JsonResponse({\n 'status': 'success',\n 'scene_list': scenes_json\n })\n\n@my_login_required\ndef ajax_delete_scene(request, unique_id, pk):\n guidebook = Guidebook.objects.get(unique_id=unique_id)\n if not guidebook:\n return JsonResponse({\n 'status': 'failed',\n 'message': 'The Guidebook does not exist or has no access.'\n })\n\n if request.method == \"POST\":\n scene = Scene.objects.get(pk=pk)\n if not scene or scene.guidebook.user != request.user:\n return JsonResponse({\n 'status': 'failed',\n 'message': 'The Scene does not exist or has no access.'\n })\n pois = PointOfInterest.objects.filter(scene=scene)\n if pois and pois.count() > 0:\n for p in pois:\n p.delete()\n if scene.image_url:\n os.remove(\"media/\" + scene.image_url)\n scene.delete()\n\n\n message = 'Scene is deleted successfully.'\n\n scenes = Scene.objects.filter(guidebook=guidebook)\n if scenes and scenes.count() > 0:\n scene_count = scenes.count()\n else:\n scene_count = 0\n guidebook.is_published = False\n guidebook.save()\n return JsonResponse({\n 'status': 'success',\n 'message': message,\n 'scene_count': scene_count\n })\n\n return JsonResponse({\n 'status': 'failed',\n 'message': 'It failed to delete Scene!'\n })\n\n@my_login_required\ndef check_like(request, unique_id):\n guidebook = Guidebook.objects.get(unique_id=unique_id)\n if not guidebook:\n return JsonResponse({\n 'status': 'failed',\n 'message': 'The Guidebook does not exist.'\n })\n\n if guidebook.user == request.user:\n return JsonResponse({\n 'status': 'failed',\n 'message': 'This guidebook is created by you.'\n })\n\n guidebook_like = GuidebookLike.objects.filter(guidebook=guidebook, user=request.user)\n if guidebook_like:\n for g in guidebook_like:\n g.delete()\n liked_guidebook = GuidebookLike.objects.filter(guidebook=guidebook)\n if not liked_guidebook:\n liked_count = 0\n else:\n liked_count = liked_guidebook.count()\n return JsonResponse({\n 'status': 'success',\n 'message': 'Unliked',\n 'is_checked': False,\n 'liked_count': liked_count\n })\n else:\n guidebook_like = GuidebookLike()\n guidebook_like.guidebook = guidebook\n guidebook_like.user = request.user\n guidebook_like.save()\n liked_guidebook = GuidebookLike.objects.filter(guidebook=guidebook)\n if not liked_guidebook:\n liked_count = 0\n else:\n liked_count = liked_guidebook.count()\n return JsonResponse({\n 'status': 'success',\n 'message': 'Liked',\n 'is_checked': True,\n 'liked_count': liked_count\n })\n\n@my_login_required\ndef check_publish(request, unique_id):\n guidebook = Guidebook.objects.get(unique_id=unique_id)\n if not guidebook:\n return JsonResponse({\n 'status': 'failed',\n 'message': 'The Guidebook does not exist.'\n })\n\n if guidebook.user != request.user:\n return JsonResponse({\n 'status': 'failed',\n 'message': 'This guidebook is not created by you.'\n })\n\n if not guidebook.is_approved:\n return JsonResponse({\n 'status': 'failed',\n 'message': \"This guidebook isn't approved.\"\n })\n\n if guidebook.is_published:\n guidebook.is_published = False\n message = 'Unpublished'\n else:\n guidebook.is_published = True\n message = 'Published'\n guidebook.save()\n return JsonResponse({\n 'status': 'success',\n 'message': message,\n 'is_published': guidebook.is_published\n })\n\n@my_login_required\ndef guidebook_delete(request, unique_id):\n guidebook = get_object_or_404(Guidebook, unique_id=unique_id)\n if guidebook.user == request.user:\n guidebook_like = 
GuidebookLike.objects.filter(guidebook=guidebook)\n if guidebook_like:\n for g in guidebook_like:\n g.delete()\n\n scenes = Scene.objects.filter(guidebook=guidebook)\n if scenes:\n for s in scenes:\n pois = PointOfInterest.objects.filter(scene=s)\n if pois:\n for p in pois:\n p.delete()\n\n guidebook.delete()\n messages.success(request, 'Photographer \"%s\" is deleted successfully.' % guidebook.name)\n else:\n messages.error(request, \"This user hasn't permission\")\n return HttpResponseRedirect(request.META.get('HTTP_REFERER', '/'))","sub_path":"guidebook/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":31249,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"379967421","text":"\n\nfrom datetime import datetime\nimport evennia as ev\nfrom typeclasses.channels import Channel\nfrom evennia.utils import lazy_property\nfrom jobutils import Utils\nfrom jobs_settings import _VALID_BUCKET_ACTIONS\nfrom jobs_settings import _VALID_BUCKET_SETTINGS\n\ndate = datetime\nju = Utils()\n\nclass Bucket(Channel):\n \"\"\"\n The Bucket is the base object for the jobs system. This object\n inherits from the Channel object so that it can use the underlying\n hooks to deliver messages to Users.\n \"\"\"\n\n def at_channel_creation(self):\n \"\"\"This is done when the bucket is created\"\"\"\n # set sane defaults\n self.db.approval_board = '0'\n self.db.completion_board = '0'\n self.db.createdby = None\n self.db.createdon = '{:%m-%d-%Y at %H:%M %Z}'.format(date.utcnow())\n self.db.denial_board = '0'\n self.db.due_timeout = 0\n self.db.completed_jobs = 0\n self.db.num_of_jobs = len(self.associated)\n self.db.per_player_actions = {}\n self.db.percent_complete = 0\n self.db.resolution_time = 0\n self.db.valid_actions = _VALID_BUCKET_ACTIONS\n self.db.valid_settings = _VALID_BUCKET_SETTINGS\n self.db.group = \"admin\"\n\n @lazy_property\n def associated(self):\n \"\"\"search for and return any jobs associated with this bucket\"\"\"\n jobs = []\n for job in ev.search_tag(self.key, category=\"jobs\"):\n jobs.append(job)\n return jobs\n\n def check_access(self, obj):\n \"\"\"return whether the caller is in the actions dict\"\"\"\n if self.db.per_player_actions.keys() is not None:\n return obj in self.db.per_player_actions.keys()\n\n def grant_access(self, obj, action):\n \"\"\"give a character access to a bucket\"\"\"\n action = [action]\n ppa = self.db.per_player_actions\n if ju.ischaracter(obj):\n for act in action:\n if obj in ppa.keys():\n ppa[obj.key].value.append(act)\n else:\n ppa[obj.key] = [act]\n\n def has_jobs(self):\n \"\"\"return true if the bucket has any jobs on it, false if not\"\"\"\n return self.db.num_of_jobs < 0\n\n def has_access(self, action, obj):\n \"\"\"if self.caller is on the access list, allow this action\"\"\"\n ppa = self.db.per_player_actions.get(obj.key)\n if obj is not None:\n if obj.locks.check_lockstring(obj, \"dummy:perm(Admin)\"):\n return True\n else:\n return ppa and action in ppa\n else:\n return False\n\n def info(self, obj):\n \"\"\"returns pertinent bucket info as a list\"\"\"\n if self.has_access(\"info\", obj):\n ret = [self.key,\n self.db.desc,\n self.db.num_of_jobs,\n self.db.percent_complete,\n self.db.completion_board,\n self.db.approval_board,\n self.db.denial_board,\n self.db.due_timeout,\n self.db.resolution_time,]\n return ret\n\n def monitor(self, obj):\n \"\"\"Allows you to see a bucket when you type +jobs. 
Toggling it again turns it off\"\"\"\n pass\n\n\n def set(self, setting, value, obj):\n \"\"\"used to change settings on a particular bucket\"\"\"\n valid_settings = (self.db.desc, self.db.completion_board,\n self.db.approval_board, self.db.denial_board,\n self.db.due_timeout,)\n pass\n","sub_path":"world/jobs/bucket.py","file_name":"bucket.py","file_ext":"py","file_size_in_byte":3525,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"534096001","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom numpy import double\nfrom time import perf_counter\nfrom matrices_laplacianas import matriz_laplaciana_dispersa\n\n#NUMERO DE CORRIDAS \nNcorridas=5\n\n#TAMAÑO DE MATRICES A PROBAR PARA CADA CORRIDA\nNs=np.array([2,5,10,12,15,20,30,40,45,50,55,60,75,100,125,160,\n 200,250,350,500,600,800,1000,2000,5000,10000])\n\n#CREANDO ARCHIVOS DE TEXTO PARA CADA CORRIDA\nfor i in range(Ncorridas):\n print(f\"Corrida i = {i}\\n\")\n dts1=[] \n dts2=[]\n name=(f\"matmul{i}_dispersa.txt\")\n file=open(name,\"w\")\n \n for N in Ns:\n print(f\"N = {N}\")\n\n t1=perf_counter()\n \n A=matriz_laplaciana_dispersa(N,double)\n B=matriz_laplaciana_dispersa(N,double)\n\n t2=perf_counter()\n C=A@B\n t3=perf_counter() \n\n dt1=t2-t1\n dt2=t3-t2\n\n dts1.append(dt1)\n dts2.append(dt2)\n\n file.write(f\"{N} {dt1} {dt2}\\n\")\n\n print(f\"Tiempo de ensamblado = {dt1} s\")\n print(f\"Tiempo de solución = {dt2} s\")\n\n file.flush()\n \nfile.close()\n\n\n#CURVAS PARA CADA ARCHIVO DE TEXTO\nplt.figure()\nfor i in range(Ncorridas):\n tamano=[]\n tiempo_ens=[]\n tiempo_sol=[]\n \n name=(f\"matmul{i}_dispersa.txt\")\n f = open(name, \"r\")\n \n for i in f:\n lista_fila=i.split()\n tamano.append(double(lista_fila[0]))\n tiempo_ens.append(double(lista_fila[1]))\n tiempo_sol.append(double(lista_fila[2]))\n f.close()\n \n plt.subplot(2,1,1)\n \n plt.loglog(tamano,tiempo_ens,marker=\"o\",ms=3,color=\"k\",alpha=0.5)\n \n plt.subplot(2,1,2)\n \n plt.loglog(tamano,tiempo_sol,marker=\"o\",ms=3,color=\"k\",alpha=0.5)\n\ntamano=np.array(tamano)\ntiempo_ens=np.array(tiempo_ens)\ntiempo_sol=np.array(tiempo_sol)\n\nplt.subplot(2,1,1) \n \nfor i in range(5):\n a=tiempo_ens[25]/tamano[25]**i\n if i==0:\n plt.loglog(tamano,(tamano**i)*a,'--',label='Cte')\n elif i==1:\n plt.loglog(tamano,(tamano**i)*a,'--',label='O(N)')\n else:\n plt.loglog(tamano,(tamano**i)*a,'--',label=f'$O(N^{i})$')\n\nxTicks = [10,20,50,100,200,500,1000,2000,5000,10000,20000]\nxTicks_Text = [\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\"]\n\nyTicks = [10**-4,10**-3,10**-2,10**-1,1,10,60,600]\nyTicks_Text = [\"0.1 ms\",\"1 ms\",\"10 ms\",\"0.1 s\",\"1 s\",\"10 s\",\"1 min\",\"10min\"]\n\nplt.yticks(yTicks, yTicks_Text)\nplt.xticks(xTicks, xTicks_Text,rotation=45)\n\nplt.title(\"Complejidad Matmul Matriz Dispersa\", fontsize=10)\nplt.ylabel(\"Tiempo de ensamblado\")\nplt.grid()\nplt.ylim(10**-5,600)\n \n \nplt.subplot(2,1,2) \n\nfor i in range(5):\n a=tiempo_sol[25]/tamano[25]**i\n if i==0:\n plt.loglog(tamano,(tamano**i)*a,'--',label='Cte')\n elif i==1:\n plt.loglog(tamano,(tamano**i)*a,'--',label='O(N)')\n else:\n plt.loglog(tamano,(tamano**i)*a,'--',label=f'$O(N^{i})$')\n\nxTicks = [10,20,50,100,200,500,1000,2000,5000,10000,20000]\nxTicks_Text = [\"10\",\"20\",\"50\",\"100\",\"200\",\"500\",\"1000\",\n \"2000\",\"5000\",\"10000\",\"20000\"]\n\nyTicks = [10**-4,10**-3,10**-2,10**-1,1,10,60,600]\nyTicks_Text = [\"0.1 ms\",\"1 ms\",\"10 ms\",\"0.1 s\",\"1 s\",\"10 s\",\"1 
min\",\"10min\"]\n\nplt.yticks(yTicks, yTicks_Text)\nplt.xticks(xTicks, xTicks_Text,rotation=45)\n\nplt.legend(loc=2,prop={'size': 8})\nplt.xlabel(\"Tamaño matriz N\")\nplt.ylabel(\"Tiempo de solución\")\nplt.grid() \nplt.ylim(10**-5,600)\n\nplt.tight_layout()\nplt.show()","sub_path":"Entrega7/complejidad_matmul_dispersa.py","file_name":"complejidad_matmul_dispersa.py","file_ext":"py","file_size_in_byte":3347,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"100269","text":"\"\"\"module to read in data from file into matrix, \n\n\"\"\"\nfrom typing import List\nimport os\nfrom pathlib import Path\n\nfrom loguru import logger\nimport numpy as np\nfrom scipy.spatial.transform import Rotation as R\n\nfrom config.consts import PATH_TO_VIVE_CALIBRATION, DISTANCE_VIVE_ENDEFFECTOR, PATH_TO_ROBOT_CALIBRATION\n\n\ndef readViveData(fileLocation: Path):\n return np.loadtxt(fileLocation, delimiter=\" \", skiprows=2)\n\n\ndef get_vive_calibration_positions(date: str, experiment_number: str) -> List[np.ndarray]:\n \"\"\"Imports measurement data from a vive written to file. This function handles\\\n the import of all measurement points taken during (date,experimentnumber)\n\n The location is a directory whose path is set via env. Therein multiple\n folder with the name structure \"{date}_CalibrationSet_{number}\" live. Inside each\n are multiple files. Each file identifies a unique measurement set\n\n The order is important as signified by the numbering of the measurement\n files\n\n calibration_point_{x}.txt\n Args:\n date (string): [date of experiment, format yyyymmdd\n experimentNumber (str): number identifying the experiment/calibration on that date\n\n Returns:\n [list of numpy arrays]: list of measurements taken at individual. 
format is \\\n x,y,z,w,i,j,k\n \"\"\"\n folder_to_load_from = Path(PATH_TO_VIVE_CALIBRATION, date +\n \"_CalibrationSet_\" + experiment_number)\n\n file_naming_scheme: str = \"calibration_point_\"\n vive_tracker_pose_list = []\n counter = 1\n # file reading hinges on the files being named in ascending manner\n try:\n while True:\n path_to_file = folder_to_load_from.joinpath(\n file_naming_scheme + str(counter)+\".txt\")\n vive_tracker_pose_list.append(readViveData(path_to_file))\n counter += 1\n except OSError:\n logger.debug(f\"es wurden {counter-1}Punkte importiert\")\n return vive_tracker_pose_list\n\n\ndef get_calibration_points_quaternion(vive_tracker_pose_list: List[np.ndarray]) -> np.ndarray:\n \"\"\"calculates the position of the calibration point in LH coordinates\n\n For every \n\n Args:\n vive_tracker_pose_list (List[np.ndarray]): [description]\n\n Returns:\n np.ndarray: [description]\n \"\"\"\n res = np.zeros([len(vive_tracker_pose_list), 3])\n calibration_point_in_tracker_kos = np.array(\n [0, 0, DISTANCE_VIVE_ENDEFFECTOR]).reshape([-1, 1])\n for a, calib_pose in enumerate(vive_tracker_pose_list):\n x, y, z, w, i, j, k = np.mean(calib_pose, axis=0)\n rot_matrix_tracker_2_LH = R.from_quat([i, j, k, w])\n pos_tracker_in_LH = np.array([x, y, z]).reshape([-1, 1])\n calib_point_in_LH = rot_matrix_tracker_2_LH\\\n .as_matrix()@calibration_point_in_tracker_kos+pos_tracker_in_LH\n res[a, :] = calib_point_in_LH.flatten()\n return res\n\n\ndef get_calibration_points_matrix(vive_tracker_pose_list: List[np.ndarray]) -> np.ndarray:\n \"\"\"calculates the position of the calibration point in LH coordinates\n\n For every \n\n Args:\n vive_tracker_pose_list (List[np.ndarray]): [description]\n\n Returns:\n np.ndarray: [description]\n \"\"\"\n res = np.zeros([len(vive_tracker_pose_list), 3])\n calibration_point_in_tracker_kos = np.array(\n [0, 0, DISTANCE_VIVE_ENDEFFECTOR]).reshape([-1, 1])\n for a, calib_pose in enumerate(vive_tracker_pose_list):\n calib_pose = np.mean(calib_pose, axis=0)\n rot_matrix_tracker_2_LH = calib_pose.reshape([3, 4])\n rot_matrix_tracker_2_LH = np.vstack([rot_matrix_tracker_2_LH, [0, 0, 0, 1]])\n calib_point_in_LH = rot_matrix_tracker_2_LH@np.vstack([calibration_point_in_tracker_kos, 1])\n res[a, :] = calib_point_in_LH.flatten()[:-1]\n return res\n\n\ndef get_calibration_points(vive_tracker_pose_list: List[np.ndarray]) -> np.ndarray:\n \"\"\"calculates the position of the calibration point in LH coordinates\n\n For every \n\n Args:\n vive_tracker_pose_list (List[np.ndarray]): [description]\n\n Returns:\n np.ndarray: [description]\n \"\"\"\n if vive_tracker_pose_list[0].shape[1] == 12:\n return get_calibration_points_matrix(vive_tracker_pose_list)\n elif vive_tracker_pose_list[0].shape[1] == 7:\n return get_calibration_points_quaternion(vive_tracker_pose_list)\n else:\n raise ValueError(\"The pose have incorrect format. 
Neither matrix nor quaternion\")\n\n\ndef get_robot_data(date: str, experiment_number: str) -> np.ndarray:\n file_dir = Path(PATH_TO_ROBOT_CALIBRATION)\n file_path = file_dir.joinpath(date+\"_CalibrationSet_\"+experiment_number+\".txt\")\n return np.loadtxt(file_path, delimiter=\" \", skiprows=1)\n\n\nif __name__ == \"__main__\":\n experiment_number = \"1\"\n date = \"20210406\"\n v = get_vive_calibration_positions(date=date, experiment_number=experiment_number)\n a = get_calibration_points(v)\n\n # experiment_number = \"1\"\n # v = get_vive_calibration_positions(date=date, experiment_number=experiment_number)\n # b = get_calibration_points(v)\n\n # print(b[0])\n print(a[0])\n","sub_path":"robo_calibration_manual/read_file.py","file_name":"read_file.py","file_ext":"py","file_size_in_byte":5162,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"150163393","text":"# main.py\nimport sys\nimport os\nimport json\nfrom PyQt5 import QtCore, QtWidgets, QtGui\nfrom PyQt5.QtCore import Qt\nfrom PyQt5.QtWidgets import QAction, QSplitter, QDialog, QLabel, QVBoxLayout, \\\n QPlainTextEdit, QLineEdit, QGridLayout, QWidget, QPushButton, QMessageBox\n\n# from pymarkup_fns import read_csv, read_toml, merge, render_html, render_pdf, load_css, get_subitems, printdic, read_csv_adv, get_images\n\n# import markdown2\n# import simpleeval\n\nPATH_SETTINGS = 'settings.json'\n\n\n\n\n\n\n'''\nclass SettingsDialog(QDialog):\n def __init__(self,mw):\n super().__init__()\n\n self.mw = mw\n # self.setWindowIcon(mw.icon)\n self.setWindowFlags(self.windowFlags() & ~QtCore.Qt.WindowContextHelpButtonHint)\n self.setWindowTitle('Impostazioni')\n\n self.setup_layout()\n\n\n def FilePicker(self,key):\n print('FilePicker',key)\n chosen_path = QtWidgets.QFileDialog.getOpenFileName(self, 'Seleziona file', '', '*.csv',)\n # print(type(chosen_path),chosen_path)\n if chosen_path[0] != '':\n self.widgets[key].setText(chosen_path[0])\n self.mw.settings['paths'][key] = chosen_path[0]\n\n\n def FolderPicker(self,key):\n chosen_path = QtWidgets.QFileDialog.getExistingDirectory(self, 'Seleziona cartella')\n print(type(chosen_path),chosen_path)\n if chosen_path != '':\n self.widgets[key].setText(chosen_path)\n self.mw.settings['paths'][key] = chosen_path\n\n\n def setup_widgets(self):\n pass\n\n\n def setup_layout(self):\n # print('Setting up settings dialog UI')\n\n self.widgets = {\n 'table_products': QLineEdit(),\n 'table_macros': QLineEdit(),\n 'img_folder': QLineEdit(), # QPlainTextEdit()\n }\n\n widget_labels = {\n 'table_products': \"Tabella prodotti:\",\n 'table_macros': \"Tabella note:\",\n 'img_folder': \"Cartella immagini:\",\n }\n\n widget_button_slots = {\n 'table_products': lambda: self.FilePicker('table_products'),\n 'table_macros': lambda: self.FilePicker('table_macros'),\n 'img_folder': lambda: self.FolderPicker('img_folder'),\n }\n\n grid_layout = QGridLayout()\n\n for i,key in enumerate(self.widgets.keys()):\n label = QLabel(widget_labels[key])\n widget = self.widgets[key]\n widget.setReadOnly(True)\n widget.setDisabled(True)\n button = QPushButton('Seleziona')\n button.pressed.connect(widget_button_slots[key])\n\n last_used_path = self.mw.settings['paths'].get(key,'')\n if os.path.exists(last_used_path):\n widget.setText(last_used_path)\n\n grid_layout.addWidget(label,i,0,1,1)\n grid_layout.addWidget(widget,i,1,1,1)\n grid_layout.addWidget(button,i,2,1,1)\n\n\n grid_widget = QWidget()\n grid_widget.setLayout(grid_layout)\n\n main_layout = QVBoxLayout()\n 
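Each calibration routine above maps the fixed tracker-frame offset `[0, 0, DISTANCE_VIVE_ENDEFFECTOR]` into lighthouse coordinates by rotating it and adding the tracker position. That rotate-then-translate step in isolation:

```python
# The rotate-then-translate step shared by the calibration functions
# above; inputs are a 3x3 rotation, a translation, and a point.
import numpy as np

def apply_pose(rotation_3x3, translation_3, point_3):
    point = np.asarray(point_3, dtype=float).reshape(3, 1)
    t = np.asarray(translation_3, dtype=float).reshape(3, 1)
    return (np.asarray(rotation_3x3) @ point + t).flatten()
```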
main_layout.addWidget(grid_widget)\n main_layout.addStretch()\n self.setLayout(main_layout)\n\n # settings_wh = self.mw.settings.get('settings_wh')\n # if settings_wh is not None:\n geometry = self.mw.settings.get('geometry_wh')\n if geometry:\n\n x,y,w,h = geometry\n # w,h = settings_wh\n # Position dialog at the center of the main window\n # g = self.mw.geometry()\n # x = g.x() + (g.width()-w) / 2\n # y = g.y() + (g.height()-h) / 2\n\n # self.setGeometry(x,y,w,h)\n else:\n self.resize(400,500)\n'''\n\n\nclass MainWindow(QtWidgets.QMainWindow):\n\n def __init__(self):\n super().__init__()\n \n self.BASE_TITLE = 'PyQt5 Starter'\n self.setWindowTitle(self.BASE_TITLE)\n\n # self.settings = self.LoadSettings()\n \n self.init_layout()\n\n\n def init_mainmenu(self):\n menubar = self.menuBar()\n filemenu = menubar.addMenu('File')\n # viewmenu = menubar.addMenu('Visualizza')\n \n items_filemenu = [\n ['Nuovo file', 'Ctrl+N', self.Dummy],\n ['Apri...', 'Ctrl+O', self.Dummy],\n ['Salva', 'Ctrl+S', self.Dummy],\n ['Salva con nome...', 'Ctrl+Shift+S', self.Dummy],\n [],\n ['Impostazioni...', 'Ctrl+I', self.Dummy],\n [],\n ['Chiudi', 'Ctrl+Shift+Q', self.Dummy],\n ]\n\n for data in items_filemenu:\n if len(data)>0:\n label,shortcut,function = data\n new_action = QAction(label,self)\n new_action.setShortcut(shortcut)\n new_action.triggered.connect(function)\n filemenu.addAction(new_action)\n else:\n filemenu.addSeparator()\n\n\n def init_layout(self):\n \n label1 = QLabel(\"QLabel 1\")\n label2 = QLabel(\"QLabel 2\")\n label3 = QLabel(\"QLabel 3\")\n label4 = QLabel(\"QLabel 3\")\n '''\n grid_layout = QGridLayout()\n grid_widget = QWidget()\n grid_widget.setLayout(grid_layout)\n\n grid_layout.addWidget(label1,0,0,1,1)\n grid_layout.addWidget(label2,0,1,1,1)\n grid_layout.addWidget(label3,1,0,1,1)\n grid_layout.addWidget(label4,0,1,1,1)\n '''\n main_layout = QVBoxLayout()\n\n main_layout.addWidget(label1)\n main_layout.addWidget(label2)\n main_layout.addWidget(label3)\n main_layout.addWidget(label4)\n\n # main_layout.addWidget(grid_widget)\n main_layout.addStretch()\n\n centralWidget = QtWidgets.QWidget()\n centralWidget.setLayout(main_layout)\n self.setCentralWidget(centralWidget)\n\n\n def Dummy(self):\n return\n\n\ndef main():\n app = QtWidgets.QApplication(sys.argv)\n main_window = MainWindow()\n main_window.show()\n sys.exit(app.exec_())\n\n \nif __name__ == '__main__':\n main()\n","sub_path":"gui.py","file_name":"gui.py","file_ext":"py","file_size_in_byte":6009,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"191593939","text":"from selenium import webdriver\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support import expected_conditions as EC\nimport csv\nimport locators\nfrom lib import *\n\n\ndef check_output(local_driver, value_a, value_b, output):\n input_clear_and_type(local_driver, locators.sum_page[\"input_a_id\"], value_a)\n input_clear_and_type(local_driver, locators.sum_page[\"input_b_id\"], value_b)\n\n get_total_button = local_driver.find_element_by_xpath(locators.sum_page[\"get_total_button_xpath\"])\n get_total_button.click()\n\n actual_output = local_driver.find_element_by_id(locators.sum_page[\"actual_output_id\"]).text\n\n result = \"Failure\"\n if output == actual_output:\n result = \"Success\"\n\n print(f\"Check: {value_a} + {value_b} = {actual_output} - {result}\")\n\n\ndef main():\n driver = webdriver.Firefox()\n try:\n 
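gui.py above declares its File menu as a list of `[label, shortcut, slot]` rows, with an empty row marking a separator, and materializes them in one loop. The same data-driven pattern as a standalone helper:

```python
# Data-driven menu construction as used in init_mainmenu above.
from PyQt5.QtWidgets import QAction

def build_menu(menu, parent, items):
    for entry in items:
        if not entry:                # empty row -> separator
            menu.addSeparator()
            continue
        label, shortcut, slot = entry
        action = QAction(label, parent)
        action.setShortcut(shortcut)
        action.triggered.connect(slot)
        menu.addAction(action)
```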
driver.get('https://www.seleniumeasy.com/test/basic-first-form-demo.html')\n print(driver.title)\n assert 'Selenium' in driver.title\n WebDriverWait(driver, 5).until(EC.element_to_be_clickable((By.XPATH, locators.sum_page[\"close_button\"]))).\\\n click()\n with open('data.csv') as data_csv:\n reader = csv.reader(data_csv)\n headers = next(reader, None)\n for row in reader:\n check_output(driver, row[0], row[1], row[2])\n finally:\n driver.quit()\n\n\nif __name__ == \"__main__\":\n main()","sub_path":"smelovnv/selenium/check_sum_output/selenium_test.py","file_name":"selenium_test.py","file_ext":"py","file_size_in_byte":1486,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"355110530","text":"import base64\nimport os\nimport zlib\nimport time\nfrom pycrest.compat import bytes_, text_\nfrom pycrest.errors import APIException\n\ntry:\n from urllib.parse import quote, parse_qs\nexcept ImportError: # pragma: no cover\n from urllib import quote\n from urlparse import parse_qs\ntry:\n import testtools as unittest\nexcept ImportError:\n import unittest\ntry:\n import __builtin__\n builtins_name = __builtin__.__name__\nexcept ImportError:\n import builtins\n builtins_name = builtins.__name__\ntry:\n import pickle\nexcept ImportError:\n import cPickle as pickle\nimport httmock\nimport pycrest\nimport mock\n\n\nclass MockFilesystem(object):\n def __init__(self):\n self.fs = {'/': {}}\n\n def isdir(self, path):\n return path in self.fs\n\n def mkdir(self, path, mode=0o700):\n if not path:\n raise OSError(2, \"No such file or directory: '%s'\" % path)\n\n if path not in self.fs:\n self.fs[path] = {}\n\n def open(self, path, mode='r'):\n class FileObj(object):\n def __init__(self, elem):\n self.elem = elem\n self.closed = 0\n def __enter__(self):\n return self\n def __exit__(self, type, value, tb):\n self.closed = 1\n def write(self, data):\n self.elem['data'] = data\n def read(self):\n return self.elem['data']\n def close(self):\n self.closed = 1\n\n if path in self.fs:\n raise IOError(21, \"Is a directory: '%s'\" % path)\n\n directory, filename = os.path.split(path)\n\n if not self.isdir(directory):\n raise IOError(2, \"No such file or directory: '%s'\" % path)\n\n if mode in ['r', 'rb', 'r+', 'r+b'] \\\n and filename not in self.fs[directory]:\n raise IOError(2, \"No such file or directory: '%s'\" % path)\n\n if mode in ['w', 'wb']:\n self.fs[directory][filename] = {'data': ''}\n\n return FileObj(self.fs[directory][filename])\n\n def unlink(self, path):\n if path in self.fs:\n raise OSError(5, 'Is a directory')\n\n directory, filename = os.path.split(path)\n if directory not in self.fs \\\n or filename not in self.fs[directory]:\n raise OSError(2, \"No such file or directory: '%s'\" % path)\n self.fs[directory].pop(filename)\n\n def listdir(self, directory):\n return self.fs[directory].keys()\n\n\n@httmock.urlmatch(scheme=\"https\",\n netloc=r\"(public-)?crest(-tq)?\\.eveonline\\.com\",\n path=r\"^/?$\")\ndef root_mock(url, request):\n return {\n \"status_code\": 200,\n \"content\": {\n \"marketData\": {\n \"href\": \"https://crest-tq.eveonline.com/market/prices/\",\n },\n \"incursions\": {\n \"href\": \"https://crest-tq.eveonline.com/incursions/\",\n },\n \"status\": {\"eve\": \"online\"},\n \"queryString\": {\n \"href\": \"https://crest-tq.eveonline.com/queryString/\"\n },\n \"paginatedData\": {\n \"href\": \"https://crest-tq.eveonline.com/getPage/?page=2\"\n }\n },\n \"headers\": {\n \"Cache-Control\": \"private, max-age=300\"\n }\n 
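The Selenium test above drives its inputs from `data.csv` and waits explicitly for the promotional popup's close button before touching the form. The explicit-wait idiom on its own, with the XPath left to the caller:

```python
# Explicit-wait idiom from the test above, as a reusable helper.
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait

def click_when_clickable(driver, xpath, timeout=5):
    element = WebDriverWait(driver, timeout).until(
        EC.element_to_be_clickable((By.XPATH, xpath)))
    element.click()
    return element
```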
}\n\n\n@httmock.urlmatch(scheme=\"https\",\n netloc=r\"(public-)?crest(-tq)?\\.eveonline\\.com\",\n path=r\"^/market/prices/?$\")\ndef market_mock(url, request):\n if url.netloc == 'crest-tq.eveonline.com':\n headers = {\n \"Authorization\": \"Bearer 123asd\",\n }\n else:\n headers = {}\n body = {\n \"totalCount\": 2,\n \"items\": [\n {\n \"avg_price\": 100,\n \"type\": {\n \"href\": \"getPunisher\",\n \"name\": \"Punisher\",\n \"id\": 597\n }\n },\n {\n \"avg_price\": 101,\n \"type\": {\n \"href\": \"getRifter\",\n \"name\": \"Rifter\",\n \"id\": 587\n }\n },\n [\n \"foo\",\n \"bar\"\n ],\n \"baz\"\n ]\n }\n return {\n \"status_code\": 200,\n \"content\": body,\n \"headers\": {\n \"Cache-Control\": \"private, max-age=300\"\n }\n }\n\n\n@httmock.urlmatch(scheme=\"https\",\n netloc=r\"^login.eveonline.com$\",\n path=r\"^/oauth/verify/?$\")\ndef verify_mock(url, request):\n return {\n \"status_code\": 200,\n \"content\": {\"CharacterName\": \"Foobar\"},\n }\n\n\n@httmock.all_requests\ndef fallback_mock(url, request):\n print(\"No mock for: %s\" % request.url)\n return {\n \"status_code\": 404,\n \"body\": {},\n }\n\n\nall_mocks = [root_mock, market_mock, verify_mock, fallback_mock]\n\n\nclass TestApi(unittest.TestCase):\n @mock.patch('os.path.isdir')\n @mock.patch('os.mkdir')\n @mock.patch('os.unlink')\n @mock.patch('os.listdir')\n @mock.patch('%s.open' % builtins_name)\n def test_public_api(self, mock_open, mock_listdir, mock_unlink, mock_mkdir, mock_isdir):\n fs = MockFilesystem()\n mock_isdir.side_effect = fs.isdir\n mock_mkdir.side_effect = fs.mkdir\n mock_unlink.side_effect = fs.unlink\n mock_listdir.side_effect = fs.listdir\n mock_open.side_effect = fs.open\n\n @httmock.urlmatch(scheme=\"https\", netloc=r\"^crest-tq.eveonline.com$\", path=r\"^/queryString/?$\")\n def test_qs(url, request):\n self.assertEqual(url.query, \"query=string\")\n return {\"status_code\": 200, \"content\": {}}\n\n @httmock.urlmatch(scheme=\"https\", netloc=r\"^crest-tq.eveonline.com$\", path=r\"^/getPage/?$\")\n def test_pagination(url, request):\n self.assertEqual(url.query, \"page=2\")\n return {\"status_code\": 200, \"content\": {}}\n\n with httmock.HTTMock(test_qs, test_pagination, *all_mocks):\n eve = pycrest.EVE()\n eve().queryString(query=\"string\")\n eve.paginatedData()\n\n with httmock.HTTMock(*all_mocks):\n eve = pycrest.EVE()\n self.assertRaises(AttributeError, eve.__getattr__, 'marketData')\n eve()\n self.assertEqual(eve().marketData.href, \"https://crest-tq.eveonline.com/market/prices/\")\n self.assertEqual(eve.marketData().totalCount, 2)\n self.assertEqual(eve.marketData().items[0].avg_price, 100)\n self.assertEqual(eve.marketData().items[2][0], \"foo\")\n self.assertEqual(eve.marketData().items[3], \"baz\")\n self.assertEqual(eve().status().eve, \"online\")\n self.assertRaises(APIException, lambda: eve.incursions()) # Scala's notation would be nice\n # cache miss\n eve = pycrest.EVE(cache_dir='/cachedir')\n eve()\n\n # cache hit\n eve = pycrest.EVE(cache_dir='/cachedir')\n eve()\n\n # stale cache hit\n ls = list(os.listdir('/cachedir'))\n self.assertEquals(len(ls), 1)\n path = os.path.join('/cachedir', ls[0])\n\n recf = open(path, 'r')\n rec = pickle.loads(zlib.decompress(recf.read()))\n recf.close()\n rec['expires'] = 1\n\n recf = open(path, 'w')\n recf.write(zlib.compress(pickle.dumps(rec)))\n recf.close()\n\n eve = pycrest.EVE(cache_dir='/cachedir')\n eve()\n\n\n testing = pycrest.EVE(testing=True)\n self.assertEqual(testing._public_endpoint, \"http://api-sisi.testeveonline.com/\")\n\n def 
test_headers(self):\n _self = self\n @httmock.all_requests\n def custom_header(url, request):\n _self.assertIn(\"X-PyCrest-Testing\", request.headers)\n _self.assertEqual(request.headers[\"X-PyCrest-Testing\"], \"True\")\n\n @httmock.all_requests\n def no_custom_header(url, request):\n self.assertNotIn(\"X-PyCrest-Testing\", request.headers)\n\n with httmock.HTTMock(no_custom_header):\n eve = pycrest.EVE()\n eve()\n with httmock.HTTMock(custom_header):\n eve = pycrest.EVE(additional_headers={\"X-PyCrest-Testing\": \"True\"})\n eve()\n\n def test_user_agent(self):\n @httmock.all_requests\n def default_useragent(url, request):\n self.assertEqual(request.headers[\"User-Agent\"],\n \"PyCrest/{0}\".format(pycrest.version))\n\n @httmock.all_requests\n def custom_useragent(url, request):\n self.assertEqual(request.headers[\"User-Agent\"], \"Testing 123\")\n\n with httmock.HTTMock(default_useragent):\n eve = pycrest.EVE()\n eve()\n with httmock.HTTMock(custom_useragent):\n eve = pycrest.EVE(user_agent=\"Testing 123\")\n eve()\n\n def test_params(self):\n @httmock.all_requests\n def no_params(url, request):\n self.assertEqual(url.query, \"\")\n return {\"status_code\": 200, \"content\": {}}\n\n @httmock.all_requests\n def with_custom_params(url, request):\n self.assertNotEqual(url.query, \"\")\n return {\"status_code\": 200, \"content\": {}}\n\n with httmock.HTTMock(no_params):\n eve = pycrest.EVE()\n eve.get(\"http://example.com\")\n with httmock.HTTMock(with_custom_params):\n eve = pycrest.EVE()\n eve.get(\"http://example.com\", params={\"Foo\": \"Bar\"})\n\n\nclass TestAuthorization(unittest.TestCase):\n @mock.patch('os.path.isdir')\n @mock.patch('os.mkdir')\n @mock.patch('os.unlink')\n @mock.patch('os.listdir')\n @mock.patch('%s.open' % builtins_name)\n def test_authorize(self, mock_open, mock_listdir, mock_unlink, mock_mkdir, mock_isdir):\n client_id = \"bar\"\n api_key = \"foo\"\n code = \"foobar\"\n access_token = \"123asd\"\n refresh_token = \"asd123\"\n\n fs = MockFilesystem()\n mock_isdir.side_effect = fs.isdir\n mock_mkdir.side_effect = fs.mkdir\n mock_unlink.side_effect = fs.unlink\n mock_listdir.side_effect = fs.listdir\n mock_open.side_effect = fs.open\n\n @httmock.urlmatch(scheme=\"https\",\n netloc=r\"^login.eveonline.com$\",\n path=r\"^/oauth/token/?$\",\n method=\"POST\")\n def token_mock(url, request):\n params = parse_qs(url.query)\n if params['grant_type'][0] == 'authorization_code':\n auth = text_(base64.b64encode(bytes_(\"%s:%s\" % (client_id, api_key))))\n self.assertEqual(request.headers['Authorization'], \"Basic %s\" % auth)\n if params['code'][0] == code:\n body = {\n \"access_token\": access_token,\n \"refresh_token\": refresh_token,\n \"expires_in\": 1200\n }\n return {\"status_code\": 200, \"content\": body}\n elif params['grant_type'][0] == 'refresh_token':\n auth = text_(base64.b64encode(bytes_(\"%s:%s\" % (client_id, api_key))))\n self.assertEqual(request.headers['Authorization'], \"Basic %s\" % auth)\n if params['refresh_token'][0] == refresh_token:\n body = {\n \"access_token\": access_token,\n \"refresh_token\": refresh_token,\n \"expires_in\": 1200\n }\n return {\"status_code\": 200, \"content\": body}\n return {\"status_code\": 403, \"content\": {}}\n\n with httmock.HTTMock(token_mock, *all_mocks) as fake_http:\n eve = pycrest.EVE(api_key=api_key, client_id=client_id, redirect_uri=\"http://foo.bar\")\n auth_uri = \"%s/authorize?response_type=code&redirect_uri=%s&client_id=%s&scope=publicData\" % (\n eve._oauth_endpoint,\n quote(\"http://foo.bar\", safe=''),\n 
client_id,\n )\n self.assertEqual(eve.auth_uri(scopes=[\"publicData\"]), auth_uri)\n con = eve.authorize(code)\n self.assertRaises(APIException, lambda: eve.authorize(\"notcode\"))\n r = con.refresh()\n\n self.assertRaises(AttributeError, con.__getattr__, 'marketData')\n con()\n self.assertEqual(con.marketData.href, \"https://crest-tq.eveonline.com/market/prices/\")\n self.assertEqual(con.marketData().totalCount, 2)\n self.assertEqual(con.marketData().items[1].type.name, \"Rifter\")\n\n info = con.whoami()\n self.assertEqual(info['CharacterName'], 'Foobar')\n info = con.whoami()\n self.assertEqual(info['CharacterName'], con._cache['whoami']['CharacterName'])\n info = r.whoami()\n self.assertEqual(info['CharacterName'], 'Foobar')\n\n r.refresh_token = \"notright\"\n self.assertRaises(APIException, lambda: r.refresh())\n\n eve = pycrest.EVE(api_key=api_key, client_id=client_id)\n con = eve.authorize(code)\n self.assertEqual(con().marketData().totalCount, 2)\n self.assertEqual(con().marketData().totalCount, 2)\n\n # auth with refresh token\n con = eve.refr_authorize(con.refresh_token)\n self.assertRaises(AttributeError, con.__getattr__, 'marketData')\n con()\n self.assertEqual(con.marketData.href, \"https://crest-tq.eveonline.com/market/prices/\")\n self.assertEqual(con.marketData().totalCount, 2)\n self.assertEqual(con.marketData().items[1].type.name, \"Rifter\")\n\n # fail auth with refresh token\n self.assertRaises(APIException, lambda: eve.refr_authorize('notright'))\n\n # auth with temp token\n con = eve.temptoken_authorize(con.token,\n con.expires - time.time(),\n con.refresh_token)\n self.assertRaises(AttributeError, con.__getattr__, 'marketData')\n con()\n self.assertEqual(con.marketData.href, \"https://crest-tq.eveonline.com/market/prices/\")\n self.assertEqual(con.marketData().totalCount, 2)\n self.assertEqual(con.marketData().items[1].type.name, \"Rifter\")\n\n # test auto-refresh of expired token\n con = eve.temptoken_authorize(access_token,\n -1,\n refresh_token)\n con().marketData()\n self.assertGreater(con.expires, time.time())\n\n # test cache miss\n eve = pycrest.EVE(api_key=api_key, client_id=client_id, cache_dir='/cachedir')\n con = eve.authorize(code)\n times_get = fake_http.call_count\n con()\n self.assertEqual(fake_http.call_count, times_get + 1)\n\n # test cache hit\n times_get = fake_http.call_count\n con()\n self.assertEqual(fake_http.call_count, times_get)\n\n # test cache stale\n ls = list(os.listdir('/cachedir'))\n self.assertEquals(len(ls), 1)\n path = os.path.join('/cachedir', ls[0])\n\n recf = open(path, 'r')\n rec = pickle.loads(zlib.decompress(recf.read()))\n recf.close()\n rec['expires'] = 1\n\n recf = open(path, 'w')\n recf.write(zlib.compress(pickle.dumps(rec)))\n recf.close()\n\n times_get = fake_http.call_count\n con().marketData()\n self.assertEqual(times_get + 1, fake_http.call_count)\n\n\nclass TestApiCache(unittest.TestCase):\n @mock.patch('os.path.isdir')\n @mock.patch('os.mkdir')\n @mock.patch('os.unlink')\n @mock.patch('%s.open' % builtins_name)\n def test_apicache(self, mock_open, mock_unlink, mock_mkdir, mock_isdir):\n fs = MockFilesystem()\n mock_isdir.side_effect = fs.isdir\n mock_mkdir.side_effect = fs.mkdir\n mock_unlink.side_effect = fs.unlink\n mock_open.side_effect = fs.open\n\n # Just because pragma: no cover is ugly\n cache = pycrest.eve.APICache()\n self.assertRaises(NotImplementedError, lambda: cache.get(\"foo\"))\n self.assertRaises(NotImplementedError, lambda: cache.put(\"foo\", \"bar\"))\n self.assertRaises(NotImplementedError, 
lambda: cache.invalidate(\"foo\"))\n\n # Test default DictCache\n crest = pycrest.EVE()\n self.assertEqual(type(crest.cache).__name__, \"DictCache\")\n crest.cache.invalidate('nxkey')\n self.assertEqual(crest.cache.get('nxkey'), None)\n crest.cache.put('key', 'value')\n self.assertEqual(crest.cache.get('key'), 'value')\n\n\n # with mkdir needed\n crest = pycrest.EVE(cache_dir=\"/cachedir\")\n\n # without mkdir now\n crest = pycrest.EVE(cache_dir=\"/cachedir\")\n\n # cache created?\n self.assertEqual(type(crest.cache).__name__, \"FileCache\")\n\n # invalidate non-existing key\n crest.cache.invalidate('nxkey')\n\n # get non-existing key\n self.assertEqual(crest.cache.get('nxkey'), None)\n\n # cache (key, value) pair and retrieve it\n crest.cache.put('key', 'value')\n self.assertEqual(crest.cache.get('key'), 'value')\n\n # retrieve from disk\n crest = pycrest.EVE(cache_dir=\"/cachedir\")\n self.assertEqual(crest.cache.get('key'), 'value')\n\n # invalidate key and check it's removed\n crest.cache.invalidate('key')\n self.assertEqual(crest.cache.get('key'), None)\n\n # dirname == filename tests\n # Use _getpath for platform independence\n fs.mkdir(crest.cache._getpath('key'))\n self.assertRaises(OSError, lambda: crest.cache.invalidate('key'))\n self.assertRaises(IOError, lambda: crest.cache.get('key'))\n\n def test_cache_control(self):\n @httmock.all_requests\n def root_m(url, request):\n body = {\n \"shouldCache\": {\n \"href\": \"https://foo.bar/shouldCache/\"\n },\n \"shouldNotCache\": {\n \"href\": \"https://foo.bar/shouldNotCache/\"\n },\n \"noCache\": {\n \"href\": \"https://foo.bar/noCache/\"\n },\n \"noStore\": {\n \"href\": \"https://foo.bar/noStore/\"\n },\n \"brokenInt\": {\n \"href\": \"https://foo.bar/brokenInt\"\n }\n }\n return {\n \"status_code\": 200,\n \"content\": body\n }\n\n @httmock.urlmatch(path=r'^/shouldCache/?$')\n def shouldCache(url, request):\n return {\n \"status_code\": 200,\n \"content\": {\n \"href\": \"shouldCache\"\n },\n \"headers\": {\n \"Cache-Control\": \"private, max-age=300\"\n }\n }\n\n @httmock.urlmatch(path=r'^/shouldNotCache/?$')\n def shouldNotCache(url, request):\n return {\n \"status_code\": 200,\n \"content\": {\n \"href\": \"shouldNotCache\"\n }\n }\n\n @httmock.urlmatch(path=r'^/noCache/?$')\n def noCache(url, request):\n return {\n \"status_code\": 200,\n \"content\": {\n \"href\": \"noCache\"\n },\n \"headers\": {\n \"Cache-Control\": \"no-cache, max-age=300\"\n }\n }\n\n @httmock.urlmatch(path=r'^/noStore/?$')\n def noStore(url, request):\n return {\n \"status_code\": 200,\n \"content\": {\n \"href\": \"noStore\"\n },\n \"headers\": {\n \"Cache-Control\": \"no-store, max-age=300\"\n }\n }\n\n @httmock.urlmatch(path=r'^/brokenInt/?$')\n def brokenInt(url, request):\n return {\n \"status_code\": 200,\n \"content\": {\n \"href\": \"brokenInt\"\n },\n \"headers\": {\n \"Cache-Control\": \"private, max-age=asd\"\n }\n }\n\n with httmock.HTTMock(shouldCache, shouldNotCache, noCache, noStore, brokenInt, root_m) as fake_http:\n eve = pycrest.EVE()\n eve()\n\n call_count = fake_http.call_count\n eve.shouldCache()\n self.assertEqual(fake_http.call_count, call_count + 1)\n call_count = fake_http.call_count\n eve.shouldCache()\n self.assertEqual(fake_http.call_count, call_count)\n\n call_count = fake_http.call_count\n eve.shouldNotCache()\n self.assertEqual(fake_http.call_count, call_count + 1)\n call_count = fake_http.call_count\n eve.shouldNotCache()\n self.assertEqual(fake_http.call_count, call_count + 1)\n\n call_count = fake_http.call_count\n 
eve.noCache()\n self.assertEqual(fake_http.call_count, call_count + 1)\n call_count = fake_http.call_count\n eve.noCache()\n self.assertEqual(fake_http.call_count, call_count + 1)\n\n call_count = fake_http.call_count\n eve.noStore()\n self.assertEqual(fake_http.call_count, call_count + 1)\n call_count = fake_http.call_count\n eve.noStore()\n self.assertEqual(fake_http.call_count, call_count + 1)\n\n call_count = fake_http.call_count\n eve.brokenInt()\n self.assertEqual(fake_http.call_count, call_count + 1)\n call_count = fake_http.call_count\n eve.brokenInt()\n self.assertEqual(fake_http.call_count, call_count + 1)\n\n eve = pycrest.EVE()\n eve()\n with mock.patch('time.time') as mock_time:\n mock_time.return_value = 0\n eve.shouldCache()\n self.assertEqual(list(eve.cache._dict.items())[0][1]['expires'], 300)\n","sub_path":"tests/test_api.py","file_name":"test_api.py","file_ext":"py","file_size_in_byte":22129,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"515227623","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.4 (62061)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-i686/egg/docbook2sla/tests/test_image.py\n# Compiled at: 2008-03-31 11:20:31\nimport os, sys, unittest, tempfile\nfrom lxml import etree\nfrom StringIO import StringIO\nfrom docbook2sla import DocBook2Sla\ndirname = os.path.dirname(__file__)\n\nclass ImagesTestCase(unittest.TestCase):\n __module__ = __name__\n\n def setUp(self):\n article = '
\\n \\n \\n \\n \\n \\n My Second Image\\n \\n \\n
'\n scribus = os.path.join(os.path.dirname(__file__), 'data', 'scribus', 'clean134.sla')\n self.d2s = DocBook2Sla()\n outputfn = self.d2s.create(StringIO(article), scribus)\n output = open(outputfn, 'r').read()\n outputtree = etree.XML(output)\n self.output = output\n self.outputtree = outputtree\n\n def test_no_other_pageobjects(self):\n \"\"\" No other pageobjects are existent \"\"\"\n count_pageobjects = self.outputtree.xpath(\"count(//PAGEOBJECT[@ANNAME!='uid001_image_1'])\")\n self.assertEqual(count_pageobjects, 0.0)\n\n def test_pageobject_exists(self):\n \"\"\" Test if image pageobject exists \"\"\"\n image1 = self.outputtree.xpath(\"count(//PAGEOBJECT[@ANNAME='uid001_image_1'])\")\n self.assertEqual(image1, 1.0)\n\n def test_pageobject_attributes(self):\n \"\"\" Test attributes \"\"\"\n ptype = self.outputtree.xpath(\"count(//PAGEOBJECT[@ANNAME='uid001_image_1' and @PTYPE='2'])\")\n self.assertEqual(ptype, 1.0)\n embedded = self.outputtree.xpath(\"count(//PAGEOBJECT[@ANNAME='uid001_image_1' and @EMBEDDED='0'])\")\n self.assertEqual(embedded, 1.0)\n irender = self.outputtree.xpath(\"count(//PAGEOBJECT[@ANNAME='uid001_image_1' and @IRENDER='0'])\")\n self.assertEqual(irender, 1.0)\n pfile = self.outputtree.xpath(\"count(//PAGEOBJECT[@ANNAME='uid001_image_1' and @PFILE='http://localhost:8080/test/example-article-1/internet-mail.png'])\")\n self.assertEqual(pfile, 1.0)\n\n\ndef test_suite():\n suite = unittest.TestLoader().loadTestsFromTestCase(ImagesTestCase)\n unittest.TextTestRunner(verbosity=2).run(suite)\n return suite\n\n\nif __name__ == '__main__':\n test_suite()","sub_path":"pycfiles/docbook2sla-0.0.16-py2.4/test_image.py","file_name":"test_image.py","file_ext":"py","file_size_in_byte":2537,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"624769042","text":"'''\nCreate a program that asks the user for a number and then prints out a list of all the divisors of that number. \n(If you don’t know what a divisor is, it is a number that divides evenly into another number. 
For example, 13 is a divisor of 26 because 26 / 13 has no remainder.)\n'''\n\nfrom math import floor\n\nrandom = input('Enter a random number: ')\n\np_divisors = [x for x in range(1,floor(int(random)/2)+1)]\n\ndivisors = [x for x in p_divisors if int(random)%x == 0]\n\ndivisors.append(int(random))\n\nprint(divisors)\n","sub_path":"python/4.py","file_name":"4.py","file_ext":"py","file_size_in_byte":518,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"234812783","text":"# -*- coding: utf-8 -*-\nfrom selenium import webdriver\n\nbrowser=webdriver.Chrome()\nurl=\"https://www.zhihu.com/explore\"\nbrowser.get(url)\nlogo=browser.find_element_by_id(\"zh-top-link-logo\")\nprint(logo)\nprint(logo.get_attribute(\"class\"))\nbrowser.close()\n","sub_path":"test/获取属性.py","file_name":"获取属性.py","file_ext":"py","file_size_in_byte":251,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"462505795","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[122]:\n\n\nimport requests # all-purpose web scraping package\nimport re # finding permalink string from requests.response\nimport zipfile, io # stream & unzip\nimport os # for making output folder\nimport pandas as pd # datetime\nfrom pathlib import Path # to check if files already downloaded\n\n\n# In[118]:\n\n\n# select range of dates to check over\nfirst_icews_date = pd.to_datetime(20181004, format='%Y%m%d')\nyesterday = pd.Timestamp.today() - pd.Timedelta('1 days')\nall_dates = pd.date_range(first_icews_date, yesterday).strftime('%Y%m%d')\n\n\n# In[112]:\n\n\n# create output folder in pwd\noutput_folder = './icews_files'\ntry:\n os.mkdir(output_folder)\nexcept FileExistsError:\n pass\n\n\n# In[129]:\n\n\nfor date in all_dates:\n \n # check if file already downloaded\n file_path = Path(output_folder + '/' + date + '-icews-events.tab')\n if file_path.is_file():\n print(date + ' already downloaded.')\n continue\n \n # url search for date needed\n url = 'https://dataverse.harvard.edu/dataverse/icews?q={}&types=files&sort=score&order=desc'.format(date)\n \n # query url\n response = requests.get(url)\n \n # search for events page link in response text\n ext_raw = re.search(\n pattern = r'/file.xhtml\\?(persistentId=doi\\:10.7910/DVN/QI2T9A/[\\d\\w]{6})',\n string = response.text)\n \n # if it doesn't exist, assume it isn't posted\n if not ext_raw:\n print(date + ' not posted.')\n continue\n\n # collect download url extension string\n ext = ext_raw.groups(1)[0]\n \n # combine download url\n dl_url = 'https://dataverse.harvard.edu/api/access/datafile/:persistentId?' 
+ ext\n\n # query download url\n download = requests.get(dl_url)\n\n # unzip\n try:\n zipfile.ZipFile(\n io.BytesIO(\n download.content)) \\\n .extractall(path = output_folder)\n except zipfile.BadZipFile:\n print(date + ' is corrupted.')\n continue\n \n # success status message\n print(date + ' downloaded successfully.')\n\n\n# In[ ]:\n\n\n\n\n","sub_path":"old_files/download_all_icews.py","file_name":"download_all_icews.py","file_ext":"py","file_size_in_byte":2108,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
{"seq_id":"50084185","text":"import os\nimport sys\nimport codecs\nimport re\nimport locale\nfrom xml.sax.saxutils import escape\n\nfrom Node import *;\nfrom Helper import *;\n\nimport hashlib\n\n\nclass ChunkNode() :\n \n def __init__(self, header) :\n self.text = []\n self.header = header\n self.footer = None\n self.nodeList = []\n self.parent = '0'\n self.__attributes = {}\n self.parentRelation = 'root'\n self.name = None\n self.type = None\n self.head = None\n self.isParent = False\n self.errors = []\n self.upper = None\n self.updateDrel()\n self.type = None\n self.fsList = None\n self.phrase = \"\"\n\n def analyzeChunk(self) :\n [chunkType,chunkFeatDict,chunkFSList] = getChunkFeats(self.header)\n self.fsList = chunkFSList\n self.type = chunkType\n self.updateAttributes(chunkFeatDict)\n self.text = '\\n'.join([line for line in self.text])\n\n #Update chunk phrase\n for node in self.nodeList:\n if isinstance(node, ChunkNode):\n self.phrase += node.phrase.strip()+\" \"\n else:\n self.phrase += node.base64Lex.strip()+\" \"\n\n import hashlib\n self.id = hashlib.md5(self.phrase.encode('utf-8')).hexdigest()[:8]\n \n def updateAttributes(self,fsDict) :\n for attribute in fsDict.keys() :\n self.__attributes[attribute] = fsDict[attribute]\n self.assignName()\n self.updateDrel()\n\n def assignName(self) :\n if self.__attributes.has_key('name') : \n self.name = self.getAttribute('name')\n else :\n self.errors.append('No name for this chunk Node')\n\n # Implemented by SPM, a recursive implementation which assigns the \n # Hash of the lex as its name \n def assignNames(self):\n self.__attributes['name'] = hashlib.md5(self.phrase).hexdigest()[:8]\n self.assignName()\n for node in self.nodeList:\n node.assignNames() #Propagate into the tree\n \n def updateDrel(self) :\n if self.__attributes.has_key('drel') :\n drelList = self.getAttribute('drel').split(':')\n if len(drelList) == 2 :\n self.parent = drelList[1]\n self.parentRelation = self.getAttribute('drel').split(':')[0]\n elif self.__attributes.has_key('dmrel') :\n drelList = self.getAttribute('dmrel').split(':')\n if len(drelList) == 2 :\n self.parent = drelList[1]\n self.parentRelation = self.getAttribute('dmrel').split(':')[0]\n\n def printValue(self) :\n returnString = []\n for node in self.nodeList :\n returnString.append(node.printValue())\n return ' '.join(x for x in returnString)\n\n def printSSFValue(self, prefix, allFeat) :\n returnStringList = []\n returnValue = [prefix , '((' , self.type]\n if allFeat == False :\n fs = ['']\n delim = ''\n else :\n fs = self.fsList\n delim = '|'\n \n returnStringList.append('\\t'.join(x for x in returnValue) + '\\t' + delim.join(x for x in fs))\n nodePosn = 0\n for node in self.nodeList :\n nodePosn += 1\n if isinstance(node,ChunkNode) :\n returnStringList.extend(node.printSSFValue(prefix + '.' + str(nodePosn), allFeat))\n else :\n returnStringList.append(node.printSSFValue(prefix + '.' 
+ str(nodePosn), allFeat))\n returnStringList.append('\\t' + '))')\n return returnStringList\n\n def getAttribute(self,key) :\n if self.__attributes.has_key(key) :\n return self.__attributes[key]\n else :\n return None\n\n def addAttribute(self,key,value) :\n self.__attributes[key] = value\n\n def deleteAttribute(self,key) :\n del self.__attributes[key]\n\n def getXML(self) :\n xml = '\"\n for node in self.nodeList:\n xml += node.getXML()\n\n xml += \"\"\n return xml\n\n\n\n","sub_path":"SSF/ChunkNode.py","file_name":"ChunkNode.py","file_ext":"py","file_size_in_byte":4556,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"34831008","text":"import os\nimport hashlib\nimport datetime\n\nimport numpy as np\nfrom sqlalchemy import Enum\nfrom sqlalchemy import Float\nfrom sqlalchemy import Column\nfrom sqlalchemy import String\nfrom sqlalchemy import Integer\nfrom sqlalchemy import Boolean\nfrom sqlalchemy import DateTime\nfrom sqlalchemy import ForeignKey\nfrom sqlalchemy import UniqueConstraint\nfrom sqlalchemy import inspect\nfrom sqlalchemy.orm import backref\nfrom sqlalchemy.orm import relationship\nfrom sqlalchemy.ext.hybrid import hybrid_property\n\nfrom .base import Model\nfrom .base import encode_string\nfrom .base import get_deployment_path\nfrom .event import EventScoreType\nfrom .datatype import NumpyType\n\n__all__ = [\n 'Submission',\n 'SubmissionScore',\n 'SubmissionFile',\n 'SubmissionFileType',\n 'SubmissionFileTypeExtension',\n 'Extension',\n 'SubmissionScoreOnCVFold',\n 'SubmissionOnCVFold',\n 'DetachedSubmissionOnCVFold',\n 'SubmissionSimilarity',\n]\n\n# evaluate right after train/test, so no need for 'scored' states\nsubmission_states = Enum(\n 'new', # submitted by user to frontend server\n 'checked', # not used, checking is part of the workflow now\n 'checking_error', # not used, checking is part of the workflow now\n 'trained', # training finished normally on the backend server\n 'training_error', # training finished abnormally on the backend server\n 'validated', # validation finished normally on the backend server\n 'validating_error', # validation finished abnormally on the backend server\n 'tested', # testing finished normally on the backend server\n 'testing_error', # testing finished abnormally on the backend server\n 'training', # training is running normally on the backend server\n 'sent_to_training', # frontend server sent submission to backend server\n 'scored', # submission scored on the frontend server.Final state\n name='submission_states')\n\nsubmission_types = Enum('live', 'test', name='submission_types')\n\n\nclass Submission(Model):\n \"\"\"An abstract (untrained) submission.\"\"\"\n\n __tablename__ = 'submissions'\n\n id = Column(Integer, primary_key=True)\n\n event_team_id = Column(\n Integer, ForeignKey('event_teams.id'), nullable=False)\n event_team = relationship('EventTeam', backref=backref(\n 'submissions', cascade='all, delete-orphan'))\n\n name = Column(String(20, convert_unicode=True), nullable=False)\n hash_ = Column(String, nullable=False, index=True, unique=True)\n submission_timestamp = Column(DateTime, nullable=False)\n sent_to_training_timestamp = Column(DateTime)\n training_timestamp = Column(DateTime) # end of training\n\n contributivity = Column(Float, default=0.0)\n historical_contributivity = Column(Float, default=0.0)\n\n type = Column(submission_types, default='live')\n state = Column(String, default='new')\n # TODO: hide absolute path in error\n error_msg = Column(String, 
default='')\n # user can delete but we keep\n is_valid = Column(Boolean, default=True)\n # We can forget bad models.\n # If false, don't combine and set contributivity to zero\n is_to_ensemble = Column(Boolean, default=True)\n # in competitive events participants can select the submission\n # with which they want to participate in the competition\n is_in_competition = Column(Boolean, default=True)\n\n notes = Column(String, default='') # eg, why is it disqualified\n\n train_time_cv_mean = Column(Float, default=0.0)\n valid_time_cv_mean = Column(Float, default=0.0)\n test_time_cv_mean = Column(Float, default=0.0)\n train_time_cv_std = Column(Float, default=0.0)\n valid_time_cv_std = Column(Float, default=0.0)\n test_time_cv_std = Column(Float, default=0.0)\n # the maximum memory size used when training/testing, in MB\n max_ram = Column(Float, default=0.0)\n # later also ramp_id\n UniqueConstraint(event_team_id, name, name='ts_constraint')\n\n def __init__(self, name, event_team):\n self.name = name\n self.event_team = event_team\n self.session = inspect(event_team).session\n sha_hasher = hashlib.sha1()\n sha_hasher.update(encode_string(self.event.name))\n sha_hasher.update(encode_string(self.team.name))\n sha_hasher.update(encode_string(self.name))\n # We considered using the id, but then it will be given away in the\n # url which is maybe not a good idea.\n self.hash_ = '{}'.format(sha_hasher.hexdigest())\n self.submission_timestamp = datetime.datetime.utcnow()\n event_score_types = EventScoreType.query.filter_by(\n event=event_team.event)\n for event_score_type in event_score_types:\n submission_score = SubmissionScore(\n submission=self, event_score_type=event_score_type)\n self.session.add(submission_score)\n self.reset()\n\n def __str__(self):\n return 'Submission({}/{}/{})'.format(\n self.event.name, self.team.name, self.name)\n\n def __repr__(self):\n repr = '''Submission(event_name={}, team_name={}, name={}, files={},\n state={}, train_time={})'''.format(\n encode_string(self.event.name),\n encode_string(self.team.name),\n encode_string(self.name),\n self.files,\n self.state,\n self.train_time_cv_mean)\n return repr\n\n @hybrid_property\n def team(self):\n return self.event_team.team\n\n @hybrid_property\n def event(self):\n return self.event_team.event\n\n @property\n def official_score_function(self):\n return self.event.official_score_function\n\n @property\n def official_score_name(self):\n return self.event.official_score_name\n\n @property\n def official_score(self):\n score_dict = {score.score_name: score for score in self.scores}\n return score_dict[self.official_score_name]\n\n @property\n def score_types(self):\n return self.event.score_types\n\n @property\n def Predictions(self):\n return self.event.Predictions\n\n @hybrid_property\n def is_not_sandbox(self):\n return self.name != os.getenv('RAMP_SANDBOX_DIR', 'starting_kit')\n\n @hybrid_property\n def is_error(self):\n return (self.state == 'training_error') |\\\n (self.state == 'checking_error') |\\\n (self.state == 'validating_error') |\\\n (self.state == 'testing_error')\n\n @hybrid_property\n def is_public_leaderboard(self):\n return self.is_not_sandbox & self.is_valid & (self.state == 'scored')\n\n @hybrid_property\n def is_private_leaderboard(self):\n return self.is_not_sandbox & self.is_valid & (self.state == 'scored')\n\n @property\n def path(self):\n return os.path.join(\n get_deployment_path(),\n 'submissions',\n 'submission_' + '{0:09d}'.format(self.id))\n\n @property\n def module(self):\n return 
self.path.lstrip('./').replace('/', '.')\n\n @property\n def f_names(self):\n return [file.f_name for file in self.files]\n\n @property\n def link(self):\n return self.files[0].link\n\n @property\n def full_name_with_link(self):\n return '<a href={}>{}/{}/{}</a>'.format(\n self.link, self.event.name, self.team.name, self.name[:20])\n\n @property\n def name_with_link(self):\n return '<a href={}>{}</a>'.format(self.link, self.name[:20])\n\n @property\n def state_with_link(self):\n return '<a href=/{}>{}</a>'.format(\n os.path.join(self.hash_, 'error.txt'), self.state)\n\n def ordered_scores(self, score_names):\n \"\"\"Iterator yielding SubmissionScores.\n\n Ordered according to score_names. Called by get_public_leaderboard\n and get_private_leaderboard, making sure scores are listed in the\n correct column.\n\n Parameters\n ----------\n score_names : list of strings\n\n Return\n ----------\n scores : iterator of SubmissionScore objects\n \"\"\"\n score_dict = {score.score_name: score for score in self.scores}\n for score_name in score_names:\n yield score_dict[score_name]\n\n # These were constructing means and stds by fetching fold times. It was\n # slow because submission_on_folds contain also possibly large predictions\n # If postgres solves this issue (which can be tested on the mean and std\n # scores on the private leaderboard), the corresponding columns (which are\n # now redundant) can be deleted and these can be uncommented.\n # @property\n # def train_time_cv_mean(self):\n # return np.mean([ts.train_time for ts in self.on_cv_folds])\n\n # @property\n # def valid_time_cv_mean(self):\n # return np.mean([ts.valid_time for ts in self.on_cv_folds])\n\n # @property\n # def test_time_cv_mean(self):\n # return np.mean([ts.test_time for ts in self.on_cv_folds])\n\n # @property\n # def train_time_cv_std(self):\n # return np.std([ts.train_time for ts in self.on_cv_folds])\n\n # @property\n # def valid_time_cv_std(self):\n # return np.std([ts.valid_time for ts in self.on_cv_folds])\n\n # @property\n # def test_time_cv_std(self):\n # return np.std([ts.test_time for ts in self.on_cv_folds])\n\n def set_state(self, state):\n self.state = state\n for submission_on_cv_fold in self.on_cv_folds:\n submission_on_cv_fold.state = state\n\n def reset(self):\n self.contributivity = 0.0\n self.state = 'new'\n self.error_msg = ''\n for score in self.scores:\n score.valid_score_cv_bag = score.event_score_type.worst\n score.test_score_cv_bag = score.event_score_type.worst\n score.valid_score_cv_bags = None\n score.test_score_cv_bags = None\n\n def set_error(self, error, error_msg):\n self.reset()\n self.state = error\n self.error_msg = error_msg\n for submission_on_cv_fold in self.on_cv_folds:\n submission_on_cv_fold.set_error(error, error_msg)\n\n # contributivity could be a property but then we could not query on it\n def set_contributivity(self):\n self.contributivity = 0.0\n if self.is_public_leaderboard:\n # we share a unit of 1. among folds\n unit_contributivity = 1. 
/ len(self.on_cv_folds)\n for submission_on_cv_fold in self.on_cv_folds:\n self.contributivity +=\\\n unit_contributivity * submission_on_cv_fold.contributivity\n\n def set_state_after_training(self):\n self.training_timestamp = datetime.datetime.utcnow()\n states = [submission_on_cv_fold.state\n for submission_on_cv_fold in self.on_cv_folds]\n if all(state in ['tested'] for state in states):\n self.state = 'tested'\n elif all(state in ['tested', 'validated'] for state in states):\n self.state = 'validated'\n elif all(state in ['tested', 'validated', 'trained']\n for state in states):\n self.state = 'trained'\n elif any(state == 'training_error' for state in states):\n self.state = 'training_error'\n i = states.index('training_error')\n self.error_msg = self.on_cv_folds[i].error_msg\n elif any(state == 'validating_error' for state in states):\n self.state = 'validating_error'\n i = states.index('validating_error')\n self.error_msg = self.on_cv_folds[i].error_msg\n elif any(state == 'testing_error' for state in states):\n self.state = 'testing_error'\n i = states.index('testing_error')\n self.error_msg = self.on_cv_folds[i].error_msg\n if 'error' not in self.state:\n self.error_msg = ''\n\n\nclass SubmissionScore(Model):\n __tablename__ = 'submission_scores'\n\n id = Column(Integer, primary_key=True)\n submission_id = Column(\n Integer, ForeignKey('submissions.id'), nullable=False)\n submission = relationship('Submission', backref=backref(\n 'scores', cascade='all, delete-orphan'))\n\n event_score_type_id = Column(\n Integer, ForeignKey('event_score_types.id'), nullable=False)\n event_score_type = relationship(\n 'EventScoreType', backref=backref('submissions'))\n\n # These are cv-bagged scores. Individual scores are found in\n # SubmissionToTrain\n valid_score_cv_bag = Column(Float) # cv\n test_score_cv_bag = Column(Float) # holdout\n # we store the partial scores so to see the saturation and\n # overfitting as the number of cv folds grow\n valid_score_cv_bags = Column(NumpyType)\n test_score_cv_bags = Column(NumpyType)\n\n @property\n def score_name(self):\n return self.event_score_type.name\n\n @property\n def score_function(self):\n return self.event_score_type.score_function\n\n # default display precision in n_digits\n @property\n def precision(self):\n return self.event_score_type.precision\n\n @property\n def train_score_cv_mean(self):\n return np.mean([ts.train_score for ts in self.on_cv_folds])\n\n @property\n def valid_score_cv_mean(self):\n return np.mean([ts.valid_score for ts in self.on_cv_folds])\n\n @property\n def test_score_cv_mean(self):\n return np.mean([ts.test_score for ts in self.on_cv_folds])\n\n @property\n def train_score_cv_std(self):\n return np.std([ts.train_score for ts in self.on_cv_folds])\n\n @property\n def valid_score_cv_std(self):\n return np.std([ts.valid_score for ts in self.on_cv_folds])\n\n @property\n def test_score_cv_std(self):\n return np.std([ts.test_score for ts in self.on_cv_folds])\n\n\n# TODO: we should have a SubmissionWorkflowElementType table, describing the\n# type of files we are expecting for a given RAMP. 
Fast unit test should be\n# set up there, and each file should be unit tested right after submission.\n# Kozmetics: perhaps mark which file the leaderboard link should point to (right\n# now it is set to the first file in the list which is arbitrary).\n# We will also have to handle auxiliary files (like csvs or other classes).\n# User interface could have a single submission form with a menu containing\n# the file names for a given ramp + an \"other\" field when users will have to\n# name their files\nclass SubmissionFile(Model):\n __tablename__ = 'submission_files'\n\n id = Column(Integer, primary_key=True)\n submission_id = Column(\n Integer, ForeignKey('submissions.id'), nullable=False)\n submission = relationship(\n 'Submission',\n backref=backref('files', cascade='all, delete-orphan'))\n\n # e.g. 'regression', 'external_data'\n workflow_element_id = Column(\n Integer, ForeignKey('workflow_elements.id'),\n nullable=False)\n workflow_element = relationship(\n 'WorkflowElement', backref=backref('submission_files'))\n\n # e.g., ('code', 'py'), ('data', 'csv')\n submission_file_type_extension_id = Column(\n Integer, ForeignKey('submission_file_type_extensions.id'),\n nullable=False)\n submission_file_type_extension = relationship(\n 'SubmissionFileTypeExtension', backref=backref('submission_files'))\n\n # eg, 'py'\n @property\n def is_editable(self):\n return self.workflow_element.is_editable\n\n # eg, 'py'\n @property\n def extension(self):\n return self.submission_file_type_extension.extension.name\n\n # eg, 'regressor'\n @property\n def type(self):\n return self.workflow_element.type\n\n # eg, 'regressor', Normally same as type, except when type appears more\n # than once in workflow\n @property\n def name(self):\n return self.workflow_element.name\n\n # Complete file name, eg, 'regressor.py'\n @property\n def f_name(self):\n return self.type + '.' + self.extension\n\n @property\n def link(self):\n return '/' + os.path.join(self.submission.hash_, self.f_name)\n\n @property\n def path(self):\n return os.path.join(self.submission.path, self.f_name)\n\n @property\n def name_with_link(self):\n return '<a href=\"' + self.link + '\">' + self.name + '</a>'\n\n def get_code(self):\n with open(self.path) as f:\n code = f.read()\n return code\n\n def set_code(self, code):\n code.encode('ascii') # to raise an exception if code is not ascii\n with open(self.path, 'w') as f:\n f.write(code)\n\n def __repr__(self):\n return 'SubmissionFile(name={}, type={}, extension={}, path={})'.\\\n format(self.name, self.type, self.extension, self.path)\n\n\nclass SubmissionFileTypeExtension(Model):\n __tablename__ = 'submission_file_type_extensions'\n\n id = Column(Integer, primary_key=True)\n\n type_id = Column(\n Integer, ForeignKey('submission_file_types.id'), nullable=False)\n type = relationship(\n 'SubmissionFileType', backref=backref('extensions'))\n\n extension_id = Column(\n Integer, ForeignKey('extensions.id'), nullable=False)\n extension = relationship(\n 'Extension', backref=backref('submission_file_types'))\n\n UniqueConstraint(type_id, extension_id, name='we_constraint')\n\n @property\n def file_type(self):\n return self.type.name\n\n @property\n def extension_name(self):\n return self.extension.name\n\n\nclass SubmissionFileType(Model):\n __tablename__ = 'submission_file_types'\n\n id = Column(Integer, primary_key=True)\n # eg. 
'code', 'text', 'data'\n name = Column(String, nullable=False, unique=True)\n is_editable = Column(Boolean, default=True)\n max_size = Column(Integer, default=None)\n\n\nclass Extension(Model):\n __tablename__ = 'extensions'\n\n id = Column(Integer, primary_key=True)\n # eg. 'py', 'csv', 'R'\n name = Column(String, nullable=False, unique=True)\n\n\nclass SubmissionScoreOnCVFold(Model):\n __tablename__ = 'submission_score_on_cv_folds'\n\n id = Column(Integer, primary_key=True)\n submission_on_cv_fold_id = Column(\n Integer, ForeignKey('submission_on_cv_folds.id'), nullable=False)\n submission_on_cv_fold = relationship(\n 'SubmissionOnCVFold', backref=backref(\n 'scores', cascade='all, delete-orphan'))\n\n submission_score_id = Column(\n Integer, ForeignKey('submission_scores.id'), nullable=False)\n submission_score = relationship('SubmissionScore', backref=backref(\n 'on_cv_folds', cascade='all, delete-orphan'))\n\n train_score = Column(Float)\n valid_score = Column(Float)\n test_score = Column(Float)\n\n UniqueConstraint(\n submission_on_cv_fold_id, submission_score_id, name='ss_constraint')\n\n @property\n def name(self):\n return self.event_score_type.name\n\n @property\n def event_score_type(self):\n return self.submission_score.event_score_type\n\n @property\n def score_function(self):\n return self.event_score_type.score_function\n\n\n# TODO: rename submission to workflow and submitted file to workflow_element\n# TODO: SubmissionOnCVFold should actually be a workflow element. Saving\n# train_pred means that we can input it to the next workflow element\n# TODO: implement check\nclass SubmissionOnCVFold(Model):\n \"\"\"SubmissionOnCVFold.\n\n is an instantiation of Submission, to be trained on a data file and a cv\n fold. We don't actually store the trained model in the db (lack of disk and\n pickling issues), so trained submission is not a database column. On the\n other hand, we will store train, valid, and test predictions. 
In a sense\n substituting CPU time for storage.\n \"\"\"\n\n __tablename__ = 'submission_on_cv_folds'\n\n id = Column(Integer, primary_key=True)\n\n submission_id = Column(\n Integer, ForeignKey('submissions.id'), nullable=False)\n submission = relationship(\n 'Submission', backref=backref(\n 'on_cv_folds', cascade=\"all, delete-orphan\"))\n\n cv_fold_id = Column(\n Integer, ForeignKey('cv_folds.id'), nullable=False)\n cv_fold = relationship(\n 'CVFold', backref=backref(\n 'submissions', cascade=\"all, delete-orphan\"))\n\n # filled by cv_fold.get_combined_predictions\n contributivity = Column(Float, default=0.0)\n best = Column(Boolean, default=False)\n\n # prediction on the full training set, including train and valid points\n # properties train_predictions and valid_predictions will make the slicing\n full_train_y_pred = Column(NumpyType, default=None)\n test_y_pred = Column(NumpyType, default=None)\n train_time = Column(Float, default=0.0)\n valid_time = Column(Float, default=0.0)\n test_time = Column(Float, default=0.0)\n state = Column(submission_states, default='new')\n error_msg = Column(String, default='')\n\n UniqueConstraint(submission_id, cv_fold_id, name='sc_constraint')\n\n def __init__(self, submission, cv_fold):\n self.submission = submission\n self.cv_fold = cv_fold\n self.session = inspect(submission).session\n for score in submission.scores:\n submission_score_on_cv_fold = SubmissionScoreOnCVFold(\n submission_on_cv_fold=self, submission_score=score)\n self.session.add(submission_score_on_cv_fold)\n self.reset()\n\n def __repr__(self):\n repr = 'state = {}, c = {}'\\\n ', best = {}'.format(\n self.state, self.contributivity, self.best)\n return repr\n\n @hybrid_property\n def is_public_leaderboard(self):\n return self.state == 'scored'\n\n @hybrid_property\n def is_trained(self):\n return self.state in\\\n ['trained', 'validated', 'tested', 'validating_error',\n 'testing_error', 'scored']\n\n @hybrid_property\n def is_validated(self):\n return self.state in ['validated', 'tested', 'testing_error', 'scored']\n\n @hybrid_property\n def is_tested(self):\n return self.state in ['tested', 'scored']\n\n @hybrid_property\n def is_error(self):\n return (self.state == 'training_error') |\\\n (self.state == 'checking_error') |\\\n (self.state == 'validating_error') |\\\n (self.state == 'testing_error')\n\n # The following four functions are converting the stored numpy arrays\n # <>_y_pred into Prediction instances\n @property\n def full_train_predictions(self):\n return self.submission.Predictions(y_pred=self.full_train_y_pred)\n\n @property\n def train_predictions(self):\n return self.submission.Predictions(\n y_pred=self.full_train_y_pred[self.cv_fold.train_is])\n\n @property\n def valid_predictions(self):\n return self.submission.Predictions(\n y_pred=self.full_train_y_pred[self.cv_fold.test_is])\n\n @property\n def test_predictions(self):\n return self.submission.Predictions(y_pred=self.test_y_pred)\n\n @property\n def official_score(self):\n for score in self.scores:\n if self.submission.official_score_name == score.name:\n return score\n\n def reset(self):\n self.contributivity = 0.0\n self.best = False\n self.full_train_y_pred = None\n self.test_y_pred = None\n self.train_time = 0.0\n self.valid_time = 0.0\n self.test_time = 0.0\n self.state = 'new'\n self.error_msg = ''\n for score in self.scores:\n score.train_score = score.event_score_type.worst\n score.valid_score = score.event_score_type.worst\n score.test_score = score.event_score_type.worst\n\n def set_error(self, 
error, error_msg):\n self.reset()\n self.state = error\n self.error_msg = error_msg\n\n def compute_train_scores(self):\n if self.is_trained:\n true_full_train_predictions =\\\n self.submission.event.problem.ground_truths_train()\n for score in self.scores:\n score.train_score = float(score.score_function(\n true_full_train_predictions, self.full_train_predictions,\n self.cv_fold.train_is))\n else:\n for score in self.scores:\n score.train_score = score.event_score_type.worst\n\n def compute_valid_scores(self):\n if self.is_validated:\n true_full_train_predictions =\\\n self.submission.event.problem.ground_truths_train()\n for score in self.scores:\n score.valid_score = float(score.score_function(\n true_full_train_predictions, self.full_train_predictions,\n self.cv_fold.test_is))\n else:\n for score in self.scores:\n score.valid_score = score.event_score_type.worst\n\n def compute_test_scores(self):\n if self.is_tested:\n true_test_predictions =\\\n self.submission.event.problem.ground_truths_test()\n for score in self.scores:\n score.test_score = float(score.score_function(\n true_test_predictions, self.test_predictions))\n else:\n for score in self.scores:\n score.test_score = score.event_score_type.worst\n\n def update(self, detached_submission_on_cv_fold):\n \"\"\"From trained DetachedSubmissionOnCVFold.\"\"\"\n self.state = detached_submission_on_cv_fold.state\n if self.is_error:\n self.error_msg = detached_submission_on_cv_fold.error_msg\n else:\n if self.is_trained:\n self.train_time = detached_submission_on_cv_fold.train_time\n if self.is_validated:\n self.valid_time = detached_submission_on_cv_fold.valid_time\n self.full_train_y_pred =\\\n detached_submission_on_cv_fold.full_train_y_pred\n if self.is_tested:\n self.test_time = detached_submission_on_cv_fold.test_time\n self.test_y_pred = detached_submission_on_cv_fold.test_y_pred\n\n\nclass DetachedSubmissionOnCVFold(object):\n \"\"\"Copy of SubmissionOnCVFold, all the fields we need in train and test.\n\n It's because SQLAlchemy objects don't persist through\n multiprocessing jobs. 
Maybe eliminated if we do the parallelization\n differently, though I doubt it.\n \"\"\"\n\n def __init__(self, submission_on_cv_fold):\n self.train_is = submission_on_cv_fold.cv_fold.train_is\n self.test_is = submission_on_cv_fold.cv_fold.test_is\n self.full_train_y_pred = submission_on_cv_fold.full_train_y_pred\n self.test_y_pred = submission_on_cv_fold.test_y_pred\n self.state = submission_on_cv_fold.state\n self.name = submission_on_cv_fold.submission.event.name + '/'\\\n + submission_on_cv_fold.submission.team.name + '/'\\\n + submission_on_cv_fold.submission.name\n self.path = submission_on_cv_fold.submission.path\n self.error_msg = submission_on_cv_fold.error_msg\n self.train_time = submission_on_cv_fold.train_time\n self.valid_time = submission_on_cv_fold.valid_time\n self.test_time = submission_on_cv_fold.test_time\n self.trained_submission = None\n self.workflow =\\\n submission_on_cv_fold.submission.event.problem.workflow_object\n\n def __repr__(self):\n text = 'Submission({}) on fold {}'.format(\n self.name, str(self.train_is)[:10])\n return text\n\n\nsubmission_similarity_type = Enum(\n 'target_credit', # credit given by one of the authors of target\n 'source_credit', # credit given by one of the authors of source\n 'thirdparty_credit', # credit given by an independent user\n name='submission_similarity_type'\n)\n\n\nclass SubmissionSimilarity(Model):\n __tablename__ = 'submission_similaritys'\n\n id = Column(Integer, primary_key=True)\n type = Column(submission_similarity_type, nullable=False)\n note = Column(String, default=None)\n timestamp = Column(DateTime, default=datetime.datetime.utcnow())\n similarity = Column(Float, default=0.0)\n\n user_id = Column(Integer, ForeignKey('users.id'))\n user = relationship(\n 'User', backref=backref('submission_similaritys'))\n\n source_submission_id = Column(\n Integer, ForeignKey('submissions.id'))\n source_submission = relationship(\n 'Submission', primaryjoin=(\n 'SubmissionSimilarity.source_submission_id == Submission.id'),\n backref=backref('sources', cascade='all, delete-orphan'))\n\n target_submission_id = Column(\n Integer, ForeignKey('submissions.id'))\n target_submission = relationship(\n 'Submission', primaryjoin=(\n 'SubmissionSimilarity.target_submission_id == Submission.id'),\n backref=backref('targets', cascade='all, delete-orphan'))\n\n def __repr__(self):\n text = 'type={}, user={}, source={}, target={} '.format(\n self.type, self.user, self.source_submission,\n self.target_submission)\n text += 'similarity={}, timestamp={}'.format(\n self.similarity, self.timestamp)\n return text\n","sub_path":"ramp-database/rampdb/model/submission.py","file_name":"submission.py","file_ext":"py","file_size_in_byte":28985,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"386183471","text":"import logging\nfrom get_draws.api import get_draws\nfrom gbd.decomp_step import decomp_step_from_decomp_step_id\n\nlogger = logging.getLogger(__name__)\n\n\ndef read_model_draws(cause_id, model_version_id, location_ids, decomp_step_id, gbd_round_id):\n \"\"\"\n Reads draws using get_draws.\n :param cause_id:\n cause ID for the model\n :param model_version_id:\n model_version_id to be read from\n :param location_ids: list of ints\n list of location ids to filter the draws by\n :param decomp_step_id: int\n decomposition step ID\n :param gbd_round_id: int\n GBD round ID\n :return: dataframe\n pandas dataframe of the draws for a given list of locations\n \"\"\"\n logger.info(\"Reading draws with 
get_draws for cause ID {},\"\n \"model_version_id {}.\".format(cause_id, model_version_id))\n df = get_draws(gbd_id_type='cause_id',\n gbd_id=int(cause_id),\n source='codem',\n version_id=model_version_id,\n location_id=location_ids,\n decomp_step=decomp_step_from_decomp_step_id(decomp_step_id),\n gbd_round_id=int(gbd_round_id))\n return df\n\n","sub_path":"gbd_2019/shared_code/central_comp/cod/codem/hybridizer/draws.py","file_name":"draws.py","file_ext":"py","file_size_in_byte":1205,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"78356822","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Dec 7 12:26:14 2018\n\n@author: slauniai\n\nMakes Fig 3 of Launiainen et al. 2019 GMD\n\"\"\"\n\nimport os\nimport numpy as np\nimport pandas as pd\nimport pickle\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.axes_grid1.inset_locator import inset_axes\nimport seaborn as sns\nfrom scipy import stats\n\neps = np.finfo(float).eps\n\n#%% plot FIHy figure (Fig. 3)\n\ndef draw_Fig3(data, mod):\n\n # modeled daily values\n ET = mod[0]['ET']\n ET_low = mod[1]['ET']\n ET_high = mod[2]['ET']\n \n Wliq = mod[0]['Wliq']\n Wliq_low = mod[1]['Wliq']\n Wliq_high = mod[2]['Wliq']\n \n SWE = mod[0]['SWE']\n SWE_low = mod[1]['SWE']\n SWE_high = mod[2]['SWE']\n \n # measured values\n et_dry = data['ET']\n Prec = data['Prec']\n Ts = data['Tsh']\n et_dry[Prec > 0.1] = np.NaN \n tvec = data.index\n doy = data['doy']\n \n # soil moisture\n SWCa = data['SWCa']\n SWCa[Ts <= 0.5] = np.NaN\n SWCa[SWCa > 0.5] = np.NaN\n\n # SWE\n SWEm = data['SWE']\n SWEm = SWEm.dropna() \n \n sns.set_style('whitegrid')\n with sns.color_palette('muted'):\n fig = plt.figure()\n \n fig.set_size_inches(6.5, 7.5)\n \n plt.subplot(3,3,(1,2))\n \n plt.plot(tvec, et_dry, 'o', markersize=4, alpha=0.3, label='obs')\n plt.fill_between(tvec, ET_low, ET_high, facecolor='grey', alpha=0.5, label='range')\n plt.plot(tvec, ET, 'k-', alpha=0.6, lw=0.5, label='mod')\n #plt.xlim([pd.datetime(2003, 10, 1), pd.datetime(2011,1,1)])\n #plt.legend(loc=2, fontsize=8)\n plt.setp(plt.gca().get_xticklabels(), fontsize=8)\n plt.setp(plt.gca().get_yticklabels(), fontsize=8)\n plt.ylabel('ET (mm d$^{-1}$)', fontsize=9)\n plt.ylim([-0.05, 5.0])\n plt.xlim([pd.datetime(2002, 1, 1), pd.datetime(2011,1,1)])\n tstr = 'FIHy: %.1f (%.1f) m$^2$m$^{-2}$' % (4.0, 0.5)\n plt.title(tstr, fontsize=9) \n \n # scatterplot\n plt.subplot(3,3,3)\n xxx = et_dry.copy()\n xxx[doy < 120] = np.NaN\n xxx[doy > 273] = np.NaN\n meas = np.array(xxx.values.tolist())\n ix = np.where(np.isfinite(meas))[0]\n meas = meas[ix].copy()\n mod = ET[ix].copy()\n \n slope, intercept, r_value, p_value, std_err = stats.linregress(meas, mod)\n print('slope', slope, 'interc', intercept)\n # slope, intercept, _, _ = stats.theilslopes(meas, mod, 0.95)\n # force regression through origin\n x = meas[:, np.newaxis]\n slope, res, _, _ = np.linalg.lstsq(x, mod)\n r2 = (1.0 - res / sum((mod - np.mean(mod))**2))[0]\n intercept = 0.0\n rmse = np.sqrt(((mod - meas) ** 2).mean())\n me = np.mean(mod - meas)\n \n xx = np.array([min(meas), max(meas)])\n plt.plot(meas, mod, 'o', markersize=4, alpha=0.3)\n \n plt.plot(xx, slope*xx, 'k-')\n plt.plot([0, 5], [0, 5], 'k--', linewidth=1)\n #plt.text(0.3, 4.5, 'y = %.2fx, R$^2$=%.2f' %(slope, r2), fontsize=8)\n tst = 's=%.2f\\nR$^2$=%.2f\\nME=%.2f' %(slope, r2, me)\n plt.text(0.3, 3.6, tst, fontsize=8)\n plt.xlim([-0.01, 5]); plt.ylim([-0.01, 5])\n ax = plt.gca()\n ax.yaxis.set_label_position(\"right\")\n 
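        # Note on the fit a few lines up: passing the observations to
        # np.linalg.lstsq as a single column (no intercept column) forces
        # the regression through the origin, and R^2 is then recomputed
        # from the residual sum of squares because the r_value returned by
        # stats.linregress does not apply to a zero-intercept model.
        # A toy sketch of the same idea (illustrative values only):
        #
        #   x = np.array([1., 2., 3.])[:, np.newaxis]
        #   y = np.array([2.1, 3.9, 6.2])
        #   slope, res, _, _ = np.linalg.lstsq(x, y)  # slope ~= 2.04
        #   r2 = (1.0 - res / sum((y - np.mean(y))**2))[0]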
ax.yaxis.tick_right()\n ax.set_xticks(np.arange(0, 6))\n ax.set_yticks(np.arange(0, 6))\n ax.set_xticklabels(np.arange(0, 6), fontsize=8)\n ax.set_yticklabels(np.arange(0, 6), fontsize=8)\n ax.set_aspect('equal')\n \n plt.ylabel('ET$_{mod}$ (mm d$^{-1}$)', fontsize=9)\n plt.setp(plt.gca().get_yticklabels(), fontsize=8)\n plt.xlabel('ET$_{obs}$ (mm d$^{-1}$)', fontsize=9, labelpad=-3)\n \n # soil moisture\n \n plt.subplot(3,3,(4,5))\n plt.plot(tvec, SWCa, 'o', markersize=4, alpha=0.3,label='obs')\n plt.fill_between(tvec, Wliq_low, Wliq_high, facecolor='grey', alpha=0.6, label='range')\n plt.plot(tvec, Wliq, 'k-',alpha=0.6, lw=0.5, label='mod') \n #plt.legend(loc=2, fontsize=8)\n #plt.xlim([pd.datetime(2003, 10, 1), pd.datetime(2011,1,1)])\n #plt.legend(loc=2, fontsize=8)\n plt.ylabel('$\\\\theta$ (m$^3$ m$^{-3}$)', fontsize=9)\n plt.setp(plt.gca().get_xticklabels(), fontsize=8)\n plt.setp(plt.gca().get_yticklabels(), fontsize=8)\n #plt.ylabel('$\\\\theta$ (m$^3$ m$^{-3}$)', fontsize=8)\n plt.xlim([pd.datetime(2002, 1, 1), pd.datetime(2011,1,1)])\n \n \n # scatterplot\n plt.subplot(3,3,6)\n \n meas = np.array(SWCa.values.tolist())\n ix = np.where(np.isfinite(meas))[0]\n meas = meas[ix].copy()\n mod = Wliq[ix].copy()\n slope, intercept, r_value, p_value, std_err = stats.linregress(meas, mod)\n r2 = r_value**2\n me = np.mean(mod - meas)\n \n #print slope, intercept\n xx = np.array([min(meas), max(meas)])\n plt.plot(meas, mod, 'o', markersize=4, alpha=0.3)\n plt.plot(xx, slope*xx + intercept, 'k-')\n plt.plot([0.05, 0.45], [0.05, 0.45], 'k--', linewidth=1)\n #plt.text( 0.15, 0.08, 'y = %.2f x + %.2f' %(slope, intercept), fontsize=8)\n tst = 's=%.2f\\nR$^2$=%.2f\\nME=%.2f' %(slope, r2, me)\n plt.text(0.28, 0.08, tst, fontsize=8)\n plt.xlim([0.05, 0.45]); plt.ylim([0.05, 0.45])\n \n ax = plt.gca()\n ax.set_yticks([0.1, 0.2, 0.3, 0.4])\n ax.set_xticks([0.1, 0.2, 0.3, 0.4])\n ax.yaxis.set_label_position(\"right\")\n ax.yaxis.tick_right() \n ax.set_aspect('equal') \n plt.setp(plt.gca().get_yticklabels(), fontsize=8)\n plt.setp(plt.gca().get_xticklabels(), fontsize=8)\n plt.ylabel('$\\\\theta$ mod (m$^3$ m$^{-3}$)', fontsize=9)\n plt.xlabel('$\\\\theta$ obs (m$^3$ m$^{-3}$)', fontsize=9, labelpad=-3)\n \n \n #%plot SWE\n \n plt.subplot(3,3,(7,8))\n plt.plot(SWEm, 'o', markersize=4, alpha=0.3,label='obs')\n plt.fill_between(tvec, SWE_low, SWE_high, facecolor='grey', alpha=0.6, label='range')\n plt.plot(tvec, SWE, 'k-', alpha=0.6, lw=0.7, label='mod') \n plt.legend(loc=2, fontsize=8)\n plt.ylim([-0.1, 150])\n plt.xlim([pd.datetime(2002, 1, 1), pd.datetime(2011,1,1)])\n plt.ylabel('SWE (mm)', fontsize=9)\n plt.setp(plt.gca().get_yticklabels(), fontsize=8)\n plt.setp(plt.gca().get_xticklabels(), fontsize=8)\n \n x = SWEm.values\n y = SWE\n y = y.loc[SWEm.index].values\n y = y.ravel()\n \n plt.subplot(3,3,9)\n \n slope, intercept, r_value, p_value, std_err = stats.linregress(x, y)\n r2 = r_value**2\n me = np.mean(y - x)\n \n #print slope, intercept\n xx = np.array([min(x), max(x)])\n plt.plot(x, y, 'o', markersize=4, alpha=0.3)\n plt.plot(xx, slope*xx + intercept, 'k-')\n plt.plot([-0.1, 150], [-0.1, 150], 'k--', linewidth=1)\n #plt.text( 0.15, 0.08, 'y = %.2f x + %.2f' %(slope, intercept), fontsize=8)\n tst = 's=%.2f\\nR$^2$=%.2f\\nME=%.2f' %(slope, r2, me)\n plt.text(10, 95, tst, fontsize=8)\n plt.xlim([-0.1, 150]); plt.ylim([-0.1, 150])\n ax = plt.gca()\n ax.set_yticks([0, 25, 50, 75, 100, 125])\n ax.set_xticks([0, 25, 50, 75, 100, 125])\n ax.yaxis.set_label_position(\"right\")\n ax.yaxis.tick_right() 
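        # Every scatter panel carries the same annotation: s is the fitted
        # regression slope, R^2 the coefficient of determination, and
        # ME = np.mean(mod - obs) the mean error, i.e. the model bias.
        # Worked micro-example: obs = [1, 2, 3] against mod = [1.5, 2.5, 3.5]
        # gives s = 1.00, R^2 = 1.00 and ME = +0.50 (uniform overestimation).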
\n ax.set_aspect('equal') \n plt.setp(plt.gca().get_yticklabels(), fontsize=8)\n plt.setp(plt.gca().get_xticklabels(), fontsize=8)\n plt.ylabel('SWE$_{mod}$ (mm)', fontsize=9)\n plt.xlabel('SWE$_{obs}$ (mm)', fontsize=9)\n \n\n tt = 'Fig3_FIHy.png'\n plt.savefig(tt, dpi=600)\n plt.savefig('Fig3_FIHy.pdf')\n \n ","sub_path":"make_Fig3.py","file_name":"make_Fig3.py","file_ext":"py","file_size_in_byte":7791,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"134264228","text":"##############################################################################\n#\n# Copyright (C) 2018 Daniel Yurlov\n#\n##############################################################################\n\nimport matrices as dmatr\nimport copy\nimport numpy as np\nimport math\nimport operator as op\n\nfrom matrices import Matrix\nfrom matrices import Vector\nfrom matrices import Vector3D\nfrom functools import reduce\n\n\n\n# LinInequalities3d implements system of linear inequalities Ax <= b,\n# where A is matrix mx3; x is variables vector; b is free constants vector\n#\nclass LinInequalities3d(dmatr.Matrix):\n\n default_precision = 1.0e-14\n\n # constructor that gets matrix (m*3) of coefficients and number of \n # inequalities; \n # m - number of rows (inequalities)\n # number of columns = 3 (3-dimentional space)\n #\n def __init__(self, coefficient_matrix3d, free_vect):\n self.__n_ineqs = len(coefficient_matrix3d)\n error = self.__n_ineqs == 0\n error = error | len(coefficient_matrix3d) != self.__n_ineqs\n error = error | len(coefficient_matrix3d[0]) != 3\n error = error | len(free_vect) == 0\n error = error | len(free_vect) != self.__n_ineqs\n\n if error:\n raise ValueError(\"LinInequalities3d: Dimentions of elements\\\n aren't permissible!\")\n \n dmatr.Matrix.__init__(self, coefficient_matrix3d)\n \n self.__free_vect = free_vect\n self.__row_vectors = [dmatr.Vector3D( \\\n self._Matrix__row_vectors[row_i].get_list() ) \\\n for row_i in range(self.__n_ineqs)]\n\n\n @classmethod\n def from_another(this, another):\n return copy.copy(another)\n pass\n\n\n def get_coefficient_matrix(self):\n return self._Matrix__data\n\n\n def get_free_vector(self):\n return self.__free_vect\n \n\n def get_number_of_inequalities(self):\n return self.__n_ineqs\n\n\n def __copy_constructor(self, another):\n self = another.copy()\n\n \n # Function checks whether the vector is a solution of the system\n #\n def point_is_solution(self, point, epsilon = default_precision):\n yeah = True\n for i in range(0, self.__n_ineqs):\n b = self.__row_vectors[i].get_list()\n # Calculation of the left-hand side of the i-th inequality\n dot_product = np.dot(point, b)\n diff = dot_product - self.__free_vect[i]\n if (diff > epsilon):\n yeah = False\n break\n return yeah\n\n\n def point_is_solution_ex(self, point, \\\n epsilon = default_precision, initial_planes = [], found_planes = []):\n\n yeah = True\n check_list = range(0, self.__n_ineqs)\n check_list = list( set(check_list).difference(initial_planes) )\n for i in check_list:\n b = self.__row_vectors[i].get_list()\n # Calculation of the left-hand side of the i-th inequality\n dot_product = np.dot(point, b)\n diff = dot_product - self.__free_vect[i]\n if (diff > epsilon):\n yeah = False\n break\n if math.fabs(diff) < epsilon:\n found_planes.append(i)\n return yeah\n\n\n def solve_sle(self, ineq_indices):\n if len(ineq_indices) < 3:\n return None\n else:\n try:\n coeff_matr = [self._Matrix__data[row] for row in ineq_indices]\n free_vect = 
[self.__free_vect[row] for row in ineq_indices]\n solution_point = np.linalg.solve(coeff_matr, free_vect)\n return (True, solution_point)\n except np.linalg.LinAlgError:\n solution_point = None\n return (False, [])\n\n\n def find_points(self):\n pass\n\n\n# Class: Node of SolutionGraph\n#\nclass SgNode:\n\n # Constructor of empty node\n #\n def __init__(self, planes = [], naighbors = [], label = None, point = []):\n self.planes = planes\n self.neighbors = naighbors\n self.label = label\n self.point = point \n pass\n\n\n def set_label(self, label):\n self.label = label\n\n\n def add_neighbor(self, node):\n self.neighbors.append(node)\n\n\n def associate_with_surface(self, suface_id):\n self.surfs.append(suface_id)\n\n\n\nclass SolutionGraph:\n\n # Constructor of empty graph\n #\n def __init__(self, sli: LinInequalities3d):\n self.head = None\n self.count = 0\n self.sli = sli\n self.plane_distrib = \\\n [ [] for i in range(self.sli.get_number_of_inequalities())]\n self.__init_tensor()\n pass\n\n\n @staticmethod\n def link_nodes(node1: SgNode, node2: SgNode, path):\n naigh1 = (node2, path)\n naigh2 = (node1, path)\n n1_linked = False\n n2_linked = False\n for link in node1.neighbors:\n if link[0] == node2:\n n1_linked = True\n break\n for link in node2.neighbors:\n if link[0] == node1:\n n2_linked = True\n break\n if n1_linked == False:\n node1.neighbors.append(naigh1)\n if n2_linked == False:\n node2.neighbors.append(naigh2)\n\n\n @staticmethod\n def remove_node_paths_from_list(node: SgNode, list1):\n for naigh in node.neighbors:\n SolutionGraph.remove_pair_from_list(list1, naigh[1])\n\n\n class TensorElement:\n def __init__(self, args = None):\n if args == None:\n self.checked = False\n self.node = None\n else:\n self.checked = args[0]\n self.node = args[1]\n\n\n # Initalization of triangular 3D intersection array\n #\n def __init_tensor(self):\n order = self.sli.get_number_of_inequalities()\n self.tensor = []\n for i in range(order - 2):\n mat = []\n for j in range(i + 1, order - 1):\n row = []\n for k in range(j + 1, order):\n row.append(self.TensorElement())\n mat.append(row)\n self.tensor.append(mat)\n\n\n def get_intersection(self, planes = [0, 1, 2]):\n planes.sort()\n i0 = planes[0]\n i1 = planes[1] - i0 - 1\n i2 = planes[2] - planes[1] - 1\n return self.tensor[i0][i1][i2]\n\n\n def set_intersection(self, planes = [0, 1, 2], elem = TensorElement()):\n planes.sort()\n plen = len(planes)\n if plen == 3:\n i0 = planes[0]\n i1 = planes[1] - i0 - 1\n i2 = planes[2] - planes[1] - 1\n self.tensor[i0][i1][i2] = elem\n else:\n for i in range(plen - 2):\n for j in range(i + 1, plen - 1):\n for k in range(j + 1, plen):\n planes_comb = [planes[i], planes[j], planes[k]]\n i0 = planes_comb[0]\n i1 = planes_comb[1] - i0 - 1\n i2 = planes_comb[2] - planes_comb[1] - 1\n self.tensor[i0][i1][i2] = elem\n\n\n def append(self, node):\n if (self.head == None):\n pass\n pass\n\n\n # Warning: node uniqueness is not provided by this function!\n #\n def associate_node_with_planes(self, node: SgNode, planes):\n for i in node.planes:\n self.plane_distrib[i].append(node)\n\n\n def search_solution_vector(self) -> SgNode:\n order = self.sli.get_number_of_inequalities()\n for i in range(order - 2):\n for j in range(i + 1, order - 1):\n for k in range(j + 1, order):\n planes = [i, j, k];\n res = self.sli.solve_sle(planes)\n sol = res[1]\n if res[0]: \n ex_planes = []\n if self.sli.point_is_solution_ex(sol, \\\n initial_planes=planes, \\\n found_planes=ex_planes):\n\n all_pl = list(set(planes).union(ex_planes))\n 
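                        # At this point `sol` is a confirmed vertex of the
                        # feasible region {x : A x <= b}: every vertex of a
                        # 3-D polyhedron lies on at least three boundary
                        # planes, so the triple loop above solves one 3x3
                        # system per plane triple and keeps the solution only
                        # if it satisfies all remaining inequalities. That is
                        # at most C(n, 3) = n*(n-1)*(n-2)/6 candidate systems,
                        # e.g. 1140 for n = 20 half-spaces.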
sol_node = SgNode(all_pl, [], None, sol)\n self.associate_node_with_planes(sol_node, all_pl)\n te_ok = self.TensorElement([True, sol_node])\n self.set_intersection(all_pl, te_ok)\n return sol_node\n\n te_no = self.TensorElement([False, None])\n self.set_intersection(planes, te_no)\n return None\n\n\n def search_points(self, parent: SgNode, node_stack = []):\n count = 0\n parent_planes = parent.planes.copy()\n paths = self.cartesian_product(parent_planes)\n #paths = self.remove_pair_from_list(paths, parent_path)\n self.remove_node_paths_from_list(parent, paths)\n intersect_planes = range(self.sli.get_number_of_inequalities())\n intersect_planes = \\\n list(set(intersect_planes).difference(parent.planes))\n for path in paths:\n current_ineq = None\n for i in intersect_planes:\n current_ineq = path.copy()\n current_ineq.append(i)\n check = self.get_intersection(current_ineq)\n if check.checked:\n if check.node != None:\n self.link_nodes(parent, check.node, path)\n continue\n\n res = self.sli.solve_sle(current_ineq)\n if res[0]:\n point = res[1]\n ex_planes = []\n if self.sli.point_is_solution_ex(point, \\\n initial_planes=current_ineq, found_planes=ex_planes):\n planes = list(set(current_ineq).union(ex_planes))\n sol_node = SgNode(planes, [], None, point)\n self.link_nodes(parent, sol_node, path)\n self.associate_node_with_planes(sol_node, planes)\n node_stack.append(sol_node)\n count += 1\n te_ok = self.TensorElement([True, sol_node])\n self.set_intersection(planes, te_ok)\n for ex in ex_planes:\n try:\n intersect_planes.remove(ex)\n except:\n continue\n else:\n te_no = self.TensorElement([False, None])\n self.set_intersection(current_ineq, te_no)\n continue\n return count # for path in paths\n\n\n def build(self):\n stack = []\n done = False\n self.head = None\n count = 0\n self.head = self.search_solution_vector()\n if self.head == None:\n return 0\n else:\n count = 1\n current = self.head\n while current != None:\n count += self.search_points(current, stack)\n try:\n current = stack.pop()\n except:\n current = None\n pass\n return count\n\n\n def build_direct(self):\n order = self.sli.get_number_of_inequalities()\n for i in range(order - 2):\n for j in range(i + 1, order - 1):\n for k in range(j + 1, order):\n planes = [i, j, k]\n res = self.sli.solve_sle(planes)\n if res[0]:\n point = res[1]\n ex_planes = []\n if self.sli.point_is_solution_ex(point, \\\n initial_planes=planes, \\\n found_planes=ex_planes):\n pass\n\n # Unique elements in list are required!\n #\n @staticmethod\n def cartesian_product(some_list):\n res = []\n res_i = 0\n length = len(some_list)\n for i1 in range(length):\n for i2 in range(i1 + 1, length):\n res.append( [some_list[i1], some_list[i2]] )\n res_i += 1\n return res\n\n\n # Return True if pair1 and pair2 contains equal elements\n # Equal lenght of them and unique elements in each pair are recuired!\n #\n @staticmethod\n def equal_pair(pair1, pair2):\n un = set(pair1).union(pair2)\n if len(un) == len(pair1):\n return True\n else:\n return False\n\n \n # Return difference list1\\list2,\n # where list1, list2 - lists of unique pairs for each list\n #\n @staticmethod\n def list_of_pairs_difference(list1, list2):\n len1 = len(list1)\n len2 = len(list2)\n res = []\n for i1 in range(len1):\n eq = False\n for i2 in range(len2):\n list1i = list1[i1]\n eq = SolutionGraph.equal_pair(list1i, list2[i2])\n if eq:\n break\n if not(eq):\n res.append(list1i)\n return res\n\n\n # Return difference list1/list2,\n # where list1, list2 - lists of unique pairs for each list\n #\n 
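    # The pair helpers in this class treat each two-element list as an
    # unordered pair, so the same operations can be phrased with frozensets.
    # A compact sketch of that equivalent formulation (illustrative only,
    # not used by this class):
    #
    #   def pairs_to_sets(pairs):
    #       return {frozenset(p) for p in pairs}
    #
    #   def pairs_difference(list1, list2):
    #       return [sorted(p) for p in pairs_to_sets(list1) - pairs_to_sets(list2)]
    #
    #   pairs_difference([[0, 1], [1, 2]], [[2, 1]])   # -> [[0, 1]]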
@staticmethod\n def list_of_pairs_difference(list1, list2):\n len1 = len(list1)\n len2 = len(list2)\n res = []\n for i1 in range(len1):\n eq = False\n for i2 in range(len2):\n list1i = list1[i1]\n eq = SolutionGraph.equal_pair(list1i, list2[i2])\n if eq:\n break\n if not(eq):\n res.append(list1i)\n return res\n\n\n @staticmethod\n def remove_pair_from_list(list1, pair):\n if pair == None:\n return list1\n len1 = len(list1)\n res = []\n for i1 in range(len1):\n list1i = list1[i1]\n if not (SolutionGraph.equal_pair(list1i, pair)):\n res.append(list1i)\n return res\n\n\n # Number of k-combinations of n elements\n #\n @staticmethod\n def number_of_combinations(n, k):\n dnk = n - k\n numer = 1\n denom = 0\n try:\n if (k < dnk):\n numer = reduce(op.mul, range(dnk + 1, n + 1), 1)\n denom = math.factorial(k)\n else:\n numer = reduce(op.mul, range(k + 1, n + 1), 1)\n denom = math.factorial(dnk)\n except ValueError:\n raise ValueError(\"SolutionGraph.number_of_combinations(n, k):\" + \\\n \" arguments error!\")\n return numer // denom","sub_path":"PythonPrototype/lin_inequalities_3d.py","file_name":"lin_inequalities_3d.py","file_ext":"py","file_size_in_byte":14711,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"494987190","text":"# 백준 11052 카드 구매하기\n# dp, 점화식은 dp[i] = dp[i - k] + p[k]\n# 카드를 i개 구매하는 최대 비용은 다음과 같다.\n# p[1] + dp[i-1]\n# p[2] + dp[i-2]\n# p[3] + dp[i-3]\n# . . . \n# p[i] + dp[0]\nimport sys\ninput = sys.stdin.readline\n\nn = int(input()) # 4\n# dp[i] = 카드 i개 구매하는 최대 가격, \ndp = [0 for _ in range(n+1)] # dp[0, 0, 0, 0, 0] \n# p[k] = k개가 들어있는 카드팩 가격\np = [0] + list(map(int, input().split())) # p[0, 1, 5, 6, 7] \n\nfor i in range(1, n+1): # 1,2,3,4\n for k in range(1, i+1): # 1,2,3,4\n dp[i] = max(dp[i], dp[i-k] + p[k])\n\nprint(dp[i])","sub_path":"11052.py","file_name":"11052.py","file_ext":"py","file_size_in_byte":666,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"418850807","text":"# This is a function to extract teh exposure time from image\nfrom PIL import Image\nfrom PIL.ExifTags import TAGS\n\ndef get_exif(fn):\n ret = {} #Initilise ret to null\n i = Image.open(fn) #Open the image\n info = i._getexif() #Extract the metadata from the image.\n for tag, value in info.items():\n decoded = TAGS.get(tag, tag) #This gets all the tags and value\n #print(decoded)\n ret[decoded] = value\n\t#print(decoded)\n return ret['ExposureTime'] #Return just the exposure time\n","sub_path":"DistributedRL/Aggregator/build/Code/sim/Parser/ExposureTime/exposure_time.py","file_name":"exposure_time.py","file_ext":"py","file_size_in_byte":508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"99524521","text":"def cCipher(msg, shift):\n '''\n This function takes a string and encrypts the message by \n shifting the ASCII characters by the \"shift\" value amount.\n msg: str \n shift: int\n returns: str encrypted value of string\n '''\n # loop through multiple characters\n if isinstance(msg, str):\n msg_list = list(msg)\n new_list = []\n for char in msg_list:\n new_msg = chr(ord(char) + shift)\n new_list.append(new_msg)\n str1 = ''\n print(str1.join(new_list))\n else:\n print('The message is not a string.')\n\ncCipher('Mark', 3)","sub_path":"cCipher.py","file_name":"cCipher.py","file_ext":"py","file_size_in_byte":597,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"69952316","text":"import 
signal, os, time, sys\nfrom multiprocessing import Process\n\ndef my_handler(signum, stack_frame):\n print(f\"I have encountered the signal {signum}\")\n time.sleep(3)\n raise SystemExit('parent exit')\n # sys.exit(\"parent exit\")\n\ndef trigger_signal(pid, signum):\n time.sleep(3)\n print(f\"trigger signal {signum} in pid: {pid}\")\n # appel système à kill -10 pid\n os.kill(pid, signum)\n sys.exit(\"child exit\")\n\n# on associe la réception d'un signal SIGUSR1 à l'exécution de my_handler\n#signal.signal(signal.SIGUSR1, my_handler)\n# on désactive SIGINT (Ctrl C) patr SIG_IGN (IGNORE)\n#signal.signal(signal.SIGINT, signal.SIG_IGN)\nsignal.signal(signal.SIGINT, my_handler)\n\np = Process(target=trigger_signal, args=(os.getpid(), signal.SIGUSR1))\np.start()\n\nprint(\"waiting for signal SIGUSR1 to terminate\")\nsignal.pause()","sub_path":"process/signals.py","file_name":"signals.py","file_ext":"py","file_size_in_byte":841,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"63050355","text":"from text_processor import *\nimport constant as constant\n\n\nclass Extractor:\n @staticmethod\n def extract_dimension_in_cm(dim):\n result = constant.STR_NOT_PROCESSED\n if dim.lower() == constant.STR_DIMENSION_UNAVAILABLE:\n return result\n if not constant.STR_CM in dim:\n return result\n\n cm_str = TextProcessor.process_brackets(dim)\n bracket_data = TextProcessor.extract_all_brackets_with_data(cm_str, [])\n bracket_count = len(bracket_data)\n\n if bracket_count == 0: # 'l.17xw.31/2inches43.2x8.9cm'\n if constant.STR_INCHES in cm_str:\n cm_data = [x for x in cm_str.split(constant.STR_INCHES) if constant.STR_CM in x]\n if cm_data:\n found = TextProcessor.extract_only_valid_dimension(cm_data[0])\n result = found\n else:\n result = TextProcessor.extract_only_valid_dimension(cm_str)\n elif bracket_count >= 1:\n if bracket_count in [1, 2, 3] and all(['x' not in x for x in bracket_data]) \\\n and all(\n TextProcessor.extract_regex_pattern_1d(x) != constant.STR_NOT_PROCESSED for x in bracket_data):\n cm_data = 'x'.join(TextProcessor.extract_regex_pattern_1d(x) for x in bracket_data)\n result = cm_data\n else:\n cm_data = [TextProcessor.extract_cm_inside_single_bracket(x) for x in bracket_data if\n constant.STR_CM in x]\n result = cm_data[\n 0] if cm_data else constant.STR_NOT_PROCESSED # get only first if multiple pairs exists\n\n if result == constant.STR_NOT_PROCESSED:\n if constant.CNF_ENABLE_DIRTY_GUESS: # mostly for more than 3 brackets select only first and ambiguios data 'diam:31/4in.(8.3cm)mount:201/2x15x7/8in.(52.1x38.1x2.2cm)'\n result = TextProcessor.extract_only_valid_dimension(\n bracket_data[0] if bracket_data else cm_str) # dirty guess\n if result == constant.STR_NOT_PROCESSED:\n s = \"\" # breakpoint to debug if dimension is still not processed\n return result\n","sub_path":"extractor.py","file_name":"extractor.py","file_ext":"py","file_size_in_byte":2197,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"121531409","text":"# -*- coding: utf-8 -*-\n\"\"\"\nv2.1 更新说明\n补充 探尺差, 实际风速, 炉腹煤气指数 烧结矿<5mm\n\"\"\"\nimport numpy as np\nimport pandas as pd\nfrom matplotlib import pyplot as plt\n\nplt.rcParams['font.sans-serif'] = ['SimHei']\nplt.rcParams['axes.unicode_minus'] = False\n'''\n\n整理19年10月-20年2月数据\n需要以下目录文件:\n./data/西昌2#高炉数据19年10-11月/pkl/\n 西昌2#高炉采集数据表_送风系统.pkl\n 西昌2#高炉采集数据表_喷吹系统.pkl\n 铁水实绩表.pkl\n 上料实绩表.pkl\n 铁水成分表.pkl\n 炉渣成分表.pkl\n 上料质量表.pkl\n./data/西昌2#高炉数据19年10-11月/ \n 
铁次时间.xlsx\n./data/西昌2#高炉数据19年12月-20年2月/origin/\n 铁次时间.xlsx\n./data/西昌2#高炉数据19年12月-20年2月/pkl/\n 西昌2#高炉采集数据表_送风系统.pkl\n 西昌2#高炉采集数据表_喷吹系统.pkl\n 西昌2#高炉-炉渣成分表.pkl \n 西昌2#高炉-上料实绩表.pkl\n 西昌2#高炉-上料质量表.pkl\n 西昌2#高炉-铁水实绩表.pkl\n 西昌2#高炉-铁水成分表.pkl\n'''\n\nPATH_DICT = {\n 0: './data/西昌2#高炉数据19年10-11月/pkl/',\n 1: './data/西昌2#高炉数据19年10-11月/',\n 2: './data/西昌2#高炉数据19年12月-20年2月/pkl/',\n 3: './data/西昌2#高炉数据19年12月-20年2月/origin/ '}\n\n\nclass DailyDate:\n\n def __init__(self, index):\n \"\"\"\n :param index: 时间数组\n 初始化结果\n \"\"\"\n self.res = pd.DataFrame(data=None, index=index)\n\n def get_yield(self, file_pkl):\n ''' \n Parameters\n ----------\n file_pkl : TYPE str\n 铁水实绩表 pkl 文件路径\n Returns\n -------\n None.\n\n '''\n df = pd.read_pickle(file_pkl)\n # 格式转换 \n df['业务处理时间'] = pd.to_datetime(df['业务处理时间'])\n df['采集项值'] = pd.to_numeric(df['采集项值'])\n df1 = df[['业务处理时间', '采集项值', '铁次号', '罐号']].set_index('业务处理时间')\n df2 = df1.drop_duplicates() # 去除重复数据\n df3 = df2.groupby('业务处理时间').sum()\n self.res['日产量'] = df3['采集项值']\n return None\n\n def get_coke(self, file_pkl):\n ''' \n Parameters\n ----------\n file_pkl : TYPE str\n 西昌2#高炉-上料质量表 pkl 文件路径\n Returns\n -------\n None.\n\n '''\n param_list = [\n '焦炭粒度、冷强度_M40',\n '焦炭粒度、冷强度_M10',\n '焦炭工分_St',\n '焦炭热性能_CRI',\n '焦炭热性能_CSR']\n\n df = pd.read_pickle(file_pkl)\n df['业务处理时间'] = pd.to_datetime(df['业务处理时间'])\n df['采集项值'] = pd.to_numeric(df['采集项值'])\n for param in param_list:\n temp = df.groupby('采集项名称').get_group(param)\n temp1 = temp.groupby('业务处理时间').mean()\n self.res[param] = temp1['采集项值'].copy()\n return None\n\n def get_molten_iron(self, file_pkl):\n '''\n Parameters\n ----------\n file_pkl : TYPE str\n 西昌2#高炉-铁水成分表.pkl 文件路径\n Returns\n -------\n None.\n\n '''\n param_list = [\n '[C]',\n '[Ti]',\n '[Si]',\n '[S]'] # 还有 Delta[Ti]\n\n df_iron_comp = pd.read_pickle(file_pkl)\n df_iron_comp = df_iron_comp[df_iron_comp['铁次号'] >= '20000000'] # 提取出#2高炉的数据\n df_iron_comp = df_iron_comp[df_iron_comp['铁次号'] < '30000000']\n df = df_iron_comp\n df['业务处理时间'] = pd.to_datetime(df['业务处理时间'])\n df['采集项值'] = pd.to_numeric(df['采集项值'])\n for param in param_list:\n temp = df.groupby('采集项名称').get_group(param)\n temp = temp[['业务处理时间', '采集项值']]\n temp1 = temp.groupby('业务处理时间').mean()\n if param == '[Ti]':\n self.res['Delta[Ti]'] = (temp.groupby('业务处理时间')['采集项值'].apply(np.max) -\n temp.groupby('业务处理时间')['采集项值'].apply(np.min))\n self.res[param] = temp1['采集项值'].copy()\n return None\n\n def get_slag(self, file_pkl):\n '''\n Parameters\n ----------\n file_pkl : TYPE str\n 西昌2#高炉-炉渣成分表.pkl 文件路径\n Returns\n -------\n None.\n\n '''\n df_slag = pd.read_pickle(file_pkl)\n df_slag = df_slag[df_slag['铁次号'] >= '20000000'] # 提取出#2高炉的数据\n df_slag = df_slag[df_slag['铁次号'] < '30000000']\n df = df_slag\n\n df['业务处理时间'] = pd.to_datetime(df['业务处理时间'])\n df['采集项值'] = pd.to_numeric(df['采集项值'])\n\n param_list = [\n '(CaO)',\n '(SiO2)',\n '(MgO)',\n '(TiO2)',\n '(Al2O3)']\n\n for param in param_list:\n temp = df.groupby('采集项名称').get_group(param)\n temp1 = temp.groupby('业务处理时间').mean()\n\n self.res[param] = temp1['采集项值'].copy()\n\n self.res['R2'] = self.res['(CaO)'] / self.res['(SiO2)']\n self.res['R3'] = (self.res['(CaO)'] + self.res['(MgO)']) / self.res['(SiO2)']\n self.res['镁铝比'] = self.res['(MgO)'] / self.res['(Al2O3)']\n\n # DeltaR2处理\n CaO = df.groupby('采集项名称').get_group('(CaO)') # 筛选\n CaO2 = CaO.groupby(['铁次号', '业务处理时间'], as_index=False).mean().set_index('铁次号')\n\n SiO = df.groupby('采集项名称').get_group('(SiO2)') # 筛选\n SiO2 = SiO.groupby('铁次号').mean()\n\n CaO2['SiO2'] = SiO2['采集项值']\n CaO2['R2'] = CaO2['采集项值'] 
/ CaO2['SiO2']\n self.res['DeltaR2'] = CaO2.groupby('业务处理时间')['R2'].mean()\n return None\n\n def get_ratio(self, file_pkl1, file_pkl2):\n '''\n Parameters\n ----------\n file_pkl1 : TYPE str\n 上料实绩表 文件路径\n file_pkl2 : TYPE str\n 西昌2#高炉采集数据表_喷吹系统 文件路径\n Returns\n -------\n None.\n\n '''\n ### 焦比\n df = pd.read_pickle(file_pkl1)\n df['采集项值'] = pd.to_numeric(df['采集项值'])\n df['业务处理时间'] = pd.to_datetime(df['业务处理时间']).dt.floor('d')\n\n df_dict = {}\n for param in ['冶金焦(自产)', '小块焦']:\n df1 = df.groupby('采集项名称').get_group(param)\n df1_1 = df1[['业务处理时间', '采集项值', '上料批号']].set_index('业务处理时间')\n df1_2 = df1_1.drop_duplicates() # 去除重复数据\n df1_3 = df1_2.groupby('业务处理时间').sum()['采集项值']\n df_dict[param] = df1_3\n\n self.res['焦比'] = df_dict['冶金焦(自产)'] + df_dict['小块焦']\n self.res['焦比'] = self.res['焦比'] / self.res['日产量'] * 1000\n\n # # 煤比 \n # 喷吹速率\n df = pd.read_pickle(file_pkl2)\n df['采集项值'] = pd.to_numeric(df['采集项值']) # 格式化\n df['业务处理时间'] = pd.to_datetime(df['业务处理时间']) # 格式化\n df = df.groupby('采集项名称').get_group('喷吹速率')[['业务处理时间', '采集项值']]\n df = df.set_index('业务处理时间').sort_index()\n df['采集项值'][df['采集项值'] > 1e4] = None # 去除1e9\n df_1T = df.resample('1T').mean()\n df_1T = df_1T.interpolate(method='linear')\n daily = df_1T.resample('1D').sum()\n self.res['煤比'] = daily.采集项值 / self.res.日产量 * 20\n\n self.res['燃料比'] = self.res['焦比'] + self.res['煤比']\n return None\n\n def get_wind(self, file_pkl):\n ''' \n file_pkl1 : TYPE str\n 西昌2#高炉采集数据表_送风系统 文件路径\n \n '标准风速'\n '西昌2#高炉采集数据表_送风系统'\n \n 实际风速=标准风速*(0.101325/273)*((273+风温)/(风压/10+0.101325))\n \n 标准风速 245 m3/s 标准风速\n 风温 1212 摄氏度\t 热风温度\n 风压 3.45 0.1MPa\t 热风压力\n \n 18年的实际风速平均 298\n '''\n # path = PATH_DICT[0] \n # file = '西昌2#高炉采集数据表_送风系统.pkl'\n df = pd.read_pickle(file_pkl)\n\n # 格式化\n df['业务处理时间'] = pd.to_datetime(df['业务处理时间'])\n df['采集项值'] = pd.to_numeric(df['采集项值'])\n\n res = []\n param_list = ['标准风速', '热风温度', '热风压力']\n for param in param_list:\n temp = df.groupby('采集项名称').get_group(param).set_index('业务处理时间')\n temp.rename(columns={'采集项值': param}, inplace=True)\n temp[param][temp[param] > 1e7] = None\n res.append(temp.resample('24h').mean())\n cat = pd.concat(res, axis=1)\n\n cat['实际风速'] = cat['标准风速'] * (0.101325 / 273) * ((273 + cat['热风温度']) / (cat['热风压力'] / 1000 + 0.101325))\n self.res['实际风速'] = cat['实际风速']\n\n return None\n\n def get_rod_range(self, file_pkl):\n \"\"\"\n 计算探尺差\n 探尺差\n 西昌2#高炉采集数据表_上料系统\n \"\"\"\n df = pd.read_pickle(file_pkl) # 导入\n\n # 格式化\n df['业务处理时间'] = pd.to_datetime(df['业务处理时间'])\n df['采集项值'] = pd.to_numeric(df['采集项值'])\n\n # 把三个探尺高度筛选出来\n brothel = ['探尺(南)', '探尺(东)', '探尺(西)']\n hookers = []\n for hooker_name in brothel:\n hooker = df.groupby('采集项名称').get_group(hooker_name).set_index('业务处理时间') # 筛选\n hooker.drop(columns=['采集项编码', '采集项名称'], inplace=True)\n hooker.rename(columns={'采集项值': hooker_name}, inplace=True)\n\n hooker[hooker_name][hooker[hooker_name] > 1e7] = None # 去除1e7 的异常值\n hooker[hooker_name].drop_duplicates(keep=False, inplace=True) # 去除数据源中同一时刻的重复采样\n hookers.append(hooker)\n\n # 找出 所有 在同一时刻 三个探尺高度数据都不缺失的样本\n temp = pd.merge(hookers[0], hookers[1], how=\"inner\", left_index=True, right_index=True)\n blondie = pd.merge(temp, hookers[2], how=\"inner\", left_index=True, right_index=True)\n # 计算极差\n blondie['探尺差'] = blondie.max(axis=1) - blondie.min(axis=1)\n # 日平均\n wife = blondie['探尺差'].resample(\"24h\").mean()\n self.res['探尺差'] = wife\n return None\n\n def get_gas(self, file_pkl):\n '''\n 炉腹煤气指数计算:\n \n 炉腹煤气发生量/(9.5*9.5*3.14/4)\n \n 文件名:\n 西昌2#高炉采集数据表_高炉本体(炉顶,炉喉,炉身,炉腹)\n '''\n\n df = pd.read_pickle(file_pkl)\n df = 
df.groupby(\"采集项名称\").get_group('炉腹煤气发生量')\n\n # 格式化\n df.loc[:, '业务处理时间'] = pd.to_datetime(df['业务处理时间'])\n df.loc[:, '采集项值'] = pd.to_numeric(df['采集项值'])\n df.set_index('业务处理时间', inplace=True)\n df['采集项值'][df['采集项值'] > 1e7] = None\n taylor = df.resample(\"24h\").mean()\n self.res['炉腹煤气指数'] = taylor / (9.5 * 9.5 * 3.14 / 4)\n\n return None\n\n def get_sinter(self, data_select):\n \"\"\"\n 获取烧结矿<5mm的每日数据\n :param data_select: 2 表示19年12月之后的数据\n :return:\n \"\"\"\n # data_select = 2 # 选择19年12月之后的数据\n param = '高炉沟下烧结矿粒度_筛分指数(<5mm)'\n file = {0: '上料质量表.pkl', 2: '西昌2#高炉-上料质量表.pkl'}\n\n df = pd.read_pickle(PATH_DICT[data_select] + file[data_select])\n\n # 如果没有\n if not np.any(df['采集项名称'].isin([param])):\n self.res['烧结矿<5mm比例'] = None\n return None\n df = df.groupby('采集项名称').get_group(param)\n df['业务处理时间'] = df['业务处理时间'].apply(pd.to_datetime)\n df['采集项值'] = df['采集项值'].apply(pd.to_numeric)\n df = df.set_index('业务处理时间')\n daily = df.resample('24h').mean()\n self.res['烧结矿<5mm比例'] = daily\n return None\n\n\ndef main():\n\n index20 = pd.date_range('2019-12-01 00:00:00', '2020-2-15 00:00:00', freq='1D')\n daily20 = DailyDate(index20)\n\n index19 = pd.date_range('2019-10-01 00:00:00', '2019-11-30 23:59:59', freq='1D')\n daily19 = DailyDate(index19)\n\n daily20.get_yield(PATH_DICT[2] + '西昌2#高炉-铁水实绩表.pkl')\n daily20.get_coke(PATH_DICT[2] + '西昌2#高炉-上料质量表.pkl')\n daily20.get_molten_iron(PATH_DICT[2] + '西昌2#高炉-铁水成分表.pkl')\n daily20.get_slag(PATH_DICT[2] + '西昌2#高炉-炉渣成分表.pkl')\n daily20.get_ratio(PATH_DICT[2] + '西昌2#高炉-上料实绩表.pkl', PATH_DICT[2] + '西昌2#高炉采集数据表_喷吹系统.pkl')\n daily20.get_wind(PATH_DICT[2] + '西昌2#高炉采集数据表_送风系统.pkl')\n daily20.get_rod_range(PATH_DICT[2] + '西昌2#高炉采集数据表_上料系统.pkl')\n\n daily19.get_yield(PATH_DICT[0] + '铁水实绩表.pkl')\n daily19.get_coke(PATH_DICT[0] + '上料质量表.pkl')\n daily19.get_molten_iron(PATH_DICT[0] + '铁水成分表.pkl')\n daily19.get_slag(PATH_DICT[0] + '炉渣成分表.pkl')\n daily19.get_ratio(PATH_DICT[0] + '上料实绩表.pkl', PATH_DICT[0] + '西昌2#高炉采集数据表_喷吹系统.pkl')\n daily19.get_wind(PATH_DICT[0] + '西昌2#高炉采集数据表_送风系统.pkl')\n daily19.get_rod_range(PATH_DICT[0] + '西昌2#高炉采集数据表_上料系统.pkl')\n\n daily19.get_gas(PATH_DICT[0] + '西昌2#高炉采集数据表_高炉本体(炉顶,炉喉,炉身,炉腹).pkl')\n daily20.get_gas(PATH_DICT[2] + '西昌2#高炉采集数据表_高炉本体(炉顶,炉喉,炉身,炉腹).pkl')\n\n daily19.get_sinter(0)\n daily20.get_sinter(2)\n\n res = pd.concat([daily19.res, daily20.res])\n return res\n\nif __name__ == '__main__':\n \"\"\"\n 各个指标的处理说明\n https://docs.qq.com/sheet/DTnRobmxQbUxIUU9a?tab=5u26wg&c=A1A0A0\n \"\"\"\n\n res = main()\n\n # index20 = pd.date_range('2019-12-01 00:00:00', '2020-2-15 00:00:00', freq='1D')\n # daily20 = DailyDate(index20)\n\n # index19 = pd.date_range('2019-10-01 00:00:00', '2019-11-30 23:59:59', freq='1D')\n # daily19 = DailyDate(index19)\n\n # daily19.get_sinter(0)\n # daily20.get_sinter(2)\n\n # res = pd.concat([daily19.res, daily20.res])","sub_path":"project/proj-DailyData/数据日报表化v2.1.py","file_name":"数据日报表化v2.1.py","file_ext":"py","file_size_in_byte":15239,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"408290483","text":"from copy import deepcopy as copy\nimport numpy as np\nfrom math import inf\n\n\"\"\"\nName\t\t: Setu Gupta\nRoll no.\t: 2018190\nQuestion 4 of HW2. 
Solving for v_*(s) and pi_*(s) for gridworld example.\n\"\"\"\n\n\n\"\"\"\nThis class has the mapping from every state to all possible actions and next states corresponding to them.\n\"\"\"\nclass states_and_actions:\n\n\tstates = set()\t\t# Set of all states\n\taction_set = {}\t\t# Dictionary having mapping between a state and possible actions. {state: set(actions)}\n\tnext_states_rewards = {}\t# Dictionary having mapping between state action pair and next state and reward pair(s). {(state, action) : [state_reward, ...]}\n\tstate_value = {}\t# Dictionary having mapping between state and its state value. {state: v_pi(s)}\n\n\tdef __init__(self):\n\t\tpass\n\n\t\"\"\"\n\tAdds a state to the set of states.\n\tArgs:\n\t\tstate\t: State to be added\n\tRets:\n\t\tNone \t\n\t\"\"\"\n\tdef add_state(self, state):\n\t\tself.states.add(state)\t# Add another state\n\t\tself.state_value[state] = 0\t# Initialize with 0\n\n\t\"\"\"\n\tAdds action to action set of a state.\n\tArgs:\n\t\tstate\t\t\t\t: State of whos action set is to be used\n\t\taction\t\t\t\t: Action to be added\n\t\tnext_states_rewards\t: [(state, reward), ...] A list of next states\n\tRets:\n\t\tNone \t\n\t\"\"\"\n\tdef add_action(self, state, action, next_states_rewards):\n\t\tif state not in self.action_set:\n\t\t\tself.action_set[state] = []\n\n\t\tself.action_set[state].append(action)\n\t\t\n\t\tstate_action_pair = (state, action)\n\t\tif state_action_pair not in self.next_states_rewards:\n\t\t\tself.next_states_rewards[state_action_pair] = []\n\n\t\tself.next_states_rewards[state_action_pair].append(next_states_rewards)\n\n\n\t\"\"\"\n\tResets state values for all states\n\t\"\"\"\n\tdef reset_state_value(self):\n\t\tfor s in self.states:\n\t\t\tself.state_value[s] = 0\n\n\t\"\"\"\n\tSets state values for a state\n\t\"\"\"\n\tdef set_state_value(self, state, value):\n\t\tself.state_value[state] = value\n\n\n\t\"\"\"\n\tGetters below\n\t\"\"\"\n\tdef get_states(self):\n\t\treturn self.states\n\n\tdef get_actions(self, state):\n\t\treturn self.action_set[state]\n\n\tdef get_next_states_rewards(self, state, action):\n\t\treturn self.next_states_rewards[(state, action)]\n\n\tdef get_state_value(self, state):\n\t\treturn self.state_value[state]\n\n\"\"\"\nThis class holds all the information about the environment.\nThis class has all the probabilities.\n\"\"\"\nclass MDP:\n\tmdp = {}\t# This is a dictionary mapping {(current_state, action, next_state, reward): probability}\n\n\tdef __init__(self):\n\t\tpass\n\n\tdef set_probability(self, state, action, next_state, reward, probability):\n\t\tself.mdp[(state, action, next_state, reward)] = probability\n\n\t\"\"\"\n\tGetters below\n\t\"\"\"\n\tdef get_probability(self, state, action, next_state, reward):\n\t\treturn self.mdp[(state, action, next_state, reward)]\n\n\"\"\"\nThis class defines a policy\nA policy is a mapping between a state and all possible actions from that state to probabilities\n\"\"\"\nclass policy:\n\tpi = {}\t\t# A mapping between {(state, action): probability}\n\tstates_and_actions = None\t# A reference to states and actions\n\tgreedily_set = False\t# This variable is true if the policy is greedily set\n\n\tGREEDY_COMPARE_THRESH = 1e-6\t# Comparing threshold. 
If probability is greater than this, then we assume that it's non zero\n\n\t\"\"\"\n\tConstructor initializes policy as equiprobable for every state\n\t\"\"\"\n\tdef __init__(self, states_and_actions):\n\n\t\tself.states_and_actions = states_and_actions\n\t\tstates = states_and_actions.get_states()\n\n\t\tfor s in states:\n\t\t\tactions = states_and_actions.get_actions(s)\n\t\t\t\n\t\t\ttotal_actions = len(actions)\n\t\t\tfor a in actions:\n\t\t\t\tstate_action_pair = (s, a)\n\t\t\t\tself.pi[state_action_pair] = 1/total_actions\n\n\n\t\"\"\"\n\tGreedily sets action for state i.e. pi(a | s) = 1\n\tIt returns true if the policy was greedily set and didn't change\n\t\"\"\"\n\tdef set_greedy(self, state, actions):\n\n\t\t# Find the set of greedy actions\n\t\told_greedy_actions = []\n\t\tif(self.greedily_set):\n\t\t\tfor a in self.states_and_actions.get_actions(state):\n\t\t\t\tstate_action_pair = (state, a)\n\t\t\t\tif(self.pi[state_action_pair] >= self.GREEDY_COMPARE_THRESH):\n\t\t\t\t\told_greedy_actions.append(a)\n\n\t\tfor a in self.states_and_actions.get_actions(state):\n\t\t\tstate_action_pair = (state, a)\n\t\t\tself.pi[state_action_pair] = 0\n\n\t\tfor a in actions:\n\t\t\tstate_action_pair = (state, a)\n\t\t\tself.pi[state_action_pair] = 1/len(actions)\n\n\t\tif(sorted(actions) == sorted(old_greedy_actions) and self.greedily_set):\t# This ensures that that true is is returned only if old and new policy were identical \n\t\t\treturn True\n\n\t\tself.greedily_set = True\n\t\treturn False\n\n\t\"\"\"\n\tGetters below\n\t\"\"\"\n\tdef get_probability(self, state, action):\n\t\tstate_action_pair = (state, action)\n\t\treturn self.pi[state_action_pair]\n\n\tdef get_action(self, state):\n\t\tprob = []\t# Probability distribution\n\t\tactions = []\n\t\tfor a in states_and_actions.get_action(state):\n\t\t\tactions.append(a)\n\t\t\tprob.append(self.pi[(state, a)])\n\n\t\treturn np.random.choice(actions, p = prob)\n\n\t\"\"\"\n\tReturns a list of all greeily chosen actions for state\n\t\"\"\"\n\tdef get_all_greedy_actions(self, state):\n\t\tif(self.greedily_set):\n\t\t\tgreedy_actions = []\n\t\t\tfor a in self.states_and_actions.get_actions(state):\n\t\t\t\tstate_action_pair = (state, a)\n\t\t\t\tif(self.pi[state_action_pair] >= self.GREEDY_COMPARE_THRESH):\n\t\t\t\t\tgreedy_actions.append(a)\n\t\t\treturn greedy_actions\n\t\treturn []\n\n\nclass policy_evaluator:\n\tstates_and_actions = None\n\tpolicy = None\n\tmdp = None\n\tdiscount = 1\n\n\tdef __init__(self, policy, states_and_actions, mdp, discount):\n\t\tself.policy = policy\n\t\tself.states_and_actions = states_and_actions\n\t\tself.mdp = mdp\n\t\tself.discount = discount\n\n\t\"\"\"\n\tEvaluates v_pi(s) via solving linear equations \n\tNOTE: ONLY WORKS FOR GRIDWORLD\n\t\"\"\"\n\tdef linear(self):\n\t\tA = np.zeros((25,25))\n\t\tB = np.zeros(25)\n\n\t\t# Construct equations\n\t\tfor s in self.states_and_actions.get_states():\n\t\t\ts_idx = self.__get_index_from_state(s)\n\t\t\tA[s_idx][s_idx] = 1\n\t\t\tfor a in self.states_and_actions.get_actions(s):\n\t\t\t\tfor (ns, r) in self.states_and_actions.get_next_states_rewards(s, a):\n\t\t\t\t\tns_idx = self.__get_index_from_state(ns)\n\t\t\t\t\tA[s_idx][ns_idx] -= self.policy.get_probability(s, a) * self.mdp.get_probability(s,a,ns,r) * self.discount\n\t\t\t\t\tB[s_idx] += self.policy.get_probability(s, a) * self.mdp.get_probability(s,a,ns,r) * r\n\n\t\t# Solve equations\n\t\tsolution = np.linalg.solve(A,B)\n\n\t\t# Store results\n\t\tfor s_idx in range(len(solution)):\n\t\t\ts = 
self.__get_state_from_idx(s_idx)\n\t\t\tself.states_and_actions.set_state_value(s, solution[s_idx])\n\n\tdef __get_index_from_state(self, state):\n\t\treturn state[0] + state[1]*5\n\t\n\tdef __get_state_from_idx(self, idx):\n\t\treturn (idx%5, idx//5)\n\n\t\"\"\"\n\tEstimates v_pi(s) iteratively\n\tArgs:\n\t\ttheta\t: Error bound for comparision and breaking (-1 if doing value iterations i.e brak after one run)\n\t\"\"\"\n\tdef iterative(self, theta):\n\t\titeration_count = 0\n\t\tdelta = inf\n\t\twhile(delta > theta):\n\t\t\titeration_count += 1\n\t\t\tdelta = 0\n\t\t\tfor s in self.states_and_actions.get_states():\n\t\t\t\told_state_value = self.states_and_actions.get_state_value(s)\n\t\t\t\t\n\t\t\t\t# Calculate new state value\n\t\t\t\tnew_state_value = 0\n\t\t\t\tfor a in self.states_and_actions.get_actions(s):\n\t\t\t\t\tfor (ns, r) in self.states_and_actions.get_next_states_rewards(s, a):\n\t\t\t\t\t\tnew_state_value += self.policy.get_probability(s,a) * self.mdp.get_probability(s,a,ns,r) * (r + self.discount* self.states_and_actions.get_state_value(ns))\n\n\t\t\t\tself.states_and_actions.set_state_value(s, new_state_value)\t# Update to new value\n\n\t\t\t\tdelta = max(delta, abs(new_state_value - old_state_value))\n\t\t\t\n\t\t\tprint(\"Policy evaluated\", iteration_count, \"times\")\n\t\t\tself.__pretty_printing()\n\t\t\tif(theta == -1):\n\t\t\t\tbreak\n\n\tdef __pretty_printing(self):\n\t\tfor i in range(25):\n\t\t\tstate = (i%5, i//5)\n\t\t\tval = round(self.states_and_actions.get_state_value(state), 1)\n\t\t\tprint(val, end = \"\\t\")\n\t\t\tif(i % 5 == 4):\n\t\t\t\tprint()\n\n\n\nclass policy_improver:\n\tstates_and_actions = None\n\tpolicy = None\n\tdiscount = 1\n\n\tARGMAX_BOUND = 0.1\t# The bound for maximum comparision\n\n\tdef __init__(self, policy, states_and_actions, mdp, discount):\n\t\tself.policy = policy\n\t\tself.states_and_actions = states_and_actions\n\t\tself.mdp = mdp\n\t\tself.discount = discount\n\n\t\"\"\"\n\tRuns policy improvement step\n\tReturns true of policy was stable\n\t\"\"\"\n\tdef improve(self):\n\t\tstable = True\n\t\t\n\t\t# Find argmax for all states\n\t\tfor s in self.states_and_actions.get_states():\n\t\t\taction_returns = []\t# [(action, expected return), ...] 
for all actions from state s\n\t\t\tfor a in self.states_and_actions.get_actions(s):\n\t\t\t\texpected_return = 0\n\t\t\t\tfor (ns, r) in self.states_and_actions.get_next_states_rewards(s, a):\n\t\t\t\t\texpected_return += self.mdp.get_probability(s,a,ns,r) * (r + self.discount * self.states_and_actions.get_state_value(ns))\n\t\t\t\taction_returns.append((a, expected_return))\n\n\t\t\tgreedy_actions = self.__argmax(action_returns)\n\t\t\tstable &= self.policy.set_greedy(s, greedy_actions)\n\n\t\treturn stable\n\n\n\t\"\"\"\n\tTakes in a list of [(action, expected return)] and returns a list [action] of for actions with maximum return\n\t\"\"\"\n\tdef __argmax(self, action_returns):\n\t\tmax_actions = []\n\t\tmax_return = -1e10\n\n\t\tfor (a, er) in action_returns:\n\t\t\tif(max_return + self.ARGMAX_BOUND < er):\n\t\t\t\tmax_actions.clear()\n\t\t\t\tmax_actions.append(a)\n\t\t\t\tmax_return = er\n\t\t\telif(max_return - self.ARGMAX_BOUND < er):\n\t\t\t\tmax_actions.append(a)\n\n\t\treturn max_actions\n\nclass value_iterator:\n\tpolicy = None\n\tstates_and_actions = None\n\tdiscount = None\n\tmdp = None\n\n\tARGMAX_BOUND = 1e-2\t# The bound for maximum comparision\n\n\tdef __init__(self, policy, states_and_actions, mdp, discount):\n\t\tself.policy = policy\n\t\tself.states_and_actions = states_and_actions\n\t\tself.mdp = mdp\n\t\tself.discount = discount\n\n\t# Does value iterations\n\tdef iterate(self, theta):\n\t\titeration_count = 0\n\t\tdelta = inf\n\t\twhile(delta > theta):\n\t\t\titeration_count += 1\n\t\t\tdelta = 0\n\t\t\tfor s in self.states_and_actions.get_states():\n\t\t\t\told_state_value = self.states_and_actions.get_state_value(s)\n\t\t\t\t\n\t\t\t\t# Calculate new state value\n\t\t\t\tnew_state_value = 0\n\t\t\t\tfor a in self.states_and_actions.get_actions(s):\n\t\t\t\t\ttemp_new_state_value = 0\t# Accumalator to sum over all (s', r) pairs\n\t\t\t\t\tfor (ns, r) in self.states_and_actions.get_next_states_rewards(s, a):\n\t\t\t\t\t\ttemp_new_state_value += self.mdp.get_probability(s,a,ns,r) * (r + self.discount* self.states_and_actions.get_state_value(ns))\n\t\t\t\t\tnew_state_value = max(temp_new_state_value, new_state_value)\t# Take max over all actions\n\t\t\t\t\n\t\t\t\tself.states_and_actions.set_state_value(s, new_state_value)\t# Update to new value\n\n\t\t\t\tdelta = max(delta, abs(new_state_value - old_state_value))\n\t\t\t\n\t\t\tprint(\"Value iterated\", iteration_count, \"times\")\n\t\t\tself.__pretty_printing()\n\t\t\tif(theta == -1):\n\t\t\t\tbreak\n\n\tdef set_policy(self):\n\n\n\t\t# Find argmax for all states\n\t\tfor s in self.states_and_actions.get_states():\n\t\t\taction_returns = []\t# [(action, expected return), ...] 
for all actions from state s\n\t\t\tfor a in self.states_and_actions.get_actions(s):\n\t\t\t\texpected_return = 0\n\t\t\t\tfor (ns, r) in self.states_and_actions.get_next_states_rewards(s, a):\n\t\t\t\t\texpected_return += self.mdp.get_probability(s,a,ns,r) * (r + self.discount * self.states_and_actions.get_state_value(ns))\n\t\t\t\taction_returns.append((a, expected_return))\n\t\t\t\n\t\t\tgreedy_actions = self.__argmax(action_returns)\n\t\t\tself.policy.set_greedy(s,greedy_actions)\n\n\t\"\"\"\n\tTakes in a list of [(action, expected return)] and returns a list [action] of for actions with maximum return\n\t\"\"\"\n\tdef __argmax(self, action_returns):\n\t\tmax_actions = []\n\t\tmax_return = -1e10\n\n\t\tfor (a, er) in action_returns:\n\t\t\tif(max_return + self.ARGMAX_BOUND < er):\n\t\t\t\tmax_actions.clear()\n\t\t\t\tmax_actions.append(a)\n\t\t\t\tmax_return = er\n\t\t\telif(max_return - self.ARGMAX_BOUND < er):\n\t\t\t\tmax_actions.append(a)\n\n\t\treturn max_actions\n\n\tdef __pretty_printing(self):\n\t\tfor i in range(25):\n\t\t\tstate = (i%5, i//5)\n\t\t\tval = round(self.states_and_actions.get_state_value(state), 1)\n\t\t\tprint(val, end = \"\\t\")\n\t\t\tif(i % 5 == 4):\n\t\t\t\tprint()\n\nclass policy_iterator:\n\tpolicy = None\n\tstates_and_actions = None\n\tdiscount = None\n\tmdp = None\n\tpolicy_evaluator = None\n\tpolicy_improver = None\n\n\ttheta = 1e-6\t# Error bound for policy evaluation (-1 if doing value iterations)\n\n\tdef __init__(self, policy, states_and_actions, mdp, discount):\n\t\tself.policy = policy\n\t\tself.states_and_actions = states_and_actions\n\t\tself.mdp = mdp\n\t\tself.discount = discount\n\t\tself.policy_evaluator = policy_evaluator(self.policy, self.states_and_actions, self.mdp, self.discount)\n\t\tself.policy_improver = policy_improver(self.policy, self.states_and_actions, self.mdp, self.discount)\n\n\n\t\"\"\"\n\tRuns policy iterations\n\tPolicy evaluation is done by solving linear equations (ONLY FOR Q4)\n\t\"\"\"\n\tdef iterate(self):\n\t\tpolicy_stable = False\n\t\titeration_count = 0\n\t\twhile(not policy_stable):\n\t\t\titeration_count += 1\n\t\t\t#Policy evaluation step\n\t\t\tself.policy_evaluator.iterative(self.theta)\n\n\t\t\t# Policy improvement\n\t\t\tpolicy_stable = self.policy_improver.improve()\n\n\t\t\tprint(\"Improved policy\", iteration_count, \"times\")\n\n\n\nclass gridworld:\n\tmdp = None\t# MDP\n\tsa = None\t# States and actions\n\n\t# Action definitions\n\tNORTH\t= 0\n\tSOUTH\t= 1\n\tEAST\t= 2\n\tWEST\t= 3\n\t\n\t\"\"\"\n\tDefines the problem for gridworld example of book\n\tArgs:\n\t\tNone\n\tNote the naming convention of cells:\n\t\torigin at top left.\n\t\tx increases towards right\n\t\ty increases downwards\n\t\"\"\"\n\tdef __init__(self):\n\t\tself.sa = states_and_actions()\n\t\tself.mdp = MDP()\n\n\t\t# Add states\n\t\tfor i in range(25):\n\t\t\tstate = (i%5, i//5)\n\t\t\tself.sa.add_state(state)\n\n\t\t# Add actions, next_states, rewards and MDP\n\t\tfor s in self.sa.get_states():\n\t\t\tfor a in range(4):\n\t\t\t\tns, r = self.__get_next_state(s, a)\n\t\t\t\tself.sa.add_action(s, a, (ns, r))\n\t\t\t\tself.mdp.set_probability(s, a, ns, r, 1)\n\n\n\t\"\"\"\n\tReturns next state and reward given a current action and state.\n\t\"\"\"\n\tdef __get_next_state(self, state, action):\n\n\t\tif(state == (1,0)):\t# State A\n\t\t\tnext_state = (1,4)\t# A'\n\t\t\treward = 10\n\t\t\treturn next_state, reward\n\n\t\tif(state == (3,0)):\t# State B\n\t\t\tnext_state = (3,2)\t# B'\n\t\t\treward = 5\n\t\t\treturn next_state, 
reward\n\n\t\tnext_state = None\n\t\treward = 0\n\n\t\tif(action == self.NORTH):\n\t\t\tnext_state = (state[0], state[1] - 1)\n\t\tif(action == self.SOUTH):\n\t\t\tnext_state = (state[0], state[1] + 1)\n\t\tif(action == self.EAST):\n\t\t\tnext_state = (state[0] - 1, state[1])\n\t\tif(action == self.WEST):\n\t\t\tnext_state = (state[0] + 1, state[1])\n\n\t\tif(next_state[0] > 4 or next_state[0] < 0 or next_state[1] > 4 or next_state[1] < 0):\t# Boundary\n\t\t\tnext_state = state\n\t\t\treward = -1\n\n\t\treturn next_state, reward\n\n\n\t\"\"\"\n\tSolves question 2 of assignment\n\t\"\"\"\n\tdef q2(self):\n\t\tp = policy(self.sa)\n\t\tpe = policy_evaluator(p, self.sa, self.mdp, 0.9)\n\t\tpe.linear()\n\n\t\t# Pretty printing\n\t\tfor i in range(25):\n\t\t\tstate = (i%5, i//5)\n\t\t\tval = round(self.sa.get_state_value(state), 1)\n\t\t\tprint(val, \"\", end = \"\\t\")\n\t\t\tif(i % 5 == 4):\n\t\t\t\tprint()\n\n\tdef q4(self):\n\t\tp = policy(self.sa)\n\t\tpi = policy_iterator(p, self.sa, self.mdp, 0.9)\n\t\tpi.iterate()\n\n\t\t# pretty printing\n\t\t# v_*(s)\n\t\tfor i in range(25):\n\t\t\tstate = (i%5, i//5)\n\t\t\tval = round(self.sa.get_state_value(state), 1)\n\t\t\tprint(val, end = \"\\t\")\n\t\t\tif(i % 5 == 4):\n\t\t\t\tprint()\n\n\t\t# pi_*(s)\n\t\tfor i in range(25):\n\t\t\tstate = (i%5, i//5)\n\t\t\tall_greedy_actions = p.get_all_greedy_actions(state)\n\t\t\toptimal_actions = \"\"\n\t\t\tif(self.NORTH in all_greedy_actions):\n\t\t\t\toptimal_actions += \"N\"\n\t\t\telse:\n\t\t\t\toptimal_actions += \"-\"\n\t\t\tif(self.SOUTH in all_greedy_actions):\n\t\t\t\toptimal_actions += \"S\"\n\t\t\telse:\n\t\t\t\toptimal_actions += \"-\"\n\t\t\tif(self.EAST in all_greedy_actions):\n\t\t\t\toptimal_actions += \"E\"\n\t\t\telse:\n\t\t\t\toptimal_actions += \"-\"\n\t\t\tif(self.WEST in all_greedy_actions):\n\t\t\t\toptimal_actions += \"W\"\n\t\t\telse:\n\t\t\t\toptimal_actions += \"-\"\n\t\t\tprint(optimal_actions, end=\"\\t\")\n\t\t\tif(i % 5 == 4):\n\t\t\t\tprint()\n\n\tdef q6(self):\n\t\tp = policy(self.sa)\n\t\tvi = value_iterator(p, self.sa, self.mdp, 0.9)\n\t\tvi.iterate(1e-6)\n\t\tvi.set_policy()\n\n\t\tprint(\"Completed value iterations\")\n\t\t# pretty printing\n\t\t# v_*(s)\n\t\tfor i in range(25):\n\t\t\tstate = (i%5, i//5)\n\t\t\tval = round(self.sa.get_state_value(state), 1)\n\t\t\tprint(val, end = \"\\t\")\n\t\t\tif(i % 5 == 4):\n\t\t\t\tprint()\n\n\t\t# pi_*(s)\n\t\tfor i in range(25):\n\t\t\tstate = (i%5, i//5)\n\t\t\tall_greedy_actions = p.get_all_greedy_actions(state)\n\t\t\toptimal_actions = \"\"\n\t\t\tif(self.NORTH in all_greedy_actions):\n\t\t\t\toptimal_actions += \"N\"\n\t\t\telse:\n\t\t\t\toptimal_actions += \"-\"\n\t\t\tif(self.SOUTH in all_greedy_actions):\n\t\t\t\toptimal_actions += \"S\"\n\t\t\telse:\n\t\t\t\toptimal_actions += \"-\"\n\t\t\tif(self.EAST in all_greedy_actions):\n\t\t\t\toptimal_actions += \"E\"\n\t\t\telse:\n\t\t\t\toptimal_actions += \"-\"\n\t\t\tif(self.WEST in all_greedy_actions):\n\t\t\t\toptimal_actions += \"W\"\n\t\t\telse:\n\t\t\t\toptimal_actions += \"-\"\n\t\t\tprint(optimal_actions, end=\"\\t\")\n\t\t\tif(i % 5 == 4):\n\t\t\t\tprint()\n\n\n\ndef main():\n\tgw = gridworld()\n\tgw.q6()\n\n\nif __name__ == '__main__':\n\tmain()","sub_path":"HW2/code/setu_q6_vi.py","file_name":"setu_q6_vi.py","file_ext":"py","file_size_in_byte":16665,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"233732203","text":"import gym\nimport pybulletgym\nimport numpy as np\nimport random\nimport time\nimport 
math\nimport copy\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.autograd as autograd\nfrom torch.distributions.categorical import Categorical\nfrom torch.distributions.multinomial import Multinomial\nfrom torch.autograd import Variable\nfrom torch import Tensor\n\nimport matplotlib.pyplot as plt\n\nfrom util import *\n\n\nENV_NAME = \"InvertedDoublePendulumPyBulletEnv-v0\"\nDIMS = 9\nSCALE = [ 0.00868285, 0.03400105, -0.00312787, 0.95092393, -0.01797627, -0.10439248, 0.86726532, 0.01176883, 0.12335652]\nSTD = [0.11101651, 0.58301397, 0.09502404, 0.07712284, 0.29911971, 1.78995357, 0.20914456, 0.45163139, 3.08248822]\n\nSEED = 464684\n\nMAX_EPISODES = 10000\nBATCH_SIZE = 16\nMAX_TIMESTEPS = 200\n\nALPHA = 0.001\nGAMMA = 0.99\n\nACTION_STEP = 1\n\nclass MLPPolicy(nn.Module):\n def __init__(self, input_size=5, layers=(128,), scale=None, normalize=None, list_actions=None, gamma=0.99):\n super(MLPPolicy, self).__init__()\n\n self.list_action = list_actions\n\n self.gamma=gamma\n\n if scale is None:\n self.scale = torch.zeros(1, input_size)\n else:\n self.scale = torch.tensor([scale])\n\n if normalize is None:\n self.normalize = torch.ones(1, input_size)\n else:\n self.normalize = torch.tensor([normalize])\n\n self.layers = []\n for n_neurons in layers:\n self.layers.append(nn.Linear(input_size, n_neurons))\n self.layers.append(nn.Tanh())\n\n input_size = n_neurons\n\n self.layers.append(nn.Linear(input_size, self.list_action.shape[0]))\n self.layers.append(nn.Softmax(dim=0))\n\n self.net = nn.Sequential(*self.layers)\n\n def forward(self, x):\n # out = self.net((x.float() - self.scale) / self.normalize)\n out = self.net(x.float())\n return Categorical(out)\n\n\n def update_weight(self, log_probs, rewards, optimizer, batch_size=1):\n \"\"\"Update the weights of the neural network\"\"\"\n optimizer.zero_grad()\n\n losses = []\n for i in range(batch_size):\n R = 0\n batch_losses = []\n returns = []\n for r in rewards[i][::-1]:\n R = r + self.gamma * R\n returns.insert(0, R)\n returns = torch.tensor(returns)\n # returns = returns - returns.mean()\n for log_prob, R in zip(log_probs[i], returns):\n batch_losses.append(-log_prob * R)\n loss = torch.stack(batch_losses).sum() / batch_size\n losses.append(loss)\n\n loss = torch.sum(torch.stack(losses)) \n loss.backward()\n optimizer.step()\n\n\ndef train(env, params, max_episodes=1000, max_timesteps=200, dims=9, scale=None, std=None, \n stop_if_alive_longer_than=200, stop_if_alive_longer_than_n_traj=100, list_actions=None):\n \"\"\"\n Train a model with a gaussian policy gradient\n\n Parameters:\n - env : the environnement\n - params : the aprameters of the training\n - max_episodes : the maximum number of batches that will be used for training\n - max_timesteps : the maximum number of timeteps for a single trajectory\n - scale : normalization for states (mean)\n - std : normalization for states (std)\n - stop_if_alive_longer_than : The training will stop if there is enough trajectories longer than this \n - stop_if_alive_longer_than_n_traj : Number of trajectories that need to be longer than stop_if_alive_longer_than for the training to stop\n - list_actions: list of discrete possible actions\n \"\"\"\n\n policy = MLPPolicy(dims, scale=scale, normalize=std, gamma=params.gamma, list_actions=list_actions)\n optimizer = optim.Adam(policy.parameters(), lr=params.lr)\n\n alive_time = []\n cum_rewards = []\n\n solved_for_n_iter = 0\n\n for i_episode in range(max_episodes):\n\n states = []\n actions = []\n rewards = []\n 
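        # One inner list per trajectory in the batch; log-probs stay as
        # graph-connected tensors so `update_weight` can backpropagate.
        # That update folds each reward list into discounted returns with
        # a reversed scan (R = r + gamma * R); the same step as a tiny
        # standalone sketch, with made-up rewards:
        #
        #   def discounted_returns(rewards, gamma=0.99):
        #       out, R = [], 0.0
        #       for r in reversed(rewards):
        #           R = r + gamma * R
        #           out.insert(0, R)
        #       return out
        #
        #   discounted_returns([1.0, 1.0, 1.0])   # -> [2.9701, 1.99, 1.0]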
log_probs = []\n\n # Generate a batch of trajectories\n for b in range(params.batch_size):\n\n state = env.reset()\n batch_states = []\n states.append(batch_states)\n actions.append([])\n rewards.append([])\n log_probs.append([])\n\n for timesteps in range(max_timesteps):\n state_tensor = Tensor(state)\n\n action = policy(state_tensor).sample()\n log_prob = policy(state_tensor).log_prob(action)\n action = list_actions[action.item()]\n \n log_probs[b].append(log_prob)\n\n batch_states.append(state)\n actions[b].append(action)\n\n state, reward, done, _ = env.step([action.item()])\n\n if done:\n rewards[b].append(0)\n break\n\n rewards[b].append(reward)\n\n # Check if we can stop the training\n if len(rewards[b]) >= stop_if_alive_longer_than:\n solved_for_n_iter +=1\n else:\n solved_for_n_iter = 0\n\n if solved_for_n_iter >= stop_if_alive_longer_than_n_traj:\n break\n\n # Check if we can stop the training\n if solved_for_n_iter >= stop_if_alive_longer_than_n_traj:\n break\n\n # Compute cum reward\n cum_rewards_batch = []\n alive_time_batch = []\n for r in rewards:\n cum_rewards_batch.append(get_cum_reward(r, params.gamma))\n alive_time_batch.append(len(r))\n\n cum = np.mean(cum_rewards_batch)\n\n alive_time.append(np.mean(alive_time_batch))\n cum_rewards.append(cum)\n\n # Update the weight of the neural network\n policy.update_weight(log_probs, rewards, optimizer, params.batch_size)\n\n print(\"Episode {}/{} finished after a mean of {:.1f} timesteps and a std of {:.2f} and mean return of {:.2f}, min trajectory len : {}\".format(\n i_episode, max_episodes, np.mean(alive_time_batch), np.std(alive_time_batch), cum, min([len(x) for x in rewards])))\n\n\n return [policy, cum_rewards, alive_time]\n\nif __name__ == \"__main__\":\n\n env = gym.make(ENV_NAME)\n env.seed(seed=SEED)\n\n params = TrainingParameters(batch_size=BATCH_SIZE, n_layers=1, lr=ALPHA, gamma=GAMMA, discrete=True)\n\n list_actions = np.arange(-1, 1 + ACTION_STEP, ACTION_STEP)\n\n policy, cum_rewards, alive_time = train(env, params, max_episodes=MAX_EPISODES, max_timesteps=MAX_TIMESTEPS, dims=DIMS, scale=SCALE, std=STD,\n stop_if_alive_longer_than=128, stop_if_alive_longer_than_n_traj=150, list_actions=list_actions)\n\n env.close()\n\n save_results(params.get_model_name(), np.array([cum_rewards, alive_time]))\n\n torch.save(policy.state_dict(), \"results/\" + params.get_model_name())\n plt.plot(cum_rewards, label=\"Cum rewards\")\n\n plt.plot(build_moving_average(cum_rewards, alpha=0.1), label=\"Average\")\n plt.xlabel(\"Number of batches\")\n plt.ylabel(\"Mean cumulative reward of batch\")\n plt.legend()\n\n plt.show()\n","sub_path":"discrete.py","file_name":"discrete.py","file_ext":"py","file_size_in_byte":7076,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"98918473","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Apr 3 21:08:04 2018\n\n@author: Tzu-Ying, Pear, Yi\nProject name: UPS Optimization Model for Network Design\n\"\"\"\n\nimport pulp\nimport math\nimport openpyxl\n\ndata = openpyxl.load_workbook('list_zipcode.xlsx',read_only=True, data_only=True)\nsheet = data['Sheet1']\ntitle = []\nfor row in sheet.rows:\n \n for cell in row:\n title.append(cell.value)\n\n\ndata = openpyxl.load_workbook('cost_matrix_truck.xlsx',read_only=True, data_only=True)\nsheet = data['cost']\n\ncost_truck = {}\n\n#inputting cost for the truck \n# structure would be like {ori:{dest: ***, dest2:***} ori2: {dest:...}}\nfor row in sheet.rows:\n temp = []\n for cell in row:\n 
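        # The surrounding loop is the read pattern used for every cost matrix
        # in this script: the first cell of a row is the origin key and the
        # remaining cells are zipped against the `title` zip-code list. A
        # hedged helper sketch that would factor it out (helper name is
        # illustrative, not part of the original model):
        #
        #   def load_matrix(path, sheet_name, title):
        #       wb = openpyxl.load_workbook(path, read_only=True, data_only=True)
        #       matrix = {}
        #       for r in wb[sheet_name].rows:
        #           vals = [c.value for c in r]
        #           matrix[vals[0]] = {dest: [v] for dest, v in zip(title, vals[1:])}
        #       return matrix
        #
        #   cost_truck = load_matrix('cost_matrix_truck.xlsx', 'cost', title)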
temp.append(cell.value)\n ori = temp[0]\n temp.pop(0)\n cost_truck[ori] = {}\n for i in range(len(title)):\n cost_truck[ori][title[i]] = []\n cost_truck[ori][title[i]].append(temp[i])\n\n\n\ndata2 = openpyxl.load_workbook('cost_matrix_deliveryday.xlsx', read_only=True, data_only=True)\nsheet2 = data2['day']\n\ndelivery_day = {}\n\n#inputting require days for each ori to dest\n# structure would be like {ori:{dest: ***, dest2:***} ori2: {dest:...}}\n\nfor row in sheet2.rows:\n temp = []\n\n for cell in row:\n temp.append(cell.value)\n ori = temp[0]\n temp.pop(0)\n delivery_day[ori] = {}\n for i in range(len(title)):\n delivery_day[ori][title[i]] = []\n delivery_day[ori][title[i]].append(temp[i])\n\n\n\n# creating dictionary for next day air cost from ori to dest\n# structure would be like {ori:{dest: ***, dest2:***} ori2: {dest:...}}\ndata = openpyxl.load_workbook('cost_matrix_nda.xlsx',read_only=True, data_only=True)\nsheet = data['cost']\n\ncost_nda = {}\n\nfor row in sheet.rows:\n temp = []\n for cell in row:\n temp.append(cell.value)\n ori = temp[0]\n temp.pop(0)\n cost_nda[ori] = {}\n for i in range(len(title)):\n cost_nda[ori][title[i]] = []\n cost_nda[ori][title[i]].append(temp[i])\n\n\n# creating dictionary for 2nd day air cost from ori to dest\n# structure would be like {ori:{dest: ***, dest2:***} ori2: {dest:...}}\ndata = openpyxl.load_workbook('cost_matrix_sda.xlsx',read_only=True, data_only=True)\nsheet = data['cost']\n\ncost_sda = {}\n\nfor row in sheet.rows:\n temp = []\n for cell in row:\n temp.append(cell.value)\n ori = temp[0]\n temp.pop(0)\n cost_sda[ori] = {}\n for i in range(len(title)):\n cost_sda[ori][title[i]] = []\n cost_sda[ori][title[i]].append(temp[i])\n\n# creating dictionary for demand of high value and value of each 3 digit\n# structure would be like {3digit: [high, low], ...}\ndata = openpyxl.load_workbook('demand.xlsx',read_only=True, data_only=True)\nsheet = data['Demand']\n\ndemand = {}\n\nfor row in sheet.rows:\n temp = []\n for cell in row:\n temp.append(cell.value)\n zipcode = temp[0]\n temp.pop(0)\n demand[zipcode] = []\n for i in range(len(temp)):\n demand[zipcode].append(temp[i])\n\n# creating dictionary for demand of high value and value of each 3 digit\n# structure would be like {3digit: cost, 3digit2: cost, ... 
}\ndata = openpyxl.load_workbook('facility_cost.xlsx',read_only=True, data_only=True)\nsheet = data['Demand']\n\nfacility_cost = {}\n\nfor row in sheet.rows:\n temp = []\n for cell in row:\n temp.append(cell.value)\n zipcode = temp[0]\n temp.pop(0)\n facility_cost[zipcode] = temp[0]\n\n","sub_path":"ups-model/model_LOCAL_13140.py","file_name":"model_LOCAL_13140.py","file_ext":"py","file_size_in_byte":3365,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"441037301","text":"__author__ = 'Mike'\n#Fractal triangle with python turtle\nimport turtle\n\n#Draws equilateral triangle counter-clockwise\n#returning turtle to original orientation.\ndef drawTriangle(t,d):\n for i in range(0,3):\n t.left(60)\n t.forward(d)\n t.left(60)\n return\n\n#Draws fractal triangles within preexisting triangle.\n#Insure that python turtle is oriented down on side of the\n#triangle\ndef drawFractal(t, unit):\n #Stop recursion once smallest triangle size has been reached\n if(unit <= 10):\n return False\n else:\n #Move to subfractal triangle\n drawFractal(t, unit/2)\n \n #Move down supertriangle and begin drawing subtriangle\n t.forward(unit/2)\n drawTriangle(t, unit/2)\n \n #Check to see if a fractal can be drawn on the left subtriangle\n #if so then also draw fractal on right subtriangle\n if(drawFractal(t,unit/2)):\n t.left(120)\n t.forward(unit/2)\n t.right(120)\n drawFractal(t,unit/2)\n \n #Return to top of supertriangle\n t.right(120)\n t.forward(unit/2)\n t.left(120)\n else:\n #If smaller fractal can't be drawn return to top of supertriangle\n t.back(unit/2)\n return True\n\nwindow = turtle.Screen()\nmike = turtle.Turtle()\nmike.right(60)\ndrawTriangle(mike,100)\nmike.left(60)\ndrawFractal(mike, 100)\n\nwindow.exitonclick()\n","sub_path":"fractalTriangle.py","file_name":"fractalTriangle.py","file_ext":"py","file_size_in_byte":1452,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"569189760","text":"# -*- coding: utf-8 -*-\n# BSD 3-Clause License\n#\n# Copyright (c) 2017\n# All rights reserved.\n# Copyright 2022 Huawei Technologies Co., Ltd\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n# ==========================================================================\n\nimport sys\n\nfrom transformers import AutoTokenizer\n\n\ndataset = sys.argv[1]\nmodel_name_or_path = sys.argv[2]\nmax_len = int(sys.argv[3])\n\nsubword_len_counter = 0\n\ntokenizer = AutoTokenizer.from_pretrained(model_name_or_path)\nmax_len -= tokenizer.num_special_tokens_to_add()\n\nwith open(dataset, \"rt\") as f_p:\n for line in f_p:\n line = line.rstrip()\n\n if not line:\n print(line)\n subword_len_counter = 0\n continue\n\n token = line.split()[0]\n\n current_subwords_len = len(tokenizer.tokenize(token))\n\n # Token contains strange control characters like \\x96 or \\x95\n # Just filter out the complete line\n if current_subwords_len == 0:\n continue\n\n if (subword_len_counter + current_subwords_len) > max_len:\n print(\"\")\n print(line)\n subword_len_counter = current_subwords_len\n continue\n\n subword_len_counter += current_subwords_len\n\n print(line)\n","sub_path":"PyTorch/built-in/others/CLIP_for_PyTorch/transformers/examples/legacy/token-classification/scripts/preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":2691,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"337909781","text":"import json\nfrom random import randint\n\nif __name__ == '__main__':\n\n chance_names = ['DZIEKANKA', 'POWToRKA Z BAZ DANYCH', 'HURTOWNIE CO SEMESTR', 'KLIKANIE ACCESSA', 'RANDOM OCENA']\n\n starting_value = 150\n value_step_change = 200\n\n data = []\n pk = 0\n for i in range(10):\n for ii, description in enumerate(chance_names):\n entry = {}\n entry['model'] = 'game.chance'\n entry['pk'] = pk + 1\n pk = pk + 1\n value = starting_value + i*value_step_change * randint(1,4)\n negative = randint(0,1)\n if negative == 0:\n value = value * (-1)\n fields = {}\n fields['description'] = description\n fields['value'] = value\n fields['chance_type'] = 1\n entry['fields'] = fields\n data.append(entry)\n\n with open('chance.json', 'w') as outfile:\n json.dump(data, outfile)\n","sub_path":"server/monopolibuda/generate_chance_data.py","file_name":"generate_chance_data.py","file_ext":"py","file_size_in_byte":880,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"547615221","text":"def normalize_position(image, box):\n \"\"\" Takes in an image which will be provided and then computes the normalized bouding box information.\n\n Args:\n image (3D np.array): (rows, columns, channels)\n rows (int): width of image\n columns (int): height of image\n channels (int): number of channels, if the image is in color\n box (tuple): (ymin, xmin, ymax, xmax)\n (xmin, xmax, ymin, ymax)\n xmin (int): left most edge of the bounding box\n xmax (int): right most edge of the bounding box\n ymin (int): lowest edge of the bounding box\n ymax (int): highest edge of the bouding box\n\n From image, try to get image width and height such that we can scale it appropriately\n\n (xmin,ymax). 
(xmax,ymax)\n ---------------\n | |\n | |\n | |\n | |\n | |\n ________________\n (xmin,ymin) (xmax,ymin)\n\n\n Returns:\n tuple: (x, y, z, widht, height, depth)\n x (float): left most point and is scaled between -1 and 1. to scale, can do (xmin-image_width/2)/(image_width/2)\n y (float): bottom most point and is scaled between -1 and 1. to scale, can do (ymin-image_height/2)/(image_height/2)\n z (float): set to 0\n width (float):this is defined as xmax-xmin. to scale, compute (xmax-xmin)/(image_width/2)\n height (float): this is defined as ymax-ymin. to scale, compute (ymax-ymin)/(image_height/2)\n depth (float): set to 0\n\n (x,y+height). (x+width, y+height)\n ---------------\n | |\n | |\n | |\n | |\n | |\n ________________\n (x,y) (x+width,y)\n\n\n >>> from skimage.data import coffee\n >>> img = coffee()\n >>> normalize_position(img,(50,100,50,100))\n (-0.5, -0.8333333333333334, 0.0, 0.0, 0.0, 0.0)\n >>> normalize_position(img,(10,10,90,90))\n (-0.95, -0.9666666666666667, 0.0, 0.4, 0.26666666666666666, 0.0)\n >>> normalize_position(img,(0,0,600,400))\n (-1.0, -1.0, 0.0, 2.0, 2.0, 0.0)\n >>> normalize_position(img,(0,100,600,50))\n Traceback (most recent call last):\n ...\n AssertionError: xmin is greater than xmax\n >>> normalize_position(img,(0,100,600,600))\n Traceback (most recent call last):\n ...\n AssertionError: xmax is greater than image width\n >>> normalize_position(img,(200,100,100,400))\n Traceback (most recent call last):\n ...\n AssertionError: ymin is greater than ymax\n >>> normalize_position(img,(100,-100,100,400))\n Traceback (most recent call last):\n ...\n AssertionError: xmin < 0\n \"\"\"\n ymin, xmin, ymax, xmax = box\n im_width, im_height = image.shape[:2]\n\n assert xmin <= xmax, 'xmin is greater than xmax'\n assert ymin <= ymax, 'ymin is greater than ymax'\n assert xmin <= im_width, 'xmin is greater than image width'\n assert xmax <= im_width, 'xmax is greater than image width'\n assert ymin <= im_height, 'ymin is greater than image height'\n assert ymax <= im_height, 'ymax is greater than image height'\n assert xmin >= 0, 'xmin < 0'\n assert ymin >= 0, 'ymin < 0'\n\n x_center = im_width / 2\n y_center = im_height / 2\n x = (xmin - (x_center)) / x_center\n y = (ymin - (y_center)) / y_center\n width = (xmax - xmin) / x_center\n height = (ymax - ymin) / y_center\n z = 0.0\n depth = 0.0\n return x, y, z, width, height, depth\n\n\ndef estimate_distance(box):\n \"\"\"\n Args: box (tuple) : (ymin, xmin, ymax, xmax)\n ymin (float): between value of 0 to 1 for the bounding box y value\n xmin (float): between value of 0 to 1 for the bounding box x value\n ymax (float): between value of 0 to 1 for the bounding box y value\n xmax (float): between value of 0 to 1 for the bounding box x value\n\n Returns : tuple : (x, y, z, width, height, depth)\n x (float): x-center of the bounding box\n y (float): y-center of the bounding box\n z (float): set to 0\n width (float):this is defined as xmax-xmin.\n height (float): this is defined as ymax-ymin.\n depth (float): set to 0\n\n >>> estimate_distance((0.0,0.0,1.0,1.0))\n (0.5, 0.5, 0.0, 1.0, 1.0, 0.0)\n >>> estimate_distance((0.0, 0.0, 0.5, 1.0))\n (0.5, 0.25, 0.0, 1.0, 0.5, 0.0)\n >>> estimate_distance((1.0, 1.0, 1.0, 1.0))\n (1.0, 1.0, 0.0, 0.0, 0.0, 0.0)\n >>> estimate_distance((0.0,0.0, 0.5, 0.75))\n (0.375, 0.25, 0.0, 0.75, 0.5, 0.0)\n \"\"\"\n ymin, xmin, ymax, xmax = box\n x = (xmin + xmax) / 2.0\n y = (ymin + ymax) / 2.0\n z = 0.0\n width = xmax - xmin\n height = ymax - ymin\n depth = 0.0\n return x, y, z, width, height, 
depth\n\n\ndef position(normalized_box):\n \"\"\" takes an image and the bounding box, returns the position of the bounding box with respect to the image\n\n Args:\n normalized_box (tuple): (x, y, z, width, height, depth)\n x (float): x-center of the bounding box\n y (float): y-center of the bounding box\n z (float): set to 0\n width (float):this is defined as xmax-xmin.\n height (float): this is defined as ymax-ymin.\n depth (float): set to 0\n\n Returns:\n string: 'left', 'right' or 'center'\n\n\n >>> normalized_box = estimate_distance((0.0,0.0,1.0,1.0))\n >>> position(normalized_box)\n 'center'\n >>> normalized_box = estimate_distance((0.0,0.0,1.0,0.4))\n >>> position(normalized_box)\n 'left'\n >>> normalized_box = estimate_distance((0.0,0.5,1.0,1.0))\n >>> position(normalized_box)\n 'right'\n \"\"\"\n x, y, z, widht, height, depth = normalized_box\n if x > 0.6:\n return 'right'\n elif x < 0.4:\n return 'left'\n return 'center'\n\n\nif __name__ == '__main__':\n import doctest\n doctest.testmod()\n","sub_path":"nlp/transform.py","file_name":"transform.py","file_ext":"py","file_size_in_byte":5937,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"382899639","text":"import bugsnag\nfrom bugsnag.celery import connect_failure_handler\nfrom celery import Celery\n\nbugsnag.configure(api_key=\"066f5ad3590596f9aa8d601ea89af845\")\nconnect_failure_handler()\n\napp = Celery('tasks', broker='redis://localhost:6379/0')\n\n@app.task\ndef crash(x, y):\n return x + y\n\n@app.task\ndef other_crash():\n raise Exception('spam', 'eggs')\n","sub_path":"celery/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":348,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"191649746","text":"\ndef countvowels(s):\n s = s.lower()\n count = 0\n for c in s:\n if c in 'aeiou':\n count += 1\n print('Number of vowels: ' + str(count))\n\n\ndef countbobs(s):\n s = s.lower()\n count = 0\n s2 = s[:]\n index = s2.find('bob')\n while index > -1:\n count += 1\n s2 = s2[index + 2:]\n index = s2.find('bob')\n return count\n\n\ndef longestsubstring(s):\n s = s.lower()\n alpha = 'abcdefghijklmnopqrstuvwxyz'\n s_temp = ''\n s_final = ''\n alpha_rank = -1\n for c in s:\n s_rank = alpha.find(c)\n if s_rank >= alpha_rank:\n s_temp += c\n else:\n s_temp = ''\n s_temp += c\n alpha_rank = alpha.find(c)\n if len(s_temp) > len(s_final):\n s_final = s_temp\n return s_final\n\n\ndef test():\n for s in ('azcbobobegghakl', 'sfdbobobsdfs', 'abcdef', 'bobsabcde', 'absfsfabcdee', 'abcbcd'):\n print('Test : ' + s + ' Result : ' + longestsubstring(s))\n\n\ndef main():\n #longestsubstring('azcbobobegghakl')\n test()\n\nif __name__ == '__main__':\n main()","sub_path":"python_mit_2/ps1.py","file_name":"ps1.py","file_ext":"py","file_size_in_byte":1083,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"479737529","text":"class Minion:\n\n def __init__(self):\n self.health = 0\n self.attack = 0\n self.type = ''\n self.tier = 0\n self.ability = ''\n self.name = ''\n self.minion = f'\\n{self.name} \\nHealth: {self.health}, Attack: {self.attack}, Type: {self.type}, ' \\\n f' Tier: {self.tier}, Ability: {self.ability}'\n\n def attack(self, enemy):\n pass\n\n def __repr__(self):\n return f'\\n{self.name} \\nHealth: {self.health}, Attack: {self.attack}, Type: {self.type}, ' \\\n f' Tier: {self.tier}, Ability: {self.ability}'\n\n\nclass MurlockTidehunter(Minion):\n\n def 
__init__(self):\n super().__init__()\n self.health = 1\n self.attack = 2\n self.type = 'Murlock'\n self.tier = 1\n self.ability = 'Battlecry: Summon a 1/1 Murlock Scout.'\n self.name = 'Murlock Tidehunter'\n\n\nclass BrannBronzebeard(Minion):\n\n def __init__(self):\n super().__init__()\n self.health = 4\n self.attack = 2\n self.type = 'None'\n self.tier = 5\n self.ability = 'Your battlecries trigger twice.'\n self.name = 'Brann Bronzebeard'\n\n\nclass Khadgar(Minion):\n\n def __init__(self):\n super().__init__()\n self.health = 2\n self.attack = 2\n self.type = 'None'\n self.tier = 3\n self.ability = 'Your cards that summon minions summon twice as many.'\n self.name = 'Khadgar'\n\n\nclass MamaBear(Minion):\n\n def __init__(self):\n super().__init__()\n self.health = 4\n self.attack = 4\n self.type = 'Beast'\n self.tier = 5\n self.ability = 'Whenever you summon a beast, give it +4/+4.'\n self.name = 'Mama Bear'\n\n\nclass LightfangEnforcer(Minion):\n\n def __init__(self):\n super().__init__()\n self.health = 2\n self.attack = 2\n self.type = 'None'\n self.tier = 5\n self.ability = 'At the end of your turn, give a friendly minion of each type +2/+2.'\n self.name = 'Lightfang Enforcer'\n","sub_path":"creatures.py","file_name":"creatures.py","file_ext":"py","file_size_in_byte":2040,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"538753567","text":"from JumpScale import j\nfrom mitmproxy.models import HTTPResponse\nfrom mitmproxy.script import concurrent\nfrom netlib.http import Headers\n\ncache = j.core.db\nrequests = 0\n\nfilter_denied = [\n \"youtube.com\",\n]\n\nlong_cache = [\n \".deb\", \".gz\", \".xz\"\n]\n\nforce_cache = [\n \"archive.ubuntu.com\"\n]\n\ndef _nocache(flow):\n # flow.response.stream = True\n flow.response.headers[\"X-GIG-Cache\"] = \"not-cached\"\n\ndef _cached(flow):\n flow.response.headers[\"X-GIG-Cache\"] = \"put-in-cache\"\n\ndef _cache(flow):\n hkey = 'http.cache.head.%s' % flow.request.pretty_url\n bkey = 'http.cache.body.%s' % flow.request.pretty_url\n\n ttl = 3600\n\n for lng in long_cache:\n if flow.request.pretty_url.endswith(lng):\n # Caching for 1 month for long cache extensions\n ttl = 86400 * 31\n\n cache.setex(bkey, flow.response.raw_content, ttl)\n cache.setex(hkey, bytes(flow.response.headers), ttl)\n\n return _cached(flow)\n\ndef _restore(flow, rawhead):\n # Building usable headers\n headers = Headers()\n\n lines = rawhead.decode('utf-8')[:-2].split(\"\\r\\n\")\n for line in lines:\n temp = line.split(\": \")\n headers[temp[0]] = temp[1]\n\n body = cache.get('http.cache.body.%s' % flow.request.pretty_url)\n\n if len(body) == 0:\n print(\"Cache hit but body empty, let's doing a real request\")\n cache.delete('http.cache.body.%s' % flow.request.pretty_url)\n cache.delete('http.cache.head.%s' % flow.request.pretty_url)\n return\n\n # Building response from cache\n response = HTTPResponse(b\"HTTP/1.1\", 200, b\"OK\", headers, body)\n\n print(response)\n \n response.headers[\"X-GIG-Cache\"] = \"from-cache\"\n\n # Send forged response\n flow.reply.send(response)\n\ndef _hit(flow):\n hkey = 'http.hits.%s' % flow.request.pretty_url\n host = flow.client_conn.address.host\n\n cache.hincrby(hkey, host)\n\n#\n# mitmproxy events\n#\n@concurrent\ndef request(flow):\n global requests\n requests += 1\n\n if requests % 500 == 0:\n print(\"Saving cache\")\n j.core.db.save()\n\n # Log request\n _hit(flow)\n\n # Drop if host is not allowed\n if flow.request.pretty_host in filter_denied:\n 
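        # NOTE (assumption about intent): finish() tears down the client
        # connection but does not end this handler; without an explicit
        # `return` after it, the denied request still falls through to the
        # cache lookup below.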
flow.client_conn.finish()\n\n\n # Check for cache hit\n print(\"Checking cache: %s\" % flow.request.pretty_url)\n hithead = cache.get('http.cache.head.%s' % flow.request.pretty_url)\n\n if hithead:\n # Restore the page from the cache\n print(\"Cache hit !\")\n _restore(flow, hithead)\n\n else:\n print(\"Cache miss\")\n\ndef responseheaders(flow):\n # Check for binary passthrough (this will disable long_cache)\n for lng in long_cache:\n if flow.request.pretty_url.endswith(lng):\n print(\"Streaming content\")\n flow.response.stream = True\n\n@concurrent\ndef response(flow):\n # Let keep track on the header of our filter\n flow.response.headers[\"X-GIG-Filter\"] = \"Filtered\"\n\n # We only cache GET contents\n if flow.request.method != 'GET':\n return _nocache(flow)\n\n if flow.request.pretty_host in force_cache:\n print(\"Cache forced !\")\n return _cache(flow)\n\n # Check for cache\n if 'Pragma' in flow.response.headers:\n pragma = flow.response.headers[\"Pragma\"]\n\n # No cache for this entry\n if 'no-' in pragma:\n return _nocache(flow)\n\n if 'Cache-Control' in flow.response.headers:\n control = flow.response.headers[\"Cache-Control\"]\n\n if 'no-' in control:\n return _nocache(flow)\n\n _cache(flow)\n\n","sub_path":"dnsmasq-alt/http-filter.py","file_name":"http-filter.py","file_ext":"py","file_size_in_byte":3514,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"546930751","text":"import sqlite3\n\nclass DB(object):\n def __init__(self):\n self.conn = sqlite3.connect(\"pwn.db\")\n self.c = self.conn.cursor()\n\n self.c.execute(\"\"\"CREATE TABLE IF NOT EXISTS hosts (\n id INTEGER PRIMARY KEY AUTOINCREMENT, hostname TEXT, ip TEXT,\n os TEXT DEFAULT 'Unknown', active INTEGER DEFAULT 1)\"\"\")\n\n self.c.execute(\"\"\"CREATE TABLE IF NOT EXISTS ports (\n id INTEGER PRIMARY KEY AUTOINCREMENT, hostid INTEGER, port INTEGER,\n protocol TEXT, service TEXT, confidence INTEGER, product TEXT,\n version TEXT,\n unique(hostid, port, protocol))\"\"\")\n\n self.c.execute(\"\"\"CREATE TABLE IF NOT EXISTS information (\n id INTEGER PRIMARY KEY AUTOINCREMENT, hostid INTEGER, portid INTEGER,\n name TEXT, output TEXT, source TEXT, score INTEGER DEFAULT 1,\n parsed INTEGER DEFAULT 0, unique(hostid, portid, name))\"\"\")\n\n def host_information_get(self, ip, source=None):\n # Return all host infos, or specific to a source\n infos = []\n if source:\n for i in self.c.execute(\"SELECT * FROM information WHERE source = ? 
AND hostid = (SELECT id from hosts WHERE ip = ?)\", (source, ip)):\n                infos.append(Info(i))\n        else:\n            for i in self.c.execute(\"SELECT * FROM information WHERE hostid = (SELECT id from hosts WHERE ip = ?)\", (ip,)):\n                infos.append(Info(i))\n        return infos\n\n    def host_insert(self, h):\n        # Insert host into db\n        hostname = h.ip\n        if h.hostname != \"Unknown\":  # fixed: `is not` compared identity, not equality\n            hostname = h.hostname\n        ip = h.ip\n        self.c.execute(\"INSERT INTO hosts (hostname, ip) VALUES (?,?)\", (hostname, ip))\n\n    def hostips_get(self):\n        # Returns IPs of all hosts\n        return [x[0] for x in self.c.execute(\"SELECT ip from hosts WHERE 1\").fetchall()]\n\n    def hostid_get(self, ip):\n        return self.c.execute(\"SELECT id from hosts WHERE ip = ?\", (ip,)).fetchone()[0]\n\n    def host_get(self, ip=None):\n        # Get a single host from db by IP\n        if ip:\n            h = self.c.execute(\"SELECT * from hosts WHERE ip = ?\", (ip,)).fetchone()\n            return Host(h)\n        return None\n\n    def host_update(self, id, hostname=None, os=None, active=None):\n        if hostname is not None:\n            self.c.execute(\"UPDATE hosts set hostname = ? WHERE id = ?\", (hostname, id))\n        if os is not None:\n            self.c.execute(\"UPDATE hosts set os = ? WHERE id = ?\", (os, id))\n        if active is not None:\n            self.c.execute(\"UPDATE hosts set active = ? WHERE id = ?\", (active, id))\n        self.conn.commit()\n\n    def hosts_get(self):\n        # Get all hosts from db\n        hosts = []\n        for h in self.c.execute(\"SELECT * from hosts WHERE 1\"):\n            hosts.append(Host(h))\n        return hosts\n\n    def infos_get(self):\n        # Get all information (removing zero scoring information)\n        infos = []\n        for i in self.c.execute(\"SELECT * FROM information WHERE score >= 1\"):\n            infos.append(Info(i))\n        return infos\n\n    def info_insert(self, hid, pid, name, output, source):\n        # Insert info into db\n        self.c.execute(\"INSERT INTO information (hostid, portid, name, output, source) VALUES(?,?,?,?,?)\", (hid, pid, name, output, source))\n\n    def info_update(self, id, score=None, parsed=None):\n        if score:\n            self.c.execute(\"UPDATE information SET score = ? WHERE id = ?\", (score, id))\n        if parsed:\n            self.c.execute(\"UPDATE information SET parsed = ? WHERE id = ?\", (parsed, id))\n\n    def ports_get(self, ip):\n        # Returns all port information of a certain IP\n        ports = []\n        for p in self.c.execute(\"SELECT * from ports WHERE hostid = (SELECT id from hosts where ip = ?)\", (ip,)):\n            ports.append(Port(p))\n        return ports\n\n    def port_insert(self, port, ip):\n        # Insert port into db\n        self.c.execute(\n            \"INSERT INTO ports (hostid, port, protocol, service, confidence, product, version) VALUES ((SELECT id from hosts WHERE ip = ?),?,?,?,?,?,?)\", (ip, port.portnum, port.protocol, port.name, port.conf, port.product, port.version))\n\n    def port_update(self, id, service=None, conf=None, product=None, version=None):\n        if service:\n            self.c.execute(\"UPDATE ports SET service = ? WHERE id = ?\", (service, id))\n        if conf:\n            self.c.execute(\"UPDATE ports SET confidence = ? WHERE id = ?\", (conf, id))\n        if product:\n            self.c.execute(\"UPDATE ports SET product = ? WHERE id = ?\", (product, id))\n        if version:\n            self.c.execute(\"UPDATE ports SET version = ? 
WHERE id = ?\", (version, id))\n\nclass Host(object):\n def __init__(self, d):\n self.id, self.hostname, self.ip, self.os, self.active = d\n\nclass Info(object):\n def __init__(self, i):\n self.id, self.hid, self.pid, self.name, self.output, self.source, self.score, self.parsed = i\n\nclass Port(object):\n def __init__(self, p):\n self.id, self.hid, self.pnum, self.proto, self.service, self.conf, self.product, self.version = p\n","sub_path":"libs/db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":5123,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"218788861","text":"# -*- coding: utf8 -*-\n\n# Copyright (c) 2014 Hewlett-Packard Development Company, L.P.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n# implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Use of io.StringIO in python =< 2.7 requires all strings handled to be\n# unicode. See if StringIO.StringIO is available first\ntry:\n import StringIO as io\nexcept ImportError:\n import io\nimport textwrap\n\nimport mock\nimport testtools\n\nimport git_review\n\n\nclass GitReviewConsole(testtools.TestCase):\n \"\"\"Class for testing the console output of git-review.\"\"\"\n\n reviews = [\n {\n 'number': '1010101',\n 'branch': 'master',\n 'subject': 'A simple short subject'\n }, {\n 'number': '9877',\n 'branch': 'stable/codeword',\n 'subject': 'A longer and slightly more wordy subject'\n }, {\n 'number': '12345',\n 'branch': 'master',\n 'subject': 'A ridiculously long subject that can exceed the '\n 'normal console width, just need to ensure the '\n 'max width is short enough'\n }]\n\n @mock.patch('git_review.cmd.query_reviews')\n @mock.patch('git_review.cmd.get_remote_url', mock.MagicMock)\n @mock.patch('git_review.cmd._has_color', False)\n def test_list_reviews_no_blanks(self, mock_query):\n\n mock_query.return_value = self.reviews\n with mock.patch('sys.stdout', new_callable=io.StringIO) as output:\n git_review.cmd.list_reviews(None)\n console_output = output.getvalue().split('\\n')\n\n wrapper = textwrap.TextWrapper(replace_whitespace=False,\n drop_whitespace=False)\n for text in console_output:\n for line in wrapper.wrap(text):\n self.assertEqual(line.isspace(), False,\n \"Extra blank lines appearing between reviews\"\n \"in console output\")\n","sub_path":"git_review/tests/test_unit.py","file_name":"test_unit.py","file_ext":"py","file_size_in_byte":2421,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"646221830","text":"import pandas as pd\nimport numpy as np\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.metrics import accuracy_score,f1_score\nfrom sys import argv\n\nimport mlflow\nimport mlflow.sklearn\n\nimport os\n\n\n\n#df = pd.read_csv(os.path.join(os.path.dirname(__file__), \"../wine_data.csv\"))\n#print(df.shape)\n\nwine_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"wine_data.csv\")\n\n\nfeature_names=['Type','Alcohol','Malic acid','Ash','Alcalinity of 
ash','Magnesium','Total phenols'\n ,'Flavanoids','Nonflavanoid phenols','Proanthocyanins','Color intensity','Hue'\n ,'OD280/OD315 of diluted wines','Proline']\n\ndata = pd.read_csv(wine_path,names=feature_names)\n\n\n\n\ny=data['Type']\ndel data['Type']\ndata_train, data_test, class_train, class_test = train_test_split(data, y, test_size=0.3)\n\n\n\nnum_tree=int(argv[1]) if len(argv)>1 else 10\nmax_depth=int(argv[2]) if len(argv)>2 else 3\n\nwith mlflow.start_run():\n rf=RandomForestClassifier(n_estimators=num_tree,max_depth=max_depth)\n rf.fit(data_train,class_train)\n pred=rf.predict(data_test)\n accuracy=accuracy_score(class_test,pred)\n \n print(\"max_depth\", max_depth)\n print(\"num_tree\", num_tree)\n print(\"accuracy\", accuracy)\n print(rf, \"model\")\n \n \n mlflow.log_param(\"max_depth\", max_depth)\n mlflow.log_param(\"num_tree\", num_tree)\n\n mlflow.log_metric(\"accuracy\", accuracy)\n \n mlflow.sklearn.log_model(rf, \"model\")\n ","sub_path":"ml_flow.py","file_name":"ml_flow.py","file_ext":"py","file_size_in_byte":1492,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"166054744","text":"\"\"\"\nneuralnetwork module tests\n\"\"\"\nimport os.path as path\n\nimport numpy as np\nimport yaml\n\nimport yass\nfrom yass.batch import RecordingsReader, BatchProcessor\nfrom yass import neuralnetwork\nfrom yass.neuralnetwork import nn_detection as nn\n\n\ndef test_can_use_neural_network_detector(path_to_tests):\n yass.set_config(path.join(path_to_tests, 'config_nnet.yaml'))\n CONFIG = yass.read_config()\n data = RecordingsReader(path.join(path_to_tests,\n 'data/standarized.bin'),\n mmap=False).data.T\n nn(data, CONFIG.neighChannels, CONFIG.geom,\n CONFIG.spikes.temporal_features, 3,\n CONFIG.neural_network_detector.threshold_spike,\n CONFIG.neural_network_triage.threshold_collision,\n CONFIG.neural_network_detector.filename,\n CONFIG.neural_network_autoencoder.filename,\n CONFIG.neural_network_triage.filename)\n\n\ndef test_splitting_in_batches_does_not_affect_result(path_to_tests):\n yass.set_config(path.join(path_to_tests, 'config_nnet.yaml'))\n CONFIG = yass.read_config()\n\n PATH_TO_DATA = path.join(path_to_tests, 'data/standarized.bin')\n\n data = RecordingsReader(PATH_TO_DATA, mmap=False).data.T\n\n with open(path.join(path_to_tests, 'data/standarized.yaml')) as f:\n PARAMS = yaml.load(f)\n\n # buffer size makes sure we can detect spikes if they appear at the end of\n # any batch\n bp = BatchProcessor(PATH_TO_DATA, PARAMS['dtype'], PARAMS['n_channels'],\n PARAMS['data_format'], '100KB', buffer_size=15)\n mc = bp.multi_channel_apply\n res = mc(nn,\n mode='memory',\n cleanup_function=neuralnetwork.fix_indexes,\n neighbors=CONFIG.neighChannels,\n geom=CONFIG.geom,\n temporal_features=CONFIG.spikes.temporal_features,\n temporal_window=3,\n th_detect=CONFIG.neural_network_detector.threshold_spike,\n th_triage=CONFIG.neural_network_triage.threshold_collision,\n detector_filename=CONFIG.neural_network_detector.filename,\n autoencoder_filename=CONFIG.neural_network_autoencoder.filename,\n triage_filename=CONFIG.neural_network_triage.filename)\n\n scores_batch = np.concatenate([element[0] for element in res], axis=0)\n clear_batch = np.concatenate([element[1] for element in res], axis=0)\n collision_batch = np.concatenate([element[2] for element in res], axis=0)\n\n (scores, clear,\n collision) = nn(data, CONFIG.neighChannels, CONFIG.geom,\n CONFIG.spikes.temporal_features, 3,\n CONFIG.neural_network_detector.threshold_spike,\n 
CONFIG.neural_network_triage.threshold_collision,\n                     CONFIG.neural_network_detector.filename,\n                     CONFIG.neural_network_autoencoder.filename,\n                     CONFIG.neural_network_triage.filename)\n\n    np.testing.assert_array_equal(clear_batch, clear)\n    np.testing.assert_array_equal(collision_batch, collision)\n    np.testing.assert_array_equal(scores_batch, scores)\n\n\ndef test_can_train_nnet(path_to_tests):\n    pass\n","sub_path":"tests/test_neuralnet.py","file_name":"test_neuralnet.py","file_ext":"py","file_size_in_byte":3142,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} {"seq_id":"133161957","text":"class Node:\r\n    def __init__(self, val):\r\n        self.val = val\r\n        self.next = None\r\n\r\n\r\ndef deleteMiddle(head):\r\n    # Count the nodes, then unlink the middle one. Fixed: the original\r\n    # compared the helper function `count` itself against the position\r\n    # (instead of the `c` counter) and never relinked any node.\r\n    def count(head):\r\n        p = head\r\n        n = 1\r\n        while p.next is not None:\r\n            p = p.next\r\n            n += 1\r\n        return n\r\n\r\n    total = count(head)\r\n    position = total // 2\r\n    c = 0\r\n    p = head\r\n    while p.next is not None:\r\n        if c == position - 1:\r\n            p.next = p.next.next  # unlink the middle node\r\n            break\r\n        p = p.next\r\n        c += 1\r\n    return head\r\n","sub_path":"CTCI/deleteMiddleNode.py","file_name":"deleteMiddleNode.py","file_ext":"py","file_size_in_byte":563,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} {"seq_id":"338523055","text":"import sys\n\nnumber_of_cases = int(sys.stdin.readline().strip())\n\nfor case in range(number_of_cases):\n    return_line = \"Case #\" + str(case + 1) + \": \"\n    N = int(sys.stdin.readline().strip())  # fixed: long() is Python 2 only\n    if N == 0:\n        print(return_line + \"INSOMNIA\")\n    else:\n        M = N\n        hash_table = [False] * 10\n        while True:\n            list_of_ints = [int(i) for i in str(M)]\n            for item in list_of_ints:\n                hash_table[item] = True\n            if not all(hash_table):\n                M += N\n            else:\n                print(return_line + str(M))\n                break\n","sub_path":"codes/CodeJamCrawler/16_0_1_neat/16_0_1_MachineGun_countingsheep.py","file_name":"16_0_1_MachineGun_countingsheep.py","file_ext":"py","file_size_in_byte":644,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} {"seq_id":"504095387","text":"import cv2\nimport datetime\nimport os\nfrom pymongo import MongoClient\nfrom gtts import gTTS\n\nclass FaceDetectionCamera(object):\n    client = MongoClient('localhost', 27017)\n\n    db = client.student\n    col = db.stdata\n    col2 = db.subject\n    recognizer = cv2.face.LBPHFaceRecognizer_create()\n    recognizer.read('trainer/trainer.yml')\n    cascadePath = \"haarcascades/haarcascade_frontalface_default.xml\"\n    faceCascade = cv2.CascadeClassifier(cascadePath)\n    font = cv2.FONT_HERSHEY_SIMPLEX\n\n    def __init__(self):\n        self.cam = cv2.VideoCapture(0)\n\n    def __del__(self):\n        self.cam.release()\n\n    def get_frame(self):\n        while True:\n            success, img = self.cam.read()\n            gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n            minW = 0.1 * self.cam.get(cv2.CAP_PROP_FRAME_WIDTH)\n            minH = 0.1 * self.cam.get(cv2.CAP_PROP_FRAME_HEIGHT)\n\n            cdt = datetime.datetime.now().strftime(\"%a %d-%m-%Y %H:%M:%S\")\n            txt_day = cdt.split(\" \")\n            txt_date = txt_day[1].split(\"-\")\n            txt_time = txt_day[2].split(\":\")\n\n            faces = self.faceCascade.detectMultiScale(\n                gray,\n                scaleFactor=1.2,\n                minNeighbors=5,\n                minSize=(int(minW), int(minH)),\n            )\n            # results_subject = self.col2.find(\n            #     {\"sj_date\": txt_day[0], \"sj_StartTime\": {\"$gte\": int(txt_time[0])}})\n            # for get_subject_results in results_subject:\n            #     start_time = int(get_subject_results[\"sj_StartTime\"])\n            #     finish_time = 
int(get_subject_results[\"sj_FinishTime\"])\n # sj_id = get_subject_results[\"sj_id\"]\n # cv2.putText(img, sj_id, (540, 30), self.font, 0.8,\n # (40, 240, 255), 2, cv2.LINE_AA) # Subject\n\n # cv2.putText(img, \"EXIT (ESC OR ENTER)\", (10, 20), self.font, 0.4, (0, 0, 255), 1, cv2.LINE_AA) # EXIT\n cv2.putText(img, cdt, (10, 20), self.font, 0.4,\n (255, 0, 0), 1, cv2.LINE_AA) # TIME\n\n cv2.line(img, (20, 100), (20, 450), (0, 255, 0), 3)\n cv2.line(img, (20, 100), (100, 100), (0, 255, 0), 3)\n cv2.line(img, (20, 450), (100, 450), (0, 255, 0), 3)\n\n cv2.line(img, (620, 100), (620, 450), (0, 255, 0), 3)\n cv2.line(img, (620, 100), (540, 100), (0, 255, 0), 3)\n cv2.line(img, (620, 450), (540, 450), (0, 255, 0), 3)\n\n # (20,100),(100,100) (620,100),(540,100)\n # _____________ ______________\n # | |\n # | |\n # | |\n # | |\n # |(20,100),(20,450) (620,100),(620,450)|\n # | |\n # | |\n # | |\n # |____________ _____________|\n # (20,450),(100,450) (620,450),(540,450)\n\n for (x, y, w, h) in faces:\n cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)\n\n # (x, y)_______\n # | |\n # | |\n # | |\n # |___(x+w,y+h)\n\n faces_count = str(faces.shape[0])\n cv2.putText(img, \"Number of faces detected: \" + faces_count, (0, img.shape[0] - 10),\n self.font, 0.5, (255, 255, 255), 1, cv2.LINE_AA)\n id, confidence = self.recognizer.predict(gray[y: y + h, x: x + w])\n print(id)\n if (confidence < 100):\n results = self.col.find({\"st_id\": id})\n for getresult in results:\n getid = getresult[\"st_id\"]\n f_name = getresult[\"f_name\"]\n l_name = getresult[\"l_name\"]\n st_status = getresult[\"st_status\"]\n if (st_status == \"none\"):\n query_id = {\"st_id\": getid}\n set_st_status = {\"$set\": {\"st_status\": \"Checked\"}}\n self.col.update_one(query_id, set_st_status)\n else:\n pass\n confidence = \" {0}%\".format(round(100 - confidence))\n status = \"DETECTED\"\n # cv2.putText(img, confidence, (540, 30), self.font,\n # 0.8, (40, 240, 255), 2, cv2.LINE_AA)\n cv2.putText(img, str(status), (170, 100),\n self.font, 2, (0, 255, 0), 2)\n cv2.putText(img, str(getid), (150, 450), self.font,\n 0.4, (255, 255, 255), 1, cv2.LINE_AA)\n cv2.putText(img, f_name, (230, 450), self.font,\n 0.4, (255, 255, 255), 1, cv2.LINE_AA)\n cv2.putText(img, l_name, (340, 450), self.font,\n 0.4, (255, 255, 255), 1, cv2.LINE_AA)\n cv2.putText(img, st_status, (430, 450), self.font,\n 0.3, (255, 255, 255,), 1, cv2.LINE_AA)\n else:\n cv2.rectangle(img, (x, y), (x + w, y + h), (0, 0, 255), 2)\n # confidence = \" {0}%\".format(round(100 - confidence))\n cv2.putText(img, \"unknown\", (150, 450), self.font,\n 0.4, (255, 255, 255), 1, cv2.LINE_AA)\n cv2.putText(img, \"unknown\", (230, 450), self.font,\n 0.4, (255, 255, 255), 1, cv2.LINE_AA)\n cv2.putText(img, \"unknown\", (340, 450), self.font,\n 0.4, (255, 255, 255), 1, cv2.LINE_AA)\n\n ret, jpeg = cv2.imencode('.jpg', img)\n return jpeg.tobytes()\n","sub_path":"facedetection.py","file_name":"facedetection.py","file_ext":"py","file_size_in_byte":6328,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"596394524","text":"# Use modern Python\nfrom __future__ import absolute_import, print_function, unicode_literals\n\nimport logging\nfrom collections import defaultdict\n\nfrom corehq.apps.accounting.exceptions import AccountingError\nfrom corehq.apps.accounting.models import (\n SoftwarePlanEdition,\n SoftwarePlanVisibility,\n)\n\nlogger = logging.getLogger(__name__)\n\n\ndef ensure_plans(edition_to_role, edition_to_product_rate, 
edition_to_feature_rate, feature_types, product_types,\n dry_run, verbose, apps):\n DefaultProductPlan = apps.get_model('accounting', 'DefaultProductPlan')\n SoftwarePlan = apps.get_model('accounting', 'SoftwarePlan')\n SoftwarePlanVersion = apps.get_model('accounting', 'SoftwarePlanVersion')\n Role = apps.get_model('django_prbac', 'Role')\n\n editions = edition_to_role.keys()\n edition_to_features = _ensure_features(feature_types, editions, dry_run=dry_run, verbose=verbose, apps=apps)\n for product_type in product_types:\n for edition in editions:\n role_slug = edition_to_role[edition]\n try:\n role = Role.objects.get(slug=role_slug)\n except Role.DoesNotExist:\n logger.info(\"Could not find the role '%s'. Did you forget to run cchq_prbac_bootstrap?\")\n logger.info(\"Aborting. You should figure this out.\")\n return\n software_plan_version = SoftwarePlanVersion(role=role)\n\n product, product_rate = _ensure_product_and_rate(\n edition_to_product_rate, product_type, edition,\n dry_run=dry_run, verbose=verbose, apps=apps,\n )\n feature_rates = _ensure_feature_rates(\n edition_to_feature_rate, edition_to_features[edition], edition,\n dry_run=dry_run, verbose=verbose, apps=apps,\n )\n software_plan = SoftwarePlan(\n name='%s Edition' % product.name, edition=edition, visibility=SoftwarePlanVisibility.PUBLIC\n )\n if dry_run:\n logger.info(\"[DRY RUN] Creating Software Plan: %s\" % software_plan.name)\n else:\n try:\n software_plan = SoftwarePlan.objects.get(name=software_plan.name)\n if verbose:\n logger.info(\"Plan '%s' already exists. Using existing plan to add version.\"\n % software_plan.name)\n except SoftwarePlan.DoesNotExist:\n software_plan.save()\n if verbose:\n logger.info(\"Creating Software Plan: %s\" % software_plan.name)\n\n software_plan_version.plan = software_plan\n\n # must save before assigning many-to-many relationship\n if hasattr(SoftwarePlanVersion, 'product_rates'):\n software_plan_version.save()\n\n product_rate.save()\n if hasattr(SoftwarePlanVersion, 'product_rates'):\n software_plan_version.product_rates.add(product_rate)\n elif hasattr(SoftwarePlanVersion, 'product_rate'):\n software_plan_version.product_rate = product_rate\n else:\n raise AccountingError('SoftwarePlanVersion does not have product_rate or product_rates field')\n\n # must save before assigning many-to-many relationship\n if hasattr(SoftwarePlanVersion, 'product_rate'):\n software_plan_version.save()\n\n for feature_rate in feature_rates:\n feature_rate.save()\n software_plan_version.feature_rates.add(feature_rate)\n software_plan_version.save()\n\n if edition == SoftwarePlanEdition.ADVANCED:\n trials = [True, False]\n else:\n trials = [False]\n for is_trial in trials:\n default_product_plan = DefaultProductPlan(product_type=product.product_type, edition=edition, is_trial=is_trial)\n if dry_run:\n logger.info(\"[DRY RUN] Setting plan as default for product '%s' and edition '%s'.\" %\n (product.product_type, default_product_plan.edition))\n else:\n try:\n default_product_plan = DefaultProductPlan.objects.get(product_type=product.product_type,\n edition=edition, is_trial=is_trial)\n if verbose:\n logger.info(\"Default for product '%s' and edition \"\n \"'%s' already exists.\" % (\n product.product_type, default_product_plan.edition\n ))\n except DefaultProductPlan.DoesNotExist:\n default_product_plan.plan = software_plan\n default_product_plan.save()\n if verbose:\n logger.info(\"Setting plan as default for product '%s' and edition '%s'.\" %\n (product.product_type,\n 
default_product_plan.edition))\n\n\ndef _ensure_product_and_rate(edition_to_product_rate, product_type, edition, dry_run, verbose, apps):\n \"\"\"\n Ensures that all the necessary SoftwareProducts and SoftwareProductRates are created for the plan.\n \"\"\"\n SoftwareProduct = apps.get_model('accounting', 'SoftwareProduct')\n SoftwareProductRate = apps.get_model('accounting', 'SoftwareProductRate')\n\n if verbose:\n logger.info('Ensuring Products and Product Rates')\n\n product = SoftwareProduct(name='%s %s' % (product_type, edition), product_type=product_type)\n if edition == SoftwarePlanEdition.ENTERPRISE:\n product.name = \"Dimagi Only %s\" % product.name\n\n product_rate = SoftwareProductRate(**edition_to_product_rate[edition])\n if dry_run:\n logger.info(\"[DRY RUN] Creating Product: %s\" % product)\n logger.info(\"[DRY RUN] Corresponding product rate of $%d created.\" % product_rate.monthly_fee)\n else:\n try:\n product = SoftwareProduct.objects.get(name=product.name)\n if verbose:\n logger.info(\"Product '%s' already exists. Using \"\n \"existing product to add rate.\"\n % product.name)\n except SoftwareProduct.DoesNotExist:\n product.save()\n if verbose:\n logger.info(\"Creating Product: %s\" % product)\n if verbose:\n logger.info(\"Corresponding product rate of $%d created.\"\n % product_rate.monthly_fee)\n product_rate.product = product\n return product, product_rate\n\n\ndef _ensure_features(feature_types, editions, dry_run, verbose, apps):\n \"\"\"\n Ensures that all the Features necessary for the plans are created.\n \"\"\"\n Feature = apps.get_model('accounting', 'Feature')\n\n if verbose:\n logger.info('Ensuring Features')\n\n edition_to_features = defaultdict(list)\n for edition in editions:\n for feature_type in feature_types:\n feature = Feature(name='%s %s' % (feature_type, edition), feature_type=feature_type)\n if edition == SoftwarePlanEdition.ENTERPRISE:\n feature.name = \"Dimagi Only %s\" % feature.name\n if dry_run:\n logger.info(\"[DRY RUN] Creating Feature: %s\" % feature)\n else:\n try:\n feature = Feature.objects.get(name=feature.name)\n if verbose:\n logger.info(\"Feature '%s' already exists. 
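                    # NOTE: the "must save before assigning many-to-many
                    # relationship" comments above reflect a general Django
                    # rule -- an unsaved instance has no primary key yet, so
                    # an M2M join row would have nothing to reference:
                    #     version.save()                   # pk assigned here
                    #     rate.save()
                    #     version.feature_rates.add(rate)  # both pks exist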
Using \"\n \"existing feature to add rate.\"\n % feature.name)\n except Feature.DoesNotExist:\n feature.save()\n if verbose:\n logger.info(\"Creating Feature: %s\" % feature)\n edition_to_features[edition].append(feature)\n return edition_to_features\n\n\ndef _ensure_feature_rates(edition_to_feature_rate, features, edition, dry_run, verbose, apps):\n \"\"\"\n Ensures that all the FeatureRates necessary for the plans are created.\n \"\"\"\n FeatureRate = apps.get_model('accounting', 'FeatureRate')\n\n if verbose:\n logger.info('Ensuring Feature Rates')\n\n feature_rates = []\n for feature in features:\n feature_rate = FeatureRate(**edition_to_feature_rate[edition][feature.feature_type])\n feature_rate.feature = feature\n if dry_run:\n logger.info(\"[DRY RUN] Creating rate for feature '%s': %s\" % (feature.name, feature_rate))\n elif verbose:\n logger.info(\"Creating rate for feature '%s': %s\" % (feature.name, feature_rate))\n feature_rates.append(feature_rate)\n return feature_rates\n","sub_path":"corehq/apps/accounting/bootstrap/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":8971,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"347811682","text":"import sys\nimport os\nfrom PyQt4 import QtGui\nfrom Config import Config\n\ndef resource_path(relative):\n if hasattr(sys, \"_MEIPASS\"):\n return os.path.join(sys._MEIPASS, relative)\n return os.path.join(relative)\n\nclass OptionsDialog(QtGui.QDialog):\n\n def __init__(self):\n super(OptionsDialog, self).__init__()\n\n self.initUI()\n\n def initUI(self):\n self.setWindowTitle('Options - Homo Monitor')\n self.setWindowIcon(QtGui.QIcon(resource_path('data/icon.ico')));\n self.resize(350, 150)\n\n ok = QtGui.QPushButton(\"OK\")\n cancel = QtGui.QPushButton(\"Cancel\")\n label = QtGui.QLabel(\"Post resulting HTML to remote URL:\")\n label2 = QtGui.QLabel(\"(Use this option if you want to monitor your miners remotely)\")\n self.edit_url = QtGui.QLineEdit()\n self.edit_url.setPlaceholderText(\"http://example.com/post.php\")\n self.edit_url.setText(Config.get(\"post_url\"))\n\n vbox = QtGui.QVBoxLayout()\n hbox = QtGui.QHBoxLayout()\n\n # Horisontal buttons box (space, Ok, Cancel)\n hbox.addStretch(1)\n hbox.addWidget(ok)\n hbox.addWidget(cancel)\n\n # Main vertical box\n vbox.addStretch(1)\n vbox.addWidget(label)\n vbox.addWidget(self.edit_url)\n vbox.addWidget(label2)\n vbox.addStretch(1)\n vbox.addLayout(hbox)\n\n self.setLayout(vbox)\n\n # Connect buttons to behaviour\n ok.clicked.connect(self.ok)\n cancel.clicked.connect(self.close)\n\n ok.setFocus()\n\n def ok(self):\n url = str(self.edit_url.text())\n Config.set(\"post_url\", url)\n self.close();\n\nif __name__ == \"__main__\" :\n app = QtGui.QApplication(sys.argv)\n od = OptionsDialog()\n od.show()\n sys.exit(app.exec_())","sub_path":"OptionsDialog.py","file_name":"OptionsDialog.py","file_ext":"py","file_size_in_byte":1795,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"119064405","text":"import random\nfrom spytest.dicts import SpyTestDict\n\ndef get_unique_random_list(list_length,range_start,range_end):\n list_item = set()\n while len(list(list_item)) < list_length:\n list_item.add(random.randint(range_start,range_end))\n return list(list_item)\n\ndef get_random_sequence_list(list_length):\n start_item = random.randint(2,4000)\n list_item =[item for item in range(start_item,start_item+list_length)]\n return list_item\n\ndef convert_mac_to_dot(mac):\n mac = 
mac.replace(\":\",\"\")\n return (mac[0:4] + '.' + mac[4:8] + '.' + mac[8:12]).lower()\n\ndata = SpyTestDict()\ndata.testing ='VRRP'\nvrrp_sessions = 4\n\nvlan_list = get_random_sequence_list(list_length=vrrp_sessions+8)\nvlan_intf_list = ['Vlan{}'.format(vlan_id) for vlan_id in vlan_list]\n\nvrrp_vlans = vlan_list[0:vrrp_sessions]\ndut1_uplink_vlans = vlan_list[vrrp_sessions:vrrp_sessions+2]\ndut2_uplink_vlans = vlan_list[vrrp_sessions+2:vrrp_sessions+4]\n\nvrrp_vlan_intf = vlan_intf_list[0:vrrp_sessions]\ndut1_uplink_vlan_intf = vlan_intf_list[vrrp_sessions:vrrp_sessions+2]\ndut2_uplink_vlan_intf = vlan_intf_list[vrrp_sessions+2:vrrp_sessions+4]\n\ndut1_vlans = vrrp_vlans + dut1_uplink_vlans\ndut2_vlans = vrrp_vlans + dut2_uplink_vlans\ndut3_vlans = vrrp_vlans\ndut4_vlans = vlan_list[vrrp_sessions:]\n\ndut1_vlan_intf = vrrp_vlan_intf + dut1_uplink_vlan_intf\ndut2_vlan_intf = vrrp_vlan_intf + dut2_uplink_vlan_intf\ndut3_vlan_intf = vrrp_vlan_intf\ndut4_vlan_intf = vlan_intf_list[vrrp_sessions:]\n\n\nlag_id_list = get_unique_random_list(list_length=4,range_start=1,range_end=100)\nlag_intf_list = ['PortChannel{}'.format(lag_id) for lag_id in lag_id_list]\n\n\n#IP params\nmask = '24'\ndut1_4_ip_list = ['14.14.{}.1'.format(i) for i in range(1,4)]\ndut4_1_ip_list = ['14.14.{}.2'.format(i) for i in range(1,4)]\ndut2_4_ip_list = ['24.24.{}.1'.format(i) for i in range(1,4)]\ndut4_2_ip_list = ['24.24.{}.2'.format(i) for i in range(1,4)]\n\n#VRRP params\nvrid_list = get_unique_random_list(list_length=vrrp_sessions,range_start=1,range_end=100)\nip_octet_list = get_unique_random_list(list_length=vrrp_sessions,range_start=25,range_end=100)\n\nvrrp_ip_list = []\nvip_list = []\nvrrp_sec_ip_list =[]\nvrrp_ip_nw = []\nfor session in range(vrrp_sessions):\n vrrp_ip_list.append(['{}.{}.{}.{}'.format(ip_octet_list[session],ip_octet_list[session],ip_octet_list[session],i) for i in range(1,4)])\n vip_list.append('{}.{}.{}.{}'.format(ip_octet_list[session],ip_octet_list[session],ip_octet_list[session],str(random.randint(5,150))))\n vrrp_sec_ip_list.append('{}.{}.{}.{}'.format(ip_octet_list[session],ip_octet_list[session],ip_octet_list[session],str(random.randint(193,194))))\n vrrp_ip_nw.append('{}.{}.{}.0'.format(ip_octet_list[session], ip_octet_list[session], ip_octet_list[session]))\n\n\nvmac_list = ['00:00:5E:00:01:{}'.format(format(vrid,'02X')) for vrid in vrid_list]\nvmac_list_1 = [convert_mac_to_dot(vmac) for vmac in vmac_list ]\nvrrp_priority_list_dut1 = get_unique_random_list(vrrp_sessions/2,101,253) + get_unique_random_list(vrrp_sessions/2,1,99)\nvrrp_priority_list_dut2 = get_unique_random_list(vrrp_sessions/2,1,99) + get_unique_random_list(vrrp_sessions/2,101,253)\n\n#BGP params\ndut1_as = '100'\ndut1_router_id = '1.1.1.1'\ndut2_as = '200'\ndut2_router_id = '2.2.2.2'\ndut4_as = '400'\ndut4_router_id = '4.4.4.4'\npeer_v4_1 = 'peer_v4_1'\npeer_v4_2 = 'peer_v4_2'\n\n\ndut4_route_list = [\"100.100.{}.0\".format(i) for i in range(1,vrrp_sessions+1)]\ndut4_tg_ip_list = [\"100.100.{}.1\".format(i) for i in range(1,vrrp_sessions+1)]\ntg_src_ip_list = [vrrp_ip_list[session][2] for session in range(vrrp_sessions)]\ntg_dest_ip_list = [\"100.100.{}.2\".format(i) for i in range(1,vrrp_sessions+1)]\ntg_dest_mac_list = [\"00:00:00:12:22:{}\".format(format(i,'02X')) for i in range(1,vrrp_sessions+1)]\ntg_src_mac_list = [\"00:00:00:11:22:{}\".format(format(i,'02X')) for i in range(1,vrrp_sessions+1)]\ntraffic_rate = 5000\ntg2_src_mac = \"00:00:00:44:44:44\"\nframe_size_bytes = 128\nrate_threshold = 5.0\n\ntrigger_list 
= random.sample(set(['fast_boot','config_reload']),1) +['docker_restart']\n\n\n","sub_path":"tests/routing/VRRP/vrrp_vars.py","file_name":"vrrp_vars.py","file_ext":"py","file_size_in_byte":4042,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"598267255","text":"import atexit\nimport os.path\nfrom pathlib import Path\nfrom typing import Any, Callable, Optional, Tuple, Union\n\nimport torch\nfrom torch import Tensor\nfrom torchaudio import (\n compliance,\n datasets,\n kaldi_io,\n sox_effects,\n transforms\n)\nfrom torchaudio._backend import (\n check_input,\n _audio_backend_guard,\n _get_audio_backend_module,\n get_audio_backend,\n set_audio_backend,\n)\nfrom torchaudio._soundfile_backend import SignalInfo, EncodingInfo\n\ntry:\n from .version import __version__, git_version # noqa: F401\nexcept ImportError:\n pass\n\n\ndef load(filepath: Union[str, Path],\n out: Optional[Tensor] = None,\n normalization: Union[bool, float, Callable] = True,\n channels_first: bool = True,\n num_frames: int = 0,\n offset: int = 0,\n signalinfo: Optional[SignalInfo] = None,\n encodinginfo: Optional[EncodingInfo] = None,\n filetype: Optional[str] = None) -> Tuple[Tensor, int]:\n r\"\"\"Loads an audio file from disk into a tensor\n\n Args:\n filepath (str or Path): Path to audio file\n out (Tensor, optional): An output tensor to use instead of creating one. (Default: ``None``)\n normalization (bool, float, or callable, optional): If boolean `True`, then output is divided by `1 << 31`\n (assumes signed 32-bit audio), and normalizes to `[-1, 1]`.\n If `float`, then output is divided by that number\n If `Callable`, then the output is passed as a parameter\n to the given function, then the output is divided by\n the result. (Default: ``True``)\n channels_first (bool, optional): Set channels first or length first in result. (Default: ``True``)\n num_frames (int, optional): Number of frames to load. 0 to load everything after the offset.\n (Default: ``0``)\n offset (int, optional): Number of frames from the start of the file to begin data loading.\n (Default: ``0``)\n signalinfo (sox_signalinfo_t, optional): A sox_signalinfo_t type, which could be helpful if the\n audio type cannot be automatically determined. (Default: ``None``)\n encodinginfo (sox_encodinginfo_t, optional): A sox_encodinginfo_t type, which could be set if the\n audio type cannot be automatically determined. (Default: ``None``)\n filetype (str, optional): A filetype or extension to be set if sox cannot determine it\n automatically. (Default: ``None``)\n\n Returns:\n (Tensor, int): An output tensor of size `[C x L]` or `[L x C]` where L is the number\n of audio frames and C is the number of channels. An integer which is the sample rate of the\n audio (as listed in the metadata of the file)\n\n Example\n >>> data, sample_rate = torchaudio.load('foo.mp3')\n >>> print(data.size())\n torch.Size([2, 278756])\n >>> print(sample_rate)\n 44100\n >>> data_vol_normalized, _ = torchaudio.load('foo.mp3', normalization=lambda x: torch.abs(x).max())\n >>> print(data_vol_normalized.abs().max())\n 1.\n\n \"\"\"\n\n return _get_audio_backend_module().load(\n filepath,\n out=out,\n normalization=normalization,\n channels_first=channels_first,\n num_frames=num_frames,\n offset=offset,\n signalinfo=signalinfo,\n encodinginfo=encodinginfo,\n filetype=filetype,\n )\n\n\ndef load_wav(filepath: Union[str, Path], **kwargs: Any) -> Tuple[Tensor, int]:\n r\"\"\" Loads a wave file. 
It assumes that the wav file uses 16 bit per sample that needs normalization by shifting\n the input right by 16 bits.\n\n Args:\n filepath (str or Path): Path to audio file\n\n Returns:\n (Tensor, int): An output tensor of size `[C x L]` or `[L x C]` where L is the number\n of audio frames and C is the number of channels. An integer which is the sample rate of the\n audio (as listed in the metadata of the file)\n \"\"\"\n kwargs['normalization'] = 1 << 16\n return load(filepath, **kwargs)\n\n\ndef save(filepath: str, src: Tensor, sample_rate: int, precision: int = 16, channels_first: bool = True) -> None:\n r\"\"\"Convenience function for `save_encinfo`.\n\n Args:\n filepath (str): Path to audio file\n src (Tensor): An input 2D tensor of shape `[C x L]` or `[L x C]` where L is\n the number of audio frames, C is the number of channels\n sample_rate (int): An integer which is the sample rate of the\n audio (as listed in the metadata of the file)\n precision (int, optional): Bit precision (Default: ``16``)\n channels_first (bool, optional): Set channels first or length first in result. (\n Default: ``True``)\n \"\"\"\n\n return _get_audio_backend_module().save(\n filepath, src, sample_rate, precision=precision, channels_first=channels_first\n )\n\n\n@_audio_backend_guard(\"sox\")\ndef save_encinfo(filepath: str,\n src: Tensor,\n channels_first: bool = True,\n signalinfo: Optional[SignalInfo] = None,\n encodinginfo: Optional[EncodingInfo] = None,\n filetype: Optional[str] = None) -> None:\n r\"\"\"Saves a tensor of an audio signal to disk as a standard format like mp3, wav, etc.\n\n Args:\n filepath (str): Path to audio file\n src (Tensor): An input 2D tensor of shape `[C x L]` or `[L x C]` where L is\n the number of audio frames, C is the number of channels\n channels_first (bool, optional): Set channels first or length first in result. (Default: ``True``)\n signalinfo (sox_signalinfo_t, optional): A sox_signalinfo_t type, which could be helpful if the\n audio type cannot be automatically determined (Default: ``None``).\n encodinginfo (sox_encodinginfo_t, optional): A sox_encodinginfo_t type, which could be set if the\n audio type cannot be automatically determined (Default: ``None``).\n filetype (str, optional): A filetype or extension to be set if sox cannot determine it\n automatically. (Default: ``None``)\n\n Example\n >>> data, sample_rate = torchaudio.load('foo.mp3')\n >>> torchaudio.save('foo.wav', data, sample_rate)\n\n \"\"\"\n ch_idx, len_idx = (0, 1) if channels_first else (1, 0)\n\n # check if save directory exists\n abs_dirpath = os.path.dirname(os.path.abspath(filepath))\n if not os.path.isdir(abs_dirpath):\n raise OSError(\"Directory does not exist: {}\".format(abs_dirpath))\n # check that src is a CPU tensor\n check_input(src)\n # Check/Fix shape of source data\n if src.dim() == 1:\n # 1d tensors as assumed to be mono signals\n src.unsqueeze_(ch_idx)\n elif src.dim() > 2 or src.size(ch_idx) > 16:\n # assumes num_channels < 16\n raise ValueError(\n \"Expected format where C < 16, but found {}\".format(src.size()))\n # sox stores the sample rate as a float, though practically sample rates are almost always integers\n # convert integers to floats\n if signalinfo:\n if signalinfo.rate and not isinstance(signalinfo.rate, float):\n if float(signalinfo.rate) == signalinfo.rate:\n signalinfo.rate = float(signalinfo.rate)\n else:\n raise TypeError('Sample rate should be a float or int')\n # check if the bit precision (i.e. 
bits per sample) is an integer\n if signalinfo.precision and not isinstance(signalinfo.precision, int):\n if int(signalinfo.precision) == signalinfo.precision:\n signalinfo.precision = int(signalinfo.precision)\n else:\n raise TypeError('Bit precision should be an integer')\n # programs such as librosa normalize the signal, unnormalize if detected\n if src.min() >= -1.0 and src.max() <= 1.0:\n src = src * (1 << 31)\n src = src.long()\n # set filetype and allow for files with no extensions\n extension = os.path.splitext(filepath)[1]\n filetype = extension[1:] if len(extension) > 0 else filetype\n # transpose from C x L -> L x C\n if channels_first:\n src = src.transpose(1, 0)\n # save data to file\n src = src.contiguous()\n\n import _torch_sox\n _torch_sox.write_audio_file(filepath, src, signalinfo, encodinginfo, filetype)\n\n\ndef info(filepath: str) -> Tuple[SignalInfo, EncodingInfo]:\n r\"\"\"Gets metadata from an audio file without loading the signal.\n\n Args:\n filepath (str): Path to audio file\n\n Returns:\n (sox_signalinfo_t, sox_encodinginfo_t): A si (sox_signalinfo_t) signal\n info as a python object. An ei (sox_encodinginfo_t) encoding info\n\n Example\n >>> si, ei = torchaudio.info('foo.wav')\n >>> rate, channels, encoding = si.rate, si.channels, ei.encoding\n \"\"\"\n\n return _get_audio_backend_module().info(filepath)\n\n\n@_audio_backend_guard(\"sox\")\ndef sox_signalinfo_t() -> SignalInfo:\n r\"\"\"Create a sox_signalinfo_t object. This object can be used to set the sample\n rate, number of channels, length, bit precision and headroom multiplier\n primarily for effects\n\n Returns: sox_signalinfo_t(object)\n - rate (float), sample rate as a float, practically will likely be an integer float\n - channel (int), number of audio channels\n - precision (int), bit precision\n - length (int), length of audio in samples * channels, 0 for unspecified and -1 for unknown\n - mult (float, optional), headroom multiplier for effects and ``None`` for no multiplier\n\n Example\n >>> si = torchaudio.sox_signalinfo_t()\n >>> si.channels = 1\n >>> si.rate = 16000.\n >>> si.precision = 16\n >>> si.length = 0\n \"\"\"\n\n import _torch_sox\n return _torch_sox.sox_signalinfo_t()\n\n\n@_audio_backend_guard(\"sox\")\ndef sox_encodinginfo_t() -> EncodingInfo:\n r\"\"\"Create a sox_encodinginfo_t object. This object can be used to set the encoding\n type, bit precision, compression factor, reverse bytes, reverse nibbles,\n reverse bits and endianness. This can be used in an effects chain to encode the\n final output or to save a file with a specific encoding. For example, one could\n use the sox ulaw encoding to do 8-bit ulaw encoding. 
Note in a tensor output\n the result will be a 32-bit number, but number of unique values will be determined by\n the bit precision.\n\n Returns: sox_encodinginfo_t(object)\n - encoding (sox_encoding_t), output encoding\n - bits_per_sample (int), bit precision, same as `precision` in sox_signalinfo_t\n - compression (float), compression for lossy formats, 0.0 for default compression\n - reverse_bytes (sox_option_t), reverse bytes, use sox_option_default\n - reverse_nibbles (sox_option_t), reverse nibbles, use sox_option_default\n - reverse_bits (sox_option_t), reverse bytes, use sox_option_default\n - opposite_endian (sox_bool), change endianness, use sox_false\n\n Example\n >>> ei = torchaudio.sox_encodinginfo_t()\n >>> ei.encoding = torchaudio.get_sox_encoding_t(1)\n >>> ei.bits_per_sample = 16\n >>> ei.compression = 0\n >>> ei.reverse_bytes = torchaudio.get_sox_option_t(2)\n >>> ei.reverse_nibbles = torchaudio.get_sox_option_t(2)\n >>> ei.reverse_bits = torchaudio.get_sox_option_t(2)\n >>> ei.opposite_endian = torchaudio.get_sox_bool(0)\n\n \"\"\"\n\n import _torch_sox\n ei = _torch_sox.sox_encodinginfo_t()\n sdo = get_sox_option_t(2) # sox_default_option\n ei.reverse_bytes = sdo\n ei.reverse_nibbles = sdo\n ei.reverse_bits = sdo\n return ei\n\n\n@_audio_backend_guard(\"sox\")\ndef get_sox_encoding_t(i: int = None) -> EncodingInfo:\n r\"\"\"Get enum of sox_encoding_t for sox encodings.\n\n Args:\n i (int, optional): Choose type or get a dict with all possible options\n use ``__members__`` to see all options when not specified. (Default: ``None``)\n\n Returns:\n sox_encoding_t: A sox_encoding_t type for output encoding\n \"\"\"\n\n import _torch_sox\n if i is None:\n # one can see all possible values using the .__members__ attribute\n return _torch_sox.sox_encoding_t\n else:\n return _torch_sox.sox_encoding_t(i)\n\n\n@_audio_backend_guard(\"sox\")\ndef get_sox_option_t(i: int = 2) -> Any:\n r\"\"\"Get enum of sox_option_t for sox encodinginfo options.\n\n Args:\n i (int, optional): Choose type or get a dict with all possible options\n use ``__members__`` to see all options when not specified.\n (Default: ``sox_option_default`` or ``2``)\n Returns:\n sox_option_t: A sox_option_t type\n \"\"\"\n\n import _torch_sox\n if i is None:\n return _torch_sox.sox_option_t\n else:\n return _torch_sox.sox_option_t(i)\n\n\n@_audio_backend_guard(\"sox\")\ndef get_sox_bool(i: int = 0) -> Any:\n r\"\"\"Get enum of sox_bool for sox encodinginfo options.\n\n Args:\n i (int, optional): Choose type or get a dict with all possible options\n use ``__members__`` to see all options when not specified. (Default:\n ``sox_false`` or ``0``)\n\n Returns:\n sox_bool: A sox_bool type\n \"\"\"\n\n import _torch_sox\n if i is None:\n return _torch_sox.sox_bool\n else:\n return _torch_sox.sox_bool(i)\n\n\n_SOX_INITIALIZED = False\n# This variable has a micro lifecycle. 
(False -> True -> None)\n# False: Not initialized\n# True: Initialized\n# None: Already shut down (should not be initialized again.)\n\n_SOX_SUCCESS_CODE = 0\n# defined at\n# https://fossies.org/dox/sox-14.4.2/sox_8h.html#a8e07e80cebeff3339265d89c387cea93a9ef2b87ec303edfe40751d9a85fadeeb\n\n\n@_audio_backend_guard(\"sox\")\ndef initialize_sox() -> int:\n    \"\"\"Initialize sox for use with effects chains.\n\n    You only need to call this function once to use SoX effects chains multiple times.\n    It is safe to call this function multiple times as long as ``shutdown_sox`` is not yet called.\n    Once ``shutdown_sox`` is called, you can no longer use SoX effects and calling this function\n    results in `RuntimeError`.\n\n    Note:\n        This function is not required for simple loading.\n\n    Returns:\n        int: Code corresponding to sox_error_t enum. See\n        https://fossies.org/dox/sox-14.4.2/sox_8h.html#a8e07e80cebeff3339265d89c387cea93\n    \"\"\"\n    global _SOX_INITIALIZED\n    if _SOX_INITIALIZED is None:\n        raise RuntimeError('SoX effects chain has already been shut down. Cannot initialize again.')\n    if not _SOX_INITIALIZED:\n        import _torch_sox\n        code = _torch_sox.initialize_sox()\n        if code == _SOX_SUCCESS_CODE:\n            _SOX_INITIALIZED = True\n            atexit.register(shutdown_sox)\n        return code\n    return _SOX_SUCCESS_CODE\n\n\n@_audio_backend_guard(\"sox\")\ndef shutdown_sox() -> int:\n    \"\"\"Shut down sox for effects chains.\n\n    You do not need to call this function as it will be called automatically\n    at the end of program execution, if ``initialize_sox`` was called.\n\n    It is safe to call this function multiple times.\n\n    Returns:\n        int: Code corresponding to sox_error_t enum. See\n        https://fossies.org/dox/sox-14.4.2/sox_8h.html#a8e07e80cebeff3339265d89c387cea93\n    \"\"\"\n    global _SOX_INITIALIZED\n    if _SOX_INITIALIZED:\n        import _torch_sox\n        code = _torch_sox.shutdown_sox()\n        if code == _SOX_SUCCESS_CODE:  # fixed: compared the code to the boolean flag\n            _SOX_INITIALIZED = None\n        return code\n    return _SOX_SUCCESS_CODE\n\n\ndef _audio_normalization(signal: Tensor, normalization: Union[bool, float, Callable]) -> None:\n    \"\"\"Audio normalization of a tensor in-place. The normalization can be a bool,\n    a number, or a callable that takes the audio tensor as an input. 
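# Aside: a guarded-usage sketch of the False -> True -> None lifecycle above,
# using only functions defined in this record (whether the sox backend is
# importable at all depends on the build).
import torchaudio

code = torchaudio.initialize_sox()   # idempotent until shutdown is called
try:
    pass  # ...build and run SoX effects chains here...
finally:
    torchaudio.shutdown_sox()        # initialize_sox() after this raises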
SoX uses\n 32-bit signed integers internally, thus bool normalizes based on that assumption.\n \"\"\"\n\n if not normalization:\n return\n\n if isinstance(normalization, bool):\n normalization = 1 << 31\n\n if isinstance(normalization, (float, int)):\n # normalize with custom value\n a = normalization\n signal /= a\n elif callable(normalization):\n a = normalization(signal)\n signal /= a\n","sub_path":"torchaudio/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":16151,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"75562520","text":"#!/usr/bin/env python\n\nimport rospy\nfrom geometry_msgs.msg import Twist\nfrom sensor_msgs.msg import Joy\nfrom turtlebot_gamepad_training_replay.msg import ButtonValues\n\nclass JoyTwist(object):\n def __init__(self):\n self._btm_sub = rospy.Subscriber('/buttons', ButtonValues, self.button_callback, queue_size=1)\n self._joy_sub = rospy.Subscriber('/joy', Joy, self.joy_callback, queue_size=1)\n self._vel_pub = rospy.Publisher('/cmd_vel_mux/imput/teleop', Twist, queue_size=1) #cmd_vel_mux/input/navi\n\n self.on = False\n \n def button_callback(self, btm_msg):\n self.on = btm_msg.training \n\n def joy_callback(self, joy_msg):\n if not self.on:\n return\n\n if joy_msg.buttons[0] == 1:\n twist = Twist()\n twist.linear.x = joy_msg.axes[1] * 0.2\n twist.angular.z = joy_msg.axes[0] * 3.14/32\n self._vel_pub.publish(twist)\n\nif __name__ == '__main__':\n rospy.init_node('turtlebot_training')\n turtlebot_training = JoyTwist()\n rospy.spin()\n","sub_path":"scripts/turtlebot_training.py","file_name":"turtlebot_training.py","file_ext":"py","file_size_in_byte":1046,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"488772383","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Dec 23 16:00:03 2019\n\n@author: havu\n\"\"\"\n\nclass IntcodeComputer:\n JUMP = {\"01\" : 4,\n \"02\" : 4,\n \"03\" : 2,\n \"04\" : 2,\n \"05\" : 3,\n \"06\" : 3,\n \"07\" : 4,\n \"08\" : 4,\n \"09\" : 2,\n \"99\" : 1,\n }\n\n def __init__(self, program):\n self.code = program.copy()\n self.i = 0\n self.rb = 0\n self.halted = False\n\n def run_computer(self, inpt=None):\n \"\"\"Run the computer until it halts, yielding the output.\"\"\"\n while not self.halted:\n instruction = str(self.code[self.i]).rjust(5, '0')\n opcode = instruction[-2:]\n modes = instruction[:3]\n\n if opcode == \"99\":\n self.halted = True\n return 2\n\n # parameter pointers\n p = [0, 0, 0, 0]\n\n for j in range(1, 4):\n if modes[-j] == \"0\":\n p[j] = self.code[self.i+j]\n elif modes[-j] == \"1\":\n p[j] = self.i+j\n elif modes[-j] == \"2\":\n p[j] = self.code[self.i+j] + self.rb\n \n if len(self.code) < max(p):\n self.code.extend([0] * max(p))\n \n if opcode == \"03\":\n self.code[p[1]] = inpt\n elif opcode == \"04\":\n yield self.code[p[1]]\n elif opcode == \"09\":\n self.rb += self.code[p[1]]\n\n elif opcode == \"01\":\n self.code[p[3]] = self.code[p[1]] + self.code[p[2]]\n elif opcode == \"02\":\n self.code[p[3]] = self.code[p[1]] * self.code[p[2]]\n elif opcode == \"05\":\n if self.code[p[1]] != 0:\n self.i = self.code[p[2]]\n continue\n elif opcode == \"06\":\n if self.code[p[1]] == 0:\n self.i = self.code[p[2]]\n continue\n elif opcode == \"07\":\n if self.code[p[1]] < self.code[p[2]]:\n self.code[p[3]] = 1\n else:\n self.code[p[3]] = 0\n elif opcode == \"08\":\n if self.code[p[1]] == self.code[p[2]]:\n self.code[p[3]] = 1\n else:\n self.code[p[3]] = 0\n 
else:\n raise ValueError(\n f\"Opcode {opcode} at index {self.i} is not valid.\")\n\n self.i += self.JUMP[opcode]\n\n\n\nwith open(\"input13.txt\") as f:\n data = f.read()\n\ndata = [int(i) for i in data.split(\",\")]\n\n# part 1\n\ncomputer = IntcodeComputer(data)\n\noutput = list(computer.run_computer())\n\nprint(sum(1 for n in output[2 :: 3] if n == 2))\n","sub_path":"day13.py","file_name":"day13.py","file_ext":"py","file_size_in_byte":2823,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
{"seq_id":"138333554","text":"# !/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nBIOL7800 assignment 16\nOscar Johnson 24 March 2016\n\nCopyright Oscar Johnson 2016\n\nAnalysis of avian life history data using the database\ncompiled by Myhrvold et al. 2015:\n\nNathan P. Myhrvold, Elita Baldridge, Benjamin Chan, Dhileep Sivam, Daniel L.\nFreeman, and S. K. Morgan Ernest. 2015. An amniote life-history database to\nperform comparative analyses with birds, mammals, and reptiles. Ecology\n96:3109. http://dx.doi.org/10.1890/15-0846.1\n\"\"\"\n\nimport unittest\nimport numpy\nimport pandas\n\nimport task2\n\n\nclass TestTask2(unittest.TestCase):\n\n def test_listify(self):\n \"\"\" test that data type is correct\"\"\"\n observed = task2.listify('name, item \\n name2, item2')\n self.assertIsInstance(observed, list)\n\n def test_lists_df_1(self):\n \"\"\"test that data type is correct\"\"\"\n my_file = open('Aves_Database_Aug_2015.csv', 'r')\n observed = task2.lists_df(my_file)\n self.assertIsInstance(observed[0], list)\n\n def test_lists_df_2(self):\n \"\"\"test that list contains some data\"\"\"\n my_file = open('Aves_Database_Aug_2015.csv', 'r')\n obs = task2.lists_df(my_file)\n observed = obs[0][0]\n self.assertIsInstance(observed, str)\n\n def test_array(self):\n \"\"\" test that data type is correct\"\"\"\n observed = task2.array([['name, item'], ['name2, item2']])\n self.assertIsInstance(observed, numpy.ndarray)\n\n def test_frame1(self):\n \"\"\" test that data type is correct\"\"\"\n observed = task2.frame(numpy.array([['name', 'item'],\n ['name2', 'item2'],\n ['name3', 'item3']]),\n i=['1', '2', '3'],\n c=['1', '2'],)\n self.assertIsInstance(observed, pandas.core.frame.DataFrame)\n\n def test_frame2(self):\n \"\"\"\n test that length is correct\n 'length' is the length of the index\n \"\"\"\n obs = task2.frame(numpy.array([['name', 'item'],\n ['name2', 'item2'],\n ['name3', 'item3']]),\n i=['1', '2', '3'],\n c=['1', '2'],)\n observed = len(obs)\n self.assertEqual(observed, 3)\n\n def test_get_family(self):\n \"\"\"test that I'm getting correct families in array\"\"\"\n my_file = open('Aves_Database_Aug_2015.csv', 'r')\n l = task2.listify(my_file)\n my_file.close()\n x = task2.array(l)\n observed = task2.get_family(x)\n self.assertIn('Accipitridae', observed)\n\n def test_get_species(self):\n \"\"\"test that I'm getting correct species names in array\"\"\"\n my_file = open('Aves_Database_Aug_2015.csv', 'r')\n l = task2.listify(my_file)\n my_file.close()\n a = pandas.DataFrame(numpy.array(l))\n observed = task2.get_species(a)\n self.assertIn('Mimus_gilvus', observed)\n\n def test_pearson_coof(self):\n \"\"\"\n \"\"\"\n pass\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"answers/henicorhina/task2_test.py","file_name":"task2_test.py","file_ext":"py","file_size_in_byte":3149,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
{"seq_id":"268598766","text":"# -*- coding: utf-8 
-*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('API', '0001_initial'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Action',\n fields=[\n ('id', models.AutoField(serialize=False, auto_created=True, verbose_name='ID', primary_key=True)),\n ('type', models.CharField(choices=[('OPEN', 'Open'), ('REGISTER', 'Register'), ('EXIT', 'Exit'), ('ERROR', 'Error'), ('VOTE', 'Vote'), ('DOWNLOAD', 'Download')], max_length=140)),\n ],\n ),\n ]\n","sub_path":"API/migrations/0002_action.py","file_name":"0002_action.py","file_ext":"py","file_size_in_byte":651,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
{"seq_id":"464852116","text":"import socket, json\r\nfrom _thread import start_new_thread\r\n\r\nfrom player import Player\r\n\r\n\r\n# Socket config\r\ns = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\r\ns.connect((\"8.8.8.8\", 80))\r\ninfo = s.getsockname()\r\ns.close()\r\nprint(f'ADDRESS: {info[0]}\\nPORT: {info[1]}')\r\nADDRESS = info[0]\r\nPORT = info[1]\r\n\r\nsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\nsock.bind((ADDRESS, PORT))\r\nsock.listen(5)\r\n\r\n\r\n#Game config\r\nplayers = list()\r\n\r\n\r\ndef add_new_player(conn, addr, name):\r\n global players\r\n players.append(Player(20, 20, addr, conn, name))\r\n print(f'NEW PLAYER FROM {addr[1]}')\r\n\r\ndef connect_new_players():\r\n while True:\r\n conn, addr = sock.accept()\r\n conn.send(str('HELLO!').encode())\r\n name = conn.recv(1024).decode()\r\n print(name)\r\n add_new_player(conn, addr, name)\r\n print('SAVED!')\r\n\r\ndef host():\r\n start_new_thread(connect_new_players, ())\r\n while True:\r\n send_data = {'players' : list()}\r\n for player in list(players): # iterate over a copy: disconnected players are removed from the list mid-loop\r\n data = player.conn.recv(1024)\r\n if data.decode() == 'CLOSED':\r\n player.conn.send('HAVE A NICE DAY!'.encode())\r\n print(f'PLAYER {player.addr[1]} DELETED')\r\n players.remove(player)\r\n else:\r\n data = json.loads(data.decode())\r\n player.update(data['xbutton'], data['ybutton'], (640, 480), players)\r\n for player in players:\r\n player.update_collide(players, (640, 480))\r\n for player in players:\r\n player.apply_move()\r\n for player in players:\r\n send_data['players'].append({\r\n 'x': player.rect.x,\r\n 'y': player.rect.y,\r\n 'color': player.color,\r\n 'addr': player.addr[1],\r\n 'name': player.name,\r\n })\r\n for player in players:\r\n player.conn.send(json.dumps(send_data).encode())\r\n\r\nhost()\r\nsock.close()\r\n","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1996,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
{"seq_id":"442984514","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Oct 29 00:18:13 2016\r\n\r\n@author: ruialberto\r\n\"\"\"\r\n\r\ndef longest_run(L):\r\n \"\"\"\r\n Assumes L is a list of integers containing at least 2 elements.\r\n Finds the longest run of numbers in L, where the longest run can\r\n either be monotonically increasing or monotonically decreasing. \r\n In case of a tie for the longest run, choose the longest run \r\n that occurs first.\r\n Does not modify the list.\r\n Returns the sum of the longest run. 
\r\n \"\"\"\r\n \r\n lista_tmp = []\r\n mono_inc = []\r\n mono_dec = []\r\n max = 0\r\n \r\n if len(L) < 2:\r\n return 0\r\n \r\n if len(L) == 2:\r\n if L[1] >= L[0] and lista_tmp == []:\r\n mono_inc.extend(L)\r\n return mono_inc\r\n else:\r\n mono_dec.extend(L)\r\n return mono_dec\r\n \r\n \r\n for i in range (len(L)-1):\r\n \r\n if L[i+1] <= L[i] and lista_tmp == []:\r\n \r\n lista_tmp.append(L[i])\r\n lista_tmp.append(L[i+1])\r\n if i+2 == len(L):\r\n mono_dec.append(lista_tmp)\r\n break\r\n else:\r\n continue\r\n \r\n \r\n if L[i+1] <= L[i]:\r\n lista_tmp.append(L[i+1])\r\n if i+2 == len(L):\r\n mono_dec.append(lista_tmp)\r\n break\r\n else:\r\n continue\r\n \r\n else:\r\n mono_dec.append(lista_tmp)\r\n lista_tmp = []\r\n if i+2 == len(L):\r\n break\r\n \r\n \r\n for f in mono_dec:\r\n x = len(f)\r\n if x > max:\r\n resultado_dec = []\r\n resultado_dec.extend(f)\r\n max = x\r\n \r\n \r\n \r\n \r\n return resultado\r\n\r\n#print (longest_run([10,4,3,8,3,4,5,7,7,2]))\r\nprint (longest_run([5,4,10])) \r\n#print (longest_run([8,3,4,5,7,7,7,8,2]))\r\n#print (longest_run([4,1,2,3,4]))\r\n#print (longest_run([2,3,1,4,6,8,9,3])) \r\n#print (longest_run([1,2,8,3,2,9,10,2])) \r\n#print (longest_run([10, 4, 3, 8, 3, 4, 5, 7, 7, 2] )) ","sub_path":"final exame/problem4_b.py","file_name":"problem4_b.py","file_ext":"py","file_size_in_byte":2184,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"26116072","text":"from gather import create_app\nfrom gather.utils.exts import db\nfrom flask_script import Manager\nfrom flask_migrate import Migrate, MigrateCommand\n\n\n# 创建flask应用对象\napp = create_app(\"develop\")\n\napp.template_folder = 'static/html'\nfrom flask import request, render_template, jsonify\n@app.route('/test_post', methods=['POST', 'GET'])\ndef test_post():\n if request.method == 'GET':\n return render_template('test_post.html')\n elif request.method == 'POST' :\n return jsonify('ok')\n\nmanager = Manager(app)\n\nmigrate = Migrate(app, db)\nmanager.add_command(\"db\", MigrateCommand)\n\n\nif __name__ == '__main__':\n manager.run()\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":645,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"464084582","text":"import cv2\nimport numpy as np\nimport os\n\n# set video name\ncap = cv2.VideoCapture('cloud1.mp4')\n\nos.mkdir(\"img\")\nos.chdir(\"./img\")\n\n# Check if camera opened successfully\nif (cap.isOpened()== False): \n\tprint(\"Error opening video stream or file\")\n\ni = 0\n\nprint(\"creating img...\")\nwhile(cap.isOpened()):\n\t# Capture frame-by-frame\n\tret, frame = cap.read()\n\tif ret == True:\n\t\tname = \"img\"+str(i)+\".jpg\"\n\t\tcv2.imwrite(name,frame)\n\t\ti +=1\n\t\t# Press Q on keyboard to\texit\n\t\tif cv2.waitKey(25) & 0xFF == ord('q'):\n\t\t\tbreak\n\telse: \n\t\tbreak\n\ncap.release()\ncv2.destroyAllWindows()","sub_path":"streamToImg.py","file_name":"streamToImg.py","file_ext":"py","file_size_in_byte":567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"240332673","text":"from flask import Flask\n\nfrom configs import ConfigDealer\n\n\ndef create_app(config_name: str) -> Flask:\n \"\"\"Создание объекта точки входа приложаения\"\"\"\n app = Flask(__name__)\n config = ConfigDealer.get_main(config_name)\n app.config.from_object(config)\n config.init_app(app)\n from .api import api\n app.register_blueprint(api, url_prefix='/api/v1')\n return 
app\n","sub_path":"app/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":418,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"645417037","text":"def calc(b, e):\n cnt = e // b\n return (b + e) * cnt // 2\n\nN = int(input())\n\nans = 0\n\nfor i in range(1, N+1):\n ans += calc(i, N//i * i)\n\nprint(ans)\n","sub_path":"practice/python/ABC172/D.py","file_name":"D.py","file_ext":"py","file_size_in_byte":150,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"76793053","text":"# -*- coding: utf-8 -*-\n\n# 1.\t使用scrapy���架进行爬取(10分)\n# 9.\t将抓取的数据存储到json文件中,并且进行展示(10分)\n# 10.\t全程需要有运行调试的过程,并且有注释 (10分)\n\nimport scrapy\nfrom ..items import TengxunzhaopinItem\n\n\nclass ZhaopinSpider(scrapy.Spider):\n name = 'zhaopin'\n\n # allowed_domains = ['xxx']\n # start_urls = ['xxx']\n\n # 2.\t抓取前10页的信息(10分)\n def start_requests(self):\n for page in range(0, 100, 10):\n url = 'https://hr.tencent.com/position.php?&start=' + str(page) + '#a'\n yield scrapy.Request(url=url, callback=self.parse)\n\n def parse(self, response):\n # 3.\t请使用xpath进行数据解析 (10分)\n all_tr = response.xpath(\"//tr[@class='even'] | //tr[@class='odd']\")\n for tr in all_tr:\n # 4.\t抓取职位名称,职位类别(10分)\n mingcheng = tr.xpath(\"./td[@class='l square']/a/text()\")[0].extract()\n leibie = tr.xpath(\"./td[2]/text()\")\n if len(leibie) == 0:\n leibie = '空'\n else:\n leibie = leibie[0].extract()\n # 5.\t抓取当前职位招聘的人数 (10分)\n renshu = tr.xpath(\"./td[3]/text()\")[0].extract()\n # 6.\t抓取当前职位招聘的地点(10分)\n didian = tr.xpath(\"./td[4]/text()\")[0].extract()\n # 7.\t抓取当前职位发布的照片时间(10分)\n shijian = tr.xpath(\"./td[5]/text()\")[0].extract()\n href = 'https://hr.tencent.com/' + tr.xpath(\"./td[@class='l square']/a/@href\")[0].extract()\n yield scrapy.Request(url=href, callback=self.parse_xq,\n meta={'mingcheng': mingcheng, 'leibie': leibie, 'renshu': renshu, 'didian': didian,\n 'shijian': shijian, 'href': href})\n\n # 8.\t进入详细页,获取工作职责和工作要求(10分)\n def parse_xq(self, response):\n mingcheng = response.meta['mingcheng']\n leibie = response.meta['leibie']\n renshu = response.meta['renshu']\n didian = response.meta['didian']\n shijian = response.meta['shijian']\n href = response.meta['href']\n print(mingcheng, href)\n zhize = response.xpath(\"string(//tr[@class='c'][1]/td[@class='l2']/ul[@class='squareli'])\")[0].extract()\n yaoqiu = response.xpath(\"string(//tr[@class='c'][2]/td[@class='l2']/ul[@class='squareli'])\")[0].extract()\n item = TengxunzhaopinItem()\n item['mingcheng'] = mingcheng\n item['leibie'] = leibie\n item['renshu'] = renshu\n item['didian'] = didian\n item['shijian'] = shijian\n item['href'] = href\n item['zhize'] = zhize\n item['yaoqiu'] = yaoqiu\n yield item\n","sub_path":"tengxunzhaopin/tengxunzhaopin/spiders/zhaopin.py","file_name":"zhaopin.py","file_ext":"py","file_size_in_byte":2832,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"323311139","text":"# -*- coding:ascii -*-\nfrom mako import runtime, filters, cache\nUNDEFINED = runtime.UNDEFINED\n__M_dict_builtin = dict\n__M_locals_builtin = locals\n_magic_number = 10\n_modified_time = 1425621186.763828\n_enable_loop = True\n_template_filename = 'C:\\\\Python34\\\\Scripts\\\\colonial\\\\user\\\\templates/thankyou.html'\n_template_uri = 'thankyou.html'\n_source_encoding = 'ascii'\nimport os, os.path, re\n_exports = ['contents', 'top_banner']\n\n\ndef _mako_get_namespace(context, name):\n try:\n return 
context.namespaces[(__name__, name)]\n except KeyError:\n _mako_generate_namespaces(context)\n return context.namespaces[(__name__, name)]\ndef _mako_generate_namespaces(context):\n pass\ndef _mako_inherit(template, context):\n _mako_generate_namespaces(context)\n return runtime._inherit_from(context, '/home/templates/base.htm', _template_uri)\ndef render_body(context,**pageargs):\n __M_caller = context.caller_stack._push_frame()\n try:\n __M_locals = __M_dict_builtin(pageargs=pageargs)\n def contents():\n return render_contents(context._locals(__M_locals))\n STATIC_URL = context.get('STATIC_URL', UNDEFINED)\n def top_banner():\n return render_top_banner(context._locals(__M_locals))\n __M_writer = context.writer()\n __M_writer('\\r\\n\\r\\n\\r\\n')\n if 'parent' not in context._data or not hasattr(context._data['parent'], 'top_banner'):\n context['self'].top_banner(**pageargs)\n \n\n __M_writer('\\r\\n\\r\\n\\r\\n\\r\\n')\n if 'parent' not in context._data or not hasattr(context._data['parent'], 'contents'):\n context['self'].contents(**pageargs)\n \n\n return ''\n finally:\n context.caller_stack._pop_frame()\n\n\ndef render_contents(context,**pageargs):\n __M_caller = context.caller_stack._push_frame()\n try:\n def contents():\n return render_contents(context)\n STATIC_URL = context.get('STATIC_URL', UNDEFINED)\n __M_writer = context.writer()\n __M_writer('\\r\\n\\r\\n\\t \\t
\\r\\n
\\r\\n \\r\\n \"Thank\\r\\n\\r\\n \\t\\t
\\r\\n
\\r\\n\\r\\n\\r\\n
\\r\\n\\t

Join or Die (attributed to Benjamin Franklin)

\\r\\n\\t

\\r\\n\\t This cartoon originally appeared during the French and Indian War. It was recycled to stimulate the American Colonies to unite against the British Rule. We are grateful you have joined us! Together we will preserve our nations Divine History.\\r\\n\\t

\\r\\n\\t
\\r\\n\\t
\\r\\n\\t \\tHome\\r\\n\\t
\\r\\n\\r\\n\\t\\r\\n')\n return ''\n finally:\n context.caller_stack._pop_frame()\n\n\ndef render_top_banner(context,**pageargs):\n __M_caller = context.caller_stack._push_frame()\n try:\n def top_banner():\n return render_top_banner(context)\n __M_writer = context.writer()\n __M_writer('\\r\\n\\t
Thank You!
\\r\\n')\n return ''\n finally:\n context.caller_stack._pop_frame()\n\n\n\"\"\"\n__M_BEGIN_METADATA\n{\"uri\": \"thankyou.html\", \"line_map\": {\"67\": 4, \"27\": 0, \"52\": 10, \"37\": 1, \"73\": 4, \"42\": 6, \"59\": 10, \"60\": 15, \"61\": 15, \"79\": 73}, \"filename\": \"C:\\\\Python34\\\\Scripts\\\\colonial\\\\user\\\\templates/thankyou.html\", \"source_encoding\": \"ascii\"}\n__M_END_METADATA\n\"\"\"\n","sub_path":"user/cached_templates/templates/thankyou.html.py","file_name":"thankyou.html.py","file_ext":"py","file_size_in_byte":3573,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"344555199","text":"import numpy as np\nimport pandas as pd\n\n\n# 1\nphotos_period = 5\nbus_period = 8\nnumber_of_days = 400\n\n# 2\nbus_stop = pd.DataFrame(index=range(number_of_days))\n\n# 3\nbus_stop['photos'] = bus_stop.index % photos_period == 0\nbus_stop['schedule'] = bus_stop.index % bus_period == 0\n\n# 4\nbus_stop['representation'] = bus_stop['photos'] & bus_stop['schedule']\n\n# 5\nstop_count = bus_stop['schedule'].sum()\nstop_rep_count = bus_stop['representation'].sum()\npercentage = 100 * stop_rep_count / stop_count\nprint(f'There was {stop_count} stops.')\nprint(f'We caught {stop_rep_count} stops.')\nprint(f'We capture {percentage:.2f}% of the schedule.')\n\n# 6\nsampled_stops = bus_stop[bus_stop['representation']]\nsampled_stops_periods = np.diff(sampled_stops.index)\nprint(f'The smallest time interval is {sampled_stops_periods.min()} days.')\n","sub_path":"scripts/bus_simulation.py","file_name":"bus_simulation.py","file_ext":"py","file_size_in_byte":820,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"306582651","text":"from app.readinglist import link\n\nclass ReadingList:\n \"\"\"Base List Object for Personal and Team Lists\"\"\"\n def __init__(self, name, created_by, shared = False, **kwargs):\n self.name = name\n self.reading_list = list()\n self.shared = shared\n self.created_by = created_by\n self.owner = kwargs.get('owner', created_by)\n \n def validate_link(self, list_entry):\n \"\"\"test link to see if it is a link object from app.readinglist.link\"\"\"\n valid_classes = (link.Link, link.Book, ReadingList)\n if type(list_entry) in valid_classes:\n return True\n else:\n return\n \n def add_item(self, list_entry):\n \"\"\"adds an item (Link, Book, or List) to self.ReadingList\"\"\" \n if self.validate_link(list_entry):\n\n if list_entry not in self.reading_list:\n self.reading_list.append(list_entry)\n else:\n print('Item Already Exists in ReadingList')\n\n else:\n raise TypeError('link must be a Link or Book')\n \n def delete_link(self, list_entry):\n \"\"\"link must be a Link or Book\"\"\"\n if list_entry in self.reading_list: \n self.reading_list.remove(list_entry)\n else:\n print('Item is not in reading_list')\n \n \nclass TeamLists(ReadingList):\n \"\"\"Team Reading List\"\"\"\n def __init__(self, team, shared = False):\n self.team = team\n\n","sub_path":"app/readinglist/readinglist.py","file_name":"readinglist.py","file_ext":"py","file_size_in_byte":1468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"531395140","text":"# coding=utf-8\n\n\"\"\"\nauthor: wlc\nfunction: 知乎检索业务逻辑层\n\"\"\"\n\n# 引入外部库\n\n# 引入内部库\nfrom src.dao.zhihuDao import *\nfrom src.entity.retrieveResult import *\n\n\nclass ZhihuOperator:\n\tdef __init__ (self):\n\t\t# 该业务逻辑功能\n\t\tself.intent = '知乎检索'\n\n\t\t# 该业务逻辑子功能\n\t\tself.subintent = 
{\n\t\t\t0: '知乎热榜检索',\n\t\t\t1: '关键字检索'\n\t\t}\n\n\tdef get_billboard (self) -> RetrieveResult:\n\t\t\"\"\"\n\t\t获取知乎热榜内容\n\t\t:return:\n\t\t\"\"\"\n\t\t# 检索对象创建\n\t\tdata = RetrieveResult(intent=self.intent, subintent=self.subintent[0])\n\n\t\t# 信息检索\n\t\tdata.set_data(ZhihuDao.get_billboard_result())\n\n\t\treturn data\n","sub_path":"src/service/zhihuOperator.py","file_name":"zhihuOperator.py","file_ext":"py","file_size_in_byte":666,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"279465273","text":"\nimport argparse\nimport pickle\nimport numpy as np\nimport sklearn\nfrom sklearn import linear_model, svm\nimport tqdm\nfrom collections import defaultdict\nimport seaborn as sn\nimport pandas as pd\nimport sys\nsys.path.append(\"inlp/\")\nfrom inlp import debias\nimport matplotlib.pyplot as plt\n\ndef load_data(path):\n\n with open(path, \"rb\") as f:\n\n return pickle.load(f)\n\ndef run_inlp(train_dev_datasets, classifier, num_classifiers, run_on_all):\n\n type2proj = {}\n\n print(\"Keys: {}\".format(train_dev_datasets.keys()))\n alpha = 1e-3\n early_stopping = True\n \n \n if classifier == \"sgd-log\":\n clf = sklearn.linear_model.SGDClassifier\n params = {\"max_iter\": 3000, \"early_stopping\": early_stopping, \"random_state\": 0, \"n_jobs\": 8, \"loss\": \"log\", \"fit_intercept\": False, \"alpha\": alpha}\n \n if classifier == \"sgd-hinge\":\n clf = sklearn.linear_model.SGDClassifier\n #params = {\"early_stopping\": early_stopping, \"random_state\": 1, \"n_jobs\": 8, \"loss\": \"hinge\", \"alpha\": alpha}\n params = {\"max_iter\": 2000, \"early_stopping\": early_stopping, \"random_state\": 1, \"n_jobs\": 8, \"fit_intercept\": False}\n \n if classifier == \"sgd-perceptron\":\n clf = sklearn.linear_model.SGDClassifier\n params = {\"early_stopping\": early_stopping, \"random_state\": 0, \"n_jobs\": 8, \"loss\": \"perceptron\", \"alpha\": alpha}\n \n elif classifier == \"svm\":\n clf = sklearn.svm.LinearSVC\n params = {\"max_iter\": 1000, \"dual\": False, \"random_state\": 0}\n \n # individual types of RCs\n \n for positive_type in tqdm.tqdm(train_dev_datasets.keys()):\n\n train_x, train_y, train_is_rc = train_dev_datasets[positive_type][\"train\"]\n dev_x, dev_y, dev_is_rc = train_dev_datasets[positive_type][\"dev\"]\n P, rowspace_projections, Ws, accs = debias.get_debiasing_projection(clf, params, num_classifiers, 768, True,\n 0, train_x, train_y, dev_x, dev_y, by_class = False, Y_train_main = False, Y_dev_main = False, dropout_rate = 0)\n print(\"norms:\", [np.linalg.norm(w) for w in Ws])\n print(\"accs:\", accs)\n #print(\"orthogonality test:\\n\", np.array(Ws).squeeze(1).dot(np.array(Ws).squeeze(1).T))\n print(\"==============================================================\")\n \n type2proj[positive_type] = P\n \n # all RCs\n \"\"\"\n train_x, train_y = np.concatenate([train_dev_datasets[positive_type][\"train\"][0] for positive_type in train_dev_datasets.keys()], axis = 0), np.concatenate([train_dev_datasets[positive_type][\"train\"][1] for positive_type in train_dev_datasets.keys()], axis = 0)\n dev_x, dev_y = np.concatenate([train_dev_datasets[positive_type][\"dev\"][0] for positive_type in train_dev_datasets.keys()], axis = 0), np.concatenate([train_dev_datasets[positive_type][\"dev\"][1] for positive_type in train_dev_datasets.keys()], axis = 0)\n \n P, rowspace_projections, Ws = debias.get_debiasing_projection(clf, params, num_classifiers, 768, True,\n 0, train_x, train_y, dev_x, dev_y, by_class = False, Y_train_main = 
False, Y_dev_main = False, dropout_rate = 0)\n type2proj[\"all\"] = P \n \"\"\"\n \n return type2proj\n \ndef plot(labels, results, layer):\n\n df_cm = pd.DataFrame(results, index = labels,\n columns = labels)\n plt.figure(figsize = (10,7))\n sn.heatmap(df_cm, annot=True)\n plt.title(\"Recall on test positives (columns) for each training positive type (rows). {}\".format(layer))\n #plt.show()\n plt.savefig(\"../results/plots/recall-pairs-{}.png\".format(layer), dpi=600) \n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(\n description='collect training datasets',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n\n parser.add_argument('--train-dev-path', dest='train_dev_path', type=str,\n default=\"../data/datasets.5000a.layer=6.masked=False.pickle\",\n help='input_path')\n parser.add_argument('--classifier', dest='classifier', type=str,\n default=\"svm\",\n help='sgd/svm')\n parser.add_argument('--num-classifiers', dest='num_classifiers', type=int,\n default=8,\n help='number of inlp classifiers')\n parser.add_argument('--all', dest='all', type=int,\n default=0,\n help='whether to run on all RC vs. non-RC')\n \n args = parser.parse_args()\n run_on_all = args.all == 1\n \n layer = \"layer=\"+str(args.train_dev_path.split(\".\")[-3].split(\"=\")[-1])\n masked = \"masked=\"+str(args.train_dev_path.split(\".\")[-2].split(\"=\")[-1])\n\n if layer == \"layer=-1\": layer = \"layer=12\"\n \n train_dev_datasets = load_data(args.train_dev_path)\n print(layer)\n type2proj = run_inlp(train_dev_datasets, args.classifier, args.num_classifiers, run_on_all)\n with open(\"../data/type2P.{}.iters={}.classifier={}.{}.pickle\".format(layer, args.num_classifiers, args.classifier, masked), \"wb\") as f:\n \n pickle.dump(type2proj, f)\n","sub_path":"old/src/run_inlp.py","file_name":"run_inlp.py","file_ext":"py","file_size_in_byte":5260,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
{"seq_id":"400189274","text":"# >>> 0x61\n# 97\n\n# https://docs.python.org/3/reference/datamodel.html#object.__iadd__\n# https://docs.python.org/3/library/operator.html#inplace-operators\n\nfrom collections.abc import Iterable, Iterator, MutableSequence\n\n# a = [1, 2]\n\n# >>> a += [3, 4] # modifies in place; calls __iadd__() -> extend(iterable)\n# >>> a\n# [1, 2, 3, 4]\n#\n# >>> a += (3, 4) # modifies in place; calls __iadd__() -> extend(iterable)\n# >>> a\n# [1, 2, 3, 4]\n#\n# >>> a + [3, 4] # returns a new list; calls __add__()\n# [1, 2, 3, 4]\n#\n# >>> a + (3, 4)\n# Traceback (most recent call last):\n# File \"<stdin>\", line 1, in <module>\n# a + (3, 4)\n# TypeError: can only concatenate list (not \"tuple\") to list\n\n\n# >>> i = 1024\n# >>>\n# >>> i.to_bytes(2,'big')\n# b'\\x04\\x00'\n# >>> len(b'1024')\n# 4\n\n\nfrom threading import Thread\nimport time\n\ndef foo():\n return 1/0\n\ndef bar():\n time.sleep(3)\n print('bar start')\n foo()\n print('bar stop')\n\ntry:\n t = Thread(target=bar)\n t.start()\nexcept ZeroDivisionError:\n print('~~~')\n\nwhile True:\n time.sleep(1)\n print('everything is ok')\n if t.is_alive():\n print('alive')\n else:\n print('dead')\n","sub_path":"faq.py","file_name":"faq.py","file_ext":"py","file_size_in_byte":1166,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
{"seq_id":"542009139","text":"import networkx as nx\n\nOPEN = '.'\nELF = 'E'\nGOBLIN = 'G'\n\nclass Elf:\n def __init__(self):\n self.health = 200\n self.attack = 40\n\n def __str__(self):\n return ELF\n\nclass Goblin:\n def __init__(self):\n self.health = 200\n self.attack = 3\n\n def 
__str__(self):\n return GOBLIN\n\ndef load(filename):\n with open(filename) as f:\n cave = dict()\n for y, line in enumerate(f):\n for x, c in enumerate(line):\n if c == OPEN:\n cave[x, y] = OPEN\n elif c == ELF:\n cave[x, y] = Elf()\n elif c == GOBLIN:\n cave[x, y] = Goblin()\n\n return cave\n\ndef print_cave(cave):\n maxx = max(x for x, y in cave.keys())\n maxy = max(y for x, y in cave.keys())\n for y in range(0, maxy + 2):\n print(''.join(str(cave.get((x, y), '#')) for x in range(0, maxx + 2)))\n\ndef reading_order_key(item):\n return item[0][1], item[0][0]\n\ndef distance(a, b):\n return abs(a[0] - b[0]) + abs(a[1] - b[1])\n\ndef adjacent_to(x, y):\n return [(x, y - 1), (x - 1, y), (x + 1, y), (x, y + 1)]\n\ndef main():\n cave = load('day15.txt')\n\n def attack_adjacent(xy, char, targets):\n adjacent_targets = [(txy, tc) for txy, tc in targets if distance(xy, txy) == 1]\n if len(adjacent_targets):\n txy, tchar = min(adjacent_targets, key=lambda x: (x[1].health, reading_order_key(x)))\n tchar.health -= char.attack\n if tchar.health > 0:\n print(char, 'at', xy, 'attacks', tchar, 'at', txy, 'remaining health', tchar.health)\n else:\n print(char, 'at', xy, 'kills', tchar, 'at', txy)\n cave[txy] = OPEN\n if tchar.__class__ == Elf:\n raise Exception('an elf died')\n return True\n\n def do_turn():\n turn_order = [item for item in cave.items() if item[1] != OPEN]\n turn_order.sort(key=reading_order_key)\n for xy, char in turn_order:\n if char not in cave.values():\n continue # died this turn\n\n targets = [(txy, tc) for txy, tc in cave.items() if tc != OPEN and tc.__class__ != char.__class__]\n if len(targets) == 0:\n return True\n\n if attack_adjacent(xy, char, targets):\n continue\n\n graph = nx.Graph()\n maxx = max(x for x, y in cave.keys())\n maxy = max(y for x, y in cave.keys())\n for y in range(0, maxy + 1):\n for x in range(0, maxx + 1):\n src = cave.get((x, y), '#')\n dest = cave.get((x, y + 1), '#')\n if (src == OPEN or src == char) and (dest == OPEN or dest == char):\n graph.add_edge((x, y), (x, y + 1))\n dest = cave.get((x + 1, y), '#')\n if (src == OPEN or src == char) and (dest == OPEN or dest == char):\n graph.add_edge((x, y), (x + 1, y))\n\n if xy in graph:\n potential_destinations = set()\n for txy, tchar in targets:\n for axy in adjacent_to(*txy):\n if cave.get(axy, '#') == OPEN:\n potential_destinations.add(axy)\n\n distances = nx.multi_source_dijkstra_path_length(graph, {xy})\n connected_destinations = [(txy, distances[txy]) for txy in potential_destinations if txy in distances]\n if len(connected_destinations):\n destination, dist = min(connected_destinations, key=lambda i: (i[1], i[0][1], i[0][0]))\n # paths = nx.all_shortest_paths(graph, xy, destination)\n # step = min((p[1] for p in paths), key=lambda x: (x[1], x[0]))\n step = min((potential_step for potential_step in adjacent_to(*xy) if cave.get(potential_step, '#') == OPEN),\n key=lambda s: nx.shortest_path_length(graph, s, destination))\n print(char, 'at', xy, 'moves to', step, 'heading for', destination)\n assert cave[step] == OPEN\n cave[xy] = OPEN\n cave[step] = char\n attack_adjacent(step, char, targets)\n continue\n\n print(char, 'at', xy, 'does nothing')\n\n for turn in range(0, 1000):\n print('turn', turn)\n if do_turn():\n print_cave(cave)\n remaining_hp = sum(i.health for i in cave.values() if i != OPEN)\n print('remaining hp', remaining_hp)\n print('answer', remaining_hp * turn)\n return\n\n\nif __name__ == '__main__':\n 
main()\n","sub_path":"2018/day15_2.py","file_name":"day15_2.py","file_ext":"py","file_size_in_byte":4745,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"628302967","text":"import logging\nlog = logging.getLogger(__name__)\nclass pool_structure:\n def __init__(self,diff):\n self.harvesters:dict={}\n self.works={}\n self.pool_diff=diff\n self.log=log\n \nclass pool_harvester:\n def __init__(self):\n self.harvester_id=\"\"\n self.plots={}\n\nclass pool_plot:\n def __init__(self):\n self.pk=None\n self.harvester_id=\"\"\n","sub_path":"chia/farmer/pool_structure.py","file_name":"pool_structure.py","file_ext":"py","file_size_in_byte":397,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"419664782","text":"from __future__ import with_statement\nfrom alembic import context\nfrom logging.config import fileConfig\n\n# this is the Alembic Config object, which provides\n# access to the values within the .ini file in use.\nconfig = context.config\n\n# Interpret the config file for Python logging.\n# This line sets up loggers basically.\nfileConfig(config.config_file_name)\n\nimport sqlalchemy as sa\nfrom changes.db.types.enum import Enum\nfrom changes.db.types.guid import GUID\nfrom changes.db.types.json import JSONEncodedDict\nsa.Enum = Enum\nsa.GUID = GUID\nsa.JSONEncodedDict = JSONEncodedDict\n\n# add your model's MetaData object here\n# for 'autogenerate' support\nfrom flask import current_app\nfrom changes.config import create_app, db\n\nimport warnings\nfrom sqlalchemy.exc import SAWarning\nwarnings.simplefilter(\"ignore\", SAWarning)\n\nif not current_app:\n app = create_app()\nelse:\n app = current_app\napp.app_context().push()\ntarget_metadata = db.metadata\n\n# force registration of models\nimport changes.models # NOQA\n\n# other values from the config, defined by the needs of env.py,\n# can be acquired:\n# my_important_option = config.get_main_option(\"my_important_option\")\n# ... etc.\n\n\ndef run_migrations_offline():\n \"\"\"Run migrations in 'offline' mode.\n\n This configures the context with just a URL\n and not an Engine, though an Engine is acceptable\n here as well. 
By skipping the Engine creation\n we don't even need a DBAPI to be available.\n\n Calls to context.execute() here emit the given string to the\n script output.\n\n \"\"\"\n connection = db.engine.connect()\n context.configure(\n connection=connection,\n target_metadata=target_metadata,\n )\n\n with context.begin_transaction():\n context.run_migrations()\n\n\ndef run_migrations_online():\n \"\"\"Run migrations in 'online' mode.\n\n In this scenario we need to create an Engine\n and associate a connection with the context.\n\n \"\"\"\n connection = db.engine.connect()\n context.configure(\n connection=connection,\n target_metadata=target_metadata,\n )\n\n try:\n with context.begin_transaction():\n context.run_migrations()\n finally:\n connection.close()\n\nif context.is_offline_mode():\n run_migrations_offline()\nelse:\n run_migrations_online()\n","sub_path":"migrations/env.py","file_name":"env.py","file_ext":"py","file_size_in_byte":2289,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"221925288","text":"import logging\nimport os\nimport queue\nimport sys\nimport threading\nimport time\n\nfrom .server import Client\nfrom .utils import file_mtime\n\nlog = logging.getLogger('deoplete.jedi.worker')\nworkers = []\nwork_queue = queue.Queue()\ncomp_queue = queue.Queue()\n\n\nclass Worker(threading.Thread):\n _exc_info = None\n \"\"\"Exception info being set in threads.\"\"\"\n daemon = True\n\n def __init__(self, python_path, in_queue, out_queue, desc_len=0,\n server_timeout=10, short_types=False, show_docstring=False,\n debug=False):\n self._client = Client(python_path, desc_len, short_types,\n show_docstring, debug)\n self.server_timeout = server_timeout\n self.in_queue = in_queue\n self.out_queue = out_queue\n super(Worker, self).__init__()\n self.log = log.getChild(self.name)\n\n def completion_work(self, cache_key, extra_modules, source, line, col,\n filename, options):\n try:\n completions = self._client.completions(cache_key, source, line, col,\n filename, options)\n except Exception:\n self._exc_info = sys.exc_info()\n return\n modules = {f: file_mtime(f) for f in extra_modules}\n if completions is not None:\n for c in completions:\n m = c['module']\n if m and m not in modules and os.path.exists(m):\n modules[m] = file_mtime(m)\n\n self.results = {\n 'cache_key': cache_key,\n 'time': time.time(),\n 'modules': modules,\n 'completions': completions,\n }\n\n def run(self):\n while True:\n try:\n work = self.in_queue.get()\n self.log.debug('Got work')\n\n self.results = None\n t = threading.Thread(target=self.completion_work, args=work)\n t.start()\n t.join(timeout=self.server_timeout)\n\n if self._exc_info is not None:\n break\n\n if self.results:\n self.out_queue.put(self.results)\n self.log.debug('Completed work')\n else:\n self.log.warn('Restarting server because it\\'s taking '\n 'too long')\n # Kill all but the last queued job since they're most\n # likely a backlog that are no longer relevant.\n while self.in_queue.qsize() > 1:\n self.in_queue.get()\n self.in_queue.task_done()\n self._client.restart()\n self.in_queue.task_done()\n except Exception:\n self.log.debug('Worker error', exc_info=True)\n\n def join(self):\n \"\"\"Join the thread and raise any exception from it.\n\n This is used and picked up by :func:`Source._ensure_workers_are_alive`.\n \"\"\"\n threading.Thread.join(self)\n if self._exc_info:\n raise self._exc_info[1]\n\n\ndef start(python_path, count, desc_len=0, server_timeout=10, short_types=False,\n 
show_docstring=False, debug=False):\n while count > 0:\n t = Worker(python_path, work_queue, comp_queue, desc_len,\n server_timeout, short_types, show_docstring, debug)\n workers.append(t)\n t.start()\n log.debug('Started worker: %r', t)\n count -= 1\n","sub_path":"rplugin/python3/deoplete/sources/deoplete_jedi/worker.py","file_name":"worker.py","file_ext":"py","file_size_in_byte":3512,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"371397394","text":"import os\nimport os.path\nimport json\nimport re\n\n\nclass EmptyClass:\n pass\n\ndef EmptyMethod():\n pass\n\n@staticmethod\ndef EmptyStaticMethod():\n pass\n \ndef VariablesInInstance(Instance):\n EmptyClassInstanceDictionaryKeys=list(dict(EmptyClass.__dict__).keys())\n InstanceDictionary=dict(type(Instance).__dict__)\n InstanceDictionaryKeys=list(InstanceDictionary.keys())\n Cursor=0\n End=InstanceDictionaryKeys.__len__()\n while(Cursor=3):\n self.Code=Cells[0]\n self.Name=Cells[1]\n self.StandardRole=Cells[2]\n elif(Cells.__len__()==2):\n self.Code=Cells[0]\n self.StandardRole=Cells[1]\n elif(Cells.__len__()==1):\n self.Code=Cells[0]\n \n def ImportIDSketch(self,IDSketch):\n IDContent=IDSketch.Content\n Cells=IDContent.split(\"|\")\n Code=Cells[2]\n if(not((self.Code==None)or(self.Code==Code))):\n return\n self.Language=Cells[0]\n self.Corpus=Cells[1]\n self.Code=Cells[2]\n self.Age=Cells[3]\n self.Sex=Cells[4]\n self.Group=Cells[5]\n self.SES=Cells[6]\n self.IDRole=Cells[7]\n self.Education=Cells[8]\n self.Custom=Cells[9]\n \nclass Main(Daton):\n SpeakerCode=None\n Sketch=None\n Words=None\n Parameters=None\n Dependents=None\n \n def __init__(self,SpeakerCode=None,Sketch=None,Words=[],Parameters=[],Dependents=[]):\n self.SpeakerCode=SpeakerCode\n self.Sketch=Sketch\n self.Words=Words\n self.Parameters=Parameters\n self.Dependents=Dependents\n \n @staticmethod\n def DeleteConsecutiveSpace(Content):\n '''\n '''\n Buffer=Content\n while(Buffer.find(\" \")>=0):\n Buffer=Buffer.replace(\" \",\" \")\n return Buffer\n\n def HandleSpecialMarker(self,Content):\n '''\n '''\n Segments=Content.split(\" \")\n Handled=\"\"\n for Segment in Segments:\n Index=Segment.find(\"@\")\n if(Index>=0):\n Parts=Segment.split(\"@\")\n Scope=Parts[0]\n Note=\"@\"+Parts[1]\n ThisParameter=Parameter(Scope=Scope,Note=Note)\n self.Parameters.append(ThisParameter)\n Handled=Handled+Segment.replace(Note,\"\")+\" \"\n else:\n Handled=Handled+Segment+\" \"\n\n Handled=Handled[:-1]\n Handled=Main.DeleteConsecutiveSpace(Handled)\n return Handled\n \n def HandleSimpleEvent(self,Content):\n Segments=Content.split(\" \")\n Handled=\"\"\n for Segment in Segments:\n Index=Segment.find(\"&=\")\n if(Index>=0):\n Parts=Segment.split(\"&=\")\n Scope=Content\n Note=\"&=\"+Parts[1]\n ThisParameter=Parameter(Scope=Scope,Note=Note)\n self.Parameters.append(ThisParameter)\n Handled=Handled+Segment.replace(Note,\"\")+\" \"\n else:\n Handled=Handled+Segment+\" \"\n Handled=Handled[:-1]\n Handled=Main.DeleteConsecutiveSpace(Handled)\n return Handled\n \n def HandleInterposedWord(self,Content):\n Segments=Content.split(\" \")\n Handled=\"\"\n for Segment in Segments:\n Index=Segment.find(\"&*\")\n if(Index>=0):\n Parts=Segment.split(\"&*\")\n Scope=Content\n Note=\"&*\"+Parts[1]\n ThisParameter=Parameter(Scope=Scope,Note=Note)\n self.Parameters.append(ThisParameter)\n Handled=Handled+Segment.replace(Note,\"\")+\" \"\n else:\n Handled=Handled+Segment+\" \"\n Handled=Handled[:-1]\n Handled=Main.DeleteConsecutiveSpace(Handled)\n return 
Handled\n \n def HandlePause(self,Content):\n Segments=Content.split(\" \")\n Handled=\"\"\n for Segment in Segments:\n Index1=Segment.find(\"(.)\")\n Index2=Segment.find(\"(..)\")\n Index3=Segment.find(\"(...)\")\n if((Index1>=0)or\n (Index2>=0)or\n (Index3>=0)):\n Scope=Content\n if(Index1>=0):\n Note=\"(.)\"\n elif(Index2>=0):\n Note=\"(..)\"\n elif(Index3>=0):\n Note=\"(...)\"\n ThisParameter=Parameter(Scope=Scope,Note=Note)\n self.Parameters.append(ThisParameter)\n Handled=Handled+Segment.replace(Note,\"\")+\" \"\n else:\n Handled=Handled+Segment+\" \"\n Handled=Handled[:-1]\n Handled=Main.DeleteConsecutiveSpace(Handled)\n return Handled\n def HandleLongVocalEvent(self,Content):\n Handled=Content\n return Handled\n def HandleLongNonvocalEvent(self,Content):\n Handled=Content\n return Handled\n \n def HandleScopedSymbol(self,Content):\n Rest=Content\n Pattern=\"\\\\<(.*?)\\\\> \\\\[(.*?)\\\\](.*)\"\n Handled=Content\n while(Rest.__len__()>0):\n Matched=re.search(Pattern,Rest)\n if((not(Matched==None))and(Matched.lastindex>=3)):\n Scope=Matched.group(1)\n Note=\"[\"+Matched.group(2)+\"]\"\n ThisParameter=Parameter(Scope=Scope,Note=Note)\n self.Parameters.append(ThisParameter)\n Handled=Handled.replace(\"<\"+Scope+\">\",Scope)\n Handled=Handled.replace(Note,\"\")\n Rest=Matched.group(3)\n else:\n break\n Handled=Main.DeleteConsecutiveSpace(Handled)\n return Handled\n \n def HandleSquareBraketedSymbol(self,Content):\n Rest=Content\n Pattern=\"\\\\[(.*?)\\\\](.*)\"\n Handled=Content\n while(Rest.__len__()>0):\n Matched=re.search(Pattern,Rest)\n if((not(Matched==None))and(Matched.lastindex>=2)):\n Scope=Content\n Note=\"[\"+Matched.group(1)+\"]\"\n ThisParameter=Parameter(Scope=Scope,Note=Note)\n self.Parameters.append(ThisParameter)\n Handled=Handled.replace(Note,\"\")\n Rest=Matched.group(2)\n else:\n break\n Handled=Main.DeleteConsecutiveSpace(Handled)\n return Handled\n \n def HandleEvents(self,Content):\n '''\n '''\n Handled=Content\n Handled=self.HandleScopedSymbol(Handled)\n Handled=self.HandleSquareBraketedSymbol(Handled)\n Handled=self.HandleLongVocalEvent(Handled)\n Handled=self.HandleLongNonvocalEvent(Handled)\n Handled=self.HandlePause(Handled)\n Handled=self.HandleSimpleEvent(Handled)\n Handled=self.HandleSpecialMarker(Handled)\n return Handled\n \n def ImportMainSketch(self,MainSketch):\n '''\n '''\n self.SpeakerCode=MainSketch.Tag\n self.Sketch=MainSketch\n Handled=self.Sketch.Content\n Handled=self.HandleEvents(Handled)\n Cells=Handled.split(\" \")\n for Cell in Cells:\n ThisWord=Word(Plain=Cell)\n self.Words.append(ThisWord)\n Dependets=MainSketch.Dependents\n for Dependent in Dependets:\n if(Dependent.Tag==\"mor\"):\n ThisMor=Mor()\n ThisMor.ImportMorSegment(Dependent.Content)\n self.Dependents.append(ThisMor)\n else:\n self.Dependents.append(Dependent)\n \nclass Mor(Daton):\n '''\n '''\n Plain=None\n Morphemes=None\n Parameters=None\n def __init__(self,Plain=None,Morphemes=[],Parameters=[]):\n self.Plain=Plain\n self.Morphemes=Morphemes\n self.Parameters=Parameters\n def ImportMorSegment(self,MorSegment):\n '''\n '''\n Cells=MorSegment.split(\"~\")\n if(Cells.__len__()==1):\n ThisMorpheme=Morpheme()\n ThisMorpheme.ImportMorPart(Cells[0])\n self.Morphemes.append(ThisMorpheme)\n elif(Cells.__len__()>1):\n CellCursor=0\n while(CellCursor=2):\n self.Content=Cells[1]\n self.Category=Cells[0]\n elif(Cells.__len__()==1):\n self.Content=Cells[0]\n\nclass Word(Daton):\n '''\n '''\n Plain=None\n Parameters=None\n def __init__(self,Plain=None,Parameters=[]):\n '''\n '''\n 
self.Plain=Plain\n self.Parameters=Parameters\n\nclass Parameter(Daton):\n '''\n '''\n Scope=None\n Note=None\n def __init__(self,Scope=None,Note=None):\n '''\n '''\n self.Scope=Scope\n self.Note=Note \n\nclass Clamshell(Daton):\n '''\n '''\n ChaHomePath=None\n ChaPaths=None\n Sketches=None\n Participants=None\n Mains=None\n\n def __init__(self,ChaHomePath=None,ChaPaths=[],Sketches=[],Participants=[],Mains=[]):\n '''\n '''\n self.ChaHomePath=ChaHomePath\n self.ChaPaths=ChaPaths\n self.Sketches=Sketches\n self.Participants=Participants\n self.Mains=Mains\n\n @staticmethod\n def ReadClamshellFile(ClamshellPath):\n '''\n '''\n File=open(ClamshellPath,\"r\",-1,\"utf-8\")\n Content=File.read()\n Clamshell=json.loads(Content)\n File.close()\n return Clamshell\n\n @staticmethod\n def WriteClamshellFile(Clamshell,ClamshellPath):\n '''\n '''\n File=open(ClamshellPath,\"w\",-1,\"utf-8\")\n Content=json.dumps(Clamshell)\n Content=File.write(Content)\n File.flush()\n File.close()\n \n @staticmethod\n def ListChaPathInFolder(FolderPath):\n '''\n '''\n FuturePaths=[FolderPath]\n ChaPaths=[]\n while(FuturePaths.__len__()>0):\n PresentPaths=FuturePaths\n FuturePaths=[]\n for PresentPath in PresentPaths:\n Names=os.listdir(PresentPath)\n for Name in Names:\n Path=PresentPath+Name\n if(os.path.isdir(Path)):\n FuturePath=Path+\"/\"\n FuturePaths.append(FuturePath)\n if(os.path.isfile(Path)):\n if(Path[-4:]==\".cha\"):\n ChaPaths.append(Path)\n return ChaPaths\n\n @staticmethod\n def ReadChaFile(ChaPath):\n '''\n '''\n File=open(ChaPath,\"r\",-1,\"utf-8\")\n Content=File.read()\n File.close()\n return Content\n\n @staticmethod\n def SplitChaLines(ChaContent):\n '''\n '''\n Lines=ChaContent.split(\"\\n\")\n ChaLines=[]\n Cursor=-1\n ChaLine=\"\"\n while(Cursor=0):\n Tag=ChaLine[1:Index]\n Content=ChaLine[Index+2:]\n else:\n Tag=ChaLine[1:]\n Content=\"\"\n if(ChaLine[0]==\"@\"):\n ThisSketch=Sketch(\n Type=Sketch.SketchTypeHeader(),\n Tag=Tag,\n Content=Content,\n Dependents=[]\n )\n Sketches.append(ThisSketch)\n SketchCursor=SketchCursor+1\n if(ChaLine[0]==\"*\"):\n ThisSketch=Sketch(\n Type=Sketch.SketchTypeMain(),\n Tag=Tag,\n Content=Content,\n Dependents=[]\n )\n Sketches.append(ThisSketch)\n SketchCursor=SketchCursor+1\n if(ChaLine[0]==\"%\"):\n ThisSketch=Sketch(\n Type=Sketch.SketchTypeDependent,\n Tag=Tag,\n Content=Content,\n Dependents=[]\n )\n Sketches[SketchCursor].Dependents.append(ThisSketch)\n ChaLineCursor=ChaLineCursor+1\n return Sketches\n \n def MakeSketches(self):\n '''\n '''\n FolderPath=self.ChaHomePath\n ChaPaths=Clamshell.ListChaPathInFolder(FolderPath)\n self.ChaPaths=ChaPaths\n for ChaPath in ChaPaths:\n ChaContent=Clamshell.ReadChaFile(ChaPath)\n ChaLines=Clamshell.SplitChaLines(ChaContent)\n TheseSketches=Clamshell.MakeSketchesFromChaLines(ChaLines)\n self.Sketches=self.Sketches+TheseSketches\n \n def ParseParticipants(self):\n '''\n '''\n for ThisSketch in self.Sketches:\n if(ThisSketch.Type==Sketch.SketchTypeHeader()):\n if(ThisSketch.Tag==\"Participants\"):\n ParticipantContent=ThisSketch.Content\n ParticipantParts=ParticipantContent.split(\",\")\n for ParticipantPart in ParticipantParts:\n ThisParticipant=Participant()\n ThisParticipant.ImportParticipantPart(ParticipantPart)\n self.Participants.append(Participant)\n if(ThisSketch.Tag==\"ID\"):\n BufferParticipant=Participant()\n BufferParticipant.ImportIDSketch(ThisSketch)\n ParticipantCursor=0\n while(ParticipantCursor\n\n'''\nfor detection purpose, we need to label the data with 0, 1 \n'''\nimport mujoco_py as mj\nfrom robot 
import robot\nimport numpy as np\nimport cv2\nimport json\nimport os\nfrom tqdm import tqdm\nimport random\nfrom config import *\nimport tensorflow as tf\nfrom PIL import Image\n\nclass data_generator:\n \n def __init__(self, filename, camera_num, data_num, data_name, picture_size = [image_size, image_size], save_path = data_path):\n self.camera_num = camera_num\n if data_name == \"validation\":\n data_num = int( data_num * 0.5)\n self.data_num = data_num\n self.robot = robot(filename)\n self.size = picture_size\n self.save_path = os.path.join(save_path, data_name)\n # init include delete the old pictures:\n self.model = self.robot.model\n self.jnt_range = self.model.jnt_range\n \n\n def __one_picture(self, qpos):\n # we can only use position\n self.robot.set_by_type(qpos)\n for i in range(self.camera_num):\n # get picture from different angle\n if i == 0:\n img = self.robot.get_picture(self.size, i)\n else:\n s_img = self.robot.get_picture(self.size, i)\n img = np.concatenate([img, s_img], axis = 1)\n # concatenate those image together.\n return img\n\n def __one_array(self, qpos):\n # generate [0, 1, 0, 1, 0, 0 ....]\n heat_map = np.zeros([self.size[0], self.size[1], 1])\n center = int(self.size[0]/2.0)\n cv2.circle(heat_map, (int(qpos[0]*280)+center, center), 13, (255), -1)\n return heat_map \n\n def __gen_qpos_fun(self):\n # this file is used to generate the random qpos\n # we will make it self-adjust\n qpos = []\n for one_range in self.jnt_range:\n one_p = random.uniform(one_range[0], one_range[1])\n qpos.append(one_p)\n return qpos\n\n def img_name(self, group_num):\n # 1_2.img means the second picture for the first qpos\n name = str(group_num) + \".jpg\"\n return name\n \n def data_generates(self):\n # generate a set of datas:\n qpos_dict = {}\n for i in tqdm(range(self.data_num)):\n qpos = self.__gen_qpos_fun()\n img = self.__one_picture(np.array(qpos))\n name = self.img_name(i)\n path = os.path.join(self.save_path, name)\n cv2.imwrite(path, img)\n hm_img = self.__one_array(qpos)\n hm_path = os.path.join(self.save_path, 'hm_' + name) # heatmap saved next to the image, with an 'hm_' prefix\n cv2.imwrite(hm_path, hm_img)\n qpos_dict[str(i)] = qpos # record the pose so tf_record_write can look the sample up later\n\n with open('qpos.json', 'w') as f:\n json.dump(qpos_dict, f)\n print(\"data generating finished!\")\n \ndef tf_record_write(name):\n writer = tf.python_io.TFRecordWriter(\"{}.tfrecords\".format(name))\n \n # read json\n with open(\"./qpos.json\", \"r\") as f:\n qpos = json.load(f)\n \n # read in the picture:\n for i in tqdm(qpos.keys()):\n # i is a string like \"1\", \"2\"\n # it reads out the groups. 
the id in groups will be settled later\n # name will be settled as \"1.jpg\", no more groups.\n img_path = os.path.join(data_path, name, i) + \".jpg\"\n hm_img_path = os.path.join(data_path, name, 'hm_'+i) + \".jpg\" # matches the 'hm_' prefix used by the writer\n img = Image.open(img_path)\n hm_img = Image.open(hm_img_path)\n img_raw = img.tobytes() # convert into bytes\n hm_raw = hm_img.tobytes()\n example = tf.train.Example(features = tf.train.Features(feature = {\n \"label\": tf.train.Feature(bytes_list = tf.train.BytesList(value = [hm_raw])),\n \"img_raw\": tf.train.Feature(bytes_list = tf.train.BytesList(value = [img_raw]))\n }))\n writer.write(example.SerializeToString())\n \n writer.close()\n print(\" tf write done!\")\n # implement net:\n \nif __name__ == \"__main__\":\n # write in picture\n os.system('rm -rf ./data/train/*')\n os.system('rm -rf ./data/validation/*')\n gen_names = ['train', 'validation']\n for gen_name in gen_names:\n dg = data_generator(\"test.xml\", camera_num, data_num, gen_name)\n dg.data_generates()\n\n print(\"Write in picture form\")\n tf_record_write(gen_name)\n\n# read and write in tf_record.\n\n","sub_path":"drl_test/config/vgg/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":4280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
{"seq_id":"511463693","text":"import sqlite3\r\ncon = sqlite3.connect(\"Workers\")\r\ncon.row_factory = sqlite3.Row\r\n\r\ncur = con.cursor()\r\n\r\ndef tabela():\r\n cur.executescript(\"\"\"\r\n DROP TABLE IF EXISTS pracownicy;\r\n CREATE TABLE IF NOT EXISTS pracownicy (\r\n id INTEGER PRIMARY KEY ASC,\r\n imie varchar(250) NOT NULL,\r\n nazwisko varchar(250) NOT NULL,\r\n miejscowosc varchar(250) NOT NULL,\r\n zarobki INTEGER NOT NULL\r\n)\"\"\")\r\n\r\ndef wyswietl():\r\n with con:\r\n cur.execute('SELECT * FROM pracownicy')\r\n wynik_prac=cur.fetchall()\r\n print('\\nPracownicy:')\r\n for x in wynik_prac:\r\n print(x['id'],x['imie'],x['nazwisko'],x['miejscowosc'],x['zarobki'])\r\n\r\n\r\ndef wyswietlsort():\r\n with con:\r\n cur.execute('SELECT * FROM pracownicy ORDER BY imie ASC')\r\n wynik_prac = cur.fetchall()\r\n print('\\nPracownicy:')\r\n for x in wynik_prac:\r\n print(x['id'], x['imie'], x['nazwisko'], x['miejscowosc'], x['zarobki'])\r\n\r\ndef dodaj(imie, nazwisko, miejscowosc, zarobki):\r\n with con:\r\n cur.execute('INSERT INTO pracownicy VALUES(NULL, ?, ?, ?, ?)', (imie, nazwisko, miejscowosc, zarobki))\r\n print('\\nDodano pracownika')\r\n\r\ndef usun(id):\r\n with con:\r\n cur.execute('DELETE FROM pracownicy WHERE id=?', (id,))\r\n print('\\nUsunięto pracownika')\r\n\r\ndef zmien(id, msc):\r\n with con:\r\n cur.execute('UPDATE pracownicy SET miejscowosc=? 
WHERE id=?', (msc, id))\r\n print('\\nZaktualizowano miejsce zamieszkania.')\r\n\r\ndef znajdz(num):\r\n with con:\r\n cur.execute('SELECT * FROM pracownicy WHERE miejscowosc=?', (num,))\r\n y = cur.fetchall()\r\n print('\\nPracownik:')\r\n for x in y:\r\n print(x['id'], x['imie'], x['nazwisko'], x['miejscowosc'], x['zarobki'])\r\n\r\ndef znajdzmax():\r\n with con:\r\n cur.execute('SELECT * FROM pracownicy WHERE zarobki = (SELECT MAX(zarobki) FROM pracownicy)', ())\r\n y = cur.fetchall()\r\n print('\\nNajwiększy darmozjad:')\r\n for x in y:\r\n print(x['id'], x['imie'], x['nazwisko'], x['miejscowosc'], x['zarobki'])\r\n\r\ndef znajdzmin():\r\n with con:\r\n cur.execute('SELECT * FROM pracownicy WHERE zarobki = (SELECT MIN(zarobki) FROM pracownicy)', ())\r\n y = cur.fetchall()\r\n print('\\nNajmniejszy darmozjad:')\r\n for x in y:\r\n print(x['id'], x['imie'], x['nazwisko'], x['miejscowosc'], x['zarobki'])\r\ndef menu():\r\n print(\"Witaj, co chcesz zrobić?\")\r\n print(\"(1) Utwórz bazę / zastąp obecną\")\r\n print(\"(2) Wyświetl bazę\")\r\n print(\"(2.2) Wyświetl bazę (z sortowaniem alfabetycznym po imieniu)\")\r\n print(\"(3) Dodaj pracownika\")\r\n print(\"(4) Zlikwiduj pracownika\")\r\n print(\"(5) Zaktualizuj miejsce zamieszkania pracownika\")\r\n print(\"(6) Znajdz pracownika\")\r\n print(\"(6.1) Znajdz największego darmozjada\")\r\n print(\"(6.2) Znajdz najmniejszego darmozjada\")\r\n print(\"(7) Zakończ program\")\r\n\r\nmenu()\r\nopcja = input(\"Jaką opcje wybierasz: \")\r\nif opcja == \"1\":\r\n tabela()\r\n print(\"Utworzono bazę\")\r\nelif opcja == \"2\":\r\n wyswietl()\r\nelif opcja == \"2.2\":\r\n wyswietlsort()\r\nelif opcja == \"3\":\r\n imi = input(\"Podaj imie: \")\r\n naz = input(\"Podaj nazwisko: \")\r\n msc = input(\"Podaj miejsce zamieszkania: \")\r\n zar = input(\"Podaj zarobki: \")\r\n dodaj(imi, naz, msc, zar)\r\nelif opcja == \"4\":\r\n ID = input(\"Podaj ID do usunięcia z listy: \")\r\n usun(ID)\r\nelif opcja == \"5\":\r\n ID = input(\"Podaj ID do edycji miejsca zamieszkania: \")\r\n msc = input(\"Podaj nowe miejsce zamieszkania: \")\r\n zmien(ID, msc)\r\nelif opcja == \"6\":\r\n msc = input(\"Podaj miejscowość osoby której chcesz znaleść: \")\r\n znajdz(msc)\r\nelif opcja == \"6.1\":\r\n znajdzmax()\r\nelif opcja == \"6.2\":\r\n znajdzmin()\r\nelse:\r\n print(\"Koniec programu\")\r\n\r\ncon.close()\r\n","sub_path":"Laboratoria sem2/LAB16-zad/LAB16-z2.py","file_name":"LAB16-z2.py","file_ext":"py","file_size_in_byte":3818,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
{"seq_id":"150096766","text":"\"\"\"\nSend a file from the client to the server\n* The file may be a text file or a binary file\n\"\"\"\n\nfrom socket import *\n\ns = socket()\ns.bind(('127.0.0.1',8888))\ns.listen(5)\n\nc,addr = s.accept()\nprint(\"Connect from\",addr)\n\n# recv the content and write it into a local file\n# open the local file in wb mode\n\n# Open the file\nf = open('rb.jpg','wb')\n\n# Receive the content in a loop and write it locally\nwhile True:\n data = c.recv(1024) # bytes\n if data == b'##':\n break\n f.write(data)\n\ndata = c.recv(1024)\nprint(data.decode())\n\n\nf.close()\nc.close()\ns.close()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"udp传输模型/recv_file.py","file_name":"recv_file.py","file_ext":"py","file_size_in_byte":550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
{"seq_id":"32504327","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jul 23 11:25:33 2019\n\n@author: ntr002\n\"\"\"\nimport WaPOR\nfrom datetime import datetime\nimport requests\nimport os\nfrom WaPOR import GIS_functions as gis\n\n\ndef main(Dir, 
data='AETI',Startdate='2009-01-01', Enddate='2018-12-31', \n latlim=[-40.05, 40.05], lonlim=[-30.5, 65.05],level=1, \n version = 2, Waitbar = 1):\n \"\"\"\n This function downloads yearly WAPOR LCC data\n\n Keyword arguments:\n Dir -- 'C:/file/to/path/'\n Startdate -- 'yyyy-mm-dd'\n Enddate -- 'yyyy-mm-dd'\n latlim -- [ymin, ymax] (values must be between -40.05 and 40.05)\n lonlim -- [xmin, xmax] (values must be between -30.05 and 65.05)\n \"\"\"\n print(f'\\nDownload WaPOR Level {level} yearly {data} data for the period {Startdate} till {Enddate}')\n\n # Download data\n WaPOR.API.version=version\n bbox=[lonlim[0],latlim[0],lonlim[1],latlim[1]]\n catalog=WaPOR.API.getCatalog()\n \n if level==1:\n cube_code=f\"L1_{data}_A\"\n elif level==2:\n cube_code=f'L2_{data}_A'\n elif level==3:\n print('Level 3 data only available in some areas with specific data cube code below: ') \n for i,row in catalog.iterrows(): \n if (f'L3' in row['code'])&(f'{data}' in row['code'])&(row['code'][-1]=='A'):\n print('%s: %s'%(row['caption'],row['code']))\n cube_code=input('Insert Level 3 cube code for the selected area: ')\n else:\n print('Invalid Level')\n \n try:\n cube_info=WaPOR.API.getCubeInfo(cube_code)\n multiplier=cube_info['measure']['multiplier']\n except:\n print('ERROR: Cannot get cube info. Check if WaPOR version has cube %s'%(cube_code))\n return None\n time_range='{0},{1}'.format(Startdate,Enddate)\n try:\n df_avail=WaPOR.API.getAvailData(cube_code,time_range=time_range)\n except:\n print('ERROR: cannot get list of available data')\n return None\n if Waitbar == 1:\n import WaPOR.WaitbarConsole as WaitbarConsole\n total_amount = len(df_avail)\n amount = 0\n WaitbarConsole.printWaitBar(amount, total_amount, prefix = 'Progress:', suffix = 'Complete', length = 50)\n \n Dir=os.path.join(Dir,'WAPOR.v%s_yearly_%s' %(version,cube_code))\n if not os.path.exists(Dir):\n os.makedirs(Dir)\n \n for index,row in df_avail.iterrows(): \n download_url=WaPOR.API.getCropRasterURL(bbox,cube_code,\n row['time_code'],\n row['raster_id'],\n WaPOR.API.Token,\n print_job=False) \n filename='{0}.tif'.format(row['raster_id'])\n outfilename=os.path.join(Dir,filename) \n download_file=os.path.join(Dir,'raw_{0}.tif'.format(row['raster_id']))\n #Download raster file\n resp=requests.get(download_url) \n open(download_file,'wb').write(resp.content) \n driver, NDV, xsize, ysize, GeoT, Projection= gis.GetGeoInfo(download_file)\n Array = gis.OpenAsArray(download_file,nan_values=True)\n CorrectedArray=Array*multiplier\n gis.CreateGeoTiff(outfilename,CorrectedArray,\n driver, NDV, xsize, ysize, GeoT, Projection)\n os.remove(download_file) \n\n if Waitbar == 1: \n amount += 1\n WaitbarConsole.printWaitBar(amount, total_amount, \n prefix = 'Progress:', \n suffix = 'Complete', \n length = 50)\n return Dir\n \n","sub_path":"modules/WaPOR/download_yearly.py","file_name":"download_yearly.py","file_ext":"py","file_size_in_byte":3702,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"495201125","text":"# convert PyTorch gazenet.py to Gluon yaw.\n\nimport mxnet as mx\nfrom gluoncv import model_zoo, data, utils\nfrom matplotlib import pyplot as plt\nfrom mxnet.gluon import HybridBlock\nfrom mxnet.gluon.model_zoo import vision\n\nfrom mxnet import gluon\nfrom mxnet.gluon import nn\nfrom mxnet.autograd import record\nfrom mxnet.gluon import Block\nimport math\nfrom mxnet import nd\nimport time\n\ndef freeze_bn(block):\n try:\n iter(block)\n for b in block:\n freeze_bn(b)\n return\n except 
TypeError:\n pass\n \n if isinstance(block, nn.BatchNorm):\n #print('freeze', block.name)\n block._kwargs['use_global_stats'] = True\n elif isinstance(block, vision.BottleneckV1):\n freeze_bn(block.body)\n freeze_bn(block.downsample)\n\nclass Gazenet_mxJiang_yaw_zoo(HybridBlock):\n def __init__(self, num_bins, **kwargs):\n \n ctx = kwargs.pop('ctx')\n super(Gazenet_mxJiang_yaw_zoo, self).__init__(**kwargs)\n self.net = mx.gluon.nn.HybridSequential(prefix='')\n with self.net.name_scope():\n mx.random.seed(int(time.time()))\n self.model_resnet50 = vision.resnet50_v1(pretrained=True, ctx=ctx, root='./')\n #self.model_resnet50.features[1]._kwargs['use_global_stats'] = True\n freeze_bn(self.model_resnet50.features)\n #print('net features:', self.model_resnet50.features)\n self.net.add(self.model_resnet50)\n self.model_yaw = mx.gluon.nn.Dense(num_bins)\n self.model_yaw.collect_params().initialize(mx.initializer.Uniform(1/math.sqrt(2048)), ctx=ctx, force_reinit=True)\n self.model_yaw.bias.set_data(mx.nd.random.uniform(-0.2,0.2,shape=(num_bins,),ctx=ctx))\n self.net.add(self.model_yaw) \n \n def hybrid_forward(self, F, x):\n pre_yaw = self.net(x)\n model_net_params = self.net.collect_params()\n #print('conv1',model_net_params['resnetv10_conv0_weight'].data())\n #print('bn1 running mean',model_net_params['resnetv10_batchnorm0_running_mean'].data())\n #print('bn1 running var',model_net_params['resnetv10_batchnorm0_running_var'].data())\n #print('bn1 gamma',model_net_params['resnetv10_batchnorm0_gamma'].data())\n #print('bn1 beta',model_net_params['resnetv10_batchnorm0_beta'].data())\n \n return pre_yaw","sub_path":"gazenet_mxJiang_yaw_zoo.py","file_name":"gazenet_mxJiang_yaw_zoo.py","file_ext":"py","file_size_in_byte":2365,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"544554239","text":"\nimport numpy as np\nimport torch\ndef get_data(df_nodes,df_edges,max_size):\n\n\n df_nodes = df_nodes.set_index(\"graph_id\")\n idc = list(set(list(df_nodes.index)))\n\n nodes = []\n for idx in idc:\n tmp = df_nodes.loc[idx]\n nodes.append({\"label\":\"Node\",\"data\":torch.Tensor(tmp.drop(\"node_id\",1).values),\"node_id\":tmp[\"node_id\"].values})\n\n df_edges = df_edges.set_index(\"graph_id\")\n idc = list(set(list(df_edges.index)))\n\n edges = []\n for idx in idc:\n tmp = df_edges.loc[idx]\n edges.append({\"label\":\"Edge\",\"data\":torch.Tensor(tmp.drop([\"node_id_from\",\"node_id_to\"],1).values),\"node_from_id\":tmp[\"node_id_from\"].values,\"node_to_id\":tmp[\"node_id_to\"].values})\n\n\n assert(len(nodes)==len(edges))\n\n masks = []\n\n\n for i in range(0,len(nodes)):\n\n n_nodes = len(nodes[i][\"data\"])\n n_edges = len(edges[i][\"data\"])\n\n \n idc = []\n for k in range(0,n_edges):\n\n \n idc_from = np.argwhere(edges[i][\"node_from_id\"][k] == nodes[i][\"node_id\"])[0,0]\n idc_to = np.argwhere(edges[i][\"node_to_id\"][k] == nodes[i][\"node_id\"])[0,0]\n \n idc.append([n_nodes+k,idc_from])\n idc.append([idc_from,n_nodes+k])\n #mask[n_nodes+k, idc_from]=True\n #mask[idc_from,n_nodes+k]=True\n\n idc.append([n_nodes+k,idc_to])\n idc.append([idc_to,n_nodes+k])\n #mask[n_nodes+k, idc_to]=True\n #mask[idc_to,n_nodes+k]=True\n\n idc.append([idc_from,idc_to])\n idc.append([idc_to,idc_from])\n #mask[idc_from,idc_to] = True\n #mask[idc_to,idc_from] = True\n\n iv = torch.LongTensor(idc).t()\n mask = [iv,torch.ones(len(idc)).type(torch.long),torch.Size([max_size,max_size])]\n #mask = 
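The freeze_bn helper in the Gluon gaze-net script above recurses over child blocks and flips every nn.BatchNorm to use_global_stats=True so that running statistics stay fixed during fine-tuning. For comparison, the same idea in PyTorch looks roughly like this (a sketch, not part of the original code):

import torch.nn as nn

def freeze_bn(model: nn.Module) -> None:
    # modules() already yields every descendant, so no manual recursion needed.
    bn_types = (nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d)
    for m in model.modules():
        if isinstance(m, bn_types):
            m.eval()  # use stored running mean/var instead of batch statistics
            for p in m.parameters():
                p.requires_grad_(False)  # also freeze the affine gamma/beta

Note that a later model.train() call re-enables batch statistics, so this is typically re-applied after every switch back to training mode.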
torch.sparse.LongTensor(iv,torch.ones(len(idc)).type(torch.long),torch.Size([max_size,max_size]))\n masks.append(mask)\n\n\n return nodes,edges,masks","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1955,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"463808637","text":"# coding: utf-8\n\nfrom utg import relations as r\n\nRESTRICTIONS = {\n r.WORD_TYPE.NOUN: { r.NUMBER.PLURAL: (r.GENDER,),\n r.NOUN_FORM.COUNTABLE: (r.NUMBER,) },\n r.WORD_TYPE.ADJECTIVE:{ r.NUMBER.PLURAL: (r.GENDER,),\n r.ADJECTIVE_CATEGORY.RELATIVE: (r.GRADE,),\n r.ADJECTIVE_CATEGORY.POSSESSIVE: (r.GRADE,),\n r.ADJECTIVE_FORM.SHORT: (r.CASE, r.ANIMALITY, r.GRADE) },\n r.WORD_TYPE.PRONOUN:{ r.NUMBER.PLURAL: (r.GENDER,),\n r.PRONOUN_CATEGORY.REFLEXIVE: (r.NUMBER, r.GENDER, r.PERSON),\n r.PRONOUN_CATEGORY.INTERROGATIVE: (r.PERSON,),\n r.PRONOUN_CATEGORY.RELATIVE: (r.NUMBER, r.GENDER, r.PERSON),\n r.PRONOUN_CATEGORY.DEMONSTRATIVE: (r.PERSON,),\n r.PRONOUN_CATEGORY.ATTRIBUTIVE: (r.PERSON,),\n r.PRONOUN_CATEGORY.NEGATIVE: (r.PERSON,),\n r.PRONOUN_CATEGORY.VAGUE: (r.PERSON,),\n r.PRONOUN_CATEGORY.MUTUAL: (r.PERSON,), },\n r.WORD_TYPE.VERB:{ r.VERB_FORM.INFINITIVE: (r.TIME, r.PERSON, r.NUMBER, r.GENDER),\n r.VERB_FORM.CONDITIONAL: (r.TIME, r.PERSON,),\n r.VERB_FORM.IMPERATIVE: (r.TIME, r.GENDER,),\n r.NUMBER.PLURAL: (r.GENDER,),\n r.TIME.PAST: (r.PERSON, ),\n r.TIME.PRESENT: (r.GENDER,),\n r.TIME.FUTURE: (r.GENDER,) },\n r.WORD_TYPE.PARTICIPLE:{ r.NUMBER.PLURAL: (r.GENDER,),\n r.PARTICIPLE_FORM.SHORT: (r.TIME, r.VOICE, r.CASE, r.ANIMALITY) },\n r.WORD_TYPE.INTEGER:{},\n r.WORD_TYPE.TEXT:{},\n r.WORD_TYPE.PREPOSITION:{}}\n\n\nINVERTED_RESTRICTIONS = {}\nfor word_type, word_type_restrictions in RESTRICTIONS.iteritems():\n inverted_restrictions = {}\n for property, property_groups in word_type_restrictions.iteritems():\n for property_group in property_groups:\n if property_group not in inverted_restrictions:\n inverted_restrictions[property_group] = set()\n inverted_restrictions[property_group].add(property)\n INVERTED_RESTRICTIONS[word_type] = {k: frozenset(v) for k, v in INVERTED_RESTRICTIONS.items()}\n\nPRESETS = {r.NOUN_FORM.COUNTABLE: r.NUMBER.PLURAL}\n","sub_path":"utg/restrictions.py","file_name":"restrictions.py","file_ext":"py","file_size_in_byte":2376,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"305300765","text":"\n\n#calss header\nclass _STYLUS():\n\tdef __init__(self,): \n\t\tself.name = \"STYLUS\"\n\t\tself.definitions = [u'a small, pointed device on a record player that picks up the sound signals stored on a record', u'a small, pointed metal or plastic stick that you use to make some devices work']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'nouns'\n\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/nouns/_stylus.py","file_name":"_stylus.py","file_ext":"py","file_size_in_byte":456,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"200212181","text":"number_of_test_cases = int(input())\n\nnot_checked = []\n\ndef check():\n not_checked.sort()\n print(not_checked)\n for y in range(len(not_checked)-1):\n\n potentialPrefix = not_checked[y]\n couldHavePrefix = not_checked[y+1]\n\n print(potentialPrefix, couldHavePrefix)\n\n if len(couldHavePrefix) >= len(potentialPrefix):\n if 
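The inversion loop at the bottom of restrictions.py above builds a local inverted_restrictions dict for each word type but then freezes INVERTED_RESTRICTIONS.items() — the outer, still-incomplete dict — and it relies on Python 2's iteritems(). A Python 3 sketch of what the loop apparently intends, inverting each word type's restriction map (RESTRICTIONS as defined in the record):

INVERTED_RESTRICTIONS = {}
for word_type, word_type_restrictions in RESTRICTIONS.items():
    inverted = {}
    for prop, property_groups in word_type_restrictions.items():
        for group in property_groups:
            inverted.setdefault(group, set()).add(prop)
    # Freeze the *inner* mapping that was just built for this word type.
    INVERTED_RESTRICTIONS[word_type] = {k: frozenset(v) for k, v in inverted.items()}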
couldHavePrefix.startswith(potentialPrefix):\n return(\"NO\")\n\n return (\"YES\")\n\n\nfor i in range(number_of_test_cases):\n number_of_phone_numbers = int(input())\n for x in range(number_of_phone_numbers):\n phone_number = input()\n not_checked.append(phone_number)\n\n print(check())\n not_checked.clear()\n\n\n\n","sub_path":"Kattis/Phone List.py","file_name":"Phone List.py","file_ext":"py","file_size_in_byte":702,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"11805038","text":"import unittest\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker\nimport os\n\nfrom narcis_api import models\n\nclass ModelsTestCase(unittest.TestCase):\n def setUp(self):\n self.engine = create_engine('sqlite:///models_test.db')\n models.Base.metadata.create_all(self.engine)\n self.DBSession = sessionmaker(bind=self.engine)\n self.session = self.DBSession()\n\n self.user = models.User(\n id=0,\n username='dan.stark',\n display_name='Dan Stark',\n first_name='Dan',\n last_name='Stark',\n email='dan.stark@email.net',\n )\n self.project = models.Project(\n id=1,\n name='Test Project',\n name_slug='test-project',\n private=True,\n url='http://test.domain.com/project/',\n user_id=0,\n )\n self.device = models.Device(\n id=2,\n name='Macbook Air',\n name_slug='macbook-air',\n )\n self.operating_system = models.OperatingSystem(\n id=3,\n name='OS X 10.9.5',\n name_slug='os-x-10-8-3',\n )\n self.browser = models.Browser(\n id=4,\n name='Chrome 43',\n name_slug='chrome-43',\n )\n self.platform = models.Platform(\n id=5,\n device_id=2,\n operating_system_id=3,\n browser_id=4,\n project_id=1,\n )\n self.page = models.Page(\n id=6,\n name='Test Page',\n name_slug='test-page',\n path='test/page/',\n project_id=1,\n )\n self.branch = models.Branch(\n id=7,\n name='master',\n name_slug='master',\n project_id=1,\n )\n self.build = models.Build(\n id=8,\n name='0000000',\n name_slug='0000000',\n branch_id=7,\n )\n self.screenshot = models.Screenshot(\n id=9,\n image='http://img.server.com/0000000',\n page_id=6,\n platform_id=5,\n build_id=8,\n )\n\n def tearDown(self):\n os.remove('models_test.db')\n\n def test_user_constructor(self):\n self.session.add(self.user)\n self.session.commit()\n assert(self.user.id == 0)\n assert(self.user.username == 'dan.stark')\n assert(self.user.display_name == 'Dan Stark')\n assert(self.user.first_name == 'Dan')\n assert(self.user.last_name == 'Stark')\n assert(self.user.email == 'dan.stark@email.net')\n\n def test_project_constructor(self):\n self.session.add(self.user)\n self.session.add(self.project)\n self.session.commit()\n assert(self.project.id == 1)\n assert(self.project.name == 'Test Project')\n assert(self.project.name_slug == 'test-project')\n assert(self.project.private == True)\n assert(self.project.url == 'http://test.domain.com/project/')\n assert(self.project.user_id == 0)\n assert(self.project.user == self.user)\n\n def test_device_constructor(self):\n self.session.add(self.device)\n self.session.commit()\n assert(self.device.id == 2)\n assert(self.device.name == 'Macbook Air')\n assert(self.device.name_slug == 'macbook-air')\n\n def test_operating_system_constructor(self):\n self.session.add(self.operating_system)\n self.session.commit()\n assert(self.operating_system.id == 3)\n assert(self.operating_system.name == 'OS X 10.9.5')\n assert(self.operating_system.name_slug == 'os-x-10-8-3')\n\n def test_browser_constructor(self):\n self.session.add(self.browser)\n self.session.commit()\n 
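Phone List.py above uses the standard consistency trick — after lexicographic sorting, any prefix relation must appear between adjacent entries — but ships with debug prints and a length guard that startswith already implies. The same check, compact (a sketch):

def is_consistent(numbers: list) -> bool:
    # After sorting, a number can only be a prefix of its immediate successor.
    numbers = sorted(numbers)
    return not any(b.startswith(a) for a, b in zip(numbers, numbers[1:]))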
assert(self.browser.id == 4)\n assert(self.browser.name == 'Chrome 43')\n assert(self.browser.name_slug == 'chrome-43')\n\n def test_platform_constructor(self):\n self.session.add(self.device)\n self.session.add(self.operating_system)\n self.session.add(self.browser)\n self.session.add(self.user)\n self.session.add(self.project)\n self.session.add(self.platform)\n self.session.commit()\n assert(self.platform.id == 5)\n assert(self.platform.device_id == 2)\n assert(self.platform.operating_system_id == 3)\n assert(self.platform.browser_id == 4)\n assert(self.platform.project_id == 1)\n assert(self.platform.device == self.device)\n assert(self.platform.operating_system == self.operating_system)\n assert(self.platform.browser == self.browser)\n assert(self.platform.project == self.project)\n\n def test_page_constructor(self):\n self.session.add(self.user)\n self.session.add(self.project)\n self.session.add(self.page)\n self.session.commit()\n assert(self.page.id == 6)\n assert(self.page.name == 'Test Page')\n assert(self.page.name_slug == 'test-page')\n assert(self.page.path == 'test/page/')\n assert(self.page.project_id == 1)\n assert(self.page.project == self.project)\n\n def test_branch_constructor(self):\n self.session.add(self.user)\n self.session.add(self.project)\n self.session.add(self.branch)\n self.session.commit()\n assert(self.branch.id == 7)\n assert(self.branch.name == 'master')\n assert(self.branch.name_slug == 'master')\n assert(self.branch.project_id == 1)\n assert(self.branch.project == self.project)\n\n def test_build_constructor(self):\n self.session.add(self.branch)\n self.session.add(self.build)\n self.session.commit()\n assert(self.build.id == 8)\n assert(self.build.name == '0000000')\n assert(self.build.name_slug == '0000000')\n assert(self.build.branch_id == 7)\n assert(self.build.branch == self.branch)\n\n def test_screenshot_constructor(self):\n self.session.add(self.user)\n self.session.add(self.project)\n self.session.add(self.page)\n self.session.add(self.device)\n self.session.add(self.operating_system)\n self.session.add(self.browser)\n self.session.add(self.platform)\n self.session.add(self.build)\n self.session.add(self.screenshot)\n self.session.commit()\n assert(self.screenshot.id == 9)\n assert(self.screenshot.image == 'http://img.server.com/0000000')\n assert(self.screenshot.page_id == 6)\n assert(self.screenshot.platform_id == 5)\n assert(self.screenshot.build_id == 8)\n assert(self.screenshot.page == self.page)\n assert(self.screenshot.platform == self.platform)\n assert(self.screenshot.build == self.build)\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"narcis_api/tests/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":6799,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"484175433","text":"# Williams, Ronald J. “Simple Statistical Gradient-Following Algorithms for\n# Connectionist Reinforcement Learning.” Machine Learning 8, no. 3 (1992): 229–56.\n# associate task\n# 2-armed bandit\n\nimport random\n\nfrom k_arm_bandit_finite_states import KArmedBandit\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom multiprocessing import Pool\nimport time\n\nSQRT_PI = np.sqrt(np.pi)\nK = 2\n\n\ndef squashing_function(input):\n return 1. / (1. 
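The SQLAlchemy test case above creates models_test.db on disk in setUp and deletes it again in tearDown. SQLite's in-memory mode sidesteps the file entirely; a sketch of the setUp change, assuming the same models module as in the record:

from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker

def make_test_session():
    # ":memory:" gives each test a fresh throwaway database; no os.remove needed.
    engine = create_engine("sqlite:///:memory:")
    models.Base.metadata.create_all(engine)  # models: the module under test
    return sessionmaker(bind=engine)()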
+ np.exp(-input))\n\n\nclass GaussianUnit:\n def __init__(self, input_size_, action_space_):\n self.weights_mean = np.zeros(input_size_)\n self.mean = np.random.random(1)*(K-1)\n self.std = 0.1\n self.action_space = np.array(action_space_)\n pass\n\n def __call__(self, input):\n self.mean = squashing_function(self.weights_mean.dot(input)) * (K - 1)\n return np.random.normal(self.mean, self.std)\n\n def characteristic_eligibility(self, x, y):\n s_mean = self.weights_mean.dot(x)\n mu = squashing_function(s_mean)\n partial_mu = (y - mu) / (self.std**2)\n nabla_weights_mean = partial_mu * np.exp(-s_mean) / ((1 + np.exp(-s_mean)) ** 2) * x\n return nabla_weights_mean\n\n\ndef reinforce_algorithm(repeat_times=1000000, random_seed=0):\n # 10 arm bandits\n reward_log_list = np.zeros(int(repeat_times / 100))\n optimal_action_hit_list = np.zeros(int(repeat_times / 100))\n np.random.seed(random_seed)\n env_mean = np.random.normal(.0, 1.0, K)\n\n env = KArmedBandit(env_mean, np.ones(K))\n gu = GaussianUnit(K, range(K))\n state, reward, is_done, _ = env.step(0)\n alpha = 0.001\n base_line_mean = 0\n\n for repeat_i in range(repeat_times):\n x = np.array(state)\n y = gu(x)\n action = round(y)\n if action >= K or action < 0:\n state, reward, is_done, _ = env.step(0)\n reward = -10\n else:\n state, reward, is_done, _ = env.step(action)\n r = reward\n nabla_mean = gu.characteristic_eligibility(x, y)\n alpha_mean = alpha\n gu.weights_mean += (alpha_mean * (r - base_line_mean) * nabla_mean)\n if repeat_i % 100 == 0:\n # alpha *= 0.995\n reward_log_list[int(repeat_i / 100)] = r\n optimal_action_hit_list[int(repeat_i / 100)] = 1 if action == env.optimal_action else 0\n print('---------------------------------------------------')\n print('optimal action:')\n print(env.optimal_action)\n print('mean:')\n print(gu.mean)\n print('std:')\n print(gu.std)\n print('mean learning rate:')\n print(alpha_mean)\n return optimal_action_hit_list, reward_log_list\n\n\ndef experiment():\n experiment_time = 80\n seed_seq = np.random.randint(0, 100000, experiment_time)\n\n repeat_times = 100000\n thread_num = 8\n reward_matrix = []\n optimal_action_hit_matrix = []\n\n for experiment_i in range(int(experiment_time / thread_num)):\n result = []\n pool = Pool()\n for thread_i in range(thread_num):\n result.append(pool.apply_async(reinforce_algorithm, [repeat_times, seed_seq[experiment_i * thread_num+thread_i]]))\n pool.close()\n pool.join()\n for result_i in result:\n optimal_action_hit_thread, reward_thread = result_i.get(timeout=500)\n reward_matrix.append(reward_thread)\n optimal_action_hit_matrix.append(optimal_action_hit_thread)\n average_reward_list = np.zeros(len(reward_matrix[0]))\n average_optimal_action_hit_list = np.zeros(len(optimal_action_hit_matrix[0]))\n for i in reward_matrix:\n average_reward_list += i\n for i in optimal_action_hit_matrix:\n average_optimal_action_hit_list += i\n # plt.plot(average_reward_list / experiment_time, label='reward')\n plt.plot(average_optimal_action_hit_list / experiment_time, label='optimal action rate')\n plt.legend()\n plt.show()\n\n\nif __name__ == '__main__':\n # optimal_action_list, reward_list = reinforce_algorithm()\n experiment()\n","sub_path":"simple_statistical_gradient_following_algorithm_for_connectionist/associate_task.py","file_name":"associate_task.py","file_ext":"py","file_size_in_byte":3990,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"484114203","text":"import viz\r\nimport viztask\r\nviz.go()\r\n\r\n### Add all 
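GaussianUnit.characteristic_eligibility in associate_task.py above hand-derives the REINFORCE eligibility, the gradient of ln N(y; mu(w.x), sigma) for a logistic-squashed mean; note the forward pass scales the mean by (K-1) while the gradient omits that factor, so the two appear to agree only for the script's K=2. Hand-derived gradients like this are cheap to validate with a central finite difference (a sketch; the step size, tolerance, and test point are arbitrary):

import numpy as np

def log_prob(w, x, y, std):
    # Log-density of N(y; mu, std) up to w-independent constants, with the
    # record's K=2 so mu is just the logistic squash of s = w.x.
    s = w.dot(x)
    mu = 1.0 / (1.0 + np.exp(-s))
    return -0.5 * ((y - mu) / std) ** 2

def numeric_grad(w, x, y, std, eps=1e-6):
    g = np.zeros_like(w)
    for i in range(len(w)):
        e = np.zeros_like(w)
        e[i] = eps
        g[i] = (log_prob(w + e, x, y, std) - log_prob(w - e, x, y, std)) / (2 * eps)
    return g

rng = np.random.default_rng(0)
w, x = rng.normal(size=2), rng.normal(size=2)
y, std = 0.7, 0.1
s = w.dot(x)
mu = 1.0 / (1.0 + np.exp(-s))
analytic = (y - mu) / std**2 * np.exp(-s) / (1.0 + np.exp(-s))**2 * x
assert np.allclose(analytic, numeric_grad(w, x, y, std), atol=1e-4)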
the resources. ###############\r\n\r\n#Add a sky with an environment map.\r\nenv = viz.add(viz.ENVIRONMENT_MAP,'sky.jpg')\r\ndome = viz.add('skydome.dlc')\r\ndome.texture(env)\r\n\r\n#Add a maze model.\r\nmaze = viz.add('art/maze.ive')\r\n\r\n#Add balloons.\r\nballoons = []\r\nfor pos in [[.2,3.4],[-3.2,6.8],[-9.8,23.4],\r\n\t[6.4,30.2],[-13.0,33.4] ]:\r\n\tballoon = viz.add('art/balloon.ive' )\r\n\tballoon.setScale( 2,2,2 )\r\n\tballoon.setPosition( pos[0],1.7,pos[1] )\r\n\tballoon.color( viz.RED )\r\n\tballoon.specular( viz.WHITE )\r\n\tballoon.shininess( 128 )\r\n\tballoons.append( balloon )\r\n\t\r\n#Add a popping sound.\r\npop = viz.addAudio( 'art/pop.wav' )\r\n\r\n#Turn on viewpoint collision and set\r\n#its distance buffer.\r\nviz.collision( viz.ON )\r\nviz.collisionbuffer( .1 )\r\n\r\n#Add a subwindow and associated it \r\n#with a viewpoint.\r\nsubwindow = viz.addWindow()\r\nsubview = viz.addView()\r\nsubwindow.setView( subview )\r\nsubwindow.setSize( .35,.35 )\r\nsubwindow.setPosition( .65,1)\r\nsubwindow.visible( viz.OFF )\r\n\r\n#Link the subview to the position \r\n#of the main view but put it up a distance.\r\nsubview_link = viz.link( viz.MainView, subview )\r\nsubview_link.setMask( viz.LINK_POS )\r\nsubview_link.setOffset( [0,8,0] )\r\nsubview.setEuler( [0, 90, 0 ])\r\n\r\n#Link a dart to the main view.\r\ndart = viz.add( 'art/dart.ive' )\r\ndart.setScale( 2,2,2)\r\nlink = viz.link( viz.MainView, dart )\r\nlink.preTrans( [0,.15,0] )\r\n\r\n#Add text fields to a dictionary.\r\ntext_dict = {}\r\nfor kind in ['score','instructions','time' ]:\r\n\ttext = viz.addText('', viz.SCREEN )\r\n\ttext.setScale( .5,.5)\r\n\ttext.alignment( viz.TEXT_CENTER_BASE )\r\n\ttext.alpha( 1 )\r\n\ttext_dict[ kind ] = text\r\ntext_dict['score'].setPosition( .1,.9 )\r\ntext_dict['instructions'].setPosition( .5,.5 )\r\ntext_dict['time'].setPosition( .1,.85 )\r\n\r\n\r\n#Add a blank screen to the viewpoint to \r\n#block out everything in the beginning.\r\nblank_screen = viz.addTexQuad( viz.SCREEN )\r\nblank_screen.color( viz.BLACK )\r\nblank_screen.setPosition( .5, .5 )\r\nblank_screen.setScale( 100,100 )\r\n\r\n######################### Tasks ############################################\r\n\r\ndef set_the_stage():\r\n\t#Put the viewpoint in the right position and freeze\r\n\t#navigation.\r\n\tviz.MainView.setPosition(0,1.8,-3)\r\n\tviz.MainView.setEuler(0,0,0)\r\n\tviz.mouse( viz.OFF )\r\n\t#Make the instructions text appear.\r\n\ttext = text_dict[ 'instructions' ]\r\n\ttext.alpha( 1 )\r\n\t#Put a message in that text.\r\n\ttext.message( 'Press s to begin.' )\r\n\t#Wait for the s key to be hit.\r\n\tyield viztask.waitKeyDown( 's' )\r\n\ttext.message( '' )\r\n\r\n##Schedule the above task.\r\n#viztask.schedule( set_the_stage() )\r\n\r\ndef game_instructions():\r\n\ttext = text_dict[ 'instructions' ]\r\n\ttext.alpha( 1 )\r\n\tsentences = ['You will get one point for each balloon that you pop.', \r\n\t'You are racing against the clock.',\r\n\t'Get ready . . .' 
]\r\n\tfor sentence in sentences:\r\n\t\ttext.alpha(0)\r\n\t\ttext.message( sentence )\r\n\t\t#Add a fading action to the text and wait.\r\n\t\tyield viztask.addAction( text, vizact.fadeTo(1, time = 1 ))\r\n\t\t#Wait a second.\r\n\t\tyield viztask.waitTime( 1 )\r\n\t\t#Wait to fade out.\r\n\t\tyield viztask.addAction( text, vizact.fadeTo(0, time = 1 ))\r\n\r\ndef game_timer_task():\r\n\t#Grab the text field for\r\n\t#time.\r\n\ttext = text_dict[ 'time' ]\r\n\ttime = 0\r\n\ttext.message( 'Time: 0' )\r\n\t#Loop through as long as time\r\n\t#is under a certain number of\r\n\t#seconds.\r\n\twhile time < 30:\r\n\t\tyield viztask.waitTime( 1 )\r\n\t\ttime += 1\r\n\t\ttext.message( 'Time: ' + str( time ) )\r\n\t\r\ndef balloon_popping_task():\r\n\t#Grab the text field for\r\n\t#the score.\r\n\ttext = text_dict[ 'score' ]\r\n\tscore = 0\r\n\ttext.message( 'Score: 0' )\r\n\t#Loop through as long as the score\r\n\t#is below the winning limit.\r\n\twhile score <5:\r\n\t\t#Create a data object to accept\r\n\t\t#data from the event.\r\n\t\tdata = viz.Data()\r\n\t\t#Yield for collision events.\r\n\t\tyield viztask.waitEvent( viz.COLLISION_EVENT, data )\r\n\t\t#From the data object, get the object\r\n\t\t#that the viewpoint collided with.\r\n\t\tintersected_object = data.data[0].object\r\n\t\t#If it was a balloon, pop it and\r\n\t\t#add a point to the score.\r\n\t\tif balloons.count( intersected_object ):\r\n\t\t\tpop.play()\r\n\t\t\tscore += 1\r\n\t\t\ttext.message( 'Score: ' + str( score ) )\r\n\t\t\tintersected_object.visible( viz.OFF )\r\n\r\n\r\ndef game():\r\n\t#Begin the game.\r\n\t#Turn on mouse navigation.\r\n\tviz.mouse( viz.ON )\r\n\t#Make the subwindow visible.\r\n\tsubwindow.visible( viz.ON )\r\n\t#Get rid of the blank screen\r\n\t#that blocks the view.\r\n\tblank_screen.visible( viz.OFF )\r\n\t\r\n\t#Create two tasks for two outcomes of game.\r\n\tballoon_popping = viztask.waitTask( balloon_popping_task() )\r\n\ttime_passing = viztask.waitTask( game_timer_task() )\r\n\t\r\n\t#Wait for the game to end.\r\n\t#Create a data object that \r\n\t#we can pass to the next yield\r\n\t#statement.\r\n\tdata = viz.Data()\r\n\t#Wait for the game to end one way\r\n\t#or another.\r\n\tyield viztask.waitAny( [balloon_popping, time_passing], data )\r\n\t\r\n\t#Once the game has ended, hide things.\r\n\tviz.mouse( viz.OFF )\r\n\tblank_screen.visible( viz.ON )\r\n\tsubwindow.visible( viz.OFF )\r\n\tviz.MainView.reset(viz.HEAD_ORI | viz.HEAD_POS| viz.BODY_ORI)\r\n\t\t\r\n\t#Give different feedback depending on \r\n\t#how the game ended.\r\n\ttext = text_dict[ 'instructions' ]\r\n\tif data.condition == balloon_popping:\r\n\t\ttext.message( 'GAME OVER, YOU WON!' )\r\n\telif data.condition == time_passing:\r\n\t\ttext.message( 'GAME OVER, YOU LOST.' )\r\n\ttext.alpha( 1 )\r\n\t\r\n\t#Wait a moment.\r\n\tyield viztask.waitTime( 4 )\r\n\r\n\r\ndef play_again():\r\n\t#Ask a question.\r\n\ttext_dict[ 'instructions' ].message( 'Want to play again (y/n)?' 
)\r\n\t#Create a data object to accept the \r\n\t#event's data.\r\n\tdata = viz.Data()\r\n\t#Yield to a keydown event.\r\n\tyield viztask.waitKeyDown(('n','y'),data ) \r\n\t#If the key that was pressed\r\n\t#is 'n', quit.\r\n\tif data.key == 'n':\r\n\t\tviz.quit()\r\n\t#Otherwise reset the world.\r\n\tif data.key == 'y':\r\n\t\tfor balloon in balloons:\r\n\t\t\tballoon.visible( viz.ON )\r\n\t\tfor value in text_dict.values():\r\n\t\t\tvalue.alpha(0)\r\n\r\n\r\n\r\n#Set up a task to handle the main \r\n#sequence of events.\r\ndef main_sequence():\r\n\twhile True:\r\n\t\t#Set the stage for the game.\r\n\t\tyield set_the_stage()\r\n\t\t\t\r\n\t\t#Begin with instructions.\r\n\t\tyield game_instructions()\r\n\t\t\r\n\t\t#Play the game.\r\n\t\tyield game()\r\n\t\t\r\n\t\t#See if the user wants to play \r\n\t\t#again.\r\n\t\tyield play_again()\r\n\r\n#Schedule the main sequence task.\r\nviztask.schedule( main_sequence() )","sub_path":"Vizard/teacher in a book code snippets (R4)/tasks example.py","file_name":"tasks example.py","file_ext":"py","file_size_in_byte":6301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"73924725","text":"import dvrkArm\nimport time\nimport math\nimport rospy\n\nMILLION = 10**6\n\nps = dvrkArm.dvrkArm('/PSM1')\npos_des = [0.0, 0.0, -0.13] # Position (m)\nrot_des = [0, 0, 0] # Euler angle ZYX (or roll-pitch-yaw)\njaw_des = [0]\nps.set_pose(pos_des, rot_des, 'deg')\n\ncnt = 0.0\ninterval_ms = 10\namp = 0.04\nperiod = 5.0\nrate = rospy.Rate(1000.0 / interval_ms)\nt_sleep = 0.5\nwhile not rospy.is_shutdown():\n try:\n p = amp*math.sin(2*math.pi*cnt/period)\n ps.set_pose([0.0, p, -0.13], [0.0, 0.0, 0.0], 'deg', True)\n cnt += 1000.0 / MILLION * interval_ms\n rate.sleep()\n except rospy.ROSInterruptException:\n pass","sub_path":"test1.py","file_name":"test1.py","file_ext":"py","file_size_in_byte":633,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"571799422","text":"# BSD 3-Clause License\n\n# Copyright (c) 2020, Institut Curie\n# All rights reserved.\n\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n\n# 1. Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n\n# 2. Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n\n# 3. Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. 
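The Vizard balloon-game script above yields on vizact.fadeTo actions inside game_instructions() but imports only viz and viztask, so those yields would raise NameError once the task is scheduled. The presumed fix is one extra line next to the existing imports (vizact is Vizard's standard actions module):

import viz
import viztask
import vizact   # provides vizact.fadeTo(...), used by game_instructions()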
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nfrom .InitialState import InitialState\nfrom .StateTransitionSubGraphs import StateTransitionSubGraphs\nfrom .Solution import Solution\nimport numpy as np\nimport pandas as pd\n\nclass Simulation:\n \n def __init__(self, model, initial_fixed_nodes, initial_fixed_nodes_vals):\n self.model = model\n self.initial_state = InitialState(initial_fixed_nodes, initial_fixed_nodes_vals, model.nodes)\n self.stateTransitionSubGraphs = StateTransitionSubGraphs(model.stateTransitionGraph.A_sparse, self.initial_state.x0)\n self.solution = Solution(model.stateTransitionGraph.A_sparse, self.stateTransitionSubGraphs, model.transitionRatesTable, self.initial_state.x0)\n self.last_states_probtraj = None\n \n def get_last_states_probtraj(self):\n \n \n probs = np.zeros((len(self.solution.stat_sol.nonzero()[0])))\n states = []\n \n for i, stateval in enumerate(self.solution.stat_sol.nonzero()[0]):\n \n binstate = np.zeros((len(self.model.nodes)))\n c = len(self.model.nodes)-1\n t_stateval = stateval\n \n while t_stateval > 0:\n binstate[c] = t_stateval % 2\n t_stateval = t_stateval // 2\n c -= 1\n \n inds_states, = np.where(np.flip(binstate))\n \n if len(inds_states) > 0:\n t_state = [self.model.nodes[ind] for ind in inds_states]\n states.append(\" -- \".join(t_state))\n \n else:\n states.append(\"\")\n \n probs[i] = self.solution.stat_sol[stateval, 0]\n \n self.last_states_probtraj = pd.DataFrame([probs], columns=states)\n self.last_states_probtraj.sort_index(axis=1, inplace=True)\n \n return self.last_states_probtraj\n","sub_path":"exastolog/Simulation.py","file_name":"Simulation.py","file_ext":"py","file_size_in_byte":3404,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"484606104","text":"from common import TreeNode\nfrom copy import deepcopy\n\nclass Solution:\n def sumNumbers(self, root: TreeNode) -> int:\n if not root:\n return 0\n nums = []\n def helper(root, num):\n tmp = deepcopy(num)\n tmp.append(root.val)\n if not root.left and not root.right:\n nums.append(tmp)\n return\n\n if root.left:\n helper(root.left, tmp)\n if root.right:\n helper(root.right, tmp)\n helper(root, [])\n sum = 0\n for num in nums:\n s = [str(i) for i in num]\n sum += int(\"\".join(s)) \n return sum \n\nif __name__ == \"__main__\":\n root = TreeNode(4)\n root.left = TreeNode(9)\n root.right = TreeNode(0)\n root.left.left = TreeNode(5)\n root.left.right = TreeNode(1)\n print(Solution().sumNumbers(root))","sub_path":"leetcode/tree/sum_root_to_leaf_numbers.py","file_name":"sum_root_to_leaf_numbers.py","file_ext":"py","file_size_in_byte":898,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"415559631","text":"import time, subprocess\nfrom datetime import datetime\nfrom rtstock.stock import Stock\nfrom moneywagon import get_current_price\n\n# Use telegram-send command-line tool in order to send a Telegram message\ndef telegram_alert(message):\n # telegram-send app must be in PATH and first run with 
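Solution.sumNumbers above deep-copies the digit path at every node and string-joins at the leaves, which is correct but does O(n) work per node. The conventional linear-time form threads the running integer through the recursion (a sketch; TreeNode as in the record's common module):

def sum_numbers(root) -> int:
    # Carry the number built so far; extend it with each node's digit.
    def dfs(node, running: int) -> int:
        if node is None:
            return 0
        running = running * 10 + node.val
        if node.left is None and node.right is None:
            return running
        return dfs(node.left, running) + dfs(node.right, running)
    return dfs(root, 0)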
--configure-channel\n # More info in https://github.com/rahiel/telegram-send\n subprocess.call('telegram-send --config trader.conf \"' + message + '\"', shell=True)\n print(message)\n\n\n# Define custom stock market alerts\nTSLA_low = 355 # Tesla\nTSLA_high = 385\nMSFT_low = 60 # Microsoft\nMSFT_high = 80\nNVDA_low = 145 # NVIDIA\nNVDA_high = 175\nWESAS_low = 14 # WES.AS\nWESAS_high = 16\n\nbtc_low = 2400 # Bitcoin\nbtc_high = 2800\neth_low = 225 # Ethereum\neth_high = 275\netc_low = 10 # Ethereum Classic\netc_high = 25\nzec_low = 225 # Zcash\nzec_high = 275\n\n\nprint(\"Monitoring stock market and cryptocurrency rates...\")\nwhile True:\n # Print actual timestamp\n print(datetime.now().strftime(\"%d/%m/%y %H:%M:%S\"))\n\n # Get actual stock market prices\n TSLA = float(Stock('TSLA').get_latest_price()[0][\"LastTradePriceOnly\"]) # Tesla\n MSFT = float(Stock('MSFT').get_latest_price()[0][\"LastTradePriceOnly\"]) # Microsoft\n NVDA = float(Stock('NVDA').get_latest_price()[0][\"LastTradePriceOnly\"]) # NVIDIA\n WESAS = float(Stock('WES.AS').get_latest_price()[0][\"LastTradePriceOnly\"]) # WES.AS\n\n btc = get_current_price('btc', 'eur') # Bitcoin\n eth = get_current_price('eth', 'eur') # Ethereum\n etc = get_current_price('etc', 'eur') # Ethereum Classic\n zec = get_current_price('zec', 'eur') # Zcash\n\n\n # Trigger telegram's alert (if any)\n if TSLA < TSLA_low: telegram_alert(\"Tesla low price: \" + str(TSLA))\n if TSLA > TSLA_high: telegram_alert(\"Tesla high price: \" + str(TSLA))\n if MSFT < MSFT_low: telegram_alert(\"Microsoft low price: \" + str(MSFT))\n if MSFT > MSFT_high: telegram_alert(\"Microsoft high price: \" + str(MSFT))\n if NVDA < NVDA_low: telegram_alert(\"NVIDIA low price: \" + str(NVDA))\n if NVDA > NVDA_high: telegram_alert(\"NVIDIA high price: \" + str(NVDA))\n if WESAS < WESAS_low: telegram_alert(\"WES.AS low price: \" + str(WESAS))\n if WESAS > WESAS_high: telegram_alert(\"WES.AS high price: \" + str(WESAS))\n\n if btc < btc_low: telegram_alert(\"Bitcoin low price: \" + str(btc))\n if btc > btc_high: telegram_alert(\"Bitcoin high price: \" + str(btc))\n if eth < eth_low: telegram_alert(\"Ethereum low price: \" + str(eth))\n if eth > eth_high: telegram_alert(\"Ethereum high price: \" + str(eth))\n if etc < etc_low: telegram_alert(\"Ethereum Classic low price: \" + str(etc))\n if etc > etc_high: telegram_alert(\"Ethereum Classic high price: \" + str(etc))\n if zec < zec_low: telegram_alert(\"Zcash low price: \" + str(zec))\n if zec > zec_high: telegram_alert(\"Zcash high price: \" + str(zec))\n\n # We will monitor the rates every 5 minutes\n time.sleep(300)\n","sub_path":"scripts/StockMarket_CryptocurrencyRates.py","file_name":"StockMarket_CryptocurrencyRates.py","file_ext":"py","file_size_in_byte":2926,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"554444113","text":"ns = list(range(10))\r\n\r\nevens = [ 2*n for n in ns]\r\npowers = [2**n for n in ns]\r\n\r\ndef main():\r\n row = 2\r\n col = 3\r\n mat = [[0]*col for i in range(row)]\r\n print(mat)\r\n initlist = [1,9, -12, 20, -5, 7]\r\n k = 0\r\n\r\n for i in range(row):\r\n for j in range(col):\r\n mat[i][j] = initlist[k]\r\n k += 1\r\n\r\n return mat\r\n\r\n # print(mat)\r\n\r\ndef filter_neg(mat):\r\n row = len(mat)\r\n col = len(mat[0])\r\n\r\n mat2 = [[0]*col for i in range(row)]\r\n\r\n for i in range(row):\r\n for j in range(col):\r\n if mat[i][j] < 0 :\r\n mat2[i][j] = 0\r\n else:\r\n mat2[i][j] = mat[i][j]\r\n return mat2\r\n\r\ndef 
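StockMarket_CryptocurrencyRates.py above repeats the same low/high comparison for every ticker with hand-copied thresholds. A table-driven loop keeps the alert logic in one place; this sketch deliberately leaves the price lookups abstract rather than guessing at the rtstock/moneywagon call signatures:

from typing import Callable, NamedTuple

class Watch(NamedTuple):
    name: str
    fetch: Callable[[], float]  # returns the current price
    low: float
    high: float

def check_alerts(watches, alert) -> None:
    # One pass over the watch table replaces the sixteen hand-written ifs.
    for w in watches:
        price = w.fetch()
        if price < w.low:
            alert(f"{w.name} low price: {price}")
        elif price > w.high:
            alert(f"{w.name} high price: {price}")

# Example wiring (the fetchers are placeholders for the real lookups):
# watches = [Watch("Tesla", fetch_tsla, 355, 385), Watch("Bitcoin", fetch_btc, 2400, 2800)]
# check_alerts(watches, telegram_alert)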
transpose(mat):\r\n row = len(mat)\r\n col = len(mat[0])\r\n mat2 = [[0]* row for i in range(col)]\r\n for i in range(row):\r\n for j in range(col):\r\n mat2[j][i] = mat[i][j]\r\n return mat2\r\n\r\n\r\ndef iproduct(v1, v2):\r\n p = 0\r\n for i in range(len(v1)):\r\n p += v1[i] * v2[i]\r\n return p\r\n\r\ndef rowvec(m, i):\r\n v = [m[i][j] for j in range(len(m[0]))]\r\n return v\r\n\r\n\r\ndef colvec(m, j):\r\n v = [m[i][j] for i in range(len(m))]\r\n return v\r\n\r\ndef product(m1, m2):\r\n r1 = len(m1)\r\n c1 = len(m1[0])\r\n r2 = len(m2)\r\n c2 = len(m2[0])\r\n assert c1 == r2\r\n mat = [[0] * c2 for i in range(r1) ]\r\n for i in range(r1):\r\n for j in range(c2):\r\n mat[i][j] = iproduct(rowvec(m1, i), colvec(m2, j))\r\n\r\n return mat\r\n\r\n#import numpy as np\r\n#help(np.matrix)\r\n\r\n\r\nls = ['one', 'two', 'three', 'four']\r\nval = {name:i + 1 for (i,name) in enumerate(ls)}\r\nval['five'] = 5\r\nprint(val)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n print(filter_neg(main()))\r\n print(transpose(filter_neg(main())))\r\n print(product(filter_neg(main()), transpose(filter_neg(main()))))","sub_path":"comprehension.py","file_name":"comprehension.py","file_ext":"py","file_size_in_byte":1813,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"240463731","text":"from re import template\nimport re\nfrom typing import DefaultDict\nfrom flask import Flask, render_template, redirect, url_for, request, Response, send_from_directory\nimport os\nimport signal\nimport sys\nimport json\nfrom flask.helpers import url_for\nfrom flask_socketio import SocketIO, emit\nimport psycopg2\nfrom psycopg2 import OperationalError, sql\n\nos.environ[\"GEVENT_SUPPORT\"] = 'True'\n\n##############################\n### CREDENTIALS FOR THE DB ###\n##############################\nDB_NAME = \"project\"\nUSER = \"pi\"\nPASSWORD = \"foobar\"\nHOST = \"127.0.0.1\"\nPORT = 5432\n\n#########################\n### DB HELPER METHODS ###\n#########################\n# This will create a connection with the database.\ndef create_connection(db_name, db_user, db_password, db_host, db_port):\n connection = None\n try:\n connection = psycopg2.connect(\n database=db_name,\n user=db_user,\n password=db_password,\n host=db_host,\n port=db_port,\n )\n print(\"Connection to PostgreSQL DB successful\")\n except OperationalError as e:\n print(f\"The error '{e}' occurred\")\n return connection\n\n# This can be used for executing queries where tables need to be created, or\n# when we need to update or delete certain records.\ndef execute_query(connection, query):\n connection.rollback()\n connection.autocommit = True\n cursor = connection.cursor()\n try:\n cursor.execute(query)\n print(\"Query executed successfully\")\n except OperationalError as e:\n print(f\"The error '{e}' occurred\")\n\n# This can be used for queries where we are selecting records to read\n# (no CRUD).\ndef execute_read_query(connection, query):\n cursor = connection.cursor()\n result = None\n try:\n cursor.execute(query)\n result = cursor.fetchall()\n return result\n except OperationalError as e:\n print(f\"The error '{e}' occurred\")\n\n################\n### DB SETUP ###\n################\n# Connect to the DB.\nconnection = create_connection(DB_NAME, USER, PASSWORD, HOST, PORT)\n\n# Verify the connection succeeded.\nif (connection is None):\n quit()\n\n# Now, setup our event handler for when the script is killed to close\n# the connection.\ndef signal_handler(sig, frame):\n print('Cleaning up...')\n 
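comprehension.py above hand-rolls transpose and matrix product with index loops, and its commented-out numpy import hints at the obvious cross-check. Comparing against numpy's matmul catches indexing mistakes early (a sketch):

import numpy as np

def check_product(m1, m2, product_fn) -> bool:
    # Compare the hand-rolled product against numpy's matrix multiplication.
    expected = np.array(m1) @ np.array(m2)
    return np.array_equal(np.array(product_fn(m1, m2)), expected)

# e.g. check_product(filter_neg(main()), transpose(filter_neg(main())), product)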
connection.close()\n sys.exit(0)\n\nsignal.signal(signal.SIGINT, signal_handler)\n\n##################\n### QUERY DEFS ###\n##################\n# Retrieves the ID of a truck given its name.\ndef getTruckID(truckName):\n select_query = sql.SQL('''\n SELECT Truck.ID\n FROM Truck\n WHERE Truck.Name = {truckName}\n ''').format(truckName = sql.Literal(truckName),)\n results = execute_read_query(connection, select_query)\n if (len(results) == 0):\n return None\n else:\n return results[0][0]\n\n# Retrieves the ID of the address of a truck given its name.\ndef getAddressID(truckName):\n select_query = sql.SQL('''\n SELECT Truck.AddressID\n FROM Truck\n WHERE Truck.Name = {truckName}\n ''').format(truckName = sql.Literal(truckName),)\n results = execute_read_query(connection, select_query)\n if (len(results) == 0):\n return None\n else:\n return results[0][0]\n\n# Retrieves the ID of a mealtype given its description.\ndef getMealTypeID(mealType):\n select_query = sql.SQL('''\n SELECT MealType.ID\n FROM MealType\n WHERE MealType.Description = {mealType}\n ''').format(mealType = sql.Literal(mealType),)\n results = execute_read_query(connection, select_query)\n if (len(results) == 0):\n return None\n else:\n return results[0][0]\n\n# Retrieves the ID of a meal given its name.\ndef getMealID(mealName):\n select_query = sql.SQL('''\n SELECT Meal.ID\n FROM Meal\n WHERE Meal.Name = {mealName}\n ''').format(mealName = sql.Literal(mealName))\n results = execute_read_query(connection, select_query)\n if (len(results) == 0):\n return None\n else:\n return results[0][0]\n\n# Retrieves the ID of an ingredient given its name.\ndef getIngredientID(ingredientName):\n select_query = sql.SQL('''\n SELECT Ingredient.ID\n FROM Ingredient\n WHERE Ingredient.Name = {ingredientName}\n ''').format(ingredientName = sql.Literal(ingredientName))\n results = execute_read_query(connection, select_query)\n if (len(results) == 0):\n return None\n else:\n return results[0][0]\n\n# Get all ingredients in the system.\ndef getAllIngredients():\n select_query = '''\n SELECT Ingredient.Name\n FROM Ingredient\n ORDER BY Ingredient.Name ASC\n '''\n return execute_read_query(connection, select_query)\n\n# Get all ingredients for a specific meal.\ndef getIngredients(mealName):\n select_query = sql.SQL('''\n SELECT Ingredient.Name\n FROM Meal\n JOIN MealIngredient ON (Meal.ID = MealIngredient.MealID)\n JOIN Ingredient ON (MealIngredient.IngredientID = Ingredient.ID)\n WHERE Meal.Name = {mealName}\n ''').format(mealName = sql.Literal(mealName),)\n return execute_read_query(connection, select_query)\n\n# Get a list of all meals in the system.\ndef getMeals():\n select_query = '''\n SELECT Meal.Name\n FROM Meal\n ORDER BY Meal.Name ASC\n '''\n return execute_read_query(connection, select_query)\n\n# Get the information for a meal for a certian truck.\ndef getMealInfo(truckName, mealName):\n select_query = sql.SQL('''\n SELECT Meal.Name, MealType.Description, Inventory.Number\n FROM Truck\n JOIN Inventory ON (Truck.ID = Inventory.TruckID)\n JOIN Meal ON (Inventory.MealID = Meal.ID)\n JOIN MealType ON (Meal.TypeID = MealType.ID)\n WHERE Truck.Name = {truckName} AND Meal.Name = {mealName}\n ''').format(truckName = sql.Literal(truckName),\n mealName = sql.Literal(mealName),)\n return execute_read_query(connection, select_query)\n\n# Get all the different meal types in the DB.\ndef getMealTypes():\n select_query = '''\n SELECT MealType.Description\n FROM MealType\n ORDER BY MealType.Description ASC\n '''\n return execute_read_query(connection, 
select_query)\n\n# Search for meals given the specified search query across the entire fleet.\ndef mealFleetSearch(searchQuery):\n searchQuery = searchQuery.strip()\n searchQuery = '%' + searchQuery + '%'\n select_query = sql.SQL('''\n SELECT Truck.Name, Meal.Name, Inventory.Number, Address.Street, Address.City, Address.State, Address.Zip\n FROM Truck\n JOIN Address ON (Truck.AddressID = Address.ID)\n JOIN Inventory ON (Truck.ID = Inventory.TruckID)\n JOIN Meal ON (Inventory.MealID = Meal.ID)\n WHERE Meal.Name ILIKE {searchQuery}\n ORDER BY Truck.Name ASC\n ''').format(searchQuery = sql.Literal(searchQuery),)\n return execute_read_query(connection, select_query)\n\n# Creates a meal given its attributes and links it to all trucks.\n# This has 5 queries to be prepared.\ndef addMealToDB(mealName, mealType, ingredients, truckName, availNumber):\n print(\"name: {0}, type: {1}, ingredients: {2}, truckName: {3}, availNumber: {4}\".format(mealName, mealType, ingredients, truckName, availNumber))\n\n # First, create the meal entity itself.\n insert_query = sql.SQL('''\n INSERT INTO Meal (Name, TypeID)\n VALUES ({mealName}, {mealTypeID})''').format(mealName = sql.Literal(mealName),\n mealTypeID = sql.Literal(getMealTypeID(mealType)),)\n execute_query(connection, insert_query)\n\n # Link all the ingredients.\n # All the ingredients, so we don't need to worry about injection attacks.\n mealIngredients = []\n\n mealID = getMealID(mealName)\n\n if (mealID != None):\n for ingredient in ingredients:\n mealIngredients.append((mealID, getIngredientID(ingredient)))\n\n mealIngredients_records = \", \".join([\"%s\"] * len(mealIngredients))\n\n insert_query = f'''\n INSERT INTO MealIngredient (MealID, IngredientID)\n VALUES {mealIngredients_records}\n '''\n\n connection.rollback()\n connection.autocommit = True\n cursor = connection.cursor()\n cursor.execute(insert_query, mealIngredients)\n\n # Link the new meal to the current truck.\n # No need to worry about injection attacks here.\n connection.rollback()\n connection.autocommit = True\n cursor = connection.cursor()\n cursor.execute('''\n INSERT INTO Inventory (TruckID, MealID, Number)\n VALUES (%s, %s, %s)''', (getTruckID(truckName), mealID, availNumber))\n\n # Link the new meal to all other trucks.\n # No need to worry about injection attacks here either - we are not using\n # any of the provided input from the user.\n\n # First, get a list of all trucks.\n select_query = '''\n SELECT Truck.ID\n FROM Truck\n '''\n trucksIDs = execute_read_query(connection, select_query)\n\n # Now we build the things for our query.\n inventory = []\n\n for truckID in trucksIDs:\n inventory.append((truckID, mealID, 0))\n\n inventory_records = \", \".join([\"%s\"] * len(inventory))\n\n insert_query = f'''\n INSERT INTO Inventory (TruckID, MealID, Number)\n VALUES {inventory_records}\n '''\n \n connection.rollback()\n connection.autocommit = True\n cursor = connection.cursor()\n cursor.execute(insert_query, inventory)\n\n # And we're done!\n # This indicates all is good and the meal was successfully added.\n return True\n else:\n # This indicates there was an injection attack.\n return False\n\n# Gets the information of a specific truck.\ndef retrieveTruckInfo(truckName):\n select_query = sql.SQL('''\n SELECT Truck.Number, Address.Street, Address.City, Address.State, Address.Zip\n FROM Truck\n JOIN Address ON (Truck.AddressID = Address.ID)\n WHERE Truck.Name = {truckName}\n ''').format(truckName = sql.Literal(truckName))\n return execute_read_query(connection, 
select_query)\n\n# Gets the names of all the trucks in the fleet.\ndef getFleet():\n select_query = \"\"\"\n SELECT Truck.Name\n FROM Truck\n ORDER BY Truck.Name ASC\n \"\"\"\n return execute_read_query(connection, select_query)\n\n# Gets the information of all trucks in the fleet except the one specified.\ndef getAllTrucksInfo():\n select_query = '''\n SELECT Truck.Name, Truck.Number, Address.Street, Address.City, Address.State, Address.Zip\n FROM Truck\n JOIN Address ON (Truck.AddressID = Address.ID)\n ORDER BY Truck.Name ASC;\n '''\n return execute_read_query(connection, select_query)\n\n######################\n### HELPER METHODS ###\n######################\ndef isValidTruck(truckName):\n fleet = getFleet()\n\n for truck in fleet:\n if (truck[0] == truckName):\n return True\n \n return False\n\ndef isValidMeal(mealName):\n meals = getMeals()\n \n for meal in meals:\n if (meal[0] == mealName):\n return True\n \n return False\n\ndef isValidMealType(mealType):\n mealTypes = getMealTypes()\n \n for type in mealTypes:\n if (type[0] == mealType):\n return True\n \n return False\n\ndef isValidIngredient(ingredientName):\n ingredients = getAllIngredients()\n\n for ingredient in ingredients:\n if (ingredient[0] == ingredientName):\n return True\n \n return False\n\ndef isValidLength(input, maxLength):\n return len(input) <= maxLength\n\n###############\n### APP DEF ###\n###############\napp = Flask(__name__)\nsocketio = SocketIO(app)\n\n# Event handler for the favicon.\n@app.route('/favicon.ico') \ndef favicon(): \n return send_from_directory(os.path.join(app.root_path, 'static'), 'foodtruck.png', mimetype='image/vnd.microsoft.icon')\n\n# Event handler for the 404 page - in case we missed something.\n@app.errorhandler(404)\ndef page_not_found(error):\n return render_template('404Page.html'), 404\n\n# Event handler for the main page.\n@app.route(\"/\")\ndef index():\n # Get the list of trucks.\n truckNames = getFleet()\n\n templateData = {\n 'truckNames' : truckNames\n }\n return render_template('MainPage.html', **templateData)\n\n@app.route(\"/\", methods=['GET', 'POST'])\ndef getTruckInfo(truckName):\n # First, check if is is a valid truck (the only time it won't be is if the\n # user is trying something malicious).\n if (isValidTruck(truckName)):\n # Check if they are updating the truk data.\n if (request.method == 'POST'):\n # Grab the values from the form.\n street = request.form['street']\n city = request.form['city']\n state = request.form['state']\n zip = request.form['zip']\n phoneNumber = request.form['phoneNumber']\n addressID = getAddressID(truckName)\n\n # Make sure the truck name is valid.\n if (addressID != None):\n # Make sure all the attributes are valid.\n if (isValidLength(street, 20) and isValidLength(city, 20) and isValidLength(state, 20) and isValidLength(zip, 5) and isValidLength(phoneNumber, 10)):\n # Update truck data.\n update_query = sql.SQL('''\n UPDATE Address\n SET Street = {street}, City = {city}, State = {state}, Zip = {zip}\n WHERE Address.ID = {addressID}\n ''').format(street = sql.Literal(street),\n city = sql.Literal(city),\n state = sql.Literal(state),\n zip = sql.Literal(zip),\n addressID = sql.Literal(addressID),)\n execute_query(connection, update_query)\n\n # Update phone number.\n update_query = sql.SQL('''\n UPDATE Truck\n SET Number = {phoneNumber}\n WHERE Truck.Name = {truckName}\n ''').format(phoneNumber = sql.Literal(phoneNumber),\n truckName = sql.Literal(truckName),)\n execute_query(connection, update_query)\n else:\n # Otherwise, redirect them 
from where they came from to try again.\n return redirect(request.url)\n else:\n # Otherwise, return the 404 page - invlaid URL.\n return render_template('404Page.html'), 404\n\n # Query the DB for the truck info.\n truckInfo = retrieveTruckInfo(truckName)\n\n templateData = {\n 'name': truckName,\n 'truckInfo' : truckInfo\n }\n return render_template('FoodTruckInfo.html', **templateData)\n else:\n return render_template('404Page.html'), 404\n\n@app.route(\"//menu\")\ndef menu(truckName):\n # First, make sure the truck name is valid.\n if (isValidTruck(truckName)):\n templateData = {\n 'name': truckName\n }\n\n return render_template('Menu.html', **templateData)\n else:\n return render_template('404Page.html'), 404\n\n@app.route(\"//fleet\")\ndef fleet(truckName):\n # First, make sure the truck name is valid.\n if (isValidTruck(truckName)):\n # Query the DB for all trucks.\n fleet = getAllTrucksInfo()\n\n templateData = {\n 'name': truckName,\n 'fleet': fleet\n }\n\n return render_template('Fleet.html', **templateData)\n else:\n return render_template('404Page.html'), 404\n\n@app.route(\"//meal_info/\")\ndef meal_info(truckName, mealName):\n # First, make sure the truck name is valid.\n if (isValidTruck(truckName)):\n # Query the DB for a list of all meals for the dropdown.\n meals = getMeals()\n\n # Query the DB for info on the selected meal.\n if (mealName == \"def\" or not isValidMeal(mealName)):\n # We were taken here from the menu, so just choose the first meal in the\n # list.\n # Or, the meal name is valid. Could be an injection attack or just bad\n # input.\n mealName = meals[0][0]\n \n # Get the information and ingredients for the selected meal.\n chosen_meal_info = getMealInfo(truckName, mealName)\n ingredients = getIngredients(mealName)\n\n # Return the rendered template.\n templateData = {\n 'name' : truckName,\n 'chosenMeal': mealName,\n 'meals': meals,\n 'chosen_meal_info': chosen_meal_info,\n 'ingredients': ingredients\n }\n\n return render_template('MealInfo.html', **templateData)\n else:\n return render_template('404Page.html'), 404\n\n@app.route(\"//delete_meal/\")\ndef removeMeal(truckName, mealName):\n # Make sure the truck and meal are valid.\n if (isValidTruck(truckName) and isValidMeal(mealName)):\n mealID = getMealID(mealName)\n\n # Remove all MealIngredient entities.\n remove_query = sql.SQL('''\n DELETE FROM MealIngredient\n WHERE MealID = {mealID}\n ''').format(mealID = sql.Literal(mealID))\n execute_query(connection, remove_query)\n\n # Remove all Inventory entities.\n remove_query = sql.SQL('''\n DELETE FROM Inventory\n WHERE MealID = {mealID}\n ''').format(mealID = sql.Literal(mealID))\n execute_query(connection, remove_query)\n\n # Remove the meal itself.\n remove_query = sql.SQL('''\n DELETE FROM Meal\n WHERE ID = {mealID}\n ''').format(mealID = sql.Literal(mealID))\n execute_query(connection, remove_query)\n\n # Redirect back to meal info.\n return redirect(url_for('meal_info', truckName=truckName, mealName='def'))\n else:\n return render_template('404Page.html'), 404\n\n@socketio.on('connect', namespace='/meal')\ndef onConnect():\n print('Client connected to namespace meal')\n\n@socketio.on('updateInventory', namespace='/meal')\ndef updateInventory(data):\n data = json.loads(data)\n\n # Grab the data.\n truckName = data['truckName']\n mealName = data['mealName']\n newInventory = data['updatedInventory']\n\n try:\n newInventory = int(newInventory)\n except:\n newInventory = -1\n\n newInventoryWithinRange = newInventory >= 0 and newInventory <= 1000\n\n # 
Verify the data is valid.\n if (isValidTruck(truckName) and isValidMeal(mealName) and newInventoryWithinRange):\n # Grab the ID of the truck and meal.\n truckID = getTruckID(truckName)\n mealID = getMealID(mealName)\n\n # Update the inventory in the DB.\n update_query = sql.SQL('''\n UPDATE Inventory\n SET Number = {newInventory}\n WHERE TruckID = {truckID} AND MealID = {mealID}\n ''').format(newInventory = sql.Literal(newInventory),\n truckID = sql.Literal(truckID),\n mealID = sql.Literal(mealID),)\n execute_query(connection, update_query)\n\n print(\"Updated {0}'s inventory of {1} to be {2}\".format(truckName, mealName, newInventory))\n\n@socketio.on('disconnect', namespace='/meal')\ndef onDisconnect():\n print('Client disconnected from namespace meal')\n\n@app.route(\"//search\", methods=['GET', 'POST'])\ndef search(truckName):\n # First, check the truck name is valid.\n if (isValidTruck(truckName)):\n\n templateData = {\n 'name': truckName,\n 'searchQuery': None,\n 'searchResults': None\n }\n\n if (request.method == 'POST'):\n searchQuery = request.form['query']\n\n if (searchQuery != '' and str.isspace(searchQuery) == False):\n templateData['searchQuery'] = searchQuery\n print('Query: {0}'.format(searchQuery))\n\n # Perform the search\n searchResults = mealFleetSearch(searchQuery)\n\n if (len(searchResults) > 0):\n templateData['searchResults'] = searchResults\n\n return render_template('Search.html', **templateData)\n else:\n return render_template('404Page.html'), 404\n\n@app.route(\"//ingredients\", methods=['GET', 'POST'])\ndef ingredientManager(truckName):\n # Make sure the truck name is valid.\n if (isValidTruck(truckName)):\n # Determine if we need to add a new ingredient.\n if (request.method == 'POST'):\n ingredientName = request.form['ingredientName']\n\n # Make sure the ingredient name isn't just whitespace or an empty string.\n if (ingredientName != '' and str.isspace(ingredientName) == False):\n # Make sure it is the right length and new.\n if (not isValidIngredient(ingredientName) and isValidLength(ingredientName, 30)):\n print('New Ingredient: {0}'.format(ingredientName))\n\n # Put it in the database.\n insert_query = sql.SQL('''\n INSERT INTO Ingredient (Name) VALUES ({ingredientName})\n ''').format(ingredientName = sql.Literal(ingredientName),)\n\n # Execute the INSERT command.\n execute_query(connection, insert_query)\n\n # Grab a list of all ingredients.\n ingredients = getAllIngredients()\n\n templateData = {\n 'name': truckName,\n 'ingredients': ingredients\n }\n\n return render_template('IngredientList.html', **templateData)\n else:\n # Otherwise, give them a 404.\n return render_template('404Page.html'), 404\n\n@app.route(\"//delete_ingredient/\")\ndef removeIngredient(truckName, ingredientName):\n # Make sure the truck and ingredient are valid.\n if (isValidTruck(truckName) and isValidIngredient(ingredientName)):\n ingredientID = getIngredientID(ingredientName)\n\n # Remove all MealIngredient entities.\n remove_query = sql.SQL('''\n DELETE FROM MealIngredient\n WHERE IngredientID = {ingredientID}\n ''').format(ingredientID = sql.Literal(ingredientID))\n execute_query(connection, remove_query)\n\n # Remove the ingredient itself.\n remove_query = sql.SQL('''\n DELETE FROM Ingredient\n WHERE ID = {ingredientID}\n ''').format(ingredientID = sql.Literal(ingredientID))\n execute_query(connection, remove_query)\n\n # Redirect back to the ingredient manager.\n return redirect(url_for('ingredientManager', truckName=truckName))\n else:\n return 
render_template('404Page.html'), 404\n\n# Parses the selected ingredients from the form on the create meal page.\ndef parseIngredients(form):\n ingredients = []\n\n for key, value in form.items():\n if (key != 'mealName' and key != 'mealType' and key != 'availNumber'):\n ingredients.append(key)\n \n return ingredients\n\n@app.route(\"//create_meal\", methods=['GET', 'POST'])\ndef createMeal(truckName):\n # Make sure the truck name is valid.\n if (isValidTruck(truckName)):\n if (request.method == 'GET'):\n # Get the different meal types and all available ingredients.\n mealTypes = getMealTypes()\n availIngredients = getAllIngredients()\n\n templateData = {\n 'name': truckName,\n 'mealTypes': mealTypes,\n 'availIngredients': availIngredients\n }\n\n # Return the page for them to create the meal.\n return render_template('CreateMeal.html', **templateData)\n else:\n # First, grab all the attributes of the meal.\n mealName = request.form['mealName']\n mealType = request.form['mealType']\n availNumber = request.form['availNumber']\n ingredients = parseIngredients(request.form)\n\n # Attempt to parse the available number of the meal for the currently\n # connected truck.\n try:\n availNumber = int(availNumber)\n except:\n availNumber = -1\n \n # Check all the attributes are valid.\n mealNameValid = (not isValidMeal(mealName)) and (isValidLength(mealName, 20)) and (mealName != '') and (str.isspace(mealName) == False)\n mealTypeValid = isValidMealType(mealType)\n availNumberValid = availNumber >= 0 and availNumber <= 1000\n\n # First, make sure we have some ingredients - you cannot have a meal without an ingredient.\n ingredientsValid = len(ingredients) > 0\n # If we have at least one ingredient:\n if (ingredientsValid):\n # Go through them all to make sure they're in the DB.\n for ingredient in ingredients:\n if (not isValidIngredient(ingredient)):\n ingredientsValid = False\n break\n \n # Only proceed to create the meal if all items are valid.\n if (mealNameValid and mealTypeValid and availNumberValid and ingredientsValid):\n # Create the meal.\n # Remember, as this point everything is valid, however, we still may have an\n # injection attack in mealName.\n success = addMealToDB(mealName, mealType, ingredients, truckName, availNumber)\n\n if (success):\n # Redirect the user to the Meal Info page, selecting the new meal.\n return redirect(url_for('meal_info', truckName=truckName, mealName=mealName))\n else:\n # Otherwise, there was an attempted injection attack, so redirect them back\n # to the create meal page.\n return redirect(request.url)\n else:\n # Otherwise, redirect them back to the create meal page.\n return redirect(request.url)\n else:\n return render_template('404Page.html'), 404\n\nif __name__ == \"__main__\":\n socketio.run(app, host=\"0.0.0.0\", debug=True)\n","sub_path":"IMSBackend.py","file_name":"IMSBackend.py","file_ext":"py","file_size_in_byte":26873,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"409556565","text":"#!/usr/bin/env python\n#-*- coding:utf-8 -*-\n#date:\"2018-01-07,17:09\"\nfrom flask_wtf import FlaskForm\nfrom wtforms.fields import SubmitField,StringField,PasswordField,FileField,TextAreaField\nfrom wtforms.validators import DataRequired,EqualTo,Email,Regexp,ValidationError\nfrom ..models import User\n#会员注册表单\nclass RegisterForm (FlaskForm):\n name = StringField (\n label = \"昵称\",validators = [DataRequired (\"请输入昵称!\")],\n description = \"昵称\",render_kw = {\"class\":\"form-control input-lg\"}\n )\n email = 
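IMSBackend.py above parameterizes queries two safe ways — psycopg2.sql composition with sql.Literal, and %s placeholders with a parameter tuple — but builds its multi-row Inventory inserts by joining a counted list of "%s" groups into the SQL string. cursor.executemany covers that case directly (a sketch; table and column names follow the script's schema, the helper name is hypothetical):

def link_meal_to_trucks(cursor, meal_id, truck_ids) -> None:
    # executemany runs the same parameterized statement once per row,
    # so no "%s, %s, ..." string needs to be assembled by hand.
    cursor.executemany(
        "INSERT INTO Inventory (TruckID, MealID, Number) VALUES (%s, %s, %s)",
        [(truck_id, meal_id, 0) for truck_id in truck_ids],
    )

For large batches, psycopg2.extras.execute_values is the faster variant of the same idea.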
StringField (label = \"邮箱\",validators = [DataRequired (\"请输入邮箱!\"),Email(\"邮箱格式不正确!\")],\n description = \"邮箱\",render_kw = {\"class\":\"form-control input-lg\",\"placeholder\":\"请输入邮箱!\"}\n )\n phone = StringField(\n label = \"手机\",\n validators = [DataRequired(\"请输入手机号!\"),Regexp(\"1[3,4,5,7,8]\\\\d{9}\",message = \"手机格式不正确!\")],\n description = \"手机\",\n render_kw = {\"class\":\"form-control input-lg\",\"placeholder\":\"请输入手机号!\"}\n )\n pwd = PasswordField(\n label = \"密码\",\n validators = [DataRequired(\"请输入密码!\")],description = \"密码\",\n render_kw = {\"class\":\"form-control input-lg\",\"placeholder\":\"请输入密码!\"}\n )\n repwd = PasswordField (\n label = \"确认密码\",\n validators = [DataRequired (\"请输入密码!\"),EqualTo(\"pwd\",message = \"两次输入密码不一致!\")],description = \"确认密码\",\n render_kw = {\"class\":\"form-control input-lg\",\"placeholder\":\"请输入密码!\"}\n )\n submit = SubmitField(\n \"提交\",render_kw = {\"class\":\"btn btn-lg btn-success btn-block\"}\n )\n def validate_name(self,field):\n name = field.data\n user = User.query.filter_by(name=name).count()\n if user:\n raise ValidationError(\"昵称已经存在!\")\n def validate_email(self,field):\n email = field.data\n email = User.query.filter_by(email=email).count()\n if email:\n raise ValidationError(\"邮箱已经存在!\")\n\n def validate_phone(self,field):\n phone = field.data\n phone = User.query.filter_by(phone=phone).count()\n if phone:\n raise ValidationError(\"手机号已经存在!\")\n\n#会员登录表单\nclass LoginForm(FlaskForm):\n name = StringField(\n label = \"账号\",validators = [DataRequired(\"请输入账号!\")],\n render_kw = {\"class\":\"form-control\",\"placeholder\":\"请输入账号!\"}\n )\n pwd = PasswordField(\n label = \"密码\",validators = [DataRequired(\"请输入密码!\")],\n render_kw = {\"class\":\"form-control\",\"placeholder\":\"请输入密码!\"}\n )\n submit = SubmitField(\n \"登录\",render_kw = {\"class\":\"btn btn-lg btn-primary btn-block\"}\n )\n\n#会员资料修改表单\nclass UserDetailForm(FlaskForm):\n name = StringField(\n label = \"账号\",validators = [DataRequired(\"请输入账号!\")],\n render_kw = {\"class\":\"form-control\",\"placeholder\":\"请输入账号!\"}\n )\n email = StringField(\n label = \"邮箱\",validators = [DataRequired(\"请输入邮箱!\"),Email(\"邮箱格式不正确!\")],\n render_kw = {\"class\":\"form-control\",\"placeholder\":\"请输入邮箱!\"}\n )\n phone = StringField(\n label = \"手机\",validators = [DataRequired(\"请输入手机号码!\"),Regexp(\"1[34578]\\\\d{9}\",message = \"手机格式不正确!\")],\n render_kw = {\"class\":\"form-control\",\"placeholder\":\"请输入手机号!\"}\n )\n face = FileField(\n label = \"头像\",validators = [DataRequired(\"请上传头像!\")]\n )\n info = TextAreaField(\n label = \"简介\",validators = [DataRequired(\"请输入简介!\")],\n render_kw = {\"class\":\"form-control\",\"rows\":10}\n )\n submit = SubmitField(\"确认修改!\",render_kw = {\"class\":\"btn btn-success\"})\n\n#会员密码修改\nclass PwdForm(FlaskForm):\n old_pwd = StringField(label = \"旧密码\",validators = [DataRequired(\"请输入旧密码!\")],\n description = \"旧密码\",render_kw = {\"class\":\"form-control\",\"placeholder\":\"请输入旧密码\"})\n new_pwd = StringField(label = \"新密码\",validators = [DataRequired(\"请输入新密码!\")],\n description = \"新密码\",render_kw = {\"class\":\"form-control\",\"placeholder\":\"请输入新密码\"})\n submit = SubmitField(\"提交\",render_kw = {\"class\":\"form-control\"})\n\n#评论表单\nclass CommentForm(FlaskForm):\n content = TextAreaField(\n label = \"内容\",\n validators = [DataRequired(\"请输入内容!\")],\n description = \"内容\",render_kw = {\"id\":\"input_content\"}\n )\n submit = SubmitField(\n \"提交评论\",\n render_kw = {\"class\":\"btn btn-success\",\"id\":\"btn-sub\"}\n 
)\n\n","sub_path":"app/home/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":4812,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"345985772","text":"import pandas as pd\nfrom aws_xray_sdk.core import patch_all, xray_recorder\nfrom dataplatform.awslambda.logging import logging_wrapper\n\nimport common.transform as transform\nimport common.aws as common_aws\nfrom common.aggregateV2 import Aggregate, ColumnNames\nfrom common.output import Output, Metadata\nfrom common.templates import TemplateK\nfrom common.population_utils import generate_population_df\nfrom common.event import event_handler\n\npatch_all()\n\nvalue_columns = [\n \"antall_redusert_funksjonsevne\",\n \"ikke_vestlig_kort\",\n \"lav_utdanning\",\n \"antall_fattige_barnehusholdninger\",\n \"antall_ikke_sysselsatte\",\n \"ikke_fullfort_vgs\",\n \"dodsrate\",\n \"antall_trangbodde\",\n]\n\n\ngraph_metadata = Metadata(\n heading=\"Personer fra 16 til 66 år med redusert funksjonsevne\",\n series=[\n {\"heading\": \"Redusert funksjonsevne\", \"subheading\": \"\"},\n {\n \"heading\": \"Innvandrere fra Afrika, Asia mv. med kort botid\",\n \"subheading\": \"\",\n },\n {\"heading\": \"Lav utdanning\", \"subheading\": \"\"},\n {\"heading\": \"Lavinntektshusholdninger med barn\", \"subheading\": \"\"},\n {\"heading\": \"Ikke sysselsatte\", \"subheading\": \"\"},\n {\"heading\": \"Ikke fullført vgs\", \"subheading\": \"\"},\n {\"heading\": \"Dødelighet\", \"subheading\": \"\"},\n {\"heading\": \"Trangbodde\", \"subheading\": \"\"},\n ],\n)\n\nkey_cols = ColumnNames().default_groupby_columns()\n\n\n@logging_wrapper(\"levekar_totalt\")\n@xray_recorder.capture(\"event_handler\")\n@event_handler(\n redusert_funksjonsevne_raw=\"redusert-funksjonsevne\",\n botid_ikke_vestlige_raw=\"botid-ikke-vestlige\",\n lav_utdanning_raw=\"lav-utdanning\",\n fattige_husholdninger_raw=\"fattige-husholdninger\",\n sysselsatte_raw=\"sysselsatte\",\n befolkning_raw=\"befolkning-etter-kjonn-og-alder\",\n ikke_fullfort_vgs_raw=\"ikke-fullfort-vgs\",\n dodsrater_raw=\"dodsrater\",\n trangbodde_raw=\"trangbodde\",\n)\ndef start(\n redusert_funksjonsevne_raw,\n botid_ikke_vestlige_raw,\n lav_utdanning_raw,\n fattige_husholdninger_raw,\n sysselsatte_raw,\n befolkning_raw,\n ikke_fullfort_vgs_raw,\n dodsrater_raw,\n trangbodde_raw,\n output_prefix,\n type_of_ds,\n):\n redusert_funksjonsevne_input_df = generate_redusert_funksjonsevne_df(\n redusert_funksjonsevne_raw\n )\n ikke_vestlig_kort_botid_input_df = generate_ikke_vestlig_innvandrer_kort_botid_df(\n botid_ikke_vestlige_raw, befolkning_raw.copy()\n )\n lav_utdanning_input_df = generate_lav_utdanning_df(lav_utdanning_raw)\n fattige_barnehusholdninger_input_df = generate_fattige_barnehusholdninger_df(\n fattige_husholdninger_raw\n )\n ikke_sysselsatte_input_df = generate_ikke_sysselsatte_df(\n sysselsatte_raw, befolkning_raw.copy()\n )\n ikke_fullfort_vgs_input_df = generate_ikke_fullfort_vgs_df(ikke_fullfort_vgs_raw)\n dodsrater_input_df = generate_dosrater_df(dodsrater_raw)\n trangbodde_input_df = generate_trangbodde_input_df(\n trangbodde_raw, befolkning_raw.copy()\n )\n\n input_df = Aggregate({}).merge_all(\n *[\n redusert_funksjonsevne_input_df,\n ikke_vestlig_kort_botid_input_df,\n lav_utdanning_input_df,\n fattige_barnehusholdninger_input_df,\n ikke_sysselsatte_input_df,\n ikke_fullfort_vgs_input_df,\n dodsrater_input_df,\n trangbodde_input_df,\n ]\n )\n output_list = []\n if type_of_ds == \"status\":\n output_list = 
output_status(input_df, value_columns)\n\n else:\n raise Exception(f\"Invalid config type: {type_of_ds}\")\n\n if output_list:\n common_aws.write_to_intermediate(\n output_key=output_prefix, output_list=output_list\n )\n else:\n raise Exception(\"No data in outputlist\")\n\n\ndef add_relative_ratio(df, ratio_col):\n df = df[~df[\"bydel_id\"].isin([\"16\", \"17\", \"99\"])]\n oslo_ratio_col = f\"{ratio_col}_oslo\"\n district_ratio_col = f\"{ratio_col}_district\"\n oslo_total_df = df[df[\"bydel_id\"] == \"00\"]\n result_df = pd.DataFrame(columns=[*key_cols, district_ratio_col, oslo_ratio_col])\n for (date, district_id), group_df in df.groupby(by=[\"date\", \"bydel_id\"]):\n tmp_df = group_df.copy()\n district_mean = tmp_df[tmp_df[\"delbydel_id\"].isnull()][ratio_col].unique()[0]\n oslo_mean = oslo_total_df[oslo_total_df[\"date\"] == date][ratio_col].unique()[0]\n tmp_df[district_ratio_col] = tmp_df[ratio_col] / district_mean\n tmp_df[oslo_ratio_col] = tmp_df[ratio_col] / oslo_mean\n result_df = result_df.append(\n tmp_df[[*key_cols, district_ratio_col, oslo_ratio_col]]\n )\n\n return result_df\n\n\ndef generate_trangbodde_input_df(trangbodde_raw, befolkning_raw):\n data_point = \"antall_trangbodde\"\n population_df = generate_population_df(befolkning_raw)\n\n agg = {\"population\": \"sum\"}\n population_district_df = Aggregate(agg).aggregate(df=population_df)\n\n df = pd.merge(\n trangbodde_raw,\n population_district_df[[\"date\", \"bydel_id\", \"delbydel_id\", \"population\"]],\n how=\"inner\",\n on=[\"bydel_id\", \"date\", \"delbydel_id\"],\n )\n\n df[f\"{data_point}_ratio\"] = df[\"andel_som_bor_trangt\"] / 100\n df[data_point] = df[\"population\"] * df[f\"{data_point}_ratio\"]\n\n return add_relative_ratio(df, f\"{data_point}_ratio\")\n\n\ndef generate_dosrater_df(dodsrater_raw):\n data_point = \"dodsrate\"\n df = dodsrater_raw\n\n df[f\"{data_point}_ratio\"] = df[data_point] / 100\n\n return add_relative_ratio(df, f\"{data_point}_ratio\")\n\n\ndef generate_ikke_fullfort_vgs_df(ikke_fullfort_vgs_raw):\n df = ikke_fullfort_vgs_raw\n data_point = \"ikke_fullfort_vgs\"\n df = df.rename(\n columns={\"antall_personer_ikke_fullfort_i_lopet_av_5_aar\": data_point}\n )\n\n data_point_ratio = f\"{data_point}_ratio\"\n df[data_point_ratio] = df[\"andelen_som_ikke_har_fullfort_i_lopet_av_5_aar\"] / 100\n\n return add_relative_ratio(df, data_point_ratio)\n\n\ndef generate_ikke_sysselsatte_df(sysselsatte_raw, befolkning_raw):\n data_point = \"antall_ikke_sysselsatte\"\n population_col = \"population\"\n\n # Numbers for \"sysselsatte\" is only for age 30 to 59\n befolkning_df = generate_population_df(befolkning_raw, min_age=30, max_age=59)\n\n sub_districts = befolkning_df[\"delbydel_id\"].unique()\n\n sysselsatte_df = sysselsatte_raw\n # Value for date in \"sysselsatte\" was measured in 4th. 
quarter of 2017, while date for \"befolkning\" was measured 1.1.2018.\n sysselsatte_df[\"date\"] = sysselsatte_df[\"date\"] + 1\n sysselsatte_df = sysselsatte_df[sysselsatte_df[\"delbydel_id\"].isin(sub_districts)]\n\n sysselsatte_befolkning_df = pd.merge(\n sysselsatte_df,\n befolkning_df[[\"date\", \"delbydel_id\", \"population\"]],\n how=\"inner\",\n on=[\"date\", \"delbydel_id\"],\n )\n # Ignoring \"Marka\", \"Sentrum\" and \"Uten registrert adresse\"\n ignore_districts = [\"16\", \"17\", \"99\"]\n sysselsatte_befolkning_df = sysselsatte_befolkning_df[\n ~sysselsatte_befolkning_df[\"bydel_id\"].isin(ignore_districts)\n ]\n\n sysselsatte_befolkning_df[data_point] = (\n sysselsatte_befolkning_df[population_col]\n - sysselsatte_befolkning_df[\"antall_sysselsatte\"]\n )\n\n agg = Aggregate({population_col: \"sum\", data_point: \"sum\"})\n aggregated_df = agg.aggregate(sysselsatte_befolkning_df)\n\n input_df = agg.add_ratios(\n aggregated_df, data_points=[data_point], ratio_of=[population_col]\n )\n return add_relative_ratio(input_df, f\"{data_point}_ratio\")\n\n\ndef generate_fattige_barnehusholdninger_df(fattige_husholdninger_raw):\n data_point = \"antall_fattige_barnehusholdninger\"\n data_point_ratio = f\"{data_point}_ratio\"\n df = fattige_husholdninger_raw\n\n df[data_point_ratio] = (\n df[\"husholdninger_med_barn_under_18_aar_eu_skala_andel\"] / 100\n )\n return add_relative_ratio(df, data_point_ratio)\n\n\ndef generate_lav_utdanning_df(lav_utdanning_raw):\n data_point = \"lav_utdanning\"\n education_categories = [\n \"ingen_utdanning_uoppgitt\",\n \"grunnskole\",\n \"videregaende\",\n \"universitet_hogskole_kort\",\n \"universitet_hogskole_lang\",\n ]\n\n lav_utdanning_raw[\"total\"] = lav_utdanning_raw[education_categories].sum(axis=1)\n lav_utdanning_raw[data_point] = lav_utdanning_raw[\n [\"ingen_utdanning_uoppgitt\", \"grunnskole\"]\n ].sum(axis=1)\n\n aggregations = {data_point: \"sum\", \"total\": \"sum\"}\n aggregator = Aggregate(aggregations)\n input_df = aggregator.aggregate(lav_utdanning_raw)\n\n input_df = aggregator.add_ratios(\n input_df, data_points=[data_point], ratio_of=[\"total\"]\n )\n return add_relative_ratio(input_df, f\"{data_point}_ratio\")\n\n\ndef generate_ikke_vestlig_innvandrer_kort_botid_df(\n botid_ikke_vestlige_raw, befolkning_raw\n):\n data_point = \"ikke_vestlig_kort\"\n kort_botid = \"Innvandrer, kort botid (<=5 år)\"\n ikke_vestlig = \"asia_afrika_latin_amerika_og_ost_europa_utenfor_eu\"\n\n df = botid_ikke_vestlige_raw.drop(columns=[\"norge\"])\n df = pivot_table(df, \"botid\", ikke_vestlig)\n df[data_point] = df[kort_botid]\n\n aggregator = Aggregate({data_point: \"sum\"})\n df = aggregator.aggregate(df)\n\n population_df = generate_population_df(befolkning_raw)\n population_district_df = Aggregate({\"population\": \"sum\"}).aggregate(\n df=population_df\n )\n\n df = pd.merge(\n df,\n population_district_df[[\"date\", \"bydel_id\", \"delbydel_id\", \"population\"]],\n how=\"inner\",\n on=[\"bydel_id\", \"date\", \"delbydel_id\"],\n )\n\n df = aggregator.add_ratios(\n df=df, data_points=[\"ikke_vestlig_kort\"], ratio_of=[\"population\"]\n )\n\n return add_relative_ratio(df, \"ikke_vestlig_kort_ratio\")\n\n\ndef generate_redusert_funksjonsevne_df(redusert_funksjonsevne_raw):\n data_point = \"antall_redusert_funksjonsevne\"\n input_df = redusert_funksjonsevne_raw.rename(\n columns={\"antall_personer_med_redusert_funksjonsevne\": data_point}\n )\n input_df[f\"{data_point}_ratio\"] = (\n input_df[\"andel_personer_med_redusert_funksjonsevne\"] / 100\n 
)\n return add_relative_ratio(input_df, f\"{data_point}_ratio\")\n\n\ndef output_status(input_df, data_points):\n [input_df] = transform.status(input_df)\n output = Output(\n values=data_points, df=input_df, metadata=graph_metadata, template=TemplateK()\n ).generate_output()\n return output\n\n\ndef pivot_table(df, pivot_column, value_columns):\n key_columns = list(\n filter(lambda x: x not in [pivot_column, *value_columns], list(df))\n )\n df_pivot = pd.concat(\n (df[key_columns], df.pivot(columns=pivot_column, values=value_columns)), axis=1\n )\n return df_pivot.groupby(key_columns).sum().reset_index()\n","sub_path":"functions/levekar_totalt.py","file_name":"levekar_totalt.py","file_ext":"py","file_size_in_byte":10683,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"307446585","text":"import bottle\n\n@bottle.route ('/')\ndef home_page():\n #return \"Hello World\\n\"\n mythings = ['apple', 'orange', 'banana', 'peach']\n #return bottle.template ('hello_world', username='Steve', things=mythings)\n return bottle.template ('hello_world', {'username' : 'Stephen', \n 'things' : mythings})\n\n#@bottle.route ('/testpage')\n#def test_page():\n# return \"This is a test page\"\n\n@bottle.post ('/favorite_fruit')\ndef favorite_fruit():\n fruit = bottle.request.forms.get (\"fruit\")\n if (fruit == None or fruit == \"\"):\n fruit = \"No fruit selected.\"\n \n #return bottle.template ('fruit_selection', {'fruit':fruit})\n bottle.response.set_cookie (\"fruit\", fruit)\n bottle.redirect (\"/show_fruit\")\n\n@bottle.route ('/show_fruit')\ndef show_fruit():\n fruit = bottle.request.get_cookie (\"fruit\")\n return bottle.template ('fruit_selection.tpl', {'fruit':fruit})\n\nbottle.debug (True)\nbottle.run (host='localhost', port=8080)\n","sub_path":"Week1/helloworld_bottle.py","file_name":"helloworld_bottle.py","file_ext":"py","file_size_in_byte":991,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"255729607","text":"\"\"\"Tests for mixing schemes\n\"\"\" \nfrom __future__ import division\nfrom __future__ import absolute_import\n\nimport numpy as np\nimport numpy.testing as nt\nimport unittest\n\nfrom . import input\nfrom . import mix\nfrom . 
import NON\n\nclass TestPulay(unittest.TestCase):\n    \"\"\"Tests for the Pulay mixer\n    \n    \"\"\" \n\n    def setUp(self):\n        \"\"\" Sets up harmonic oscillator system \"\"\"\n        pm = input.Input()\n        pm.run.name = 'unittest'\n        pm.run.save = False\n        pm.run.verbosity = 'low'\n\n        # It might still be possible to speed this up\n        pm.sys.NE = 2  #: Number of electrons\n        pm.sys.grid = 61  #: Number of grid points (must be odd)\n        pm.sys.xmax = 7.5  #: Size of the system\n        pm.sys.acon = 1.0  #: Smoothing of the Coulomb interaction\n        pm.sys.interaction_strength = 1  #: Scales the strength of the Coulomb interaction\n        def v_ext(x):\n            \"\"\"Initial external potential\"\"\"\n            return 0.5*(0.25**2)*(x**2)\n        pm.sys.v_ext = v_ext\n        \n        pm.ext.ctol = 1e-5\n\n        self.pm = pm\n\n    def test_array_update_1(self):\n        \"\"\"Testing internal variables of Pulay mixer\n        \n        Just checking that the maths works as expected from\n        [Kresse1996]_ p.34 ...\n        \"\"\"\n        pm = self.pm\n        pm.lda.kerker_length = 100\n        order = 4\n        mixer = mix.PulayMixer(pm, order=order, preconditioner=None)\n\n        x = np.linspace(-pm.sys.xmax, pm.sys.xmax, pm.sys.grid)\n        den_in = 1 + 0.1*np.sin(x)\n        den_out = 1 - 0.1*np.sin(x)\n\n        den_in_new = mixer.mix(den_in, den_out)\n\n        nt.assert_allclose(mixer.den_in[0], den_in)\n        #nt.assert_allclose(mixer.den_delta[0], den_in-0)\n        nt.assert_allclose(mixer.res[0], -0.2*np.sin(x))\n        #nt.assert_allclose(mixer.res_delta[0], -0.2*np.sin(x)-0)\n\n        #overlaps = 0.04*np.dot(np.sin(x),np.sin(x))\n        #A_bar = overlaps\n        #A_bar_inv = 1/overlaps\n        #alpha_bar = - A_bar_inv * overlaps\n\n        #nt.assert_allclose(alpha_bar, -1)\n\n\nclass TestKerker(unittest.TestCase):\n    \"\"\"Tests for the Kerker preconditioner\n    \n    \"\"\" \n\n    def setUp(self):\n        \"\"\" Sets up harmonic oscillator system \"\"\"\n        pm = input.Input()\n        pm.run.name = 'unittest'\n        pm.run.save = False\n        pm.run.verbosity = 'low'\n\n        # It might still be possible to speed this up\n        pm.sys.NE = 2  #: Number of electrons\n        pm.sys.grid = 61  #: Number of grid points (must be odd)\n        pm.sys.xmax = 7.5  #: Size of the system\n        pm.sys.acon = 1.0  #: Smoothing of the Coulomb interaction\n        pm.sys.interaction_strength = 1  #: Scales the strength of the Coulomb interaction\n        def v_ext(x):\n            \"\"\"Initial external potential\"\"\"\n            return 0.5*(0.25**2)*(x**2)\n        pm.sys.v_ext = v_ext\n        \n        pm.ext.ctol = 1e-5\n\n        pm.setup_space()\n        self.pm = pm\n\n\n    def test_screening_length_1(self):\n        \"\"\"Testing screening length in Kerker\n        \n        Check that for infinite screening length, simple mixing is recovered.\n        [Kresse1996]_ p.34 ...\n        \"\"\"\n        pm = self.pm\n        pm.lda.kerker_length = 1e6\n        pm.lda.mix = 1.0\n\n        mixer = mix.PulayMixer(pm, order=20, preconditioner='kerker')\n\n        den = NON.main(pm).gs_non_den\n        # Note: Kerker always removes the G=0 component\n        # (it is intended to be used on density *differences*, where the G=0\n        # component vanishes anyhow)\n        den -= np.average(den)\n        den_cond = mixer.precondition(den, None, None)\n\n\n        nt.assert_allclose(den, den_cond, 1e-3)\n\n\nclass TestRPA(unittest.TestCase):\n    \"\"\"Tests for the RPA preconditioner\n    \n    \"\"\" \n\n    def setUp(self):\n        \"\"\" Sets up harmonic oscillator system \"\"\"\n        pm = input.Input()\n        pm.run.name = 'unittest'\n        pm.run.save = False\n        pm.run.verbosity = 'low'\n\n        # It might still be possible to speed this up\n        pm.sys.NE = 2  #: Number of electrons\n        pm.sys.grid = 61  #: Number of grid points (must be odd)\n        pm.sys.xmax = 7.5  #: Size of the system\n        pm.sys.acon = 1.0  #: Smoothing of the Coulomb interaction\n        pm.sys.interaction_strength = 1  #: Scales the strength of 
the Coulomb interaction\n def v_ext(x):\n \"\"\"Initial external potential\"\"\"\n return 0.5*(0.25**2)*(x**2)\n pm.sys.v_ext = v_ext\n\n pm.lda.mix = 1.0\n\n self.pm = pm\n\n def test_chi_1(self):\n \"\"\"Testing potential-density response\n \n Testing some basic symmetry properties of the\n potential-density response and the preconditioning matrices\n required for density/potential mixing.\n\n \"\"\"\n pm = self.pm\n\n mixer = mix.PulayMixer(pm, order=1, preconditioner='rpa')\n\n results = NON.main(pm)\n den = results.gs_non_den\n eigv = results.gs_non_eigv\n eigf = results.gs_non_eigf\n\n chi = mixer.preconditioner.chi(eigv, eigf)\n v = mixer.preconditioner.coulomb_repulsion\n dx = mixer.preconditioner.x_delta\n nx = mixer.preconditioner.x_npt\n\n nt.assert_allclose(chi, chi.T, 1e-6)\n nt.assert_allclose(v, v.T, 1e-6)\n\n # this is just for testing purposes\n eps_pmix = np.eye(nx)/dx - np.dot(v,chi)*dx\n eps_dmix = np.eye(nx)/dx - np.dot(chi,v)*dx\n nt.assert_allclose(eps_pmix, eps_dmix.T, 1e-6)\n","sub_path":"iDEA/test_mix.py","file_name":"test_mix.py","file_ext":"py","file_size_in_byte":5580,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"509189001","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n# Copyright: Ansible Project\n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\n\nfrom __future__ import absolute_import, division, print_function\n__metaclass__ = type\n\n\nANSIBLE_METADATA = {'metadata_version': '1.1',\n 'status': ['preview'],\n 'supported_by': 'community'}\n\n\nDOCUMENTATION = '''\n---\nmodule: bigpanda\nauthor: \"Hagai Kariti (@hkariti)\"\nshort_description: Notify BigPanda about deployments\nversion_added: \"1.8\"\ndescription:\n - Notify BigPanda when deployments start and end (successfully or not). Returns a deployment object containing all the parameters for future module calls.\noptions:\n component:\n description:\n - \"The name of the component being deployed. Ex: billing\"\n required: true\n aliases: ['name']\n version:\n description:\n - The deployment version.\n required: true\n token:\n description:\n - API token.\n required: true\n state:\n description:\n - State of the deployment.\n required: true\n choices: ['started', 'finished', 'failed']\n hosts:\n description:\n - Name of affected host name. Can be a list.\n required: false\n default: machine's hostname\n aliases: ['host']\n env:\n description:\n - The environment name, typically 'production', 'staging', etc.\n required: false\n owner:\n description:\n - The person responsible for the deployment.\n required: false\n description:\n description:\n - Free text description of the deployment.\n required: false\n url:\n description:\n - Base URL of the API server.\n required: False\n default: https://api.bigpanda.io\n validate_certs:\n description:\n - If C(no), SSL certificates for the target url will not be validated. 
This should only be used\n on personally controlled sites using self-signed certificates.\n required: false\n default: 'yes'\n type: bool\n\n# informational: requirements for nodes\nrequirements: [ ]\n'''\n\nEXAMPLES = '''\n- bigpanda:\n component: myapp\n version: '1.3'\n token: '{{ bigpanda_token }}'\n state: started\n\n- bigpanda:\n component: myapp\n version: '1.3'\n token: '{{ bigpanda_token }}'\n state: finished\n\n# If outside servers aren't reachable from your machine, use delegate_to and override hosts:\n- bigpanda:\n component: myapp\n version: '1.3'\n token: '{{ bigpanda_token }}'\n hosts: '{{ ansible_hostname }}'\n state: started\n delegate_to: localhost\n register: deployment\n\n- bigpanda:\n component: '{{ deployment.component }}'\n version: '{{ deployment.version }}'\n token: '{{ deployment.token }}'\n state: finished\n delegate_to: localhost\n'''\n\n# ===========================================\n# Module execution.\n#\nimport json\nimport socket\nimport traceback\n\nfrom ansible.module_utils.basic import AnsibleModule\nfrom ansible.module_utils._text import to_native\nfrom ansible.module_utils.urls import fetch_url\n\n\ndef main():\n\n module = AnsibleModule(\n argument_spec=dict(\n component=dict(required=True, aliases=['name']),\n version=dict(required=True),\n token=dict(required=True, no_log=True),\n state=dict(required=True, choices=['started', 'finished', 'failed']),\n hosts=dict(required=False, default=[socket.gethostname()], aliases=['host']),\n env=dict(required=False),\n owner=dict(required=False),\n description=dict(required=False),\n message=dict(required=False),\n source_system=dict(required=False, default='ansible'),\n validate_certs=dict(default='yes', type='bool'),\n url=dict(required=False, default='https://api.bigpanda.io'),\n ),\n supports_check_mode=True,\n )\n\n token = module.params['token']\n state = module.params['state']\n url = module.params['url']\n\n # Build the common request body\n body = dict()\n for k in ('component', 'version', 'hosts'):\n v = module.params[k]\n if v is not None:\n body[k] = v\n\n if not isinstance(body['hosts'], list):\n body['hosts'] = [body['hosts']]\n\n # Insert state-specific attributes to body\n if state == 'started':\n for k in ('source_system', 'env', 'owner', 'description'):\n v = module.params[k]\n if v is not None:\n body[k] = v\n\n request_url = url + '/data/events/deployments/start'\n else:\n message = module.params['message']\n if message is not None:\n body['errorMessage'] = message\n\n if state == 'finished':\n body['status'] = 'success'\n else:\n body['status'] = 'failure'\n\n request_url = url + '/data/events/deployments/end'\n\n # Build the deployment object we return\n deployment = dict(token=token, url=url)\n deployment.update(body)\n if 'errorMessage' in deployment:\n message = deployment.pop('errorMessage')\n deployment['message'] = message\n\n # If we're in check mode, just exit pretending like we succeeded\n if module.check_mode:\n module.exit_json(changed=True, **deployment)\n\n # Send the data to bigpanda\n data = json.dumps(body)\n headers = {'Authorization': 'Bearer %s' % token, 'Content-Type': 'application/json'}\n try:\n response, info = fetch_url(module, request_url, data=data, headers=headers)\n if info['status'] == 200:\n module.exit_json(changed=True, **deployment)\n else:\n module.fail_json(msg=json.dumps(info))\n except Exception as e:\n module.fail_json(msg=to_native(e), exception=traceback.format_exc())\n\n\nif __name__ == '__main__':\n 
main()\n","sub_path":"env/lib/python3.9/site-packages/ansible/modules/monitoring/bigpanda.py","file_name":"bigpanda.py","file_ext":"py","file_size_in_byte":5714,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"534033317","text":"# search.py\n# ---------\n# Licensing Information: You are free to use or extend these projects for\n# educational purposes provided that (1) you do not distribute or publish\n# solutions, (2) you retain this notice, and (3) you provide clear\n# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.\n# \n# Attribution Information: The Pacman AI projects were developed at UC Berkeley.\n# The core projects and autograders were primarily created by John DeNero\n# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).\n# Student side autograding was added by Brad Miller, Nick Hay, and\n# Pieter Abbeel (pabbeel@cs.berkeley.edu).\n\n\n\"\"\"\nIn search.py, you will implement generic search algorithms which are called by\nPacman agents (in searchAgents.py).\n\"\"\"\n\nimport util\n\n\nclass SearchProblem:\n \"\"\"\n This class outlines the structure of a search problem, but doesn't implement\n any of the methods (in object-oriented terminology: an abstract class).\n\n You do not need to change anything in this class, ever.\n \"\"\"\n\n def getStartState(self):\n \"\"\"\n Returns the start state for the search problem.\n \"\"\"\n util.raiseNotDefined()\n\n def isGoalState(self, state):\n \"\"\"\n state: Search state\n\n Returns True if and only if the state is a valid goal state.\n \"\"\"\n util.raiseNotDefined()\n\n def getSuccessors(self, state):\n \"\"\"\n state: Search state\n\n For a given state, this should return a list of triples, (successor,\n action, stepCost), where 'successor' is a successor to the current\n state, 'action' is the action required to get there, and 'stepCost' is\n the incremental cost of expanding to that successor.\n \"\"\"\n util.raiseNotDefined()\n\n def getCostOfActions(self, actions):\n \"\"\"\n actions: A list of actions to take\n\n This method returns the total cost of a particular sequence of actions.\n The sequence must be composed of legal moves.\n \"\"\"\n util.raiseNotDefined()\n\n\ndef tinyMazeSearch(problem):\n \"\"\"\n Returns a sequence of moves that solves tinyMaze. 
For any other maze, the\n sequence of moves will be incorrect, so only use this for tinyMaze.\n \"\"\"\n from game import Directions\n s = Directions.SOUTH\n w = Directions.WEST\n return [s, s, w, s, w, w, s, w]\n\n\ndef generalSearch(problem, prirotyFunction, debug=False):\n frontier = util.PriorityQueue()\n frontier.push(item=(problem.getStartState(), [], 0), priority=0)\n explored = set()\n\n while not frontier.isEmpty():\n current_node, actions_to_curr_node, cost_to_curr = frontier.pop()\n\n if problem.isGoalState(current_node):\n return actions_to_curr_node\n\n if debug:\n print('----------------------------------------------------------------')\n print('\\033[95m') # Magenta\n print('| Current Node\\t| Cost to Current\\t|')\n print('| ------------\\t| ---------------\\t|')\n print('\\033[92m') # Green\n print(f'| {current_node}\\t| {cost_to_curr}\\t|')\n print()\n print('\\033[96m') # cyan\n print('| Successor\\t| Cost to Next\\t| Actions to Next\\t|')\n print('| ---------\\t| ------------\\t| ---------------\\t|')\n print('\\033[0m')\n\n for successor in problem.getSuccessors(current_node):\n successor_node, action_to_neighbour, _ = successor\n if successor_node not in explored:\n cost_to_neighbour = prirotyFunction(current_node, successor, cost_to_curr)\n\n for priority, _, heap_item in frontier.heap:\n if successor_node == heap_item[0]:\n if priority <= cost_to_neighbour:\n break\n\n frontier.update(\n item=\n (successor_node, actions_to_curr_node.copy() + [action_to_neighbour], cost_to_neighbour),\n priority=cost_to_neighbour\n )\n break\n else:\n frontier.push(\n item=(successor_node, actions_to_curr_node.copy() + [action_to_neighbour], cost_to_neighbour),\n priority=cost_to_neighbour\n )\n\n if debug:\n print('\\033[93m') # yellow\n print(f'| {successor_node}\\t| {cost_to_neighbour}\\t| {action_to_neighbour}\\t|')\n print('\\033[0m')\n\n explored.add(current_node)\n\n\ndef depthFirstSearch(problem):\n \"\"\"\n Search the deepest nodes in the search tree first.\n\n Your search algorithm needs to return a list of actions that reaches the\n goal. Make sure to implement a graph search algorithm.\n\n To get started, you might want to try some of these simple commands to\n understand the search problem that is being passed in:\n\n print \"Start:\", problem.getStartState()\n print \"Is the start a goal?\", problem.isGoalState(problem.getStartState())\n print \"Start's successors:\", problem.getSuccessors(problem.getStartState())\n \"\"\"\n def dfsPriorityFunction(_node, _successor_state, cost_to_node):\n return cost_to_node - 1\n\n return generalSearch(problem=problem, prirotyFunction=dfsPriorityFunction)\n\n\ndef breadthFirstSearch(problem):\n \"\"\"Search the shallowest nodes in the search tree first.\"\"\"\n def bfsPriorityFunction(_node, _successor_state, cost_to_node):\n return cost_to_node + 1\n\n return generalSearch(problem=problem, prirotyFunction=bfsPriorityFunction)\n\n\ndef uniformCostSearch(problem):\n \"\"\"Search the node of least total cost first.\"\"\"\n def ucsPriorityFunction(_node, successor_state, cost_to_node):\n _, _, step_cost = successor_state\n # print(f'---\\t Node: {_node}, Successor: {successor_state[0]}')\n # print(f'---\\t Cost: {cost_to_node}, Step Cost: {step_cost}')\n\n return cost_to_node + step_cost\n\n return generalSearch(problem=problem, prirotyFunction=ucsPriorityFunction)\n\n\ndef nullHeuristic(state, problem=None):\n \"\"\"\n A heuristic function estimates the cost from the current state to the nearest\n goal in the provided SearchProblem. 
This heuristic is trivial.\n    \"\"\"\n    return 0\n\n\ndef aStarSearch(problem, heuristic=nullHeuristic):\n    \"\"\"Search the node that has the lowest combined cost and heuristic first.\"\"\"\n    def aStarPriorityFunction(node, successor_state, cost_to_node):\n        prev_heuristic = heuristic(node, problem)\n        successor_node, _, step_cost = successor_state\n        new_heuristic = heuristic(successor_node, problem)\n\n        return cost_to_node - prev_heuristic + (step_cost + new_heuristic)\n\n    return generalSearch(problem=problem, prirotyFunction=aStarPriorityFunction)\n\n\n# Abbreviations\nbfs = breadthFirstSearch\ndfs = depthFirstSearch\nastar = aStarSearch\nucs = uniformCostSearch\n","sub_path":"Search/search.py","file_name":"search.py","file_ext":"py","file_size_in_byte":6982,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"610447412","text":"import os\nfrom os.path import abspath, expanduser, basename\nimport re\nfrom collections import namedtuple\nfrom enum import Enum\n\nToken = namedtuple('Token', 'index value')\n\nclass NotAnAddress(Exception):\n    pass\n\nclass Direction(Enum):\n    backward = 0\n    forward = 1\n\nclass Address(object):\n    def __init__(self, spec=None):\n        self.path = None\n        self.instructions = []\n        if spec:\n            self.respec(spec)\n\n    def respec(self, spec):\n        \"\"\"\n        build the instructions to navigate to `spec`.\n        \"\"\"\n        self.instructions = []\n        data = spec.split(\":\")\n        if len(data) > 1:\n            if data[0] != '':\n                test_path = abspath(expanduser(data[0]))\n                if os.access(test_path, os.R_OK):\n                    self.path = os.path.abspath(test_path)\n\n            if data[1] != '':\n                chars = list(data[1])\n                tokens = []\n                c_token = ''\n                for c in chars:\n                    if re.match('[+-]', c):\n                        tokens.append(c_token)\n                        c_token = ''\n                    c_token = c_token + c\n                tokens.append(c_token)\n\n                tokens = list(filter(lambda x: x != '', tokens))\n                for tok in zip(range(len(tokens)), tokens):\n                    ttok = Token(*tok)\n                    self.instructions.extend(self._simple_address(ttok))\n        else:\n            raise NotAnAddress\n\n    def _simple_address(self, token):\n        \"\"\"\n        build instructions from a simple address.\n        `token` is a Token namedtuple\n        \"\"\"\n        instructions = []\n\n        # Set direction\n        if token.value.startswith('-'):\n            direction = Direction.backward\n        else:\n            direction = Direction.forward\n\n        # Strip direction marks\n        if re.match('[-+]', token.value[0]):\n            token = Token(token.index, token.value[1:])\n\n        # Compile addresses depending on their type\n\n        if token.value == '0': # beginning of file\n            instructions = ['normal gg']\n\n        elif token.value == '$': # end of file\n            instructions = ['normal G$']\n\n        elif token.value.startswith('#'): # character position\n            if token.index == 0:\n                instructions.append('normal gg')\n\n            pos = token.value[1:]\n            if direction == Direction.forward and token.index == 0:\n                instructions.append(pos + 'go')\n            else:\n                if direction == Direction.forward:\n                    pos = str(int(pos) - 1)\n                instructions.append('exe \"go \".eval(\"line2byte(line(\\'.\\')) - 1 + col(\\'.\\') - ' + pos + '\")')\n\n        elif token.value.startswith('/') or token.value.startswith('?'): # search\n            if token.index == 0:\n                if token.value.startswith('/'):\n                    instructions.append('normal gg')\n                else:\n                    instructions.append('normal G$')\n\n            if token.value.startswith('?') or direction == Direction.backward:\n                flags = 'b'\n            else:\n                flags = ''\n\n            instructions.append(\"call search('\" + token.value[1:-1] \\\n                + \"', '\" + flags + \"')\")\n\n        else: # line number\n            instructions = [token.value]\n\n        return instructions\n\n    def __call__(self, vim, mode=\"botright vsplit\"):\n        if vim:\n            if self.path:\n                vim.command(\" \".join([mode, self.path]))\n            for i in self.instructions:\n                vim.command(i)\n        else:\n            print(\" \".join([mode, self.path]))\n            for i in self.instructions:\n                print(i)\n","sub_path":"rplugin/python3/oxberry/address.py","file_name":"address.py","file_ext":"py","file_size_in_byte":3711,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"248122370","text":"\"\"\"In Progress, COMP110 EX01 relational_operators, bool questions that output an answer that are determined by user input.\"\"\"\n\n__author__ = \"730447313\"\n\nuser_input_1: str = input(\"Left-hand side: \")\nuser_input_2: str = input(\"Right-hand side: \")\ninteger_value_1: int = int(user_input_1)\ninteger_value_2: int = int(user_input_2)\nless_than_output: bool = (integer_value_1) < (integer_value_2)\nis_at_least_output: bool = (integer_value_1) >= (integer_value_2)\nequal_to_output: bool = (integer_value_1) == (integer_value_2)\nnot_equal_to_output: bool = (integer_value_1) != (integer_value_2)\nprint((user_input_1) + \" < \" + (user_input_2) + \" is \" + str(less_than_output))\nprint((user_input_1) + \" >= \" + (user_input_2) + \" is \" + str(is_at_least_output))\nprint((user_input_1) + \" == \" + (user_input_2) + \" is \" + str(equal_to_output))\nprint((user_input_1) + \" != \" + (user_input_2) + \" is \" + str(not_equal_to_output))","sub_path":"exercises/ex01/relational_operators.py","file_name":"relational_operators.py","file_ext":"py","file_size_in_byte":911,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"436966697","text":"import csv\nimport sys\n\nwith open(\"budget_data.csv\") as csvfile:\n    csv_reader = csv.DictReader(csvfile)\n\n    rows = list(csv_reader)\n    total_rows = len(rows)\n    tot = 0\n    prev = 0\n    diff_list = []\n    maxchange = 0\n    minchange = 0\n\n    for row in rows:\n        current = int(row['Profit/Losses'])\n\n        tot = tot + current\n        # build list of changes for average\n        if current != prev:\n            change = int(current - prev)\n            diff_list.append(change)\n            # account for no change on 1st value\n            if row == rows[0]:\n                diff_list.pop(0)\n                change = 0\n            # conditional to find max and min changes\n            if change > maxchange:\n                maxchange = change\n                max_date = row['Date']\n            if change < minchange:\n                minchange = change\n                min_date = row['Date']\n        prev = current\n\n\ndivider = len(diff_list)\navg = (sum(diff_list) / divider)\n\nwith open(\"results.txt\", 'w') as f:\n    sys.stdout = f\n    print(\"Financial Analysis\")\n    print(\"-\" * 25)\n    print(\"Total Months {}\".format(total_rows))\n    print(\"Total: ${}\".format(tot))\n    print(\"Average Change: ${}\".format(round(avg, 2)))\n    print(\"Greatest Increase in Profits: {} (${})\".format(max_date, maxchange))\n    print(\"Greatest Decrease in Profits: {} (${})\".format(min_date, minchange))\nf.close()\nsys.stdout = sys.__stdout__\n\nwith open('results.txt', 'r') as file:\n    print(file.read())\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1492,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"178489508","text":"class Solution(object):\n    def primePalindrome(self, N):\n        def is_prime(n):\n            return n > 1 and all(n%d for d in xrange(2, int(n**.5) + 1))\n\n        for length in xrange(1, 6):\n            for root in xrange(10**(length - 1), 10**length):\n                s = str(root)\n                x = int(s + s[-2::-1])\n                if x >= N and is_prime(x):\n                    return x\n\n            for root in xrange(10**(length - 1), 10**length):\n                s = 
str(root)\n                x = int(s + s[-1::-1]) \n                if x >= N and is_prime(x):\n                    return x\n","sub_path":"笔试面试题/2020BISHI/公司笔试记录/新建文件夹/1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":601,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"107045871","text":"#!/home/pi/.virtualenvs/py3cv4/bin/python\n\nfrom __future__ import print_function\nimport pycreate2\nimport socketserver, threading, time\nimport struct\n\nimport io\nimport picamera\nimport logging\nimport datetime as dt\nimport os\nfrom threading import Condition\nfrom http import server\n\n# Web Server Streaming Section #\nPAGE=\"\"\"\\\n<html>\n<head>\n<title>Raspberry Pi - Surveillance Camera</title>\n</head>\n<body>\n<center><h1>Raspberry Pi - Surveillance Camera</h1></center>\n<center><img src=\"stream.mjpg\" width=\"640\" height=\"480\"></center>\n</body>\n</html>
\n\n\n\n\n\"\"\"\n\nkeep_running = True\nObstacle_detect = False\n\nclass StreamingOutput(object):\n def __init__(self):\n self.frame = None\n self.buffer = io.BytesIO()\n self.condition = Condition()\n\n def write(self, buf):\n if buf.startswith(b'\\xff\\xd8'):\n # New frame, copy the existing buffer's content and notify all\n # clients it's available\n self.buffer.truncate()\n with self.condition:\n self.frame = self.buffer.getvalue()\n self.condition.notify_all()\n self.buffer.seek(0)\n return self.buffer.write(buf)\n\nclass ThreadedStreamingHandler(server.BaseHTTPRequestHandler):\n\n def do_GET(self):\n\n global keep_running\n global Obstacle_detect\n\n if self.path == '/':\n self.send_response(301)\n self.send_header('Location', '/index.html')\n self.end_headers()\n elif self.path == '/index.html':\n content = PAGE.encode('utf-8')\n self.send_response(200)\n self.send_header('Content-Type', 'text/html')\n self.send_header('Content-Length', len(content))\n self.end_headers()\n self.wfile.write(content)\n elif self.path == '/Pictures/videostill_1.jpg':\n path_to_image = os.getcwd() + self.path\n if os.path.isfile(path_to_image):\n img = open(path_to_image, 'rb')\n statinfo = os.stat(path_to_image)\n img_size = statinfo.st_size\n self.send_response(200)\n self.send_header('Content-Type', 'image/jpeg')\n self.send_header('Content-Length', img_size)\n self.end_headers()\n self.wfile.write(img.read())\n self.wfile.write(b'\\r\\n')\n img.close() \n elif self.path == '/stream.mjpg':\n self.send_response(200)\n self.send_header('Age', 0)\n self.send_header('Cache-Control', 'no-cache, private')\n self.send_header('Pragma', 'no-cache')\n self.send_header('Content-Type', 'multipart/x-mixed-replace; boundary=FRAME')\n self.end_headers()\n try:\n while keep_running == True:\n with output.condition:\n output.condition.wait()\n frame = output.frame\n self.wfile.write(b'--FRAME\\r\\n')\n self.send_header('Content-Type', 'image/jpeg')\n self.send_header('Content-Length', len(frame))\n self.end_headers()\n self.wfile.write(frame)\n self.wfile.write(b'\\r\\n')\n \n if Obstacle_detect == True:\n\n timestamp = dt.datetime.now().strftime('%m-%d-%Y-%H:%M:%S')\n videostillfilename='./Pictures/videostill_' + timestamp + '.jpg'\n with open(videostillfilename, \"wb\") as outfile:\n outfile.write(frame)\n \n videostillfilename='./Pictures/videostill_1' + '.jpg'\n with open(videostillfilename, \"wb\") as outfile:\n outfile.write(frame)\n\n Obstacle_detect = False\n\n if keep_running == False:\n\n Obstacle_detect = True\n keep_running = True\n \n except Exception as e:\n logging.warning(\n 'Removed streaming client %s: %s',\n self.client_address, str(e))\n else:\n self.send_error(404)\n self.end_headers()\n\nclass ThreadedStreamingServer(socketserver.ThreadingMixIn, server.HTTPServer):\n allow_reuse_address = True\n daemon_threads = True\n\n# UDP Server Section #\nbot = None\nudp_client_address = None\nudp_socket = None\n\nclass ThreadedUDPRequestHandler(socketserver.BaseRequestHandler):\n\n def handle(self):\n\n global bot\n global udp_client_address\n global udp_socket\n global keep_running\n \n data = self.request[0].strip()\n udp_client_address = self.client_address\n udp_socket = self.request[1]\n current_thread = threading.current_thread()\n \n #print(\"{}: client: {}, wrote: {}\".format(current_thread.name, self.client_address, data))\n #udp_socket.sendto(data.upper(), self.client_address)\n\n print(data)\n bot.SCI.ser.write(data)\n\nclass ThreadedUDPServer(socketserver.ThreadingMixIn, 
socketserver.UDPServer):\n    pass\n\nif __name__ == \"__main__\":\n\n    # Initialise Pi camera and start up Web Server for streaming video\n    camera = picamera.PiCamera(resolution='640x480', framerate=24)\n    output = StreamingOutput()\n    camera.rotation = 180\n    camera.start_recording(output, format='mjpeg')\n\n    timestamp = dt.datetime.now().strftime('%m-%d-%Y-%H:%M:%S')\n    videostillfilename='./Pictures/videostill_' + timestamp + '.jpg'\n    camera.capture(videostillfilename, use_video_port=True)\n    print('Captured image: ' + videostillfilename)\n\n    videostillfilename='./Pictures/videostill_1' + '.jpg'\n    camera.capture(videostillfilename, use_video_port=True)\n\n    camera.wait_recording(2.0)\n    \n    web_server_address = ('', 8000)\n    web_server = ThreadedStreamingServer(web_server_address, ThreadedStreamingHandler)\n    web_server_thread = threading.Thread(target=web_server.serve_forever)\n    web_server_thread.daemon = True\n    \n    # Create2 or Ranger Bot Setup\n    config = {}\n\n    config[\"transport\"] = ''  # For use with serial port interface\n    config[\"robot\"] = 'Ranger'\n    config[\"logfile_name\"] = 'Megapi_Ranger_U1_log_'\n\n    config[\"port\"] = '/dev/ttyAMA0'  # this is the serial port on Raspberry Pi 3\n    config[\"baud\"] = 115200\n    \n    bot = pycreate2.Create2(config)\n    bot.SCI.buffersize = 1024  # Currently, this parameter is only defined for UDP interface\n\n    bot.start()\n    bot.safe()\n\n    # UDP Server Setup\n    HOST, PORT = \"0.0.0.0\", 1025\n\n    server = ThreadedUDPServer((HOST, PORT), ThreadedUDPRequestHandler)\n\n    server_thread = threading.Thread(target=server.serve_forever)\n    server_thread.daemon = True\n\n    try:\n        web_server_thread.start()\n        print(\"Web Server started at port 8000\")\n\n        server_thread.start()\n        print(\"Server started at {} port {}\".format(HOST, PORT))\n\n        while True:\n            #time.sleep(5.0)\n            #print(\"Going to check the serial port for messages\")\n\n            #data_read = bot.SCI.read(bot.SCI.buffersize).decode(\"utf-8\",errors=\"ignore\")\n            data_read = bot.SCI.read(bot.SCI.buffersize)\n            data_read_string = data_read.decode(\"utf-8\",errors=\"ignore\")\n            if data_read_string != \"\":\n                print(data_read_string)\n                if \"Obstacle\" in data_read_string:\n                    #time.sleep(0.01)\n                    keep_running = False\n\n                if udp_client_address is not None:\n                    udp_socket.sendto(data_read, udp_client_address)\n    \n    except (KeyboardInterrupt, SystemExit):\n        server.shutdown()\n        server.server_close()\n        bot.drive_stop()    \n        time.sleep(0.5)\n        camera.stop_recording()\n        web_server.shutdown()\n        web_server.server_close()\n\n        print('Shutting down ... 
bye')","sub_path":"examples/Megapi_Raspberry_Pi_Webstream.py","file_name":"Megapi_Raspberry_Pi_Webstream.py","file_ext":"py","file_size_in_byte":8066,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"215266034","text":"from api import Resource, reqparse, db, auth\nfrom api.models.author import AuthorModel\nfrom api.schemas.author import author_schema, authors_schema\n\n\nclass AuthorResource(Resource):\n def get(self, author_id=None):\n if author_id is None:\n authors = AuthorModel.query.all()\n return authors_schema.dump(authors), 200\n\n author = AuthorModel.query.get(author_id)\n if not author:\n return f\"Author id={author_id} not found\", 404\n\n return author_schema.dump(author), 200\n\n @auth.login_required\n def post(self):\n parser = reqparse.RequestParser()\n parser.add_argument(\"name\", required=True)\n parser.add_argument(\"surname\", required=True)\n author_data = parser.parse_args()\n author = AuthorModel(author_data[\"name\"], author_data[\"surname\"])\n db.session.add(author)\n db.session.commit()\n return author_schema.dump(author), 201\n\n def put(self, author_id):\n parser = reqparse.RequestParser()\n parser.add_argument(\"name\", required=True)\n parser.add_argument(\"surname\", required=True)\n author_data = parser.parse_args()\n author = AuthorModel.query.get(author_id)\n if author is None:\n author = AuthorModel(author_data[\"name\"], author_data[\"surname\"])\n db.session.add(author)\n db.session.commit()\n return author_schema.dump(author), 201\n author.name = author_data[\"name\"]\n author.surname = author_data[\"surname\"]\n db.session.commit()\n return author_schema.dump(author), 200\n","sub_path":"api/resources/author.py","file_name":"author.py","file_ext":"py","file_size_in_byte":1600,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"338104013","text":"#Rosalind Dictionaries Problem\r\n\r\ninp = open('rosalind_ini6.txt', 'r')\r\ndata = inp.read().split() #split on white space get a list of words \r\n\r\nwordfreq = {} #create an empty dictionary \r\n\r\nfor word in data:\r\n\twordfreq[word] = wordfreq.get(word,0) + 1 #for each item in the list data add it to the dictionary\r\n\t\t\t\t#if the key is not in the dictionary set value to 1. 
if it is in the dictionary increment its value\r\n\t\r\n\t\r\nfor word,count in wordfreq.items():\r\n\tprint(word,count)\r\n\t\r\ninp.close()","sub_path":"DictionariesProblem.py","file_name":"DictionariesProblem.py","file_ext":"py","file_size_in_byte":492,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"630719383","text":"\n\n#class header\nclass _PALLID():\n\tdef __init__(self):\n\t\tself.name = \"PALLID\"\n\t\tself.definitions = [u'very pale, in a way that looks unhealthy and not attractive: ', u'showing no enthusiasm or excitement: ']\n\n\t\tself.parents = []\n\t\tself.children = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'adjectives'\n\n\n\tdef run(self, obj1, obj2):\n\t\tself.jsondata[obj2] = {}\n\t\tself.jsondata[obj2]['properties'] = self.name.lower()\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/adjectives/_pallid.py","file_name":"_pallid.py","file_ext":"py","file_size_in_byte":461,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"223204138","text":"\"\"\"\nAuthor: PyDev\nDescription: Stack implementation using Doubly-Linked List\n\"\"\"\n\nclass Element:\n    \"\"\" A class that represents a single skeleton of the Linked List\"\"\"\n    def __init__(self, value):\n        \"\"\"\n        initialize new elements of the linked list with new value\n\n        Arguments:\n        - value: any object reference to assign new element to Linked List\n\n        Instance variables:\n        - value: an object value\n        - next: a reference/pointer to next element in the linked list and default value is None\n        - previous: a reference/pointer to previous element in the linked list and default value is None\n        \"\"\"\n        self.value = value\n        self.next = None\n        self.previous = None\n\nclass DoublyLinkedListWithTail:\n    \"\"\"\n    A class of Doubly-Linked List where it has attributes and properties, such as:\n    - Doubly Linked List\n    - With tail\n    Methods:\n    - pushFront(new_element)\n    - topFront()\n    - popFront()\n    - pushBack(new_element)\n    - topBack()\n    - popBack()\n    - find()\n    - erase()\n    - isEmpty()\n    - addBefore()\n    - addAfter()\n    \"\"\"\n    def __init__(self, head = None):\n        \"\"\"\n        initialize DoublyLinkedListWithTail object instance\n\n        Arguments:\n        - head: default to None\n\n        Instance variables:\n        - head: an object value which refers to the head/start of the Linked List\n        - tail: an object value which refers to the back/end of the Linked List\n        \"\"\"\n        self.head = self.tail = head\n\n    def pushFront(self, new_element):\n        \"\"\"\n        add new element to the front\n\n        Arguments:\n        - new_element: an object that reference to new element to be added\n        \"\"\"\n        if self.tail:\n            new_element.next = self.head\n            self.head = new_element\n            new_element.next.previous = self.head\n        else:\n            self.head = self.tail = new_element\n\n    def topFront(self):\n        \"\"\"\n        return front element/item\n\n        Returns:\n        - the front element/item object\n        \"\"\"\n        return self.head\n\n    def popFront(self):\n        \"\"\"remove and return front element/item\"\"\"\n        if self.head:\n            next_element = self.head.next\n            if next_element:\n                next_element.previous = None\n            else:\n                self.tail = None\n            previous_head_object = self.head\n            self.head = next_element\n            return previous_head_object.value\n        else:\n            print(\"Doubly Linked List With Tail is empty!\")\n\n    def pushBack(self, new_element):\n        \"\"\"\n        add to back, also known as append\n\n        Arguments:\n        - new_element: an object that reference to new element to be added\n        \"\"\"\n        if self.tail:\n            self.tail.next = new_element\n            new_element.previous = self.tail\n            self.tail = new_element\n        \n        else:\n            self.head = self.tail = new_element\n            new_element.previous = None\n\n    def topBack(self):\n        \"\"\"\n        return back/last element/item\n\n        Returns:\n        - the back/last element/item object\n        \"\"\"\n        if self.tail:\n            return self.tail\n        else:\n            print(\"Doubly Linked List With Tail is empty!\")\n\n    def popBack(self):\n        \"\"\"\n        remove back element/item\n        \"\"\"\n        if self.head:\n            if self.head == self.tail:\n                self.head = self.tail = None\n            else:\n                self.tail = self.tail.previous\n                self.tail.next = None\n        else:\n            print(\"Error! Doubly Linked List With Tail is empty!\")\n\n    def find(self, value):\n        \"\"\"\n        find if the value of an object is available in the current Linked List\n\n        Arguments:\n        - value: an object that represent a value we want to look for\n\n        Returns:\n        - boolean object\n        \"\"\"\n        current = self.head\n        if self.head:\n            while current.value != value and current.next:\n                current = current.next\n            if current.value == value:\n                return True\n            else:\n                return False\n        else:\n            print(\"Doubly Linked List With Tail is empty!\")\n\n    def erase(self, value):\n        \"\"\"\n        remove an element/item from Linked List\n\n        Arguments:\n        - value: an object that represent a value we want to look for\n        \"\"\"\n        current = self.head\n        while current.value != value and current.next:\n            current = current.next\n        if current.value == value:\n            if self.head.value == value:\n                # We can use self.popFront() or\n                self.head = current.next\n                current.next = None\n            elif not current.next:\n                # We can use self.popBack() or\n                self.tail = current.previous\n                current.previous = None\n            else:\n                next_element = current.next\n                previous_element = current.previous\n\n                previous_element.next = next_element\n                next_element.previous = previous_element\n\n                current.next = current.previous = None\n\n    def isEmpty(self):\n        \"\"\"\n        check if the Linked List is empty or not\n\n        Returns:\n        - boolean object\n        \"\"\"\n        if self.head:\n            return False\n        else:\n            return True\n\n    def addBefore(self, new_element, node):\n        \"\"\"\n        add new element/item before a node in the Linked List\n\n        Arguments:\n        - new_element: an object that reference to a new element to be added\n        - node: an existing element of the Linked List before which the new element/item is placed\n        \"\"\"\n        new_element.next = node\n        new_element.previous = node.previous\n        node.previous = new_element\n        if new_element.previous:\n            new_element.previous.next = new_element\n        if self.head == node:\n            self.head = new_element\n\n    def addAfter(self, new_element, node):\n        \"\"\"\n        add new element/item after a node/element/item in the Linked List\n\n        Arguments:\n        - new_element: an object that reference to a new element to be added\n        - node: an object that is part of the Linked List elements\n        \"\"\"\n        new_element.next = node.next\n        new_element.previous = node\n        node.next = new_element\n        if new_element.next:\n            new_element.next.previous = new_element\n        if self.tail == node:\n            self.tail = new_element\n\n\n\nclass Stack:\n\n    def __init__(self):\n        \"\"\"Initialize Stack object instance\"\"\"\n        self.new_element = None\n        self.linked_list = DoublyLinkedListWithTail()\n\n    def push(self, new_element):\n        \"\"\"Push an element to the front of the Stack\"\"\"\n        self.new_element = Element(new_element)\n        self.linked_list.pushFront(self.new_element)\n\n    def top(self):\n        \"\"\"Return most recently-added element\"\"\"\n        if self.isEmpty():\n            print (\"Error! 
Stack is empty!\")\n else:\n return self.linked_list.topFront()\n\n def pop(self):\n \"\"\"Remove and returns most recently added element\"\"\"\n if self.isEmpty():\n print (\"Error! Stack is empty!\")\n else:\n return self.linked_list.popFront()\n\n def isEmpty(self):\n \"\"\"Check if a Stack object is empty\"\"\"\n return self.linked_list.isEmpty()\n","sub_path":"stack_using_linked_list.py","file_name":"stack_using_linked_list.py","file_ext":"py","file_size_in_byte":7657,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"417902725","text":"# Time: O(n^2), n is the number of steps\n# Space: O(n)\n\n# 1269 weekly contest 164 11/23/2019\n\n# You have a pointer at index 0 in an array of size arrLen. At each step, you can move 1 position to the left,\n# 1 position to the right in the array or stay in the same place (The pointer should not be placed outside the array at any time).\n#\n# Given two integers steps and arrLen, return the number of ways such that your pointer still at index 0 after exactly steps steps.\n#\n# Since the answer may be too large, return it modulo 10^9 + 7.\n\nclass Solution(object):\n def numWays(self, steps: int, arrLen: int) -> int: # USE THIS (dict, not array): no need to allocate space for positions won't be reached\n import collections\n MOD, dp = 10**9+7, {0:1}\n for _ in range(steps):\n ndp = collections.defaultdict(int)\n for k,v in dp.items():\n ndp[k]= (ndp[k] + v) % MOD\n if k>0:\n ndp[k-1] = (ndp[k-1] + v) % MOD\n if k 0:\n steps -= 1\n new_dp = [0]*(l+2)\n for i in xrange(1, l+1):\n new_dp[i] = (dp[i] + dp[i-1] + dp[i+1]) % MOD\n dp = new_dp\n return dp[1]\n\nprint(Solution().numWays(3,2)) # 4\nprint(Solution().numWays(2,4)) # 2\nprint(Solution().numWays(4,2)) # 8","sub_path":"Python/number-of-ways-to-stay-in-the-same-place-after-some-steps.py","file_name":"number-of-ways-to-stay-in-the-same-place-after-some-steps.py","file_ext":"py","file_size_in_byte":1698,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"593636387","text":"from common.ActionItem import ActionItem\nfrom module.Student import Student\nfrom data.Student import queryStudent\n\n\n# 添加学生\ndef query():\n stus = queryStudent(Student.getInputStuno(False, True))\n print('查询结果为:')\n print('编号\\t\\t姓名\\t\\t年龄\\t\\t分数')\n for stu in stus:\n print(stu)\n\n\nqueryAction = ActionItem('4', '查找学生信息', query)\n","sub_path":"control/Query.py","file_name":"Query.py","file_ext":"py","file_size_in_byte":387,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"491513945","text":"'''\nThe function is expected to check if the two trees passed to it are similar in shape or not.\n\n\nSample Input:\n\n24\n10 20 50 -1 60 -1 -1 30 70 -1 80 110 -1 120 -1 -1 90 -1 -1 40 100 -1 -1 -1\n\n24\n1 2 5 -1 6 -1 -1 3 7 -1 8 11 -1 12 -1 -1 9 -1 -1 4 10 -1 -1 -1\n\nSample Output:\n\ntrue\n\n\nhttps://www.youtube.com/watch?v=rumfdyFR-_A&list=PL-Jc9J83PIiEmjuIVDrwR9h5i9TT2CEU_&index=35\nhttps://www.youtube.com/watch?v=y_PIhsd9vt0&list=PL-Jc9J83PIiEmjuIVDrwR9h5i9TT2CEU_&index=36\n\n\n'''\n\nclass Node:\n def __init__(self):\n self.data = None\n self.children = []\n \n \ndef create_generic_tree(arr):\n \n root = Node()\n \n st = [] # create empty stack\n \n for i in range(len(arr)):\n \n if(arr[i] == -1):\n st.pop()\n else:\n t = Node()\n t.data = arr[i]\n \n if(len(st) > 0):\n st[-1].children.append(t)\n else:\n root = t\n \n st.append(t)\n \n return root\n\ndef 
{"seq_id":"593636387","text":"from common.ActionItem import ActionItem\nfrom module.Student import Student\nfrom data.Student import queryStudent\n\n\n# Query student information\ndef query():\n    stus = queryStudent(Student.getInputStuno(False, True))\n    print('Query results:')\n    print('ID\\t\\tName\\t\\tAge\\t\\tScore')\n    for stu in stus:\n        print(stu)\n\n\nqueryAction = ActionItem('4', 'Find student information', query)\n","sub_path":"control/Query.py","file_name":"Query.py","file_ext":"py","file_size_in_byte":387,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
{"seq_id":"491513945","text":"'''\nThe function is expected to check if the two trees passed to it are similar in shape or not.\n\n\nSample Input:\n\n24\n10 20 50 -1 60 -1 -1 30 70 -1 80 110 -1 120 -1 -1 90 -1 -1 40 100 -1 -1 -1\n\n24\n1 2 5 -1 6 -1 -1 3 7 -1 8 11 -1 12 -1 -1 9 -1 -1 4 10 -1 -1 -1\n\nSample Output:\n\ntrue\n\n\nhttps://www.youtube.com/watch?v=rumfdyFR-_A&list=PL-Jc9J83PIiEmjuIVDrwR9h5i9TT2CEU_&index=35\nhttps://www.youtube.com/watch?v=y_PIhsd9vt0&list=PL-Jc9J83PIiEmjuIVDrwR9h5i9TT2CEU_&index=36\n\n\n'''\n\nclass Node:\n    def __init__(self):\n        self.data = None\n        self.children = []\n    \n    \ndef create_generic_tree(arr):\n    \n    root = Node()\n    \n    st = [] # stack of nodes on the current root-to-leaf path; a -1 pops one level\n    \n    for i in range(len(arr)):\n        \n        if(arr[i] == -1):\n            st.pop()\n        else:\n            t = Node()\n            t.data = arr[i]\n            \n            if(len(st) > 0):\n                st[-1].children.append(t)\n            else:\n                root = t\n            \n            st.append(t)\n    \n    return root\n\ndef areSimilar(root_1, root_2):\n    \n    if(len(root_1.children) != len(root_2.children)):\n        return False\n    \n    for i in range(len(root_1.children)):\n        \n        c1 = root_1.children[i]\n        c2 = root_2.children[i]\n        \n        if(areSimilar(c1, c2) == False):\n            return False\n    \n    return True\n\n\nif __name__ == \"__main__\":\n    \n    arr1 = [10, 20, 50, -1, 60, -1, -1, 30, 70, -1, 80, 110, -1, 120, -1, -1, 90, -1, -1, 40, 100, -1, -1, -1]\n    arr2 = [1, 2, 5, -1, 6, -1, -1, 3, 7, -1, 8, 11, -1, 12, -1, -1, 9, -1, -1, 4, 10, -1, -1, -1]\n    \n    root_1 = create_generic_tree(arr1)\n    root_2 = create_generic_tree(arr2)\n    print(root_1.data)\n    print(root_2.data)\n    \n    print(areSimilar(root_1, root_2))","sub_path":"pepcoding/generic_tree/15_are_trees_similar_in_shape.py","file_name":"15_are_trees_similar_in_shape.py","file_ext":"py","file_size_in_byte":1767,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
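areSimilar compares only shape, that is, the child counts at every matching position, never the stored values. A small added check, not in the original file: the second child has one child in the first tree and none in the second, so the result is False.

a = [1, 2, -1, 3, 4, -1, -1, -1]   # 1 -> [2, 3], 3 -> [4]
b = [9, 8, -1, 7, -1, -1]          # 9 -> [8, 7], no grandchildren
print(areSimilar(create_generic_tree(a), create_generic_tree(b)))  # False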
{"seq_id":"84321557","text":"from flask.json import jsonify\nfrom datetime import date, datetime\nfrom myapp.model.post_model import Posts\nfrom myapp.model.db_extension import db\nfrom webargs import fields\nfrom webargs.flaskparser import use_args\nfrom myapp.model.post_model import Posts, PostsSchema\nfrom flask import Blueprint, render_template, request, url_for, flash, redirect\nfrom flask_login import login_required, current_user\n\nposts = Blueprint('posts', __name__, url_prefix='/api')\n\n@posts.route('/getposts', methods=['GET', 'POST'])\n@login_required\ndef get_posts():\n    all_posts = Posts.query.filter_by(user_id=current_user.id)\n    if all_posts:\n        user_posts = all_posts.all()\n        post_schema = PostsSchema(many=True)\n        output = post_schema.dump(user_posts)\n        return render_template('posts.html', all_posts=output)\n\n    # return jsonify(output)\n    else:\n        return render_template('posts.html', all_posts=[])\n\n@posts.route('/createpost')\n@login_required\ndef create_post_get():\n    return render_template('create_post.html')\n\n@posts.route('/createpost', methods=['POST'])\n@login_required\ndef create_post():\n    post_title = request.form.get('title')\n    post_description = request.form.get('description')\n    created_at = datetime.now()\n    user_id = current_user.id\n    \n    # Indicates the Post already exists in the database\n    prev_post = Posts.query.filter_by(title=post_title).first()\n    \n    # Redirect to Create_Post Page if Post already exists\n    if prev_post:\n        flash('A Post with Same Title Already Exists in Database')\n        return redirect(url_for('posts.create_post'))\n\n    new_post = Posts(title=post_title, description=post_description, created_at=created_at, user_id=user_id)\n\n    db.session.add(new_post)\n    db.session.commit()\n    \n    return redirect(url_for('posts.get_posts'))\n\n@posts.route('/updatepost',methods=['PUT', 'POST'])\n@login_required\ndef update_post():\n    post_id = request.form.get('id')\n    target_post = Posts.query.get_or_404(post_id)\n    # Model columns are plain attributes, not dict keys; keep the old value\n    # when a field is missing from the form.\n    target_post.title = request.form.get('title', target_post.title)\n    target_post.description = request.form.get('description', target_post.description)\n    target_post.updated_at = datetime.now()  # assumes Posts has an updated_at column, as the original stub implied\n    db.session.commit()\n    return redirect(url_for('posts.get_posts'))\n\n@posts.route('/deletepost',methods=['GET'])\n@login_required\ndef delete_post():\n    post_id = request.args.get('id')\n    target_post = Posts.query.get_or_404(post_id)\n    if target_post:\n        db.session.delete(target_post)\n        db.session.commit()\n        return redirect(url_for('posts.get_posts'))\n    else:\n        return redirect(url_for('posts.get_posts'))\n","sub_path":"myapp/views/posts.py","file_name":"posts.py","file_ext":"py","file_size_in_byte":2667,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
{"seq_id":"481525154","text":"customers = []\n\nwhile True:\n\n    yesno = input('Enter Customer (Yes/No): ').lower()\n\n    if yesno == 'y' or yesno == 'yes':\n        try:\n            fName, lName = input('Enter Customer Name: ').split()\n            customers.append({'fName': fName, 'lName': lName})\n        except ValueError:\n            print('Please input FIRSTNAME and LASTNAME.')\n            continue\n\n    elif yesno == 'n' or yesno == 'no':\n        for cust in customers:\n            print(cust['fName'], cust['lName'])\n        break\n","sub_path":"pro7.py","file_name":"pro7.py","file_ext":"py","file_size_in_byte":477,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
{"seq_id":"453584410","text":"\"\"\"\nSimply removes a given char sequence from the source file.\ne.g. files named badly where we want to remove \"Page\" from every line.\n\"\"\"\n\nimport re\n\nclass RemovePageFromName(object):\n\n    source_file = \"removePageFromNameSource.txt\"\n    target_file = \"removePageFromNameTarget.txt\"\n    source_content_as_list = []\n    target_content_prepped_list = []\n    substring_to_remove = \"Page\"\n\n    def __init__(self):\n        self.get_source_file_contents_into_list()\n        self.parse_through_and_remove_substring()\n        self.write_sorted_version_into_target_file()\n\n    def get_source_file_contents_into_list(self):\n        with open(self.source_file, 'r') as f:\n            self.source_content_as_list = f.read().splitlines()\n        return self.source_content_as_list\n\n    def parse_through_and_remove_substring(self):\n        for line in self.source_content_as_list:\n            self.target_content_prepped_list.append(re.sub(self.substring_to_remove, \"\", line))\n        return self.target_content_prepped_list\n\n    def write_sorted_version_into_target_file(self):\n        with open(self.target_file, 'w') as f:\n            for line in sorted(self.target_content_prepped_list):\n                f.write(\"%s\\n\" % (line))\n        print(\"DONE!\")\n\n\nRemovePageFromName()","sub_path":"Archives/archived_CB/androidPhase1Helpers/removePageFromName/removePageFromName.py","file_name":"removePageFromName.py","file_ext":"py","file_size_in_byte":1229,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
{"seq_id":"313059176","text":"import tkinter as interface\n\nclass ThemedButton(interface.Button):\n    def __init__(self, parent = None, **configs):\n        interface.Button.__init__(self, parent, **configs)\n        self.pack()\n        self.config(fg='black', bg = 'white',width = 20,height = 2, font='courier 10', relief='solid', bd=3)\n\nclass ThemedMenu(interface.OptionMenu):\n    def __init__(self, parent = None, *values, **configs):\n        interface.OptionMenu.__init__(self, parent, *values, **configs)\n        self.pack()\n        self.config( fg='black', bg='white', width=10, height=2, font='courier 10', relief='solid', bd=3 )\n\nclass Navs(interface.Frame):\n    def __init__(self, parent = None, **configs):\n        interface.Frame.__init__(self, parent, **configs)\n        self.pack(side = \"left\")\n        self.config( bg = 'white', padx = 0, pady = 30)\n\n\nclass Navs1( interface.Frame ):\n    def __init__(self, parent=None, **configs):\n        interface.Frame.__init__( self, parent, **configs )\n        self.pack( side=\"top\" )\n        self.config( bg='white', padx=60, pady=30 )\n\n\nclass Container(interface.Frame):\n    def __init__(self, parent = None, **configs):\n        interface.Frame.__init__(self, parent, **configs)\n        self.pack()\n        
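One caveat for RemovePageFromName above: re.sub treats substring_to_remove as a regular expression, so a value such as 'Page(2)' would not match literally. A hedged sketch of the safer literal forms (an aside, not part of the helper; the sample strings are hypothetical):

import re

substring = 'Page(2)'                            # hypothetical value containing regex metacharacters
line = 'Page(2) Chapter'
print(re.sub(re.escape(substring), '', line))    # ' Chapter', escaped so it matches literally
print(line.replace(substring, ''))               # str.replace avoids regex entirely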
self.config(bg = 'white', pady = 30)\n\nclass MainFrame(interface.Frame):\n def __init__(self,parent = None, **configs):\n interface.Frame.__init__(self, parent, **configs)\n self.pack(side = \"right\", expand = True, fill = 'both')\n self.config(bg = '#fce5ff', padx = 30, pady = 30 )\n\nclass ThemedText(interface.Text):\n def __init__(self, parent = None, **configs):\n interface.Text.__init__(self, parent, **configs)\n self.pack(side = \"right\", expand = True, fill = 'both')\n self.config(bg = '#fce5ff', font = 'verdana 15')\n\nclass OutLabel(interface.Label):\n def __init__(self,parent = None, **configs):\n interface.Label.__init__(self, parent, **configs)\n self.pack()\n self.config(fg='black', bg = 'white', font = 'verdana 8', relief = 'solid', justify = 'center', width = 25, bd = 1)\n\nclass ThemedMessage(interface.Entry):\n def __init__(self, parent = None, **configs):\n interface.Entry.__init__(self, parent, **configs)\n self.pack()\n self.config(bd = 1, fg='black', bg = 'white', font = 'verdana 13', relief='solid', justify = 'center', width = 15, disabledbackground='#fce5ff' , disabledforeground='black')\n\nclass CommandButton(interface.Button):\n def __init__(self, parent = None, **configs):\n interface.Button.__init__(self, parent, **configs)\n self.pack()\n self.config(fg = 'black', bg = '#dce6f7', font = 'Times 12', width = 10, relief = 'solid', justify = 'center', bd = 1, highlightcolor=\"#37d3ff\",\n highlightbackground=\"#37d3ff\")\n\nclass ScrolledList(interface.Frame):\n def __init__(self, options, parent = None):\n interface.Frame.__init__(self, parent)\n self.pack()\n self.config(bd = 1, width = 15, height = 15)\n self.makeWidgets(options)\n\n\n def handleList(self,event):\n self.index = self.listbox.curselection()\n self.label = self.listbox.get(self.index)\n self.listbox.activate( self.index )\n self.runCommand(self.index)\n\n\n\n def makeWidgets(self, options):\n sbar = interface.Scrollbar(self)\n list = interface.Listbox(self, relief = 'solid', bd = 1, font = 'verdana 12')\n sbar.config(command = list.yview)\n list.config(yscrollcommand = sbar.set)\n sbar.pack(side='right', fill = 'y')\n list.pack(side = 'left', expand = True, fill = 'both')\n pos = 0\n for label in options:\n list.insert(pos,label)\n pos += 1\n list.config(selectmode = 'single')\n list.bind('', self.handleList)\n self.listbox = list\n\n\n def runCommand(self,selection):\n var3 = selection\n\n\nclass ThemedOut(interface.Message):\n def __init__(self, parent = None, **configs):\n interface.Message.__init__(self, parent, **configs)\n self.pack(expand = True, fill = 'both')\n self.config(bd = 1, fg='black', bg = 'white', font = 'verdana 13', relief='solid', justify = 'center')\n\n","sub_path":"widgets.py","file_name":"widgets.py","file_ext":"py","file_size_in_byte":4210,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"509630908","text":"# Django imports\nfrom django.shortcuts import render, redirect, HttpResponseRedirect\nfrom .forms import BrokenLinkCheckerModelForm\nfrom .models import BrokenLinkCheckerModel\n# Web scraping imports\nfrom bs4 import BeautifulSoup\nimport requests\n\n\ndef broken_link_checker(request):\n\t# checkers = BrokenLinkCheckerModel.objects.all()\n\tform = BrokenLinkCheckerModelForm()\n\t# So nothing shows up when I do {% if code %}\n\taccess = \"\"\n\tlinks_404 = {}\n\tother_links = {}\n\n\tif request.method == 'POST':\n\t\tform = BrokenLinkCheckerModelForm(request.POST or None)\n\t\tif form.is_valid():\n\t\t\tname = 
form.cleaned_data['site_name']\n\t\t\tlinks = get_404_links(name)\n\t\t\tlinks_404 = links[0]\n\t\t\tother_links = links[1]\n\t\t\t# form.save()\n\t\t\tform = BrokenLinkCheckerModelForm(request.POST or None)\n\t\telse:\n\t\t\taccess = \"Unable to access site. The site may be blocking the bot. Did you enter the site correctly?\"\n\telse:\n\t\taccess = True\n\t\tform = BrokenLinkCheckerModelForm(request.POST or None)\n\n\tcontext = {'access': access, 'form': form, 'links_404': links_404, 'other_links': other_links}\n\ttemplate_name = 'broken_link_checker/broken_link_checker.html'\n\treturn render(request, template_name, context)\n\n\ndef get_404_links(link: str):\n\t\"\"\"\n\tFinds the 404 links on the page.\n\t:param link: The link\n\t:return: A tuple of (404 links, other non-200 links) dicts, or None if the page could not be fetched.\n\t\"\"\"\n\t# Some sites don't allow you to access content without a user agent\n\t# Google 'what is my user agent'\n\theaders = {\n\t\t'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.79 Safari/537.36',\n\t}\n\ttry:\n\t\tresponse = requests.get(link, headers=headers)\n\t\tsoup = BeautifulSoup(response.text, 'html.parser')\n\texcept requests.RequestException:\n\t\treturn None\n\n\tlinks_404 = {}\n\tother_links = {}\n\tfor link in soup.find_all('a', href=True):\n\t\tlink_text = link.string\n\t\tlink = link['href']\n\t\t# only follow absolute http(s) URLs\n\t\tif link.startswith(('http://', 'https://')):\n\t\t\ttry:\n\t\t\t\tresponse = requests.get(link, headers=headers)\n\t\t\t\tcode = response.status_code\n\t\t\t\tif code == 404:\n\t\t\t\t\tlinks_404[link] = link_text, code\n\t\t\t\telif code != 200:\n\t\t\t\t\tother_links[link] = link_text, code\n\t\t\texcept requests.RequestException:\n\t\t\t\tpass\n\t\telse:\n\t\t\tpass\n\n\treturn links_404, other_links\n\n\n","sub_path":"broken_link_checker/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2248,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
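Checking status codes does not require the response body, so the inner loop above could probe with HEAD instead of GET. A sketch of that optimization (an assumption-labeled variant, not the view's actual code; some servers reject HEAD, hence the GET fallback):

import requests

def probe_status(url, headers=None):
    try:
        r = requests.head(url, headers=headers, allow_redirects=True, timeout=10)
        if r.status_code == 405:  # method not allowed: retry with a full GET
            r = requests.get(url, headers=headers, timeout=10)
        return r.status_code
    except requests.RequestException:
        return None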
{"seq_id":"236450277","text":"# This version removes the whole mail section, tries to create a dummy process\n# and tries to monitor it\n\nimport os\nfrom flask import Flask, request, render_template, session, flash, redirect, url_for, jsonify\n# This is the function that lives in tasks.py\nfrom tasks import long_task\n\n\n# For some reason I can't delete this; it has to do with 'session'\napp = Flask(__name__)\napp.config['SECRET_KEY'] = 'nadiesabe'\n\n\n# renders the main page\n@app.route('/', methods=['GET'])\ndef index():\n    if request.method == 'GET': # on GET, return the index; the mail address used to be read from there\n        return render_template('index.html')# return redirect(url_for('index')) # redirects to the main page\n\n# Page to start the application, in seconds\n@app.route('/ini', methods=['GET'])\ndef ini():\n    if request.method == 'GET':\n        return render_template('ini.html')\n\n\n# this is the section that runs 'longtask'\n@app.route('/longtask', methods=['POST'])\ndef longtask():\n    task = long_task.apply_async(task_id = '182') # here task will be 'long_task'\n    return jsonify({}), 202, {'respuesta': url_for('taskstatus', # returns jsonify with 202 to say it is working\n                                                  task_id=task.id)}\n\n\n# status: the Flask part in charge of receiving the states\n@app.route('/status/<task_id>')\ndef taskstatus(task_id):\n    task = long_task.AsyncResult(task_id)\n    if task.state == 'PENDING': # this is when the task has not started, so it shows pending\n        response = { # responds with a JSON containing state, current, total and status\n            'state': task.state, # state is carried by task.state\n            'current': 0, # current will be 0\n            'total': 1, # total will be 1\n            'status': 'Pending...' # status shows the word pending\n        }\n    elif task.state != 'FAILURE': # if different from FAILURE\n        response = { # here it fetches the data using task.info.get for each of the variables\n            'state': task.state, # state will be task.state\n            'current': task.info.get('current', 0), # current comes from task.info.get('current'); the 0 is the default\n            'total': task.info.get('total', 1), # does the same for total and status\n            'status': task.info.get('status', '')\n        }\n        if 'result' in task.info:\n            response['result'] = task.info['result'] # when a result shows up in task.info, include it in the response\n    else:\n        # something went wrong in the background job\n        response = { # this is if task.state == FAILURE\n            'state': task.state,\n            'current': 1,\n            'total': 1,\n            'status': str(task.info), # this is the exception raised! it tells the user exactly what happened\n        }\n    return jsonify(response) # after this, return the response using jsonify\n\n\nif __name__ == '__main__':\n    app.run(debug=True)\n#### I have no idea what this is for; it seems to be a standard that goes at the end of every file","sub_path":"celery/celv4.4/application.py","file_name":"application.py","file_ext":"py","file_size_in_byte":3043,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
{"seq_id":"620821249","text":"import envtest # modifies path\nfrom raytracing import *\n\ninf = float(\"+inf\")\n\n\nclass TestMatrix(envtest.RaytracingTestCase):\n    def testMatrix(self):\n        m = Matrix()\n        self.assertIsNotNone(m)\n\n    def testThorlabsLensesWarning(self):\n        l = thorlabs.AC254_030_A()\n\n\nif __name__ == '__main__':\n    envtest.main()\n","sub_path":"raytracing/tests/testsWarnings.py","file_name":"testsWarnings.py","file_ext":"py","file_size_in_byte":330,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
{"seq_id":"337953079","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom flask import Flask, g, render_template, flash, session, request, redirect, url_for, make_response\nfrom functools import wraps\nfrom matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas\nfrom matplotlib.dates import DateFormatter\nfrom matplotlib.figure import Figure\nfrom kanasta import model\nimport crypt\nimport logging\nimport logging.handlers\nimport random\nimport StringIO\n\n# Default configuration\nDEBUG = True\nSECRET_KEY = 'dev key'\nHOST = '0.0.0.0'\nDB_DATABASE = 'kanasta'\nDB_PORT = 5920\nDB_HOST = '/tmp'\nDB_USER = 'kanasta'\nPOINTS_FOR_FINISHING = 300\nLOG_FILENAME = 'log/kanasta.log'\nMAIL_SERVER = '127.0.0.1'\nMAIL_FROM = 'depesz@depesz.com'\nAPP_ADMIN = 'depesz@depesz.com'\n# Default configuration\n\napp = Flask(__name__)\napp.config.from_object(__name__)\napp.config.from_envvar('KANASTARC', silent=True)\n\ndb_conn_details = {}\nkeys = ('database', 'user', 'password', 'host', 'port', 'sslmode')\nfor conn_key, conf_key in zip(keys, ['DB_%s' % i.upper() for i in keys]):\n    if conf_key in app.config:\n        db_conn_details[conn_key] = app.config[conf_key]\n\ndb = model.Model(db_conn_details)\n\n\nif not app.debug:\n    file_logger = logging.FileHandler(LOG_FILENAME)\n    file_logger.setLevel(logging.INFO)\n    app.logger.addHandler(file_logger)\n\n    mail_logger = logging.handlers.SMTPHandler(MAIL_SERVER, MAIL_FROM, APP_ADMIN, 'Critical 
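The /status route above expects long_task to publish current/total/status through Celery's update_state. A plausible shape for the imported tasks.py, stated as an assumption for illustration rather than the author's actual file (the broker/backend URLs are placeholders):

import time
from celery import Celery

celery = Celery('tasks', broker='redis://localhost:6379/0',
                backend='redis://localhost:6379/0')   # assumed broker/backend

@celery.task(bind=True)
def long_task(self):
    total = 30
    for i in range(total):
        time.sleep(1)
        # this meta dict is exactly what taskstatus() reads back via task.info
        self.update_state(state='PROGRESS',
                          meta={'current': i, 'total': total, 'status': 'working...'})
    return {'current': total, 'total': total, 'status': 'done', 'result': 42}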
error in %s' % (__name__))\n mail_logger.setLevel(logging.CRITICAL)\n app.logger.addHandler(mail_logger)\n\n file_logger.setFormatter(\n logging.Formatter(\n '%(asctime)s %(levelname)s: %(message)s '\n '[in %(pathname)s:%(lineno)d]'\n )\n )\n\n mail_logger.setFormatter(\n logging.Formatter(\n '''\n Message type: %(levelname)s\n Location: %(pathname)s:%(lineno)d\n Module: %(module)s\n Function: %(funcName)s\n Time: %(asctime)s\n\n Message:\n\n %(message)s\n '''\n )\n )\n\n\ndef login_required(f):\n @wraps(f)\n def decorated_function(*args, **kwargs):\n if 'username' in session:\n return f(*args, **kwargs)\n flash('NOT_LOGGED')\n return redirect(url_for('index'))\n return decorated_function\n\n\n@app.teardown_request\ndef teardown_request(exception):\n db.rollback()\n\n\n@app.errorhandler(Exception)\ndef special_exception_handler(error):\n app.logger.critical('Fatal exception', exc_info=1)\n return render_template('error.html'), 500\n\n\n@app.route('/main')\n@login_required\ndef main():\n return render_template('main.html')\n\n\n@app.route('/logout')\ndef logout():\n for key in ('username', 'is_admin'):\n if key in session:\n session.pop(key)\n flash('LOGGED_OUT')\n return redirect(url_for('index'))\n\n\n@app.route('/login', methods=['POST'])\ndef login():\n user_data = db.get_user_data(request.form['username'])\n\n if user_data is None or user_data[\"password\"] != crypt.crypt(request.form['password'], user_data['password']):\n for key in ('username', 'is_admin'):\n if key in session:\n session.pop(key)\n app.logger.error('Bad login for user %s with password %s', request.form['username'], request.form['password'])\n flash('BAD_USER_PASSWORD')\n return redirect(url_for('index'))\n\n session['username'] = user_data['username']\n session['admin'] = user_data['is_admin']\n app.logger.warning('User %s logged in', user_data['username'])\n return redirect(url_for('main'))\n\n\n@app.route('/')\ndef index():\n if 'username' in session:\n return redirect(url_for('main'))\n\n return render_template('index.html')\n\n\n@app.route('/new_game', methods=['POST', 'GET'])\n@login_required\ndef new_game():\n if request.method == 'POST' and new_game_validate():\n new_game_id = db.register_new_game(\n request.form['pair1_player1'],\n request.form['pair1_player2'],\n request.form['pair2_player1'],\n request.form['pair2_player2'],\n request.form['first_dealing'],\n request.form['second_dealing']\n )\n return redirect(url_for('game', game_id=new_game_id))\n\n users = db.get_list_of_players()\n\n return render_template('new_game.html', users=users)\n\n\ndef new_game_validate():\n required = ('pair1_player1', 'pair1_player2', 'pair2_player1', 'pair2_player2', 'first_dealing', 'second_dealing')\n for key in required:\n if key in request.form and request.form[key]:\n continue\n g.errors = ['MISSING_DATA']\n return False\n\n seen = {}\n for key in ('pair1_player1', 'pair1_player2', 'pair2_player1', 'pair2_player2'):\n if request.form[key] in seen:\n g.errors = ['DUPLICATE_DATA']\n return False\n seen[key] = 1\n\n if request.form['first_dealing'] == request.form['second_dealing']:\n g.errors = ['DUPLICATE_DATA']\n return False\n\n first_dealing_pair = 2\n second_dealing_pair = 2\n if request.form['first_dealing'] in (request.form['pair1_player1'], request.form['pair1_player2']):\n first_dealing_pair = 1\n if request.form['second_dealing'] in (request.form['pair1_player1'], request.form['pair1_player2']):\n second_dealing_pair = 1\n\n if first_dealing_pair == second_dealing_pair:\n g.errors = ['BAD_DEALING']\n return 
False\n\n return True\n\n\n@app.route('/game/', methods=['POST', 'GET'])\n@login_required\ndef game(game_id):\n if request.method == 'POST' and new_deal_insert(game_id):\n return redirect(url_for('game', game_id=game_id))\n\n (game, deals) = db.get_game_details(game_id)\n\n pair1 = set([game['pair1_player1'], game['pair1_player2']])\n pair2 = set([game['pair2_player1'], game['pair2_player2']])\n dealing_order = []\n dealing_order.append(game['first_dealing'])\n dealing_order.append(game['second_dealing'])\n\n if game['first_dealing'] in pair1:\n pair1.remove(game['first_dealing'])\n pair2.remove(game['second_dealing'])\n dealing_order.extend(pair1)\n dealing_order.extend(pair2)\n else:\n pair2.remove(game['first_dealing'])\n pair1.remove(game['second_dealing'])\n dealing_order.extend(pair2)\n dealing_order.extend(pair1)\n\n return render_template('game.html', game=game, deals=deals, dealing_order=dealing_order)\n\n\ndef new_deal_insert(game_id):\n if 'finish_game' in request.form:\n db.finish_game(game_id)\n return True\n\n scores = {'p1': 0, 'p2': 0}\n\n if not 'finished' in request.form:\n g.errors = ['NO_FINISHED']\n return False\n\n for pair in ('p1', 'p2'):\n for player in ('p1', 'p2'):\n for place in ('hand', 'table'):\n partial_score_key = '%s%s_%s' % (pair, player, place)\n if not partial_score_key in request.form:\n continue\n try:\n partial_score = int(request.form[partial_score_key])\n if place == \"hand\":\n partial_score = -1 * abs(partial_score)\n scores[pair] = scores[pair] + partial_score\n except ValueError:\n g.errors = ['BAD_VALUE']\n return False\n\n if request.form['finished'].startswith(pair):\n scores[pair] = scores[pair] + POINTS_FOR_FINISHING\n\n db.register_new_deal(game_id, scores['p1'], scores['p2'])\n return True\n\n\n@app.route(\"/games\")\n@login_required\ndef games():\n games = db.get_list_of_games()\n return render_template('games.html', games=games)\n\n\n@app.route('/game_stats')\n@login_required\ndef game_stats():\n games = {}\n deals = {}\n\n games['summary'] = db.get_game_stat('summary')\n games['massacres'] = db.get_game_stat('massacres')\n games['longest'] = db.get_game_stat('longest')\n games['surprises'] = db.get_game_stat('surprises')\n games['streaks'] = db.get_game_stat('streaks')\n\n deals['worst'] = db.get_deal_stat('worst')\n deals['best'] = db.get_deal_stat('best')\n deals['massacres'] = db.get_deal_stat('massacres')\n return render_template('stats.html', games=games, deals=deals)\n\n\n@app.route('/game-.png')\n@login_required\ndef game_graph(game_id):\n (game, deals) = db.get_game_details(game_id)\n\n x = [0] + [int(r['deal_no']) for r in deals]\n team1 = [0] + [int(r['pair1_sum']) for r in deals]\n team2 = [0] + [int(r['pair2_sum']) for r in deals]\n team_diff = [abs(i[0] - i[1]) for i in zip(team1, team2)]\n team1_label = '%s + %s' % (game['pair1_player1'], game['pair1_player2'])\n team2_label = '%s + %s' % (game['pair2_player1'], game['pair2_player2'])\n min_range = min(team1 + team2 + team_diff)\n max_range = max(team1 + team2 + team_diff)\n guide_lines = [i for i in (0, 2500, 5000, 10000) if min_range <= i <= max_range]\n\n fig = Figure(facecolor=\"white\")\n ax = fig.add_subplot(111, axis_bgcolor=\"#eeeeee\")\n\n ax.plot(x, team1, color='red', linestyle='-', linewidth=2, label=team1_label)\n ax.plot(x, team2, color='blue', linestyle='-', linewidth=2, label=team2_label)\n ax.plot(x, team_diff, color='magenta', linestyle='--', linewidth=1, label=u'różnica')\n for i in guide_lines:\n ax.axhline(y=i, linewidth=1, color='green')\n\n 
ax.get_xaxis().set_ticks(x)\n ax.legend(loc='upper left')\n ax.grid()\n canvas = FigureCanvas(fig)\n\n png_output = StringIO.StringIO()\n canvas.print_png(png_output)\n\n response = make_response(png_output.getvalue())\n response.headers['Content-Type'] = 'image/png'\n return response\n\n\nif __name__ == '__main__':\n params = {}\n if 'HOST' in app.config:\n params['host'] = app.config['HOST']\n if 'PORT' in app.config:\n params['port'] = app.config['PORT']\n app.run(**params)\n","sub_path":"kanasta/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":9873,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"509208156","text":"from application import db\nfrom sqlalchemy import text\nfrom application.models import Base\n\nclass Thread(Base): \n title = db.Column(db.String(144), nullable=False)\n content = db.Column(db.String(8192), nullable=False)\n created = db.Column(db.DateTime, default=db.func.current_timestamp())\n edited = db.Column(db.DateTime, default=db.func.current_timestamp(), onupdate=db.func.current_timestamp())\n account_id = db.Column(db.Integer, db.ForeignKey('account.id'), nullable=False)\n\n def __init__(self, title, content):\n self.title = title\n self.content = content\n\n\n @staticmethod\n def threadsInfo(threadsid):\n\n stmt = text (\"SELECT Thread.title, Thread.content, Thread.created, Thread.edited, Account.name, Thread.id FROM Thread \"\n \"JOIN Account ON Account.id = Thread.account_id \"\n \"WHERE Thread.id = :tid\")\n \n res = db.engine.execute(stmt, tid=threadsid)\n\n response = []\n\n for row in res:\n response.append({\"title\":row[0],\"content\":row[1],\"created\":row[2],\"edited\":row[3],\"username\":row[4],\"id\":row[5]})\n \n return response\n\n\n @staticmethod\n def listAllThreads():\n\n stmt = text (\"SELECT Thread.title, Thread.created, Account.name, Thread.id FROM Thread \"\n \"JOIN Account ON Account.id = Thread.account_id\")\n \n res = db.engine.execute(stmt)\n\n response = []\n\n for row in res:\n response.append({\"title\":row[0],\"created\":row[1],\"username\":row[2],\"id\":row[3]})\n \n return response\n\n","sub_path":"application/thread/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1571,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"523280267","text":"string = str(input(\"Ingresa una cadena de caracteres para obtener la mediana\\n\"))\nlista = []\nfinal = []\naux = \"\"\ni = 0\nwhile i < len(string):\n lista.append(ord(string[i]))\n i += 1\ni = 0\nwhile i < len(lista):\n a = 0\n while i-a >= 0:\n if lista[i] < lista[i-a]:\n aux = lista[i-a]\n lista[i-a] = lista[i]\n lista[i] = aux\n a = 0\n else:\n aux = lista[i]\n lista[i] = lista[i-a]\n lista[i-a] = aux\n a += 1\n i += 1\nfinal.append(lista[len(lista)-1])\ni = 0\nwhile i < len(lista)-1:\n final.append(lista[i])\n i += 1\ni = 0\nmaxcamb = 0\ncamb = 0\nmoda = 0\nletra_camb = final[0]\nwhile i < len(final)-1:\n if letra_camb == final[i]:\n camb += 1\n else:\n camb = 1\n letra_camb = final[i]\n if camb > maxcamb:\n moda = i\n maxcamb = camb\n else:\n pass\n i += 1\nprint(\"La moda es: %s\"%(chr(final[moda])))\n","sub_path":"Tarea05/ejercicio11.py","file_name":"ejercicio11.py","file_ext":"py","file_size_in_byte":912,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"626013517","text":"#from tensorflow.examples.tutorials.mnist import input_data\nimport tensorflow.compat.v1 as tf 
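game_graph above streams the PNG through the Python 2 StringIO module; under Python 3 the Agg canvas needs a bytes buffer instead. A minimal sketch of the py3 equivalent, assuming the same Figure/Flask setup:

import io

png_output = io.BytesIO()                 # bytes buffer instead of StringIO.StringIO()
canvas.print_png(png_output)
response = make_response(png_output.getvalue())
response.headers['Content-Type'] = 'image/png'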
\ntf.disable_v2_behavior()\nimport matplotlib\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom Siamese import Siamese\nimport numpy as np\n#from tensorflow.keras.utils import to_categorical\n\ndef visualize(embed, labels):\n labelset = set(labels)\n fig = plt.figure(figsize=(8,8))\n ax = fig.add_subplot(111)\n for label in labelset:\n indices = np.where(labels == label)\n ax.scatter(embed[indices,0], embed[indices,1], label = label, s = 20)\n ax.legend()\n #fig.savefig('embed.jpeg', format='jpeg', dpi=600, bbox_inches='tight')\n plt.show()\n plt.close()\n\ndef main():\n # Load MNIST dataset\n mnist = tf.keras.datasets.mnist\n (x_train, y_train),(x_test, y_test) = mnist.load_data()\n x_train, x_test = x_train / 255.0, x_test / 255.0\n x_train=x_train.reshape(x_train.shape[0],x_train.shape[1]*x_train.shape[2])\n x_test=x_test.reshape(x_test.shape[0],x_test.shape[1]*x_test.shape[2])\n #s = pd.Series(y_test)\n #y_test=pd.get_dummies(s)\n\n # mnist = input_data.read_data_sets('MNIST_data', one_hot = False)\n # mnist_test_labels = mnist.test.labels\n \n siamese = Siamese()\n siamese.trainSiamese(x_train, y_train, 10, 128)\n #siamese.saveModel()\n #siamese.loadModel()\n siamese.trainSiameseForClassification(x_train, y_train, 10, 128)\n \n # Test model\n embed = siamese.test_model(input = x_test)\n #embed.tofile('embed.txt')\n #embed = np.fromfile('embed.txt', dtype = np.float32)\n\n embed = embed.reshape([-1, 2])\n #visualize(embed, y_test)\n\n siamese.computeAccuracy(x_test,y_test)#?\n\n\nif __name__ == '__main__':\n main()\n\n","sub_path":"SiameseMNIST/SiameseMNIST.py","file_name":"SiameseMNIST.py","file_ext":"py","file_size_in_byte":1710,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"139099613","text":"from rooms import RoomsTracker\nimport threading\n\nclass MockSIOApp:\n def __init__(self):\n self.enter_room_calls = {}\n\n def enter_room(self, _, room_name):\n if not room_name in self.enter_room_calls:\n self.enter_room_calls[room_name] = 0\n\n self.enter_room_calls[room_name] += 1\n\nclass TestSinglesRoomsTracker:\n def setup_method(self):\n self.sio_app = MockSIOApp()\n self.rooms_tracker = RoomsTracker(self.sio_app)\n\n def test_single_add_to_room(self):\n self.rooms_tracker.add_to_singles_room(123)\n\n assert self.sio_app.enter_room_calls == {\"Room0\": 1}\n\n def test_two_add_to_room(self):\n self.rooms_tracker.add_to_singles_room(123)\n self.rooms_tracker.add_to_singles_room(5)\n\n assert self.sio_app.enter_room_calls == {\"Room0\": 1, \"Room1\": 1}\n\nclass TestGetRoomAndRoommates:\n def setup_method(self):\n self.sio_app = MockSIOApp()\n self.rooms_tracker = RoomsTracker(self.sio_app)\n\n def test_get_existing_singles_room(self):\n self.rooms_tracker.add_to_singles_room(123)\n self.rooms_tracker.add_to_singles_room(5)\n self.rooms_tracker.add_to_singles_room(12)\n\n assert self.rooms_tracker.get_room(123) == \"Room0\"\n assert self.rooms_tracker.get_room(5) == \"Room1\"\n assert self.rooms_tracker.get_room(12) == \"Room2\"\n\nclass TestAsyncAdds:\n def setup_method(self):\n self.sio_app = MockSIOApp()\n self.rooms_tracker = RoomsTracker(self.sio_app)\n\n def test_stress_additions(self):\n class RoomAdder(threading.Thread):\n def __init__(self, count, seed, room_tracker):\n self.count = count\n self.seed = seed\n self.room_tracker = room_tracker\n super(RoomAdder, self).__init__()\n\n def run(self):\n for i in range(0, self.count):\n self.room_tracker.add_to_singles_room(i + self.seed)\n\n t1 = RoomAdder(10, 0, 
self.rooms_tracker)\n t2 = RoomAdder(10, 10, self.rooms_tracker)\n t3 = RoomAdder(10, 20, self.rooms_tracker)\n t4 = RoomAdder(10, 30, self.rooms_tracker)\n\n t1.start()\n t2.start()\n t3.start()\n t4.start()\n\n t1.join()\n t2.join()\n t3.join()\n t4.join()\n\n rooms = 0\n for _, value in self.sio_app.enter_room_calls.items():\n assert value == 1\n rooms += 1\n\n assert rooms == 40\n","sub_path":"server/tests/rooms_test.py","file_name":"rooms_test.py","file_ext":"py","file_size_in_byte":2474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"234055740","text":"import os\nimport tempfile\nfrom datetime import datetime\nfrom pathlib import Path\nfrom typing import List, Optional\nfrom zipfile import ZipFile, ZipInfo\n\nimport httpx\n\nfrom util.files import (\n Files,\n bytes_fingerprint,\n ensure_parent,\n file_fingerprint,\n file_info,\n file_mimetype,\n)\nfrom util.github_api import github_client\n\n\ndef pull_github(\n source: dict, path: Optional[str] = None, secrets: dict = {}, **kwargs\n) -> Files:\n \"\"\"\n Pull a GitHub repo/subpath.\n\n If a user token is provided in `secrets` it will be used to authenticate\n as that user.\n \"\"\"\n assert source.get(\"repo\"), \"GitHub source must have a repo\"\n\n subpath = source.get(\"subpath\") or \"\"\n if subpath.endswith(\"/\"):\n subpath = subpath[:-1]\n\n path = path or \".\"\n\n # Get the possibly token protected link for the repo archive\n # See https://developer.github.com/v3/repos/contents/#download-a-repository-archive\n client = github_client(secrets.get(\"token\"))\n repo_resource = client.get_repo(source[\"repo\"])\n archive_link = repo_resource.get_archive_link(\"zipball\")\n\n # Get the archive. To avoid it filling up memory, stream directly to file,\n # Increase timeout over the default of 5s.\n zip_file = tempfile.NamedTemporaryFile(suffix=\".zip\", delete=False)\n with httpx.stream(\"GET\", archive_link, timeout=60) as response:\n for data in response.iter_bytes():\n zip_file.write(data)\n zip_file.close()\n\n return pull_zip(zip_file.name, subpath=subpath, path=path)\n\n\ndef pull_zip(\n zip_file: str, subpath: str = \"\", path: str = \".\", strip: int = 1\n) -> Files:\n \"\"\"\n Pull files from a Zip file.\n\n :param zip_file: The path to the zip file.\n :param subpath: The file or directory in the zip file to extract.\n :param path: The destination path\n :param strip: Number of leading components from filenames to ignore.\n Similar to `tar`'s `--strip-components` option.\n \"\"\"\n files = {}\n\n with ZipFile(zip_file, \"r\") as zip_archive:\n for zip_info in zip_archive.infolist():\n zip_path = zip_info.filename\n\n # Skip directories\n if zip_path[-1] == \"/\":\n continue\n\n # Remove the first element of the path (the repo name + hash)\n inner_path = os.path.join(*(zip_path.split(\"/\")[strip:]))\n\n # Save if in the subpath\n remainder_path = None\n if subpath == \"\":\n remainder_path = inner_path\n elif inner_path.startswith(subpath + \"/\"):\n chars = len(subpath) + 1\n remainder_path = inner_path[chars:]\n elif inner_path == subpath:\n remainder_path = inner_path\n\n if remainder_path:\n dest_path = os.path.join(path, remainder_path)\n\n # Using `extract` is much much faster than reading bytes\n # and then writing them to file. Also it maintains other file info\n # such as modified time in the file written to disk. 
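The RoomsTracker stress test above only passes if the tracker serializes its bookkeeping across threads. A hedged sketch of one lock-based implementation; rooms.py itself is not shown here, so everything beyond the API the tests exercise is an assumption:

import threading

class RoomsTracker:
    def __init__(self, sio_app):
        self.sio_app = sio_app
        self._lock = threading.Lock()
        self._rooms = {}          # sid -> room name
        self._next_room = 0

    def add_to_singles_room(self, sid):
        with self._lock:          # one thread at a time hands out room numbers
            room = 'Room%d' % self._next_room
            self._next_room += 1
            self._rooms[sid] = room
        self.sio_app.enter_room(sid, room)

    def get_room(self, sid):
        return self._rooms.get(sid)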
This speed up\n # is much more important for real world zips than any speed advantage\n # due to not reading bytes twice for fingerprint generation.\n zip_info.filename = dest_path\n zip_archive.extract(zip_info)\n\n files[remainder_path] = file_info(dest_path)\n\n return files\n","sub_path":"worker/jobs/pull/github.py","file_name":"github.py","file_ext":"py","file_size_in_byte":3411,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"572199120","text":"\ndef triangleNumber(nums):\n # implementation of binary search \n def binarySearch(nums, l, r, target): # input: (array, left bound, right bound, target)\n while l < r: # while the left bound is still less than the right bound \n mid = (l + r) / 2 # calculate the midpoint\n if target <= nums[mid]: # if the target is less than or equal to the midpoint\n r = mid # make the new right bound = mid\n else:\n l = mid + 1 # make the new left bound 1 past mid \n return l if target <= nums[l] else l+1 # returns the value \n \n nums.sort() # sort the array so we can use binary search on it\n result = 0\n for i in range(len(nums)-2): # only need to check i values until a certain point\n for j in range(i+1, len(nums)-1): # same with j values \n k = binarySearch(nums, j+1, len(nums)-1, nums[i] + nums[j]) # set left bound to j+1, right bound to end of the list\n # target set to nums[i] + nums[j] because that's what we're looking for \n result += k - j - 1 # calculate the result\n return result # return the result \n\n\n\nif __name__ == '__main__':\n num = triangleNumber([2,2,3,4])\n print(num)\n\n\n\"\"\"\nTheorem: Triangle is valid if sum of its two sides is greater than its third side. \nStuck: On how to check every combination of sides in the array, on how to implement binary search and adapt it for this question \nPattern #1: We can make life easier by sorting our array and implementing binary search if we're searching for a particular value \nPattern #2: Understand what values you're looking for, and visualize how you need to go about accessing and updating them \n\"\"\"\n\n\"\"\"\nBrute Force: O(n^3)\n(1) Set a count variable \n(2) Consider every possible triplet in the array with three nested for loops \n(3) Make sure each nested loop starts from the previous variable + 1 to the length of the array\n(4) Check if the theorem holds, if so, increment count\n(5) Return count \n\n\nOptimized with Binary Search: O(n^2)\n(1) Iterate through the array twice finding a nums[i] and nums[j]\n(2) Use binary search to go through the list and find the value of nums[k] that satisfies nums[i] + nums[j] > nums[k]\n (a) Binary search: set a midpoint, if target <= value, set right bound to mid else set left bound to mid + 1 \n(3) All the values below our upper bound of k but between our lower bound of j satsify the inequality (k-j-1) and are counted up\n(4) Return the count \n\"\"\"","sub_path":"PYTHON/valid_triangle_number_611.py","file_name":"valid_triangle_number_611.py","file_ext":"py","file_size_in_byte":2487,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"421147452","text":"from django.urls import path\nfrom . 
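The notes in the triangle-number file above target O(n^2); the classic way to get there without an inner binary search is a two-pointer scan under a fixed largest side. An alternative sketch, not the file's own solution:

def triangle_number_two_pointers(nums):
    nums.sort()
    count = 0
    for k in range(len(nums) - 1, 1, -1):   # nums[k] plays the longest side
        i, j = 0, k - 1
        while i < j:
            if nums[i] + nums[j] > nums[k]: # then every index in i..j-1 also pairs with j
                count += j - i
                j -= 1
            else:
                i += 1
    return count

assert triangle_number_two_pointers([2, 2, 3, 4]) == 3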
import views\n\napp_name = 'orchid'\nurlpatterns = [\n path('', views.orchid_home, name='orchid_home'),\n path('genera/', views.genera, name='genera'), # -- All genera\n path('species/', views.all_species, name='species'), # -- All species\n path('species_detail//', views.species, name='species_detail'),\n path('hybrid_detail//', views.hybrids, name='hybrid_detail'),\n path('/species_detail/', views.species, name='species_detail'),\n path('/hybrid_detail/', views.hybrids, name='hybrid_detail'),\n path('/family_tree/', views.family_tree, name='family_tree'),\n path('ancestor/', views.ancestor, name='ancestor'),\n path('progeny/', views.progeny, name='progeny'),\n path('search_match/', views.search_match, name='search_match'),\n path('browse/', views.browse, name='browse'),\n]\n","sub_path":"orchid/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":889,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"33491278","text":"#!/usr/bin/env python\n\n# Written in 2018 by Lynn Kirby \n#\n# To the extent possible under law, the author has dedicated all copyright and\n# related and neighboring rights to this software to the public domain worldwide.\n# This software is distributed without any warranty.\n#\n# You should have received a copy of the CC0 Public Domain Dedication along with\n# this software. If not, see .\n\nimport ctypes\nimport os\nfrom tempfile import TemporaryDirectory\nfrom subprocess import run, check_output\n\nthis_dir = os.path.dirname(__file__)\nhelper_file = os.path.abspath(os.path.join(this_dir, 'helper.c'))\nout_file = os.path.join(this_dir, '../../include/lpd_win2errno.h')\ntemplate = os.path.join(this_dir, 'win2errno.template.h')\n\n# Must be VS 2013.\n# Later versions removed `_dosmaperr()` and friends from the C runtime.\ncommand = 'call \"C:/Program Files (x86)/Microsoft Visual Studio 12.0/VC/vcvarsall.bat\"'\ncommand += ' && cl ' + helper_file\n\nwith TemporaryDirectory() as build_dir:\n run(command, shell=True, cwd=build_dir, check=True)\n\n helper = os.path.join(build_dir, 'helper.exe')\n result = check_output(helper, encoding='utf8')\n\n with open(template) as temp:\n with open(out_file, 'w') as out:\n for line in temp:\n if line.startswith('// INSERT HERE'):\n out.write(result)\n else:\n out.write(line)\n","sub_path":"scripts/generate-win2errno/generate.py","file_name":"generate.py","file_ext":"py","file_size_in_byte":1457,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"215265765","text":"import subprocess\nimport re\n\ndef status(remotehost=\"localhost\"):\n \"\"\"\nRetrieve the status of the peer group.\n\nReturns a dict in the form of:\n{'host': {'server1': {'state': {'server2': 'Peer in Cluster (Connected)', 'server3': 'Peer in Cluster (Connected)', 'server4': 'Peer in Cluster (Connected)'}, 'uuid': '2d85014b-3e4c-4b53-b274-c25d2fa14771'}, 'server4': {'state': {'server1': 'Peer in Cluster (Connected)', 'server3': 'Peer in Cluster (Connected)', 'server2': 'Peer in Cluster (Connected)'}, 'uuid': 'fcac92e9-b7c5-440a-bac0-8fb6dfe4b899'}, 'server3': {'state': {'server1': 'Peer in Cluster (Connected)', 'server2': 'Peer in Cluster (Connected)', 'server4': 'Peer in Cluster (Connected)'}, 'uuid': '09366c55-a8b6-4b27-b23b-ee40bb9fd224'}, 'server2': {'state': {'server1': 'Peer in Cluster (Connected)', 'server3': 'Peer in Cluster (Connected)', 'server4': 'Peer in Cluster (Connected)'}, 'uuid': '68211a37-3497-4920-a86a-128db5e0fe49'}}, 'peers': 
4}\n\nIf ``remotehost`` is set, status will be run on remote host.\n\"\"\"\n return _status(remotehost)\n\ndef _status(remotehost=\"localhost\",recursion=False):\n peerstatus = {\"host\": {},}\n program = [\"/usr/sbin/gluster\", \n \"--remote-host=%s\" % remotehost, \n \"peer\", \n \"status\"]\n try:\n response = str(subprocess.check_output(program,stderr=subprocess.STDOUT), encoding=\"utf8\").split(\"\\n\")\n except subprocess.CalledProcessError as e:\n print(e.output)\n raise\n\n # step through the output and build the dict\n for line in response:\n if line == \"No peers present\":\n peerstatus[\"peers\"] = 0\n return peerstatus\n m = re.match(\"^Number of Peers: (\\d+)$\", line)\n if m:\n peerstatus[\"peers\"] = int(m.group(1)) + 1\n m = re.match(\"^Hostname: (.+)$\", line)\n if m:\n hostname = m.group(1)\n peerstatus[\"host\"][hostname] = {}\n peerstatus[\"host\"][hostname][\"state\"] = {}\n m = re.match(\"Uuid: ([-0-9a-f]+)\", line)\n if m:\n peerstatus[\"host\"][hostname][\"uuid\"] = m.group(1)\n\n # our first pass through\n if not recursion:\n remotehost = [x for x in \n list(_status(remotehost=list(peerstatus[\"host\"].keys())[0],recursion=True)[\"host\"].keys())\n if x not in list(peerstatus[\"host\"].keys())][0]\n peerstatus[\"host\"][remotehost] = {}\n peerstatus[\"host\"][remotehost][\"self\"] = True\n peerstatus[\"host\"][remotehost][\"uuid\"] = _status(remotehost=list(peerstatus[\"host\"].keys())[0],recursion=True)[\"host\"][hostname][\"uuid\"]\n peerstatus[\"host\"][remotehost][\"state\"] = {}\n for host in list(peerstatus[\"host\"].keys()):\n remotestatus = _status(host,recursion=True)\n for statehost in remotestatus[\"host\"]:\n for state in remotestatus[\"host\"][statehost][\"state\"]:\n peerstatus[\"host\"][statehost][\"state\"][state] = remotestatus[\"host\"][statehost][\"state\"][state]\n for line in response:\n m = re.match(\"^Hostname: (.+)$\", line)\n if m:\n hostname = m.group(1)\n m = re.match(\"State: (.+)\", line)\n if m:\n peerstatus[\"host\"][hostname][\"state\"][remotehost] = m.group(1)\n\n return peerstatus\n","sub_path":"src/peer/status.py","file_name":"status.py","file_ext":"py","file_size_in_byte":3272,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"444398541","text":"import tkinter\nfrom point import Point\nfrom line import *\nfrom circle import *\nimport turtle\n\nfrom tkinter import colorchooser\n\nfrom collections import deque\n\nCURVE = 1\nLINE_BRESENHAM = 2\nLINE_DDA = 3\nLINE_MID = 4\nCIRCLE_MID = 5\nCIRCLE_BRESENHAM = 6\nELLIPSE_BRESENHAM = 7\nPOLYGON = 8\nERASE = 20\nglobal busy\nglobal now_item\nglobal pre_point\nbusy = False\nnow_item=None #移动多边形时选中的图形元素对象id\npre_point=None #鼠标移动事件的上一个鼠标位置\n\napp = tkinter.Tk()\napp.title('画板')\napp.attributes(\"-alpha\", 0.9) # 设置窗体透明度0-1之间\n\n# 绘制多边形时储存的状��,选择的点的位置,和生成的点的图像元素\nclickpoints = deque(maxlen=1000)\ntemppoints = deque(maxlen=100)\n\n\n# 控制是否允许画图的变量,1:允许,0:不允许\nis_draw = tkinter.IntVar(value=0)\n# 控制画图类型的变量,1:曲线,2:直线,3:矩形,4:文本,5:橡皮\nmode = tkinter.IntVar(value=1)\n# 记录鼠标位置的变量\npoint_start = Point(tkinter.IntVar(value=0), tkinter.IntVar(value=0))\n# 前景色\nlineColor = 'red'\nfillColor = 'green'\nbackColor = '#bcbcbc'\n\ncanvas = tkinter.Canvas(app, width=800, height=600)\ncanvas.pack(side=tkinter.LEFT, expand=tkinter.YES, fill=tkinter.NONE)\nturtleCv = turtle.TurtleScreen(canvas)\nturtleCv.setworldcoordinates(0, -800, 600, 0)\n\ncanvas['bg'] = backColor\n\ncanvas.create_text(400, 300, # 使用create_text方法在坐标(302,77)处绘制文字\n font=\"Times 100 italic 
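Given the nested dict that the gluster status() docstring above documents, a small consumer sketch (hostnames are illustrative):

peers = status(remotehost='server1')
print('peers in pool:', peers['peers'])
for host, info in peers['host'].items():
    for other, state in info['state'].items():
        print('%s -> %s: %s' % (host, other, state))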
bold\",\n text='' # 所绘制文字的内容\n , fill='LightGrey') # 所绘制文字的颜色为灰色\n\n\n# 画传入的点集 temp为True时,在move中会删除\ndef create_point(points, fill=lineColor, temp=False):\n if points is not None:\n for p in points:\n x, y = p.x, p.y\n if temp:\n canvas.create_oval(x, y, x, y, outline=fill, fill=fill, tag='temp')\n else:\n canvas.create_oval(x, y, x, y, outline=fill, fill=fill)\n\n\n# 鼠标左键单击,允许画图\ndef on_left_button_down(event):\n is_draw.set(1)\n point_start.set(event.x, event.y)\n\n\ncanvas.bind('', on_left_button_down)\n# 记录最后绘制图形的id\nlastDraw = 0\n\n\n# 按住鼠标左键移动,画图\ndef on_left_button_move(event):\n global pre_point\n global now_item\n if is_draw.get() == 0:\n return\n\n if mode.get() == CURVE:\n # 使用当前选择的前景色绘制曲线\n canvas.create_line(point_start.x, point_start.y, event.x, event.y, fill=lineColor)\n point_start.set(event.x, event.y)\n elif mode.get() == LINE_BRESENHAM:\n # 绘制直线,先删除刚刚画过的直线,再画一条新的直线\n try:\n canvas.delete('temp')\n except Exception as e:\n pass\n points = bresenham_line(point_start, Point(event.x, event.y))\n create_point(points, fill=lineColor, temp=True)\n\n elif mode.get() == LINE_DDA:\n # 绘制直线,先删除刚刚画过的直线,再画一条新的直线\n try:\n canvas.delete('temp')\n except Exception as e:\n pass\n points = dda_line(point_start, Point(event.x, event.y))\n create_point(points, fill=lineColor, temp=True)\n\n elif mode.get() == LINE_MID:\n # 绘制直线,先删除刚刚画过的直线,再画一条新的直线\n try:\n canvas.delete('temp')\n except Exception as e:\n pass\n points = mid_line(point_start, Point(event.x, event.y))\n create_point(points, fill=lineColor, temp=True)\n\n elif mode.get() == CIRCLE_MID:\n # 绘制圆形,先删除刚刚画过的圆形,再画一个新的圆形\n # global lastDraw\n try:\n canvas.delete('temp')\n except Exception as e:\n pass\n points = mid_circle(point_start, Point(event.x, event.y))\n create_point(points, fill=lineColor, temp=True)\n\n elif mode.get() == CIRCLE_BRESENHAM:\n # 绘制圆形,先删除刚刚画过的圆形,再画一个新的圆形\n # global lastDraw\n try:\n canvas.delete('temp')\n except Exception as e:\n pass\n points = bresenham_circle(point_start, Point(event.x, event.y))\n create_point(points, fill=lineColor, temp=True)\n\n elif mode.get() == ELLIPSE_BRESENHAM:\n # 绘制圆形,先删除刚刚画过的圆形,再画一个新的圆形\n # global lastDraw\n try:\n canvas.delete('temp')\n except Exception as e:\n pass\n points = bresenham_ellipse(point_start, Point(event.x, event.y))\n create_point(points, fill=lineColor, temp=True)\n\n elif mode.get() == ERASE:\n # 橡皮,使用背景色填充10*10的矩形区域\n canvas.create_rectangle(event.x - 5, event.y - 5, event.x + 5, event.y + 5,\n outline=backColor, fill=backColor)\n\n elif mode.get() == POLYGON:\n\n now_item=canvas.find_closest(event.x,event.y)[0]\n if pre_point==None:\n pre_point=[event.x,event.y]\n\n if pre_point!=None:\n canvas.tag_raise(now_item)\n canvas.move(now_item,event.x-pre_point[0],event.y-pre_point[-1])\n pre_point = [event.x, event.y]\n\n\ncanvas.bind('', on_left_button_move)\n\n\ndef init_bind(self):\n self.cv.bind('', self.mouse_move)\n self.cv.bind(\"\", self.StartMove) # 监听左键按下操作响应函数\n self.cv.bind(\"\", self.StopMove) # 监听左键松开操作响应函数\n self.cv.bind(\"\", self.OnMotion) # 监听鼠标移动操作响应函数\n\ndef drawPolygon(event):\n if mode.get() != POLYGON:\n return\n try:\n for x, y in clickpoints:\n if x in range(event.x - 8, event.x + 8) and y in range(event.y - 8, event.y + 8):\n print('封闭')\n for item in temppoints:\n canvas.delete(item)\n canvas.create_polygon(*clickpoints, outline=lineColor, fill=fillColor)\n clearPolygonState()\n return\n except Exception as e:\n print(e)\n clearPolygonState()\n return\n\n clickpoints.append((event.x, event.y))\n 
temppoints.append(canvas.create_oval(event.x - 3, event.y - 3, event.x + 3, event.y + 3, outline='red', fill='red'))\n\n\ncanvas.bind('', drawPolygon)\n\n\n# 鼠标左键抬起,结束画图\ndef on_left_button_up(event):\n global now_item\n global pre_point\n if mode.get() == LINE_BRESENHAM:\n # 绘制直线\n points = bresenham_line(point_start, Point(event.x, event.y))\n create_point(points, lineColor)\n\n elif mode.get() == LINE_DDA:\n points = dda_line(point_start, Point(event.x, event.y))\n create_point(points, lineColor)\n\n elif mode.get() == LINE_MID:\n points = mid_line(point_start, Point(event.x, event.y))\n create_point(points, lineColor)\n\n elif mode.get() == CIRCLE_MID:\n # 绘制圆形\n points = mid_circle(point_start, Point(event.x, event.y))\n create_point(points, lineColor)\n\n elif mode.get() == CIRCLE_BRESENHAM:\n # 绘制圆形\n points = bresenham_circle(point_start, Point(event.x, event.y))\n create_point(points, lineColor)\n\n elif mode.get() == ELLIPSE_BRESENHAM:\n # 绘制圆形\n points = bresenham_ellipse(point_start, Point(event.x, event.y))\n create_point(points, lineColor)\n\n # elif mode.get() == select_pig:\n\n # points = bresenham_ellipse(point_start, Point(event.x, event.y))\n # create_point(points, lineColor)\n elif mode.get() == POLYGON:\n\n global pre_point\n pre_point=None\n is_draw.set(0)\n\n\ncanvas.bind('', on_left_button_up)\n\n\n# 创建菜单\n\n\ndef clearPolygonState():\n temppoints.clear()\n clickpoints.clear()\n\n\ndef clear():\n for item in canvas.find_all():\n canvas.delete(item)\n\n\n\n\ndef drawCurve():\n mode.set(CURVE)\n print(mode.get())\n\n\ndef select_line_bresenham():\n mode.set(LINE_BRESENHAM)\n\n\ndef select_line_dda():\n mode.set(LINE_DDA)\n\n\ndef select_line_mid():\n mode.set(LINE_MID)\n\n\ndef select_circle_mid():\n mode.set(CIRCLE_MID)\n\n\ndef select_circle_bresenham():\n mode.set(CIRCLE_BRESENHAM)\n\n\ndef select_polygon():\n print('set polygon')\n mode.set(POLYGON)\n\n\ndef select_ellipse_bresenham():\n mode.set(ELLIPSE_BRESENHAM)\n\n\n# 选择线条颜色\ndef chooseLineColor():\n global lineColor\n lineColor = colorchooser.askcolor()[1]\n\n\n# 选择填充颜色\ndef chooseFillColor():\n global fillColor\n fillColor = colorchooser.askcolor()[1]\n\n\n# 选择绘图板底色\ndef chooseBackColor():\n pass\n # global backColor\n # backColor = colorchooser.askcolor()[1]\n # canvas['bg']=backColor\n\nmenubar = tkinter.Menu(app)\n\n\n\n# 在顶级菜单实例下创建子菜单实例\n\nmenu3 = tkinter.Menu(menubar, tearoff=0)\nfor each, command in zip(['绘图线条颜色', '多边形填充颜色'], [chooseLineColor, chooseFillColor]):\n menu3.add_command(label=each, command=command)\nmenubar.add_cascade(label='颜色', menu=menu3)\n#顶级菜单\n#menubar.add_command(label='曲线', command=drawCurve)\n# 在顶级菜单实例下创建子菜单实例\nmenu1 = tkinter.Menu(menubar, tearoff=0)\nfor each, command in zip(['DDA画直线', '中点画直线', 'line_bresenham画直线'],\n [select_line_dda, select_line_mid, select_line_bresenham]):\n menu1.add_command(label=each, command=command)\nmenubar.add_cascade(label='直线', menu=menu1)\n# 在顶级菜单实例下创建子菜单实例\nmenu2 = tkinter.Menu(menubar, tearoff=0)\nfor each, command in zip(['中点画圆', 'bresenham画圆'],\n [select_circle_mid, select_circle_bresenham]):\n menu2.add_command(label=each, command=command)\nmenubar.add_cascade(label='圆', menu=menu2)\n# 顶级菜单\nmenubar.add_command(label='椭圆', command=select_ellipse_bresenham)\n\n\n# 顶级菜单\nmenubar.add_command(label='多边形', command=select_polygon)\n\n\n\n\n\n\n\n\n# 顶级菜单\nmenubar.add_command(label='清除', command=clear)\n\n# 顶级菜单实例应用到大窗口中\napp['menu'] = menubar\ncanvas.pack(fill=tkinter.BOTH, 
expand=tkinter.YES)\napp.mainloop()\n\n\n\n","sub_path":"job/画图板/main5.py","file_name":"main5.py","file_ext":"py","file_size_in_byte":10354,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"278193022","text":"\n\nimport prims\nimport llvm.core as llc \n\nsigned_int_comparisons = {\n prims.equal : llc.ICMP_EQ, \n prims.not_equal : llc.ICMP_NE, \n prims.greater : llc.ICMP_SGT, \n prims.greater_equal : llc.ICMP_SGE, \n prims.less : llc.ICMP_SLT, \n prims.less_equal : llc.ICMP_SLE\n}\n\nunsigned_int_comparisons = {\n prims.equal : llc.ICMP_EQ, \n prims.not_equal : llc.ICMP_NE, \n prims.greater : llc.ICMP_UGT, \n prims.greater_equal : llc.ICMP_UGE, \n prims.less : llc.ICMP_ULT, \n prims.less_equal : llc.ICMP_ULE\n} \n\nfloat_comparisons = { \n prims.equal : llc.FCMP_OEQ, \n prims.not_equal : llc.FCMP_ONE, \n prims.greater : llc.FCMP_OGT, \n prims.greater_equal : llc.FCMP_OGE, \n prims.less : llc.FCMP_OLT, \n prims.less_equal : llc.FCMP_OLE \n}\n\n\nsigned_binops = { \n prims.add : 'add', \n prims.subtract : 'sub', \n prims.multiply : 'mul', \n prims.divide : 'sdiv', \n prims.mod : 'srem'\n}\n\nunsigned_binops = { \n prims.add : 'add', \n prims.subtract : 'sub', \n prims.multiply : 'mul', \n prims.divide : 'udiv', \n prims.mod : 'urem', \n}\n\nfloat_binops = { \n prims.add : 'fadd', \n prims.subtract : 'fsub', \n prims.multiply : 'fmul', \n prims.divide : 'fdiv', \n prims.mod : 'frem', \n}\n\n# Note: there is no division instruction between booleans\n# so b1 / b2 should be translated to int(b1) / int(b2) \nbool_binops = { \n prims.add : 'or_',\n prims.multiply : 'and_', \n prims.subtract : 'xor',\n #prims.logical_not : 'not_'\n}\n","sub_path":"parakeet/llvm_prims.py","file_name":"llvm_prims.py","file_ext":"py","file_size_in_byte":1442,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"55139557","text":"# Copyright (c) Facebook, Inc. 
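The tables in llvm_prims.py map parakeet primitives to llvmpy builder method names; a sketch of how such a table is typically consumed. The builder, operands, and helper name here are assumptions for illustration:

def emit_binop(builder, prim, lhs, rhs, is_float, name='tmp'):
    table = float_binops if is_float else signed_binops
    # each table value ('add', 'fmul', ...) names a method on llvm.core.Builder
    method = getattr(builder, table[prim])
    return method(lhs, rhs, name)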
and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport math\nfrom dataclasses import dataclass, field\n\nimport torch\nfrom fairseq import metrics, utils\nfrom fairseq.criterions import FairseqCriterion, register_criterion\nfrom fairseq.dataclass import FairseqDataclass\nfrom omegaconf import II\n\n\n@dataclass\nclass RegLabelSmoothedCrossEntropyCriterionConfig(FairseqDataclass):\n label_smoothing: float = field(\n default=0.0,\n metadata={\"help\": \"epsilon for label smoothing, 0 means no label smoothing\"},\n )\n reg_alpha: float = field(\n default=5.0,\n metadata={\"help\": \"weight for KL loss\"},\n )\n ignore_prefix_size: int = field(\n default=0,\n metadata={\"help\": \"Ignore first N tokens\"},\n )\n sentence_avg: bool = II(\"optimization.sentence_avg\")\n\n\ndef label_smoothed_nll_loss(lprobs, target, epsilon, ignore_index=None, reduce=True):\n if target.dim() == lprobs.dim() - 1:\n target = target.unsqueeze(-1)\n nll_loss = -lprobs.gather(dim=-1, index=target)\n smooth_loss = -lprobs.sum(dim=-1, keepdim=True)\n if ignore_index is not None:\n pad_mask = target.eq(ignore_index)\n nll_loss.masked_fill_(pad_mask, 0.0)\n smooth_loss.masked_fill_(pad_mask, 0.0)\n else:\n nll_loss = nll_loss.squeeze(-1)\n smooth_loss = smooth_loss.squeeze(-1)\n if reduce:\n nll_loss = nll_loss.sum()\n smooth_loss = smooth_loss.sum()\n eps_i = epsilon / (lprobs.size(-1) - 1)\n loss = (1.0 - epsilon - eps_i) * nll_loss + eps_i * smooth_loss\n return loss, nll_loss\n\n\n@register_criterion(\n \"reg_label_smoothed_cross_entropy\", dataclass=RegLabelSmoothedCrossEntropyCriterionConfig\n)\nclass RegLabelSmoothedCrossEntropyCriterion(FairseqCriterion):\n def __init__(\n self,\n task,\n sentence_avg,\n label_smoothing,\n ignore_prefix_size=0,\n reg_alpha=5.0,\n ):\n super().__init__(task)\n self.sentence_avg = sentence_avg\n self.eps = label_smoothing\n self.ignore_prefix_size = ignore_prefix_size\n self.reg_alpha = reg_alpha\n\n def forward(self, model, sample, reduce=True):\n \"\"\"Compute the loss for the given sample.\n\n Returns a tuple with three elements:\n 1) the loss\n 2) the sample size, which is used as the denominator for the gradient\n 3) logging outputs to display while training\n \"\"\"\n if self.training:\n net_output_1 = model(**sample['net_input'])\n net_output_2 = model(**sample['net_input'])\n net_output = torch.cat([net_output_1, net_output_2], dim=0)\n\n lprobs = model.get_normalized_probs(net_output, log_probs=True)\n target = model.get_targets(sample, net_output)\n pad_mask = target.unsqueeze(-1).eq(self.padding_idx)\n\n kl_loss = self.compute_kl_loss(lprobs, pad_mask)\n\n lprobs = lprobs.view(-1, lprobs.size(-1))\n target = torch.cat([target, target.clone()], dim=0)\n loss, nll_loss = label_smoothed_nll_loss(\n lprobs, target.view(-1, 1), self.eps, ignore_index=self.padding_idx, reduce=reduce,\n )\n\n nll_loss = nll_loss / 2\n sample_size = (\n sample[\"target\"].size(0) if self.sentence_avg else sample[\"ntokens\"]\n )\n loss = loss / 2 + self.reg_alpha * kl_loss\n\n logging_output = {\n \"loss\": loss.data,\n \"nll_loss\": nll_loss.data,\n \"ntokens\": sample[\"ntokens\"],\n \"nsentences\": sample[\"target\"].size(0),\n \"sample_size\": sample_size,\n \"kl_loss\": kl_loss.data,\n }\n else:\n net_output = model(**sample[\"net_input\"])\n loss, nll_loss = self.compute_loss(model, net_output, sample, reduce=reduce)\n sample_size = (\n sample[\"target\"].size(0) if self.sentence_avg 
else sample[\"ntokens\"]\n )\n logging_output = {\n \"loss\": loss.data,\n \"nll_loss\": nll_loss.data,\n \"ntokens\": sample[\"ntokens\"],\n \"nsentences\": sample[\"target\"].size(0),\n \"sample_size\": sample_size,\n }\n return loss, sample_size, logging_output\n\n def get_lprobs_and_target(self, model, net_output, sample):\n lprobs = model.get_normalized_probs(net_output, log_probs=True)\n target = model.get_targets(sample, net_output)\n if self.ignore_prefix_size > 0:\n if getattr(lprobs, \"batch_first\", False):\n lprobs = lprobs[:, self.ignore_prefix_size :, :].contiguous()\n target = target[:, self.ignore_prefix_size :].contiguous()\n else:\n lprobs = lprobs[self.ignore_prefix_size :, :, :].contiguous()\n target = target[self.ignore_prefix_size :, :].contiguous()\n return lprobs.view(-1, lprobs.size(-1)), target.view(-1)\n\n def compute_loss(self, model, net_output, sample, reduce=True):\n lprobs, target = self.get_lprobs_and_target(model, net_output, sample)\n loss, nll_loss = label_smoothed_nll_loss(\n lprobs,\n target,\n self.eps,\n ignore_index=self.padding_idx,\n reduce=reduce,\n )\n return loss, nll_loss\n\n def compute_kl_loss(self, lprobs, pad_mask=None, reduce=True):\n net_prob = lprobs\n net_prob_tec = torch.exp(lprobs)\n\n p, q = torch.split(net_prob, net_prob.size(0)//2, dim=0)\n p_tec, q_tec = torch.split(net_prob_tec, net_prob_tec.size(0)//2, dim=0)\n\n p_loss = torch.nn.functional.kl_div(p, q_tec, reduction='none')\n q_loss = torch.nn.functional.kl_div(q, p_tec, reduction='none')\n\n if pad_mask is not None:\n p_loss.masked_fill_(pad_mask, 0.)\n q_loss.masked_fill_(pad_mask, 0.)\n\n if reduce:\n p_loss = p_loss.sum()\n q_loss = q_loss.sum()\n\n loss = (p_loss + q_loss) / 2\n return loss\n\n @classmethod\n def reduce_metrics(cls, logging_outputs) -> None:\n \"\"\"Aggregate logging outputs from data parallel training.\"\"\"\n loss_sum = sum(log.get(\"loss\", 0) for log in logging_outputs)\n nll_loss_sum = sum(log.get(\"nll_loss\", 0) for log in logging_outputs)\n ntokens = sum(log.get(\"ntokens\", 0) for log in logging_outputs)\n sample_size = sum(log.get(\"sample_size\", 0) for log in logging_outputs)\n\n metrics.log_scalar(\n \"loss\", loss_sum / sample_size / math.log(2), sample_size, round=3\n )\n metrics.log_scalar(\n \"nll_loss\", nll_loss_sum / ntokens / math.log(2), ntokens, round=3\n )\n metrics.log_derived(\n \"ppl\", lambda meters: utils.get_perplexity(meters[\"nll_loss\"].avg)\n )\n\n if \"kl_loss\" in logging_outputs[0]:\n kl_sum = sum(log.get(\"kl_loss\", 0) for log in logging_outputs)\n metrics.log_scalar(\"kl_loss\", kl_sum / ntokens, ntokens, round=3)\n\n total = utils.item(sum(log.get(\"total\", 0) for log in logging_outputs))\n if total > 0:\n metrics.log_scalar(\"total\", total)\n n_correct = utils.item(\n sum(log.get(\"n_correct\", 0) for log in logging_outputs)\n )\n metrics.log_scalar(\"n_correct\", n_correct)\n metrics.log_derived(\n \"accuracy\",\n lambda meters: round(\n meters[\"n_correct\"].sum * 100.0 / meters[\"total\"].sum, 3\n )\n if meters[\"total\"].sum > 0\n else float(\"nan\"),\n )\n\n @staticmethod\n def logging_outputs_can_be_summed() -> bool:\n \"\"\"\n Whether the logging outputs returned by `forward` can be summed\n across workers prior to calling `reduce_metrics`. 
Setting this\n to True will improves distributed training speed.\n \"\"\"\n return True\n","sub_path":"fairseq/criterions/label_smoothed_cross_entropy_rdrop.py","file_name":"label_smoothed_cross_entropy_rdrop.py","file_ext":"py","file_size_in_byte":8110,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"195251251","text":"from stack_and_queue.stacks_and_queues import Queue\n\nclass Node:\n \"\"\"K-Ary Tree Node\"\"\"\n\n def __init__(self, value):\n self.value = value\n self.children = []\n\n def clone(self, converter=None):\n value = self.value\n\n if converter:\n value = converter(value)\n\n return Node(value)\n\nclass KaryTree:\n def __init__(self, root=None):\n self.root = root\n\n def breadth_first(self):\n queue = Queue()\n\n collection = []\n\n queue.enqueue(self.root)\n\n while not queue.is_empty():\n node = queue.dequeue()\n collection.append(node.value)\n for child in node.children:\n queue.enqueue(child)\n\n return collection\n\n def clone(self, converter=None):\n \"\"\"\n return clone of self\n applies optional converter function to value of each node\n which is handy for things like fizz_buzz\n \"\"\"\n\n clone_root = self.root.clone(converter)\n clone_tree = KaryTree(clone_root)\n\n pairs = Queue()\n\n pairs.enqueue((self.root, clone_root))\n\n while not pairs.is_empty():\n source_node, clone_node = pairs.dequeue()\n for source_child_node in source_node.children:\n clone_child_node = source_child_node.clone(converter)\n pair = (source_child_node, clone_child_node)\n pairs.enqueue(pair)\n clone_node.children.append(clone_child_node)\n return clone_tree","sub_path":"python/karytree/karytree.py","file_name":"karytree.py","file_ext":"py","file_size_in_byte":1511,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"300286689","text":"#! python3\r\n\r\nimport openpyxl\r\nimport csv\r\nimport os\r\nimport argparse\r\nimport logging\r\n\r\nlogging.disable(logging.CRITICAL)\r\n\r\n\r\ndef extract_cell_values(row):\r\n values = []\r\n for cell in row:\r\n values.append(cell.value)\r\n return values\r\n\r\n\r\nif __name__ == '__main__':\r\n logging.basicConfig(level=logging.DEBUG, format='%(message)s')\r\n\r\n excel_files = filter(lambda file: file.endswith('.xlsx'), os.listdir())\r\n os.mkdir('converted_csv')\r\n \r\n for file in excel_files:\r\n print('Converting workbook...')\r\n wb = openpyxl.load_workbook(file)\r\n\r\n for sheet in wb.worksheets:\r\n with open('converted_csv\\\\' + sheet.title + '_csv.csv', 'w', \r\n newline='') as output:\r\n writer = csv.writer(output)\r\n for row in sheet.rows:\r\n writer.writerow(extract_cell_values(row))\r\n","sub_path":"Ch. 
14 CSV and JSON/excel_to_csv.py","file_name":"excel_to_csv.py","file_ext":"py","file_size_in_byte":884,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"7239462","text":"import cv2\nimport numpy as np\nfrom PIL import Image\n\nthug_mask=\"mask.png\"\np1=\"ronaldo.jpg\"\nimg=cv2.imread(\"ronaldo.jpg\")\ngray=cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\nface_cascade=cv2.CascadeClassifier(\"haarcascade_frontalface_default.xml\")\nfaces=face_cascade.detectMultiScale(gray,1.3,0)\nbackground=Image.open(p1)\nfor (x,y,w,h) in faces:\n cv2.rectangle(img,(x,y),(x+w,y+h),(0,0,255),2)\n cv2.waitKey(0)\n mask=Image.open(thug_mask)\n r_mask=mask.resize((w,h),Image.ANTIALIAS)\n offset=(x,y)\n background.paste(mask,offset,mask=r_mask)\nbackground.save(\"OKIT.jpg\")\n","sub_path":"PyQt5/Masks/ronaldo's thug.py","file_name":"ronaldo's thug.py","file_ext":"py","file_size_in_byte":573,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"20388884","text":"import logging\n\nfrom plenum.common.looper import Looper\nfrom plenum.server.node import Node\nfrom plenum.test.eventually import eventually\nfrom plenum.test.helper import TestNodeSet, sendRandomRequest, \\\n checkSufficientRepliesRecvd\n\nnodeCount = 4\n\n\n# noinspection PyIncorrectDocstring\ndef testAvgReqLatency(looper: Looper, nodeSet: TestNodeSet, client1):\n \"\"\"\n Checking if average latency is being set\n \"\"\"\n\n for i in range(5):\n req = sendRandomRequest(client1)\n looper.run(eventually(checkSufficientRepliesRecvd,\n client1.inBox, req.reqId, 1,\n retryWait=1, timeout=5))\n\n for node in nodeSet: # type: Node\n mLat = node.monitor.getAvgLatencyForClient(client1.defaultIdentifier,\n node.instances.masterId)\n bLat = node.monitor.getAvgLatencyForClient(client1.defaultIdentifier,\n *node.instances.backupIds)\n logging.debug(\"Avg. master latency : {}. Avg. 
backup latency: {}\".\n format(mLat, bLat))\n assert mLat > 0\n assert bLat > 0\n","sub_path":"plenum/test/monitoring/test_avg_latency.py","file_name":"test_avg_latency.py","file_ext":"py","file_size_in_byte":1172,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"416952897","text":"from PIL import Image\nimport numpy as np\nfrom tensorflow.keras.models import Sequential, load_model\nfrom tensorflow.keras.layers import Dense, Dropout, Conv2D\n\nnumber_of_image = 299\n\n\ndef rgb2gray(rgb):\n gray = np.mean(rgb, axis=2)\n return gray.reshape(512, 512, 1)\n\n\ndef get_features(labels):\n features = []\n for rgb in labels:\n gray_image = rgb2gray(rgb)\n features.append(gray_image)\n features = np.array(features)\n print(\"Features shape: \" + str(features.shape))\n return features\n\n\ndef get_features_labels():\n labels = []\n for id in range(number_of_image):\n image_name = \"feature/id\" + str(id) + \".jpeg\"\n image = np.asarray(Image.open(image_name)) / 255\n labels.append(image)\n\n features = get_features(labels)\n labels = np.array(labels)\n print(\"Labels shape: \" + str(labels.shape))\n return features, labels\n\n\ndef get_model(model_name=None):\n def create_model():\n model = Sequential()\n model.add(Conv2D(16, (3, 3), activation='relu', padding=\"same\", input_shape=(512, 512, 1)))\n model.add(Dropout(.2))\n model.add(Conv2D(16, (3, 3), activation='relu', padding=\"same\"))\n model.add(Dropout(.2))\n model.add(Conv2D(3, (3, 3), activation='relu', padding=\"same\"))\n model.compile(optimizer='adam', loss='mse')\n return model\n\n if model_name:\n try:\n model_l = load_model('keras_model/' + model_name)\n model_l.compile(optimizer='adam', loss='mse')\n print('Sucessfully loaded model')\n return model_l, True\n except:\n print('Failed to load model. 
Creating new model')\n return create_model(), False\n else:\n print('Creating new model')\n return create_model(), False\n\n\ndef output_images(predictions, to, start_index):\n for i, prediction in enumerate(predictions):\n image = (prediction * 255).astype(np.uint8)\n Image.fromarray(image).save(to + str(i + start_index) + '.jpeg')\n\n\ndef input_images(arr, to, start_index):\n for i, prediction in enumerate(arr):\n image2 = np.zeros((512, 512, 3))\n image2[:, :, 0] = prediction.reshape(512, 512)\n image2[:, :, 1] = prediction.reshape(512, 512)\n image2[:, :, 2] = prediction.reshape(512, 512)\n image = (image2 * 255).astype(np.uint8)\n Image.fromarray(image).save(to + str(i + start_index) + '.jpeg')\n\n\ndef main():\n features, labels = get_features_labels()\n cutoff = round(len(features) * .8)\n train_feature, test_feature = features[0:cutoff], features[cutoff:]\n train_label, test_label = labels[0:cutoff], labels[cutoff:]\n model_name = 'best_model'\n model, loaded = get_model(model_name)\n if not loaded:\n for i in range(1000):\n model.fit(train_feature, train_label, validation_data=(test_feature, test_label), epochs=1)\n model.save('keras_model/' + model_name + str(i))\n predictions = model.predict(test_feature)\n input_images(test_feature, 'test_input/input', cutoff)\n output_images(predictions, 'test_output/output', cutoff)\n\n\nif __name__ == '__main__':\n main()","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":3145,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"96872791","text":"from .models import AppointmentSlot, DemoAppointment\nfrom django.contrib.auth.models import User\nfrom django.db.models import Q\nimport datetime\nfrom django.conf import settings\nfrom oauth2client.contrib import xsrfutil\nimport httplib2\nfrom apiclient import discovery\nfrom oauth2client import client, GOOGLE_TOKEN_URI, GOOGLE_REVOKE_URI\nfrom oauth2client.file import Storage\nfrom oauth2client import tools\nfrom oauth2client.client import GoogleCredentials\nimport httplib2\nimport os\n\n\n'''compute available slots for a date. basically counts all\n available slots to 30 minute gaps. removes slots which \n already have appointments. 
return slot availability list'''\ndef get_available_slots_by_date(date):\n available_slots = dict()\n app_slots = AppointmentSlot.objects.filter(date=date).all()\n for slot in app_slots:\n start = slot.start_time\n while start0:\n available_slots_list.\\\n append(datetime.time(*list(map(int,key.split(':')))))\n\n return sorted(available_slots_list)\n\n\n'''compute sales reps available for given date and time.\n return sales rep with min appointments scheduled for date'''\ndef get_next_sales_rep(date, time):\n available_users = User.objects.\\\n filter(Q(slots__date=date) & \\\n Q(slots__start_time__lte=time) & \\\n Q(slots__end_time__gt=time)).\\\n filter(~Q(appointments__start_time=time)).all()\n return min([u for u in available_users],\n key=lambda user: len(user.appointments.filter(date=date).all()))\n\n\ndef schedule_g_calendar_event(appointment):\n credentials = client.OAuth2Credentials(\n None, settings.G_CLIENT_ID, settings.G_CLIENT_SECRET, \n appointment.user.caleder.first().refresh_token, None, GOOGLE_TOKEN_URI,\n None, revoke_uri=GOOGLE_REVOKE_URI)\n http = credentials.authorize(httplib2.Http())\n service = discovery.build('calendar', 'v3', http=http)\n event = {\n 'summary': 'Sales call Demo',\n 'location': 'Online',\n 'description': 'Demo call for sales',\n 'start': {\n 'dateTime': datetime.datetime.combine(appointment.date, appointment.start_time).isoformat(),\n 'timeZone': 'UTC',\n },\n 'end': {\n 'dateTime': (datetime.datetime.combine(appointment.date, appointment.start_time) +\\\n datetime.timedelta(minutes=30)).isoformat(),\n 'timeZone': 'UTC',\n },\n 'attendees': [\n {'email': appointment.client_email},\n ],\n 'reminders': {\n 'useDefault': False,\n 'overrides': [\n {'method': 'email', 'minutes': 24 * 60},\n {'method': 'popup', 'minutes': 10},\n ],\n },\n }\n event = service.events().insert(calendarId=appointment.user.caleder.first().calender_id, body=event, sendNotifications=True,).execute()\n","sub_path":"scheduler/services.py","file_name":"services.py","file_ext":"py","file_size_in_byte":3410,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"29094240","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jul 22 16:26:11 2016\n\n@author: egron\n\"\"\"\n\n\n\n\nimport time\nimport subprocess\n#from __future__ import print_function\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.backends.backend_pdf as pbp\nfrom PIL import Image\nimport pyzdde.zdde as pyz\nimport zzdde_SylvainVersion as pzz\nimport camera_functions as Cam\nimport motors_functions as Motors\nimport XPS_Q8_drivers \nimport sys\nimport z_alignment_functions as Zalign\nimport result_analysis_functions as result\n\n\n#Definition of the properties of the system: \nwith open('Z:\\\\Testbeds\\\\JOST\\\\Alignment\\\\data\\\\date_for_python.txt', 'r') as pf:\n Date_ = np.loadtxt(pf)\n\nDate=str(int(Date_[0]))+'-'+str(int(Date_[1]))+'-'+str(int(Date_[2]))+'-'+str(int(Date_[3]))+'h-'+str(int(Date_[4]))+'min'\n \nwith open('Z:\\Testbeds\\JOST\\Alignment\\data\\\\'+Date+'\\\\python_config_file_'+Date+'.txt', 'r') as pf:\n configFile_python = np.loadtxt(pf)\n \n#FoV to define the Interaction matrix, and simulate images from a pertubation vector\nnb_FoV = (np.size(configFile_python)-6)/8\n\n \n##############################################################################################################################\n#LINEAR CONTROL CALCULATION\n#Getting field vector\nfield_vector=np.zeros((2*nb_FoV))\nfor ind in range(nb_FoV):\n 
field_vector[2*ind]=configFile_python[6+ind*8]\n field_vector[2*ind+1]=configFile_python[7+ind*8]\n \n#Number of Zernikes to descibre the Wavefront (Piston, Tip and Tilt are not included): \n#It's number of zernike I get from the phase diversity. \nnbr_zernikes = configFile_python[4]-2\ncond_val=0.1\n\nnbr_iterations=5\n\nnoise_level=2.\npert=np.array([1.,1.,1.,1.]) \n \nactuator_value=np.array([0.5,0.5,0.5,0.5])\n\n# Getting the Interaction Matrix Vector \naligned_state=np.zeros((4)) \nIM=Zalign.IM_on_vectors(field_vector,actuator_value,aligned_state,nbr_zernikes)\n\n#Getting the measured vector : stacks the wavefront measured for all the fields, and removes the tip-tilt values\nmeasure_vect=Zalign.Compute_measure_vector(Date,configFile_python)\n\n#Getting the residual wavefront vector\nresidual_vect=Zalign.Simulated_Wavefront_Zalignment_on_vector(field_vector,aligned_state,nbr_zernikes)\n\n###########################################################################################################\n#Choosing the sensing modes: put 1 if you want to keep the mode, 0 if you want to reject it\nsensing_modes=np.zeros((nbr_zernikes))\nsensing_modes[0]=1\nsensing_modes[2]=1\nsensing_modes[4]=1\nsensing_modes[8]=1\nnbr_sensing_modes=np.count_nonzero(sensing_modes)\n\nIM_bis=Zalign.decoup_line(IM,sensing_modes)\nmeasure_vect_bis=Zalign.decoup_element(measure_vect,sensing_modes)\nresidual_vect_bis=Zalign.decoup_element(residual_vect,sensing_modes) \n\nMMSE=np.zeros((4,nbr_iterations+1))\nMMSE[:,0]=np.zeros((4))\nLS=np.zeros((4,nbr_iterations+1))\nLS[:,0]=np.zeros((4))\n\nerror_distance_MMSE=np.zeros((nbr_iterations+1))\nerror_distance_MMSE[0]=result.distance(Zalign.Simulated_Wavefront_Zalignment_on_vector(field_vector,MMSE[:,0],nbr_zernikes),measure_vect)\nerror_distance_LS=np.zeros((nbr_iterations+1))\nerror_distance_LS[0]=result.distance(Zalign.Simulated_Wavefront_Zalignment_on_vector(field_vector,LS[:,0],nbr_zernikes),measure_vect)\n\nresidual_vect_MMSE_bis=np.zeros((nbr_sensing_modes*nb_FoV,nbr_iterations+1))\nresidual_vect_MMSE_bis[:,0]=residual_vect_bis\nresidual_vect_LS_bis=np.zeros((nbr_sensing_modes*nb_FoV,nbr_iterations+1))\nresidual_vect_LS_bis[:,0]=residual_vect_bis \n\nIM_MMSE_bis=np.zeros((nbr_sensing_modes*nb_FoV,4,nbr_iterations))\nIM_LS_bis=np.zeros((nbr_sensing_modes*nb_FoV,4,nbr_iterations))\n\n#Loop on Zemax\nfor ind in range(nbr_iterations):\n\n #For MMSE \n IM_MMSE_bis[:,:,ind]=Zalign.decoup_line(Zalign.IM_on_vectors(field_vector,actuator_value/np.sqrt(ind+1),MMSE[:,ind],nbr_zernikes),sensing_modes) \n MMSE[:,ind+1]=Zalign.MMSE_reconstruction_on_vectors(nb_FoV,noise_level,pert,IM_MMSE_bis[:,:,ind],measure_vect_bis,residual_vect_MMSE_bis[:,ind])\n MMSE[:,ind+1]= MMSE[:,ind+1]+MMSE[:,ind] #add the correction to the former one, MMSE should converge to right result\n #WF simulated\n residual_vect_MMSE_bis[:,ind+1]=Zalign.decoup_element(Zalign.Simulated_Wavefront_Zalignment_on_vector(field_vector,MMSE[:,ind+1],nbr_zernikes),sensing_modes) \n #Correction quality\n error_distance_MMSE[ind+1]=result.distance(Zalign.Simulated_Wavefront_Zalignment_on_vector(field_vector,MMSE[:,ind+1],nbr_zernikes),measure_vect)\n \n #For LS \n IM_LS_bis[:,:,ind]=Zalign.decoup_line(Zalign.IM_on_vectors(field_vector,actuator_value/np.sqrt(ind+1),LS[:,ind],nbr_zernikes),sensing_modes) \n LS[:,ind+1]=Zalign.LS_reconstruction_on_vectors(cond_val,IM_LS_bis[:,:,ind],measure_vect_bis,residual_vect_LS_bis[:,ind])\n LS[:,ind+1]= LS[:,ind+1]+LS[:,ind] #add the correction to the former one, LS should converge to 
right result \n #WF simulated\n residual_vect_LS_bis[:,ind+1]=Zalign.decoup_element(Zalign.Simulated_Wavefront_Zalignment_on_vector(field_vector,LS[:,ind+1],nbr_zernikes),sensing_modes) \n #Correction quality\n error_distance_LS[ind+1]=result.distance(Zalign.Simulated_Wavefront_Zalignment_on_vector(field_vector,LS[:,ind+1],nbr_zernikes),measure_vect)\n","sub_path":"After_WFS_recalculating_IM_in_the_Zemax_loop.py","file_name":"After_WFS_recalculating_IM_in_the_Zemax_loop.py","file_ext":"py","file_size_in_byte":5252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"240639438","text":"\"\"\"\r\n@author: Tiancheng Dai and Kevin Kha\r\n\"\"\"\r\n#pip3 install cryptography\r\nfrom PIL import Image\r\nimport numpy as np\r\nimport sys\r\n\r\nfrom algorithm import isPrime\r\nfrom algorithm import nthPrime\r\nfrom algorithm import getMod\r\nfrom encryption import get_private_key\r\nfrom encryption import encrypt\r\nfrom encryption import decrypt\r\nfrom decode import decToBin\r\nfrom decode import get2LSB\r\nfrom decode import filter2LSB\r\nfrom encode import bytesNeededBin\r\nfrom encode import bytesNeededDec\r\nfrom encode import msgBinary\r\nfrom encode import msgLSB\r\nfrom encode import mod2LSB\r\nfrom encode import binToDec\r\nfrom encode import newPixels\r\nfrom encode import finalImage\r\n \r\ndef encoding(data, listdict, bytes_needed, pixels_bin, msg): \r\n picTo8BitBinary = bytesNeededBin(bytes_needed, pixels_bin) #picture in 8bit binary: ['11111111', ...]\r\n msgTo8BitBinary = msgBinary(msg) #message in 8 bit binary\r\n split8BitBinary = msgLSB(msgTo8BitBinary) #list individual binary: [0,1,...]\r\n msgIn2LSB = mod2LSB(listdict, picTo8BitBinary, split8BitBinary) # list 8bit binary: ['11111101', ...]\r\n LSBtoDec = binToDec(msgIn2LSB) #list of binary above to dec\r\n newDecToList = newPixels(bytes_needed, data, LSBtoDec) #adds the decimal to list\r\n rgbValues = finalImage(data, newDecToList, bytes_needed) #the list turns into the RGB image values\r\n secretImg = Image.fromarray((rgbValues).astype(np.uint8))\r\n secretImg.save(\"Secret_Image.png\")\r\n\r\n print(\"Encoding Success!\")\r\n return \r\n \r\ndef decoding(data, listdict, bytes_needed, pixels_bin, pw):\r\n#def decoding(data, listdict, bytes_needed, pixels_bin): \r\n picToDec = bytesNeededDec(data, bytes_needed)#picture in dec: ['255', ...]\r\n picToBin = decToBin(picToDec) #picture from dec to 8-bit binary\r\n pic2Lsb = get2LSB(picToBin) #gets the 2 LSB from the list above\r\n get2Lsb = filter2LSB(listdict, pic2Lsb)\r\n combineLsb = ''.join(map(str,[str(x) for x in get2Lsb])) \r\n newPic2Lsb = [combineLsb[i:i+8] for i in range(0, len(combineLsb), 8)] \r\n newPicDec = binToDec(newPic2Lsb)\r\n getChar = [chr(x) for x in newPicDec]\r\n \r\n print(\"The secret message is:\")\r\n secretMsg = ''.join(map(str,[str(x) for x in getChar]))\r\n #print(j) #this line is for no encryption\r\n\r\n # Encryption:\r\n encodedMsg = secretMsg.encode(\"utf-8\") \r\n finalMsg = decrypt(encodedMsg,pw)\r\n print(bytes.decode(finalMsg))\r\n return\r\n \r\ndef main(): \r\n args = len(sys.argv)\r\n argPlace = sys.argv\r\n pw = \"CIS628 Syracuse University\"\r\n \r\n # Encoding\r\n if args == 4 and argPlace[1] == \"-e\":\r\n print(\"Encoding:\")\r\n file = open(argPlace[2], \"r\")\r\n msg = file.read()\r\n im = Image.open(argPlace[3])\r\n im = im.convert('RGB')\r\n \r\n #Encryption:\r\n w, h = im.size\r\n sz = (w*h*3)/4\r\n data = np.array(im)\r\n encMsg = (encrypt(msg, pw))\r\n secMsg = 
encMsg.decode(\"utf-8\")\r\n \r\n listdict = getMod(w, h)\r\n #bytes_needed = (len(secMsg) * 4)\r\n bytes_needed = (len(secMsg) * 2 * len(listdict['2']))\r\n pixels_bin = [[[f'{x:08b}' for x in y] for y in z] for z in data] # all picture pixel values in binary\r\n \r\n if len(msg) > sz:\r\n print(\"Encoding failed!\")\r\n print(\"Picture is too small to contain the message.\")\r\n print(\"Pick a larger image.\")\r\n else: \r\n # encoding(data, bytes_needed, pixels_bin, msg) # for no encryption\r\n encoding(data, listdict, bytes_needed, pixels_bin, secMsg)\r\n file.close()\r\n \r\n # Decoding\r\n if args == 4 and argPlace[1] == \"-d\":\r\n print(\"Decoding:\")\r\n file = open(argPlace[2], \"r\")\r\n msg = file.read()\r\n \r\n im = Image.open(argPlace[3])\r\n im = im.convert('RGB')\r\n w, h = im.size\r\n data = np.array(im)\r\n encMsg = (encrypt(msg, pw))\r\n secMsg = encMsg.decode(\"utf-8\")\r\n listdict = getMod(w, h)\r\n #bytes_needed = (len(secMsg) * 4)\r\n bytes_needed = (len(secMsg) * 2 * len(listdict['2']))\r\n pixels_bin = [[[f'{x:08b}' for x in y] for y in z] for z in data]\r\n #decoding(data, bytes_needed, pixels_bin) #for no encryption\r\n decoding(data, listdict, bytes_needed, pixels_bin, pw)\r\n file.close()\r\n \r\n if args != 4:\r\n print(\"Wrong arguments entered.\")\r\n \r\n \r\nif __name__ == \"__main__\":\r\n main()\r\n","sub_path":"steganography.py","file_name":"steganography.py","file_ext":"py","file_size_in_byte":4459,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"193504784","text":"import os\r\n\r\ndef subdata(path, newpath, rate=0.5):\r\n\r\n samples = [[] for i in range(101)]\r\n with open(path,'r') as f:\r\n lines = f.readlines()\r\n for j in range(101):\r\n for i in range(len(lines)):\r\n name, label = lines[i].split()\r\n if int(label)==j+1:\r\n samples[j].append(lines[i])\r\n for i in range(len(samples)):\r\n num = len(samples[i]) * rate\r\n #internal = int(len(samples[i]) / num)\r\n #newsamples = samples[i][:num]\r\n for j in range(int(num)):\r\n line = samples[i][j]\r\n name, label = line.split()\r\n samples[i][j] = name + ' ' + '0'+'\\n'\r\n\r\n with open(newpath, 'w') as f:\r\n for i in range(len(samples)):\r\n for j in range(len(samples[i])):\r\n print(\"writing {}\".format(samples[i][j]))\r\n f.write(samples[i][j])\r\n\r\n\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n path = \"/home/share2/ziming/bitaction/data/ucf101/annotations/trainlist01.txt\"\r\n newpath = \"/home/share2/ziming/bitaction/data/ucf101/annotations/trainlist01_20p.txt\"\r\n subdata(path,newpath,rate=0.2)\r\n # rate*samples is set as unlabeled\r\n\r\n\r\n","sub_path":"data_tools/ucf101/g_subdataset.py","file_name":"g_subdataset.py","file_ext":"py","file_size_in_byte":1196,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"43956917","text":"from tkinter import StringVar, Tk, Label, Button, Entry, CENTER, filedialog, END, messagebox, RAISED\nimport constant as cons\nimport util\nfrom keras.models import load_model\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport cv2\nfrom datetime import datetime\n\nclass gui_obj(Tk):\n def __init__(self):\n Tk.__init__(self)\n self.title(cons.TITLE)\n self.minsize(width=cons.WIDTH, height=cons.HEIGHT)\n self.resizable(False, False)\n self.iconbitmap(cons.ICON_PATH)\n self.configure(background='darkslategray')\n \n self.input_file = StringVar()\n self.__input_path = ''\n self.model_file = StringVar()\n self.__model_path = ''\n \n 
self.__is_test_set_img = False\n \n self.lb_topic = Label(self, text=cons.TOPIC, bg='darkslategray', fg=\"gold\", font=(\"\", 23, 'bold'))\n self.lb_topic.place(relx=0.5, rely=0.15, anchor=CENTER)\n \n self.lb_img_info = Label(self, text=\"\", bg='darkslategray', fg=\"lawngreen\", font=(\"\", 10, ''))\n self.lb_img_info.place(relx=0.20, rely=0.31)\n self.lb_input_file = Label(self, text=\"Input image:\", bg='darkslategray', fg=\"white\", font=(\"\", 14, ''))\n self.lb_input_file.place(relx=0.01, rely=0.38)\n self.en_input_file = Entry(self, borderwidth=2, state='readonly', textvariable=self.input_file)\n self.en_input_file.place(relx=0.20, rely=0.38, relwidth=0.62, relheight=0.11)\n self.btn_choose_input_file = Button(self, text =\"Choose file\", bg='lightgrey', fg=\"black\", borderwidth=4, relief=RAISED, command=lambda:self.upload_image())\n self.btn_choose_input_file.place(relx=0.84, rely=0.38, relwidth=0.14, relheight=0.11)\n \n self.lb_model_file = Label(self, text=\"Input model:\", bg='darkslategray', fg=\"white\", font=(\"\", 14, ''))\n self.lb_model_file.place(relx=0.01, rely=0.55)\n self.en_model_file = Entry(self, borderwidth=2, state='readonly', textvariable=self.model_file)\n self.en_model_file.place(relx=0.20, rely=0.55, relwidth=0.62, relheight=0.11)\n self.btn_choose_model_file = Button(self, text =\"Choose file\", bg='lightgrey', fg=\"black\", borderwidth=4, relief=RAISED, command=lambda:self.upload_model())\n self.btn_choose_model_file.place(relx=0.84, rely=0.55, relwidth=0.14, relheight=0.11)\n \n self.btn_predict = Button(self, text =\"Predict\", bg='teal', fg=\"white\", font=(\"\", 14, 'bold'), borderwidth=4, relief=RAISED, command=lambda:self.predict())\n self.btn_predict.place(relx=0.06, rely=0.75, relwidth=0.28, relheight=0.2)\n \n self.btn_clear = Button(self, text =\"Clear all\", bg='teal', fg=\"white\", font=(\"\", 14, 'bold'), borderwidth=4, relief=RAISED, command=lambda:self.clear_all())\n self.btn_clear.place(relx=0.36, rely=0.75, relwidth=0.28, relheight=0.2)\n \n self.btn_quit = Button(self, text =\"Exit\", bg='teal', fg=\"white\", font=(\"\", 14, 'bold'), borderwidth=4, relief=RAISED, command=lambda:self.exit_app())\n self.btn_quit.place(relx=0.66, rely=0.75, relwidth=0.28, relheight=0.2)\n \n util.make_center(self)\n \n def __check_img(self):\n if cons.IMG_TESTSET in self.en_input_file.get():\n self.lb_img_info.config(text=cons.IMG_INFO)\n self.__is_test_set_img = True\n else:\n self.lb_img_info.config(text='')\n self.__is_test_set_img = False\n \n def upload_image(self):\n try:\n self.__input_path = filedialog.askopenfilename(title=\"Select image file\", \n filetypes=[(\"image files\", \"*.png *.PNG *.jpg *.JPG *.jpeg *.JPEG\")], \n initialdir='./')\n if self.__input_path:\n self.input_file.set(self.__input_path)\n \n self.__check_img()\n except Exception as ex:\n print(ex)\n messagebox.showerror(\"Upload Image Error\", \"Error occurred while uploading image!\")\n \n def upload_model(self):\n try:\n self.__model_path = filedialog.askopenfilename(title=\"Select model file\", \n filetypes=[(\"model files\", \"*.h5\")], \n initialdir='./')\n if self.__model_path:\n self.model_file.set(self.__model_path)\n except Exception as ex:\n print(ex)\n messagebox.showerror(\"Upload Model Error\", \"Error occurred while uploading model!\")\n\n def predict(self):\n try:\n if not util.check_valid_path(self.en_input_file.get()):\n messagebox.showwarning(\"Warning\",\"Your input image is invalid.\")\n elif not util.check_valid_path(self.en_model_file.get()):\n 
messagebox.showwarning(\"Warning\",\"Your input model is invalid.\")\n elif util.check_valid_path(self.en_input_file.get()) and util.check_valid_path(self.en_model_file.get()):\n # load label\n print(\"[INFO]: LOADING LABEL...\")\n self.__label_data = pd.read_csv(cons.SIGNNAMES)\n self.__label_values = self.__label_data['SignName'].values\n print(\"[INFO]: FINISH LOADING LABEL.\")\n \n # load Model\n print(\"[INFO]: LOADING MODEL...\")\n self.__model = load_model(self.en_model_file.get())\n print(\"[INFO]: FINISH LOADING MODEL.\")\n \n # PREDICT PROCESS\n print(\"[INFO]: PREDICTING IMAGE...\")\n img = cv2.imread(self.en_input_file.get())\n \n # get ROI values in Test.csv\n y_test = pd.read_csv(\"./input/Test.csv\")\n x1_val = y_test['Roi.X1'].values\n y1_val = y_test['Roi.Y1'].values\n x2_val = y_test['Roi.X2'].values\n y2_val = y_test['Roi.Y2'].values\n \n if self.__is_test_set_img:\n try:\n img_bbx = img.copy()\n x1, y1, x2, y2 = util.get_roi(self.en_input_file.get(), x1_val, y1_val, x2_val, y2_val)\n proposal = img[y1:y2, x1:x2]\n result = util.recognize_sign([proposal], self.__model)[0]\n sign_name = util.load_name(result, self.__label_values)\n if len(sign_name) > 0:\n # wm = plt.get_current_fig_manager()\n # wm.window.showMaximized()\n plt.imshow(cv2.cvtColor(img_bbx, cv2.COLOR_BGR2RGB))\n plt.axis(\"off\")\n plt.title(\"Result of prediction: \" + sign_name)\n plt.show()\n else:\n messagebox.showinfo(\"Infomation\", \"Sorry. Can not recognize any traffic sign in this image.\")\n except Exception as ex:\n print(ex)\n messagebox.showinfo(\"Infomation\", \"Sorry. Can not recognize any traffic sign in this image.\")\n else:\n # convert Image to Binary Image\n img_bbx = img.copy()\n rows, cols, _ = img.shape\n img_bin = util.preprocess_img(img, False)\n \n # localize Traffic Sign (find Contours and draw to Image)\n min_area = img_bin.shape[0]*img.shape[1]/(25*25)\n rects = util.detect_contour(img_bin, min_area=min_area)\n img_rects = util.draw_rects_on_img(img, rects)\n \n sign_names = []\n sep = ', '\n \n # recognize Traffic sign\n for rect in rects:\n xc = int(rect[0] + rect[2]/2)\n yc = int(rect[1] + rect[3]/2)\n size = max(rect[2], rect[3])\n x1 = max(0, int(xc-size/2))\n y1 = max(0, int(yc-size/2))\n x2 = min(cols, int(xc + size/2))\n y2 = min(rows, int(yc + size/2))\n proposal = img[y1:y2, x1:x2]\n result = util.recognize_sign([proposal], self.__model)[0]\n cv2.rectangle(img_bbx, (rect[0],rect[1]), (rect[0] + rect[2], rect[1] + rect[3]), (0, 0, 255), 2)\n cv2.putText(img_bbx, str(result), (rect[0], rect[1]), 1, 1.5, (0, 0, 255), 2)\n cv2.putText(img_bbx, util.load_name(result, self.__label_values), (rect[0], rect[1] + rect[3] + 20), 1, 1.5, (0, 0, 255), 2)\n sign_names.append(util.load_name(result, self.__label_values))\n \n if len(sign_names) > 0:\n sep_res = sep.join(sign_names).replace('/', '-')\n time = datetime.now().strftime(\"%Y-%m-%d %Hh-%Mm-%Ss\")\n dir_path = cons.RESULT_PATH + time\n util.create_directory(dir_path)\n res_path = dir_path + '\\\\{}_' + sep_res.replace(',','-') + '.jpg'\n cv2.imwrite(res_path.format(\"BIN_IMAGE\"), img_bin)\n print(\"[INFO]: Saved binary image to \" + res_path.format(\"BIN_IMAGE\"))\n cv2.imwrite(res_path.format(\"RECTS_IMAGE\"), img_rects)\n print(\"[INFO]: Saved rects image to \" + res_path.format(\"RECTS_IMAGE\"))\n cv2.imwrite(res_path.format(\"PREDICTED_IMAGE\"), img_bbx)\n print(\"[INFO]: Saved predicted image to \" + res_path.format(\"PREDICTED_IMAGE\"))\n # wm = plt.get_current_fig_manager()\n # wm.window.showMaximized()\n 
plt.imshow(cv2.cvtColor(img_bbx, cv2.COLOR_BGR2RGB))\n                            plt.axis(\"off\")\n                            plt.title(\"Result of prediction: \" + sep_res)\n                            plt.show()\n                            messagebox.showinfo(\"Infomation\", \"Finish. Your result is saved in {}\\\\\".format(dir_path))\n                        else:\n                            messagebox.showinfo(\"Infomation\", \"Sorry. Can not recognize any traffic sign in this image.\")\n                \n                print(\"[INFO]: FINISH PREDICTING IMAGE.\")\n        except Exception as ex:\n            print(ex)\n            messagebox.showerror(\"Error\", \"Error occurred while predicting image!\")\n    \n    def exit_app(self):\n        try:\n            msg = messagebox.askquestion('Exit Application','Are you sure to exit?',icon = 'info')\n            if msg == 'yes':\n                self.destroy()\n        except Exception as ex:\n            print(ex)\n            messagebox.showerror(\"Error\", \"Error occurred while exiting the application!\")\n    \n    def clear_all(self):\n        try:\n            if len(self.en_input_file.get()) > 0 or len(self.en_model_file.get()) > 0:\n                msg = messagebox.askquestion('Clear Action','Are you sure to clear?',icon = 'info')\n                if msg == 'yes':\n                    self.lb_img_info.config(text='')\n                    self.__is_test_set_img = False\n                    self.__input_path = ''\n                    self.__model_path = ''\n                    self.en_input_file.config(state='normal')\n                    self.en_model_file.config(state='normal')\n                    self.en_input_file.delete('0', END)\n                    self.en_model_file.delete('0', END)\n                    self.en_input_file.config(state='readonly')\n                    self.en_model_file.config(state='readonly')\n        except Exception as ex:\n            print(ex)\n            messagebox.showerror(\"Error\", \"Error occurred while clearing!\")","sub_path":"source-code/gui.py","file_name":"gui.py","file_ext":"py","file_size_in_byte":11971,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"395334568","text":"from tkinter import *\nfrom PIL import ImageTk\n\n# create the main application window\nroot = Tk()\nroot.title(\"Calculator\")\n\ne = Entry(root)\ne.grid(row=0, column=0, columnspan=5)\nfirst = \"\"\n\n\ndef button_click(number):\n    current = e.get()\n    e.delete(0, END)\n    e.insert(0, str(current) + str(number))\n\n\ndef add():\n    global first\n    first = e.get()\n    e.delete(0, END)\n    return first\n\n\ndef clear():\n    e.delete(0, END)\n\n\ndef equal():\n    global first\n    second = e.get()\n    e.delete(0, END)\n    result = int(first) + int(second)\n    e.insert(0, result)\n\n\nbutton_1 = Button(root, text=\"1\", padx=40, pady=20, command=lambda: button_click(1))\nbutton_2 = Button(root, text=\"2\", padx=40, pady=20, command=lambda: button_click(2))\nbutton_3 = Button(root, text=\"3\", padx=40, pady=20, command=lambda: button_click(3))\nbutton_4 = Button(root, text=\"4\", padx=40, pady=20, command=lambda: button_click(4))\nbutton_5 = Button(root, text=\"5\", padx=40, pady=20, command=lambda: button_click(5))\nbutton_6 = Button(root, text=\"6\", padx=40, pady=20, command=lambda: button_click(6))\nbutton_7 = Button(root, text=\"7\", padx=40, pady=20, command=lambda: button_click(7))\nbutton_8 = Button(root, text=\"8\", padx=40, pady=20, command=lambda: button_click(8))\nbutton_9 = Button(root, text=\"9\", padx=40, pady=20, command=lambda: button_click(9))\nbutton_0 = Button(root, text=\"0\", padx=40, pady=20, command=lambda: button_click(0))\nbutton_add = Button(root, text=\"+\", padx=40, pady=20, command=lambda: add())\nbutton_clear = Button(root, text=\"C\", padx=40, pady=20, command=lambda: clear())\nbutton_equal = Button(root, text=\"=\", padx=40, pady=20, command=lambda: equal())\n\nbutton_1.grid(row=3, column=0)\nbutton_2.grid(row=3, column=1)\nbutton_3.grid(row=3, column=2)\n\nbutton_4.grid(row=2, column=0)\nbutton_5.grid(row=2, column=1)\nbutton_6.grid(row=2, 
column=2)\n\nbutton_7.grid(row=1, column=0)\nbutton_8.grid(row=1, column=1)\nbutton_9.grid(row=1, column=2)\n\nbutton_0.grid(row=4, column=1)\nbutton_add.grid(row=5, column=0)\nbutton_clear.grid(row=5, column=1)\nbutton_equal.grid(row=5, column=2)\n\n# def button_click():\n# pass\n\n\n# my_label = Label(root, text=\"Hello World!\")\n# my_label2 = Label(root, text=\"Manders\")\n# my_label3 = Label(root, text=\"Farty\")\n\n# my_label.grid(row=0, column=0)\n# my_label2.grid(row=1, column=5)\n# my_label3.grid(row=1, column=3)\n\n# def my_click():\n# hello = Label(root, text=\"Hello \" + e.get())\n# my_label.pack()\n\n\n# my_button = Button(root, text=\"click\", padx=50, command=my_click)\n# my_button.pack()\n\n\nroot.mainloop()\n","sub_path":"8.py","file_name":"8.py","file_ext":"py","file_size_in_byte":2488,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"199392483","text":"from scipy.stats import chisquare\nimport numpy as np \nimport matplotlib.pyplot as plt \nimport scipy.integrate as integrate\nfrom scipy.special import gamma\n\nimport os,sys,inspect\n# currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))\n# parentdir = os.path.dirname(currentdir)\n# sys.path.insert(0,parentdir) \n# from empirical_tests import _freqs \n\ndef _freqs(seq, lower_bound, upper_bound, n, normalized=False):\n \"\"\"\n Return frequences of sequence values\n\n Inputs:\n - seq: Array of integers. Observable sequence\n - lower_bound: Integer lower bound of the domain\n of generator values\n - upper_bound: Integer upper bound of the domain\n of generator values\n - n: Integer number of regions\n\n Outputs:\n - freqs: Array of occurences of values in each region \n with region_width\n - region_width: Float width of regions\n \"\"\"\n freqs = []\n region_width = (upper_bound - lower_bound) / n \n\n for i in range(n):\n low = lower_bound + i * region_width\n high = lower_bound + i * region_width + region_width\n freqs.append( np.logical_and(seq >= low, seq < high).sum() )\n\n # because last interval has '[a;b]' - bounds, not '[a,b)'\n freqs[-1] += 1\n \n if normalized:\n freqs = np.array(freqs) / len(seq)\n\n return np.array(freqs), region_width\n\ndef chisquare(empiric_freqs, probs, n, alpha):\n \"\"\"\n Inputs:\n - empiric: Empirical frequencies\n - probs: Theoretical probabilities\n - n: Size of sequence\n \"\"\"\n k = len(empiric_freqs)\n r = k - 1\n s = (np.square(empiric_freqs - probs) / probs).sum() * n \n p = integrate.quad( lambda x: x**(r/2 - 1) * np.exp(-x/2) , s, np.inf)[0] \\\n / (2**(r/2) * gamma(r/2))\n is_rejected = p <= alpha\n\n return is_rejected, s, p\n\n\n\ndef chisquare_uniform(seq, \n lower_bound, \n upper_bound, \n alpha=0.05, \n n = None, \n verbose=True):\n \"\"\"\n Statistical test applied to sets of categorical data to \n evaluate how likely it is that any observed difference \n between the sets arose by chance.\n\n Inputs:\n - seq: Array of integers\n - lower_bound: Integer lower bound of the domain of \n generator values\n - upper_bound: Integer upper bound of the domain of \n generator values\n - alpha: Float desirible level of significance\n - n: Integer number of regions (default is None)\n - verbose: Boolean; If set to true then print logs\n\n Outputs:\n - Boolean; If hypothesis is ejected\n \"\"\"\n seq = np.array(seq)\n if n is None:\n n = len(seq)\n else:\n seq = np.random.choice(seq, n)\n\n k = int(5 * np.log(n))\n r = k - 1 \n\n freqs, _ = _freqs(seq, lower_bound, upper_bound, k)\n\n freqs = 
np.array(freqs) / n \n p = 1 / k\n s = (np.square(freqs - p) / p).sum() * n \n\n p = integrate.quad( lambda x: x**(r/2 - 1) * np.exp(-x/2) , s, np.inf)[0] \\\n / (2**(r/2) * gamma(r/2))\n is_rejected = p <= alpha\n\n # list for x-values for barplot\n region_size = upper_bound / k \n left_borders = []\n for i in range(k):\n low = i * region_size\n high = i * region_size + region_size\n left_borders.append(low)\n\n if verbose:\n plt.ylim(-0.0, 0.15)\n plt.bar(left_borders, height=freqs, align='edge', width=upper_bound/k,\n color='blue', label='frequences')\n plt.hlines(1 / k, lower_bound, upper_bound, 'r', label='n / k')\n plt.title(f'Frequency bar plot (1/k = {1/k})') \n plt.legend()\n plt.show() \n\n print(f'S = {s}')\n print(f'Number of interval k = {k}')\n print(f'Sequence length n = {n}')\n print('P = %f' % p)\n\n # show bar plot\n # plt.ylim(-0.1, 0.3)\n # plt.bar(left_borders, height=freqs, align='edge', \n # color='blue', label='frequences')\n # plt.title(f'Frequency bar plot (1/k = {1/k})') \n # plt.legend()\n # plt.show()\n\n return is_rejected \n","sub_path":"4_th lab/hypothesis/param.py","file_name":"param.py","file_ext":"py","file_size_in_byte":4133,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"523631355","text":"import math\nfrom math import atan2, degrees\nfrom typing import Tuple\nfrom rcj_soccer_robot import RCJSoccerRobot, TIME_STEP, ROBOT_NAMES\nimport utils\n\n\nblueTeamNames = ['B1', 'B2', 'B3']\nyellowTeamNames = ['Y1', 'Y2', 'Y3']\n\n\nclass MyRobot1(RCJSoccerRobot):\n\n left = 0\n right = 0\n role = 'goalkeeper'\n attackerFlag = True\n lastBallPosition = None\n ballPostion = None\n ballLackOfProgressCounter=0\n ballLackOfProgress=False\n\n def newDataProccessing(self):\n self.rawData = self.get_new_data()\n self.position = self.rawData[self.name]['x'], self.rawData[self.name]['y']\n self.lastBallPosition=self.ballPostion\n self.ballPostion = self.rawData['ball']['x'], self.rawData['ball']['y']\n\n self.distances = {}\n for i in ROBOT_NAMES:\n self.distances[i] = utils.getDistance(\n (self.rawData[i]['x'], self.rawData[i]['y']), (self.rawData['ball']['x'], self.rawData['ball']['y']))\n\n self.orientations = {}\n for i in ROBOT_NAMES:\n self.orientations[i] = math.degrees(self.rawData[i]['orientation'])\n\n self.positions={}\n for i in ROBOT_NAMES:\n self.positions[i]=self.rawData[i]['x'], self.rawData[i]['y']\n\n self.distance = self.distances[self.name]\n\n self.orientation = utils.correctAngle(self.orientations[self.name])\n if self.team=='B':\n self.orientation -= 180\n self.orientation=utils.correctAngle(self.orientation)\n\n self.ballAngle = self.get_angles(\n self.rawData['ball'], self.rawData[self.name])[0]\n\n def move(self, position):\n x, y = position\n angle = utils.correctAngle(self.get_angles(\n {'x': x, 'y': y}, self.rawData[self.name])[0])\n direction = utils.getDirectionBy4(angle)\n value = self.distance/0.15*1.5\n if value < 1:\n value = 1\n if angle < 90 or angle > 270:\n if direction == 1:\n self.left = -10\n self.right = -value\n elif direction == 3:\n self.left = -value\n self.right = -10\n elif direction == 0:\n self.left = -10\n self.right = -10\n else:\n if direction == 1:\n self.left = 10\n self.right = value\n elif direction == 3:\n self.left = value\n self.right = 10\n elif direction == 2:\n self.left = 10\n self.right = 10\n if self.left>10:\n self.left=10\n if self.right>10:\n self.right=10\n if self.left<-10:\n self.left=-10\n if self.right<-10:\n self.right=-10\n\n def 
stop(self):\n self.left = 0\n self.right = 0\n\n def getRouteAngle(self, pos1: iter, pos2: iter) -> float:\n xDiff = pos2[0] - pos1[0]\n yDiff = pos2[1] - pos1[1]\n return utils.correctAngle(degrees(atan2(yDiff, xDiff)))\n\n def checkLackOfProgress(self):\n error=0.012\n if self.ballPostion!=None and self.lastBallPosition!=None:\n if abs(self.ballPostion[0]-self.lastBallPosition[0])<=error and abs(self.ballPostion[1]-self.lastBallPosition[1])<=error:\n self.ballLackOfProgressCounter+=1\n else:\n self.ballLackOfProgressCounter=0\n self.ballLackOfProgress=False\n if self.ballLackOfProgressCounter>=30:\n self.ballLackOfProgress=True\n\n def getSegment(self, position:Tuple[float, float]):\n return position[0]//0.35+2, position[1]//0.3+2\n\n def getCrowdedSegment(self):\n segmentCounter={}\n for i in ROBOT_NAMES:\n segment=str(self.getSegment(self.positions[i]))\n if segment in segmentCounter.keys():\n segmentCounter[segment]+=1\n else:\n segmentCounter[segment]=1\n segment=str(self.getSegment(self.ballPostion))\n if segment in segmentCounter.keys():\n segmentCounter[segment]+=1\n else:\n segmentCounter[segment]=1\n sorted(segmentCounter.items(), key=lambda x:x[1])\n\n def run(self):\n if self.team=='B':\n blueTeamNames.remove(self.name)\n else:\n yellowTeamNames.remove(self.name)\n\n while self.robot.step(TIME_STEP) != -1:\n if self.is_new_data():\n\n self.newDataProccessing()\n # print(self.name, self.role)\n # d={}\n # for i in blueTeamNames:\n # d[i]=self.distances[i]\n # d[self.name]=self.distance\n # ls=sorted(d.items(), key=lambda x:x[1])\n\n self.checkLackOfProgress()\n\n\n if self.team=='B':\n if self.role=='shooter':\n self.checkLackOfProgress()\n if self.ballLackOfProgress:\n self.move((0.2, 0))\n if 0.15self.orientation>80:\n self.stop()\n elif 90>self.orientation>0 or 360>self.orientation>270:\n self.left=-5\n self.right=5\n else:\n self.left=5\n self.right=-5\n else:\n if self.ballPostion[0]<0.55:\n self.move(self.ballPostion)\n else:\n self.stop()\n elif self.role=='attacker':\n if self.ballPostion[0]<0.55:\n self.move(self.ballPostion)\n else:\n self.stop()\n elif self.role=='goalkeeper':\n if self.position[0]<0.65:\n self.move((0.7, 0))\n else:\n if self.orientation>170 and self.orientation<190:\n if self.ballPostion[1]>self.position[1]:\n self.left=-10\n self.right=-10\n elif self.ballPostion[1]180 and self.orientation<360:\n self.left=-5\n self.right=5\n else:\n self.left=5\n self.right=-5\n \n\n \n else:\n if self.role=='shooter':\n self.checkLackOfProgress()\n if self.ballLackOfProgress:\n self.move((-0.2, 0))\n if -0.15>self.position[0]>-0.25 and -0.05self.orientation>80:\n self.stop()\n elif 90>self.orientation>0 or 360>self.orientation>270:\n self.left=-5\n self.right=5\n else:\n self.left=5\n self.right=-5\n else:\n if self.ballPostion[0]>-0.55:\n self.move(self.ballPostion)\n else:\n self.stop()\n elif self.role=='attacker':\n if self.ballPostion[0]>-0.55:\n self.move(self.ballPostion)\n else:\n self.stop()\n elif self.role=='goalkeeper':\n if self.position[0]>-0.65:\n self.move((-0.7, 0))\n else:\n if self.orientation>350 or self.orientation<10:\n if self.ballPostion[1]>self.position[1]:\n self.left=-10\n self.right=-10\n elif self.ballPostion[1]0 and self.orientation<180:\n self.left=-5\n self.right=5\n else:\n self.left=5\n self.right=-5\n \n\n self.left_motor.setVelocity(self.left)\n 
self.right_motor.setVelocity(self.right)","sub_path":"robot/robot1.py","file_name":"robot1.py","file_ext":"py","file_size_in_byte":9515,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"37052144","text":"import re\nimport sys\n\ndef mipmap(filename):\n    with open(filename+\".bak\", \"w\") as targetf:\n        lines = []\n        with open(filename) as sourcef:\n            for line in sourcef:\n                line = line.strip()\n                if not line:\n                    continue\n                if re.search(r\"^\\d+\", line):\n                    continue\n                # strip inline markup tags and lowercase the caption text\n                line, _ = re.subn(r\"<.*?>\", \" \", line)\n                line = line.lower()\n                lines.append(line)\n        targetf.write(\"\\n\".join(lines))\n\n\nmipmap(\"Jim_Acoasta_Trump.srt\")\n","sub_path":"handle_cc.py","file_name":"handle_cc.py","file_ext":"py","file_size_in_byte":584,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"625637313","text":"\"\"\"Scanner class for syntax and static semantic analyzer.\n\nTypical usage example:\n    new_scn = Scanner(cio_instance)\n    new_tok = new_scn.next_token()\n\"\"\"\n\n\nfrom typing import List\nimport token\nimport chario\n\n\nclass Scanner(object):\n    \"\"\"Recognizes tokens from the chario text stream and provides them to the parser.\n\n    Attributes:\n        chario: A chario object to receive text stream from.\n        buffer: A list of strings for temporary character storage.\n        char: A string of single character received from chario.\n    \"\"\"\n\n    def __init__(self, cio: chario.Chario) -> None:\n        \"\"\"Init with chario.\"\"\"\n        self.chario: chario.Chario = cio\n        self.buffer: List[str] = list()\n        self.char: str = self.chario.get_char()\n\n    def __get_char(self) -> None:\n        \"\"\"Update self char attribute with next character from chario.\"\"\"\n        self.char = self.chario.get_char()\n\n    def __buffer_to_str(self) -> str:\n        \"\"\"Create string by joining characters in buffer attribute.\n\n        Return:\n            A string combined from characters in buffer.\n        \"\"\"\n        return \"\".join(self.buffer) if self.buffer else None\n\n    def __reset_buffer(self) -> None:\n        \"\"\"Clear buffer attribute.\"\"\"\n        self.buffer.clear()\n\n    def __skip_whitespaces(self) -> None:\n        \"\"\"Read stream until character is not whitespace.\"\"\"\n        while self.char.isspace() and self.chario:\n            self.__get_char()\n\n    def __get_token_integer(self) -> token.Token:\n        \"\"\"Read stream and recognize integer token.\"\"\"\n        while self.char.isdigit():\n            self.buffer.append(self.char)\n            self.__get_char()\n        return token.Token(self.__buffer_to_str(), \"int\")\n\n    # Disabled due to incompatibility with TinyAda EBNF\n    # def __get_token_string(self) -> token.Token:\n    #     \"\"\"Read stream and recognize string token.\"\"\"\n    #     self.__get_char()\n    #     while not self.char == '\"':\n    #         self.buffer.append(self.char)\n    #         self.__get_char()\n    #     self.__get_char()\n    #     return token.Token(self.__buffer_to_str(), \"str\")\n\n    def __get_token_keyword_identifier(self) -> token.Token:\n        \"\"\"Read stream and recognize identifier or keyword token.\"\"\"\n        while self.char.isalpha() or self.char.isdigit() or self.char == \"_\":\n            self.buffer.append(self.char)\n            self.__get_char()\n        word: str = self.__buffer_to_str()\n        new_tok: token.Token = token.lit_to_tok(word)\n        return new_tok if new_tok else token.Token(word, \"id\")\n\n    def __get_token_double_operator(self) -> token.Token:\n        \"\"\"Read stream and recognize double operator token.\"\"\"\n        self.buffer.append(self.char)\n        self.__get_char()\n        self.buffer.append(self.char)\n        new_tok: token.Token = token.lit_to_tok(self.__buffer_to_str())\n        return 
new_tok\n\n def __get_token_single_operator(self) -> token.Token:\n \"\"\"Read stream and recognize single operator token.\"\"\"\n return token.lit_to_tok(self.buffer[0])\n\n def next_token(self) -> token.Token:\n \"\"\"Recognize token from text stream provided by chario object.\n\n Return:\n A Token class instance containinig recognized token.\n \"\"\"\n self.__skip_whitespaces()\n if self.char == chr(3):\n return token.Token(\"eof\", \"eof\")\n self.__reset_buffer()\n new_tok: token.Token = None\n if self.char.isdigit():\n new_tok = self.__get_token_integer()\n elif self.char.isalpha(): # Ada lang allows only letter start\n new_tok = self.__get_token_keyword_identifier()\n else:\n new_tok = self.__get_token_double_operator()\n if not new_tok:\n new_tok = self.__get_token_single_operator()\n if not new_tok:\n self.chario.put_error(\"An unknown symbol\")\n else:\n self.__get_char()\n return new_tok if new_tok else self.next_token()\n","sub_path":"src/semantic_analyzer/scanner.py","file_name":"scanner.py","file_ext":"py","file_size_in_byte":4001,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"465507481","text":"#!/usr/bin/env python3\n\n\"\"\"\nA simple example from \nhttps://rdflib.readthedocs.io/en/stable/intro_to_creating_rdf.html.\n\"\"\"\n\nfrom rdflib import URIRef, BNode, Literal\n\n# Creating nodes:\nbob = URIRef(\"http://example.org/people/Bob\")\nlinda = BNode() # a GUID is generated. BNode = blank node\n\nname = Literal(\"Bob\") # passing a string\nage = Literal(24) # passing a python int\nheight = Literal(76.5) # passing a python float\n\n# To create many URIRefs in the same namespace, that is, URIs with the\n# same prefix, RDFLib has the rdflib.namespace.Namespace class:\n\nfrom rdflib import Namespace\n\nn = Namespace(\"http://example.org/people/\")\nn.bob # = rdflib.term.URIRef(u\"http://example.org/people/bob\")\nn.eve # = rdflib.term.URIRef(u\"http://example.org/people/eve\")\n\n# This is very useful for schemas where all properties and classes\n# have the same URI prefix, RDFLib pre-defines Namespaces for the most\n# common RDF schemas:\n\nfrom rdflib.namespace import RDF, FOAF\n\nRDF.type\n# = rdflib.term.URIRef(u\"http://www.w3.org/1999/02/22-rdf-syntax-ns#type\")\n\nFOAF.knows\n# = rdflib.term.URIRef(u\"http://xmlns.com/foaf/0.1/knows\")\n\nfrom rdflib import Graph\ng = Graph()\n\ng.add((bob, RDF.type, FOAF.Person))\ng.add((bob, FOAF.name, name))\ng.add((bob, FOAF.knows, linda))\ng.add((linda, RDF.type, FOAF.Person))\ng.add((linda, FOAF.name, Literal(\"Linda\")))\n\noutput = g.serialize(format=\"turtle\").decode(\"UTF-8\")\nprint(output.replace(\"\\\\n\", \"\\n\"))\n","sub_path":"python-ref/rdflib/ex4-adding-triples.py","file_name":"ex4-adding-triples.py","file_ext":"py","file_size_in_byte":1440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"91986808","text":"import sqlite3\nimport csv\nfrom datetime import datetime\nfrom common import TripDataEntry\n\nwith open('trip_data_1.csv', newline='') as csvfile, \\\n sqlite3.connect('trip.db') as conn:\n reader = csv.reader(csvfile)\n next(reader) # skip header\n \n buffer = []\n BUFFER_SIZE = 1000\n def insert_entry(entry, oncommit):\n def write_buffer():\n cur = conn.cursor()\n def to_tuple(entry):\n return (entry.medallion, entry.hack_license,\n entry.pickUpTime, entry.dropOffTime,\n entry.passengerCount, entry.tripDuration,\n entry.tripDistance, \n entry.pickUpCoords[0], entry.pickUpCoords[1],\n 
entry.dropOffCoords[0], entry.dropOffCoords[1])\n            cur.executemany(\"\"\"\n                INSERT OR IGNORE INTO Trip \n                (MEDALLION, HACK_LICENSE, PICKUP_TIME, DROPOFF_TIME, \n                PASSENGER_COUNT, TRIP_DURATION, TRIP_DISTANCE,\n                PICKUP_LONGITUDE, PICKUP_LATITUDE,\n                DROPOFF_LONGITUDE, DROPOFF_LATITUDE) \n                VALUES (?,?,?,?,?,?,?,?,?,?,?);\n            \"\"\", map(to_tuple, buffer))\n            buffer.clear()\n            conn.commit()\n            oncommit()\n\n        if not entry and buffer:\n            write_buffer()\n            return\n        buffer.append(entry)\n        if len(buffer) >= BUFFER_SIZE:\n            write_buffer()\n\n    rowCount = 0\n    def on_commit():\n        print(\"Rows inserted: {}\".format(rowCount))\n\n    for row in reader:\n        rowCount += 1\n        try:\n            # parse the row already read by the loop into a trip record\n            entry = TripDataEntry(row)\n            insert_entry(entry, oncommit = on_commit)\n        except Exception as e:\n            print(\"Error on row {}\\nError:{}\\nRow:{}\".format(rowCount, row, str(e)))\n    insert_entry(None, oncommit = on_commit)\n","sub_path":"dumpdb.py","file_name":"dumpdb.py","file_ext":"py","file_size_in_byte":1835,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"293884429","text":"#!/usr/bin/env python\n#\n# Copyright 2014 - 2015 The BCE Authors. All rights reserved.\n# Use of this source code is governed by a BSD-style license that can be\n# found in the license.txt file.\n#\n\nimport bce.option as _opt\nimport bce.api as _api\nimport bce.utils.input_checker as _input_chk\nimport bce_compute_server.packages.common.lang_util as _lu\n\n\ndef balancer_processor(input_data):\n    \"\"\"Processor of balancing chemical equation.\n\n    :type input_data: dict\n    :param input_data: Input data.\n    \"\"\"\n\n    # Input data format:\n    # {\n    #     \"expression\": \"[Unbalanced chemical equation / expression]\",\n    #     \"language_name\": \"[Language name]\"\n    # }\n\n    # Initialize BCE option instance.\n    opt = _opt.Option()\n\n    # Set protected unknown symbol header.\n    opt.set_protected_math_symbol_header(\"X\")\n\n    # Check 'language_name' argument.\n    if not (\"language_name\" in input_data):\n        return {\n            \"success\": False,\n            \"error\": \"BCE_NO_ARG_LNAME\"\n        }\n\n    # Get language name.\n    lang_name = input_data[\"language_name\"]\n\n    if not isinstance(lang_name, str):\n        return {\n            \"success\": False,\n            \"error\": \"BCE_TYPE_ERROR_LNAME\",\n        }\n\n    # Set language.\n    opt.set_message_language(_lu.convert_language_name_to_bce_message_id(lang_name))\n\n    # Check 'expression' argument.\n    if not (\"expression\" in input_data):\n        return {\n            \"success\": False,\n            \"error\": \"BCE_NO_ARG_ORIG_EXPR\"\n        }\n\n    # Get origin expression.\n    unbalanced_expr = input_data[\"expression\"].strip()\n\n    if not isinstance(unbalanced_expr, str):\n        return {\n            \"success\": False,\n            \"error\": \"BCE_TYPE_ERROR_UBEXPR\",\n        }\n\n    unbalanced_expr = unbalanced_expr.strip()\n\n    # Check expression length.\n    if len(unbalanced_expr) == 0:\n        return {\n            \"success\": False,\n            \"error\": \"BCE_EMPTY_EXPRESSION\"\n        }\n\n    # Check the validity of characters.\n    if not _input_chk.check_input_expression_characters(unbalanced_expr):\n        return {\n            \"success\": False,\n            \"error\": \"BCE_INVALID_CHAR\"\n        }\n\n    # Balance chemical equation.\n    try:\n        balanced_result = _api.balance_chemical_equation(unbalanced_expr,\n                                                         [_api.DECOMPILER_TEXT,\n                                                          _api.DECOMPILER_MATHML,\n                                                          _api.DECOMPILER_COLLECT_SYMBOLS],\n                                                         opt)\n    except _api.ParserErrorWrapper as p_err:\n        return {\n            \"success\": False,\n            \"error\": \"BCE_PARSER_ERROR\",\n            \"detail\": str(p_err)\n        }\n    except _api.LogicErrorWrapper as l_err:\n        return {\n            \"success\": False,\n            \"error\": \"BCE_LOGIC_ERROR\",\n            \"detail\": 
str(l_err)\n }\n\n # Write balanced expression and MathML expression.\n ret = {\n \"success\": True,\n \"balanced\": {\n \"mathml\": balanced_result[1],\n \"normal\": balanced_result[0],\n },\n \"symbols\": balanced_result[2],\n }\n\n return ret","sub_path":"bce_compute_server/packages/processors/balancer/bce_processor.py","file_name":"bce_processor.py","file_ext":"py","file_size_in_byte":3225,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"338070214","text":"from os import path\n\nfrom setuptools import setup\n\nversion = {}\nwith open(\"timml/version.py\") as fp:\n exec(fp.read(), version)\n\nthis_directory = path.abspath(path.dirname(__file__))\nwith open(path.join(this_directory, 'README.md'), encoding='utf-8') as f:\n l_d = f.read()\n\nsetup(\n name=\"timml\",\n version=version[\"__version__\"],\n description=\"Steady multi-layer AEM Model\",\n long_description=l_d,\n long_description_content_type='text/markdown',\n author=\"Mark Bakker\",\n author_email=\"markbak@gmail.com\",\n url=\"https://github.com/mbakker7/timml\",\n license=\"MIT\",\n packages=[\"timml\", \"timml/besselaesnumba\"],\n python_requires='>3.5',\n install_requires=[\"numpy>=1.12\", \"scipy>=0.19\",\n \"numba>=0.39\", \"matplotlib>=3.0\"],\n classifiers=['Topic :: Scientific/Engineering :: Hydrology'],\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":846,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"464624071","text":"import random\nimport numpy as np\nimport scipy.stats as stats\nimport Meta_DQN as dqn\n# import DDPG.ddpg_custom as ddpgc\n#import matplotlib.pyplot as plt\nimport AdamOpt as AdamOpt\nimport NeuralApprox as NA\n# import gym\nfrom collections import deque\nfrom statistics import mean\n\n# import pickle\n\n# import tensorflow as tf\nclass Elements:\n def __init__(self,r_file,r_user,r_time):\n self.req_file = np.array(r_file)\n self.set_of_users = np.array(r_user)\n self.time_of_request=np.array(r_time)\n\nclass LRU_MQ_Cache:\n def __init__(self, cache_size, L): #L is the total number of users\n self.cache_size = [cache_size for i in range(L)]\n self.hit = [0 for i in range(L)]\n self.requests = [0 for i in range(L)]\n self.cache = [deque(maxlen=cache_size) for i in range(L)]\n #self.user_ID = user_ID\n self.fwd_requests = [set([]) for i in range(L)]\n self.L=L\n self.fwd_temp= [set([]) for i in range(self.L)]\n\n # This function serves the requests if the file is in cache and updates the cache,\n # if the file is not available then forwards the same is forwarded.\n def requestsToCache(self, requests=np.array([]), users=np.array([])):\n fwd_index=[] #indices of requests forwarded\n self.fwd_temp= [set([]) for i in range(self.L)]\n for i in range(requests.size):\n Cind=int(users[i])\n element = int(requests[i])\n\n self.requests[Cind] += 1\n curr_length = len(self.cache[Cind])\n if curr_length == self.cache_size[Cind]:\n if element in self.cache[Cind]:\n self.cache[Cind].remove(element)\n self.hit[Cind] += 1\n self.cache[Cind].appendleft(element)\n else:\n self.fwd_temp[Cind].add(element)\n fwd_index.append(i)\n #self.fwd_requests.append(element)\n else:\n if element in self.cache:\n self.cache[Cind].remove(element)\n self.hit[Cind] += 1\n self.cache[Cind].appendleft(element)\n else:\n self.fwd_temp[Cind].add(element)\n fwd_index.append(i)\n return fwd_index\n def MQ2Cache(self,element_in_service):\n if element_in_service.req_file.size==0:\n element={0}\n 
else:\n element=set(element_in_service.req_file.tolist())\n #print(element, element_in_service.req_file)\n for Cind in range(self.L):\n if (element).issubset(self.fwd_requests[Cind]):\n if self.cache[Cind].__len__()==self.cache_size:\n self.cache[Cind].pop()\n self.cache[Cind].appendleft(list(element)[0])\n self.fwd_requests[Cind].remove(list(element)[0])\n self.fwd_requests[Cind]=self.fwd_requests[Cind].union(self.fwd_temp[Cind])\n self.fwd_temp[Cind]=set([])\n\nclass DQNMulticastFadingQueue:\n def __init__(self, requests, timelines, users, service_time, total_users, total_good_users, cache_size,total_services,threadName,meta_parameter,meta_param_len=1, id=0):\n self.load = 0\n self.ThreadName=threadName\n self.meta_loop=0\n self.meta_interval=500\n self.meta_loop_counter=0\n self.meta_loop_max=10\n self.requests = np.array(requests)\n self.timelines = np.array(timelines)\n self.users= np.array(users)\n self.total_users=total_users\n self.total_good_users=total_good_users\n self.total_bad_users=total_users-total_good_users\n self.queue = deque() #Multicast Queue\n self.defer_queue=np.array([])\n\n self.noise_power=1\n self.bandwidth=10 #MHz\n self.rate=10 #Mbps\n\n self.service_time = service_time\n self.serve_start_index = 0\n self.serve_start_time = 0\n self.serve_stop_time = self.serve_start_time + self.service_time\n self.sojournTimes=np.array([])\n self.powerVecs=np.array([])\n self.powerVecsPolicy=np.array([7])\n self.element_in_service = Elements([],[],[])\n self.userCaches=LRU_MQ_Cache(cache_size, total_users)\n self.services = 0\n self.servicable_users=np.array([])\n\n\n #DQN Parameters\n self.enable_ddpg=1\n self.enable_sch=1\n self.enable_meta=1\n self.retransmit_no=1\n self.stop_sch_training=0\n self.inputvector=[]\n self.LoopDefState=np.array([])\n self.act_dist=[]\n self.queue_window=1 # represents total actions, state dimension is this*5: See AutoEncoder\n self.service_vecs = [0 for i in range(self.queue_window)]\n self.TransLoopDefer_vec=[0,0,0]\n self.schWindow=100\n self.metaWindow = 10\n self.schTime=0\n self.state_memory=deque(maxlen=100000)\n self.target_memory=deque(maxlen=100000)\n self.starting_vector=np.random.randint(0,self.schWindow,size=(self.schWindow,3))\n self.starting_vector=np.divide(self.starting_vector,self.starting_vector.sum(axis=1).reshape(self.schWindow,1))\n self.reward_window_sch=deque(maxlen=100000)\n self.reward_window_meta=deque(maxlen=100000)\n self.meta_reward_counter=0\n self.reward_sch=0\n self.reward_window=deque(maxlen=10000) #Holds last 500 sojourn times\n self.power_window=deque(maxlen=1000) #Holds a maximum of 1000 power actions\n self.max_power=20\n self.avg_power_constraint=7\n self.transmit_power=self.avg_power_constraint\n self.power_beta=1/self.avg_power_constraint\n self.eta_beta=.00001 #.0005 working\n self.tau_ddpg=.01 #.001 working\n self.AdamOpt=AdamOpt.AdamOpt(step=self.eta_beta)\n self.sojournTimes_window_avg=np.array([])\n self.LoopDefWindow=1\n self.action=0\n self.action_prob=np.array([1, 0, 0])\n self.meta_parameter=meta_parameter\n self.actionProbVec=np.array([])\n #self.ddpg_action_prob=np.array([-1,1,-1,self.transmit_power*2/self.max_power-1])\n # self.ddpg_action_prob=np.array([self.transmit_power*2/self.max_power-1])\n self.reward=0\n self.ddpg_action_prob=np.array([0])\n #self.DQNA = dqn.DQNAgent(int(self.queue_window*5+self.total_users), int(self.queue_window))\n self.imit_decay=1/2500\n self.imitate_prob=1/(1+self.imit_decay*np.arange(np.round(total_services*1.5).astype(int))) #Number inside the arange is larger 
tham the simulation time\n self.imit_choose=np.random.binomial(1,self.imitate_prob)\n self.fading_samples=np.random.exponential(1,(total_users,np.round(total_services*1.5).astype(int)))\n self.fading_samples[int(total_good_users):int(total_users)]=0.1*self.fading_samples[int(total_good_users):int(total_users)] #bad user fading states\n self.imit_times=0\n #self.Autoencode()\n self.queue_decision=1\n [self.AutoEncoderLoopDef() for i in range(0,self.LoopDefWindow)]\n self.action_vector=np.array([1,2,4,6,8,10,12,14,16,18,20,25,30,40,50])\n # self.DDPGA = ddpgc.DDPG(self.ddpg_action_prob.size, self.LoopDefState.shape,1,1,50,lr=.05,tau=self.tau_ddpg)\n self.DDQNA = dqn.DQNAgent(self.LoopDefState.size,self.action_vector.size,self.avg_power_constraint,self.action_vector,meta_param_len,id)\n self.DNN=NA.DNNApproximator((1,3),1,.01,.01)\n self.reward_array=np.array([])\n self.first=0\n self.curr_state=self.LoopDefState\n self.next_state=self.LoopDefState\n self.LoopDefState=np.array([])\n self.time=0\n self.load=1\n # This function serves the requests and updates the cache\n # def live_plotter(self,x_vec,y1_data,identifier='',pause_time=0.1):\n # if self.first==0:\n # # this is the call to matplotlib that allows dynamic plotting\n # plt.ion()\n # #fig = plt.figure(figsize=(13,6))\n # #ax = fig.add_subplot(111)\n # # create a variable for the line so we can later update it\n # self.line, = plt.plot(x_vec,y1_data,'-o',alpha=0.8)\n # #update plot label/title\n # plt.ylabel('Reward')\n # plt.title('Title: {}'.format(identifier))\n # plt.show()\n # self.first=1\n # else:\n # # after the figure, axis, and line are created, we only need to update the y-data\n # self.line.set_ydata(y1_data)\n # self.line.set_xdata(x_vec)\n # # adjust limits if new data goes beyond bounds\n # if np.min(y1_data)<=self.line.axes.get_ylim()[0] or np.max(y1_data)>=self.line.axes.get_ylim()[1]:\n # plt.ylim([np.min(y1_data)-np.std(y1_data),np.max(y1_data)+np.std(y1_data)])\n # if np.min(x_vec)<=self.line.axes.get_xlim()[0] or np.max(x_vec)>=self.line.axes.get_xlim()[1]:\n # plt.xlim([np.min(x_vec)-np.std(x_vec),np.max(x_vec)+np.std(x_vec)])\n # # this pauses the data so the figure/axis can catch up - the amount of pause can be altered above\n # plt.pause(pause_time)\n\n def thresholdPowerIndex(self,j):\n thresh=(self.noise_power/self.transmit_power)*(2**(self.rate/self.bandwidth)-1)\n #self.servicable_users=[i for i in range(self.queue[j].set_of_users.size) if self.fading_samples[self.queue[j].set_of_users[i],self.services]>=thresh]\n #print(self.queue[j].set_of_users,self.services)\n# print(self.queue.__len__())\n if self.queue.__len__()>0:\n self.queue[j].set_of_users=np.array(self.queue[j].set_of_users)\n if self.queue[j].set_of_users.size > 0:\n #print([np.around(self.queue[j].set_of_users),self.services])\n u_ind=self.queue[j].set_of_users.astype(int)\n fad_users=self.fading_samples[np.around(u_ind),self.services]\n fad_users=np.reshape(fad_users,np.array(self.queue[j].set_of_users).size)\n self.servicable_users=[i for i in range(fad_users.size) if(fad_users[i]>=thresh)]\n self.servicable_users=np.reshape(self.servicable_users,np.array(self.servicable_users).size)\n else:\n self.servicable_users=np.array([])\n else:\n self.servicable_users=np.array([])\n\n def makeElementInService(self,i):\n if self.queue.__len__()>0:\n usr_temp=np.array(self.queue[i].set_of_users)\n time_temp=np.array(self.queue[i].time_of_request)\n if self.servicable_users.size > 0:\n self.element_in_service.set_of_users=usr_temp[self.servicable_users]\n 
self.element_in_service.time_of_request=time_temp[self.servicable_users]\n self.element_in_service.req_file=self.queue[i].req_file\n else:\n self.element_in_service.set_of_users=np.array([])\n self.element_in_service.time_of_request=np.array([])\n self.element_in_service.req_file=np.array([])\n else:\n self.element_in_service.set_of_users=np.array([])\n self.element_in_service.time_of_request=np.array([])\n self.element_in_service.req_file=np.array([])\n def deleteUsers(self,i):\n if self.servicable_users.__len__()>0:\n self.queue[i].set_of_users=list(np.delete(self.queue[i].set_of_users,self.servicable_users,0))\n self.queue[i].time_of_request=list(np.delete(self.queue[i].time_of_request,self.servicable_users,0))\n if len(self.queue[i].set_of_users)==0:\n del self.queue[i]\n q_size=self.queue.__len__()\n del_vec=np.array([])\n k=0\n for j in range(q_size):\n if (k>=0)&(self.queue[k].req_file.size==0):\n del self.queue[k]\n j=j-1\n k=min(j,self.queue.__len__()-1)\n\n def acceptServeRequests(self): #Accepts Requests and Serves the Multicast queue\n req_index = np.argwhere((self.timelines >= self.serve_start_time) & (self.timelines < self.serve_stop_time))\n req_index = np.reshape(req_index,req_index.size)\n #req_index = np.argwhere((self.timelines >= self.timelines[self.serve_start_index]) & (self.timelines < self.timelines[self.serve_start_index]+self.service_time))\n #print(self.serve_start_time,np.max(self.timelines))\n fwd_index = self.userCaches.requestsToCache(self.requests[req_index], self.users[req_index])\n req_index=req_index[fwd_index]\n if len(req_index) > 0:\n if self.queue.__len__() == 0:\n\n self.element_in_service=Elements([self.requests[req_index[0]]],[self.users[req_index[0]]],[self.timelines[req_index[0]]])\n for ser in range(0,self.defer_queue.__len__()):\n if self.defer_queue[ser].req_file == self.element_in_service.req_file:\n #temp=[self.queue[queue_ind].set_of_users.append(tp) for tp in (add_ele.set_of_users)]\n #temp=[self.queue[queue_ind].time_of_request.append(tp) for tp in (add_ele.time_of_request)]\n self.element_in_service.set_of_users=np.append(self.element_in_service.set_of_users, self.defer_queue[ser].set_of_users)\n self.element_in_service.time_of_request=np.append(self.element_in_service.time_of_request,self.defer_queue[ser].time_of_request)\n self.defer_queue=np.delete(self.defer_queue,ser)\n break\n self.queue.append(self.element_in_service)\n #print(len(self.queue))\n self.serveQueueDQN()\n # self.thresholdPowerIndex(0)\n # self.makeElementInService(0)\n # self.deleteUsers(0)\n self.serve_start_time=self.timelines[req_index[0]]\n stop_time_old=self.serve_stop_time\n self.serve_stop_time=self.serve_start_time+self.service_time\n #add Missed requests\n temp_req_index = np.argwhere((self.timelines >= stop_time_old) & (self.timelines < self.serve_stop_time))\n temp_req_index=np.reshape(temp_req_index,temp_req_index.size)\n temp_fwd_index = self.userCaches.requestsToCache(self.requests[temp_req_index], self.users[temp_req_index])\n #print(req_index, temp_fwd_index)\n temp_req_index=temp_req_index[temp_fwd_index]\n req_index=np.append(req_index,temp_req_index)\n #delete queued request\n np.delete(req_index,0)\n else: ## Serve requests using DQN\n self.serveQueueDQN()\n #self.element_in_service=self.queue[0] # Enable for Simple multicast\n #self.queue.popleft() # Enable for Simple multicast\n\n for i in req_index:\n add_ele = Elements([self.requests[i]], [self.users[i]], [self.timelines[i]])\n for ser in range(0,self.defer_queue.__len__()):\n if 
self.defer_queue[ser].req_file == add_ele.req_file:\n #temp=[self.queue[queue_ind].set_of_users.append(tp) for tp in (add_ele.set_of_users)]\n #temp=[self.queue[queue_ind].time_of_request.append(tp) for tp in (add_ele.time_of_request)]\n add_ele.set_of_users=np.append(add_ele.set_of_users, self.defer_queue[ser].set_of_users)\n add_ele.time_of_request=np.append(add_ele.time_of_request,self.defer_queue[ser].time_of_request)\n self.defer_queue=np.delete(self.defer_queue,ser)\n break\n if self.queue.__len__() == 0:\n self.queue.append(add_ele)\n else:\n #temp_queue = self.queue\n queue_ind = 0\n while queue_ind0:\n # print(self.serve_stop_time-self.element_in_service.time_of_request)\n # for rm_nan in range(self.element_in_service.time_of_request.size-1,-1,-1):\n x=self.serve_stop_time-self.element_in_service.time_of_request\n x = x[~np.isnan(x)]\n self.sojournTimes=np.append(self.sojournTimes, x)\n self.serve_start_time=self.serve_stop_time\n self.serve_stop_time=self.serve_stop_time+self.service_time\n self.services+=1\n if (((self.serve_start_time>self.timelines[-1]))):\n return 0\n return 1\n\n\n def Autoencode(self):#AutoEncoder\n self.inputvector=[]\n for i in range(len(self.queue)):\n if i < self.queue_window:\n fileState=self.queue[i].req_file\n if fileState.size>0:\n fileState=np.reshape(fileState,1)\n #print(self.queue[i].set_of_users)\n temp_usr_state=np.array(self.queue[i].set_of_users)\n temp_time_state=np.array(self.queue[i].time_of_request)\n user_pow=map(np.unique,temp_usr_state)\n user_pow=np.fromiter(user_pow, dtype=np.int)\n userState=2**user_pow\n #userState=[2**j for j in (set((temp_usr_state[self.servicable_users]).tolist()))]\n sojVec=(self.serve_stop_time-temp_time_state)\n sojoState=np.array([np.max(sojVec), np.mean(sojVec), np.min(sojVec)])\n #print(fileState,np.array([np.sum(userState)]))\n fileState=np.array([-1]) # Disable to include File State in the State vector\n #userState=np.array([-1]) # Disable to include User State in the State vector\n self.inputvector.append(np.concatenate([fileState,np.array([np.sum(userState)]),sojoState]))\n else:\n self.inputvector.append([-1,-1,10000,10000,10000])\n #self.thresholdPowerIndex(i)\n \"\"\" \n if self.servicable_users.__len__()>0:\n fileState=self.queue[i].req_file\n fileState=np.reshape(fileState,1)\n #print(self.queue[i].set_of_users)\n temp_usr_state=np.array(self.queue[i].set_of_users)\n temp_time_state=np.array(self.queue[i].time_of_request)\n user_pow=map(np.unique,temp_usr_state[self.servicable_users])\n user_pow=np.fromiter(user_pow, dtype=np.int)\n userState=2**user_pow\n #userState=[2**j for j in (set((temp_usr_state[self.servicable_users]).tolist()))]\n sojVec=(self.serve_stop_time-temp_time_state[self.servicable_users])\n sojoState=np.array([np.max(sojVec), np.mean(sojVec), np.min(sojVec)])\n #print(fileState,np.array([np.sum(userState)]))\n self.inputvector.append(np.concatenate([fileState,np.array([np.sum(userState)]),sojoState]))\n else:\n self.inputvector.append([-1,-1,10000,10000,10000])\n \"\"\"\n if len(self.queue)0:\n sojActionVec=(self.serve_stop_time-temp_time_rw[self.servicable_users])\n [self.reward_window.append(i) for i in sojActionVec]\n self.reward=1*np.mean(self.reward_window)\n temp_vec=np.array([])\n for j in range(0,np.min([self.queue.__len__(),self.queue_window])):\n temp_time_rw=np.array(self.queue[j].time_of_request)\n #if self.servicable_users.__len__()>0:\n sojActionVec=np.array(self.serve_stop_time-temp_time_rw)\n temp_vec=np.append(temp_vec, sojActionVec)\n if (temp_vec.size!=0):\n 
self.reward=self.reward+.5*np.mean(temp_vec)\n\n #self.reward=np.max(sojActionVec)\n def quantize_fading(self,input):\n quant_stat=np.array([.001,.01,.04,.06,.08,.1,.4,.6,.8,1,np.inf])\n ret_vec=np.ones(shape=input.shape)\n for i in range(input.size):\n for j in range(quant_stat.size-1):\n if input[i]>=quant_stat[0]:\n if (input[i]>=quant_stat[j]) & (input[i]0:\n if self.queue[0].req_file.size>0:\n fileState=self.queue[0].req_file\n fileState=np.reshape(fileState,1)\n #print(self.queue[i].set_of_users)\n temp_usr_state=np.array(self.queue[0].set_of_users)\n temp_time_state=np.array(self.queue[0].time_of_request)\n user_pow=map(np.unique,temp_usr_state)\n user_pow=np.fromiter(user_pow, dtype=np.int)\n userState=2**user_pow\n userState=np.zeros(shape=(self.total_users,))\n for i in range(user_pow.size):\n userState[user_pow[i].astype(int)]=1\n #userState=[2**j for j in (set((temp_usr_state[self.servicable_users]).tolist()))]\n # sojVec=(self.serve_stop_time-temp_time_state)\n # sojoState=np.array([np.max(sojVec), np.mean(sojVec), np.min(sojVec)])\n #print(fileState,np.array([np.sum(userState)]))\n fileState=np.array([-1]) # Disable to include File State in the State vector\n #userState=np.array([-1]) # Disable to include User State in the State vector\n self.inputvector=np.append(self.inputvector,np.concatenate([np.array((userState))]))\n # self.inputvector=np.append(self.inputvector,np.concatenate([np.reshape([self.queue_decision],(1,)),fileState,np.array([np.sum(userState)])]))\n # self.inputvector=np.append(self.inputvector,np.concatenate([np.reshape([self.queue_decision],(1,)),np.array([np.sum(userState)])]))\n else:\n self.inputvector=np.append(self.inputvector,np.zeros(shape=(self.total_users,)))\n # self.inputvector=np.append(self.inputvector,[self.queue_decision,-1,-1])\n # self.inputvector=np.append(self.inputvector,[self.queue_decision,-1])\n else:\n self.inputvector=np.append(self.inputvector,np.zeros(shape=(self.total_users,)))\n # self.inputvector=np.append(self.inputvector,[self.queue_decision,-1,-1])\n # self.inputvector=np.append(self.inputvector,[self.queue_decision,-1])\n fading_state_input=np.reshape(self.fading_samples[:,self.services],self.total_users)\n # self.inputvector=np.concatenate(self.inputvector)\n # self.inputvector=np.append(self.inputvector,self.quantize_fading(fading_state_input))\n self.inputvector=np.append(self.inputvector,self.quantize_fading(fading_state_input))\n self.LoopDefState=np.append(self.LoopDefState,self.inputvector)\n\n def AutoEncoderLoopDef_old(self):\n self.inputvector=np.array([])\n if self.queue.__len__()>0:\n if self.queue[0].req_file.size>0:\n fileState=self.queue[0].req_file\n fileState=np.reshape(fileState,1)\n #print(self.queue[i].set_of_users)\n temp_usr_state=np.array(self.queue[0].set_of_users)\n temp_time_state=np.array(self.queue[0].time_of_request)\n user_pow=map(np.unique,temp_usr_state)\n user_pow=np.fromiter(user_pow, dtype=np.int)\n userState=2**user_pow\n #userState=[2**j for j in (set((temp_usr_state[self.servicable_users]).tolist()))]\n # sojVec=(self.serve_stop_time-temp_time_state)\n # sojoState=np.array([np.max(sojVec), np.mean(sojVec), np.min(sojVec)])\n #print(fileState,np.array([np.sum(userState)]))\n fileState=np.array([-1]) # Disable to include File State in the State vector\n #userState=np.array([-1]) # Disable to include User State in the State vector\n self.inputvector=np.append(self.inputvector,np.concatenate([fileState,np.array([np.sum(userState)])]))\n # 
self.inputvector=np.append(self.inputvector,np.concatenate([np.reshape([self.queue_decision],(1,)),fileState,np.array([np.sum(userState)])]))\n # self.inputvector=np.append(self.inputvector,np.concatenate([np.reshape([self.queue_decision],(1,)),np.array([np.sum(userState)])]))\n else:\n self.inputvector=np.append(self.inputvector,[-1,-1])\n # self.inputvector=np.append(self.inputvector,[self.queue_decision,-1,-1])\n # self.inputvector=np.append(self.inputvector,[self.queue_decision,-1])\n else:\n self.inputvector=np.append(self.inputvector,[-1,-1])\n # self.inputvector=np.append(self.inputvector,[self.queue_decision,-1,-1])\n # self.inputvector=np.append(self.inputvector,[self.queue_decision,-1])\n fading_state_input=np.reshape(self.fading_samples[:,self.services],self.total_users)\n # self.inputvector=np.concatenate(self.inputvector)\n self.inputvector=np.append(self.inputvector,self.quantize_fading(fading_state_input))\n self.LoopDefState=np.append(self.LoopDefState,self.inputvector)\n\n def AutoEncoderLoopDef_Soj(self):\n # self.inputvector=[self.queue_decision]\n self.LoopDefState=np.append(self.LoopDefState,self.queue_decision)\n\n def InstRewardSch(self):\n temp_time_rw=np.array(self.queue[self.action].time_of_request)\n if self.servicable_users.__len__()>0:\n sojActionVec=(self.serve_stop_time-temp_time_rw[self.servicable_users])\n [self.reward_window_sch.append(i) for i in sojActionVec]\n\n def InstRewardMeta(self):\n temp_time_rw=np.array(self.queue[self.action].time_of_request)\n if self.servicable_users.__len__()>0:\n sojActionVec=(self.serve_stop_time-temp_time_rw[self.servicable_users])\n [self.reward_window_meta.append(i) for i in sojActionVec]\n self.meta_reward_counter=self.meta_reward_counter+1\n\n def InstRewardLoopDef(self):\n temp_time_rw=np.array(self.queue[self.action].time_of_request)\n if self.servicable_users.__len__()>0:\n sojActionVec=(self.serve_stop_time-temp_time_rw[self.servicable_users])\n # [self.reward_window.append(i) for i in sojActionVec]\n # self.reward_window.append(np.unique(self.servicable_users).__len__())\n self.reward_window.append(np.unique(self.queue[self.action].set_of_users[self.servicable_users]).__len__())\n else:\n self.reward_window.append(0)\n self.power_window.append(self.transmit_power)\n self.powerVecs=np.append(self.powerVecs,self.transmit_power)\n def deferElement(self):\n if self.queue.__len__()>1:\n self.defer_queue=np.append(self.defer_queue,self.queue[0])\n self.queue.popleft()\n\n def loopElement(self):\n\n firstElement=self.queue[0]\n self.queue.popleft()\n self.queue.append(firstElement)\n\n def serveQueueDQN(self):\n #self.Autoencode()#Encodes Input Vector Giving Current state\n #curr_state=self.inputvector\n # self.AutoEncoderLoopDef()\n\n self.act_dist=stats.rv_discrete(name='act_dist', values=([1,2,3], self.action_prob))\n if np.remainder(self.service_vecs[0],self.retransmit_no)==0:\n self.queue_decision=self.act_dist.rvs(size=1)\n if self.queue_decision==2:\n self.loopElement()\n elif self.queue_decision==3:\n self.deferElement()\n old_state=self.curr_state\n self.LoopDefState=np.array([])\n self.AutoEncoderLoopDef()\n self.curr_state=self.LoopDefState\n if self.enable_ddpg==1:\n self.DDQNA.remember(old_state, self.ddpg_action_prob, self.reward, self.curr_state, False)\n\n if (self.meta_loop==0):\n self.ddpg_action_prob=self.DDQNA.get_action(self.curr_state)\n self.transmit_power = self.action_vector[self.ddpg_action_prob]\n self.powerVecsPolicy=np.append(self.powerVecsPolicy,self.transmit_power)\n else:\n 
self.ddpg_action_prob = self.DDQNA.get_meta_action(self.curr_state)\n self.transmit_power = self.action_vector[self.ddpg_action_prob]\n self.powerVecsPolicy = np.append(self.powerVecsPolicy, self.transmit_power)\n\n self.action=0\n\n self.thresholdPowerIndex(self.action)#Identify servicable users and calculate reward\n self.InstRewardLoopDef() #Gets instantatneous reward for action using servicable users\n self.InstRewardSch() #Reward for Scheduling\n if(self.meta_loop==1):\n self.InstRewardMeta()\n self.makeElementInService(self.action) #Include only servicable users\n self.deleteUsers(self.action) #Delete only serviced users\n self.service_vecs[self.action]+=1\n\n\n\n if (self.enable_ddpg == 1):\n\n self.time += 1\n # self.reward=1*np.mean(self.reward_window)-self.power_beta*self.transmit_power\n self.reward=1*np.mean(self.reward_window)\n self.reward_window.clear()\n self.power_window.clear()\n # self.DDPGA.cumul_reward+=self.reward\n # self.reward_array=np.append(self.reward_array, self.DDPGA.cumul_reward/self.time)\n\n # self.DDPGA.cumul_reward+=self.reward\n # power_grad=0\n if self.time > (300): # replay if memory has more than 200 elements\n # self.DDPGA.critic.tau=np.max([self.tau_ddpg/(self.time-300)**0,0])\n # self.DDPGA.actor.tau=np.max([self.tau_ddpg/(self.time-300)**0,0])\n # self.DDPGA.train()\n\n if (self.meta_loop == 0):\n self.DDQNA.replay()\n if self.time%50==0:\n self.DDQNA.update_global_weight()\n if (self.time%self.meta_interval==0):\n self.meta_loop=self.enable_meta\n print(\"\\n Starting Meta Loop\")\n else:\n self.DDQNA.penalty_step()\n if self.meta_reward_counter==self.metaWindow:\n self.reward_meta = np.mean(self.reward_window_meta)\n self.reward_window_meta.clear()\n self.meta_reward_counter=0\n self.DDQNA.meta_remember(self.meta_parameter,self.reward_meta)\n self.DDQNA.meta_replay(np.min([len(self.DDQNA.meta_memory),50]))\n self.meta_parameter=self.DDQNA.meta_step(self.meta_parameter)\n self.DDQNA.update_meta_actor(self.DDQNA.get_meta_actor_weight(self.meta_parameter))\n self.meta_loop_counter=self.meta_loop_counter+1\n #print(self.ThreadName,self.meta_loop)\n if (self.meta_loop_counter==self.meta_loop_max):\n self.meta_loop_counter=0\n self.meta_loop=0\n print(\"\\n Out of Meta Loop...\")\n\n\n # if np.remainder(self.time,1)==0:\n # if np.abs(np.mean(self.powerVecs[-300:])-self.avg_power_constraint)>.01:\n # power_grad=np.mean(self.powerVecs[-300:])-self.avg_power_constraint\n # else:\n # power_grad=0\n\n # power_grad=np.mean(self.powerVecs[-300:])-self.avg_power_constraint\n # self.power_beta=np.clip(self.power_beta+self.eta_beta*(power_grad)**0,-self.total_good_users/self.avg_power_constraint,self.total_good_users/self.avg_power_constraint)\n # self.power_beta=self.AdamOpt.AdamOptimizer(self.power_beta,power_grad,1/(self.time-300)**0)\n # self.power_beta=np.clip(self.AdamOpt.AdamOptimizer(self.power_beta,power_grad,1/(self.time-300)**0), -self.total_good_users/self.avg_power_constraint,self.total_good_users/self.avg_power_constraint)\n self.reward_array=np.append(self.reward_array, np.mean(self.sojournTimes[-np.min([2000,self.sojournTimes.__len__()]):]))\n self.reward_array=self.reward_array[~np.isnan(self.reward_array)]\n\n if (np.remainder(self.service_vecs[0], self.schWindow) == 0) & (self.enable_sch == 1):\n\n self.schTime+=1\n self.reward_sch=1*np.mean(self.reward_window_sch)\n self.reward_window_sch.clear()\n if self.schTime>=10:\n if(np.sum(np.abs(self.action_prob-self.state_memory[-2]))>0):\n self.state_memory.append(self.action_prob)\n 
self.target_memory.append([self.reward_sch])\n self.stop_sch_training=0\n else:\n self.stop_sch_training+=0\n else:\n self.state_memory.append(self.action_prob)\n self.target_memory.append([self.reward_sch])\n if self.schTime<20:\n self.action_prob=self.starting_vector[self.schTime]\n samp_ind=np.random.randint(0,self.target_memory.__len__(),50)\n tar=(np.array(self.target_memory)[samp_ind]).reshape(50,1,1)\n stat=(np.array(self.state_memory)[samp_ind]).reshape(50,1,3)\n # tar=(np.array(self.target_memory)[-100:]).reshape(100,1,1)\n # stat=(np.array(self.state_memory)[-100:]).reshape(100,1,3)\n\n [self.DNN.train_on_batch(stat,tar) for i in range(0,1)]\n # elif self.schTime==20:\n # self.action_prob=np.array([1,1,1])/3\n else: # replay if memory has more than 32 elements\n if self.stop_sch_training<10:\n samp_ind=np.random.randint(0,self.target_memory.__len__(),50)\n tar=(np.array(self.target_memory)[samp_ind]).reshape(50,1,1)\n stat=(np.array(self.state_memory)[samp_ind]).reshape(50,1,3)\n # tar=(np.array(self.target_memory)[-100:]).reshape(100,1,1)\n # stat=(np.array(self.state_memory)[-100:]).reshape(100,1,3)\n\n [self.DNN.train_on_batch(stat,tar) for i in range(0,10)]\n grad=self.DNN.approx_gradient(self.action_prob)\n # decay_dnn=1/(1+0.00001*(self.schTime-19)*(np.log10(10+np.log10(10+self.schTime-19))))\n decay_dnn=1\n self.action_prob=np.clip(self.DNN.AdamOpt.AdamOptimizer(self.action_prob,grad,decay_dnn)+.0005*(.99**(self.schTime-0))*np.random.uniform(0,1,size=3),0,1) # To avoid zero gradients in the beginning\n # self.action_prob=np.clip(self.DNN.AdamOpt.AdamOptimizer(self.action_prob,grad,1/(self.schTime-9)**0)+.0005*(.99**(self.schTime-0))*np.random.uniform(0,1,size=3),0,1) # To avoid zero gradients in the beginning\n # self.action_prob=np.clip((self.action_prob-.01*grad)+.00005*(.9**(self.schTime-100))*np.random.uniform(0,1,size=3),0,1) # To avoid zero gradients in the beginning\n # self.action_prob=np.clip(self.DNN.AdamOpt.AdamOptimizer(self.action_prob,grad),0,1)\n self.action_prob=self.action_prob/np.sum(self.action_prob)\n self.actionProbVec=np.append(self.actionProbVec,self.action_prob)\n # if np.remainder(self.time,100)==0:\n if np.remainder(self.service_vecs[0],100)==0:\n if self.reward_array.size:\n #self.live_plotter(np.arange(0,self.reward_array.size),self.reward_array)\n print(self.ThreadName, self.reward_array[-1], self.meta_parameter, self.action_prob,self.transmit_power,self.DDQNA.penalty_lambda,[np.min(self.powerVecsPolicy[-np.min([1000,self.powerVecs.__len__()]):]),np.mean(self.powerVecs[-np.min([1000,self.powerVecs.__len__()]):]),np.max(self.powerVecsPolicy[-np.min([1000,self.powerVecs.__len__()]):])],np.std(self.powerVecsPolicy[-np.min([1000,self.powerVecs.__len__()]):]))\n # print(noise_var,self.transmit_power,self.ddpg_action_prob*(self.max_power-self.avg_power_constraint))\n #plt.savefig('Reward.png')\n # if np.remainder(self.time,1000)==0:\n # print(self.service_vecs)\n\n","sub_path":"MQ_Scheduling_PC_Inst.py","file_name":"MQ_Scheduling_PC_Inst.py","file_ext":"py","file_size_in_byte":38178,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"526761694","text":"from django import forms\nfrom django.contrib.auth.models import User\nfrom django.contrib.auth.forms import UserChangeForm\n\nfrom api import models as apiModels\nclass signupForm(UserChangeForm):\n \n class Meta:\n model = User\n #fields = ('first_name', 'last_name', 'email','username','password1', 'password2',) \n fields = \"__all__\"\n labels = 
{\n 'email' : 'Email',\n }\n\nclass empForm(forms.ModelForm):\n\n class Meta:\n model = apiModels.empData\n fields = \"__all__\"","sub_path":"api/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":525,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"454152944","text":"import cv2\nimport os\nimport numpy as np\nimport faceRecognition as fr\nfrom gtts import gTTS\n\n\n\ndef audio():\n my_text = \"user found\"\n language = 'en'\n myobj = gTTS(text=my_text, lang=language, slow=False)\n myobj.save(\"welcome.mp3\")\n os.system(\"welcome.mp3\")\n\n\ntest_img=cv2.imread('TestImages/divyanshu.jpg')\n\nfaces_detected,gray_img=fr.faceDetection(test_img)\nprint(\"faces_detected:\",faces_detected)\n\n\n\nfaces,faceID=fr.labels_for_training_data('trainingImages')\nface_recognizer=fr.train_classifier(faces,faceID)\nface_recognizer.write('trainingData.yml')\n\n\nname={0:\"chirag\",1:\"divyanshu\"}\n\nfor face in faces_detected:\n (x,y,w,h)=face\n roi_gray=gray_img[y:y+h,x:x+h]\n label,confidence=face_recognizer.predict(roi_gray)\n print(\"confidence:\",confidence)\n print(\"label:\",label)\n fr.draw_rect(test_img,face)\n predicted_name=name[label]\n if(confidence>37):\n continue\n \n fr.put_text(test_img,predicted_name,x,y)\n audio()\n\nresized_img=cv2.resize(test_img,(1000,1000))\ncv2.imshow(\"face dtecetion \",resized_img)\ncv2.waitKey(0)\ncv2.destroyAllWindows\n\n\n\n\n\n","sub_path":"FaceRecognition-master/tester.py","file_name":"tester.py","file_ext":"py","file_size_in_byte":1100,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"597750326","text":"import os\nimport urllib.request\n\nfrom zipfile import ZipFile\nfrom io import BytesIO\n\nFONT_DIR = 'tle/assets/fonts'\nURL_BASE = 'https://noto-website-2.storage.googleapis.com/pkgs/'\nFONTS = ['NotoSansCJK-Bold.ttc', 'NotoSansCJK-Regular.ttc']\n\ndef _unzip(font, archive):\n with ZipFile(archive, 'r') as zipfile:\n zipfile.extract(font, FONT_DIR)\n\ndef _download(font):\n with urllib.request.urlopen(f'{URL_BASE}{font}.zip') as resp:\n _unzip(font, BytesIO(resp.read()))\n\ndef maybe_download():\n os.makedirs(FONT_DIR, exist_ok=True)\n for font in FONTS:\n if not os.path.isfile(os.path.join(FONT_DIR, font)):\n _download(font)\n","sub_path":"tle/util/font_downloader.py","file_name":"font_downloader.py","file_ext":"py","file_size_in_byte":658,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"647248558","text":"##===================================\n### input\n##==================================\ninputs=[(root['INPUTS']['input.tglf'],'input.tglf'),\n (root['INPUTS']['jobtglf.pbs'],'jobtglf.pbs'),\n (root['INPUTS']['monitePBStglf.sh'],'monitePBStglf.sh')\n\t]\nif root['SETTINGS']['PHYSICS']['mode']=='nonlin':\n root['INPUTS']['input.tglf']['USE_TRANSPORT_MODEL']=True\nelse:\n root['INPUTS']['input.tglf']['USE_TRANSPORT_MODEL']=False\n##----------------------\n### output\n##----------------------\noutputs=['out.tglf.run']\n#executable ='chmod 777 monitePBStglf.sh ; ./monitePBStglf.sh';\nexecutable ='pbsMonitor -cn 1 -exe tglf -e . 
-n 1'\nret_code=OMFITx.executable(root, inputs=inputs, outputs=outputs, executable=executable)\n#-----------------------\n# load the results\n#-----------------------\nfor item in outputs:\n root['OUTPUTS'][item]=OMFITasciitable(item)\n","sub_path":"SCRIPTS/runTGLF.py","file_name":"runTGLF.py","file_ext":"py","file_size_in_byte":866,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"580192180","text":"#!/usr/bin/env python2\nimport os\nimport sys\nimport pdb\nimport glob\nimport bz2\nimport lzma\nimport gzip\nimport os.path\n\nfrom operator import itemgetter\nfrom collections import OrderedDict\n\n# when break, call pdb\ndef install_pdb():\n def info(type, value, tb):\n if hasattr(sys, 'ps1') or not sys.stderr.isatty():\n # You are in interactive mode or don't have a tty-like\n # device, so call the default hook\n sys.__execthook__(type, value, tb)\n else:\n import traceback\n # You are not in interactive mode; print the exception\n traceback.print_exception(type, value, tb)\n print()\n # ... then star the debugger in post-mortem mode\n pdb.pm()\n\n sys.excepthook = info\n\n# get the latest file from pn\ndef get_latest_file(pn):\n rtn = []\n for f in glob.glob(pn):\n rtn.append([f, os.stat(f).st_mtime])\n if len(rtn) == 0:\n return None\n return max(rtn, key=itemgetter(1))[0]\n\n# iter down to zero\ndef to_zero(start):\n return range(start-1, -1, -1)\n\n# clean split\ndef split(line, tok):\n (lhs, rhs) = line.rsplit(tok, 1)\n return (lhs.strip(), rhs.strip())\n\n# get content\ndef read_file(pn):\n data = \"\"\n with open(pn) as fd:\n data = fd.read()\n return data\n\nLOADERS = OrderedDict([\n (\".xz\", lzma.open),\n (\".lzma\", lzma.open),\n (\".bz2\", bz2.open),\n (\".gz\", gzip.open),\n (\".gzip\", gzip.open),\n])\n\ndef get_supported_extensions(ext=\".as\"):\n \"\"\"\n Returns the supported extensions.\n \"\"\"\n result = list(ext + x for x in LOADERS.keys())\n result.append(ext)\n return result\n\ndef smart_open(filename, *args, **kwargs):\n \"\"\"\n Uses the file name's extension to transparently decompress files.\n \"\"\"\n return LOADERS.get(os.path.splitext(filename)[1], open)(filename, *args, **kwargs)\n\ndef get_files(out_d):\n for root, dirs, files in os.walk(out_d):\n for name in files:\n pn = os.path.join(root, name)\n if any(map(pn.endswith, get_supported_extensions())):\n yield pn\n\ndef get_all_files(in_d):\n if os.path.isdir(in_d):\n files = []\n for fn in get_files(in_d):\n files.append(fn)\n return files\n else:\n with open(in_d) as f:\n result = []\n for line in f.readlines():\n line = line.strip()\n if line.startswith(\"#\"):\n continue\n result.append(line)\n return result\n\ndef is_debug():\n return \"DEBUG\" in os.environ\n","sub_path":"analyzer/apisan/lib/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2565,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"177544057","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('tool', '0003_checkoutitem'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='checkoutitem',\n name='room',\n field=models.ForeignKey(help_text=b'Only rooms marked \"has checkoutitems\" appear here', to='geo.Room'),\n ),\n 
]\n","sub_path":"tool/migrations/0004_auto_20161006_1138.py","file_name":"0004_auto_20161006_1138.py","file_ext":"py","file_size_in_byte":462,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"253512950","text":"class Solution(object):\r\n def maximumProduct(self, nums):\r\n \"\"\"\r\n :type nums: List[int]\r\n :rtype: int\r\n \"\"\"\r\n minus = []\r\n positive = []\r\n zero = []\r\n while len(nums):\r\n v = nums.pop()\r\n if v > 0:\r\n positive.append(v)\r\n continue\r\n if v < 0:\r\n minus.append(v)\r\n continue\r\n zero.append(v)\r\n temp = 0\r\n if len(positive) > 1:\r\n for i in xrange(min(3, len(positive)-1)):\r\n for j in xrange(len(positive)-1, i, -1):\r\n if positive[j] > positive[j-1]:\r\n temp = positive[j-1]\r\n positive[j-1] = positive[j]\r\n positive[j] = temp\r\n if len(minus) > 1:\r\n for i in xrange(min(3, len(minus)-1)):\r\n for j in xrange(len(minus)-1, i, -1):\r\n if minus[j] < minus[j-1]:\r\n temp = minus[j-1]\r\n minus[j-1] = minus[j]\r\n minus[j] = temp\r\n if len(minus)+len(positive) < 3:\r\n return 0\r\n if len(positive) > 2:\r\n if len(minus) > 1:\r\n return max(positive[0]*positive[1]*positive[2], minus[0]*minus[1]*positive[0])\r\n return positive[0]*positive[1]*positive[2]\r\n if len(positive) > 0 and len(minus) > 1:\r\n return positive[0]*minus[0]*minus[1]\r\n if len(zero) == 0:\r\n return minus[0] * minus[1]*minus[2]\r\n return 0\r\n","sub_path":"628.py","file_name":"628.py","file_ext":"py","file_size_in_byte":1596,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"533351294","text":"# build the neural network to approximate the solution\r\nimport torch\r\nfrom torch import tanh, squeeze, sin, cos, sigmoid, autograd\r\nfrom torch.nn.functional import relu\r\n\r\ntorch.set_default_tensor_type('torch.cuda.DoubleTensor')\r\n\r\n# a 2-layer feed forward network for the PDE solution\r\nclass network(torch.nn.Module):\r\n def __init__(self, d, m, activation_type = 'ReLU3', boundary_control_type = 'none', flag_zero_initially = False):\r\n super(network, self).__init__()\r\n self.layer1 = torch.nn.Linear(d,m)\r\n self.layer2 = torch.nn.Linear(m,m)\r\n self.layer3 = torch.nn.Linear(m,1)\r\n if activation_type == 'ReLU3':\r\n self.activation = lambda x: relu(x**3)\r\n elif activation_type == 'sigmoid':\r\n self.activation = lambda x: sigmoid(x)\r\n elif activation_type == 'tanh':\r\n self.activation = lambda x: tanh(x)\r\n elif activation_type == 'sin':\r\n self.activation = lambda x: sin(x)\r\n self.boundary_control_type = boundary_control_type\r\n if boundary_control_type == 'none':\r\n self.if_boundary_controlled = False\r\n else:\r\n self.if_boundary_controlled = True\r\n if flag_zero_initially == True:\r\n torch.nn.init.constant_(self.layer3.weight, 0.0)\r\n torch.nn.init.constant_(self.layer3.bias, 0.0) \r\n\r\n\r\n def forward(self, tensor_x_batch):\r\n y = self.layer1(tensor_x_batch)\r\n y = self.layer2(self.activation(y))\r\n y = self.layer3(self.activation(y))\r\n if self.boundary_control_type == 'none':\r\n return y.squeeze(1)\r\n elif self.boundary_control_type == 'homo_unit_cube':\r\n return torch.prod(tensor_x_batch**2-1, 1)*y.squeeze(1)\r\n elif self.boundary_control_type == 'homo_unit_sphere':\r\n return (torch.sum(tensor_x_batch**2, 1)-1)*y.squeeze(1)\r\n \r\n # to evaluate the solution with numpy array input and output\r\n def predict(self, x_batch):\r\n tensor_x_batch = torch.Tensor(x_batch)\r\n tensor_x_batch.requires_grad=False\r\n y = 
self.forward(tensor_x_batch)\r\n return y.cpu().detach().numpy()\r\n \r\n # evaluate the second derivative at for k-th coordinate\r\n def D2_exact(self, tensor_x_batch, k):\r\n y = self.forward(tensor_x_batch)\r\n tensor_weight = torch.ones(y.size())\r\n grad_y = autograd.grad(y, tensor_x_batch, grad_outputs=tensor_weight, retain_graph=True, create_graph=True, only_inputs=True)\r\n D2y_k = autograd.grad(outputs=grad_y[0][:,k], inputs=tensor_x_batch, grad_outputs=tensor_weight, retain_graph=True)[0][:,k]\r\n return D2y_k\r\n\r\n # evaluate the Laplace at tensor_x_batch\r\n def Laplace(self, tensor_x_batch):\r\n d = tensor_x_batch.shape[1]\r\n y = self.forward(tensor_x_batch)\r\n tensor_weight = torch.ones(y.size())\r\n grad_y = autograd.grad(y, tensor_x_batch, grad_outputs=tensor_weight, retain_graph=True, create_graph=True, only_inputs=True)\r\n Laplace_y = torch.zeros(y.size())\r\n for i in range(d):\r\n Laplace_y = Laplace_y + autograd.grad(outputs=grad_y[0][:,i], inputs=tensor_x_batch, grad_outputs=tensor_weight, retain_graph=True)[0][:,i]\r\n return Laplace_y\r\n\r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n ","sub_path":"network_3.py","file_name":"network_3.py","file_ext":"py","file_size_in_byte":3349,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"252210341","text":"import requests\nimport json\nimport time\nfrom nose.tools import with_setup\nimport logging\n\nlog = logging.getLogger(__name__)\nsh = logging.StreamHandler()\nlog.addHandler(sh)\nlog.setLevel(logging.INFO)\n\nadm_user = 'test@user.com'\ntest_data = type('',(object,),{})()\nbase_url = 'http://localhost:8080/api'\n\ndef _build_url(_id=None, requestor=adm_user):\n if _id is None:\n url = test_data.proj_url + '?user=' + requestor\n else:\n url = test_data.proj_url + '/' + _id + '?user=' + requestor\n return url\n\ndef setup_db():\n global session\n session = requests.Session()\n # all the requests will be performed as root\n session.params = {\n 'user': adm_user,\n 'root': True\n }\n\n # Create a group\n test_data.group_id = 'test_group_' + str(int(time.time()*1000))\n payload = {\n '_id': test_data.group_id\n }\n payload = json.dumps(payload)\n r = session.post(base_url + '/groups', data=payload)\n assert r.ok\n payload = {\n 'group': test_data.group_id,\n 'label': 'test_project',\n 'public': False\n }\n payload = json.dumps(payload)\n r = session.post(base_url + '/projects', data=payload)\n test_data.pid = json.loads(r.content)['_id']\n assert r.ok\n log.debug('pid = \\'{}\\''.format(test_data.pid))\n test_data.proj_url = base_url + '/projects/{}/notes'.format(test_data.pid)\n\ndef teardown_db():\n r = session.delete(base_url + '/projects/' + test_data.pid)\n assert r.ok\n r = session.delete(base_url + '/groups/' + test_data.group_id)\n assert r.ok\n\n@with_setup(setup_db, teardown_db)\ndef test_notes():\n url_post = test_data.proj_url\n\n data = {'text':'test note'}\n r = session.post(url_post, data=json.dumps(data))\n assert r.ok\n r = session.get(base_url + '/projects/{}?user={}'.format(test_data.pid, adm_user))\n assert r.ok\n p = json.loads(r.content)\n assert len(p['notes']) == 1\n assert p['notes'][0]['user'] == adm_user\n note_id = p['notes'][0]['_id']\n url_get = test_data.proj_url + '/' + note_id\n r = session.get(url_get)\n assert r.ok\n assert json.loads(r.content)['_id'] == note_id\n data = {'text':'modified test note'}\n r = session.put(url_get, data=json.dumps(data))\n assert r.ok\n r = session.get(url_get)\n 
assert r.ok\n assert json.loads(r.content)['text'] == 'modified test note'\n r = session.delete(url_get)\n assert r.ok\n r = session.get(url_get)\n assert r.status_code == 404\n\n","sub_path":"test/integration_tests/test_notes.py","file_name":"test_notes.py","file_ext":"py","file_size_in_byte":2463,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"248305686","text":"from __future__ import unicode_literals\n\nimport textwrap\n\nfrom cmarkgfm import cmark\n\n\ndef test_markdown_to_html():\n text = u\"Hello, **world**!\"\n result = cmark.markdown_to_html(text)\n assert result == '
<p>Hello, <strong>world</strong>!</p>\\n'\n\n\ndef test_render_html_with_extensions():\n    text = u\"Hello, https://pypa.io!\"\n    result = cmark.markdown_to_html_with_extensions(\n        text, extensions=['autolink'])\n    assert result == textwrap.dedent(\"\"\"\\\n        <p>Hello, <a href=\"https://pypa.io\">https://pypa.io</a>!</p>\\n\"\"\")\n\n\ndef test_github_flavored_markdown_to_html():\n    text = u\"Hello, https://pypa.io!\"\n    result = cmark.github_flavored_markdown_to_html(text)\n    assert result == textwrap.dedent(\"\"\"\\\n        <p>Hello, <a href=\"https://pypa.io\">https://pypa.io</a>!</p>\\n\"\"\")\n\n\ndef test_github_flavored_markdown_to_html_pre_tag():\n    text = u\"```python\\nprint('hello')\\n```\"\n    result = cmark.github_flavored_markdown_to_html(text)\n    assert result == textwrap.dedent(\"\"\"\\\n        <pre lang=\"python\"><code>print('hello')\n        </code></pre>\n    \"\"\")\n\n\ndef test_parse_document():\n    text = u\"Hello, **world**!\"\n    result = cmark.parse_document(text)\n    assert result is not None\n\n\ndef test_render_html():\n    text = u\"Hello, **world**!\"\n    root = cmark.parse_document(text)\n    result = cmark.render_html(root)\n    assert result == '<p>Hello, <strong>world</strong>!</p>\\n'\n\n\ndef test_parser_interface():\n    text = u\"Hello, **world**!\"\n    parser = cmark.parser_new()\n    cmark.parser_feed(parser, text)\n    root = cmark.parser_finish(parser)\n    result = cmark.render_html(root)\n    cmark.parser_free(parser)\n    assert result == '<p>Hello, <strong>world</strong>!</p>
\\n'\n\n\ndef test_core_extensions_ensure_registered():\n cmark.core_extensions_ensure_registered()\n\n\ndef test_find_syntax_extension():\n extension = cmark.find_syntax_extension('table')\n assert extension is not None\n\n\ndef test_find_syntax_extension_doesnt_exist():\n extension = cmark.find_syntax_extension('notarealext')\n assert extension is None\n","sub_path":"tests/test_cmark.py","file_name":"test_cmark.py","file_ext":"py","file_size_in_byte":2075,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"604616670","text":"import torch\nimport torch.nn as nn\nimport torch.functional as f\nimport torch.optim as optim\n\nimport torchvision.datasets as dataset\nimport torchvision.transforms as transforms\n\ndata = dataset.MNIST('./data', train=True, download=True, transform=transforms.ToTensor())\ntest = dataset.MNIST('./data', train=False, download=True, transform=transforms.ToTensor())\n\ndata_loader = torch.utils.data.DataLoader(dataset=data, batch_size=32)\ntest_loader = torch.utils.data.DataLoader(dataset=test, batch_size=32, drop_last = True)\n\nEPOCH = 0 + 1\n\nclass network(nn.Module):\n def __init__(self):\n super().__init__()\n self.rnn = nn.RNN(28, 64, batch_first=True, num_layers=2)\n self.fc = nn.Linear(2*64, 10)\n\n def forward(self, x):\n hidden = torch.zeros(2, 32, 64)\n output, hidden = self.rnn(x, hidden)\n hidden = torch.reshape(hidden.permute(1,0,2), (32, -1))\n out = self.fc(hidden)\n return out\n\nnet = network()\n\nloss_function = nn.CrossEntropyLoss().cuda()\noptimizer = optim.Adam(net.parameters(), lr=1e-3)\n\n\nfor i in range(EPOCH):\n for x, y in data_loader:\n x = x.squeeze()\n #x = x.cuda()\n #y = y.cuda()\n #train\n\n res = net(x)\n loss = loss_function(res, y)\n\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n inferred_tensor = torch.argmax(res, dim=-1)\n acc = torch.mean((inferred_tensor == y).to(torch.float), dim=-1)\n print('epoch %d acc %.3f'%(i,acc))\n\n#test\nnet.eval()\n\nfor x, y in test_loader:\n x = x.squeeze()\n\n res = net(x)\n\ninferred_tensor = torch.argmax(res, dim=-1)\nacc = torch.mean((inferred_tensor == y).to(torch.float), dim=-1)\nprint('test acc %.3f' %(acc))\n","sub_path":"week4/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":1671,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"415401216","text":"# -*- coding: utf-8 -*-\n#python operate mysql database\nimport sys\n#reload(sys)\n#sys.setdefaultencoding('utf8')\nimport os\nimport MySQLdb\nimport datetime\nnow = datetime.datetime.now()\nfrom PyQt4 import QtCore, QtGui, uic\nfrom method import get_conn,get_cursor,gain_length,warn_length_worker_number,warn_length_ID_number,warn_length_loan_number,warn_length_repay_number,judge_account_exist,warn_worker_number_mistake,warn_loan_number,judge_account_exist2,judge_passwd2,warn_id_card_number_mistake,query_loan_debt,gain_debt,modificate_information,insert_repayment,warn_repayment_successful\nqtCreatorFile = \"repayment.ui\" # Enter file here.\nUi_MainWindow, QtBaseClass = uic.loadUiType(qtCreatorFile)\nloan_number=''\n\nclass MyApp(QtGui.QMainWindow, Ui_MainWindow):\n def __init__(self):\n QtGui.QMainWindow.__init__(self)\n Ui_MainWindow.__init__(self)\n self.setupUi(self)\n self.add_loan_number()\n self.connect(self.comboBox, QtCore.SIGNAL('activated(QString)'), self.OnActivated)\n self.pushButton.clicked.connect(self.sure1)\n self.pushButton2.clicked.connect(self.sure2)\n\n def 
add_loan_number(self):#添加还款单号\n self.comboBox.addItem('')\n fp=open('id_card_number.txt')\n ID_number=fp.read()\n sql = 'select loan_number from loan where id_card_number = %s and debt>0'%ID_number\n conn = get_conn()\n cursor = get_cursor(conn)\n result = cursor.execute(sql)\n for row in cursor.fetchall():\n row1 = row[0]\n self.comboBox.addItem(str(row1))\n def sure1(self):#点击查询欠款以后\n self.text.setText('')#清空\n fp=open('id_card_number.txt')\n ID_number=fp.read()\n if gain_length(loan_number)==0:\n self.warn_loan_number()\n else:\n date=gain_debt(loan_number,'billing_date_of_loan','loan')#读取还清日期\n repay_date=date.strftime('%Y-%m-%d')#还清日期转换成字符串\n money=self.lineEdit.text()\n if gain_length(money)==0:\n self.warn_money()\n elif float(money)<=0:\n self.warn_money()\n else:\n money0=float(money)\n fp=open('number.txt')\n worker_number=fp.read()\n ret1=self.warn_confirm()\n if ret1==1:\n debt=gain_debt(loan_number,'debt','loan')\n debt0=float(debt)\n debt=debt0-money0\n if debt<0:\n self.warn_repay_more_than_debt()\n #os.system('repayment.py')\n elif debt==0:\n modificate_information('loan','debt',debt,'loan_number',loan_number)#更新最新债务\n whether_to_pay_off=1\n modificate_information('loan','whether_to_pay_off',whether_to_pay_off,'loan_number',loan_number)\n insert_repayment(worker_number,loan_number,money)\n repayment_date = datetime.date.today()\n sql = \"select * from repayment where loan_number = %s and repayment_amount = %s and date=%s \"\n pa = (loan_number,money,repayment_date)\n conn = get_conn()\n cursor = get_cursor(conn)\n result = cursor.execute(sql,pa)\n row = cursor.fetchone()\n self.text.append(u'业务员工号:'+str(row[1]))\n self.text.append(u'客户身份证号:'+ID_number)\n self.text.append(u'对应贷款单号:'+loan_number)\n self.text.append(u'本次还款单号:'+str(row[0]))\n self.text.append(u'本次还款日期:'+str(row[4]))\n self.text.append(u'本次还款金额:'+money)\n self.text.append(u'剩余欠款:'+str(debt))\n warn_repayment_successful()\n self.warn_repay_finish()\n elif debt>0:\n modificate_information('loan','debt',debt,'loan_number',loan_number)#更新最新债务\n insert_repayment(worker_number,loan_number,money)\n repayment_date = datetime.date.today()\n sql = \"select * from repayment where loan_number = %s and repayment_amount = %s and date=%s \"\n pa = (loan_number,money,repayment_date)\n conn = get_conn()\n cursor = get_cursor(conn)\n result = cursor.execute(sql,pa)\n row = cursor.fetchone()\n self.text.append(u'业务员工号:'+str(row[1]))\n self.text.append(u'客户身份证号:'+ID_number)\n self.text.append(u'对应贷款单号:'+loan_number)\n self.text.append(u'本次还款单号:'+str(row[0]))\n self.text.append(u'本次还款日期:'+str(row[4]))\n self.text.append(u'本次还款金额:'+money)\n self.text.append(u'剩余欠款:'+str(debt))\n self.text.append(u'贷款还清日期:'+repay_date)\n warn_repayment_successful()\n self.warn_repay_on_time()\n #else:\n #os.system('repayment.py')\n \n def sure2(self):#点击返回以后\n os.system('structure_choose.py')\n def warn_confirm(self):#弹出提醒\n qtm=QtGui.QMessageBox\n msg_box = qtm.question(self, u\"Warning\", u\"请确认还款金额?\",qtm.Yes| qtm.Cancel) ##这行最后一个参数为默认按钮,没有起作用\n if msg_box == qtm.Yes:\n return 1\n elif msg_box == qtm.Cancel: \n return 0\n\n def warn_repay_more_than_debt(self):#还款金额大于欠款金额\n qtm=QtGui.QMessageBox\n msg_box = qtm(qtm.Warning, u\"Warning\", u\"还款金额大于欠款金额,请先查询欠款金额再进行还款!\",qtm.Yes) ##这行最后一个参数为默认按钮,没有起作用\n msg_box.exec_()\n if msg_box == qtm.Yes:\n self.label.setText(\"Question button/Ok\")\n else: \n return\n def warn_service_type(self):#业务类型不能为空\n qtm=QtGui.QMessageBox\n msg_box = qtm(qtm.Warning, u\"Warning\", u\"业务类型不能为空,请重新填写!\",qtm.Yes) 
##这行最后一个参数为默认按钮,没有起作用\n msg_box.exec_()\n if msg_box == qtm.Yes: \n self.label.setText(\"Question button/Ok\")\n else: \n return\n def warn_loan_number(self):#贷款单号不能为空\n qtm=QtGui.QMessageBox\n msg_box = qtm(qtm.Warning, u\"Warning\", u\"贷款单号不能为空,请重新填写!\",qtm.Yes) ##这行最后一个参数为默认按钮,没有起作用\n msg_box.exec_()\n if msg_box == qtm.Yes: \n self.label.setText(\"Question button/Ok\")\n else: \n return\n def warn_repay_finish(self):#恭喜此次贷款已还清\n qtm=QtGui.QMessageBox\n msg_box = qtm(qtm.Warning, u\"Warning\", u\"恭喜此次贷款已还清!\",qtm.Yes) ##这行最后一个参数为默认按钮,没有起作用\n msg_box.exec_()\n if msg_box == qtm.Yes: \n self.label.setText(\"Question button/Ok\")\n else: \n return\n def warn_repay_on_time(self):#请按时返还剩余欠款\n qtm=QtGui.QMessageBox\n msg_box = qtm(qtm.Warning, u\"Warning\", u\"请按时返还剩余欠款!\",qtm.Yes) ##这行最后一个参数为默认按钮,没有起作用\n msg_box.exec_()\n if msg_box == qtm.Yes: \n self.label.setText(\"Question button/Ok\")\n else: \n return\n def warn_money(self):#还款金额格式不正确提醒(OK)\n qtm=QtGui.QMessageBox\n msg_box = qtm(qtm.Warning, u\"Warning\", u\"还款金额格式不正确,请重新填写!\",qtm.Yes) ##这行最后一个参数为默认按钮,没有起作用\n msg_box.exec_()\n if msg_box == qtm.Yes: \n self.label.setText(\"Question button/Ok\")\n else: \n return\n def OnActivated(self,txt):\n global loan_number\n loan_number=txt#贷款单号\n\nif __name__ == \"__main__\":\n app = QtGui.QApplication(sys.argv)\n window = MyApp()\n window.show()\n sys.exit(app.exec_())\n","sub_path":"软工课设最终版/软件工程课设(最终版)/repayment.py","file_name":"repayment.py","file_ext":"py","file_size_in_byte":8739,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"121854441","text":"import botcoin\n\nclass MovingAverage(botcoin.Strategy):\n def initialize(self):\n self.SYMBOL_LIST = ['CSL','IAG','MQG','NAB','ORG','QBE','RIO','SCG','SUN','TLS']\n\n self.DATE_FROM = '2014'\n self.DATE_TO = '2015'\n\n self.MAX_LONG_POSITIONS = 5\n self.MAX_SHORT_POSITIONS = 0\n self.COMMISSION_FIXED = 0.0\n self.COMMISSION_PCT = 0.0008\n self.COMMISSION_MIN = 6.0\n\n self.fast = self.get_arg(0, 5)\n self.slow = self.get_arg(1, 15)\n\n def after_close(self):\n for symbol in self.SYMBOL_LIST:\n try:\n slow = self.market.bars(symbol, self.slow).mavg('close')\n fast = self.market.bars(symbol, self.fast).mavg('close')\n\n if self.is_neutral(symbol) and fast > slow:\n self.buy(symbol)\n elif self.is_long(symbol) and fast < slow:\n self.sell(symbol)\n except botcoin.BarError as e:\n pass\n","sub_path":"tests/test-strategies/4.py","file_name":"4.py","file_ext":"py","file_size_in_byte":983,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"471193050","text":"#!/usr/bin/python\n\nimport os\nimport math\n#import heapq #needed if we want to use a priority queue\nimport graph\nimport tkinter as tk\n\n\nclass TownGraph:\n\n def __init__(self,num_vertices):\n\n self.__graph_vertices=[graph.Graph(vertex_name=i+1) for i in range(num_vertices)]\n self.path_str=None\n\n #constant dictionary mapping each vertex number to the name of a town\n #change if the number of vertices changes\n self.__vertex_map=[\n \"None\",\n \"Nakuru\",\n \"Thika\",\n \"Kisumu\",\n \"Nairobi\",\n \"Garissa\",\n \"Mombasa\",\n \"Malindi\"\n ]\n self.__idx_map={\n \"Nakuru\":1,\n \"Thika\":2,\n \"Kisumu\":3,\n \"Nairobi\":4,\n \"Garissa\":5,\n \"Mombasa\":6,\n \"Malindi\":7\n }\n\n def mapVertexIndex(self,vertex_name:str):\n return self.__idx_map[vertex_name]\n\n\n def getGraphVertices(self):\n return self.__graph_vertices\n\n def 
 addEdge(self,v:graph.Graph,adj_v:graph.Graph,cost:int):\n #where v is a vertex and adj_v is the corresponding adjacent vertex \n #cost is basically the distance or whatever\n self.__graph_vertices[v-1].addAdjacentVertex(vertex=self.__graph_vertices[adj_v-1],cost=cost)\n\n def createGraph(self):\n #basically adding edges and their costs\n #this is doing it manually\n\n # Test digraph\n # self.addEdge(v=1,adj_v=2,cost=2)\n # self.addEdge(1,4,1)\n # self.addEdge(2,4,3)\n # self.addEdge(2,5,10)\n # self.addEdge(3,1,4)\n # self.addEdge(3,6,5)\n # self.addEdge(4,3,2)\n # self.addEdge(4,5,2)\n # self.addEdge(4,6,8)\n # self.addEdge(4,7,4)\n # self.addEdge(5,7,6)\n # self.addEdge(7,6,1)\n\n #town graph\n self.addEdge(v=1,adj_v=2,cost=166)\n self.addEdge(1,3,185)\n self.addEdge(1,4,166)\n self.addEdge(1,5,495)\n #self.addEdge(2,1,166)\n self.addEdge(2,4,45)\n self.addEdge(2,5,322)\n self.addEdge(3,1,185)\n self.addEdge(3,4,346)\n #self.addEdge(3,6,829)\n self.addEdge(4,1,166)\n self.addEdge(4,2,45)\n self.addEdge(4,3,346)\n self.addEdge(4,5,373)\n self.addEdge(4,6,490)\n #self.addEdge(4,7,466)\n self.addEdge(5,1,495)\n self.addEdge(5,2,322)\n self.addEdge(5,4,373)\n self.addEdge(5,7,347)\n #self.addEdge(6,3,829)\n self.addEdge(6,4,490)\n self.addEdge(6,7,115)\n #self.addEdge(7,4,466)\n self.addEdge(7,5,347)\n self.addEdge(7,6,115)\n\n def setStartVertex(self,vertex_idx):\n self.__start_vertex=self.__graph_vertices[vertex_idx]\n\n\n def setSentinel(self):\n for vertex in self.__graph_vertices:\n vertex.distance=math.inf\n vertex.known=False\n vertex.path=None\n\n def findMinimumVertex(self):\n #best would be to use a priority queue\n #but using a priority queue is a bit complicated\n cost=math.inf\n mcv=None #minimum cost vertex\n\n for vertex in self.__graph_vertices:\n if vertex.distance<cost and not vertex.known:\n cost=vertex.distance\n mcv=vertex\n\n return mcv\n\n def breadthFirstSearch(self):\n #for breadth first search we use a queue\n #using python list basically append and then use pop(0)\n\n queue=list()\n\n self.setSentinel()\n\n self.__start_vertex.distance=0\n queue.append(self.__start_vertex)\n\n while len(queue)>0:\n vertex=queue.pop(0)\n\n for adjacent_vertex_tuple in vertex.getAdjacentVertex():\n adjacent_vertex=adjacent_vertex_tuple[0]\n edge_cost=adjacent_vertex_tuple[1]\n if adjacent_vertex.distance==math.inf:\n adjacent_vertex.distance=vertex.distance+edge_cost\n adjacent_vertex.path=vertex\n queue.append(adjacent_vertex)\n\n def depthFirstSearch(self):\n #for depth first search we use a stack\n #using python list basically append and then use pop()\n\n stack=list()\n\n self.setSentinel()\n\n self.__start_vertex.distance=0\n stack.append(self.__start_vertex)\n\n while len(stack)>0:\n vertex=stack.pop()\n\n for adjacent_vertex_tuple in vertex.getAdjacentVertex():\n adjacent_vertex=adjacent_vertex_tuple[0]\n edge_cost=adjacent_vertex_tuple[1]\n if adjacent_vertex.distance==math.inf:\n adjacent_vertex.distance=vertex.distance+edge_cost\n adjacent_vertex.path=vertex\n stack.append(adjacent_vertex)\n\n #A* search, which uses the same idea as uniform-cost search, reduces to Dijkstra's algorithm here since no heuristic is added\n def AStarSearch(self):\n num_vertices=len(self.__graph_vertices)\n no_unknowns=num_vertices\n\n self.setSentinel()\n\n self.__start_vertex.distance=0\n\n while no_unknowns:\n mcv=self.findMinimumVertex()\n mcv.known=True\n no_unknowns-=1\n\n for adjacent_vertex_tuple in mcv.getAdjacentVertex():\n adjacent_vertex=adjacent_vertex_tuple[0]\n\n if not adjacent_vertex.known:\n #cost of edge (mcv,adjacent_vertex)\n cost_mcv_adjacent_vertex=adjacent_vertex_tuple[1]\n\n if (mcv.distance+cost_mcv_adjacent_vertex<adjacent_vertex.distance):\n adjacent_vertex.distance=mcv.distance+cost_mcv_adjacent_vertex\n adjacent_vertex.path=mcv\n\n def print_Path(self):\n #print, for every vertex, the distance found by the last search and the vertex we came from\n for vertex in self.__graph_vertices:\n print(vertex,\"distance:\",vertex.distance,\"via:\",vertex.path)\n\n def printGraph(self):\n for vertex in self.__graph_vertices:\n print(vertex,\"...............>\",end=\" \")\n vertex.showAdjacentVertices()\n\n\n\n\nif __name__==\"__main__\":\n print(\"Enter the number of vertices: \",end=\"\")\n n=int(input())\n\n town_graph=TownGraph(num_vertices=n)\n
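 #user input below is 1-based, so vertices[vert-1] maps it onto this Node list\n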
 vertices=town_graph.getGraphVertices()\n\n for vertex in town_graph.getGraphVertices():\n while True:\n print(\"Enter the vertices adjacent to \",vertex,\" -1 to quit\")\n vert=int(input())\n\n if vert==-1:\n break\n\n print(\"Enter the cost associated with the edge (%s,%d): \"%(vertex,vert),end=\"\")\n cost=int(input())\n\n vertex.addAdjacentVertex(vertex=vertices[vert-1],cost=cost)\n\n os.system(\"clear\")\n\n print(\"Vertex ...............> adjacent vertices\")\n\n town_graph.printGraph()\n\n town_graph.setStartVertex(0)\n print(\"using breadth first search algorithm\")\n town_graph.breadthFirstSearch()\n town_graph.print_Path()\n\n print(\"using depth first search algorithm\")\n town_graph.depthFirstSearch()\n town_graph.print_Path()\n\n print(\"using A* Search\")\n town_graph.AStarSearch()\n town_graph.print_Path()\n","sub_path":"towns_graph.py","file_name":"towns_graph.py","file_ext":"py","file_size_in_byte":9085,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"581115233","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Mar 20 17:49:51 2017\n\n@author: ssprau\n\"\"\"\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport glob as glob \nimport os as os \n\npath =r'\\\\bus.local\\files\\Battery Engineering\\Test_Data\\Validation Tests\\C-Sample\\Track 2 - Thermal Durability\\P-001-C5\\4 - 50% PTCE\\Test Data' \nP001PTCE50= glob.glob(os.path.join(path, \"*.csv\"))\n\n\n\ndf_from_each_file = (pd.read_csv(x,header=10) for x in P001PTCE50)\n\n\ncombined= pd.concat(df_from_each_file, ignore_index=True, )\n\ncombined.set_index('Time stamp', inplace=True)\n\nPressures=combined[['SupplyPressure','ReturnPressure','moduleTemperature01_BMS1',\n 'moduleTemperature02_BMS1', 'moduleTemperature03_BMS1',\n 'moduleTemperature04_BMS1', 'moduleTemperature05_BMS1',]]\n\nModuleTemp=combined[['moduleTemperature01_BMS1',\n 'moduleTemperature02_BMS1', 'moduleTemperature03_BMS1',\n 'moduleTemperature04_BMS1', 'moduleTemperature05_BMS1',]]\n\n\n\n\n\npd.options.mode.chained_assignment = None \n\n\n\n\nPressures['Change In PSI']=Pressures.SupplyPressure-Pressures.ReturnPressure\n \n \nDeltaP=Pressures[['Change In PSI']]\n\n\nprint(DeltaP.max())\nprint(DeltaP.min())\nprint(DeltaP.mean())\n\n\n\n\n\n \n \n\n\n","sub_path":"Other projects/Delta P analysis for flow rate.py","file_name":"Delta P analysis for flow rate.py","file_ext":"py","file_size_in_byte":1245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"428771179","text":"#47.\tWrite an algorithm or the corresponding flowchart that finds the sum of the natural numbers contained between two numbers n and m (check that m>n)\r\nprint(\"*****Suma de los números naturales*****\")\r\nprint(\"Ingrese dos núemeros (recuerde que el primer número debe ser menor al segundo)\")\r\nn=int(input())\r\nm=int(input())\r\nsum=0\r\nif m>n:\r\n for i in range(n+1,m):\r\n sum=i+sum\r\n print(f\"La suma de los números entre ellos: {sum}\")\r\nelse:\r\n print(\"Los números no son válidos..:(\")","sub_path":"Punto 47.py","file_name":"Punto 47.py","file_ext":"py","file_size_in_byte":512,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"447131570","text":"#from __future__ import print_function\nimport pandas as pd\nimport glob, os, sys\nimport csv\nfrom datetime import datetime as dt\nimport openpyxl\n\n\nfill_character = 0 # Alternately, '' - looks a bit ugly in .txt\n\n\ndef 
source_files_check(filename):\n\n # Looks for files in the current directory matching a filename prefix, \n # asks for clarification if there are more than one, and complains \n # if it can't find any.\n\n files = glob.glob(filename+'*')\n if not files:\n print('ERROR: No source file found. Please refer to https://github.com/BenQuigley/Check-In-Counts-Report for '\\\n 'instructions on how to create a source file for this report.\\n')\n raise Exception('There was an error with the source data.')\n elif len(files) > 1:\n print('More than one file found. Please select the one containing today\\'s most current check-in data:\\n')\n for i, file in enumerate(files):\n print(i+1, ': ', file, sep='')\n filename = files[int(input('>').strip())-1]\n return filename\n\n\ndef process_headers(row):\n # Add a display date to the headers.\n new_headers = row\n display_date_needed = True\n print('Parsing headers. Please wait.')\n for header in row:\n if header == 'Check-In Date':\n display_date_needed = False\n if display_date_needed:\n new_headers.append('Check-In Date')\n return new_headers\n\n\ndef get_metadata(csv_object, today):\n\n # Creates a dictionary called 'terms' that stores date-related information,\n # instead of storing the dates in variables.\n\n r = 0\n done = False\n terms = {}\n max_year = 0\n for row in csv_object:\n if r == 0:\n header_row = process_headers(row)\n elif r > 0 and len(terms) < 2:\n term = {}\n id = row[header_row.index('Term')]\n term['year'] = int(float(id[:4]))\n term['start date'] = row[header_row.index('Term Start Date')]\n if not id in terms:\n terms[id] = term\n if term['year'] > max_year:\n max_year = term['year']\n else:\n done = True\n r += 1\n if done:\n break\n\n for record in terms:\n if terms[record]['year'] == max_year:\n terms[record]['current year'] = True\n cur_start_date = to_date(terms[record]['start date'])\n else:\n terms[record]['current year'] = False\n\n for record in terms:\n if terms[record]['year'] != max_year:\n py_date = today-cur_start_date + to_date(terms[record]['start date'])\n terms[record]['py date'] = py_date\n user_date = input('Last year\\'s date was {}. \\nIf that\\'s wrong, enter the correct date here in the '\\\n 'following format: 17-May-16. 
Otherwise, just press return.\n> '\n .format(str(terms[record]['py date']).split()[0]))\n if user_date:\n terms[record]['py date'] = pd.to_datetime(user_date, format='%d-%b-%y')\n\n return terms, header_row\n\n\ndef update_data(reader, col_nums, terms, today):\n new_rows = []\n next(reader)\n r = 0\n bar = '#'\n print(bar,' Parsing row ',str(r),', please wait.',sep='')\n for row in reader:\n r += 1\n if r % 250 == 0:\n bar += '#'\n print(bar,' Parsing row ',str(r),', please wait.',sep='')\n new_row = row\n if new_row[col_nums.index('Term')] == '':\n print('Skipping blank row at row ',str(r),'.',sep='')\n continue\n swiped_date = to_date(new_row[col_nums.index('Swiped Date')])\n if swiped_date is not pd.NaT: #pd.NaT is a singleton, so an identity check is the reliable test\n display_date = str(swiped_date.month)+'/'+str(swiped_date.day)+'/'+str(swiped_date.year)\n else:\n display_date = 'None'\n new_row.append(display_date)\n status = new_row[col_nums.index('Status')]\n term = new_row[col_nums.index('Term')]\n current = terms[term]['current year']\n if swiped_date == today:\n new_row[col_nums.index('Status')] = 'Checked In Today'\n elif current == False:\n py_date = to_date(terms[term]['py date'])\n if swiped_date == py_date:\n new_row[col_nums.index('Status')] = 'Checked In Today One Year Ago'\n elif swiped_date < py_date:\n new_row[col_nums.index('Status')] = 'Checked In by Today One Year Ago'\n elif status == 'Checked In':\n new_row[col_nums.index('Status')] = 'Registered'\n new_rows.append(new_row)\n return new_rows\n\n\ndef write_updated_data(filename, new_rows):\n write_file = open(filename, 'w', encoding='UTF-8')\n writer = csv.writer(write_file, delimiter=',', quotechar='\"',quoting=csv.QUOTE_NONNUMERIC)\n writer.writerows(new_rows)\n write_file.close()\n print('Data updated successfully.')\n\n\ndef pandas_load_data(filename):\n\n # This gets the file from the Downloads folder on a Mac.\n\n data_file = pd.read_csv(filename, header=0,quotechar='\"')\n real_data = data_file[data_file['ID'].notnull()]\n\n # quotechar argument is null, because we rewrote the CSV file\n # in the update() function\n\n # And parsing out the undergraduates and graduates for later reference.\n\n undergrads = real_data[real_data['Level'] != 'GR']\n grads = real_data[real_data['Level'] == 'GR']\n lines = len(real_data.index)\n print('Data loaded successfully; ',lines,' lines.',sep='')\n return real_data, undergrads, grads\n\n\ndef move_data(file, directory, new_directory):\n path = directory+file\n new_path = new_directory+file\n if not os.path.exists(new_directory):\n os.makedirs(new_directory)\n os.rename(path, new_path)\n\ndef downloads_folder_work(full_filename, cur_date):\n\n start_directory = os.getcwd()\n target_directory = os.path.expanduser(full_filename[0])\n target_filename = full_filename[1]+full_filename[2]\n data_directory = target_directory+'Check-In Counts Data/'\n new_filename = full_filename[1]+' [updated by Python] '+str(cur_date)[:10]+full_filename[2]\n os.chdir(target_directory)\n\n full_filename[1] = source_files_check(full_filename[1])\n\n read_file = open(target_filename,\"r\",encoding='UTF-8')\n reader = csv.reader(read_file,delimiter=',', quotechar='\"')\n\n new_data = []\n terms, new_headers = get_metadata(reader, cur_date)\n new_data.append(new_headers)\n read_file.seek(0)\n new_rows = update_data(reader, new_headers, terms, cur_date)\n for new_row in new_rows:\n new_data.append(new_row)\n write_updated_data(new_filename, new_data)\n\n data, undergrads, grads = pandas_load_data(new_filename)\n print('Moving the updated data file to', data_directory)\n
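 #archive the freshly written copy under the Check-In Counts Data/ folder\n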
 move_data(new_filename, target_directory, data_directory)\n dated_filename = full_filename[1] + ' ' + str(cur_date).split()[0] + full_filename[2]\n move_source_data_input = input('Rename your original download to {} and move it to {}? (Y/n)\n> '\n .format(dated_filename, data_directory))\n move_source_data = True if move_source_data_input.lower() == 'y' else False\n if move_source_data:\n print('OK, moving the original data file to', data_directory)\n os.rename(target_directory + target_filename, target_directory + dated_filename)\n move_data(dated_filename, target_directory, data_directory)\n else:\n print('OK, leaving the original data file in',target_directory,)\n os.chdir(start_directory)\n return data, undergrads, grads, terms\n\n\ndef compare(data, data_descr, pivot_rows, term_string, prev_term_string, excel_writer):\n # This is not working right now.\n\n # This is for publishing data when we have to compare this term to last term,\n # and only count someone as checked in if they have checked in \"by this date last term.\"\n\n cur = data[data['Term'] == term_string]\n ly = data[data['Term'] == prev_term_string]\n ly_table = pivot_table(pivot_rows, ly)\n cur_table = pivot_table(pivot_rows, cur) # The problem is that this comes out as an empty pivot table\n if len(cur_table.index):\n cur_table.to_excel(excel_writer, data_descr)\n if len(ly_table.index):\n ly_table.to_excel(excel_writer, data_descr, startrow=len(cur_table) + 2)\n\n print('Writing to Check-In Counts Excel sheet; tab = ', data_descr)\n\n\ndef pivot_table(pivot,dataframe):\n\n # Make pivot tables of whatever's requested.\n\n table = pd.pivot_table(dataframe,values='ID', index=pivot,columns='Status',aggfunc='count',fill_value=fill_character)\n return table\n\n\ndef publish(data, term, pivot, data_string, excel_writer):\n\n # This is for publishing data when we only care about the present term.\n undergrads = data[data['Term'] == term]\n table = pivot_table(pivot, undergrads)\n if len(table.index):\n table.to_excel(excel_writer, data_string)\n print('Writing to Check-In Counts Excel sheet; tab = ', data_string)\n\n\ndef to_date(date_string):\n date = pd.to_datetime(date_string, infer_datetime_format=True)\n return date\n\n\ndef print_pivots(undergrads, grads, terms):\n today = dt.today()\n for record in terms:\n if terms[record]['current year'] == True:\n cur_term = record\n else:\n prev_term = record\n output = 'Registration Counts for '+cur_term+' ['+str(today.month)+'-'+str(today.day)+'].xlsx'\n writer = pd.ExcelWriter(output)\n compare(undergrads, 'UG by Enrollment Source', ['Term', 'Source'], cur_term, prev_term, writer)\n compare(grads, 'Graduates', ['Term', 'Source'], cur_term, prev_term, writer)\n publish(undergrads, cur_term, 'Instrument Family', cur_term+' UG Instr Fam', writer)\n publish(undergrads, cur_term, 'Major', cur_term+' UG Majors', writer)\n writer.save()\n adjust_sheet_columns(output)\n\n\ndef make_pivots():\n print('Printing pivot tables.')\n today = dt.today()\n con = input('Today is {}; if that\'s correct, just press return.\n> '.format(str(today).split()[0]))\n if con:\n today = input('Enter today\'s date in the following format: '\\\n '17-May-16\n> ').strip()\n today = pd.to_datetime(today, format='%d-%b-%y')\n print(\"OK, today is\", str(today).split()[0])\n filename = ['~/Downloads/','Check-In Counts Data for Python','.csv']\n data, undergrads, grads, terms = downloads_folder_work(filename, today)\n if input('Want to print these pivot tables to a file? 
(Y/n)\\n> ')[0].lower() == 'y':\n print_pivots(undergrads, grads, terms)\n\n\ndef adjust_sheet_columns(output_file):\n ws = openpyxl.load_workbook(output_file)\n dims = {}\n for sheet in ws.worksheets:\n for row in sheet.rows:\n for cell in row:\n if cell.value:\n dims[cell.column] = max((dims.get(cell.column, 0), len(str(cell.value))))\n for col, value in dims.items():\n sheet.column_dimensions[col].width = value + 2\n ws.save(output_file)\n\n\ndef main():\n print(\"What do you want to do?\")\n options = {1: {'name': 'Make pivots', 'function': make_pivots}}\n for option in options:\n print(option, ': ', options[option]['name'], sep='')\n userchoice = int(input('> ').strip())\n options[userchoice]['function']()\n\nmain()\n","sub_path":"create_report.py","file_name":"create_report.py","file_ext":"py","file_size_in_byte":11414,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"453820139","text":"# 1. build tree is O(nlogn), get & put & delete is O(logn)\n#\n# 2. Get:\n#\n# Use Node `cur` to do iterative traversal.\n#\n# 3. Put:\n#\n# Use `put()` and `_put()` to do recursive traversal.\n#\n# 4. Delete:\n#\n# Use `delete()` and `_delete()` to do recursive traversal.\n#\n# Find the successor:\n#\n# [ cur ] <---- If the right is None, the left node\n# / \\ is our successor.\n# / \\\n# [ ? ] [ None ]\n#\n#\n# [ cur ] <---- If the right is not None, we have to find\n# / \\ the leftest node in cur.right and it is \"X\".\n# / \\\n# [ ? ] [ ? ]\n# /\n# /\n# [ X ]\n#\n# 5. Rank:\n#\n# Number of nodes less than keys.\n# Use `rank()` and `_rank()` to do recursive traversal.\n#\n# --END--\n\n\nclass Node:\n def __init__(self, key, val):\n self.key = key\n self.val = val\n self.count = 1\n self.left = self.right = None\n\n\nclass BST:\n def __init__(self):\n self.root = None\n\n def get(self, key):\n cur = self.root\n while cur:\n if cur.key == key:\n return cur.val\n elif key < cur.key:\n cur = cur.left\n else:\n cur = cur.right\n return None\n\n def put(self, key, val):\n self.root = self._put(self.root, key, val)\n\n def _put(self, node, key, val):\n if node is None:\n return Node(key, val)\n if node.key == key:\n node.val = val\n elif key < node.key:\n node.left = self._put(node.left, key, val)\n else:\n node.right = self._put(node.right, key, val)\n\n # count is the total number of nodes include itself\n node.count = 1 + self._size(node.left) + self._size(node.right)\n\n return node\n\n def delete(self, key):\n self.root = self._delete(self.root, key)\n\n def _delete(self, node, key):\n if node is None:\n return None\n if key == node.key:\n # find successor and delete\n if node.right is None:\n return node.left\n suc = node.right\n while suc.left is not None:\n suc = suc.left\n node.key, node.val = suc.key, suc.val\n # after swapping, we delete it again\n node.right = self._delete(node.right, suc.key)\n elif key < node.key:\n node.left = self._delete(node.left, key)\n else:\n node.right = self._delete(node.right, key)\n node.count = 1 + self._size(node.left) + self._size(node.right)\n return node\n\n def rank(self, key):\n return self._rank(self.root, key)\n\n def _rank(self, node, key):\n # number of keys less than key.\n if node is None:\n return 0\n if key == node.key:\n return self._size(node.left)\n elif key < node.key:\n return self._rank(node.left, key)\n else:\n return 1 + self._size(node.left) + self._rank(node.right, key)\n\n def _size(self, node):\n if node is None:\n return 0\n return node.count\n\n def show(self):\n 
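# in-order traversal prints every key in ascending order\n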
 self._show(self.root)\n\n def _show(self, node):\n if node is None:\n return\n self._show(node.left)\n print(node.key)\n self._show(node.right)\n","sub_path":"algorithms-well-known/_binary_search_tree.py","file_name":"_binary_search_tree.py","file_ext":"py","file_size_in_byte":3377,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"442728901","text":"#!/usr/bin/python3\n# coding: iso-8859-1\n\n\"\"\"\n The Param class stores parameters in files.\n Initialisation: requires a path to the storage folder\n Set: defines a parameter by writing it to a file and adding it to the class.\n param, val: creates the file _param.txt, or replaces it, with val as its content\n {p1:v1, p2:v2, ...}: defines the parameters p1, p2,... with the respective values v1, v2,...\n Get: retrieves a parameter and updates the variable in the class.\n param: returns the text of the file _param.txt, or False if it does not exist\n [p1, p2, ...]: returns a list containing the values of p1, p2,...\n\"\"\"\n\nimport os\nclass Param :\n def __init__(self, dossier):\n self.path = dossier +\"/\"\n\n def set(self, **doublets):\n l = []\n for parametre, valeur in doublets.items():\n f = open(self.path + \"_\"+ parametre +\".txt\",\"w\")\n f.write(str(valeur))\n f.close()\n self.__dict__[parametre] = valeur\n l.append(valeur)\n if len(l) == 1 : return l[0]\n else: return l\n\n def get(self, *parametres, fonction = \"\"):\n l = []\n for doublet in parametres:\n fun = \"\"\n if type(doublet) is tuple:\n parametre = doublet[0]\n fun = doublet[1]\n else: parametre = doublet\n fichier = self.path + \"_\"+ parametre +\".txt\"\n if os.path.exists(fichier):\n f = open(fichier,'r')\n valeur = f.read()\n f.close()\n if fun:\n valeur = eval(fun +\"('\"+ valeur +\"')\")\n elif fonction:\n valeur = eval(fonction +\"('\"+ valeur +\"')\")\n self.__dict__[parametre] = valeur\n l.append(valeur)\n else:\n l.append(False)\n if len(l) == 1 : return l[0]\n else: return l","sub_path":"param.py","file_name":"param.py","file_ext":"py","file_size_in_byte":1753,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"276614854","text":"from .window import SubWindow\nfrom .string_buffer import StringBuffer\n\nimport traceback\n\nclass Control(SubWindow):\n\n def __init__(self):\n super().__init__()\n\n def run(self, key=None):\n return key\n\n def set_base_corner(self, x=0, y=0):\n self.base_corner = (x, y)\n anchor = self.parent.get_anchor()\n self.x = 0\n self.y = 0\n self.screen.set_base_corner(self.screen_id, x + anchor[0], y + anchor[1])\n\n def get_size(self) -> tuple:\n ...\n\n\nclass Button(Control):\n caption = \"\"\n on_enter = None\n\n def __init__(self, caption=''):\n super(Button, self).__init__()\n self.select_background_clr = 'red'\n self.background_clr = 'olive'\n self.selection = True\n self.caption = caption\n self.x = 0\n self.y = 0\n self.rx = 0\n self.ry = 0\n\n def set_base_corner(self, x=0, y=0):\n super(Button, self).set_base_corner(x, y)\n # self.rx = len(self.caption) + 6 - self.base_corner[0]\n # self.ry = 1 - self.base_corner[1]\n\n def get_size(self) -> tuple:\n self.work_size()\n return (len(self.caption) + 2, 1)\n\n def work_size(self):\n self.wl_x = self.x\n self.wl_y = self.y\n self.wr_x = self.x + len(self.caption) + 2\n self.wr_y = self.y + 1\n\n w = (self.wr_x - self.wl_x)\n h = (self.wr_y - self.wl_y)\n self.string_buffer.set_work_size(self.wl_x, self.wl_y, h, w)\n\n def 
set_right_corner(self, x, y):\n ...\n\n def __clear_scr(self):\n height = self.ry - self.y\n width = self.rx - self.x\n if self.selection:\n clr = getattr(self.term, 'on_' + self.select_background_clr)\n else:\n clr = getattr(self.term, 'on_' + self.background_clr)\n\n for i in range(width):\n for j in range(height):\n self.screen.echo(self.screen_id, i, j, f'{clr} {self.term.normal}{clr}')\n\n def on_focus(self, focus: bool = False):\n self.selection = focus\n self.on_paint()\n\n def render(self):\n if self.selection:\n clr = getattr(self.term, 'on_' + self.select_background_clr)\n else:\n clr = getattr(self.term, 'on_' + self.background_clr)\n self.__clear_scr()\n self.screen.echo(self.screen_id, 1, 0, f'{clr}[{self.caption}]{self.term.normal}{clr}')\n\n def run(self, key=None):\n key = super(Button, self).run(key)\n if key is not None and key.is_sequence:\n if key.name == 'KEY_ENTER':\n if self.on_enter is not None and callable(self.on_enter):\n self.on_enter()\n return None\n return key\n\nclass Edit(Control):\n caption = \"\"\n size = 0\n __begin = 0\n __end = 0\n # __string_buffer = None\n\n def __init__(self, caption=''):\n super(Edit, self).__init__()\n self.screen_id = self.screen.bind()\n self.edit_field_clr = 'darkgrey'\n self.background_clr = self.edit_field_clr\n self.caption_bck_clr = \"magenta\"\n self.__cursor_pos = 0\n self.string_buffer = StringBuffer(self.text_clr, self.background_clr, self.screen, self.screen_id)\n self.caption = caption\n\n def text(self):\n return self.string_buffer.get_string(0)\n\n def get_size(self) -> tuple:\n return (self.rx, self.ry)\n\n def render(self):\n caption_bck_clr = getattr(self.term, 'on_' + self.caption_bck_clr)\n edit_field_bck_clr = getattr(self.term, 'on_' + self.edit_field_clr)\n\n cursor_color = f'bold_{self.text_clr}_reverse'\n cursor_clr = getattr(self.term, cursor_color)\n blinking_mode = \"\\x1b[5m\"\n\n if not self.focus:\n cursor_clr = u''\n blinking_mode = u''\n\n self.screen.echo(self.screen_id, self.x, self.y, f'{caption_bck_clr}{self.caption}{self.term.normal}{caption_bck_clr}')\n for ii in range(self.size):\n self.screen.echo(self.screen_id,\n self.x + len(self.caption) + ii,\n self.y,\n f'{edit_field_bck_clr} {self.term.normal}{caption_bck_clr}')\n\n self.string_buffer.string_buffer_render(self.__begin, self.__end)\n\n symbol = self.get_string_buffer_symbol(self.__begin + self.__cursor_pos, 0)\n\n if symbol == u'':\n self.debug.write(f'Edit: {self.focus}\\n')\n if self.focus:\n symbol = '\\u2588'\n else:\n symbol = '\\u2395'\n\n self.screen.echo(self.screen_id,\n self.x + len(self.caption) + self.__cursor_pos,\n self.y,\n f'{edit_field_bck_clr}{cursor_clr}{blinking_mode}{symbol}{self.term.normal}{edit_field_bck_clr}')\n\n\n def on_paint(self):\n self.screen.begin()\n # self.work_size()\n self.render()\n self.screen.end()\n\n def on_focus(self, focus: bool = False):\n trace = traceback.extract_stack()\n self.debug.write(f'Edit: on_focus = {focus}\\n')\n self.debug.write(f'Edit = {trace}\\n')\n self.focus = focus\n self.work_size()\n if self.focus:\n self.main_wnd._focused_sub_wnd(self.handle, self, True)\n self.parent = self.main_wnd\n\n def work_size(self):\n self.wl_x = self.x + len(self.caption)\n self.wl_y = self.y\n self.wr_x = self.x + len(self.caption) + self.size\n self.wr_y = self.y + 1\n\n w = (self.wr_x - self.wl_x)\n h = (self.wr_y - self.wl_y)\n self.string_buffer.set_work_size(self.wl_x, self.wl_y, h, w)\n\n def run(self, key=None):\n # self.work_size()\n # self.debug.write(f'edit 1 - {key}\\n')\n if not 
key.is_sequence:\n self.string_buffer.print(key, self.__begin + self.__cursor_pos)\n\n if self.string_buffer.get_string_len(0) < self.size:\n # the line is not yet full\n self.__end += 1\n self.__cursor_pos += 1\n else:\n if self.__cursor_pos == self.size - 1:\n # the cursor is at the rightmost position\n self.__end += 1\n self.__begin += 1\n else:\n self.__cursor_pos += 1\n\n self.on_paint()\n # self.debug.write(f'edit 2 - {key}\\n')\n return None\n else:\n if key.name == 'KEY_BACKSPACE':\n if self.__cursor_pos > 0:\n self.string_buffer.remove_ch(self.__begin + self.__cursor_pos - 1)\n self.__cursor_pos -= 1\n if self.string_buffer.get_string_len(0) < self.size:\n if self.__end > 0:\n self.__end -= 1\n self.on_paint()\n return None\n if key.name == 'KEY_DELETE':\n # if self.__cursor_pos > 0:\n self.string_buffer.remove_ch(self.__begin + self.__cursor_pos)\n # self.__cursor_pos -= 1\n if self.string_buffer.get_string_len(0) < self.size:\n if self.__end > 0:\n self.__end -= 1\n self.on_paint()\n return None\n if key.name == 'KEY_LEFT':\n if self.__cursor_pos > 0:\n self.__cursor_pos -= 1\n else:\n if self.__begin > 0:\n self.__begin -= 1\n self.__end -= 1\n self.on_paint()\n return None\n if key.name == 'KEY_RIGHT':\n if self.__cursor_pos < self.size - 1:\n if self.string_buffer.get_string_len(0) > self.__cursor_pos:\n self.__cursor_pos += 1\n else:\n if self.__end <= self.string_buffer.get_string_len(0):\n self.__begin += 1\n self.__end += 1\n self.on_paint()\n return None\n return key","sub_path":"simpleinterface/control.py","file_name":"control.py","file_ext":"py","file_size_in_byte":8116,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"76723836","text":"# LVA type calcium channel Ca_LVAst.mod Hay2011\n# exec(open('Ca_LVAst_Chan_(Hay2011).py').read())\n\nimport numpy as np\nimport pickle\nimport pandas as pd\nimport moose\n\nSOMA_A = 3.14e-8\nF = 96485.3329\nR = 8.314\ncelsius = 32\ndt = 0.05e-3\nENa = 0.050\nEK = -0.085\nEh = -0.045\nECa = 0.128\nEm = -0.090\n\nVmin = -0.100\nVmax = 0.100\nVdivs = 3000\nv = np.linspace(Vmin,Vmax, Vdivs)\nCamin = 0.01e-3\nCamax = 1e-3\nCadivs = 4000\nca = np.linspace(Camin,Camax, Cadivs)\n\ndef Ca_LVAst_Chan(name):\n Ca_LVAst = moose.HHChannel( '/library/' + name )\n Ca_LVAst.Ek = ECa\n Ca_LVAst.Gbar = 300.0*SOMA_A\n Ca_LVAst.Gk = 0.0\n Ca_LVAst.Xpower = 2.0\n Ca_LVAst.Ypower = 1.0\n Ca_LVAst.Zpower = 0.0\n\n qt = 2.3**((34-21)/10)\n V = v+0.010 #Because Hay2011 shifted this\n mInf = 1.0000/(1+ np.exp((V*1e3 - -30.000)/-6))\n mTau = (5.0000 + 20.0000/(1+np.exp((V*1e3 - -25.000)/5)))/qt\n hInf = 1.0000/(1+ np.exp((V*1e3 - -80.000)/6.4))\n hTau = (20.0000 + 50.0000/(1+np.exp((V*1e3 - -40.000)/7)))/qt\n\n xgate = moose.element( Ca_LVAst.path + '/gateX' )\n xgate.min = Vmin\n xgate.max = Vmax\n xgate.divs = Vdivs\n xgate.tableA = mInf/mTau*1e3\n xgate.tableB = 1/mTau*1e3\n\n ygate = moose.element( Ca_LVAst.path + '/gateY' )\n ygate.min = Vmin\n ygate.max = Vmax\n ygate.divs = Vdivs\n ygate.tableA = hInf/hTau*1e3\n ygate.tableB = 1/hTau*1e3\n\n addmsg2 = moose.Mstring( Ca_LVAst.path + '/addmsg2' )\n
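 #route this channel's calcium current into the sibling Ca_conc pool\n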
 addmsg2.value = '. IkOut ../Ca_conc current'\n return Ca_LVAst\n","sub_path":"2019-12-27-Hay2011_revisiting/MOOSE kinetics/Ca_LVAst_Chan_(Hay2011).py","file_name":"Ca_LVAst_Chan_(Hay2011).py","file_ext":"py","file_size_in_byte":1488,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"564307522","text":"\n\n#class header\nclass _WASHCLOTH():\n\tdef __init__(self,): \n\t\tself.name = \"WASHCLOTH\"\n\t\tself.definitions = [u'a small cloth used to wash the body, especially the face and hands']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'nouns'\n\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/nouns/_washcloth.py","file_name":"_washcloth.py","file_ext":"py","file_size_in_byte":352,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"16333115","text":"tests = int(raw_input())\nfor i in range(1, tests + 1):\n base = raw_input()\n if base == '0':\n print(\"Case #%i: INSOMNIA\" % i)\n continue\n numbers = set(map(int, base))\n base_i = int(base)\n mult = 1\n while len(numbers) != 10:\n mult += 1\n n = mult * base_i\n numbers.update(set(map(int, str(n))))\n print(\"Case #%i: %i\" % (i, n))\n","sub_path":"codes/CodeJamCrawler/16_0_1/matheusgr/pa.py","file_name":"pa.py","file_ext":"py","file_size_in_byte":380,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"188626100","text":"import os, sys, argparse, time\nfrom optparse import OptionParser\nfrom margin.utils import makeFastaSequenceNamesUnique, makeFastqSequenceNamesUnique\n\n\"\"\"Ensures that the first word of each fasta header is unique within the input file, \noutputting the result to a given output file.\n\"\"\"\n\ndef main(myCommandLine=None):\n #Parse the inputs args/options\n parser = OptionParser(usage=\"usage: inputFastqFile outputFastqFile\", \n version=\"%prog 0.1\")\n\n #Parse the options/arguments\n options, args = parser.parse_args()\n\n #Print help message if no input\n if len(sys.argv) == 1:\n parser.print_help()\n sys.exit(0)\n\n #Exit if the arguments are not what we expect\n if len(args) != 2:\n raise RuntimeError(\"Expected two arguments, got: %s\" % \" \".join(args))\n \n makeFastqSequenceNamesUnique(args[0], args[1])\n\nif __name__ == '__main__':\n main()","sub_path":"scripts/uniquifyFastq.py","file_name":"uniquifyFastq.py","file_ext":"py","file_size_in_byte":903,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"464518772","text":"\"\"\"\nRoutes and views for the flask application.\n\"\"\"\nfrom datetime import datetime\nfrom flask import render_template\nfrom FlaskMongoDB import app\nimport pymongo\nfrom FlaskMongoDB.forms import MongoForm\nfrom flask import request\n\n\n@app.route('/')\n@app.route('/home')\ndef home():\n \"\"\"Renders the home page.\"\"\"\n return render_template(\n 'index.html',\n title='Home Page',\n year=datetime.now().year,\n )\n\n@app.route('/contact')\ndef contact():\n \"\"\"Renders the contact page.\"\"\"\n return render_template(\n 'contact.html',\n title='Contact',\n year=datetime.now().year,\n message='Your contact page.'\n )\n\n@app.route('/about')\ndef about():\n \"\"\"Renders the about page.\"\"\"\n return render_template(\n 'about.html',\n title='About',\n year=datetime.now().year,\n message='Your application description page.'\n )\n\n
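#/mongo shows the form on GET and inserts one document on a valid POST\n@app.route('/mongo',methods=('GET',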
 'POST'))\ndef mongo():\n \"\"\"mongo test.\"\"\"\n #myclient = pymongo.MongoClient('mongodb://localhost:27017/')\n #dblist = myclient.list_database_names()\n ## dblist = myclient.database_names() \n #if \"test_db1\" in dblist:\n # msg = \"test_db数据库已存在!\"\n #else :\n # msg = \"test_db1数据库不存在!\"\n\n #return render_template(\n # 'mongo.html',\n # title='Mongo',\n # year=datetime.now().year,\n # message = msg\n #)\n mongo_form = MongoForm()\n if mongo_form.validate_on_submit():\n client = pymongo.MongoClient('mongodb://localhost:27017/')\n db = client[\"test_db\"]\n collection = db[\"c_lbp\"]\n data = {\n 'name':mongo_form.name.data\n }\n result = collection.insert_one(data) \n print(result)\n # dblist = myclient.database_names() \n if result.inserted_id is not None:\n msg = \"数据插入成功\"\n else :\n msg = \"数据插入失败\"\n\n return render_template(\n 'mongo.html',\n title='Mongoadd',\n year=datetime.now().year,\n message = msg,\n mongo_form=mongo_form\n )\n #else :\n return render_template(\"mongo.html\",mongo_form=mongo_form)\n\n@app.route('/mongoadd')\ndef add():\n \"\"\"mongo test.\"\"\"\n client = pymongo.MongoClient('mongodb://localhost:27017/')\n db = client[\"test_db\"]\n collection = db[\"c_lbp\"]\n student = {\n 'id': '20210122',\n 'name': 'Jordan',\n 'age': 20,\n 'gender': 'male'\n }\n result = collection.insert_one(student) \n print(result)\n # dblist = myclient.database_names() \n if result.inserted_id is not None:\n msg = \"数据插入成功\"\n else :\n msg = \"数据插入失败\"\n\n return render_template(\n 'mongo.html',\n title='Mongoadd',\n year=datetime.now().year,\n message = msg\n )\n \n@app.route('/mongo_query')\ndef query():\n \"\"\"mongo test.\"\"\"\n client = pymongo.MongoClient('mongodb://localhost:27017/')\n db = client[\"test_db\"]\n collection = db[\"c_test\"]\n query = { \"name\": \"baoping\" }\n doc = collection.find(query)#doc is a cursor, so you need to know which operations and methods cursors support\n for x in doc:\n print(x)\n # dblist = myclient.database_names() \n if doc.count() > 0:\n doc = collection.find()\n msg = list(doc)[2]\n else :\n msg = \"查询失败\"\n\n return render_template(\n 'mongo.html',\n title='Mongo_query',\n year=datetime.now().year,\n message = msg,\n name = msg['name']\n )\n\n@app.route(\"/add_mongo\",methods=[\"GET\",\"POST\"])\ndef add_mongo():\n mongo_form = MongoForm()\n return render_template(\"mongo.html\",**locals())\n","sub_path":"FlaskMongoDB/FlaskMongoDB/FlaskMongoDB/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3686,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"495609656","text":"# -*- coding: utf-8 -*-\n\"\"\"consumption application URLs\"\"\"\nfrom __future__ import unicode_literals\n\nfrom django.conf.urls import url\nfrom . 
import views\n\nurlpatterns = [\n url(r'^$', views.dashboard, name='dashboard'),\n url(r'^user/$', views.dashboard, name='users'),\n # url(r'^user/detail/(\\d+)/$', views.user_detail, name=\"user_detail\"),\n url(\n r'^user/detail/(?P[^/]+)/$',\n views.UserDetailView.as_view(),\n name=\"user_detail\",\n\n ),\n url(r'^area/detail/([\\w\\-]+)/$', views.area_detail, name=\"area_detail\"),\n\n url(r'^api/areas/([\\w\\-]+)/$', views.area_data_by_month, name=\"api_area\"),\n url(\n r'^api/consumption/$',\n views.consumption_view,\n name=\"api_consumption\",\n ),\n url(\n r'^api/consumption/monthly/$',\n views.data_by_month,\n name=\"api_monthly_consumption\",\n ),\n url(\n r'^api/consumption/(\\d+)/$',\n views.user_data_by_month,\n name=\"api_user_consumption\",\n ),\n]\n","sub_path":"dashboard/consumption/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":992,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"245354759","text":"### Read healthcare cost data from vektis https://www.vektis.nl/intelligence/open-data ###\n### Please read the data description before using the data ###\n\nimport json\nimport func\nimport numpy as np\nimport pandas as pd\n\nwith open('request.json', 'r') as f:\n input = json.load(f)\n\nfile = input['data_file']\nyear = file[:-4]\ndf = pd.read_csv(file, delimiter=';')\n\n### Select features you are interested in ###\n### Feature descriptions are provided by https://www.vektis.nl/intelligence/open-data ###\n# KOSTEN_MEDISCH_SPECIALISTISCHE_ZORG\ncol = input['selected_features']\n\n\n### As some features are available in some years, we need to check before select certain features ###\ndata_col = df.columns\npresent = []\nfor c in col:\n if c in data_col:\n present.append(col.index(c))\n \ndf_vektis = df[np.array(col)[present]]\n\n\n### Give new columns names which are understandable for yourself ###\n# medical_specialist\nname_col = input['name_features']\nnew_col = np.array(name_col)[present]\ndf_vektis.columns = new_col\n\n\n### Change the types (int,float,str --> float) of values in the AGE column ###\nage = []\nfor i in df['LEEFTIJDSKLASSE']:\n if type(i) == str:\n try:\n age.append(float(i))\n except:\n age.append(float(i[:-1]))\n elif type(i) == float:\n age.append(i)\n elif type(i) == int:\n age.append(i)\n\n### Add new age column ###\ndf_vektis['AGE'] = age\n\n\n### For getting some basic info ###\nif input['check_missing'] == True:\n func.check_missing(df, col, year)\nif input['data_description'] == True:\n func.data_describe(df, col, year)\n\n### For three plots ###\nloop = input['age_range']\nfor i in loop:\n df_avg = func.groupAgeRange(df_vektis, i, 0)\n\n if input['correlation_matrix'] == True:\n func.corr_Matrix(df_avg, i, year)\n\n if input['pie_chart'] == True:\n func.pie_Chart(df_avg, i, year)\n\n if input['distribution_plot'] == True:\n func.dist_Plot(df_avg,'SUM', i, year)\n\n### Only for the Stack plot ###\nif input['stacked_area'] == True:\n loop = list(range(0,90,1))\n df_stack = pd.DataFrame()\n for i in loop:\n df_avg = func.groupAgeRange(df_vektis, i, df_stack)\n df_stack[i] = df_avg.mean(axis=0, skipna=True)\n df_stack_trans = df_stack.transpose()\n df_stack_trans = func.merge(df_stack_trans)\n func.stacked_Plot(df_stack_trans, year)\n print('Stacked Area plot is 
done')\n\n","sub_path":"containers/createContainer/Party_2_Container/StepRequest/requestBasicInfo.py","file_name":"requestBasicInfo.py","file_ext":"py","file_size_in_byte":2414,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"37472063","text":"#-*-coding:utf-8-*-\nfrom django.shortcuts import render,redirect\nfrom django.http import HttpResponse\nfrom mysite.forms import Login_Form\nfrom django.contrib.auth import authenticate, login\nfrom django.contrib.auth.decorators import login_required\nfrom mysite.forms import New_Post_Form\nfrom mysite.models import Member_Model\nimport json\n\n@login_required(login_url='/mysite/login/')\ndef new_post(request):\n\t# get_email = request.user.username\n\t# if request.is_ajax():\n\t# \tpass\n\tif request.method == 'GET':\n\t\tprint('method is get')\n\t\tnew_form = New_Post_Form()\n\t\treturn render(request,'new_post_templates.html',{'form':new_form})\n\telif request.method == 'POST':\n\t\tget_form = New_Post_Form(request.POST)\n\t\tprint(\"check if is valid\")\n\t\tv = get_form.is_valid()\n\t\tprint(v)\n\t\tif v:\n\t\t\tget_post = get_form.save(commit=False)\n\t\t\tget_email = request.user.username\n\t\t\tuser = Member_Model.objects.get(email=get_email)\n\t\t\tprint(get_email)\n\t\t\tprint(user.member_id)\n\t\t\tget_post.author = user.nick_name\n\t\t\tif get_post.author is None:\n\t\t\t\tget_post.author = user.email\n\t\t\tget_post.save()\n\t\t\treturn HttpResponse(json.dumps({\"status\":\"200\"}))\n\t\telse:\n\t\t\treturn HttpResponse(json.dumps({\"status\":\"500\"}))\n\telse:\n\t\treturn render(request,'new_post_templates.html')\n\t\n","sub_path":"IBMsite/mysite/views/new_post_views.py","file_name":"new_post_views.py","file_ext":"py","file_size_in_byte":1245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"32924286","text":"\nclass Solution(object):\n\n def grayCode(self, n):\n \"\"\"\n :type n: int\n :rtype: List[int]\n\n ACE\n 60 ms\n \"\"\"\n def helper(n):\n if n == 0:\n return [\"0\"]\n if n == 1:\n return [\"0\", \"1\"]\n else:\n partial = helper(n - 1)\n return [\"0\" + part for part in partial] + [\"1\" + part for part in reversed(partial)]\n return [int(s, 2) for s in helper(n)]\n\n def grayCodeV2(self, n):\n \"\"\"\n :type n: int\n :rtype: List[int]\n\n ACE\n 60 ms\n\n My idea is to generate the sequence iteratively.\n For example, when n=3, we can get the result based on n=2.\n 00,01,11,10 -> (000,001,011,010 ) (110,111,101,100).\n\n The middle two numbers only differ at their highest bit, while the\n rest numbers of part two are exactly symmetric of part one.\n It is easy to see its correctness.\n \"\"\"\n res = [0]\n for i in range(n):\n for k in range(len(res) - 1, -1, -1):\n res.append(res[k] | 1 << i)\n return res\n\n def grayCodeV1(self, n):\n \"\"\"\n :type n: int\n :rtype: List[int]\n\n ACE\n 80 ms\n \"\"\"\n def helper(n):\n if n == 1:\n return [\"0\", \"1\"]\n else:\n partial = helper(n - 1)\n return [\"0\" + part for part in partial] + [\"1\" + part for part in reversed(partial)]\n if n == 0:\n return [0]\n else:\n return [int(s, 2) for s in helper(n)]\n\nif __name__ == '__main__':\n s = Solution()\n for i in range(1, 4):\n res = s.grayCode(i)\n print(i)\n print(res)\n print()","sub_path":"089_gray_code.py","file_name":"089_gray_code.py","file_ext":"py","file_size_in_byte":1772,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"322105769","text":"from pyspark.sql 
import SparkSession\nfrom pyspark.sql.functions import col, row_number, date_format, expr\nimport os.path\nimport yaml\n\nif __name__ == '__main__':\n # Create the SparkSession\n sparkSession = SparkSession \\\n .builder \\\n .appName(\"Read Files\") \\\n .master('local[*]') \\\n .config('spark.jars.packages', 'org.apache.hadoop:hadoop-aws:2.7.4') \\\n .getOrCreate()\n\n current_dir = os.path.abspath(os.path.dirname(__file__))\n appConfigFilePath = os.path.abspath(current_dir + \"/../../../\"+\"application.yml\")\n\n with open(appConfigFilePath) as conf:\n doc = yaml.load(conf,Loader=yaml.FullLoader)\n\n # Setup spark to use s3\n hadoop_conf = sparkSession.sparkContext._jsc.hadoopConfiguration()\n hadoop_conf.set(\"fs.s3.impl\", \"org.apache.hadoop.fs.s3a.S3AFileSystem\")\n hadoop_conf.set(\"fs.s3a.access.key\", doc[\"s3_conf\"][\"access_key\"])\n hadoop_conf.set(\"fs.s3a.secret.key\", doc[\"s3_conf\"][\"secret_access_key\"])\n hadoop_conf.set(\"fs.s3a.endpoint\", \"s3-eu-west-1.amazonaws.com\")\n\n # write into NYC_OMO_YEAR_WISE\n dfFromParquet=sparkSession\\\n .read\\\n .format(\"parquet\")\\\n .load(\"s3a://\" + doc[\"s3_conf\"][\"s3_bucket\"] + \"/NYC_OMO\")\\\n .withColumn(\"OrderYear\", date_format(\"OMOCreateDate\", \"YYYY\"))\\\n .repartition(5)\n\n dfFromParquet.printSchema()\n dfFromParquet.show(5, False)\n\n dfFromParquet\\\n .write\\\n .partitionBy(\"OrderYear\")\\\n .mode(\"overwrite\")\\\n .parquet(\"s3a://\" + doc[\"s3_conf\"][\"s3_bucket\"] + \"/NYC_OMO_YEAR_WISE\")\n\n nycOmoDf = sparkSession.read\\\n .parquet(\"s3a://\" + doc[\"s3_conf\"][\"s3_bucket\"] + \"/NYC_OMO_YEAR_WISE\")\\\n .repartition(5)\n\n parquetExplianPlan = nycOmoDf \\\n .select(\"OMOID\", \"OMONumber\", \"BuildingID\") \\\n .filter((col(\"OrderYear\") == \"2018\") & (col(\"Lot\") > \"50\"))\n\n print(\"spark.sql.parquet.filterPushdown:\", sparkSession.conf.get(\"spark.sql.parquet.filterPushdown\"))\n print(\"spark.sql.parquet.mergeSchema:\", sparkSession.conf.get(\"spark.sql.parquet.mergeSchema\"))\n\n parquetExplianPlan.explain()\n\n # turn on Parquet push-down, stats filtering, and dictionary filtering\n sparkSession.conf.set('spark.sql.parquet.filterPushdown', \"true\")\n print(\"spark.sql.parquet.filterPushdown\", sparkSession.conf.get(\"spark.sql.parquet.filterPushdown\"))\n sparkSession.conf.set('parquet.filter.statistics.enabled', \"true\")\n sparkSession.conf.set('parquet.filter.dictionary.enabled', \"true\")\n\n #use the non-Hive read path\n sparkSession.conf.set(\"spark.sql.hive.convertMetastoreParquet\", \"true\")\n\n # turn off schema merging, which turns off push-down\n sparkSession.conf.set(\"spark.sql.parquet.mergeSchema\", \"false\")\n sparkSession.conf.set(\"spark.sql.hive.convertMetastoreParquet.mergeSchema\", \"false\")\n\n parquetExplianPlan1 =nycOmoDf \\\n .select(\"OMOID\", \"OMONumber\", \"BuildingID\") \\\n .filter((col(\"OrderYear\") == \"2018\") & (col(\"Lot\") > \"50\"))\n\n parquetExplianPlan1.explain()\n","sub_path":"dataframe/optimization/write_parquet_n_pushdown_filter.py","file_name":"write_parquet_n_pushdown_filter.py","file_ext":"py","file_size_in_byte":3018,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"524083483","text":"# -*- coding: utf-8 -*-\n\nimport numpy as np\nimport random\n\nfrom logging import getLogger, INFO\n\nimport time\n\nlogger = getLogger(__name__)\nlogger.setLevel(INFO)\n\n\nclass SelfPlayer(object):\n def __init__(self, make_sim_env_fn, config, api, play_config=None, model_cache=None):\n 
self.game_tree = GameTree(make_sim_env_fn=make_sim_env_fn, config=config, api=api, play_config=play_config, model_cache=model_cache)\n\n def prepare(self, root_env):\n self.game_tree.expand_root(root_env=root_env)\n\n def think(self, tau=0):\n return self.game_tree.mcts_and_play(tau)\n\n def play(self, act):\n self.game_tree.keep_only_subtree(act)\n\n\nclass EvaluatePlayer(SelfPlayer):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n # Before the model becomes accurate, rotate_flip introduces randomness.\n self.game_tree.allow_rotate_flip_ob = False\n\n def play(self, act, env):\n self.game_tree.keep_only_subtree(act)\n if not self.game_tree.root_node.expanded:\n # possible that opposite plays an action which I haven't searched yet.\n self.game_tree.expand_root(root_env=env)\n\n def get_think_info(self):\n node = self.game_tree.root_node\n return node.full_N, node.full_Q, node.full_combined_V, node.full_P\n\n\nclass TimedEvaluatePlayer(EvaluatePlayer):\n def __init__(self, time_strategy, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.time_strategy = time_strategy\n\n def think(self, tau=0):\n timeout = self.time_strategy.get_seconds_for_thinking()\n return self.game_tree.mcts_and_play(tau, timeout)\n\n def play(self, *args, **kwargs):\n self.time_strategy.play()\n return super().play(*args, **kwargs)\n\n\nclass GameTree(object):\n def __init__(self, make_sim_env_fn, config=None, play_config=None, api=None, model_cache=None):\n self.make_sim_env_fn = make_sim_env_fn\n self.config = config\n self.play_config = play_config or self.config.play\n self.root_node = Node(self.play_config.c_puct)\n self.api = api\n self.model_cache = model_cache\n\n self.allow_rotate_flip_ob = True\n self.virtual_loss = self.config.play.virtual_loss\n\n def expand_root(self, root_env):\n p, v = self.api.predict(np.asarray(root_env.observation))\n self.root_node.expand_and_evaluate(p, v, root_env.legal_moves)\n\n def mcts_and_play(self, tau, timeout=None):\n self.mcts(timeout)\n return self.play(tau)\n\n def keep_only_subtree(self, action):\n self.root_node = self.root_node.child_by_value(action)\n assert self.root_node is not None\n\n def mcts(self, timeout=None):\n # idea borrowed from https://github.com/tensorflow/minigo/blob/master/strategies.py\n\n assert self.root_node.expanded\n\n # Question: is it correct doing it every time mcts starts, or should it be just first step of a game?\n self.root_node.add_dirichlet_noise(self.play_config.noise_eps, self.play_config.dirichlet_alpha)\n\n deadline = time.time() + timeout if timeout else None\n\n nodes_to_sim = [] if timeout else [(self.root_node, None) for _ in range(self.play_config.simulation_num_per_move)]\n nodes_to_predict = []\n n_sim, n_cont_sim = 0, 0\n while True:\n if n_sim and deadline and time.time() >= deadline:\n break\n if n_sim and not deadline and not nodes_to_sim:\n break\n\n n_sim += 1\n\n nodes_to_sim = nodes_to_sim or [(self.root_node, None)]\n\n cur_node, env = nodes_to_sim.pop(0)\n env = env or self.make_sim_env_fn()\n\n while True:\n next_node = cur_node.select_best_child_and_add_virtual_loss(self.virtual_loss)\n env.step(next_node.value)\n\n if env.done:\n v = -1 if env.last_player_wins else 1 if env.last_player_loses else 0\n self.backup(next_node, v)\n break\n\n if not next_node.expanded:\n\n if self.model_cache:\n ob, op = self.random_symmetric_ob(env)\n cache = self.model_cache.query(env.compress_ob(ob))\n else:\n cache, op = None, None\n\n if cache:\n p,v = cache\n p = p if op is None else 
env.counter_rotate_flip_pi(p, op)\n next_node.expand_and_evaluate(p, v, env.legal_moves)\n self.backup(next_node, v)\n else:\n nodes_to_predict.append((next_node, env))\n break\n\n cur_node = next_node\n\n if n_sim % self.play_config.prediction_queue_size == 0 and nodes_to_predict:\n nodes_not_backup = self.predict_and_backup(nodes_to_predict, always_backup=False)\n nodes_to_sim = nodes_not_backup + nodes_to_sim\n nodes_to_predict = []\n n_cont_sim += len(nodes_not_backup)\n\n # there maybe some unpredicted nodes\n if nodes_to_predict:\n self.predict_and_backup(nodes_to_predict, always_backup=True)\n\n # there maybe some ongoing nodes waiting to sim deeper\n for node,env in nodes_to_sim:\n if env:\n self.substract_virtual_loss(node)\n n_sim -= 1\n n_cont_sim -= 1\n\n logger.debug(f'think time: {timeout}; search times: {n_sim - n_cont_sim}')\n\n def random_symmetric_ob(self, env):\n op = random.randint(0, env.rotate_flip_op_count - 1) \\\n if self.allow_rotate_flip_ob and env.rotate_flip_op_count > 0 \\\n else None\n ob = env.observation if op is None else env.rotate_flip_ob(env.observation, op)\n\n return ob, op\n\n def predict_and_backup(self, node_envs, always_backup):\n ob_ops = [self.random_symmetric_ob(env) for _,env in node_envs]\n ops = [x[1] for x in ob_ops]\n obs = np.asarray([x[0] for x in ob_ops])\n\n ps, vs = self.api.predict(obs)\n\n nodes_not_backup = []\n for (node, env), p, v, ob, op in zip(node_envs, ps, vs, obs, ops):\n if self.model_cache:\n self.model_cache.suggest(env.compress_ob(ob), p, v)\n\n if op is not None:\n p = env.counter_rotate_flip_pi(p, op)\n\n if not node.expanded:\n node.expand_and_evaluate(p, v, env.legal_moves)\n self.backup(node, v)\n else:\n if always_backup:\n self.backup(node, v)\n else:\n nodes_not_backup.append((node, env))\n\n return nodes_not_backup\n\n def backup(self, leaf_node, v):\n cur_node = leaf_node\n while cur_node is not self.root_node:\n v = -v # important: reverse v\n parent = cur_node.parent\n parent.backup_and_stubstract_virtual_loss(self.virtual_loss, v, cur_node.sibling_index)\n cur_node = parent\n\n def substract_virtual_loss(self, leaf_node):\n cur_node = leaf_node\n while cur_node is not self.root_node:\n parent = cur_node.parent\n parent.substract_virtual_loss(self.virtual_loss, cur_node.sibling_index)\n cur_node = parent\n\n # those illegal actions are with full_N == 0, so won't be played\n def play(self, tau):\n N = self.root_node.full_N\n if abs(tau-1) < 1e-10:\n pi = N / np.sum(N)\n act = np.random.choice(range(len(pi)), p=pi)\n assert pi[act] > 0\n else:\n assert abs(tau) < 1e-10, f'tau={tau}(expected to be either 0 or 1 only)'\n act = random.choice(np.argwhere(abs(N - np.amax(N)) < 1e-10).flatten().tolist())\n pi = np.zeros([len(N)])\n pi[act] = 1\n\n # the paper says, AGZ resigns if both root value and best child value are lower than threshold\n # TODO: is it v or Q or Q+U to check?\n root_v = self.root_node.v\n # child'v is opponent's winning rate, need to reverse\n # Note that root_node.children are only for those legal action.\n children_v = [-child.v for child in self.root_node.children]\n if len(children_v) > 0:\n best_child_v = np.max(children_v)\n else:\n best_child_v = root_v # trick. 
Since it is used for the resign check only, falling back to root_v works.\n values_of_resign_check = (root_v, best_child_v)\n\n return int(act), pi, values_of_resign_check\n\n\nclass Node(object):\n def __init__(self, c_puct, parent=None, sibling_index=None, value=None):\n self.children = None\n self._parent = parent\n\n self._c_puct = c_puct\n self._sibling_index = sibling_index\n self._value = value # corresponding \"action\" of env\n\n self.p = None\n self.W = None\n self.Q = None\n self.N = None\n self.v = 0.\n\n # below variables are only for speeding up MCTS\n self._sum_n = None\n self._best_children_indices = None\n self._full_n_size = None\n\n # given the real meaning of node.value, full_N is actually N for every \"action\" of env\n @property\n def full_N(self):\n assert self.expanded\n\n assert np.sum(self.N) > 0, f'full_N is called with self.N={self.N}'\n\n ret = np.zeros([self._full_n_size])\n for node in self.children:\n ret[node.value] = self.N[node.sibling_index]\n\n assert abs(np.sum(self.N) - np.sum(ret)) < 1e-10\n return ret\n\n # given the real meaning of node.value, full_P is actually P for every \"action\" of env\n @property\n def full_P(self):\n assert self.expanded\n\n ret = np.zeros([self._full_n_size])\n for node in self.children:\n ret[node.value] = self.p[node.sibling_index]\n\n assert abs(np.sum(self.p) - np.sum(ret)) < 1e-10\n return ret\n\n # given the real meaning of node.value, full_Q is actually Q for every \"action\" of env\n @property\n def full_Q(self):\n assert self.expanded\n\n ret = np.zeros([self._full_n_size])\n for node in self.children:\n ret[node.value] = self.Q[node.sibling_index]\n\n assert abs(np.sum(self.Q) - np.sum(ret)) < 1e-10\n return ret\n\n # given the real meaning of node.value, full_combined_V is actually combined_V for every \"action\" of env\n @property\n def full_combined_V(self):\n assert self.expanded\n\n v = self._children_v()\n ret = np.zeros([self._full_n_size])\n for node in self.children:\n ret[node.value] = v[node.sibling_index]\n\n assert abs(np.sum(v) - np.sum(ret)) < 1e-10\n return ret\n\n @property\n def expanded(self):\n return self.children is not None\n\n @property\n def value(self):\n return self._value\n\n @property\n def sibling_index(self):\n return self._sibling_index\n\n @property\n def parent(self):\n return self._parent\n\n def child_by_value(self, value):\n return next((child for child in self.children if child.value == value), None)\n\n def expand_and_evaluate(self, p, v, legal_moves):\n\n if self.expanded:\n return\n\n self.p = p[legal_moves == 1] # this.p is (typically much) shorter than p\n assert 0 < len(self.p) <= len(legal_moves), f'{len(self.p)}, {len(legal_moves)} '\n if abs(np.sum(self.p)) < 1e-10:\n self.p /= len(self.p)\n else:\n self.p /= np.sum(self.p)\n\n self.v = v\n self.W = np.zeros([len(self.p)])\n self.Q = np.zeros([len(self.p)])\n self.N = np.zeros([len(self.p)])\n\n actions = (i for i,v in enumerate(legal_moves) if v == 1)\n self.children = [Node(c_puct=self._c_puct, parent=self, sibling_index=i, value=a)\n for i,a in enumerate(actions)]\n assert len(self.children) > 0\n\n self._sum_n = 0\n self._best_children_indices = None\n self._full_n_size = len(legal_moves)\n\n def add_dirichlet_noise(self, eps, alpha):\n self.p = (1-eps)*self.p + eps*np.random.dirichlet([alpha]*len(self.p))\n self._best_children_indices = None\n\n def add_virtual_loss(self, virtual_loss, child):\n self.N[child] += virtual_loss\n self.W[child] -= virtual_loss\n self.Q[child] = self.W[child] / self.N[child]\n assert 
self.N[child] > 0, f'N[{child}]={self.N[child]}'\n\n self._sum_n += virtual_loss\n self._best_children_indices = None\n\n def substract_virtual_loss(self, virtual_loss, child):\n self.N[child] -= virtual_loss\n self.W[child] += virtual_loss\n self.Q[child] = self.W[child] / self.N[child] if self.N[child] > 1e-5 else 0\n assert self.N[child] >= 0, f'N[{child}]={self.N[child]}'\n\n self._sum_n -= virtual_loss\n self._best_children_indices = None\n\n def backup(self, v, child):\n self.N[child] += 1\n self.W[child] += v\n self.Q[child] = self.W[child] / self.N[child]\n assert self.N[child] > 0, f'N[{child}]={self.N[child]}'\n\n self._sum_n += 1\n self._best_children_indices = None\n\n def backup_and_stubstract_virtual_loss(self, virtual_loss, v, child):\n self.backup(v, child)\n self.substract_virtual_loss(virtual_loss, child)\n\n def best_children_indices(self):\n if self._best_children_indices is None:\n if len(self.p) == 1:\n self._best_children_indices = [0]\n else:\n v = self._children_v()\n self._best_children_indices = np.argwhere(abs(v-np.amax(v)) < 1e-10).flatten().tolist()\n\n return self._best_children_indices\n\n def _children_v(self):\n sqrt_sum_n = np.sqrt(self._sum_n)\n return self.Q + self._c_puct * self.p * sqrt_sum_n / (1 + self.N)\n\n def select_best_child_and_add_virtual_loss(self, virtual_loss):\n ci = random.choice(self.best_children_indices())\n next_node = self.children[ci]\n self.add_virtual_loss(virtual_loss, next_node.sibling_index)\n\n return next_node\n\n","sub_path":"src/reversi_zero/agent/player.py","file_name":"player.py","file_ext":"py","file_size_in_byte":14176,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"375280941","text":"# Given a long text string, count the number of occurrences of each word. Ignore case. Assume the boundary of a word is whitespace - a \" \", or a line break denoted by \"\\n\". Ignore all punctuation, such as . , ~ ? !. Assume hyphens are part of a word - \"two-year-old\" and \"two year old\" are one word, and three different words, respectively. 
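Note: Node._children_v in the player.py record above is the standard PUCT selection rule, Q(a) + c_puct * P(a) * sqrt(sum_b N(b)) / (1 + N(a)). Below is a minimal standalone sketch of that rule; the toy numbers are illustrative only and are not taken from the record.

import numpy as np

c_puct = 1.5
p = np.array([0.5, 0.3, 0.2])   # priors over the legal children
Q = np.array([0.1, 0.0, -0.2])  # mean values accumulated by backups
N = np.array([10., 5., 1.])     # visit counts (virtual loss included)

# PUCT: exploitation (Q) plus an exploration bonus that decays with visits
U = c_puct * p * np.sqrt(N.sum()) / (1 + N)
best_child = int(np.argmax(Q + U))
print(best_child)  # this child would be simulated next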
{"seq_id":"375280941","text":"# Given a long text string, count the number of occurrences of each word. Ignore case. Assume the boundary of a word is whitespace - a \" \", or a line break denoted by \"\\n\". Ignore all punctuation, such as . , ~ ? !. Assume hyphens are part of a word - \"two-year-old\" and \"two year old\" are one word, and three different words, respectively. \n\n\ndef count_words(text):\n    text_dict = {}\n    # split() covers both spaces and line breaks; strip punctuation but keep hyphens\n    for word in text.lower().split():\n        word = word.strip('.,~?!')\n        if word in text_dict:\n            text_dict[word] += 1\n        else:\n            text_dict[word] = 1\n\n    for key, value in text_dict.items():\n        print(key + \" \" + str(value))\n\n\n\ncount_words(\"I do not like green eggs and ham, I do not like them, Sam-I-Am\")\n\n# i 2\n# do 2\n# not 2\n# like 2\n# green 1\n# eggs 1\n# and 1\n# ham 1\n# them 1\n# sam-i-am 1","sub_path":"CTI/replit_assignments/count_words.py","file_name":"count_words.py","file_ext":"py","file_size_in_byte":760,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
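For reference, the same specification can be met more compactly with collections.Counter from the standard library. This is a minimal alternative sketch, not part of the original record, assuming the same punctuation set (. , ~ ? !) and whitespace word boundaries:

from collections import Counter

def count_words_counter(text):
    # split() handles spaces and line breaks; strip() drops surrounding punctuation, hyphens survive
    words = (w.strip('.,~?!') for w in text.lower().split())
    return Counter(w for w in words if w)

print(count_words_counter("I do not like green eggs and ham, I do not like them, Sam-I-Am"))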
{"seq_id":"96716202","text":"\n\nfrom xai.brain.wordbase.nouns._rating import _RATING\n\n#class header\nclass _RATINGS(_RATING):\n\tdef __init__(self):\n\t\t_RATING.__init__(self)\n\t\tself.name = \"RATINGS\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"rating\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_ratings.py","file_name":"_ratings.py","file_ext":"py","file_size_in_byte":238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
{"seq_id":"600804105","text":"# Write a function to check whether an input string is a valid\n# IPv4 address or IPv6 address or neither.\n#\n# IPv4 addresses are canonically represented in dot-decimal notation,\n# which consists of four decimal numbers, each ranging from 0 to 255,\n# separated by dots (\".\"), e.g., 172.16.254.1;\n# Besides, leading zeros in the IPv4 are invalid. For example,\n# the address 172.16.254.01 is invalid.\n#\n# IPv6 addresses are represented as eight groups of four hexadecimal digits,\n# each group representing 16 bits. The groups are separated by colons (\":\").\n# For example, the address 2001:0db8:85a3:0000:0000:8a2e:0370:7334 is valid.\n# Also, leading zeros within a group may be omitted and letters may be\n# written in lower or upper case,\n# so 2001:db8:85a3:0:0:8A2E:0370:7334 is also a valid IPv6 address\n# (omitted leading zeros and upper-case letters).\n# However, we don't replace a consecutive group of zero values with a\n# single empty group using two consecutive colons (::), to pursue simplicity.\n# For example, 2001:0db8:85a3::8A2E:0370:7334 is an invalid IPv6 address.\n# Besides, extra leading zeros in the IPv6 are also invalid. For example,\n# the address 02001:0db8:85a3:0000:0000:8a2e:0370:7334 is invalid.\n\n\nclass Solution:\n    def validIPAddress(self, IP: str) -> str:\n        def isIPv4(s):\n            # no leading zeros: note str(int(\"02\")) != \"02\"\n            try: return str(int(s)) == s and 0 <= int(s) <= 255\n            except: return False\n\n        def isIPv6(s):\n            if not 1 <= len(s) <= 4:\n                return False\n            # only hex digits allowed; int(s, 16) alone would also accept '+', '-' and whitespace\n            return all(c in '0123456789abcdefABCDEF' for c in s)\n\n        if IP.count(\".\") == 3 and all(isIPv4(p) for p in IP.split(\".\")):\n            return \"IPv4\"\n        if IP.count(\":\") == 7 and all(isIPv6(p) for p in IP.split(\":\")):\n            return \"IPv6\"\n        return \"Neither\"\n\n\nIP = \"172.16.254.1\"\nsol = Solution()\nprint(sol.validIPAddress(IP))\n","sub_path":"DivideandConquer/ValidateIPAddress.py","file_name":"ValidateIPAddress.py","file_ext":"py","file_size_in_byte":1992,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
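A quick sanity-check sketch for the validator above, exercising the edge cases called out in the record's problem statement. It assumes the Solution class defined above is in scope; the expected labels come straight from the comments.

checker = Solution()
assert checker.validIPAddress("172.16.254.1") == "IPv4"
assert checker.validIPAddress("172.16.254.01") == "Neither"  # leading zero in an octet
assert checker.validIPAddress("2001:0db8:85a3:0000:0000:8a2e:0370:7334") == "IPv6"
assert checker.validIPAddress("2001:db8:85a3:0:0:8A2E:0370:7334") == "IPv6"  # omitted zeros, mixed case
assert checker.validIPAddress("2001:0db8:85a3::8A2E:0370:7334") == "Neither"  # '::' compression not allowed
assert checker.validIPAddress("256.16.254.1") == "Neither"  # octet out of range
print("all edge cases pass")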
{"seq_id":"570056973","text":"import pandas as pd\nimport matplotlib.pyplot as plt\n\ndf = pd.read_csv(\"out.csv\")\nhsn_counts = dict(df[\"Pre Hair Length\"].value_counts())\nfig, ax = plt.subplots()\nkeys = list(hsn_counts.keys())\nvalues = list(hsn_counts.values())\n\nax.scatter(keys, values, label=\"count\")\n\nfor a, b in zip(keys, values):\n    plt.text(a, b, str(b))\nax.set_title(\"Client Hair Length Before Styling\")\nax.legend()\nplt.show()\n","sub_path":"hairbeforemodel.py","file_name":"hairbeforemodel.py","file_ext":"py","file_size_in_byte":401,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
{"seq_id":"210515822","text":"from Calculator.Division import division\n\n\ndef samplemean(num):\n    try:\n        num_values = len(num)\n        total = sum(num)\n        return round(division(total, num_values), 8)\n    except ZeroDivisionError:\n        print(\"Divide by 0 Error\")\n    except ValueError:\n        print(\"Please Check your data inputs\")\n","sub_path":"Stats/MeanS.py","file_name":"MeanS.py","file_ext":"py","file_size_in_byte":316,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
{"seq_id":"373677259","text":"from django.shortcuts import render, redirect\nfrom django.views.generic import View\nfrom .render import Render\nfrom .forms import ResumeForm\nfrom .models import Resume\n# Create your views here.\n\ndef CvCreate(request):\n    form = ResumeForm(request.POST or None)\n\n    if form.is_valid():\n        form.save() \n        return redirect('../create') \n\n    # fields = ('name','birthdate','father','mother') \n    model = Resume \n    context = {\n        'form':form\n    } \n\n    return render(request,'core2/create.html',context)\n\nclass Pdf(View):\n    def get(self, request):\n        info = Resume.objects.all().order_by('-id')[:1]\n        params = {\n            'info': info,\n            'request': request\n        }\n        return Render.render('core2/pdf.html', params)","sub_path":"core2/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":772,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
{"seq_id":"85186766","text":"from mainwindow import *\nfrom PyQt5.QtGui import QPixmap, QPalette, QColor, QBrush\nfrom PyQt5.QtCore import QPoint, QSize\nfrom PyQt5.QtWidgets import *\n\nimport systempath\nimport log\nimport inihelper\n\nclass UIProcess(Ui_MainWindow, QMainWindow):\n    def __init__(self, parent=None):\n        super(UIProcess, self).__init__(parent)\n        self.setupUi(self)\n        log.loginfo.process_log('Initialize UI')\n        # Get the screen resolution\n        self.screen = QDesktopWidget().screenGeometry()\n        self.width = self.screen.width()\n        self.height = self.screen.height()\n        # Add the IA logo\n        pixMap = QPixmap(systempath.bundle_dir + '/Resource/IA.png')\n        self.lb_ia.setMaximumWidth(self.width * 0.12)\n        self.lb_ia.setFixedHeight(self.height * 0.11)\n        self.lb_ia.setScaledContents(True)\n        self.lb_ia.setPixmap(pixMap)\n        # Read the title and version number\n        self.lb_title.setText(inihelper.read_ini(systempath.bundle_dir + '/Config/Config.ini', 'Config', 'Title'))\n        self.lb_ver.setText(inihelper.read_ini(systempath.bundle_dir + '/Config/Config.ini', 'Config', 'Version'))\n        # Switch to the main page\n        #self.tabWidget.tabBar().hide()\n        self.tabWidget.setCurrentIndex(0)\n        # Initialize the statistics display area\n        #self.le_time.setText('0')\n        #self.le_pass.setText('0')\n        #self.le_total.setText('0')\n        #self.le_yield.setText('0')\n\n        self.pe = QPalette()\n        self.pe2 = QPalette()\n\n        self.pe.setColor(QPalette.Window, QColor(0, 255, 0)) # set the background color\n        # Initialize each page\n        self.init_main_ui()\n        self.init_toolbar_ui()\n        self.init_vision_ui()\n        #self.init_motion_ui()\n        self.init_seq()\n        self.init_systeminfo()\n\n    def init_main_ui(self):\n        # Initialize the sizes of the widgets on the main window\n        self.systeminfo.setMaximumWidth(self.width * 0.2)\n        self.setMinimumHeight(self.height * 0.5)\n        self.setMinimumWidth(self.width * 0.5)\n        self.lb_title.setFixedHeight(self.height * 0.11)\n        self.lb_main_user.setMaximumWidth(self.width * 0.1)\n\n        self.pe2.setColor(QPalette.WindowText, QColor(8, 80, 208)) # set the font color\n        self.lb_main_user.setPalette(self.pe2)\n        self.lb_user_title.setPalette(self.pe2)\n        self.testlist.setStyleSheet('background-color: rgb(255, 255, 255);')\n\n    def init_systeminfo(self):\n        self.systeminfo.setRowCount(50)\n        self.systeminfo.setColumnCount(2)\n        self.systeminfo.setHorizontalHeaderLabels(['Item', 'Value'])\n        self.systeminfo.setColumnWidth(0, self.width * 0.06)\n        self.systeminfo.horizontalHeader().setStretchLastSection(True)\n        self.systeminfo.verticalHeader().hide()\n        data1 = ['State:', 'Total:', 'Pass:', 'Yield:']\n        data2 = ['Idle', '0', '0', '0']\n\n        for i in range(4):\n            newItem1 = QTableWidgetItem(data1[i])\n            self.systeminfo.setItem(i, 0, newItem1)\n            newItem2 = QTableWidgetItem(data2[i])\n            self.systeminfo.setItem(i, 1, newItem2)\n\n    def set_state(self, result):\n        newItem = QTableWidgetItem(result)\n        self.systeminfo.setItem(0, 1, newItem)\n        if(result=='Testing'):\n            newItem.setBackground(QColor(255,255,0))\n        elif (result == 'Fail'):\n            newItem.setBackground(QColor(255, 0, 0))\n        elif (result == 'Pass'):\n            newItem.setBackground(QColor(0, 255, 0))\n\n    def set_count(self, result):\n        # Count the number of tests and compute the pass rate\n        total_cnt = int(self.systeminfo.item(1, 1).text()) + 1\n        newItem = QTableWidgetItem(str(total_cnt))\n        self.systeminfo.setItem(1, 1, newItem)\n        if(result=='Pass'):\n            pass_cnt = int(self.systeminfo.item(2,1).text()) + 1\n            newItem = QTableWidgetItem(str(pass_cnt))\n            self.systeminfo.setItem(2, 1, newItem)\n            y_cnt = pass_cnt / total_cnt\n            newItem = QTableWidgetItem(str(\"%.2f\" % (y_cnt * 100)) + '%')\n            self.systeminfo.setItem(3, 1, newItem)\n\n    def init_toolbar_ui(self):\n        # Initialize the toolbar\n        icon1 = QtGui.QIcon()\n        icon1.addPixmap(QtGui.QPixmap(systempath.bundle_dir + \"/Resource/start.png\"), QtGui.QIcon.Normal,\n                        QtGui.QIcon.Off)\n        icon2 = QtGui.QIcon()\n        icon2.addPixmap(QtGui.QPixmap(systempath.bundle_dir + \"/Resource/stop.png\"), QtGui.QIcon.Normal,\n                        QtGui.QIcon.Off)\n        icon3 = QtGui.QIcon()\n        icon3.addPixmap(QtGui.QPixmap(systempath.bundle_dir + \"/Resource/home.png\"), QtGui.QIcon.Normal,\n                        QtGui.QIcon.Off)\n        icon4 = QtGui.QIcon()\n        icon4.addPixmap(QtGui.QPixmap(systempath.bundle_dir + \"/Resource/refresh.png\"), QtGui.QIcon.Normal,\n                        QtGui.QIcon.Off)\n        self.actionStart.setIcon(icon1)\n        self.actionStop.setIcon(icon2)\n        self.actionMainwindow.setIcon(icon3)\n        self.actionRefresh.setIcon(icon4)\n\n        font = QtGui.QFont()\n        font.setPointSize(10)\n        self.mystepbar = QCheckBox()\n        self.mystepbar.setText('StepTest')\n        self.mystepbar.setToolTip('Enable step test')\n        self.mystepbar.setFont(font)\n        self.toolBar.addWidget(self.mystepbar)\n        self.nextAction = QAction('Next', self)\n        self.toolBar.addAction(self.nextAction)\n        self.nextAction.triggered.connect(self.next_step)\n        self.nextAction.setToolTip('Next step')\n        self.nextAction.setDisabled(True)\n        self.toolBar.addSeparator()\n        self.myloopbar = QCheckBox()\n        self.myloopbar.setText('LoopTest:')\n        self.myloopbar.setToolTip('Enable loop test')\n        self.toolBar.addWidget(self.myloopbar)\n        self.myeditbar = QLineEdit()\n        self.myeditbar.setText('0')\n        self.myeditbar.setToolTip('Loop test times')\n        self.myloopbar.setFont(font)\n        self.myeditbar.setMaximumWidth(self.width * 0.03)\n        self.myeditbar.setStyleSheet('background-color: rgb(237, 237, 237);')\n        self.myeditbar.setFont(font)\n        self.toolBar.addWidget(self.myeditbar)\n        self.toolBar.addSeparator()\n        self.actionContinue.setDisabled(True)\n\n        self.language = QComboBox()\n        #self.language.setText('LoopTest')\n        self.language.setToolTip('Change language')\n        self.language.addItem('English')\n        self.language.addItem('中文')\n        self.language.setFixedWidth(self.width * 0.05)\n        # self.toolBar.addWidget(self.language)\n        self.language.currentIndexChanged.connect(self.change_language)\n        self.toolBar.setFixedHeight(self.height*0.03)\n        self.toolBar.setIconSize(QSize(int(self.height*0.02),int(self.height*0.03)))\n        self.language.setStyle(QStyleFactory.create(\"Fusion\")) # Plastique\n\n        self.lan = inihelper.read_ini(systempath.bundle_dir + '/Config/Config.ini', 'Config', 'Language')\n        if(self.lan=='EN'):\n            self.English_ui()\n        else:\n            self.Chinese_ui()\n\n\n    def init_vision_ui(self):\n        # Initialize the vision page\n        self.visionui = inihelper.read_ini(systempath.bundle_dir + '/Config/Config.ini', 'Config', 'Vision')\n        if (self.visionui != 'enable'):\n            self.actionVision_Window.setVisible(False)\n        self.pb_loadimg.setMaximumWidth(self.width * 0.1)\n        self.pb_snap.setMaximumWidth(self.width * 0.1)\n        self.pb_live.setMaximumWidth(self.width * 0.1)\n        self.pb_opencamera.setMaximumWidth(self.width * 0.1)\n        self.cb_camera.setMaximumWidth(self.width * 0.1)\n        self.sb_extime.setMaximumWidth(self.width * 0.1)\n        self.pb_snap.setDisabled(True)\n        self.pb_live.setDisabled(True)\n\n    def init_motion_ui(self):\n        # Initialize the motion page\n        self.lb_axis.setMaximumWidth(self.width * 0.08)\n        #self.lb_axis.setMaximumHeight(self.height * 0.03)\n        #self.lb_io.setMaximumWidth(self.width * 0.08)\n        #self.lb_io.setMaximumHeight(self.height * 0.03)\n        #self.cb_axis.setMaximumWidth(self.width * 0.2)\n        #self.pb_jog1.setMaximumWidth(self.width * 0.08)\n        #self.pb_jog2.setMaximumWidth(self.width * 0.08)\n        #self.pb_absolute.setMaximumWidth(self.width * 0.08)\n        #self.pb_relative.setMaximumWidth(self.width * 0.08)\n        #self.pb_axis_stop.setMaximumWidth(self.width * 0.08)\n        #self.pb_reset.setMaximumWidth(self.width * 0.08)\n\n    # Initialize the table for editing the test sequence\n    def init_seq(self):\n        log.loginfo.process_log('Initialize sequence table')\n        self.tableseq.setRowCount(50)\n        self.tableseq.setColumnCount(7)\n        self.tableseq.setHorizontalHeaderLabels(['TestItem', 'Function', 'Mode', 'Low Limit', 'Up Limit', 'Next Step', 'Level'])\n        self.tableseq.setColumnWidth(0, self.width * 0.4)\n        self.tableseq.setColumnWidth(1, self.width * 0.2)\n        self.tableseq.horizontalHeader().setStretchLastSection(True)\n        self.pb_saveseq.setMaximumWidth(self.width * 0.08)\n        self.cb_seq.setMaximumWidth(self.width * 0.08)\n        self.pb_delrow.setMaximumWidth(self.width * 0.08)\n        self.pb_insertrow.setMaximumWidth(self.width * 0.08)\n\n    def Chinese_ui(self):\n        # Toolbar\n        self.actionPause.setText('暂停')\n        self.actionContinue.setText('继续')\n        self.actionLoginTool.setText('登陆')\n        self.actionEdit.setText('编辑')\n        self.actionAutomation.setText('运动控制')\n        self.actionClear.setText('清除日志')\n        self.mystepbar.setText('单步测试')\n        self.nextAction.setText('下一步')\n        self.myloopbar.setText('循环测试:')\n        # Menu bar\n        self.menuFile.setTitle('文件')\n        self.actionOpen_CSV.setText('打开测试序列CSV')\n        self.actionOpen_Result.setText('打开结果CSV')\n        self.actionOpen_Log.setText('打开日志文件')\n        self.actionReload_Scripts.setText('重新加载脚本')\n        self.actionReload_CSV.setText('重新加载序列')\n        self.actionClose_System.setText('退出系统')\n        self.menuUser.setTitle('用户')\n        self.actionLogin.setText('登陆系统')\n        self.actionUser_Manage.setText('用户管理')\n        self.menuTool.setTitle('工具')\n        self.actionZmq_Debug.setText('Zmq调试工具')\n        self.actionTcp_Debug.setText('Tcp调试工具')\n        self.actionSerial_Debug.setText('串口调试工具')\n        self.menuWindow.setTitle('窗口')\n        self.actionMain_Window.setText('主窗口')\n        self.actionEdit_Window.setText('序列编辑窗口')\n        self.actionMotion_Window.setText('运动控制窗口')\n        self.actionVision_Window.setText('视觉窗口')\n        self.actionToolBar.setText('工具栏')\n        # Test sequence\n        self.testlist.setHeaderLabels(['测试项', '测试时间', '测试数据', '测试结果'])\n        # Sequence editor\n        self.lb_edit.setText('序列编辑')\n        self.pb_insertrow.setText('插入行')\n        self.pb_delrow.setText('删除选定行')\n        self.pb_saveseq.setText('保存序列')\n        self.tableseq.setHorizontalHeaderLabels(['测试项', '函数', '模式', '下限', '上限', '失败后跳转', '等级'])\n\n    def English_ui(self):\n        # Toolbar\n        self.actionPause.setText('Pause')\n        self.actionContinue.setText('Continue')\n        self.actionLoginTool.setText('Login')\n        self.actionEdit.setText('Edit')\n        self.actionAutomation.setText('Automation')\n        self.actionClear.setText('Clear')\n        self.mystepbar.setText('StepTest')\n        self.nextAction.setText('Next')\n        self.myloopbar.setText('LoopTest:')\n        # Menu bar\n        self.menuFile.setTitle('File')\n        self.actionOpen_CSV.setText('Open Sequence')\n        self.actionOpen_Result.setText('Open Result')\n        self.actionOpen_Log.setText('Open Log')\n        self.actionReload_Scripts.setText('Reload Scripts')\n        self.actionReload_CSV.setText('Reload Sequence')\n        self.actionClose_System.setText('Close System')\n        self.menuUser.setTitle('User')\n        self.actionLogin.setText('Login System')\n        self.actionUser_Manage.setText('User Manage')\n        self.menuTool.setTitle('Tool')\n        self.actionZmq_Debug.setText('Zmq Debug')\n        self.actionTcp_Debug.setText('Tcp Debug')\n        self.actionSerial_Debug.setText('Serial Debug')\n        self.menuWindow.setTitle('Window')\n        self.actionMain_Window.setText('Main Window')\n        self.actionEdit_Window.setText('Sequence Window')\n        self.actionMotion_Window.setText('Motion Window')\n        self.actionVision_Window.setText('Vision Window')\n        self.actionToolBar.setText('ToolBar')\n        # Test sequence\n        self.testlist.setHeaderLabels(['TestItems', 'Test Time', 'TestData', 'TestResult'])\n        # Sequence editor\n        self.lb_edit.setText('Edit Test Sequence')\n        self.pb_insertrow.setText('Insert Row')\n        self.pb_delrow.setText('Delete Row')\n        self.pb_saveseq.setText('Save')\n        self.tableseq.setHorizontalHeaderLabels(['TestItem', 'Function', 'Mode', 'Low Limit', 'Up Limit', 'Next Step', 'Level'])\n\n    def change_language(self):\n        if(self.language.currentIndex() == 0):\n            self.English_ui()\n        else:\n            
self.Chinese_ui()","sub_path":"UI/uiprocess.py","file_name":"uiprocess.py","file_ext":"py","file_size_in_byte":13256,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"632111300","text":"def palindrom(string):\n if string == string[::-1]:\n return True\n else:\n return False\n\n\ndef get_substring_combination(origin_string, hope_length):\n result_arr = []\n\n for i in range(len(origin_string) - (hope_length - 1)):\n sub_string = origin_string[i:i + hope_length]\n result_arr.append(sub_string)\n\n return result_arr\n\n\ndef solution(s):\n answer = 0\n s_length = len(s)\n\n while s_length > 0:\n sub_s = get_substring_combination(s, s_length)\n\n for sub in sub_s:\n if palindrom(sub):\n return len(sub)\n\n s_length -= 1\n\n return answer\n","sub_path":"Python/Lv3/가장_긴_팰린드롬/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":630,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"315572902","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Aug 29 13:11:54 2016\n\n@author: Stranger\n\"\"\"\n\n\nimport urllib2\nimport re\n\n\ndef we_get(city_name):\n str_city='city='+str(city_name)\n url='http://apis.baidu.com/heweather/weather/free?'+str_city\n req = urllib2.Request(url)\n req.add_header(\"apikey\", \"badfbfd251581f2d4320fcac8c7538f4\")\n resp = urllib2.urlopen(req)\n content= resp.read().decode('utf-8')\n city=re.findall(r'\"city\":\"(.*?)\"',content,re.S)\n update=re.findall(r'\"loc\":\"(.*?)\"',content,re.S)\n txt=re.findall(r'\"txt_d\":\"(.*?)\"',content,re.S)\n max_t=re.findall(r'\"max\":\"(.*?)\"',content,re.S)\n min_t=re.findall(r'\"min\":\"(.*?)\"',content,re.S)\n hum=re.findall(r'\"hum\":\"(.*?)\"',content,re.S)\n pcpn=re.findall(r'\"pcpn\":\"(.*?)\"',content,re.S)\n vis=re.findall(r'\"vis\":\"(.*?)\"',content,re.S)\n wind_dir=re.findall(r'\"dir\":\"(.*?)\"',content,re.S)\n wind_sc=re.findall(r'\"sc\":\"(.*?)\"',content,re.S)\n wind_spd=re.findall(r'\"spd\":\"(.*?)\"',content,re.S)\n return u'城市:'+city[0]+'\\\\n'+u'更新时间:'+update[0]+'\\\\n'+u'天气情况:'+txt[0]+'\\\\n'+u'最高温度:'+max_t[0]+u'度'+'\\\\n'+u'最低温度:'+min_t[0]+u'度'+'\\\\n'+u'相对湿度:'+hum[0]+'%'+'\\\\n'+u'降水量:'+pcpn[0]+'mm'+'\\\\n'+u'能见度:'+vis[0]+'km'+'\\\\n'+u'风向:'+wind_dir[0]+'\\\\n'+u'风力:'+wind_sc[0]+'\\\\n'+u'风速:'+wind_spd[0]+'km/h'+'\\\\n'","sub_path":"SAE/app/plugins/weather.py","file_name":"weather.py","file_ext":"py","file_size_in_byte":1375,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"40935420","text":"PALETTE = [\n {\n \"name\": \"orchidee 1\",\n \"label\": \"orchidee-1\",\n \"hex\": \"#883854\"\n },\n {\n \"name\": \"orchidee 2\",\n \"label\": \"orchidee-2\",\n \"hex\": \"#a74a7d\"\n },\n {\n \"name\": \"orchidee 3\",\n \"label\": \"orchidee-3\",\n \"hex\": \"#973e6a\"\n },\n {\n \"name\": \"orchidee 4\",\n \"label\": \"orchidee-4\",\n \"hex\": \"#b3558f\"\n },\n {\n \"name\": \"orchidee 5\",\n \"label\": \"orchidee-5\",\n \"hex\": \"#ba5c97\"\n },\n {\n \"name\": \"orchidee 6\",\n \"label\": \"orchidee-6\",\n \"hex\": \"#cd7bb5\"\n },\n {\n \"name\": \"orchidee 7\",\n \"label\": \"orchidee-7\",\n \"hex\": \"#c468a4\"\n },\n {\n \"name\": \"orchidee 8\",\n \"label\": \"orchidee-8\",\n \"hex\": \"#d88fc4\"\n },\n {\n \"name\": \"orchidee 9\",\n \"label\": \"orchidee-9\",\n \"hex\": \"#e0a3cf\"\n },\n {\n \"name\": \"orchidee 10\",\n \"label\": \"orchidee-10\",\n \"hex\": \"#f0c7e5\"\n },\n {\n \"name\": \"orchidee 11\",\n \"label\": 
\"orchidee-11\",\n \"hex\": \"#eab7dc\"\n },\n {\n \"name\": \"orchidee 12\",\n \"label\": \"orchidee-12\",\n \"hex\": \"#f4d5ea\"\n },\n {\n \"name\": \"orchidee 13\",\n \"label\": \"orchidee-13\",\n \"hex\": \"#f8e1ef\"\n },\n {\n \"name\": \"orchidee 14\",\n \"label\": \"orchidee-14\",\n \"hex\": \"#f9e9f0\"\n },\n {\n \"name\": \"merlot 1\",\n \"label\": \"merlot-1\",\n \"hex\": \"#8e4145\"\n },\n {\n \"name\": \"merlot 2\",\n \"label\": \"merlot-2\",\n \"hex\": \"#a04f5e\"\n },\n {\n \"name\": \"merlot 3\",\n \"label\": \"merlot-3\",\n \"hex\": \"#954751\"\n },\n {\n \"name\": \"merlot 4\",\n \"label\": \"merlot-4\",\n \"hex\": \"#aa586c\"\n },\n {\n \"name\": \"merlot 5\",\n \"label\": \"merlot-5\",\n \"hex\": \"#b16076\"\n },\n {\n \"name\": \"merlot 6\",\n \"label\": \"merlot-6\",\n \"hex\": \"#c57c94\"\n },\n {\n \"name\": \"merlot 7\",\n \"label\": \"merlot-7\",\n \"hex\": \"#b86880\"\n },\n {\n \"name\": \"merlot 8\",\n \"label\": \"merlot-8\",\n \"hex\": \"#d290a7\"\n },\n {\n \"name\": \"merlot 9\",\n \"label\": \"merlot-9\",\n \"hex\": \"#dda4b9\"\n },\n {\n \"name\": \"merlot 10\",\n \"label\": \"merlot-10\",\n \"hex\": \"#edc9d7\"\n },\n {\n \"name\": \"merlot 11\",\n \"label\": \"merlot-11\",\n \"hex\": \"#e6b8c9\"\n },\n {\n \"name\": \"merlot 12\",\n \"label\": \"merlot-12\",\n \"hex\": \"#f3d7df\"\n },\n {\n \"name\": \"merlot 13\",\n \"label\": \"merlot-13\",\n \"hex\": \"#f6e2e7\"\n },\n {\n \"name\": \"merlot 14\",\n \"label\": \"merlot-14\",\n \"hex\": \"#f7e9ea\"\n },\n {\n \"name\": \"wood 1\",\n \"label\": \"wood-1\",\n \"hex\": \"#5d4c46\"\n },\n {\n \"name\": \"wood 2\",\n \"label\": \"wood-2\",\n \"hex\": \"#766562\"\n },\n {\n \"name\": \"wood 3\",\n \"label\": \"wood-3\",\n \"hex\": \"#82726f\"\n },\n {\n \"name\": \"wood 4\",\n \"label\": \"wood-4\",\n \"hex\": \"#a59794\"\n },\n {\n \"name\": \"wood 5\",\n \"label\": \"wood-5\",\n \"hex\": \"#d1cac8\"\n },\n {\n \"name\": \"cramberry 1\",\n \"label\": \"cramberry-1\",\n \"hex\": \"#a1383d\"\n },\n {\n \"name\": \"cramberry 2\",\n \"label\": \"cramberry-2\",\n \"hex\": \"#b54356\"\n },\n {\n \"name\": \"cramberry 3\",\n \"label\": \"cramberry-3\",\n \"hex\": \"#ab3b49\"\n },\n {\n \"name\": \"cramberry 4\",\n \"label\": \"cramberry-4\",\n \"hex\": \"#c14e67\"\n },\n {\n \"name\": \"cramberry 5\",\n \"label\": \"cramberry-5\",\n \"hex\": \"#c4526d\"\n },\n {\n \"name\": \"cramberry 6\",\n \"label\": \"cramberry-6\",\n \"hex\": \"#d66d8c\"\n },\n {\n \"name\": \"cramberry 7\",\n \"label\": \"cramberry-7\",\n \"hex\": \"#cc5c79\"\n },\n {\n \"name\": \"cramberry 8\",\n \"label\": \"cramberry-8\",\n \"hex\": \"#df819f\"\n },\n {\n \"name\": \"cramberry 9\",\n \"label\": \"cramberry-9\",\n \"hex\": \"#e896b2\"\n },\n {\n \"name\": \"cramberry 10\",\n \"label\": \"cramberry-10\",\n \"hex\": \"#f3bdd0\"\n },\n {\n \"name\": \"cramberry 11\",\n \"label\": \"cramberry-11\",\n \"hex\": \"#f0acc4\"\n },\n {\n \"name\": \"cramberry 12\",\n \"label\": \"cramberry-12\",\n \"hex\": \"#f7cedb\"\n },\n {\n \"name\": \"cramberry 13\",\n \"label\": \"cramberry-13\",\n \"hex\": \"#fbdde6\"\n },\n {\n \"name\": \"cramberry 14\",\n \"label\": \"cramberry-14\",\n \"hex\": \"#fcecec\"\n },\n {\n \"name\": \"blackberry 1\",\n \"label\": \"blackberry-1\",\n \"hex\": \"#503e3b\"\n },\n {\n \"name\": \"blackberry 2\",\n \"label\": \"blackberry-2\",\n \"hex\": \"#5f454b\"\n },\n {\n \"name\": \"blackberry 3\",\n \"label\": \"blackberry-3\",\n \"hex\": \"#5d4748\"\n },\n {\n \"name\": \"blackberry 4\",\n \"label\": \"blackberry-4\",\n 
\"hex\": \"#70525c\"\n },\n {\n \"name\": \"blackberry 5\",\n \"label\": \"blackberry-5\",\n \"hex\": \"#765863\"\n },\n {\n \"name\": \"blackberry 6\",\n \"label\": \"blackberry-6\",\n \"hex\": \"#906d7d\"\n },\n {\n \"name\": \"blackberry 7\",\n \"label\": \"blackberry-7\",\n \"hex\": \"#7d5c6a\"\n },\n {\n \"name\": \"blackberry 8\",\n \"label\": \"blackberry-8\",\n \"hex\": \"#a37f91\"\n },\n {\n \"name\": \"blackberry 9\",\n \"label\": \"blackberry-9\",\n \"hex\": \"#b593a5\"\n },\n {\n \"name\": \"blackberry 10\",\n \"label\": \"blackberry-10\",\n \"hex\": \"#d5bbc8\"\n },\n {\n \"name\": \"blackberry 11\",\n \"label\": \"blackberry-11\",\n \"hex\": \"#c7aab9\"\n },\n {\n \"name\": \"blackberry 12\",\n \"label\": \"blackberry-12\",\n \"hex\": \"#e1ced7\"\n },\n {\n \"name\": \"blackberry 13\",\n \"label\": \"blackberry-13\",\n \"hex\": \"#eadbe0\"\n },\n {\n \"name\": \"blackberry 14\",\n \"label\": \"blackberry-14\",\n \"hex\": \"#f1e6e7\"\n },\n {\n \"name\": \"oxid 1\",\n \"label\": \"oxid-1\",\n \"hex\": \"#774643\"\n },\n {\n \"name\": \"oxid 2\",\n \"label\": \"oxid-2\",\n \"hex\": \"#774642\"\n },\n {\n \"name\": \"oxid 3\",\n \"label\": \"oxid-3\",\n \"hex\": \"#96545d\"\n },\n {\n \"name\": \"oxid 4\",\n \"label\": \"oxid-4\",\n \"hex\": \"#a4606d\"\n },\n {\n \"name\": \"oxid 5\",\n \"label\": \"oxid-5\",\n \"hex\": \"#c48292\"\n },\n {\n \"name\": \"cherry 1\",\n \"label\": \"cherry-1\",\n \"hex\": \"#e7bdc8\"\n },\n {\n \"name\": \"cherry 2\",\n \"label\": \"cherry-2\",\n \"hex\": \"#bd3a3a\"\n },\n {\n \"name\": \"cherry 3\",\n \"label\": \"cherry-3\",\n \"hex\": \"#cb444a\"\n },\n {\n \"name\": \"cherry 4\",\n \"label\": \"cherry-4\",\n \"hex\": \"#c54043\"\n },\n {\n \"name\": \"cherry 5\",\n \"label\": \"cherry-5\",\n \"hex\": \"#d14d59\"\n },\n {\n \"name\": \"cherry 6\",\n \"label\": \"cherry-6\",\n \"hex\": \"#d65261\"\n },\n {\n \"name\": \"cherry 7\",\n \"label\": \"cherry-7\",\n \"hex\": \"#e3697d\"\n },\n {\n \"name\": \"cherry 8\",\n \"label\": \"cherry-8\",\n \"hex\": \"#dc5a6b\"\n },\n {\n \"name\": \"cherry 9\",\n \"label\": \"cherry-9\",\n \"hex\": \"#eb7d92\"\n },\n {\n \"name\": \"cherry 10\",\n \"label\": \"cherry-10\",\n \"hex\": \"#f193a7\"\n },\n {\n \"name\": \"cherry 11\",\n \"label\": \"cherry-11\",\n \"hex\": \"#fabac9\"\n },\n {\n \"name\": \"cherry 12\",\n \"label\": \"cherry-12\",\n \"hex\": \"#f6a6b8\"\n },\n {\n \"name\": \"cherry 13\",\n \"label\": \"cherry-13\",\n \"hex\": \"#fbcbd5\"\n },\n {\n \"name\": \"cherry 14\",\n \"label\": \"cherry-14\",\n \"hex\": \"#fdd9e0\"\n },\n {\n \"name\": \"mohn 1\",\n \"label\": \"mohn-1\",\n \"hex\": \"#fee4e7\"\n },\n {\n \"name\": \"mohn 2\",\n \"label\": \"mohn-2\",\n \"hex\": \"#a64739\"\n },\n {\n \"name\": \"mohn 3\",\n \"label\": \"mohn-3\",\n \"hex\": \"#b75049\"\n },\n {\n \"name\": \"mohn 4\",\n \"label\": \"mohn-4\",\n \"hex\": \"#b24f45\"\n },\n {\n \"name\": \"mohn 5\",\n \"label\": \"mohn-5\",\n \"hex\": \"#c65b59\"\n },\n {\n \"name\": \"mohn 6\",\n \"label\": \"mohn-6\",\n \"hex\": \"#ca5d5c\"\n },\n {\n \"name\": \"mohn 7\",\n \"label\": \"mohn-7\",\n \"hex\": \"#da7579\"\n },\n {\n \"name\": \"mohn 8\",\n \"label\": \"mohn-8\",\n \"hex\": \"#d16667\"\n },\n {\n \"name\": \"mohn 9\",\n \"label\": \"mohn-9\",\n \"hex\": \"#e7898f\"\n },\n {\n \"name\": \"mohn 10\",\n \"label\": \"mohn-10\",\n \"hex\": \"#ef9ca3\"\n },\n {\n \"name\": \"mohn 11\",\n \"label\": \"mohn-11\",\n \"hex\": \"#f8c0c4\"\n },\n {\n \"name\": \"mohn 12\",\n \"label\": \"mohn-12\",\n \"hex\": \"#f5b0b5\"\n },\n {\n 
\"name\": \"mohn 13\",\n \"label\": \"mohn-13\",\n \"hex\": \"#fdd1d3\"\n },\n {\n \"name\": \"mohn 14\",\n \"label\": \"mohn-14\",\n \"hex\": \"#fedddd\"\n },\n {\n \"name\": \"earth 1\",\n \"label\": \"earth-1\",\n \"hex\": \"#fee7e5\"\n },\n {\n \"name\": \"earth 2\",\n \"label\": \"earth-2\",\n \"hex\": \"#af7669\"\n },\n {\n \"name\": \"earth 3\",\n \"label\": \"earth-3\",\n \"hex\": \"#bb8478\"\n },\n {\n \"name\": \"earth 4\",\n \"label\": \"earth-4\",\n \"hex\": \"#d3a79d\"\n },\n {\n \"name\": \"earth 5\",\n \"label\": \"earth-5\",\n \"hex\": \"#ecd6cf\"\n },\n {\n \"name\": \"tomato 1\",\n \"label\": \"tomato-1\",\n \"hex\": \"#ce443a\"\n },\n {\n \"name\": \"tomato 2\",\n \"label\": \"tomato-2\",\n \"hex\": \"#da5049\"\n },\n {\n \"name\": \"tomato 3\",\n \"label\": \"tomato-3\",\n \"hex\": \"#d64a41\"\n },\n {\n \"name\": \"tomato 4\",\n \"label\": \"tomato-4\",\n \"hex\": \"#e05956\"\n },\n {\n \"name\": \"tomato 5\",\n \"label\": \"tomato-5\",\n \"hex\": \"#e4605e\"\n },\n {\n \"name\": \"tomato 6\",\n \"label\": \"tomato-6\",\n \"hex\": \"#ef787a\"\n },\n {\n \"name\": \"tomato 7\",\n \"label\": \"tomato-7\",\n \"hex\": \"#e96667\"\n },\n {\n \"name\": \"tomato 8\",\n \"label\": \"tomato-8\",\n \"hex\": \"#f68a8e\"\n },\n {\n \"name\": \"tomato 9\",\n \"label\": \"tomato-9\",\n \"hex\": \"#f99ca1\"\n },\n {\n \"name\": \"tomato 10\",\n \"label\": \"tomato-10\",\n \"hex\": \"#ffc3c6\"\n },\n {\n \"name\": \"tomato 11\",\n \"label\": \"tomato-11\",\n \"hex\": \"#ffb2b5\"\n },\n {\n \"name\": \"tomato 12\",\n \"label\": \"tomato-12\",\n \"hex\": \"#ffd2d3\"\n },\n {\n \"name\": \"tomato 13\",\n \"label\": \"tomato-13\",\n \"hex\": \"#ffdedf\"\n },\n {\n \"name\": \"tomato 14\",\n \"label\": \"tomato-14\",\n \"hex\": \"#fdebe8\"\n },\n {\n \"name\": \"sepia 1\",\n \"label\": \"sepia-1\",\n \"hex\": \"#7d4b41\"\n },\n {\n \"name\": \"sepia 2\",\n \"label\": \"sepia-2\",\n \"hex\": \"#92554f\"\n },\n {\n \"name\": \"sepia 3\",\n \"label\": \"sepia-3\",\n \"hex\": \"#8a5048\"\n },\n {\n \"name\": \"sepia 4\",\n \"label\": \"sepia-4\",\n \"hex\": \"#a15f5b\"\n },\n {\n \"name\": \"sepia 5\",\n \"label\": \"sepia-5\",\n \"hex\": \"#a86663\"\n },\n {\n \"name\": \"sepia 6\",\n \"label\": \"sepia-6\",\n \"hex\": \"#c07f7e\"\n },\n {\n \"name\": \"sepia 7\",\n \"label\": \"sepia-7\",\n \"hex\": \"#b06e6b\"\n },\n {\n \"name\": \"sepia 8\",\n \"label\": \"sepia-8\",\n \"hex\": \"#ce9292\"\n },\n {\n \"name\": \"sepia 9\",\n \"label\": \"sepia-9\",\n \"hex\": \"#d9a4a3\"\n },\n {\n \"name\": \"sepia 10\",\n \"label\": \"sepia-10\",\n \"hex\": \"#edc9c7\"\n },\n {\n \"name\": \"sepia 11\",\n \"label\": \"sepia-11\",\n \"hex\": \"#e5b8b7\"\n },\n {\n \"name\": \"sepia 12\",\n \"label\": \"sepia-12\",\n \"hex\": \"#f3d7d5\"\n },\n {\n \"name\": \"sepia 13\",\n \"label\": \"sepia-13\",\n \"hex\": \"#f7e3de\"\n },\n {\n \"name\": \"sepia 14\",\n \"label\": \"sepia-14\",\n \"hex\": \"#f9ede7\"\n },\n {\n \"name\": \"magma 1\",\n \"label\": \"magma-1\",\n \"hex\": \"#c26447\"\n },\n {\n \"name\": \"magma 3\",\n \"label\": \"magma-3\",\n \"hex\": \"#e28e71\"\n },\n {\n \"name\": \"magma 4\",\n \"label\": \"magma-4\",\n \"hex\": \"#efb198\"\n },\n {\n \"name\": \"magma 5\",\n \"label\": \"magma-5\",\n \"hex\": \"#f9dccd\"\n },\n {\n \"name\": \"paprica 1\",\n \"label\": \"paprica-1\",\n \"hex\": \"#e95c2c\"\n },\n {\n \"name\": \"paprica 2\",\n \"label\": \"paprica-2\",\n \"hex\": \"#f1673f\"\n },\n {\n \"name\": \"paprica 3\",\n \"label\": \"paprica-3\",\n \"hex\": \"#f16337\"\n },\n {\n 
\"name\": \"paprica 4\",\n \"label\": \"paprica-4\",\n \"hex\": \"#f97351\"\n },\n {\n \"name\": \"paprica 5\",\n \"label\": \"paprica-5\",\n \"hex\": \"#fb7757\"\n },\n {\n \"name\": \"paprica 6\",\n \"label\": \"paprica-6\",\n \"hex\": \"#ff8e77\"\n },\n {\n \"name\": \"paprica 7\",\n \"label\": \"paprica-7\",\n \"hex\": \"#fe8064\"\n },\n {\n \"name\": \"paprica 8\",\n \"label\": \"paprica-8\",\n \"hex\": \"#ff9f8f\"\n },\n {\n \"name\": \"paprica 9\",\n \"label\": \"paprica-9\",\n \"hex\": \"#ffafa2\"\n },\n {\n \"name\": \"paprica 10\",\n \"label\": \"paprica-10\",\n \"hex\": \"#ffcec5\"\n },\n {\n \"name\": \"paprica 11\",\n \"label\": \"paprica-11\",\n \"hex\": \"#ffbfb5\"\n },\n {\n \"name\": \"paprica 12\",\n \"label\": \"paprica-12\",\n \"hex\": \"#ffd9d3\"\n },\n {\n \"name\": \"paprica 13\",\n \"label\": \"paprica-13\",\n \"hex\": \"#ffe2dd\"\n },\n {\n \"name\": \"paprica 14\",\n \"label\": \"paprica-14\",\n \"hex\": \"#fff0ea\"\n },\n {\n \"name\": \"grenadine 1\",\n \"label\": \"grenadine-1\",\n \"hex\": \"#a34f35\"\n },\n {\n \"name\": \"grenadine 2\",\n \"label\": \"grenadine-2\",\n \"hex\": \"#b45f46\"\n },\n {\n \"name\": \"grenadine 3\",\n \"label\": \"grenadine-3\",\n \"hex\": \"#ae5942\"\n },\n {\n \"name\": \"grenadine 4\",\n \"label\": \"grenadine-4\",\n \"hex\": \"#bf6b54\"\n },\n {\n \"name\": \"grenadine 5\",\n \"label\": \"grenadine-5\",\n \"hex\": \"#c5715a\"\n },\n {\n \"name\": \"grenadine 6\",\n \"label\": \"grenadine-6\",\n \"hex\": \"#d78b76\"\n },\n {\n \"name\": \"grenadine 7\",\n \"label\": \"grenadine-7\",\n \"hex\": \"#cb7a64\"\n },\n {\n \"name\": \"grenadine 8\",\n \"label\": \"grenadine-8\",\n \"hex\": \"#e19d8a\"\n },\n {\n \"name\": \"grenadine 9\",\n \"label\": \"grenadine-9\",\n \"hex\": \"#ecb0a0\"\n },\n {\n \"name\": \"grenadine 10\",\n \"label\": \"grenadine-10\",\n \"hex\": \"#f9d2c8\"\n },\n {\n \"name\": \"grenadine 11\",\n \"label\": \"grenadine-11\",\n \"hex\": \"#f2c2b5\"\n },\n {\n \"name\": \"grenadine 12\",\n \"label\": \"grenadine-12\",\n \"hex\": \"#fbdfd5\"\n },\n {\n \"name\": \"grenadine 13\",\n \"label\": \"grenadine-13\",\n \"hex\": \"#fde7df\"\n },\n {\n \"name\": \"grenadine 14\",\n \"label\": \"grenadine-14\",\n \"hex\": \"#feefe8\"\n },\n {\n \"name\": \"terra 1\",\n \"label\": \"terra-1\",\n \"hex\": \"#a66049\"\n },\n {\n \"name\": \"terra 2\",\n \"label\": \"terra-2\",\n \"hex\": \"#c07e65\"\n },\n {\n \"name\": \"terra 3\",\n \"label\": \"terra-3\",\n \"hex\": \"#cc8d74\"\n },\n {\n \"name\": \"terra 4\",\n \"label\": \"terra-4\",\n \"hex\": \"#e2b19b\"\n },\n {\n \"name\": \"terra 5\",\n \"label\": \"terra-5\",\n \"hex\": \"#f3ddcf\"\n },\n {\n \"name\": \"mandarine 1\",\n \"label\": \"mandarine-1\",\n \"hex\": \"#f56f2f\"\n },\n {\n \"name\": \"mandarine 2\",\n \"label\": \"mandarine-2\",\n \"hex\": \"#fc7f3e\"\n },\n {\n \"name\": \"mandarine 3\",\n \"label\": \"mandarine-3\",\n \"hex\": \"#fa7733\"\n },\n {\n \"name\": \"mandarine 4\",\n \"label\": \"mandarine-4\",\n \"hex\": \"#fe8a4d\"\n },\n {\n \"name\": \"mandarine 5\",\n \"label\": \"mandarine-5\",\n \"hex\": \"#ff9056\"\n },\n {\n \"name\": \"mandarine 6\",\n \"label\": \"mandarine-6\",\n \"hex\": \"#ffa774\"\n },\n {\n \"name\": \"mandarine 7\",\n \"label\": \"mandarine-7\",\n \"hex\": \"#ff985f\"\n },\n {\n \"name\": \"mandarine 8\",\n \"label\": \"mandarine-8\",\n \"hex\": \"#ffb689\"\n },\n {\n \"name\": \"mandarine 9\",\n \"label\": \"mandarine-9\",\n \"hex\": \"#ffc59f\"\n },\n {\n \"name\": \"mandarine 10\",\n \"label\": \"mandarine-10\",\n \"hex\": 
\"#ffddc4\"\n },\n {\n \"name\": \"mandarine 11\",\n \"label\": \"mandarine-11\",\n \"hex\": \"#ffd2b2\"\n },\n {\n \"name\": \"mandarine 12\",\n \"label\": \"mandarine-12\",\n \"hex\": \"#ffe6d1\"\n },\n {\n \"name\": \"mandarine 13\",\n \"label\": \"mandarine-13\",\n \"hex\": \"#ffeddb\"\n },\n {\n \"name\": \"mandarine 14\",\n \"label\": \"mandarine-14\",\n \"hex\": \"#fff2e5\"\n },\n {\n \"name\": \"zimt 1\",\n \"label\": \"zimt-1\",\n \"hex\": \"#d9793b\"\n },\n {\n \"name\": \"zimt 2\",\n \"label\": \"zimt-2\",\n \"hex\": \"#e78a4c\"\n },\n {\n \"name\": \"zimt 3\",\n \"label\": \"zimt-3\",\n \"hex\": \"#e28243\"\n },\n {\n \"name\": \"zimt 4\",\n \"label\": \"zimt-4\",\n \"hex\": \"#ee9456\"\n },\n {\n \"name\": \"zimt 5\",\n \"label\": \"zimt-5\",\n \"hex\": \"#ef9a5d\"\n },\n {\n \"name\": \"zimt 6\",\n \"label\": \"zimt-6\",\n \"hex\": \"#fab07b\"\n },\n {\n \"name\": \"zimt 7\",\n \"label\": \"zimt-7\",\n \"hex\": \"#f5a268\"\n },\n {\n \"name\": \"zimt 8\",\n \"label\": \"zimt-8\",\n \"hex\": \"#fdbd8f\"\n },\n {\n \"name\": \"zimt 9\",\n \"label\": \"zimt-9\",\n \"hex\": \"#ffcca5\"\n },\n {\n \"name\": \"zimt 10\",\n \"label\": \"zimt-10\",\n \"hex\": \"#ffe3c9\"\n },\n {\n \"name\": \"zimt 11\",\n \"label\": \"zimt-11\",\n \"hex\": \"#ffd9ba\"\n },\n {\n \"name\": \"zimt 12\",\n \"label\": \"zimt-12\",\n \"hex\": \"#ffebd7\"\n },\n {\n \"name\": \"zimt 13\",\n \"label\": \"zimt-13\",\n \"hex\": \"#fdf0e1\"\n },\n {\n \"name\": \"zimt 14\",\n \"label\": \"zimt-14\",\n \"hex\": \"#fef4e8\"\n },\n {\n \"name\": \"bernstein 1\",\n \"label\": \"bernstein-1\",\n \"hex\": \"#c68345\"\n },\n {\n \"name\": \"bernstein 2\",\n \"label\": \"bernstein-2\",\n \"hex\": \"#dfa168\"\n },\n {\n \"name\": \"bernstein 3\",\n \"label\": \"bernstein-3\",\n \"hex\": \"#e8ae79\"\n },\n {\n \"name\": \"bernstein 4\",\n \"label\": \"bernstein-4\",\n \"hex\": \"#f3c89f\"\n },\n {\n \"name\": \"bernstein 5\",\n \"label\": \"bernstein-5\",\n \"hex\": \"#fde9d5\"\n },\n {\n \"name\": \"papaya 1\",\n \"label\": \"papaya-1\",\n \"hex\": \"#ff8b28\"\n },\n {\n \"name\": \"papaya 2\",\n \"label\": \"papaya-2\",\n \"hex\": \"#ff9733\"\n },\n {\n \"name\": \"papaya 3\",\n \"label\": \"papaya-3\",\n \"hex\": \"#ff9233\"\n },\n {\n \"name\": \"papaya 4\",\n \"label\": \"papaya-4\",\n \"hex\": \"#ffa548\"\n },\n {\n \"name\": \"papaya 5\",\n \"label\": \"papaya-5\",\n \"hex\": \"#ffaa51\"\n },\n {\n \"name\": \"papaya 6\",\n \"label\": \"papaya-6\",\n \"hex\": \"#ffbe6e\"\n },\n {\n \"name\": \"papaya 7\",\n \"label\": \"papaya-7\",\n \"hex\": \"#ffb057\"\n },\n {\n \"name\": \"papaya 8\",\n \"label\": \"papaya-8\",\n \"hex\": \"#ffca85\"\n },\n {\n \"name\": \"papaya 9\",\n \"label\": \"papaya-9\",\n \"hex\": \"#ffd69b\"\n },\n {\n \"name\": \"papaya 10\",\n \"label\": \"papaya-10\",\n \"hex\": \"#ffe8c3\"\n },\n {\n \"name\": \"papaya 11\",\n \"label\": \"papaya-11\",\n \"hex\": \"#ffe0b2\"\n },\n {\n \"name\": \"papaya 12\",\n \"label\": \"papaya-12\",\n \"hex\": \"#ffedd1\"\n },\n {\n \"name\": \"papaya 13\",\n \"label\": \"papaya-13\",\n \"hex\": \"#fdf2dd\"\n },\n {\n \"name\": \"papaya 14\",\n \"label\": \"papaya-14\",\n \"hex\": \"#fdf9ea\"\n },\n {\n \"name\": \"cognac 1\",\n \"label\": \"cognac-1\",\n \"hex\": \"#ab693d\"\n },\n {\n \"name\": \"cognac 2\",\n \"label\": \"cognac-2\",\n \"hex\": \"#bc7a46\"\n },\n {\n \"name\": \"cognac 3\",\n \"label\": \"cognac-3\",\n \"hex\": \"#b3703b\"\n },\n {\n \"name\": \"cognac 4\",\n \"label\": \"cognac-4\",\n \"hex\": \"#c98953\"\n },\n {\n \"name\": \"cognac 
5\",\n \"label\": \"cognac-5\",\n \"hex\": \"#ce8f59\"\n },\n {\n \"name\": \"cognac 6\",\n \"label\": \"cognac-6\",\n \"hex\": \"#dea773\"\n },\n {\n \"name\": \"cognac 7\",\n \"label\": \"cognac-7\",\n \"hex\": \"#d3965e\"\n },\n {\n \"name\": \"cognac 8\",\n \"label\": \"cognac-8\",\n \"hex\": \"#e6b787\"\n },\n {\n \"name\": \"cognac 9\",\n \"label\": \"cognac-9\",\n \"hex\": \"#eec69c\"\n },\n {\n \"name\": \"cognac 10\",\n \"label\": \"cognac-10\",\n \"hex\": \"#f7e0c3\"\n },\n {\n \"name\": \"cognac 11\",\n \"label\": \"cognac-11\",\n \"hex\": \"#f4d5b5\"\n },\n {\n \"name\": \"cognac 12\",\n \"label\": \"cognac-12\",\n \"hex\": \"#faead4\"\n },\n {\n \"name\": \"cognac 13\",\n \"label\": \"cognac-13\",\n \"hex\": \"#fcf0de\"\n },\n {\n \"name\": \"cognac 14\",\n \"label\": \"cognac-14\",\n \"hex\": \"#fdf4e6\"\n },\n {\n \"name\": \"taiga 1\",\n \"label\": \"taiga-1\",\n \"hex\": \"#a26d4b\"\n },\n {\n \"name\": \"taiga 2\",\n \"label\": \"taiga-2\",\n \"hex\": \"#bf8b66\"\n },\n {\n \"name\": \"taiga 3\",\n \"label\": \"taiga-3\",\n \"hex\": \"#cc9b76\"\n },\n {\n \"name\": \"taiga 5\",\n \"label\": \"taiga-5\",\n \"hex\": \"#f5e3d0\"\n },\n {\n \"name\": \"physalis 1\",\n \"label\": \"physalis-1\",\n \"hex\": \"#ff9824\"\n },\n {\n \"name\": \"physalis 2\",\n \"label\": \"physalis-2\",\n \"hex\": \"#ffaa33\"\n },\n {\n \"name\": \"physalis 3\",\n \"label\": \"physalis-3\",\n \"hex\": \"#ffa32a\"\n },\n {\n \"name\": \"physalis 4\",\n \"label\": \"physalis-4\",\n \"hex\": \"#ffb442\"\n },\n {\n \"name\": \"physalis 5\",\n \"label\": \"physalis-5\",\n \"hex\": \"#ffb849\"\n },\n {\n \"name\": \"physalis 6\",\n \"label\": \"physalis-6\",\n \"hex\": \"#ffc96b\"\n },\n {\n \"name\": \"physalis 7\",\n \"label\": \"physalis-7\",\n \"hex\": \"#ffbe56\"\n },\n {\n \"name\": \"physalis 8\",\n \"label\": \"physalis-8\",\n \"hex\": \"#ffd483\"\n },\n {\n \"name\": \"physalis 9\",\n \"label\": \"physalis-9\",\n \"hex\": \"#ffde99\"\n },\n {\n \"name\": \"physalis 10\",\n \"label\": \"physalis-10\",\n \"hex\": \"#ffedc1\"\n },\n {\n \"name\": \"physalis 11\",\n \"label\": \"physalis-11\",\n \"hex\": \"#ffe6af\"\n },\n {\n \"name\": \"physalis 12\",\n \"label\": \"physalis-12\",\n \"hex\": \"#fff2d1\"\n },\n {\n \"name\": \"physalis 13\",\n \"label\": \"physalis-13\",\n \"hex\": \"#fdf4da\"\n },\n {\n \"name\": \"physalis 14\",\n \"label\": \"physalis-14\",\n \"hex\": \"#fbf8e8\"\n },\n {\n \"name\": \"cacao 1\",\n \"label\": \"cacao-1\",\n \"hex\": \"#7e5a3f\"\n },\n {\n \"name\": \"cacao 2\",\n \"label\": \"cacao-2\",\n \"hex\": \"#976b4c\"\n },\n {\n \"name\": \"cacao 3\",\n \"label\": \"cacao-3\",\n \"hex\": \"#8f6548\"\n },\n {\n \"name\": \"cacao 4\",\n \"label\": \"cacao-4\",\n \"hex\": \"#a77858\"\n },\n {\n \"name\": \"cacao 5\",\n \"label\": \"cacao-5\",\n \"hex\": \"#ad7d5d\"\n },\n {\n \"name\": \"cacao 6\",\n \"label\": \"cacao-6\",\n \"hex\": \"#c49676\"\n },\n {\n \"name\": \"cacao 7\",\n \"label\": \"cacao-7\",\n \"hex\": \"#b58664\"\n },\n {\n \"name\": \"cacao 8\",\n \"label\": \"cacao-8\",\n \"hex\": \"#d1a88a\"\n },\n {\n \"name\": \"cacao 9\",\n \"label\": \"cacao-9\",\n \"hex\": \"#ddba9e\"\n },\n {\n \"name\": \"cacao 10\",\n \"label\": \"cacao-10\",\n \"hex\": \"#f0d9c4\"\n },\n {\n \"name\": \"cacao 11\",\n \"label\": \"cacao-11\",\n \"hex\": \"#e8cbb2\"\n },\n {\n \"name\": \"cacao 12\",\n \"label\": \"cacao-12\",\n \"hex\": \"#f3e3d1\"\n },\n {\n \"name\": \"cacao 13\",\n \"label\": \"cacao-13\",\n \"hex\": \"#f9eddd\"\n },\n {\n \"name\": \"cacao 14\",\n \"label\": 
\"cacao-14\",\n \"hex\": \"#faf3e4\"\n },\n {\n \"name\": \"caramel 1\",\n \"label\": \"caramel-1\",\n \"hex\": \"#c9933f\"\n },\n {\n \"name\": \"caramel 2\",\n \"label\": \"caramel-2\",\n \"hex\": \"#e1b05f\"\n },\n {\n \"name\": \"caramel 3\",\n \"label\": \"caramel-3\",\n \"hex\": \"#eabc71\"\n },\n {\n \"name\": \"caramel 4\",\n \"label\": \"caramel-4\",\n \"hex\": \"#f4d398\"\n },\n {\n \"name\": \"caramel 5\",\n \"label\": \"caramel-5\",\n \"hex\": \"#fdeed1\"\n },\n {\n \"name\": \"marille 1\",\n \"label\": \"marille-1\",\n \"hex\": \"#ffae12\"\n },\n {\n \"name\": \"marille 2\",\n \"label\": \"marille-2\",\n \"hex\": \"#ffbb2b\"\n },\n {\n \"name\": \"marille 3\",\n \"label\": \"marille-3\",\n \"hex\": \"#ffb41d\"\n },\n {\n \"name\": \"marille 4\",\n \"label\": \"marille-4\",\n \"hex\": \"#ffc13c\"\n },\n {\n \"name\": \"marille 5\",\n \"label\": \"marille-5\",\n \"hex\": \"#ffc744\"\n },\n {\n \"name\": \"marille 6\",\n \"label\": \"marille-6\",\n \"hex\": \"#ffd367\"\n },\n {\n \"name\": \"marille 7\",\n \"label\": \"marille-7\",\n \"hex\": \"#ffcc52\"\n },\n {\n \"name\": \"marille 8\",\n \"label\": \"marille-8\",\n \"hex\": \"#ffdd7f\"\n },\n {\n \"name\": \"marille 9\",\n \"label\": \"marille-9\",\n \"hex\": \"#ffe599\"\n },\n {\n \"name\": \"marille 10\",\n \"label\": \"marille-10\",\n \"hex\": \"#fff1c2\"\n },\n {\n \"name\": \"marille 11\",\n \"label\": \"marille-11\",\n \"hex\": \"#ffebaf\"\n },\n {\n \"name\": \"marille 12\",\n \"label\": \"marille-12\",\n \"hex\": \"#fff5d0\"\n },\n {\n \"name\": \"marille 13\",\n \"label\": \"marille-13\",\n \"hex\": \"#fdf7dc\"\n },\n {\n \"name\": \"marille 14\",\n \"label\": \"marille-14\",\n \"hex\": \"#fdfae8\"\n },\n {\n \"name\": \"kandis 1\",\n \"label\": \"kandis-1\",\n \"hex\": \"#715a47\"\n },\n {\n \"name\": \"kandis 2\",\n \"label\": \"kandis-2\",\n \"hex\": \"#856c57\"\n },\n {\n \"name\": \"kandis 3\",\n \"label\": \"kandis-3\",\n \"hex\": \"#7e6653\"\n },\n {\n \"name\": \"kandis 4\",\n \"label\": \"kandis-4\",\n \"hex\": \"#957d68\"\n },\n {\n \"name\": \"kandis 5\",\n \"label\": \"kandis-5\",\n \"hex\": \"#9c846f\"\n },\n {\n \"name\": \"kandis 6\",\n \"label\": \"kandis-6\",\n \"hex\": \"#b49f8b\"\n },\n {\n \"name\": \"kandis 7\",\n \"label\": \"kandis-7\",\n \"hex\": \"#a38d78\"\n },\n {\n \"name\": \"kandis 8\",\n \"label\": \"kandis-8\",\n \"hex\": \"#c3b19e\"\n },\n {\n \"name\": \"kandis 9\",\n \"label\": \"kandis-9\",\n \"hex\": \"#d1c1af\"\n },\n {\n \"name\": \"kandis 10\",\n \"label\": \"kandis-10\",\n \"hex\": \"#e8ddcf\"\n },\n {\n \"name\": \"kandis 11\",\n \"label\": \"kandis-11\",\n \"hex\": \"#ded0c0\"\n },\n {\n \"name\": \"kandis 12\",\n \"label\": \"kandis-12\",\n \"hex\": \"#efe6da\"\n },\n {\n \"name\": \"kandis 13\",\n \"label\": \"kandis-13\",\n \"hex\": \"#f5efe4\"\n },\n {\n \"name\": \"kandis 14\",\n \"label\": \"kandis-14\",\n \"hex\": \"#f7f3ea\"\n },\n {\n \"name\": \"sand 1\",\n \"label\": \"sand-1\",\n \"hex\": \"#c29f41\"\n },\n {\n \"name\": \"sand 2\",\n \"label\": \"sand-2\",\n \"hex\": \"#dcbb64\"\n },\n {\n \"name\": \"sand 3\",\n \"label\": \"sand-3\",\n \"hex\": \"#e4c676\"\n },\n {\n \"name\": \"sand 4\",\n \"label\": \"sand-4\",\n \"hex\": \"#f1da9d\"\n },\n {\n \"name\": \"sand 5\",\n \"label\": \"sand-5\",\n \"hex\": \"#fcf2d4\"\n },\n {\n \"name\": \"grapefruit 1\",\n \"label\": \"grapefruit-1\",\n \"hex\": \"#ffb805\"\n },\n {\n \"name\": \"grapefruit 2\",\n \"label\": \"grapefruit-2\",\n \"hex\": \"#ffc324\"\n },\n {\n \"name\": \"grapefruit 3\",\n \"label\": 
\"grapefruit-3\",\n \"hex\": \"#ffbc12\"\n },\n {\n \"name\": \"grapefruit 4\",\n \"label\": \"grapefruit-4\",\n \"hex\": \"#ffca38\"\n },\n {\n \"name\": \"grapefruit 5\",\n \"label\": \"grapefruit-5\",\n \"hex\": \"#ffcc3c\"\n },\n {\n \"name\": \"grapefruit 6\",\n \"label\": \"grapefruit-6\",\n \"hex\": \"#ffd965\"\n },\n {\n \"name\": \"grapefruit 7\",\n \"label\": \"grapefruit-7\",\n \"hex\": \"#ffd24e\"\n },\n {\n \"name\": \"grapefruit 8\",\n \"label\": \"grapefruit-8\",\n \"hex\": \"#ffe180\"\n },\n {\n \"name\": \"grapefruit 9\",\n \"label\": \"grapefruit-9\",\n \"hex\": \"#ffe897\"\n },\n {\n \"name\": \"grapefruit 10\",\n \"label\": \"grapefruit-10\",\n \"hex\": \"#fff2c0\"\n },\n {\n \"name\": \"grapefruit 11\",\n \"label\": \"grapefruit-11\",\n \"hex\": \"#ffeeae\"\n },\n {\n \"name\": \"grapefruit 12\",\n \"label\": \"grapefruit-12\",\n \"hex\": \"#fff6d2\"\n },\n {\n \"name\": \"grapefruit 13\",\n \"label\": \"grapefruit-13\",\n \"hex\": \"#fdf6db\"\n },\n {\n \"name\": \"grapefruit 14\",\n \"label\": \"grapefruit-14\",\n \"hex\": \"#fdf8e4\"\n },\n {\n \"name\": \"safran 1\",\n \"label\": \"safran-1\",\n \"hex\": \"#d2953b\"\n },\n {\n \"name\": \"safran 2\",\n \"label\": \"safran-2\",\n \"hex\": \"#e0a549\"\n },\n {\n \"name\": \"safran 3\",\n \"label\": \"safran-3\",\n \"hex\": \"#dc9f43\"\n },\n {\n \"name\": \"safran 4\",\n \"label\": \"safran-4\",\n \"hex\": \"#e8b057\"\n },\n {\n \"name\": \"safran 5\",\n \"label\": \"safran-5\",\n \"hex\": \"#ecb45c\"\n },\n {\n \"name\": \"safran 6\",\n \"label\": \"safran-6\",\n \"hex\": \"#f6c779\"\n },\n {\n \"name\": \"safran 7\",\n \"label\": \"safran-7\",\n \"hex\": \"#f0bc67\"\n },\n {\n \"name\": \"safran 8\",\n \"label\": \"safran-8\",\n \"hex\": \"#f9d18e\"\n },\n {\n \"name\": \"safran 9\",\n \"label\": \"safran-9\",\n \"hex\": \"#fbdca4\"\n },\n {\n \"name\": \"safran 10\",\n \"label\": \"safran-10\",\n \"hex\": \"#feedca\"\n },\n {\n \"name\": \"safran 11\",\n \"label\": \"safran-11\",\n \"hex\": \"#fee6b9\"\n },\n {\n \"name\": \"safran 12\",\n \"label\": \"safran-12\",\n \"hex\": \"#fef2d7\"\n },\n {\n \"name\": \"safran 13\",\n \"label\": \"safran-13\",\n \"hex\": \"#fbf4e0\"\n },\n {\n \"name\": \"safran 14\",\n \"label\": \"safran-14\",\n \"hex\": \"#fcf7e8\"\n },\n {\n \"name\": \"macchia 1\",\n \"label\": \"macchia-1\",\n \"hex\": \"#6e764d\"\n },\n {\n \"name\": \"macchia 2\",\n \"label\": \"macchia-2\",\n \"hex\": \"#8c9567\"\n },\n {\n \"name\": \"macchia 3\",\n \"label\": \"macchia-3\",\n \"hex\": \"#9ba274\"\n },\n {\n \"name\": \"macchia 4\",\n \"label\": \"macchia-4\",\n \"hex\": \"#bbc19a\"\n },\n {\n \"name\": \"macchia 5\",\n \"label\": \"macchia-5\",\n \"hex\": \"#e2e6d0\"\n },\n {\n \"name\": \"citro 1\",\n \"label\": \"citro-1\",\n \"hex\": \"#ffcf00\"\n },\n {\n \"name\": \"citro 2\",\n \"label\": \"citro-2\",\n \"hex\": \"#ffd700\"\n },\n {\n \"name\": \"citro 3\",\n \"label\": \"citro-3\",\n \"hex\": \"#ffd500\"\n },\n {\n \"name\": \"citro 4\",\n \"label\": \"citro-4\",\n \"hex\": \"#ffdb31\"\n },\n {\n \"name\": \"citro 5\",\n \"label\": \"citro-5\",\n \"hex\": \"#ffdd39\"\n },\n {\n \"name\": \"citro 6\",\n \"label\": \"citro-6\",\n \"hex\": \"#ffe564\"\n },\n {\n \"name\": \"citro 7\",\n \"label\": \"citro-7\",\n \"hex\": \"#ffe14d\"\n },\n {\n \"name\": \"citro 8\",\n \"label\": \"citro-8\",\n \"hex\": \"#ffea81\"\n },\n {\n \"name\": \"citro 9\",\n \"label\": \"citro-9\",\n \"hex\": \"#ffee99\"\n },\n {\n \"name\": \"citro 10\",\n \"label\": \"citro-10\",\n \"hex\": \"#fff4bf\"\n },\n {\n 
\"name\": \"citro 11\",\n \"label\": \"citro-11\",\n \"hex\": \"#fff1ac\"\n },\n {\n \"name\": \"citro 12\",\n \"label\": \"citro-12\",\n \"hex\": \"#fff7cf\"\n },\n {\n \"name\": \"citro 13\",\n \"label\": \"citro-13\",\n \"hex\": \"#fff8da\"\n },\n {\n \"name\": \"citro 14\",\n \"label\": \"citro-14\",\n \"hex\": \"#fdf9e3\"\n },\n {\n \"name\": \"anis 1\",\n \"label\": \"anis-1\",\n \"hex\": \"#936d3c\"\n },\n {\n \"name\": \"anis 2\",\n \"label\": \"anis-2\",\n \"hex\": \"#a87f49\"\n },\n {\n \"name\": \"anis 3\",\n \"label\": \"anis-3\",\n \"hex\": \"#9e7742\"\n },\n {\n \"name\": \"anis 4\",\n \"label\": \"anis-4\",\n \"hex\": \"#b58c54\"\n },\n {\n \"name\": \"anis 5\",\n \"label\": \"anis-5\",\n \"hex\": \"#bb935a\"\n },\n {\n \"name\": \"anis 6\",\n \"label\": \"anis-6\",\n \"hex\": \"#d0ab76\"\n },\n {\n \"name\": \"anis 7\",\n \"label\": \"anis-7\",\n \"hex\": \"#c49c64\"\n },\n {\n \"name\": \"anis 8\",\n \"label\": \"anis-8\",\n \"hex\": \"#dbbb8a\"\n },\n {\n \"name\": \"anis 9\",\n \"label\": \"anis-9\",\n \"hex\": \"#e5ca9f\"\n },\n {\n \"name\": \"anis 10\",\n \"label\": \"anis-10\",\n \"hex\": \"#f3e2c3\"\n },\n {\n \"name\": \"anis 11\",\n \"label\": \"anis-11\",\n \"hex\": \"#edd7b4\"\n },\n {\n \"name\": \"anis 12\",\n \"label\": \"anis-12\",\n \"hex\": \"#f8ead1\"\n },\n {\n \"name\": \"anis 13\",\n \"label\": \"anis-13\",\n \"hex\": \"#f9efd9\"\n },\n {\n \"name\": \"anis 14\",\n \"label\": \"anis-14\",\n \"hex\": \"#fcf6e7\"\n },\n {\n \"name\": \"petrol 1\",\n \"label\": \"petrol-1\",\n \"hex\": \"#4e686c\"\n },\n {\n \"name\": \"petrol 2\",\n \"label\": \"petrol-2\",\n \"hex\": \"#82a0a4\"\n },\n {\n \"name\": \"petrol 3\",\n \"label\": \"petrol-3\",\n \"hex\": \"#96b2b5\"\n },\n {\n \"name\": \"petrol 4\",\n \"label\": \"petrol-4\",\n \"hex\": \"#bfd2d3\"\n },\n {\n \"name\": \"petrol 5\",\n \"label\": \"petrol-5\",\n \"hex\": \"#e5eeeb\"\n },\n {\n \"name\": \"pistachio 1\",\n \"label\": \"pistachio-1\",\n \"hex\": \"#a1bb13\"\n },\n {\n \"name\": \"pistachio 2\",\n \"label\": \"pistachio-2\",\n \"hex\": \"#b8c72d\"\n },\n {\n \"name\": \"pistachio 3\",\n \"label\": \"pistachio-3\",\n \"hex\": \"#b2c421\"\n },\n {\n \"name\": \"pistachio 4\",\n \"label\": \"pistachio-4\",\n \"hex\": \"#c6cf3f\"\n },\n {\n \"name\": \"pistachio 5\",\n \"label\": \"pistachio-5\",\n \"hex\": \"#cbd247\"\n },\n {\n \"name\": \"pistachio 6\",\n \"label\": \"pistachio-6\",\n \"hex\": \"#dcde6a\"\n },\n {\n \"name\": \"pistachio 7\",\n \"label\": \"pistachio-7\",\n \"hex\": \"#d1d653\"\n },\n {\n \"name\": \"pistachio 8\",\n \"label\": \"pistachio-8\",\n \"hex\": \"#e5e481\"\n },\n {\n \"name\": \"pistachio 9\",\n \"label\": \"pistachio-9\",\n \"hex\": \"#ecea98\"\n },\n {\n \"name\": \"pistachio 10\",\n \"label\": \"pistachio-10\",\n \"hex\": \"#f8f4c4\"\n },\n {\n \"name\": \"pistachio 11\",\n \"label\": \"pistachio-11\",\n \"hex\": \"#f2eeaf\"\n },\n {\n \"name\": \"pistachio 12\",\n \"label\": \"pistachio-12\",\n \"hex\": \"#f9f5d0\"\n },\n {\n \"name\": \"pistachio 13\",\n \"label\": \"pistachio-13\",\n \"hex\": \"#f9f7db\"\n },\n {\n \"name\": \"pistachio 14\",\n \"label\": \"pistachio-14\",\n \"hex\": \"#fcf9e4\"\n },\n {\n \"name\": \"curuma 1\",\n \"label\": \"curuma-1\",\n \"hex\": \"#dd9d2b\"\n },\n {\n \"name\": \"curuma 2\",\n \"label\": \"curuma-2\",\n \"hex\": \"#e7ac32\"\n },\n {\n \"name\": \"curuma 3\",\n \"label\": \"curuma-3\",\n \"hex\": \"#e1a428\"\n },\n {\n \"name\": \"curuma 4\",\n \"label\": \"curuma-4\",\n \"hex\": \"#eeb640\"\n },\n {\n \"name\": \"curuma 
5\",\n \"label\": \"curuma-5\",\n \"hex\": \"#f1bb47\"\n },\n {\n \"name\": \"curuma 6\",\n \"label\": \"curuma-6\",\n \"hex\": \"#f8cb65\"\n },\n {\n \"name\": \"curuma 7\",\n \"label\": \"curuma-7\",\n \"hex\": \"#f4c153\"\n },\n {\n \"name\": \"curuma 8\",\n \"label\": \"curuma-8\",\n \"hex\": \"#fbd57b\"\n },\n {\n \"name\": \"curuma 9\",\n \"label\": \"curuma-9\",\n \"hex\": \"#fedf95\"\n },\n {\n \"name\": \"curuma 10\",\n \"label\": \"curuma-10\",\n \"hex\": \"#ffefc0\"\n },\n {\n \"name\": \"curuma 11\",\n \"label\": \"curuma-11\",\n \"hex\": \"#ffe8ac\"\n },\n {\n \"name\": \"curuma 12\",\n \"label\": \"curuma-12\",\n \"hex\": \"#fff4cf\"\n },\n {\n \"name\": \"curuma 13\",\n \"label\": \"curuma-13\",\n \"hex\": \"#fef6dc\"\n },\n {\n \"name\": \"curuma 14\",\n \"label\": \"curuma-14\",\n \"hex\": \"#fef9e5\"\n },\n {\n \"name\": \"rock 1\",\n \"label\": \"rock-1\",\n \"hex\": \"#4d575d\"\n },\n {\n \"name\": \"rock 2\",\n \"label\": \"rock-2\",\n \"hex\": \"#606e76\"\n },\n {\n \"name\": \"rock 3\",\n \"label\": \"rock-3\",\n \"hex\": \"#6d7c86\"\n },\n {\n \"name\": \"rock 4\",\n \"label\": \"rock-4\",\n \"hex\": \"#909fa7\"\n },\n {\n \"name\": \"rock 5\",\n \"label\": \"rock-5\",\n \"hex\": \"#c6d1d6\"\n },\n {\n \"name\": \"apple 1\",\n \"label\": \"apple-1\",\n \"hex\": \"#7eaf2b\"\n },\n {\n \"name\": \"apple 2\",\n \"label\": \"apple-2\",\n \"hex\": \"#99bd37\"\n },\n {\n \"name\": \"apple 3\",\n \"label\": \"apple-3\",\n \"hex\": \"#8bb72a\"\n },\n {\n \"name\": \"apple 4\",\n \"label\": \"apple-4\",\n \"hex\": \"#a9c746\"\n },\n {\n \"name\": \"apple 5\",\n \"label\": \"apple-5\",\n \"hex\": \"#aeca4f\"\n },\n {\n \"name\": \"apple 6\",\n \"label\": \"apple-6\",\n \"hex\": \"#c5d86f\"\n },\n {\n \"name\": \"apple 7\",\n \"label\": \"apple-7\",\n \"hex\": \"#b6cf58\"\n },\n {\n \"name\": \"apple 8\",\n \"label\": \"apple-8\",\n \"hex\": \"#d1de83\"\n },\n {\n \"name\": \"apple 9\",\n \"label\": \"apple-9\",\n \"hex\": \"#dde79b\"\n },\n {\n \"name\": \"apple 10\",\n \"label\": \"apple-10\",\n \"hex\": \"#edf2c2\"\n },\n {\n \"name\": \"apple 11\",\n \"label\": \"apple-11\",\n \"hex\": \"#e7edae\"\n },\n {\n \"name\": \"apple 12\",\n \"label\": \"apple-12\",\n \"hex\": \"#f5f7d2\"\n },\n {\n \"name\": \"apple 13\",\n \"label\": \"apple-13\",\n \"hex\": \"#f6f8dd\"\n },\n {\n \"name\": \"apple 14\",\n \"label\": \"apple-14\",\n \"hex\": \"#f9f9e4\"\n },\n {\n \"name\": \"cumin 1\",\n \"label\": \"cumin-1\",\n \"hex\": \"#92763b\"\n },\n {\n \"name\": \"cumin 2\",\n \"label\": \"cumin-2\",\n \"hex\": \"#ab8743\"\n },\n {\n \"name\": \"cumin 3\",\n \"label\": \"cumin-3\",\n \"hex\": \"#a0803f\"\n },\n {\n \"name\": \"cumin 4\",\n \"label\": \"cumin-4\",\n \"hex\": \"#b99551\"\n },\n {\n \"name\": \"cumin 5\",\n \"label\": \"cumin-5\",\n \"hex\": \"#be9b55\"\n },\n {\n \"name\": \"cumin 6\",\n \"label\": \"cumin-6\",\n \"hex\": \"#d3b170\"\n },\n {\n \"name\": \"cumin 7\",\n \"label\": \"cumin-7\",\n \"hex\": \"#c7a25c\"\n },\n {\n \"name\": \"cumin 8\",\n \"label\": \"cumin-8\",\n \"hex\": \"#ddbf82\"\n },\n {\n \"name\": \"cumin 9\",\n \"label\": \"cumin-9\",\n \"hex\": \"#e7cc96\"\n },\n {\n \"name\": \"cumin 10\",\n \"label\": \"cumin-10\",\n \"hex\": \"#f6e6c0\"\n },\n {\n \"name\": \"cumin 11\",\n \"label\": \"cumin-11\",\n \"hex\": \"#efdcaf\"\n },\n {\n \"name\": \"cumin 12\",\n \"label\": \"cumin-12\",\n \"hex\": \"#f9efd0\"\n },\n {\n \"name\": \"cumin 13\",\n \"label\": \"cumin-13\",\n \"hex\": \"#fbf4db\"\n },\n {\n \"name\": \"cumin 14\",\n \"label\": 
\"cumin-14\",\n \"hex\": \"#fcf8e4\"\n },\n {\n \"name\": \"amethyst 1\",\n \"label\": \"amethyst-1\",\n \"hex\": \"#617591\"\n },\n {\n \"name\": \"amethyst 2\",\n \"label\": \"amethyst-2\",\n \"hex\": \"#8fa2bb\"\n },\n {\n \"name\": \"amethyst 3\",\n \"label\": \"amethyst-3\",\n \"hex\": \"#9fb1c7\"\n },\n {\n \"name\": \"amethyst 4\",\n \"label\": \"amethyst-4\",\n \"hex\": \"#c2cfdd\"\n },\n {\n \"name\": \"amethyst 5\",\n \"label\": \"amethyst-5\",\n \"hex\": \"#e8eef0\"\n },\n {\n \"name\": \"avocado 1\",\n \"label\": \"avocado-1\",\n \"hex\": \"#30853e\"\n },\n {\n \"name\": \"avocado 2\",\n \"label\": \"avocado-2\",\n \"hex\": \"#429c48\"\n },\n {\n \"name\": \"avocado 3\",\n \"label\": \"avocado-3\",\n \"hex\": \"#33933f\"\n },\n {\n \"name\": \"avocado 4\",\n \"label\": \"avocado-4\",\n \"hex\": \"#58ac5b\"\n },\n {\n \"name\": \"avocado 5\",\n \"label\": \"avocado-5\",\n \"hex\": \"#60b160\"\n },\n {\n \"name\": \"avocado 6\",\n \"label\": \"avocado-6\",\n \"hex\": \"#80c67e\"\n },\n {\n \"name\": \"avocado 7\",\n \"label\": \"avocado-7\",\n \"hex\": \"#6dba6c\"\n },\n {\n \"name\": \"avocado 8\",\n \"label\": \"avocado-8\",\n \"hex\": \"#96d393\"\n },\n {\n \"name\": \"avocado 9\",\n \"label\": \"avocado-9\",\n \"hex\": \"#abdca6\"\n },\n {\n \"name\": \"avocado 10\",\n \"label\": \"avocado-10\",\n \"hex\": \"#d0eecb\"\n },\n {\n \"name\": \"avocado 11\",\n \"label\": \"avocado-11\",\n \"hex\": \"#bee7ba\"\n },\n {\n \"name\": \"avocado 12\",\n \"label\": \"avocado-12\",\n \"hex\": \"#ddf4d8\"\n },\n {\n \"name\": \"avocado 13\",\n \"label\": \"avocado-13\",\n \"hex\": \"#e8f8e4\"\n },\n {\n \"name\": \"avocado 14\",\n \"label\": \"avocado-14\",\n \"hex\": \"#effaea\"\n },\n {\n \"name\": \"muskat 1\",\n \"label\": \"muskat-1\",\n \"hex\": \"#756647\"\n },\n {\n \"name\": \"muskat 2\",\n \"label\": \"muskat-2\",\n \"hex\": \"#887756\"\n },\n {\n \"name\": \"muskat 3\",\n \"label\": \"muskat-3\",\n \"hex\": \"#7f6e4e\"\n },\n {\n \"name\": \"muskat 4\",\n \"label\": \"muskat-4\",\n \"hex\": \"#958360\"\n },\n {\n \"name\": \"muskat 5\",\n \"label\": \"muskat-5\",\n \"hex\": \"#998765\"\n },\n {\n \"name\": \"muskat 6\",\n \"label\": \"muskat-6\",\n \"hex\": \"#b2a180\"\n },\n {\n \"name\": \"muskat 7\",\n \"label\": \"muskat-7\",\n \"hex\": \"#a3916e\"\n },\n {\n \"name\": \"muskat 8\",\n \"label\": \"muskat-8\",\n \"hex\": \"#c2b293\"\n },\n {\n \"name\": \"muskat 9\",\n \"label\": \"muskat-9\",\n \"hex\": \"#cfc2a5\"\n },\n {\n \"name\": \"muskat 10\",\n \"label\": \"muskat-10\",\n \"hex\": \"#e5dcc8\"\n },\n {\n \"name\": \"muskat 11\",\n \"label\": \"muskat-11\",\n \"hex\": \"#dbd0b8\"\n },\n {\n \"name\": \"muskat 12\",\n \"label\": \"muskat-12\",\n \"hex\": \"#eee6d5\"\n },\n {\n \"name\": \"muskat 13\",\n \"label\": \"muskat-13\",\n \"hex\": \"#f3eddf\"\n },\n {\n \"name\": \"muskat 14\",\n \"label\": \"muskat-14\",\n \"hex\": \"#f7f3e6\"\n },\n {\n \"name\": \"lapislazuli 1\",\n \"label\": \"lapislazuli-1\",\n \"hex\": \"#43547b\"\n },\n {\n \"name\": \"lapislazuli 2\",\n \"label\": \"lapislazuli-2\",\n \"hex\": \"#759ac1\"\n },\n {\n \"name\": \"lapislazuli 3\",\n \"label\": \"lapislazuli-3\",\n \"hex\": \"#89accf\"\n },\n {\n \"name\": \"lapislazuli 4\",\n \"label\": \"lapislazuli-4\",\n \"hex\": \"#b2cbe2\"\n },\n {\n \"name\": \"lapislazuli 5\",\n \"label\": \"lapislazuli-5\",\n \"hex\": \"#dfebf2\"\n },\n {\n \"name\": \"peppermint 1\",\n \"label\": \"peppermint-1\",\n \"hex\": \"#115d50\"\n },\n {\n \"name\": \"peppermint 2\",\n \"label\": \"peppermint-2\",\n 
\"hex\": \"#008c74\"\n },\n {\n \"name\": \"peppermint 3\",\n \"label\": \"peppermint-3\",\n \"hex\": \"#007b64\"\n },\n {\n \"name\": \"peppermint 4\",\n \"label\": \"peppermint-4\",\n \"hex\": \"#00a58b\"\n },\n {\n \"name\": \"peppermint 5\",\n \"label\": \"peppermint-5\",\n \"hex\": \"#00a98f\"\n },\n {\n \"name\": \"peppermint 6\",\n \"label\": \"peppermint-6\",\n \"hex\": \"#33c3ac\"\n },\n {\n \"name\": \"peppermint 7\",\n \"label\": \"peppermint-7\",\n \"hex\": \"#00b39a\"\n },\n {\n \"name\": \"peppermint 8\",\n \"label\": \"peppermint-8\",\n \"hex\": \"#56d2be\"\n },\n {\n \"name\": \"peppermint 9\",\n \"label\": \"peppermint-9\",\n \"hex\": \"#79dcca\"\n },\n {\n \"name\": \"peppermint 10\",\n \"label\": \"peppermint-10\",\n \"hex\": \"#aeecdf\"\n },\n {\n \"name\": \"peppermint 11\",\n \"label\": \"peppermint-11\",\n \"hex\": \"#94e4d6\"\n },\n {\n \"name\": \"peppermint 12\",\n \"label\": \"peppermint-12\",\n \"hex\": \"#c3f2e7\"\n },\n {\n \"name\": \"peppermint 13\",\n \"label\": \"peppermint-13\",\n \"hex\": \"#d3f6ec\"\n },\n {\n \"name\": \"peppermint 14\",\n \"label\": \"peppermint-14\",\n \"hex\": \"#e7f9f0\"\n },\n {\n \"name\": \"coffee 1\",\n \"label\": \"coffee-1\",\n \"hex\": \"#585147\"\n },\n {\n \"name\": \"coffee 2\",\n \"label\": \"coffee-2\",\n \"hex\": \"#696156\"\n },\n {\n \"name\": \"coffee 3\",\n \"label\": \"coffee-3\",\n \"hex\": \"#625a51\"\n },\n {\n \"name\": \"coffee 4\",\n \"label\": \"coffee-4\",\n \"hex\": \"#786f64\"\n },\n {\n \"name\": \"coffee 5\",\n \"label\": \"coffee-5\",\n \"hex\": \"#7f766b\"\n },\n {\n \"name\": \"coffee 6\",\n \"label\": \"coffee-6\",\n \"hex\": \"#9a9186\"\n },\n {\n \"name\": \"coffee 7\",\n \"label\": \"coffee-7\",\n \"hex\": \"#887f74\"\n },\n {\n \"name\": \"coffee 8\",\n \"label\": \"coffee-8\",\n \"hex\": \"#aaa299\"\n },\n {\n \"name\": \"coffee 9\",\n \"label\": \"coffee-9\",\n \"hex\": \"#bdb6ac\"\n },\n {\n \"name\": \"coffee 10\",\n \"label\": \"coffee-10\",\n \"hex\": \"#dad5cc\"\n },\n {\n \"name\": \"coffee 11\",\n \"label\": \"coffee-11\",\n \"hex\": \"#ccc5bc\"\n },\n {\n \"name\": \"coffee 12\",\n \"label\": \"coffee-12\",\n \"hex\": \"#e4e0d7\"\n },\n {\n \"name\": \"coffee 13\",\n \"label\": \"coffee-13\",\n \"hex\": \"#eceae2\"\n },\n {\n \"name\": \"coffee 14\",\n \"label\": \"coffee-14\",\n \"hex\": \"#f2f1e9\"\n },\n {\n \"name\": \"lava 1\",\n \"label\": \"lava-1\",\n \"hex\": \"#524f4a\"\n },\n {\n \"name\": \"lava 2\",\n \"label\": \"lava-2\",\n \"hex\": \"#524f4a\"\n },\n {\n \"name\": \"lava 3\",\n \"label\": \"lava-3\",\n \"hex\": \"#71706a\"\n },\n {\n \"name\": \"lava 4\",\n \"label\": \"lava-4\",\n \"hex\": \"#81817a\"\n },\n {\n \"name\": \"lava 5\",\n \"label\": \"lava-5\",\n \"hex\": \"#a5a59f\"\n },\n {\n \"name\": \"curacao 1\",\n \"label\": \"curacao-1\",\n \"hex\": \"#d7d7d1\"\n },\n {\n \"name\": \"curacao 2\",\n \"label\": \"curacao-2\",\n \"hex\": \"#2d4f53\"\n },\n {\n \"name\": \"curacao 3\",\n \"label\": \"curacao-3\",\n \"hex\": \"#007575\"\n },\n {\n \"name\": \"curacao 4\",\n \"label\": \"curacao-4\",\n \"hex\": \"#006667\"\n },\n {\n \"name\": \"curacao 5\",\n \"label\": \"curacao-5\",\n \"hex\": \"#008989\"\n },\n {\n \"name\": \"curacao 6\",\n \"label\": \"curacao-6\",\n \"hex\": \"#008f8f\"\n },\n {\n \"name\": \"curacao 7\",\n \"label\": \"curacao-7\",\n \"hex\": \"#21adae\"\n },\n {\n \"name\": \"curacao 8\",\n \"label\": \"curacao-8\",\n \"hex\": \"#009c9d\"\n },\n {\n \"name\": \"curacao 9\",\n \"label\": \"curacao-9\",\n \"hex\": \"#49bdbf\"\n },\n {\n 
\"name\": \"curacao 10\",\n \"label\": \"curacao-10\",\n \"hex\": \"#6ccacc\"\n },\n {\n \"name\": \"curacao 11\",\n \"label\": \"curacao-11\",\n \"hex\": \"#a7e3e3\"\n },\n {\n \"name\": \"curacao 12\",\n \"label\": \"curacao-12\",\n \"hex\": \"#8cd8da\"\n },\n {\n \"name\": \"curacao 13\",\n \"label\": \"curacao-13\",\n \"hex\": \"#bdebeb\"\n },\n {\n \"name\": \"curacao 14\",\n \"label\": \"curacao-14\",\n \"hex\": \"#cef2f1\"\n },\n {\n \"name\": \"olive 1\",\n \"label\": \"olive-1\",\n \"hex\": \"#dcf6f3\"\n },\n {\n \"name\": \"olive 2\",\n \"label\": \"olive-2\",\n \"hex\": \"#7f8f39\"\n },\n {\n \"name\": \"olive 3\",\n \"label\": \"olive-3\",\n \"hex\": \"#97a44c\"\n },\n {\n \"name\": \"olive 4\",\n \"label\": \"olive-4\",\n \"hex\": \"#8b9944\"\n },\n {\n \"name\": \"olive 5\",\n \"label\": \"olive-5\",\n \"hex\": \"#a7b15c\"\n },\n {\n \"name\": \"olive 6\",\n \"label\": \"olive-6\",\n \"hex\": \"#aab45f\"\n },\n {\n \"name\": \"olive 7\",\n \"label\": \"olive-7\",\n \"hex\": \"#bfc67b\"\n },\n {\n \"name\": \"olive 8\",\n \"label\": \"olive-8\",\n \"hex\": \"#ccd18f\"\n },\n {\n \"name\": \"olive 9\",\n \"label\": \"olive-9\",\n \"hex\": \"#d9dda5\"\n },\n {\n \"name\": \"olive 10\",\n \"label\": \"olive-10\",\n \"hex\": \"#eceeca\"\n },\n {\n \"name\": \"olive 11\",\n \"label\": \"olive-11\",\n \"hex\": \"#e3e7b9\"\n },\n {\n \"name\": \"olive 12\",\n \"label\": \"olive-12\",\n \"hex\": \"#f1f2d7\"\n },\n {\n \"name\": \"olive 13\",\n \"label\": \"olive-13\",\n \"hex\": \"#f6f7e3\"\n },\n {\n \"name\": \"olive 14\",\n \"label\": \"olive-14\",\n \"hex\": \"#f9f9e9\"\n },\n {\n \"name\": \"blueberry 1\",\n \"label\": \"blueberry-1\",\n \"hex\": \"#3c4067\"\n },\n {\n \"name\": \"blueberry 2\",\n \"label\": \"blueberry-2\",\n \"hex\": \"#245b95\"\n },\n {\n \"name\": \"blueberry 3\",\n \"label\": \"blueberry-3\",\n \"hex\": \"#2c4f85\"\n },\n {\n \"name\": \"blueberry 4\",\n \"label\": \"blueberry-4\",\n \"hex\": \"#1f6ba8\"\n },\n {\n \"name\": \"blueberry 5\",\n \"label\": \"blueberry-5\",\n \"hex\": \"#1f73b0\"\n },\n {\n \"name\": \"blueberry 6\",\n \"label\": \"blueberry-6\",\n \"hex\": \"#2c8ec8\"\n },\n {\n \"name\": \"blueberry 7\",\n \"label\": \"blueberry-7\",\n \"hex\": \"#1d7ebb\"\n },\n {\n \"name\": \"blueberry 8\",\n \"label\": \"blueberry-8\",\n \"hex\": \"#409fd5\"\n },\n {\n \"name\": \"blueberry 9\",\n \"label\": \"blueberry-9\",\n \"hex\": \"#60b2e2\"\n },\n {\n \"name\": \"blueberry 10\",\n \"label\": \"blueberry-10\",\n \"hex\": \"#93cff0\"\n },\n {\n \"name\": \"blueberry 11\",\n \"label\": \"blueberry-11\",\n \"hex\": \"#7ac1e8\"\n },\n {\n \"name\": \"blueberry 12\",\n \"label\": \"blueberry-12\",\n \"hex\": \"#acdbf2\"\n },\n {\n \"name\": \"blueberry 13\",\n \"label\": \"blueberry-13\",\n \"hex\": \"#c0e3f3\"\n },\n {\n \"name\": \"blueberry 14\",\n \"label\": \"blueberry-14\",\n \"hex\": \"#dbf0f5\"\n },\n {\n \"name\": \"alovera 1\",\n \"label\": \"alovera-1\",\n \"hex\": \"#33554d\"\n },\n {\n \"name\": \"alovera 2\",\n \"label\": \"alovera-2\",\n \"hex\": \"#347567\"\n },\n {\n \"name\": \"alovera 3\",\n \"label\": \"alovera-3\",\n \"hex\": \"#34695d\"\n },\n {\n \"name\": \"alovera 4\",\n \"label\": \"alovera-4\",\n \"hex\": \"#3e8677\"\n },\n {\n \"name\": \"alovera 5\",\n \"label\": \"alovera-5\",\n \"hex\": \"#418d7d\"\n },\n {\n \"name\": \"alovera 6\",\n \"label\": \"alovera-6\",\n \"hex\": \"#5daa9b\"\n },\n {\n \"name\": \"alovera 7\",\n \"label\": \"alovera-7\",\n \"hex\": \"#499687\"\n },\n {\n \"name\": \"alovera 8\",\n \"label\": 
\"alovera-8\",\n \"hex\": \"#75bcae\"\n },\n {\n \"name\": \"alovera 9\",\n \"label\": \"alovera-9\",\n \"hex\": \"#8fcdc1\"\n },\n {\n \"name\": \"alovera 10\",\n \"label\": \"alovera-10\",\n \"hex\": \"#bbe5dc\"\n },\n {\n \"name\": \"alovera 11\",\n \"label\": \"alovera-11\",\n \"hex\": \"#a6dad0\"\n },\n {\n \"name\": \"alovera 12\",\n \"label\": \"alovera-12\",\n \"hex\": \"#ccece4\"\n },\n {\n \"name\": \"alovera 13\",\n \"label\": \"alovera-13\",\n \"hex\": \"#d9f2eb\"\n },\n {\n \"name\": \"alovera 14\",\n \"label\": \"alovera-14\",\n \"hex\": \"#e5f6ee\"\n },\n {\n \"name\": \"enzian 1\",\n \"label\": \"enzian-1\",\n \"hex\": \"#3e3c5c\"\n },\n {\n \"name\": \"enzian 2\",\n \"label\": \"enzian-2\",\n \"hex\": \"#3e4d84\"\n },\n {\n \"name\": \"enzian 3\",\n \"label\": \"enzian-3\",\n \"hex\": \"#3c4374\"\n },\n {\n \"name\": \"enzian 4\",\n \"label\": \"enzian-4\",\n \"hex\": \"#415b98\"\n },\n {\n \"name\": \"enzian 5\",\n \"label\": \"enzian-5\",\n \"hex\": \"#4161a1\"\n },\n {\n \"name\": \"enzian 6\",\n \"label\": \"enzian-6\",\n \"hex\": \"#4b79bb\"\n },\n {\n \"name\": \"enzian 7\",\n \"label\": \"enzian-7\",\n \"hex\": \"#4368ab\"\n },\n {\n \"name\": \"enzian 8\",\n \"label\": \"enzian-8\",\n \"hex\": \"#5c8ecb\"\n },\n {\n \"name\": \"enzian 9\",\n \"label\": \"enzian-9\",\n \"hex\": \"#70a1d9\"\n },\n {\n \"name\": \"enzian 10\",\n \"label\": \"enzian-10\",\n \"hex\": \"#a0c6ec\"\n },\n {\n \"name\": \"enzian 11\",\n \"label\": \"enzian-11\",\n \"hex\": \"#8ab5e3\"\n },\n {\n \"name\": \"enzian 12\",\n \"label\": \"enzian-12\",\n \"hex\": \"#b4d4ee\"\n },\n {\n \"name\": \"enzian 13\",\n \"label\": \"enzian-13\",\n \"hex\": \"#c6dff3\"\n },\n {\n \"name\": \"enzian 14\",\n \"label\": \"enzian-14\",\n \"hex\": \"#dcecf4\"\n },\n {\n \"name\": \"eucalypto 1\",\n \"label\": \"eucalypto-1\",\n \"hex\": \"#3b5157\"\n },\n {\n \"name\": \"eucalypto 2\",\n \"label\": \"eucalypto-2\",\n \"hex\": \"#43656d\"\n },\n {\n \"name\": \"eucalypto 3\",\n \"label\": \"eucalypto-3\",\n \"hex\": \"#405c64\"\n },\n {\n \"name\": \"eucalypto 4\",\n \"label\": \"eucalypto-4\",\n \"hex\": \"#48717c\"\n },\n {\n \"name\": \"eucalypto 5\",\n \"label\": \"eucalypto-5\",\n \"hex\": \"#4a7680\"\n },\n {\n \"name\": \"eucalypto 6\",\n \"label\": \"eucalypto-6\",\n \"hex\": \"#64929e\"\n },\n {\n \"name\": \"eucalypto 7\",\n \"label\": \"eucalypto-7\",\n \"hex\": \"#53818c\"\n },\n {\n \"name\": \"eucalypto 8\",\n \"label\": \"eucalypto-8\",\n \"hex\": \"#78a5b2\"\n },\n {\n \"name\": \"eucalypto 9\",\n \"label\": \"eucalypto-9\",\n \"hex\": \"#8fb8c3\"\n },\n {\n \"name\": \"eucalypto 10\",\n \"label\": \"eucalypto-10\",\n \"hex\": \"#bad6dd\"\n },\n {\n \"name\": \"eucalypto 11\",\n \"label\": \"eucalypto-11\",\n \"hex\": \"#a5c8d1\"\n },\n {\n \"name\": \"eucalypto 12\",\n \"label\": \"eucalypto-12\",\n \"hex\": \"#cce2e6\"\n },\n {\n \"name\": \"eucalypto 13\",\n \"label\": \"eucalypto-13\",\n \"hex\": \"#d9e9ec\"\n },\n {\n \"name\": \"eucalypto 14\",\n \"label\": \"eucalypto-14\",\n \"hex\": \"#e6f1f0\"\n },\n {\n \"name\": \"lavendel 1\",\n \"label\": \"lavendel-1\",\n \"hex\": \"#443d49\"\n },\n {\n \"name\": \"lavendel 2\",\n \"label\": \"lavendel-2\",\n \"hex\": \"#514376\"\n },\n {\n \"name\": \"lavendel 3\",\n \"label\": \"lavendel-3\",\n \"hex\": \"#4b3d66\"\n },\n {\n \"name\": \"lavendel 4\",\n \"label\": \"lavendel-4\",\n \"hex\": \"#5b4b8c\"\n },\n {\n \"name\": \"lavendel 5\",\n \"label\": \"lavendel-5\",\n \"hex\": \"#615093\"\n },\n {\n \"name\": \"lavendel 6\",\n \"label\": 
\"lavendel-6\",\n \"hex\": \"#7b6bb4\"\n },\n {\n \"name\": \"lavendel 7\",\n \"label\": \"lavendel-7\",\n \"hex\": \"#6a59a1\"\n },\n {\n \"name\": \"lavendel 8\",\n \"label\": \"lavendel-8\",\n \"hex\": \"#8d7ec3\"\n },\n {\n \"name\": \"lavendel 9\",\n \"label\": \"lavendel-9\",\n \"hex\": \"#9f92d1\"\n },\n {\n \"name\": \"lavendel 10\",\n \"label\": \"lavendel-10\",\n \"hex\": \"#c3b9e6\"\n },\n {\n \"name\": \"lavendel 11\",\n \"label\": \"lavendel-11\",\n \"hex\": \"#b2a6db\"\n },\n {\n \"name\": \"lavendel 12\",\n \"label\": \"lavendel-12\",\n \"hex\": \"#d1c9ea\"\n },\n {\n \"name\": \"lavendel 13\",\n \"label\": \"lavendel-13\",\n \"hex\": \"#ddd6ef\"\n },\n {\n \"name\": \"lavendel 14\",\n \"label\": \"lavendel-14\",\n \"hex\": \"#ebe8f2\"\n },\n {\n \"name\": \"plum 1\",\n \"label\": \"plum-1\",\n \"hex\": \"#40485c\"\n },\n {\n \"name\": \"plum 2\",\n \"label\": \"plum-2\",\n \"hex\": \"#445973\"\n },\n {\n \"name\": \"plum 3\",\n \"label\": \"plum-3\",\n \"hex\": \"#45536a\"\n },\n {\n \"name\": \"plum 4\",\n \"label\": \"plum-4\",\n \"hex\": \"#486683\"\n },\n {\n \"name\": \"plum 5\",\n \"label\": \"plum-5\",\n \"hex\": \"#4b6b89\"\n },\n {\n \"name\": \"plum 6\",\n \"label\": \"plum-6\",\n \"hex\": \"#5c85a5\"\n },\n {\n \"name\": \"plum 7\",\n \"label\": \"plum-7\",\n \"hex\": \"#4f7392\"\n },\n {\n \"name\": \"plum 8\",\n \"label\": \"plum-8\",\n \"hex\": \"#6e97b6\"\n },\n {\n \"name\": \"plum 9\",\n \"label\": \"plum-9\",\n \"hex\": \"#86acc8\"\n },\n {\n \"name\": \"plum 10\",\n \"label\": \"plum-10\",\n \"hex\": \"#b3cfe1\"\n },\n {\n \"name\": \"plum 11\",\n \"label\": \"plum-11\",\n \"hex\": \"#9ec0d7\"\n },\n {\n \"name\": \"plum 12\",\n \"label\": \"plum-12\",\n \"hex\": \"#c4dbe7\"\n },\n {\n \"name\": \"plum 13\",\n \"label\": \"plum-13\",\n \"hex\": \"#d4e6ee\"\n },\n {\n \"name\": \"plum 14\",\n \"label\": \"plum-14\",\n \"hex\": \"#dfecf0\"\n },\n {\n \"name\": \"aubergine 1\",\n \"label\": \"aubergine-1\",\n \"hex\": \"#4e3b50\"\n },\n {\n \"name\": \"aubergine 2\",\n \"label\": \"aubergine-2\",\n \"hex\": \"#674578\"\n },\n {\n \"name\": \"aubergine 3\",\n \"label\": \"aubergine-3\",\n \"hex\": \"#60426d\"\n },\n {\n \"name\": \"aubergine 4\",\n \"label\": \"aubergine-4\",\n \"hex\": \"#764e8b\"\n },\n {\n \"name\": \"aubergine 5\",\n \"label\": \"aubergine-5\",\n \"hex\": \"#7e5693\"\n },\n {\n \"name\": \"aubergine 6\",\n \"label\": \"aubergine-6\",\n \"hex\": \"#9a71b1\"\n },\n {\n \"name\": \"aubergine 7\",\n \"label\": \"aubergine-7\",\n \"hex\": \"#88609f\"\n },\n {\n \"name\": \"aubergine 8\",\n \"label\": \"aubergine-8\",\n \"hex\": \"#ac86c1\"\n },\n {\n \"name\": \"aubergine 9\",\n \"label\": \"aubergine-9\",\n \"hex\": \"#be9bd1\"\n },\n {\n \"name\": \"aubergine 10\",\n \"label\": \"aubergine-10\",\n \"hex\": \"#d9c2e5\"\n },\n {\n \"name\": \"aubergine 11\",\n \"label\": \"aubergine-11\",\n \"hex\": \"#cdb0db\"\n },\n {\n \"name\": \"aubergine 12\",\n \"label\": \"aubergine-12\",\n \"hex\": \"#e4d1ea\"\n },\n {\n \"name\": \"aubergine 13\",\n \"label\": \"aubergine-13\",\n \"hex\": \"#eadcee\"\n },\n {\n \"name\": \"aubergine 14\",\n \"label\": \"aubergine-14\",\n \"hex\": \"#f0e5ee\"\n },\n {\n \"name\": \"crocus 1\",\n \"label\": \"crocus-1\",\n \"hex\": \"#4a4153\"\n },\n {\n \"name\": \"crocus 2\",\n \"label\": \"crocus-2\",\n \"hex\": \"#4e4a6a\"\n },\n {\n \"name\": \"crocus 3\",\n \"label\": \"crocus-3\",\n \"hex\": \"#4a455f\"\n },\n {\n \"name\": \"crocus 4\",\n \"label\": \"crocus-4\",\n \"hex\": \"#545379\"\n },\n {\n \"name\": 
\"crocus 5\",\n \"label\": \"crocus-5\",\n \"hex\": \"#5b5a82\"\n },\n {\n \"name\": \"crocus 6\",\n \"label\": \"crocus-6\",\n \"hex\": \"#6f719e\"\n },\n {\n \"name\": \"crocus 7\",\n \"label\": \"crocus-7\",\n \"hex\": \"#62638c\"\n },\n {\n \"name\": \"crocus 8\",\n \"label\": \"crocus-8\",\n \"hex\": \"#8487b2\"\n },\n {\n \"name\": \"crocus 9\",\n \"label\": \"crocus-9\",\n \"hex\": \"#999cc4\"\n },\n {\n \"name\": \"crocus 10\",\n \"label\": \"crocus-10\",\n \"hex\": \"#bec1dd\"\n },\n {\n \"name\": \"crocus 11\",\n \"label\": \"crocus-11\",\n \"hex\": \"#adb0d2\"\n },\n {\n \"name\": \"crocus 12\",\n \"label\": \"crocus-12\",\n \"hex\": \"#cccfe2\"\n },\n {\n \"name\": \"crocus 13\",\n \"label\": \"crocus-13\",\n \"hex\": \"#d9dceb\"\n },\n {\n \"name\": \"crocus 14\",\n \"label\": \"crocus-14\",\n \"hex\": \"#e3e5ed\"\n }\n]","sub_path":"py/obi.py","file_name":"obi.py","file_ext":"py","file_size_in_byte":61369,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"406356961","text":"machine_type = {\n 'p': 'personal',\n 's': 'server'\n}\ndefault_type = 'p'\n\nsep_suffix = '.mtype'\ncontained_nvim = False\n\n\ndef get_tasks():\n tasks = {\n # SHELLS\n '~/.bashrc': 'bashrc',\n '~/.screenrc': 'screenrc',\n\n # VIM\n '~/.vimrc': 'vim/vimrc',\n '~/.vim': 'vim',\n '~/.vim/autoload/plug.vim': 'vim/bundle/vim-plug/plug.vim',\n\n # NeoVIM\n '~/.config/nvim': 'nvim',\n\n # GIT\n '~/.gitconfig': 'git/gitconfig',\n '~/.gitignore': 'git/gitignore',\n\n # ZSH\n '~/.zgen' : 'zsh/zgen',\n '~/.zsh' : 'zsh',\n '~/.zlogin' : 'zsh/zlogin',\n '~/.zlogout' : 'zsh/zlogout',\n '~/.zpreztorc': 'zsh/zpreztorc',\n '~/.zprofile': 'zsh/zprofile',\n '~/.zshenv' : 'zsh/zshenv',\n '~/.zshrc' : 'zsh/zshrc',\n\n # Bins\n '~/.local/bin/dotfiles': 'bin/dotfiles',\n '~/.local/bin/fasd': 'zsh/fasd/fasd',\n '~/.local/bin/is_mosh': 'zsh/is_mosh/is_mosh',\n '~/.local/bin/imgcat': 'bin/imgcat',\n '~/.local/bin/imgls': 'bin/imgls',\n '~/.local/bin/fzf': '~/.fzf/bin/fzf', # fzf is at $HOME/.fzf,\n '~/.local/bin/nvim': 'bin/nvim',\n\n # X\n '~/.Xmodmap': 'Xmodmap',\n\n # GTK\n '~/.gtkrc-2.0': 'gtkrc-2.0',\n\n # tmux\n '~/.tmux' : 'tmux',\n '~/.tmux.conf': 'tmux/tmux.conf',\n '~/.config/tmuxinator': 'tmux/tmuxinator',\n '~/.tmuxinator': 'tmux/tmuxinator',\n\n # .config\n '~/.config/terminator': 'config/terminator',\n '~/.config/pudb/pudb.cfg': 'config/pudb/pudb.cfg',\n\n # pip and python\n #'~/.pip/pip.conf': 'pip/pip.conf',\n '~/.pythonrc.py': 'python/pythonrc.py',\n '~/.pylintrc': 'python/pylintrc',\n '~/.condarc': 'python/condarc',\n '~/.config/pycodestyle': 'python/pycodestyle',\n }\n return tasks\n\ndef get_symlinks():\n links = {\n '~/.local/bin/nvim/python': '/usr/bin/python',\n '~/.local/bin/nvim/python3': '/usr/bin/python3'\n }\n return links\n\n\ndef get_post_actions(args):\n post_actions = [\n # source deactivate\n 'source deactivate',\n\n # zgen installation\n r'''# Update zgen modules and cache (the init file)\n zsh -c \"\n source ${HOME}/.zshrc # source zplug and list plugins\n zgen reset\n zgen update\n \"\n ''',\n\n # validate neovim package installation\n r'''# neovim package needs to be installed\n if which nvim 2>/dev/null; then\n /usr/local/bin/python3 -c 'import neovim' || /usr/bin/python3 -c 'import neovim'\n rc=$?; if [[ $rc != 0 ]]; then\n echo -e '\\033[0;33mNeovim requires 'neovim' package on the system python3. 
Please try:'\n echo -e ' /usr/local/bin/pip3 install neovim'\n echo -e '\\033[0m'\n fi\n fi\n ''',\n\n\n\n # Install tmux plugins via tpm\n '~/.tmux/plugins/tpm/bin/install_plugins',\n\n # Change default shell if possible\n r'''# Change default shell to zsh\n if [[ ! \"$SHELL\" = *zsh ]]; then\n echo -e '\\033[0;33mPlease type your password if you wish to change the default shell to ZSH\\e[m'\n chsh -s /bin/zsh && echo -e 'Successfully changed the default shell, please re-login'\n fi\n ''',\n\n # Create ~/.gitconfig.secret file and check user configuration\n r'''# Create ~/.gitconfig.secret and user configuration\n if [ ! -f ~/.gitconfig.secret ]; then\n cat > ~/.gitconfig.secret <<EOL\n# vim: set ft=gitconfig:\nEOL\n fi\n if ! git config --file ~/.gitconfig.secret user.name 2>&1 > /dev/null; then echo -ne '\n \\033[1;33m[!] Please configure git user name and email:\n git config --file ~/.gitconfig.secret user.name \"(YOUR NAME)\"\n git config --file ~/.gitconfig.secret user.email \"(YOUR EMAIL)\"\n \\033[0m'\n fi\n ''',\n\n # vim ruby integration\n 'gem install --user-install neovim',\n 'gem install --user-install tmuxinator',\n ]\n return post_actions\n","sub_path":"config/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":4198,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"524438681","text":"##############################################################################\n#\n# Copyright (c) 2007 Zope Corporation and Contributors.\n# All Rights Reserved.\n#\n# This software is subject to the provisions of the Zope Public License,\n# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.\n# THIS SOFTWARE IS PROVIDED \"AS IS\" AND ANY AND ALL EXPRESS OR IMPLIED\n# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS\n# FOR A PARTICULAR PURPOSE.\n#\n##############################################################################\n\"\"\"KGS configuration file parser.\"\"\"\nimport datetime\nimport dateutil.parser\nimport os.path\nimport urllib2\nimport ConfigParser\nfrom zc.buildout.buildout import _update, _isurl\n\nMAIN_SECTION = 'KGS'\nEXTENDS_OPTION = 'extends'\n\ndef _open(base, filename, seen):\n \"\"\"Open a configuration file and return the result as a dictionary,\n\n Recursively open other files based on options found.\n\n Note: Shamelessly copied from zc.buildout!\n \"\"\"\n\n if _isurl(filename):\n fp = urllib2.urlopen(filename)\n base = filename[:filename.rfind('/')]\n elif _isurl(base):\n if os.path.isabs(filename):\n fp = open(filename)\n base = os.path.dirname(filename)\n else:\n filename = base + '/' + filename\n fp = urllib2.urlopen(filename)\n base = filename[:filename.rfind('/')]\n else:\n filename = os.path.join(base, filename)\n fp = open(filename)\n base = os.path.dirname(filename)\n\n if filename in seen:\n raise ValueError(\"Recursive file include\", seen, filename)\n\n seen.append(filename)\n\n result = {}\n\n parser = ConfigParser.RawConfigParser()\n parser.optionxform = lambda s: s\n parser.readfp(fp)\n extends = None\n for section in parser.sections():\n options = dict(parser.items(section))\n if section == MAIN_SECTION:\n extends = options.pop(EXTENDS_OPTION, extends)\n result[section] = options\n\n if extends:\n extends = extends.split()\n extends.reverse()\n for fname in extends:\n result = _update(_open(base, fname, seen), result)\n\n seen.pop()\n return result\n\ndef _getAbsolutePath(section, basePath, name, default):\n path = section.get(name, default)\n if path:\n if not 
os.path.isabs(path):\n path = os.path.join(basePath, path)\n if path and not os.path.exists(path):\n path = None\n return path\n\n\nclass Package(object):\n\n def __init__(self, name, versions, tested):\n self.name = name\n self.versions = versions\n self.tested = tested\n\n def __repr__(self):\n return '<%s %r>' %(self.__class__.__name__, self.name)\n\n\nclass KGS(object):\n\n name = u'noname'\n version = u'unknown'\n date = None\n changelog = None\n announcement = None\n files = ()\n packages = ()\n\n def __init__(self, path):\n self.path = path\n self._extract()\n\n def _extract(self):\n basePath = os.path.dirname(self.path)\n result = _open(basePath, self.path, [])\n if MAIN_SECTION in result:\n section = result[MAIN_SECTION]\n # Get name and version.\n self.name = section.get('name', self.name)\n self.version = section.get('version', self.version)\n # Get the changelog.\n self.changelog = _getAbsolutePath(\n section, basePath, 'changelog', self.changelog)\n # Get the announcement.\n self.announcement = _getAbsolutePath(\n section, basePath, 'announcement', self.announcement)\n # Get the date.\n dateStr = section.get('date')\n if dateStr:\n self.date = dateutil.parser.parse(dateStr).date()\n # Get the release files.\n files = section.get('files')\n if files:\n files = files.split()\n for path in files:\n if not os.path.isabs(path):\n path = os.path.join(basePath, path)\n if path and os.path.exists(path):\n self.files += (path,)\n del result[MAIN_SECTION]\n self.packages = []\n sections = result.keys()\n sections.sort()\n for section in sections:\n self.packages.append(\n Package(section,\n result[section]['versions'].split(),\n ConfigParser.ConfigParser._boolean_states[\n result[section]['tested']]\n )\n )\n\n def __repr__(self):\n return '<%s %r>' %(self.__class__.__name__, self.name)\n\n","sub_path":"zope.kgs/branches/jim-dev/src/zope/kgs/kgs.py","file_name":"kgs.py","file_ext":"py","file_size_in_byte":4791,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"300094056","text":"import turtle\n\nt = turtle.Turtle() # creates a turtle object\n#t.ht() # makes the turtle 'invisible' so you don't see it drawing\ntick_length = 20\nradius = 150\n\nfor i in range(12): # executes the indented code 12 times\n t.left(90) # rotates the turtle left 90 degrees\n t.fd(tick_length) # moves the turtle forward tick_length units\n t.bk(tick_length) # moves the turtle backward tick_length units\n t.right(90) # rotates the turtle right 90 degrees\n t.circle(radius, 30) # draws the first 30 degrees of a circle with a radius of 'radius'","sub_path":"clockface.py","file_name":"clockface.py","file_ext":"py","file_size_in_byte":580,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"326720002","text":"#!/usr/bin/python\n\n\nclass Solution(object):\n def isSameTree(self, p, q):\n \"\"\"\n :type p: TreeNode\n :type q: TreeNode\n :rtype: bool\n \"\"\"\n if not p and not q:\n return True\n elif (p or q) and not (p and q): # if one of them is empty\n return False\n else:\n if p.val == q.val and \\\n self.isSameTree(p.left, q.left) and \\\n self.isSameTree(p.right, q.right):\n return True\n else:\n return False\n \n \n","sub_path":"leetcode/same-tree.py","file_name":"same-tree.py","file_ext":"py","file_size_in_byte":575,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"514914723","text":"import tkinter as tk\nimport numpy as np\n\n#The whole point here is 
basically to let you draw curves that you can plug into the program\n#I'm gonna make it so this outputs a file and you can pipe that to the other program somehow\n\n#distance between mouse locations before we add another point to our array\nres = 50.0\n\npoints = []\ncur_len = 0\nis_pressed = 0\n\nwindow = tk.Tk()\n\ndef press(event=None):\n global is_pressed\n is_pressed = 1\n \ndef pressed_motion(event=None):\n global points\n global cur_len\n global is_pressed\n if is_pressed == 0:\n return\n if len(points) == 0:\n points.append(np.array([event.x, event.y]))\n else:\n cur = np.array([event.x, event.y])\n if np.linalg.norm(points[cur_len]-cur) >= res:\n points.append(cur)\n cur_len += 1\n canvas.create_oval(event.x-1, event.y-1, event.x+1, event.y+1)\n \ndef release(event=None):\n global is_pressed\n global points\n print(points)\n is_pressed = 0\n #save our shape\n points = np.array(points)\n np.savetxt(\"customshape.txt\", points)\n #quit program\n quit()\n\ncanvas = tk.Canvas(window, width=1000, height=1000)\ncanvas.pack()\ncanvas.bind(\"<Button-1>\", press)\ncanvas.bind(\"<B1-Motion>\", pressed_motion)\ncanvas.bind(\"<ButtonRelease-1>\", release)\n\ntk.mainloop()\n","sub_path":"VaughanMethod/CurveDrawer.py","file_name":"CurveDrawer.py","file_ext":"py","file_size_in_byte":1324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"473852654","text":"import pyperclip\nimport time\n\nfrom selenium.webdriver import ActionChains\n\nfrom regression.python_test_utils import test_utils\nfrom regression.feature_utils.base_feature_test import BaseFeatureTest\n\n\nclass CopySelectedQueryResultsFeatureTest(BaseFeatureTest):\n \"\"\"\n Tests various ways to copy data from the query results grid.\n \"\"\"\n\n\n scenarios = [\n (\"Test Copying Query Results\", dict())\n ]\n\n def before(self):\n connection = test_utils.get_db_connection(self.server['db'],\n self.server['username'],\n self.server['db_password'],\n self.server['host'],\n self.server['port'])\n test_utils.drop_database(connection, \"acceptance_test_db\")\n test_utils.create_database(self.server, \"acceptance_test_db\")\n test_utils.create_table(self.server, \"acceptance_test_db\", \"test_table\")\n self.page.add_server(self.server)\n\n def runTest(self):\n self.page.toggle_open_tree_item(self.server['name'])\n self.page.toggle_open_tree_item('Databases')\n self.page.toggle_open_tree_item('acceptance_test_db')\n time.sleep(5)\n self.page.find_by_partial_link_text(\"Tools\").click()\n self.page.find_by_partial_link_text(\"Query Tool\").click()\n self.page.click_tab('Query-1')\n time.sleep(5)\n ActionChains(self.page.driver).send_keys(\"SELECT * FROM test_table\").perform()\n self.page.driver.switch_to_frame(self.page.driver.find_element_by_tag_name(\"iframe\"))\n self.page.find_by_id(\"btn-flash\").click()\n\n self._copies_rows()\n self._copies_columns()\n\n def _copies_rows(self):\n pyperclip.copy(\"old clipboard contents\")\n time.sleep(5)\n self.page.find_by_xpath(\"//*[contains(@class, 'sr')]/*[1]/input[@type='checkbox']\").click()\n self.page.find_by_xpath(\"//*[@id='btn-copy-row']\").click()\n\n self.assertEqual(\"'Some-Name','6'\",\n pyperclip.paste())\n\n def _copies_columns(self):\n pyperclip.copy(\"old clipboard contents\")\n\n self.page.find_by_xpath(\"//*[@data-test='output-column-header' and contains(., 'some_column')]/input\").click()\n self.page.find_by_xpath(\"//*[@id='btn-copy-row']\").click()\n\n self.assertEqual(\n \"\"\"'Some-Name'\n'Some-Other-Name'\"\"\",\n pyperclip.paste())\n\n def after(self):\n 
self.page.close_query_tool()\n self.page.remove_server(self.server)\n\n connection = test_utils.get_db_connection(self.server['db'],\n self.server['username'],\n self.server['db_password'],\n self.server['host'],\n self.server['port'])\n test_utils.drop_database(connection, \"acceptance_test_db\")\n","sub_path":"web/pgadmin/feature_tests/copy_selected_query_results_feature_test.py","file_name":"copy_selected_query_results_feature_test.py","file_ext":"py","file_size_in_byte":3029,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"95485766","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Apr 11 10:12:41 2019\n\n@author: quibim\n\"\"\"\n\nimport numpy as np\nimport cv2\nfrom skimage.color import rgb2lab\n\ndef segmentation(img):\n \n ##################### COLOR-BASED SEGMENTATION ###############################\n \n #Black boxes to remove colorbar and yellow lines\n nrows, ncols, dim = img.shape\n\n img[1:150, 1:25, :] = 0\n img[:, 230:ncols, :] = 0\n img[305:nrows, 200:ncols, :] = 0\n \n # Convert the image into a L*A*B colorspace\n labImage = rgb2lab(img)\n \n # First, it is needed to reshape the image to have an array of Mx3 size (due to \n # the 3 features: R,G,B)\n Z = labImage.reshape((-1,3))\n \n # Inputs: image (float32), clusters number,bestlabels, criteria, attemps, flags\n Z = np.float32(Z)\n nclusters = 8\n bestlabels = None\n criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 0.01) \n attempts = 10\n flags = cv2.KMEANS_PP_CENTERS #How the initial centers are taken\n \n # Apply K-Means\n ret,label,center=cv2.kmeans(Z,nclusters,bestlabels,criteria,attempts,flags)\n \n center = np.uint8(center)\n \n # Create and save the images\n dopplerImage = 0\n anatomicImage = 0\n \n for c in range(nclusters):\n seg = np.copy(img)\n seg = seg.reshape((-1,3))\n seg[label.flatten()!=c] = 0\n seg = seg.reshape(nrows,ncols,dim)\n \n if abs(center[c,0]) > 10 and ((center[c,1] > 20 and center[c,1] < 230) or (center[c,2] > 20 and center[c,2] < 230)):\n dopplerImage = dopplerImage + seg\n else:\n anatomicImage = anatomicImage + seg\n \n # Create mask to remove yellow box\n dopplerGray = cv2.cvtColor(dopplerImage, cv2.COLOR_BGR2GRAY)\n dopplerBin = cv2.threshold(dopplerGray, 1, 255, cv2.THRESH_BINARY)[1]\n \n kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(3,3))\n \n mask = cv2.erode(dopplerBin,kernel,iterations = 1)\n\n dopplerMasked = cv2.bitwise_and(dopplerImage,dopplerImage,mask = mask)\n \n return dopplerMasked, anatomicImage\n","sub_path":"doppler_segmentation.py","file_name":"doppler_segmentation.py","file_ext":"py","file_size_in_byte":2097,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"299043664","text":"import pytest\n\nfrom apistar import app, exceptions, routing, schema, test\n\n\nclass Location(schema.Object):\n properties = {\n 'latitude': schema.Number(minimum=-90.0, maximum=90.0),\n 'longitude': schema.Number(minimum=-180.0, maximum=180.0)\n }\n\n\nclass HighScore(schema.Object):\n properties = {\n 'name': schema.String(max_length=100),\n 'score': schema.Integer(minimum=0, maximum=100),\n 'completed': schema.Boolean(default=False),\n 'difficulty': schema.Enum(enum=['easy', 'medium', 'hard']),\n 'location': Location(default={'latitude': 0.0, 'longitude': 0.0})\n }\n\n\ndef basic_object(score: HighScore):\n return score\n\n\nroutes = [\n routing.Route('/basic_object/', 'POST', basic_object),\n]\n\napp = 
app.App(routes=routes)\nclient = test.TestClient(app)\n\n\ndef test_valid_object():\n response = client.post('/basic_object/', json={\n 'name': 'tom',\n 'score': 87,\n 'difficulty': 'easy',\n 'completed': True,\n 'location': {\n 'latitude': 51.477,\n 'longitude': 0.0\n }\n })\n assert response.status_code == 200\n assert response.json() == {\n 'name': 'tom',\n 'score': 87,\n 'difficulty': 'easy',\n 'completed': True,\n 'location': {\n 'latitude': 51.477,\n 'longitude': 0.0\n }\n }\n\n\ndef test_invalid_object():\n response = client.post('/basic_object/', json={\n 'score': 105,\n 'difficulty': 'foo'\n })\n assert response.status_code == 400\n assert response.json() == {\n 'name': 'This field is required.',\n 'difficulty': 'Must be a valid choice.',\n 'score': 'Must be less than or equal to 100.'\n }\n\n\nclass test_object_instantiation():\n location = Location({'latitude': 51.477, 'longitude': 0.0})\n value = HighScore({\n 'name': 'tom',\n 'score': 99.0,\n 'difficulty': 'easy',\n 'completed': 'True',\n 'location': location\n })\n assert value['name'] == 'tom'\n assert value['score'] == 99\n assert value['completed'] is True\n assert value['location'] == location\n\n\nclass test_object_default():\n value = HighScore({\n 'name': 'tom',\n 'score': 99,\n 'difficulty': 'easy',\n 'completed': True\n })\n assert value['location'] == {'latitude': 0.0, 'longitude': 0.0}\n\n\nclass test_raw_instance():\n class LocationRecord(object):\n def __init__(self, latitude, longitude):\n self.latitude = latitude\n self.longitude = longitude\n\n record = LocationRecord(latitude=0.0, longitude=90.0)\n assert Location(record) == {\n 'latitude': 0.0,\n 'longitude': 90.0\n }\n\n\nclass test_object_invalid_key():\n with pytest.raises(exceptions.SchemaError) as exc:\n HighScore({1: 'invalid'})\n assert str(exc.value) == 'Object keys must be strings.'\n\n\nclass test_object_invalid_type():\n with pytest.raises(exceptions.SchemaError) as exc:\n HighScore(1)\n assert str(exc.value) == 'Must be an object.'\n","sub_path":"tests/schema/test_object.py","file_name":"test_object.py","file_ext":"py","file_size_in_byte":3044,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"455535917","text":"\"\"\"empty message\n\nRevision ID: 73e5edf64225\nRevises: cd78f40311db\nCreate Date: 2020-11-17 23:45:44.801844\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '73e5edf64225'\ndown_revision = 'cd78f40311db'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('tests', sa.Column('no_questions', sa.Integer(), nullable=True))\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.drop_column('tests', 'no_questions')\n # ### end Alembic commands ###\n","sub_path":"server/migrations/versions/73e5edf64225_.py","file_name":"73e5edf64225_.py","file_ext":"py","file_size_in_byte":662,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"216400647","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Nov 17 10:48:40 2020\n\n@author: alisha\n\"\"\"\n\nimport re\nimport numpy as np\nimport funcs\nimport pandas as pd\n\ndef checkWordList(wordlistSol, wordlistAns, amountCond, strict = True, \n count_similar_meaning = False): \n '''\n this function outpusts a list which renders a list for each subject\n with the differences of the number of correctly recalled words\n \n these lists of differences has the order of how they had been presented\n to the participant\n \n output: list with the lengths of the amount of participant consisting\n of lists with the length of amountCond (=6)\n '''\n \n #replace nan in dataframe\n wordlistAns = wordlistAns.replace(np.nan, '', regex = True) \n \n for i in range(len(wordlistAns)):\n for j in range(amountCond*2):\n \n #split so that each word is one string\n wordlistAns.loc[i,j] = re.sub(\"[^\\w]\", \" \", \n wordlistAns.loc[i,j] ).split() \n\n for k in range(len(wordlistAns.loc[i][j])):\n \n #make everything uppercase so that it matches solutions\n wordlistAns.loc[i,j][k] = wordlistAns.loc[i,\n j][k].upper()\n \n #correct spelling if needed\n if wordlistAns.loc[i,j][k] == 'THOUGH':\n wordlistAns.loc[i,j][k] = 'TOUGH'\n \n if wordlistAns.loc[i,(j)][k] == 'BLYING':\n wordlistAns.loc[i,(j)][k] = 'LYING'\n \n if wordlistAns.loc[i,(j)][k] == 'MOUND':\n wordlistAns.loc[i,(j)][k] = 'BOUND'\n \n if wordlistAns.loc[i,(j)][k] == 'OCEANM':\n wordlistAns.loc[i,(j)][k] = 'OCEAN'\n \n #in case we wanna count \"almost\" correct answers as true\n if not strict:\n \n replace_dic = {'MAJOR':'MAYOR', 'NAVY':'NAVAL','BAKE':'BAKER',\n 'CHOOSE':'CHOSE','BAKERY':'BAKER','DRINK':'DRUNK',\n 'GUILT':'GUILTY','PROCH':'PORCH','SHEARS':'SHEAR'}\n wordlistAns.loc[i,j] = [replace_dic.get(n,n) for n in wordlistAns.loc[i,j]]\n \n if count_similar_meaning:\n \n replace_dic1 = {'MESSAGE':'REPLY','ALARM':'ALERT','PRISON':'CRIME',\n 'JAIL':'CRIME','TRASH':'WASTE','CONFESS':'ADMIT',\n 'DRUGS':'JOINT','DRUG':'JOINT','BEACH':'OCEAN',\n 'BOAT':'OCEAN'}\n wordlistAns.loc[i,j] = [replace_dic1.get(n,n) for n in wordlistAns.loc[i,j]]\n \n \n #split sols into chunks how they had been presented to individual subjects\n diffAll = []\n for i in range(len(wordlistSol)):\n chunkedSols = list(funcs.chunks(wordlistSol.loc[i].to_list(),9)) \n \n diffpersubject = []\n \n #let's compare solutions to answers to see how many correctly recalled words\n for j in range(amountCond*2):\n \n #answer of subjects\n answer = wordlistAns.loc[i,(j)]\n \n if not (j%2): #even numbers = immediate free recall\n \n #solution\n solution = chunkedSols[int(j/2)]\n \n #matches and notmatches between answers and solution\n match = set(answer) & set(solution) \n notmatches = list(set(answer).difference(solution)) \n \n #number of correctly recalled words\n numRecalledImm = len(match)\n \n else: #delayed recall\n \n #solution\n solution = chunkedSols[int((j-1)/2)]\n \n #matches and notmatches between answers and solution\n match = set(answer) & set(solution) \n notmatches = list(set(answer).difference(solution)) \n \n #number of correctly recalled words\n numRecallDel = len(match)\n \n #calculate difference between delayed and 
immediate\n diff = numRecallDel - numRecalledImm\n print('imm:', numRecalledImm, ', del:', numRecallDel,'---------------------')\n diffpersubject.append(diff)\n \n \n #check nomatches\n if notmatches:\n print(i,j,'sol', solution)\n print('nomatch', notmatches)\n \n diffAll.append(diffpersubject)\n \n \n return diffAll, wordlistAns\n\n\ndef orderEM(diffsEM, conditions):\n '''\n This function computes the difference between \n [delayed - immediate recall] and outputs a list with the length of \n numbr of participants\n \n diffsOrdered = [rest, video, game]\n '''\n \n diffsOrdered = [] #both rounds together\n diffs_1 = [] #first round\n diffs_2 = [] #second round\n \n for i in range(len(diffsEM)):\n closed = 0\n video = 0\n game = 0\n \n #initialize flags to determine whether we are in 1th or 2nd round\n flag_c = 0\n flag_v = 0\n flag_g = 0\n \n for j in range(6):\n \n if conditions.loc[i, (j)] == 'closed':\n \n #if closed is not empty (= second round)\n if flag_c:\n closed_2 = diffsEM[i][j]\n \n #first round\n else:\n closed_1 = diffsEM[i][j]\n \n #both rounds together\n closed += diffsEM[i][j]\n flag_c = 1\n\n if conditions.loc[i, (j)] == 'video':\n \n if flag_v:\n video_2 = diffsEM[i][j]\n \n else:\n video_1 = diffsEM[i][j]\n \n video += diffsEM[i][j]\n flag_v = 1\n\n if conditions.loc[i, (j)] == 'game':\n \n if flag_g:\n game_2 = diffsEM[i][j]\n \n else:\n game_1 = diffsEM[i][j]\n \n game += diffsEM[i][j]\n flag_g = 1\n \n diffsOrdered.append([closed,video, game])\n diffs_1.append([closed_1, video_1, game_1])\n diffs_2.append([closed_2, video_2, game_2])\n \n #put it into a dataframe\n diffsOrdered = pd.DataFrame(diffsOrdered)\n diffsOrdered.columns = ['rest','video','game']\n \n diffs_1 = pd.DataFrame(diffs_1)\n diffs_1.columns = ['rest','video','game']\n \n diffs_2 = pd.DataFrame(diffs_2)\n diffs_2.columns = ['rest','video','game']\n #plot\n ax = diffsOrdered.plot.bar(title = 'Wordlist Differences Immediate & Delayed (Del-Imm)')\n \n return diffsOrdered, diffs_1, diffs_2","sub_path":"wl_check.py","file_name":"wl_check.py","file_ext":"py","file_size_in_byte":7080,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"536361151","text":"from kivy.config import Config\nConfig.set('kivy', 'desktop', '1')\nConfig.set('kivy', 'window_icon','img/turtlecoin_icon_color_16.png')\nConfig.set('graphics', 'resizable', '1')\nConfig.set('graphics', 'width', '1250')\nConfig.set('graphics', 'height', '750')\nConfig.set('graphics', 'minimum_width', '1250')\nConfig.set('graphics', 'minimum_height', '750')\n\nfrom kivy.core.window import Window\nWindow.clearcolor = (64/255,193/255,142/255,1)\n#primary green = (0/255,133/255,61/255,1)\nimport kivy\nkivy.require('1.10.1')\n\nimport time\nimport threading\nimport queue\n\nfrom kivy.app import App\nfrom kivy.uix.label import Label\nfrom kivy.uix.widget import Widget\nfrom kivy.uix.floatlayout import FloatLayout\nfrom kivy.uix.relativelayout import RelativeLayout\nfrom kivy.uix.gridlayout import GridLayout\nfrom kivy.uix.screenmanager import ScreenManager, Screen, FadeTransition\nfrom kivy.properties import StringProperty, ObjectProperty\nfrom kivy.uix.scrollview import ScrollView\nfrom kivy.uix.popup import Popup\n\nfrom kivy.clock import Clock\nfrom datetime import datetime\nfrom explorer_completo import Turtle_Explorer\nfrom wallet_completo import Turtle_Wallet\nfrom functools import partial\n\n\n####################################---Screens---###########################################\nclass 
OnStartScreen(Screen):\n pass\nclass OnOpenWalletScreen(Screen):\n pass\nclass WalletScreen(Screen):\n pass\nclass TransactionScreen(Screen):\n pass\n\nclass BlockExplorerScreen(Screen):\n pass\nclass BlockSearchScreen(Screen):\n pass\n\nclass ScreenManagement(ScreenManager):\n pass\n##############################################################################################\n####################################---Popup windows---#######################################\nclass AboutPopup(Popup):\n pass\n\nclass WalletPrompt(Popup):\n pass\n\nclass ChooseWalletPrompt(Popup):\n pass\n\nclass StatusLabel(RelativeLayout):\n pass\n##############################################################################################\n####################################---data layouts---########################################\n\nclass WalletStatusLabel(RelativeLayout):\n pass\n\nclass BigLabel(Label):\n pass\nclass SmallLabel(Label):\n blockHash = \"\"\n def setBlkHash (self, blkhash):\n self.blockHash = blkhash\n def goToSearch(self, instance, value):\n myApp.search_on_click(\"blk\", self.blockHash)\n\n\nclass walletTxData(RelativeLayout):\n transferType = StringProperty()\n txhash_payId = StringProperty('Hash\\n{}\\nPaymentId\\n{}')\n txamount = StringProperty('Amount\\n{}')\n txfee = StringProperty('Fee\\n{}')\n txtime = StringProperty('Time\\n{}')\n \n wtransferType = ObjectProperty(None)\n wTxhash_andpaymentId = ObjectProperty(None)\n wTxamount = ObjectProperty(None)\n wTxfee = ObjectProperty(None)\n wTxtime = ObjectProperty(None)\n forsearchhash = \"\"\n def setText(self, transferType,txhash, paymentId, txamountt, txfeee, txtimee):\n self.transferType = transferType\n self.forsearchhash = str(txhash)\n self.txhash_payId = self.txhash_payId.format('[color=556b2f][ref='+txhash+']'+txhash+'[/ref][/color]', paymentId)\n self.txamount = self.txamount.format(txamountt)\n self.txfee = self.txfee.format(txfeee)\n self.txtime = self.txtime.format(txtimee)\n\n self.wtransferType.text = self.transferType\n self.wTxhash_andpaymentId.text = self.txhash_payId\n self.wTxhash_andpaymentId.markup = True\n self.wTxhash_andpaymentId.bind(on_ref_press = self.goToSearch)\n \n self.wTxamount.text = self.txamount\n self.wTxfee.text = self.txfee\n self.wTxtime.text = self.txtime\n def goToSearch(self, instance, value):\n myApp.root.transition.direction = 'left'\n myApp.root.current = 'blocksearchscreen'\n myApp.search_on_click(\"tx\", self.forsearchhash)\n \nclass BlkSearchData(RelativeLayout):\n item1 = StringProperty()\n item2 = StringProperty()\n blkResultItem1 = ObjectProperty(None)\n blkResultItem2 = ObjectProperty(None)\n myHeight = \"\"\n def setText(self,item1text, item2text):\n self.item1 = item1text\n self.item2 = item2text\n self.blkResultItem1.text = self.item1\n self.blkResultItem2.text = self.item2\n self.blkResultItem1.bind(on_ref_press = self.goToSearch)\n \n def setMyHeight(self, blkHeight):\n self.myHeight = str(blkHeight) #store blk height value for search on click\n \n def goToSearch(self, instance, value):\n if value == '<':\n myApp.search_on_click(\"blk\", str(int(self.myHeight)+1))#give clickability to arrows\n elif value == '>':\n self.myHeight = int(self.myHeight) -1 \n if self.myHeight < 0:\n self.myHeight = 0\n myApp.search_on_click(\"blk\", str(self.myHeight))\n\n\nclass BlkSearchTx(RelativeLayout):\n hashText = StringProperty()\n feeText = StringProperty()\n totalAmountText = StringProperty()\n sizeText = StringProperty()\n forSearchHash = \"\"\n tx_hash = ObjectProperty(None)\n tx_fee = 
ObjectProperty(None)\n tx_totalAmount = ObjectProperty(None)\n tx_Size = ObjectProperty(None)\n def setText(self, hashtext, fee, totalamount, size, forsearchHash):\n self.hashText = hashtext\n self.feeText = fee\n self.totalAmountText = totalamount\n self.sizeText = size\n self.tx_hash.text = self.hashText\n self.tx_hash.markup = True\n self.tx_hash.bind(on_ref_press = self.goToSearch)\n self.tx_fee.text = self.feeText \n self.tx_totalAmount.text = self.totalAmountText\n self.tx_Size.text = self.sizeText\n self.forSearchHash = forsearchHash\n def goToSearch(self, instance, value):\n myApp.search_on_click(\"tx\", self.forSearchHash)\n \nclass TXInputs(RelativeLayout):\n In_amount = StringProperty()\n In_image = StringProperty()\n input_amount = ObjectProperty(None)\n input_image = ObjectProperty(None)\n\n def setText(self, amount, image):\n self.In_amount = amount\n self.In_image = image\n self.input_amount.text = self.In_amount\n self.input_image.text = self.In_image\n \nclass TXOutputs(RelativeLayout):\n Out_amount = StringProperty()\n Out_key = StringProperty()\n output_amount = ObjectProperty(None)\n output_key = ObjectProperty(None)\n def setText(self, amount, key):\n self.Out_amount = amount\n self.Out_key = key\n self.output_amount.text = self.Out_amount\n self.output_key.text = self.Out_key\n\nclass TxPoolData(RelativeLayout):\n itemText_amount = StringProperty()\n itemText_fee = StringProperty()\n itemText_size = StringProperty()\n itemText_hash = StringProperty()\n txp_amount = ObjectProperty(None)\n txp_fee = ObjectProperty(None)\n txp_size = ObjectProperty(None)\n txp_hash = ObjectProperty(None)\n\n def setText(self, amount, fee, size, hashhash):\n self.itemText_amount = amount\n self.itemText_fee = fee\n self.itemText_size = size\n self.itemText_hash = hashhash\n self.txp_amount.text = self.itemText_amount\n self.txp_fee.text = self.itemText_fee\n self.txp_size.text = self.itemText_size\n self.txp_hash.text = '[color=556b2f][ref='+self.itemText_hash+']'+self.itemText_hash+'[/ref][/color]'\n self.txp_hash.markup = True\n self.txp_hash.bind(on_ref_press = self.goToSearch)\n def goToSearch(self, instance, value):\n myApp.root.transition.direction = 'left'\n myApp.root.current = 'blocksearchscreen'\n myApp.search_on_click(\"tx\", self.itemText_hash)\n \nclass RecentBlockData(RelativeLayout):\n itemText_height = StringProperty()\n itemText_size = StringProperty()\n itemText_hash = StringProperty()\n itemText_difficulty = StringProperty()\n itemText_tx = StringProperty()\n itemText_datetime = StringProperty()\n rctblk_height = ObjectProperty(None)\n rctblk_size = ObjectProperty(None)\n rctblk_hash = ObjectProperty(None)\n rctblk_difficulty = ObjectProperty(None)\n rctblk_tx = ObjectProperty(None)\n rctblk_datetime = ObjectProperty(None)\n\n def setText(self, height, size, hashhash, difficulty, tx, datetime):\n self.itemText_height = height\n self.itemText_size = size\n self.itemText_hash = hashhash\n self.itemText_difficulty = difficulty\n self.itemText_tx = tx\n self.itemText_datetime = datetime\n \n self.rctblk_height.text = '[color=556b2f][ref='+self.itemText_height+']'+self.itemText_height+'[/ref][/color]'\n self.rctblk_height.markup = True\n self.rctblk_height.bind(on_ref_press = self.goToSearch_byHeight)\n \n self.rctblk_size.text = self.itemText_size \n self.rctblk_hash.text = '[color=556b2f][ref='+self.itemText_hash+']'+self.itemText_hash+'[/ref][/color]'\n self.rctblk_hash.markup = True\n \n self.rctblk_hash.bind(on_ref_press = self.goToSearch_byHash)\n 
self.rctblk_difficulty.text = self.itemText_difficulty\n self.rctblk_tx.text = self.itemText_tx\n self.rctblk_datetime.text = self.itemText_datetime\n \n def goToSearch_byHash(self, instance, value):\n myApp.root.transition.direction = 'left'\n myApp.root.current = 'blocksearchscreen'\n myApp.search_on_click(\"blk\", self.itemText_hash)\n \n def goToSearch_byHeight(self, instance, value):\n myApp.root.transition.direction = 'left'\n myApp.root.current = 'blocksearchscreen'\n height = self.itemText_height.replace(\",\",\"\")\n myApp.search_on_click(\"blk\", height)\n##############################################################################################\n#these functions calls the search function in the explorer\n# then puts the result data into Queue\n\ndef searchBlk_q(HashN):\n blk_search_q.put(Turtle_Explorer().searchBlk(HashN)) \n\ndef searchTx_q(HashN):\n tx_search_q.put(Turtle_Explorer().searchTx(HashN))\n\n##########################################################################################\n#some global variable that is used\n\ntx_search_q = queue.Queue(maxsize = 1)\nblk_search_q = queue.Queue(maxsize = 1)\nsearchAvailable1 = Label(pos_hint = {\"x\":0.1, \"y\":0.5}, size_hint = (0.5,None),\n color = (0,0,0,1))\nsearchAvailable1.text = \"Search will be available when the daemon is synched.\"\nsearchAvailable2 = Label(pos_hint = {\"x\":0.1, \"y\":0.5}, size_hint = (0.5,None),\n color = (0,0,0,1))\nsearchAvailable2.text = \"Search will be available when the daemon is synched.\"\n\n###########################################################################################\nclass Turtle_Completo(App):\n\n status_text = StringProperty(\"DAEMON STATUS\\nSYNC...\\n\\nLOCAL: \\nNETWORK: \\nPEERS: \")\n wallet_status_text = StringProperty(\"WALLETD STATUS\\nNo Wallet\\n\\nLOCAL: \\nNETWORK: \\nPEERS: \")\n walletPathError = StringProperty()\n publicAddress = StringProperty()\n walletFeeText = StringProperty(\"fee: TRTL\")\n unlockedbalance = StringProperty(\"Available Balance: TRTL\")\n lockedbalance = StringProperty(\"Locked Balance: TRTL\")\n \n TRTLdaemon = None #turtle daemon object\n WalltOpened = False #bool value checking is a wallet service is open or not\n wp = None #this stores the wallet opening popup window\n chF = None # this stores the file choosing prompt window when click browse on the wallet opening popup\n myTRTLWallet = None # turtle wallet object\n \n #acessing and add widgets inside a screen syntax:\n #appname(or self).root.screenname.ids.actualyid.add_widget()\n \n def on_start(self):\n pass\n\n def on_pause(self):\n pass\n\n def on_resume(self):\n pass\n\n def on_stop(self):\n Clock.unschedule(self.update_SynchStatus)\n Clock.unschedule(self.check_search_queue)\n self.TRTLdaemon.Kill_SubProcessTRTLDaemon()\n self.myTRTLWallet.Kill_SubProcessWalletd() #needs to check how to properly kill both wallet and daemon threads when closing\n #killDaemon()\n #global daemon\n #daemon.terminate()\n #daemon.kill()\n\n def build(self):\n global myApp\n TRTLdaemon = None\n self.title = \"Turtle Completo v0.1.0\"\n###################################################################################################################\n#get info from Queue update daemon sych status\n def update_SynchStatus(self, dt):\n dheight = self.TRTLdaemon.heightInfo\n nheight = self.TRTLdaemon.netHeight\n peers = self.TRTLdaemon.peers\n pool = self.TRTLdaemon.txPool\n blocks = self.TRTLdaemon.blocks\n \n if dheight == None or nheight == None:\n self.status_text = \"DAEMON 
STATUS\\nSYNC...\\n\\nLOCAL: \\nNETWORK: \\nPEERS: \"\n elif dheight != nheight:\n self.status_text = \"DAEMON STATUS\\nSYNC...\\n\\nLOCAL: {}\\nNETWORK: {}\\nPEERS: {}\".format(str(dheight),str(nheight),str(peers))\n self.root.blocksearchscreen.ids.searchtxbutton.disabled = True\n self.root.blocksearchscreen.ids.searchblkbutton.disabled = True\n else:\n self.status_text = \"DAEMON STATUS\\n\\nLOCAL: {}\\nNETWORK: {}\\nPEERS: {}\".format(str(dheight),str(nheight),str(peers))\n self.root.blocksearchscreen.ids.searchtxbutton.disabled = False\n self.root.blocksearchscreen.ids.searchblkbutton.disabled = False\n\n if pool == None or blocks == None:\n pass\n else:\n self.update_Txpool(pool)\n self.update_RecentBlocks(blocks)\n\n#-------------------------------------------------------------------------------------\n#update recent blocks and txpool to gridview in a scroll view\n def update_Txpool(self, txpoolInfo):\n #This part and updates tx pool info on the app\n self.root.blockexplorerscreen.ids.txp_datalistTemplate.clear_widgets()\n try:\n if txpoolInfo!=[]:\n for i in range(len(txpoolInfo)):\n TxPoolData1 = TxPoolData()\n if i%2!=1:\n pass\n else:\n TxPoolData1.colors = (1,1,1,0.1)\n amount = \"{:,}\".format(txpoolInfo[i]['amount_out']/100)\n fee = \"{:,}\".format(txpoolInfo[i]['fee']/100)\n size = \"{:,}\".format(txpoolInfo[i]['size'])\n txhash = str(txpoolInfo[i]['hash'])\n TxPoolData1.setText(amount,fee,size,txhash)\n self.root.blockexplorerscreen.ids.txp_datalistTemplate.add_widget(TxPoolData1)\n else:\n pass\n except Exception as e:\n print(\"something wrong in Txpool\")\n print(e)\n\n def update_RecentBlocks(self, blocks):\n #This part and updates recent blocks info on the app\n self.root.blockexplorerscreen.ids.rctblk_datalistTemplate.clear_widgets()\n try:\n for i in range(len(blocks)):\n RecentBlockData1 = RecentBlockData()\n if i%2!=1:\n pass\n else:\n RecentBlockData1.colors = (1,1,1,0.1)\n height = \"{:,}\".format(blocks[i]['height'])\n size = \"{:,}\".format(blocks[i]['cumul_size'])\n blockhash = str(blocks[i]['hash'])\n difficulty = \"{:,}\".format(blocks[i]['difficulty'])\n txs = \"{:,}\".format(blocks[i]['tx_count'])\n dateTime = datetime.fromtimestamp(blocks[i]['timestamp']).strftime('%Y-%m-%d %H:%M:%S')\n RecentBlockData1.setText(height,size,blockhash,difficulty,txs,dateTime,)\n self.root.blockexplorerscreen.ids.rctblk_datalistTemplate.add_widget(RecentBlockData1)\n except Exception as e:\n print(\"something wrong in recentblks\")\n print(e)\n#-------------------------------------------------------------------------------------\n def update_TxSearchResult(self):\n #This part formats and construct the search transaction results then display them in labels\n self.root.blocksearchscreen.ids.txsearch_display.clear_widgets()\n #get tx search result from queue\n Result = tx_search_q.get()\n #if there is error display error msg, if not construct and display search result\n if Result[\"error\"] == \"yes\":\n ErrorLabel = BigLabel(text = Result['msg'])\n self.root.blocksearchscreen.ids.txsearch_display.add_widget(ErrorLabel) \n elif Result[\"error\"] == \"no\":\n current_h = Result['current_height']\n MyResult = Result['result']['result']\n #calculate confirmation from block height\n if current_h - MyResult['block']['height'] <= 0:\n confirmation = 0\n else:\n confirmation = \"{:,}\".format(current_h - MyResult['block']['height'])\n #first confirmation timestamp\n Firstconfirmation = datetime.fromtimestamp(MyResult['block']['timestamp']).strftime('%Y-%m-%d %H:%M:%S')\n \n #calculate 
sum of the outputs\n            sumOfOutputs = sum(amount['amount'] for amount in MyResult['tx']['vout'])\n            sumOfOutputs = \"{:,}\".format(sumOfOutputs/100)\n\n            #get block hash, used for on-click search\n            blkHash = MyResult['block']['hash']\n\n            TxLabel = BigLabel(text = \"[b]TRANSACTION[/b]\")\n            #get transaction hash\n            txHashLabel = SmallLabel(text = \"Hash: \"+MyResult['txDetails']['hash'])\n            #confirmations and first confirmation time\n            ConfirmLabel = SmallLabel(text = \"Confirmations: \" + confirmation +\", First confirmation time: \"+Firstconfirmation)\n            #fee\n            feeLabel = SmallLabel(text = \"Fee: \" + \"{:,}\".format(MyResult['txDetails']['fee']/100) + \" TRTL\")\n            #sum of outputs\n            sumOutLabel = SmallLabel(text = \"Sum of outputs: \" + sumOfOutputs+ \" TRTL\")\n            #transaction size\n            sizeLabel = SmallLabel(text = \"Size: \"+ \"{:,}\".format(MyResult['txDetails']['size']))\n            #mixin count\n            mixinLabel = SmallLabel(text = \"Mixin: \"+ \"{:,}\".format(MyResult['txDetails']['mixin']))\n            \n            InBlockLabel = BigLabel(text = \"[b]IN BLOCK[/b]\" )\n            #'[color=556b2f][ref='+self.hashText+']'+self.hashText+'[/ref][/color]'\n            #in-block info; add on_click_search behavior for the block hash\n            blkHashLabel = SmallLabel(text = \"Hash: \"+'[color=556b2f][ref='+blkHash+']'+blkHash+'[/ref][/color]') #add click-to-search on the hash\n            blkHashLabel.markup = True\n            blkHashLabel.setBlkHash(blkHash)\n            blkHashLabel.bind(on_ref_press = blkHashLabel.goToSearch)\n\n            #block height\n            blkheighLabel = SmallLabel(text = \"Height: \" + \"{:,}\".format(MyResult['block']['height']))\n            timeLabel = SmallLabel(text = \"Timestamp: \" + Firstconfirmation)\n\n            #input header\n            inputHeader = TXInputs()\n            inputHeader.setText(\"Amount\", \"Image\")\n            inputHeader.colors = (0.5, 0.5, 0.5, 0.8)\n            #loop through the input data and save it to a list\n            inputList = []\n            if MyResult['tx']['vin'][0]['type'] == 'ff': #a coinbase tx (input type 'ff') has no spendable inputs to list\n                inputCount = str(0)\n            else:\n                inputCount = \"{:,}\".format(len(MyResult['tx']['vin']))\n                for i in range(len(MyResult['tx']['vin'])):\n                    txinput = TXInputs()\n                    if i%2!=1:\n                        pass\n                    else:\n                        txinput.colors = (1,1,1,0.1)\n                    inAmount = \"{:,}\".format(MyResult['tx']['vin'][i]['value']['amount']/100)\n                    txinput.setText(inAmount+\" TRTL\",\n                                    str(MyResult['tx']['vin'][i]['value']['k_image']))\n                    inputList.append(txinput)\n            #input big label\n            InputLabel = BigLabel(text = \"[b]INPUTS (\"+inputCount+\")[/b]\" )\n            #add 2 padding labels\n            paddingLabel1 = SmallLabel()\n            paddingLabel2 = SmallLabel()\n            #save the labels constructed above into a list\n            LabelList = [TxLabel,txHashLabel,ConfirmLabel,feeLabel,sumOutLabel,\n                         sizeLabel,mixinLabel, paddingLabel1,InBlockLabel, blkHashLabel,\n                         blkheighLabel,timeLabel, paddingLabel2,InputLabel,inputHeader]\n            #append the inputs to the big list\n            LabelList = LabelList + inputList\n\n            #get output count\n            outputCount = \"{:,}\".format(len(MyResult['tx']['vout']))\n            #output title label\n            OutputLabel = BigLabel(text = \"[b]OUTPUTS (\"+outputCount+\")[/b]\" )\n            #output header\n            outputHeader = TXOutputs()\n            outputHeader.setText(\"Amount\", \"Key\")\n            outputHeader.colors = (0.5, 0.5, 0.5, 0.8)\n            #loop through the outputs and save them to a list\n            outputList = [OutputLabel,outputHeader]\n            for i in range(len(MyResult['tx']['vout'])):\n                txoutput = TXOutputs()\n                if i%2!=1:\n                    pass\n                else:\n                    txoutput.colors = (1,1,1,0.1)\n                outAmount = \"{:,}\".format(MyResult['tx']['vout'][i]['amount']/100)\n                txoutput.setText(outAmount+\" TRTL\",\n                                 str(MyResult['tx']['vout'][i]['target']['data']['key']))\n                outputList.append(txoutput)\n            #append the output list to the big list\n            
LabelList += outputList\n            paddingLabel3 = BigLabel()\n            LabelList.append(paddingLabel3)\n            #add the labels in the big list to the container widget\n            for lb in LabelList:\n                self.root.blocksearchscreen.ids.txsearch_display.add_widget(lb)\n            LabelList = []\n    \n    def update_BlkSearchResult(self):\n        #This part formats and constructs the block search results, then displays them in labels\n        self.root.blocksearchscreen.ids.blksearch_display.clear_widgets()\n        #get the result dict from the queue\n        Result = blk_search_q.get()\n        #if there is an error display the error msg; otherwise display the block search result\n        if Result[\"error\"] == \"yes\":\n            ErrorLabel = BigLabel(text = Result['msg'])\n            self.root.blocksearchscreen.ids.blksearch_display.add_widget(ErrorLabel) \n        elif Result[\"error\"] == \"no\":\n            Result = Result['result']['result']\n            \n            #big labels are for titles, small labels for content\n            BlockTitleLabel = BigLabel(text = \"[b]BLOCK[/b]\", size_hint = (0.1,1), pos_hint={\"x\":0, \"y\":0},valign = 'bottom')\n            blockHashLabel = SmallLabel(text = \"[b]\"+Result['block']['hash']+\"[/b]\",size_hint = (0.9,1), pos_hint={\"x\":0.08, \"y\":0},\n                                        valign ='bottom', font_size = 16, color = (47/255,79/255,79/255,1))\n            blkTitleandHashLabel = RelativeLayout(size_hint = (1,None), height = 30)\n            blkTitleandHashLabel.add_widget(BlockTitleLabel)\n            blkTitleandHashLabel.add_widget(blockHashLabel)\n            #add padding\n            paddingLabel1 = SmallLabel(height = 10)\n\n            #1 block height and total transaction size\n            #'[color=556b2f][ref='+self.hashText+']'+self.hashText+'[/ref][/color]'\n            heighAndTotalTxSizelabel = BlkSearchData()\n            heighAndTotalTxSizelabel.setText(\"Height: {} {:,} {}\".format(\"[b][size=20][color=556b2f][ref=<]<[/ref][/color][/size][/b]\",Result['block']['height'],\"[b][size=20][color=556b2f][ref=>]>[/ref][/color][/size][/b]\"),\n                                             \"Total transaction size, bytes: {:,}\".format(Result['block']['transactionsCumulativeSize']))\n            heighAndTotalTxSizelabel.setMyHeight(Result['block']['height'])\n            \n            #2 time and total block size\n            timeAndTotalblkSizelabel = BlkSearchData()\n            #the genesis block reports a timestamp of 0, so leave its timestamp blank\n            if Result['block']['timestamp'] != 0: #check if it is the genesis block\n                timeAndTotalblkSizelabel.setText(\"Timestamp: \"+datetime.fromtimestamp(Result['block']['timestamp']).strftime('%Y-%m-%d %H:%M:%S'),\n                                                 \"Total block size, bytes: {:,}\".format(Result['block']['blockSize']))\n            else:\n                timeAndTotalblkSizelabel.setText(\"Timestamp: \",\n                                                 \"Total block size, bytes: {:,}\".format(Result['block']['blockSize']))\n            #3 version and block size median\n            versionAndCurrentTxMedianLabel = BlkSearchData()\n            versionAndCurrentTxMedianLabel.setText(\"Version: {}.{}\".format(Result['block']['major_version'],Result['block']['minor_version']),\n                                                   \"Current txs median, bytes: {:,}\".format(Result['block']['sizeMedian']))\n            #4 difficulty and effective tx median\n            difficultyAndEffectiveTxMedianLabel = BlkSearchData()\n            difficultyAndEffectiveTxMedianLabel.setText(\"Difficulty: {:,}\".format(Result['block']['difficulty']),\n                                                        \"Effective txs median, bytes: {:,}\".format(Result['block']['effectiveSizeMedian']))\n            #5 orphan status and reward penalty\n            orphanAndRewardPenaltylabel = BlkSearchData()\n            orphan = \"Yes\" if Result['block']['orphan_status'] else \"No\"\n            orphanAndRewardPenaltylabel.setText(\"Orphan: {}\".format(orphan),\n                                                \"Reward penalty: {:,}%\".format(Result['block']['penalty']))\n            #6 transactions and base reward\n            txsAndBaserewardLabel = BlkSearchData()\n            txsAndBaserewardLabel.setText(\"Transactions: 
{:,}\".format(len(Result['block']['transactions'])),\n \"Base reward: {:,} TRTL\".format(Result['block']['baseReward']/100))\n #7 total coins in the network and transaction fee\n totalCoinNetAndTxFeeLabel = BlkSearchData()\n totalCoinNetAndTxFeeLabel.setText(\"Total coins in the network: {:,} TRTL\".format(int(Result['block']['alreadyGeneratedCoins'])/100),\n \"Transactions fee: {:,} TRTL\".format(Result['block']['totalFeeAmount']/100))\n #8 total transactions in the network and reward\n totalTxsInNetAndReward = BlkSearchData()\n totalTxsInNetAndReward.setText(\"Total transactions in the network: {:,} TRTL\".format(Result['block']['alreadyGeneratedTransactions']),\n \"Reward: {:,} TRTL\".format(Result['block']['reward']/100))\n #add padding in the search result display\n paddingLabel2 = SmallLabel()\n TxsLabel = BigLabel(text = \"[b]TRANSACTIONS[/b]\")\n \n #add tx header in the block to the result display\n TxheaderLabel = BlkSearchTx()\n TxheaderLabel.colors = (0.5, 0.5, 0.5, 0.8)\n TxheaderLabel.setText(\"Hash\", \"Fee\", \"Total Amount\", \"Size\", \"ForSearchHash\")\n\n #append the above data to a list\n blockInfoList = [blkTitleandHashLabel,paddingLabel1,heighAndTotalTxSizelabel,timeAndTotalblkSizelabel,\n versionAndCurrentTxMedianLabel,difficultyAndEffectiveTxMedianLabel,orphanAndRewardPenaltylabel,\n txsAndBaserewardLabel,totalCoinNetAndTxFeeLabel,totalTxsInNetAndReward,paddingLabel2,TxsLabel,\n TxheaderLabel]\n #loop through block transaction data and construct a display label object and append to a list\n tmpTxList = []\n for i in range(len(Result['block']['transactions'])):\n txLabel = BlkSearchTx()\n if i%2!=1:\n pass\n else:\n txLabel.colors = (1,1,1,0.1)\n #'[color=556b2f][ref='+self.hashText+']'+self.hashText+'[/ref][/color]'\n txLabel.setText('[color=556b2f][ref='+Result['block']['transactions'][i]['hash']+']'+Result['block']['transactions'][i]['hash']+'[/ref][/color]',\n \"{:,} TRTL\".format(Result['block']['transactions'][i]['fee']/100),\n \"{:,} TRTL\".format(Result['block']['transactions'][i]['amount_out']/100),\n \"{:,}\".format(Result['block']['transactions'][i]['size']),\n Result['block']['transactions'][i]['hash']) #last hash is for searching purpose\n tmpTxList.append(txLabel)\n \n #append the tx data to the bigger list that has the general info\n blockInfoList+=tmpTxList\n #add last padding\n paddingLabel3 = BigLabel()\n blockInfoList.append(paddingLabel3)\n #loop through the label list and add all the labels in the search result display\n for lb in blockInfoList:\n self.root.blocksearchscreen.ids.blksearch_display.add_widget(lb)\n blockInfoList = []\n\n #this is initiates the tx searching thread\n def TxSchThread(self):\n self.root.blocksearchscreen.ids.txsearch_display.clear_widgets()\n txHash = self.root.blocksearchscreen.ids.searchTx_inputText.text\n \n txThread = threading.Thread(target = searchTx_q, args = (txHash,))\n txThread.start()\n \n SearchingLabel = Label(text=\"Searching Transaction...\", size_hint= (1, None), height = 50, font_size = 20,\n color = (0,0,0,0.8))\n self.root.blocksearchscreen.ids.txsearch_display.add_widget(SearchingLabel)\n #this is initiates the blk searching thread\n def BlkSchThread(self):\n self.root.blocksearchscreen.ids.blksearch_display.clear_widgets()\n blkHash = self.root.blocksearchscreen.ids.searchBlk_inputText.text\n \n blkthread = threading.Thread(target = searchBlk_q, args = (blkHash,))\n blkthread.start()\n \n SearchingLabel = Label(text=\"Searching Block...\", size_hint= (1, None), height = 50, font_size = 20,\n color 
= (0,0,0,0.8))\n        self.root.blocksearchscreen.ids.blksearch_display.add_widget(SearchingLabel)\n\n    def check_search_queue(self, dt): #checks whether either search queue has a result waiting\n        if tx_search_q.empty(): #this function is scheduled on start\n            pass\n        else:\n            self.update_TxSearchResult()\n        if blk_search_q.empty():\n            pass\n        else:\n            self.update_BlkSearchResult()\n    \n    def search_on_click(self, searchType, hashN): #search by clicking on the live explorer data\n        if searchType == 'tx':\n            self.root.blocksearchscreen.ids.txsearch_display.clear_widgets()\n            self.root.blocksearchscreen.ids.searchTx_inputText.text = hashN\n            \n            txThread = threading.Thread(target = searchTx_q, args = (hashN,))\n            txThread.start()\n            \n            SearchingLabel = Label(text=\"Searching Transaction...\", size_hint= (1, None), height = 50, font_size = 20,\n                                   color = (0,0,0,0.8))\n            self.root.blocksearchscreen.ids.txsearch_display.add_widget(SearchingLabel)\n        elif searchType == 'blk':\n            self.root.blocksearchscreen.ids.blksearch_display.clear_widgets()\n            self.root.blocksearchscreen.ids.searchBlk_inputText.text = hashN\n\n            blkthread = threading.Thread(target = searchBlk_q, args = (hashN,))\n            blkthread.start()\n            \n            SearchingLabel = Label(text=\"Searching Block...\", size_hint= (1, None), height = 50, font_size = 20,\n                                   color = (0,0,0,0.8))\n            self.root.blocksearchscreen.ids.blksearch_display.add_widget(SearchingLabel)\n#---------------------------------------------------------------------------------------------------------------\n    #this function is called when we click the turtle completo button on the start screen;\n    # it basically starts the explorer, but not the wallet yet\n    def start_on_blockexplorer(self):\n        self.root.transition.direction = 'left'\n        self.root.current = 'blockexplorerscreen'\n\n        self.TRTLdaemon = Turtle_Explorer()\n        self.TRTLdaemon.daemon = True\n        self.TRTLdaemon.start()\n\n        #self.status_text =\"DAEMON STATUS\\nSYNC...\\n\\nLOCAL: \\nNETWORK: \\nPEERS: \"\n\n        #the search buttons stay disabled until the daemon is synced\n        self.root.blocksearchscreen.ids.searchtxbutton.disabled = True\n        self.root.blocksearchscreen.ids.searchblkbutton.disabled = True\n\n        self.root.blocksearchscreen.ids.txsearch_display.add_widget(searchAvailable1)\n        self.root.blocksearchscreen.ids.blksearch_display.add_widget(searchAvailable2)\n        \n        #schedule a status update every 20 sec\n        Clock.schedule_interval(self.update_SynchStatus,20)\n        Clock.schedule_interval(self.check_search_queue, 2)\n        self.appIsRunning = True\n    \n    def quitApp(self):\n        self.on_stop()\n        App.get_running_app().stop()\n        Window.close()\n\n    #about pop-up window\n    def show_popup(self):\n        p = AboutPopup()\n        p.open()\n#-------------------------------------------\n    #when the wallet button on the side bar is clicked: if no wallet is open,\n    # the open-wallet popup will show;\n    #otherwise nothing happens\n    def ToWallet(self):\n        self.root.current = 'walletscreen'\n        if self.WalltOpened:\n            pass\n        else:\n            self.wp = WalletPrompt()\n            self.wp.open()\n    \n    #this is called when we open a wallet from the popup\n    def openWallet(self):\n        #check if the path is a .wallet file\n        path = self.wp.ids.wallet_path.text\n        self.wp.ids.wallet_path.text = \"\"\n        tmpfile = path.split('.')\n\n        #if the path does not end with .wallet, show an error\n        if tmpfile[-1]!='wallet':\n            self.walletPathError = \"Please provide a valid .wallet file\"\n        else:\n            path = path.replace('\\\\',\"\\\\\\\\\")\n            path = '\"'+path+'\"'\n            \n            password = self.wp.ids.wallet_password1.text\n            walletd_arg = ['turtleservices\\\\turtle-service','-w',path,'-p',password,'--daemon-address localhost --daemon-port 11898 
--rpc-password']\n            self.myTRTLWallet = Turtle_Wallet(walletd_arg)\n            self.myTRTLWallet.daemon = True\n            self.myTRTLWallet.start()\n            self.wp.dismiss()\n            Clock.schedule_interval(self.updateWalletdStat,5)\n            self.root.walletscreen.ids.sendtxbutton.disabled = True\n            self.WalltOpened = True\n\n    #this function is scheduled when a wallet file is opened;\n    #it updates the wallet info on the display\n    def updateWalletdStat(self, dt):\n        Address = self.myTRTLWallet.walletAddress\n        availableB = self.myTRTLWallet.AvailableBalance\n        lockedB = self.myTRTLWallet.lockedBalance\n        wpeers = self.myTRTLWallet.wpeers\n        wnheight = self.myTRTLWallet.walletNetBlock\n        wdheight = self.myTRTLWallet.walletLocalBlock\n        wFee = self.myTRTLWallet.wFee\n        walletTx = self.myTRTLWallet.walletTx\n        self.root.walletscreen.ids.wallettransaction.clear_widgets()\n        if Address is not None:\n            self.publicAddress = Address\n        self.walletFeeText = \"Node fee: {} TRTL  *The node fee will be automatically added to the send amount\".format(wFee)\n        \n        if wdheight is None or wnheight is None:\n            self.wallet_status_text = \"WALLETD STATUS\\nSYNC...\\n\\nLOCAL: \\nNETWORK: \\nPEERS: \"\n        elif wdheight < wnheight-1:\n            self.wallet_status_text = \"WALLETD STATUS\\nSYNC...\\n\\nLOCAL: {}\\nNETWORK: {}\\nPEERS: {}\".format(str(wdheight),str(wnheight),str(wpeers))\n            if lockedB is None or availableB is None:\n                pass\n            else:\n                self.unlockedbalance = \"Available Balance: {} TRTL  The correct balance will be displayed when walletd-service is fully synched...\".format(availableB)\n                self.lockedbalance = \"Locked Balance: {} TRTL\".format(lockedB)\n        else:\n            self.wallet_status_text = \"WALLETD STATUS\\n\\nLOCAL: {}\\nNETWORK: {}\\nPEERS: {}\".format(str(wdheight),str(wnheight),str(wpeers))\n            if lockedB is None or availableB is None:\n                pass\n            else:\n                self.unlockedbalance = \"Available Balance: {} TRTL\".format(availableB)\n                self.lockedbalance = \"Locked Balance: {} TRTL\".format(lockedB)\n            self.root.walletscreen.ids.sendtxbutton.disabled = False\n\n        if walletTx is not None:\n            i = 0\n            walletTx = reversed(walletTx)\n            for tx in walletTx:\n                wTx = walletTxData()\n                if i%2!=1:\n                    pass\n                else:\n                    wTx.colors = (1,1,1,0.1)\n                \n                txfee = round(tx['transactions'][0]['fee']/100,2)\n                txamount = round(tx['transactions'][0]['amount']/100,2)\n                if txamount < 0:\n                    txamount = round((tx['transactions'][0]['amount']/100 + txfee)*-1,2)\n                    transfertype = '[b]OUT[/b]'\n                else:\n                    transfertype = '[b]IN[/b]'\n                txhash = tx['transactions'][0]['transactionHash']\n                payId = tx['transactions'][0]['paymentId']\n                txTime = datetime.fromtimestamp(tx['transactions'][0]['timestamp']).strftime('%Y-%m-%d %H:%M:%S')\n                wTx.setText(transfertype,txhash, payId, txamount, txfee, txTime)\n                self.root.walletscreen.ids.wallettransaction.add_widget(wTx)\n                i+=1\n    #datetime.fromtimestamp(Result['block']['timestamp']).strftime('%Y-%m-%d %H:%M:%S')\n    #see to-do list\n    def createWallet(self):\n        global myApp\n        self.WalltOpened = True\n        self.wp.dismiss()\n    #see to-do list\n    def importWallet(self):\n        global myApp\n        self.WalltOpened = True\n        self.wp.dismiss()\n    #this function opens the wallet file chooser prompt\n    def browseWalletPath(self):\n        self.chF = ChooseWalletPrompt()\n        self.chF.open()\n    #this function gets the chosen file path\n    def getWalletPath(self):\n        path = str(self.chF.ids.file_icon_view.selection[0])\n        self.wp.ids.wallet_path.text = path\n        self.chF.dismiss()\n    #see to-do list\n    def sendTx(self):\n        address = self.root.walletscreen.ids.sendTxAddress.text\n        amount = self.root.walletscreen.ids.sendTxAmount.text\n        
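#the payment ID is optional here; presumably an empty string sends a plain transfer\n        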
paymentId = self.root.walletscreen.ids.sendTxPaymentId.text\n\n if address=='' or amount == '' :\n print(\"please check transaction info\")\n else:\n amount = float(amount)*100\n self.root.walletscreen.ids.sendTxAddress.text = ''\n self.root.walletscreen.ids.sendTxAmount.text = ''\n self.root.walletscreen.ids.sendTxPaymentId.text = ''\n self.myTRTLWallet.sendTransaction(address, amount, paymentId)\n\n \nif __name__ ==\"__main__\":\n myApp = Turtle_Completo()\n myApp.run()\n","sub_path":"turtle_completo.py","file_name":"turtle_completo.py","file_ext":"py","file_size_in_byte":39372,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"546489161","text":"from case.CBaseCase import *\n\nclass T3186_bmc_BMCLogSELWhenADPTSensNA(CBaseCase):\n '''\n [Purpose ]: The firmware will log an entry to the SEL when a sensor reading is \n unavailable that affects how the firmware controls the fan speed.\n [Author ]: eric.wang5@emc.com\n [Sprint ]: ATOM 2.0.7\n [Tickets ]: ATOM-1673\n [Platform]: Europa, Oberon, Hyperion\n [Type ]: Auto\n '''\n def __init__(self):\n CBaseCase.__init__(self, self.__class__.__name__)\n \n def config(self):\n CBaseCase.config(self)\n \n def test(self):\n \n _sp = self.enclosure.sp\n list_sensor_to_check = []\n if self.str_platform in ['hyperion', 'europa']:\n list_sensor_to_check = [0x30, 0x31, 0x32, 0x33, 0x48, 0x49]\n int_matched_cpu_sel = 2\n int_matched_dimm_sel = 4\n elif self.str_platform in ['oberon']:\n list_sensor_to_check = [0x30, 0x31, 0x48]\n int_matched_cpu_sel = 1\n int_matched_dimm_sel = 2\n else:\n pass\n \n # 1. Check sensor reading of CPU and DIMM temperature.\n self.log('INFO', '1. Check sensor reading of CPU and DIMM temperature.')\n for int_sensor_number in list_sensor_to_check:\n _sp.validate_sensor(int_sensor_number)\n \n # 2. Reboot host, and hold host in reset.\n self.log('INFO', '2. Reboot host, and hold host in reset.')\n _sp.reset_host_cold_reset()\n self.delay(5)\n _sp.hold_host_in_reset()\n _sp.sel.start_reserve_sel_in_memory()\n \n # 3. Wait 6 minutes\n self.delay(360)\n \n # 4. Check SEL, to verify whether BMC log SEL(s) to report the DIMM and CPU temp sensors are NA.\n self.log('INFO', '4. Check SEL, to verify whether BMC log SEL(s) to report the DIMM and CPU temp sensors are NA.')\n if _sp.sel.check_matched_sel_from_memory(\n str_generator_id = '0x20', \n str_sensor_type = '0xc8', \n str_sensor_num = '0xe8', \n str_event_type = '0x6f', \n lst_event_data_and_mask = ['0xa0', '0xff', '0x01', '0xff', '0x00', '0x00'],\n int_expected_matched_num = int_matched_cpu_sel) == 0:\n self.log('INFO','CPU thermal sensor fault SEL number matched.')\n else:\n raise Exception('FAIL', 'CPU thermal sensor fault SEL number not matched.')\n if _sp.sel.check_matched_sel_from_memory(\n str_generator_id = '0x20', \n str_sensor_type = '0xc8', \n str_sensor_num = '0xe8', \n str_event_type = '0x6f', \n lst_event_data_and_mask = ['0xa0', '0xff', '0x02', '0xff', '0x00', '0x00'],\n int_expected_matched_num = int_matched_dimm_sel) == 0:\n self.log('INFO','DIMM thermal sensor fault SEL number matched.')\n else:\n raise Exception('FAIL', 'DIMM thermal sensor fault SEL number not matched.')\n \n # 5. Check sensor reading of CPU and DIMM temperature.\n self.log('INFO', '5. 
Check sensor reading of CPU and DIMM temperature.')\n        b_result = True\n        list_available_sensor = []\n        for int_sensor_number in list_sensor_to_check:\n            try:\n                _sp.validate_sensor(int_sensor_number)\n                list_available_sensor.append(int_sensor_number)\n                b_result = False\n            except:\n                pass\n        if not b_result:\n            raise Exception('FAIL', 'These sensors are still available while the host is held in reset: %s' % \",\".join(str(s) for s in list_available_sensor))\n    \n    def deconfig(self):\n        _sp = self.enclosure.sp\n        CBaseCase.deconfig(self)\n        _sp.sel.stop_reserve_sel_in_memory()\n        _sp.release_host_from_reset()\n","sub_path":"case/OUT_OF_DATE/T3186_bmc_BMCLogSELWhenADPTSensNA.py","file_name":"T3186_bmc_BMCLogSELWhenADPTSensNA.py","file_ext":"py","file_size_in_byte":3871,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
{"seq_id":"452517820","text":"class Exemple:\n    \"\"\"\n    An example holds 2 values:\n    - a dictionary of attributes (dict)\n    - a label (str)\n    \"\"\"\n\n    def __init__(self, noms_attributs, valeurs_attributs, etiquette=\"\"):\n        \"\"\"\n        etiquette may be left unspecified, in which case the\n        example is unlabelled\n        \"\"\"\n        #if the arguments have the wrong types\n        if not isinstance(noms_attributs, list) \\\n           or not isinstance(valeurs_attributs, list):\n            raise TypeError(\"noms_attributs and valeurs_attributs must be\" \\\n                            \" lists, not {0} and {1}\" \\\n                            .format(type(noms_attributs),\n                                    type(valeurs_attributs)))\n        if not isinstance(etiquette, str):\n            raise TypeError(\"etiquette must be a str, not a {}\" \\\n                            .format(type(etiquette)))\n        #if the two lists do not have the same number of elements\n        if len(valeurs_attributs) != len(noms_attributs):\n            raise ValueError(\"noms_attributs and valeurs_attributs must \" \\\n                             \"have the same number of elements\")\n        self.etiquette = etiquette\n        self.dict_attributs = dict()\n        #add each attribute to the dictionary\n        for i in range(len(noms_attributs)):\n            self.dict_attributs[noms_attributs[i]] = valeurs_attributs[i]\n\n","sub_path":"Exemple.py","file_name":"Exemple.py","file_ext":"py","file_size_in_byte":1453,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
{"seq_id":"407944862","text":"from datetime import datetime\nimport re\nfrom typing import Callable\n\nfrom airflow import DAG\nfrom airflow.operators.dummy_operator import DummyOperator\nfrom airflow.operators.subdag_operator import SubDagOperator\nfrom airflow.operators import ExtractURLOperator, DownloadOperator, FakeDownloadOperator, ChecksumOperator\n\ndefault_args = {\n    \"start_date\": datetime(2019, 2, 21),\n    \"owner\": \"mapohl\",\n    \"base_folder\": \"/usr/local/data\"\n}\n\n\ndef extract_vbb_download_url(**kwargs) -> str:\n    url = kwargs[\"url\"]\n    response = kwargs[\"response\"]\n\n    match = re.search(\n        r'',\n        response.content.decode(\"utf-8\"))\n\n    if not match:\n        return None\n\n    return \"{}://{}{}\".format(url.scheme, url.netloc, match.group(1))\n\n\ndef extract_vrs_download_url(**kwargs) -> str:\n    response = kwargs[\"response\"]\n\n    match = re.search(\n        r'GTFS-Daten[^<]*',\n        response.content.decode(\"utf-8\"))\n\n    if not match:\n        return None\n\n    return match.group(1)\n\n\ndef extract_kvv_download_url(**kwargs) -> str:\n    response = kwargs[\"response\"]\n\n    match = re.search(\n        r'[^<]*',\n        response.content.decode(\"utf-8\"))\n\n    if not match:\n        return None\n\n    return match.group(1)\n\n\ndef create_provider_dag(\n        parent_dag_id: str,\n        provider_id: str,\n        provider_description: str,\n        provider_url: str,\n        
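# extract_func: callback that parses the fetched provider page and returns the GTFS archive URL\n        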
extract_func: Callable,\n check_url: bool,\n def_args: dict,\n source_file: str = None):\n provider_dag_id = \"{}.{}\".format(parent_dag_id, provider_id)\n\n def_args[\"provider_id\"] = provider_id\n provider_dag = DAG(dag_id=provider_dag_id,\n description=\"This DAG extracts the GTFS archive provided by {}.\".format(provider_description),\n default_args=def_args,\n catchup=False)\n\n checksum_operator = ChecksumOperator(dag=provider_dag,\n task_id=\"checksum_task\")\n\n if source_file:\n fake_download_operator = FakeDownloadOperator(dag=provider_dag,\n task_id=\"download_task\",\n source_file=source_file)\n\n fake_download_operator >> checksum_operator\n else:\n extract_url_operator = ExtractURLOperator(dag=provider_dag,\n task_id=\"extract_url_task\",\n url=provider_url,\n extract_download_url=extract_func,\n check_url=check_url)\n\n download_operator = DownloadOperator(dag=provider_dag,\n task_id=\"download_task\")\n\n extract_url_operator >> download_operator >> checksum_operator\n\n return provider_dag\n\n\ndag_metadata = [\n (\"vbb\", \"VBB Berlin/Brandenburg\",\n \"http://www.vbb.de/unsere-themen/vbbdigital/api-entwicklerinfos/datensaetze\",\n extract_vbb_download_url,\n False,\n None),\n (\"vrs\", \"VRS Köln\",\n \"https://www.vrsinfo.de/fahrplan/oepnv-daten-fuer-webentwickler.html\",\n extract_vrs_download_url,\n False,\n None),\n (\"kvv\", \"Karlsruher Verkehrsverbund\",\n \"https://www.kvv.de/fahrt-planen/fahrplaene/open-data.html\",\n extract_kvv_download_url,\n False,\n None)\n]\n\nmain_dag_id = \"gtfs_pipeline\"\nwith DAG(dag_id=main_dag_id,\n description=\"Extracts the GTFS data from various sources.\",\n schedule_interval=None,\n default_args=default_args,\n catchup=False) as dag:\n\n provider_start = DummyOperator(task_id=\"start\")\n\n extract_tasks = []\n for prov_id, prov_desc, prov_url, prov_extract_func, prov_check_url, prov_source_file, in dag_metadata:\n sub_dag = create_provider_dag(parent_dag_id=main_dag_id,\n provider_id=prov_id,\n provider_description=prov_desc,\n provider_url=prov_url,\n extract_func=prov_extract_func,\n check_url=prov_check_url,\n def_args=default_args,\n source_file=prov_source_file)\n sub_dag_task = SubDagOperator(\n task_id=prov_id,\n dag=dag,\n subdag=sub_dag)\n\n provider_start >> sub_dag_task\n","sub_path":"airflow/dags/extract_dag.py","file_name":"extract_dag.py","file_ext":"py","file_size_in_byte":4752,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"262730054","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('products', '0016_auto_20151110_2049'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='order',\n name='date_placed',\n field=models.DateField(null=True),\n ),\n ]\n","sub_path":"products/migrations/0017_auto_20151111_0201.py","file_name":"0017_auto_20151111_0201.py","file_ext":"py","file_size_in_byte":403,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"1369415","text":"from rest_framework import viewsets, status\nfrom rest_framework.pagination import PageNumberPagination\nfrom rest_framework.response import Response\nfrom collections import OrderedDict\nfrom django.contrib.auth.models import User\nfrom rest_framework.decorators import action\nfrom rest_framework.authentication import SessionAuthentication, BasicAuthentication, TokenAuthentication\n\nfrom .utils import 
*\nfrom ..models import *\nfrom .serializers.Navigation import *\nfrom .serializers.Other import *\nfrom rest_framework import response\nfrom rest_framework.authtoken.models import Token\n\n\nclass NavigationCategoryViewSet(viewsets.ModelViewSet):\n\n    queryset = NavigationCategory.objects.all()\n    serializer_class = NavigationCategorySerializer\n\n    action_to_serializer = {\n        \"list\": NavigationCategoryDetailSerializer,\n        \"retrieve\": NavigationCategoryDetailSerializer,\n    }\n\n    def get_serializer_class(self):\n        return self.action_to_serializer.get(\n            self.action,\n            self.serializer_class\n        )\n\n\nclass SubNavigationCategoryViewSet(viewsets.ModelViewSet):\n\n    queryset = SubNavigationCategory.objects.all()\n    serializer_class = SubNavigationCategorySerializer\n\n    action_to_serializer = {\n        \"list\": SubNavigationCategoryRetrieveSerializer,\n        \"retrieve\": SubNavigationCategoryRetrieveSerializer\n    }\n\n    def get_serializer_class(self):\n        return self.action_to_serializer.get(\n            self.action,\n            self.serializer_class\n        )\n\n\nclass CatalogPagination(PageNumberPagination):\n\n    page_size = 4\n    page_size_query_param = 'page_size'  #DRF reads the client's page size from this query parameter\n    max_page_size = 50\n\n    def get_paginated_response(self, data):\n        return Response(OrderedDict([\n            ('total_count', self.page.paginator.count),\n            ('page_size', self.page_size),\n            ('current_page', self.page.number),\n            ('items', data)\n        ]))\n\n\nclass AboutUsCategoryViewSet(viewsets.ModelViewSet):\n\n    queryset = AboutUsCategory.objects.all()\n    serializer_class = AboutUsCategorySerializer\n\n\nclass OurAchievementsViewSet(viewsets.ModelViewSet):\n\n    queryset = OurAchievements.objects.filter(in_archive=False)\n    serializer_class = OurAchievementsSerializer\n\n\nclass UserView(viewsets.ModelViewSet):\n\n    queryset = User.objects.all()\n    serializer_class = UserSerializer\n\n    @action(methods=['get'], detail=False, url_path='user-data')\n    def get_user_data(self, *args, **kwargs):\n        user = self.request.user\n        if user.is_authenticated:\n            token_key = Token.objects.get(user=user).key\n            is_anon = user.username == f'unknown{user.id}'\n        else:\n            user, token_key = create_new_anon()\n            customer = Customer.objects.create(user=user)\n            Cart.objects.create(owner=customer, for_anonymous_user=True)\n            is_anon = True\n        return response.Response({'user': UserSerializer(user).data, 'is_anon': is_anon, 'token': token_key})\n\n    @action(methods=['post'], detail=False, url_path='update_user_data')\n    def update_user_data(self, *args, **kwargs):\n        user = self.request.user\n        customer = Customer.objects.get(user=user)\n        customer.first_name = self.request.data['firstName']\n        customer.second_name = self.request.data['secondName']\n        customer.father_name = self.request.data['fatherName']\n        customer.address = self.request.data['adress']\n        customer.phone = self.request.data['phoneNumber']\n        customer.save()\n        user.customer = customer\n        user.save()\n        return response.Response({'detail': 'success'})\n\n    @action(methods=['post'], detail=False, url_path='change_password')\n    def change_password(self, *args, **kwargs):\n        user = self.request.user\n        user.set_password(self.request.data['password'])\n        user.save()  #set_password only hashes in memory; save() persists the new password\n        return response.Response({'detail': 'success'})\n\n\nclass RegisterView(viewsets.ModelViewSet):\n    authentication_classes = [TokenAuthentication, SessionAuthentication, BasicAuthentication]\n\n    @action(methods=['post'], detail=False)\n    def register_user(self, *args, **kwargs):\n\n        userEmail = self.request.data['email']\n\n        user = User.objects.filter(email=userEmail)\n        if not user:\n            user_id = User.objects.all().last().id\n            userfirstname = self.request.data['firstName']\n            userlastname = 
self.request.data['secondName']\n            fatherName = self.request.data['fatherName']\n            username = f'User _{user_id}'\n            new_user = User.objects.create(\n                username=username,\n                email=userEmail\n            )\n            new_user.set_password(self.request.data['password'])\n            new_user.save()\n            customer = Customer.objects.create(\n                user=new_user,\n                first_name=userfirstname,\n                second_name=userlastname,\n                father_name=fatherName\n            )\n            Cart.objects.create(owner=customer)\n            Token.objects.get_or_create(user=new_user)\n            return response.Response({'detail': 'User successfully registered', 'username': username}, status=status.HTTP_200_OK)\n        return response.Response({'detail': 'This email is already linked to another account'}, status=status.HTTP_400_BAD_REQUEST)\n\n\nclass OrderView(viewsets.ModelViewSet):\n\n    queryset = Order.objects.all()\n    serializer_class = OrderSerializer\n\n    @action(methods=['post'], detail=False, url_path='create')\n    def create_order(self, *args, **kwargs):\n        customer = Customer.objects.get(user=self.request.user)\n        cart_id = self.request.data['cart_id']\n        cart = Cart.objects.get(pk=cart_id)\n        place_type_key = int(self.request.data['place_type'])  # 0 or 1\n        place_type = ADDRESS_TYPE[place_type_key][0]\n        place = self.request.data['customer']['address']\n        if place_type_key == 0:\n            email = self.request.user.email\n            full_name = customer.get_full_name\n            phone = customer.phone\n        else:\n            email = self.request.data['customer']['email']\n            full_name = self.request.data['customer']['fullName']\n            phone = self.request.data['customer']['phone']\n        cart.put_in_order()\n        Order.objects.create(\n            customer=customer,\n            cart=cart,\n            place_type=place_type,\n            place=place,\n            email=email,\n            customer_full_name=full_name,\n            phone=phone\n        )\n        Cart.objects.create(owner=customer)\n        return response.Response({'detail': 'success'}, status=status.HTTP_200_OK)\n\n    @action(methods=['get'], detail=False, url_path='current_user_orders')\n    def get_user_orders(self, *args, **kwargs):\n        customer = Customer.objects.get(user=self.request.user)\n        orders = Order.objects.filter(customer=customer)\n        if orders:\n            return response.Response(OrderSerializer(orders, many=True).data)\n        return response.Response({'detail': 'Current user has no orders'}, status=status.HTTP_204_NO_CONTENT)\n","sub_path":"mainapp/api/OtherViews.py","file_name":"OtherViews.py","file_ext":"py","file_size_in_byte":7060,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
{"seq_id":"282557777","text":"import synapse.exc as s_exc\nimport synapse.lib.urlhelp as s_urlhelp\n\nimport synapse.tests.utils as s_t_utils\n\nclass UrlTest(s_t_utils.SynTest):\n    def test_urlchop(self):\n\n        url = 'http://vertex.link:8080/hehe.html'\n        info = s_urlhelp.chopurl(url)\n        self.eq({'scheme': 'http',\n                 'port': 8080,\n                 'host': 'vertex.link',\n                 'path': '/hehe.html',\n                 },\n                info\n                )\n\n        url = 'tcp://pennywise:candy@vertex.link/'\n        info = s_urlhelp.chopurl(url)\n        self.eq({'scheme': 'tcp',\n                 'user': 'pennywise',\n                 'host': 'vertex.link',\n                 'path': '/',\n                 'passwd': 'candy',\n                 },\n                info\n                )\n\n        url = 'tcp://pennywise@vertex.link'\n        info = s_urlhelp.chopurl(url)\n        self.eq({'scheme': 'tcp',\n                 'user': 'pennywise',\n                 'host': 'vertex.link',\n                 'path': '',\n                 },\n                info\n                )\n\n        url = 'tcp://1.2.3.4:8080/api/v1/wow?key=valu&foo=bar'\n        info = s_urlhelp.chopurl(url)\n        self.eq({'scheme': 'tcp',\n                 'host': '1.2.3.4',\n                 'port': 8080,\n                 'path': '/api/v1/wow',\n                 'query': {'key': 'valu',\n                           'foo': 'bar',\n                           }\n                 },\n                info\n                )\n\n        url = 'http://[1fff:0:a88:85a3::ac1f]:8001/index.html'\n        
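# a bracketed IPv6 literal should come back as the bare host, with its colons intact\n        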
info = s_urlhelp.chopurl(url)\n        self.eq({'scheme': 'http',\n                 'host': '1fff:0:a88:85a3::ac1f',\n                 'port': 8001,\n                 'path': '/index.html',\n                 },\n                info\n                )\n\n        url = 'http://::1/index.html'\n        info = s_urlhelp.chopurl(url)\n        self.eq({'scheme': 'http',\n                 'host': '::1',\n                 'path': '/index.html',\n                 },\n                info\n                )\n\n        self.raises(s_exc.BadUrl, s_urlhelp.chopurl,\n                    'www.vertex.link')\n","sub_path":"synapse/tests/test_lib_urlhelp.py","file_name":"test_lib_urlhelp.py","file_ext":"py","file_size_in_byte":2142,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
{"seq_id":"627720178","text":"\"\"\"My chocobo cooking script.\"\"\"\n\"\"\"I left the chocobo here because I want to thank the chocobo package\nauthor for teaching me how to package!\"\"\"\nimport os\nimport warnings\nimport scipy\nfrom sklearn.preprocessing import StandardScaler\nimport scipy.stats\nfrom statsmodels.distributions.empirical_distribution import ECDF\nfrom datetime import datetime, timedelta\nimport numpy as np\nimport pandas as pd\n\ndef last_period(df, interval, periods, date_column, to_past, unique_id='NULL'):\n    \"\"\"df: the dataframe\n    interval: 'hours', 'days' or 'weeks'\n    periods: number of intervals to look back\n    to_past: list of columns to divide by their summed value over the matching past period\n    unique_id: (optional) list of key columns\n    \"\"\"\n\n    warnings.filterwarnings(\"ignore\")\n\n    def concatenate_list_data(items):\n        result = ''\n        for element in items:\n            result += str(element)\n        return result\n\n    df['num'] = df.index\n\n    prefix = 'l'+str(periods)+interval[0]\n\n    if interval == 'weeks':\n        df[prefix] = df[date_column].apply(lambda x: x- timedelta(weeks=periods))\n    if interval == 'days':\n        df[prefix] = df[date_column].apply(lambda x: x- timedelta(days=periods))\n    if interval == 'hours':\n        df[prefix] = df[date_column].apply(lambda x: x- timedelta(hours=periods))\n\n    if unique_id == 'NULL':\n        for col in to_past:\n            col_name = 'v'+col\n            df[col_name] = df['num'].apply(lambda x: df[col][x]/\n                           df[(df[date_column]==df[prefix][x])][col].sum())\n    else:\n        if len(unique_id)==1:\n            for col in to_past:\n                col_name = 'v'+col\n                df[col_name] = df['num'].apply(lambda x: df[col][x]/\n                               df[(df[unique_id[0]]==df[unique_id[0]][x]) & (df[date_column]==df[prefix][x])][col].sum())\n        else:\n            df_clean = df[unique_id]\n            df['primarykey'] = df_clean.sum(axis=1).astype(str)\n            for col in to_past:\n                col_name = 'v'+col\n                df[col_name] = df['num'].apply(lambda x: df[col][x]/\n                               df[(df['primarykey']==df['primarykey'][x]) & (df[date_column]==df[prefix][x])][col].sum())\n\n    df = df[~df.isin([np.nan, np.inf, -np.inf]).any(axis=1)]\n    return df","sub_path":"likelycause2/help_func.py","file_name":"help_func.py","file_ext":"py","file_size_in_byte":2357,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
{"seq_id":"627094140","text":"from vehicle import *\n\n# Car: speed, color, wheels, seats\n\nclass Car(Vehicle):\n    def __init__(self, speed, color, wheels, seats):\n        super().__init__(speed, color)\n        self.wheels = wheels\n        self.seats = seats\n\n\nmyCar = Car(10, 'white', 4, 4)","sub_path":"11. 
Module/Car Manager/car.py","file_name":"car.py","file_ext":"py","file_size_in_byte":269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"491997594","text":"#!/usr/bin/env python3\n############################################################\n# Program is part of MiNoPy #\n# Author: Sara Mirzaee, Zhang Yunjun, Heresh Fattahi #\n############################################################\n# Modified from prep4timeseries.py in ISCE-2.2.0/contrib/stack/topsStack\n\nimport warnings\nimport logging\nwarnings.filterwarnings(\"ignore\")\n\nmpl_logger = logging.getLogger('matplotlib')\nmpl_logger.setLevel(logging.WARNING)\n\nimport isce\nfrom isceobj.Planet.Planet import Planet\nimport os\nimport glob\nimport shelve\nimport argparse\nimport numpy as np\nfrom minopy.objects.utils import read_attribute, read\nfrom mintpy.utils import isce_utils, ptime, readfile, writefile, utils as ut\n\n\n\nEXAMPLE = \"\"\"example:\n prep_slc_isce.py -s ./merged/SLC -m ./reference/IW1.xml -b ./baselines -g ./merged/geom_reference #for topsStack\n prep_slc_isce.py -s ./merged/SLC -m .merged/SLC/20190510/referenceShelve/data.dat -b ./baselines -g ./merged/geom_reference #for stripmapStack\n \"\"\"\n\n\ndef create_parser():\n \"\"\"Command line parser.\"\"\"\n parser = argparse.ArgumentParser(description='Prepare ISCE metadata files.',\n formatter_class=argparse.RawTextHelpFormatter,\n epilog=EXAMPLE)\n parser.add_argument('-s', '--slc-dir', dest='slcDir', type=str, default=None,\n help='The directory which contains all SLCs\\n'+\n 'e.g.: $PROJECT_DIR/merged/SLC')\n parser.add_argument('-f', '--file-pattern', nargs = '+', dest='slcFiles', type=str,\n default=['*.slc.full'],\n help='A list of files that will be used in minopy\\n'\n 'e.g.: 20180705.slc.full')\n parser.add_argument('-m', '--meta-file', dest='metaFile', type=str, default=None,\n help='Metadata file to extract common metada for the stack:\\n'\n 'e.g.: for ISCE/topsStack: reference/IW3.xml')\n parser.add_argument('-b', '--baseline-dir', dest='baselineDir', type=str, default=None,\n help=' directory with baselines ')\n parser.add_argument('-g', '--geometry-dir', dest='geometryDir', type=str, default=None,\n help=' directory with geometry files ')\n parser.add_argument('--force', dest='update_mode', action='store_false',\n help='Force to overwrite all .rsc metadata files.')\n return parser\n\n\ndef cmd_line_parse(iargs = None):\n parser = create_parser()\n inps = parser.parse_args(args=iargs)\n if all(not i for i in [inps.slcDir, inps.geometryDir, inps.metaFile]):\n parser.print_usage()\n raise SystemExit('error: at least one of the following arguments are required: -s, -g, -m')\n return inps\n\n\n#########################################################################\ndef load_product(xmlname):\n \"\"\"Load the product using Product Manager.\"\"\"\n from iscesys.Component.ProductManager import ProductManager as PM\n pm = PM()\n pm.configure()\n obj = pm.loadProduct(xmlname)\n return obj\n\n\ndef extract_tops_metadata(xml_file):\n \"\"\"Read metadata from xml file for Sentinel-1/TOPS\n Parameters: xml_file : str, path of the .xml file, i.e. 
reference/IW1.xml\n Returns: meta : dict, metadata\n \"\"\"\n import isce\n from isceobj.Planet.Planet import Planet\n\n obj = load_product(xml_file)\n burst = obj.bursts[0]\n burstEnd = obj.bursts[-1]\n\n metadata = {}\n metadata['prf'] = burst.prf\n metadata['startUTC'] = burst.burstStartUTC\n metadata['stopUTC'] = burstEnd.burstStopUTC\n metadata['radarWavelength'] = burst.radarWavelength\n metadata['rangePixelSize'] = burst.rangePixelSize\n metadata['startingRange'] = burst.startingRange\n metadata['passDirection'] = burst.passDirection\n metadata['polarization'] = burst.polarization\n metadata['trackNumber'] = burst.trackNumber\n metadata['orbitNumber'] = burst.orbitNumber\n\n time_seconds = (burst.burstStartUTC.hour * 3600.0 +\n burst.burstStartUTC.minute * 60.0 +\n burst.burstStartUTC.second)\n metadata['CENTER_LINE_UTC'] = time_seconds\n\n orbit = burst.orbit\n peg = orbit.interpolateOrbit(burst.sensingMid, method='hermite')\n\n Vs = np.linalg.norm(peg.getVelocity())\n metadata['satelliteSpeed'] = Vs\n metadata['azimuthPixelSize'] = Vs*burst.azimuthTimeInterval\n\n refElp = Planet(pname='Earth').ellipsoid\n llh = refElp.xyz_to_llh(peg.getPosition())\n refElp.setSCH(llh[0], llh[1], orbit.getENUHeading(burst.sensingMid))\n metadata['earthRadius'] = refElp.pegRadCur\n metadata['altitude'] = llh[2]\n\n # for Sentinel-1\n metadata['beam_mode'] = 'IW'\n metadata['swathNumber'] = burst.swathNumber\n\n # Sentinel-1 TOPS spatial resolution\n iw_str = 'IW2'\n if os.path.basename(xml_file).startswith('IW'):\n iw_str = os.path.splitext(os.path.basename(xml_file))[0]\n metadata['azimuthResolution'] = isce_utils.S1_TOPS_RESOLUTION[iw_str]['azimuthResolution']\n metadata['rangeResolution'] = isce_utils.S1_TOPS_RESOLUTION[iw_str]['rangeResolution']\n\n # 1. multipel subswaths\n xml_files = glob.glob(os.path.join(os.path.dirname(xml_file), 'IW*.xml'))\n if len(xml_files) > 1:\n swath_num = [load_product(fname).bursts[0].swathNumber for fname in xml_files]\n metadata['swathNumber'] = ''.join(str(i) for i in sorted(swath_num))\n\n # 2. calculate ASF frame number for Sentinel-1\n metadata['firstFrameNumber'] = int(0.2 * (burst.burstStartUTC - obj.ascendingNodeTime).total_seconds())\n metadata['lastFrameNumber'] = int(0.2 * (burstEnd.burstStopUTC - obj.ascendingNodeTime).total_seconds())\n return metadata, burst\n\n\ndef extract_stripmap_metadata(meta_file):\n \"\"\"Read metadata from shelve file for StripMap stack from ISCE\n Parameters: meta_file : str, path of the shelve file, i.e. 
referenceShelve/data.dat\n Returns: meta : dict, metadata\n \"\"\"\n\n if os.path.basename(meta_file) == \"data.dat\": #shelve file from stripmapStack\n fbase = os.path.splitext(meta_file)[0]\n with shelve.open(fbase, flag='r') as mdb:\n frame = mdb['frame']\n\n elif meta_file.endswith(\".xml\"): #XML file from stripmapApp\n frame = load_product(meta_file)\n\n else:\n raise ValueError('un-recognized isce/stripmap metadata file: {}'.format(meta_file))\n\n metadata = {}\n metadata['prf'] = frame.PRF\n metadata['startUTC'] = frame.sensingStart\n metadata['stopUTC'] = frame.sensingStop\n metadata['radarWavelength'] = frame.radarWavelegth\n metadata['rangePixelSize'] = frame.instrument.rangePixelSize\n metadata['startingRange'] = frame.startingRange\n metadata['polarization'] = str(frame.polarization).replace('/', '')\n if metadata['polarization'].startswith(\"b'\"):\n metadata['polarization'] = metadata['polarization'][2:4]\n metadata['trackNumber'] = frame.trackNumber\n metadata['orbitNumber'] = frame.orbitNumber\n\n time_seconds = (frame.sensingStart.hour * 3600.0 +\n frame.sensingStart.minute * 60.0 +\n frame.sensingStart.second)\n metadata['CENTER_LINE_UTC'] = time_seconds\n\n orbit = frame.orbit\n peg = orbit.interpolateOrbit(frame.sensingMid, method='hermite')\n\n Vs = np.linalg.norm(peg.getVelocity())\n metadata['satelliteSpeed'] = Vs\n metadata['azimuthPixelSize'] = Vs/frame.PRF\n\n refElp = Planet(pname='Earth').ellipsoid\n llh = refElp.xyz_to_llh(peg.getPosition())\n refElp.setSCH(llh[0], llh[1], orbit.getENUHeading(frame.sensingMid))\n metadata['earthRadius'] = refElp.pegRadCur\n metadata['altitude'] = llh[2]\n\n # for StripMap\n metadata['beam_mode'] = 'SM'\n return metadata, frame\n\n\ndef extract_multilook_number(geom_dir, metadata=dict()):\n '''\n for fbase in ['hgt','lat','lon','los']:\n fbase = os.path.join(geom_dir, fbase)\n fnames = glob.glob('{}*.rdr'.format(fbase)) + glob.glob('{}*.geo'.format(fbase))\n if len(fnames) > 0:\n fullXmlFile = '{}.full.xml'.format(fnames[0])\n if os.path.isfile(fullXmlFile):\n fullXmlDict = readfile.read_isce_xml(fullXmlFile)\n xmlDict = readfile.read_attribute(fnames[0])\n metadata['ALOOKS'] = int(int(fullXmlDict['LENGTH']) / int(xmlDict['LENGTH']))\n metadata['RLOOKS'] = int(int(fullXmlDict['WIDTH']) / int(xmlDict['WIDTH']))\n break\n '''\n # default\n for key in ['ALOOKS', 'RLOOKS']:\n if key not in metadata:\n metadata[key] = 1\n return metadata\n\ndef extract_isce_metadata(meta_file, geom_dir=None, rsc_file=None, update_mode=True):\n \"\"\"Extract metadata from ISCE stack products\n Parameters: meta_file : str, path of metadata file, reference/IW1.xml or referenceShelve/data.dat\n geom_dir : str, path of geometry directory.\n rsc_file : str, output file name of ROIPAC format rsc file\n Returns: metadata : dict\n \"\"\"\n\n if not rsc_file:\n rsc_file = os.path.join(os.path.dirname(meta_file), 'data.rsc')\n\n # check existing rsc_file\n if update_mode and ut.run_or_skip(rsc_file, in_file=meta_file, check_readable=False) == 'skip':\n return readfile.read_roipac_rsc(rsc_file)\n\n # 1. 
extract metadata from XML / shelve file\n fbase = os.path.basename(meta_file)\n if fbase.startswith(\"IW\"):\n print('extract metadata from ISCE/topsStack xml file:', meta_file)\n #metadata = extract_tops_metadata(meta_file)[0]\n metadata = isce_utils.extract_tops_metadata(meta_file)[0]\n metadata['sensor_type'] = 'tops'\n elif fbase.startswith(\"data\"):\n print('extract metadata from ISCE/stripmapStack shelve file:', meta_file)\n #metadata = extract_stripmap_metadata(meta_file)[0]\n metadata = isce_utils.extract_stripmap_metadata(meta_file)[0]\n metadata['sensor_type'] = 'stripmap'\n elif fbase.endswith(\".xml\"):\n #metadata = extract_stripmap_metadata(meta_file)[0]\n metadata = isce_utils.extract_stripmap_metadata(meta_file)[0]\n\n else:\n raise ValueError(\"unrecognized ISCE metadata file: {}\".format(meta_file))\n\n # 2. extract metadata from geometry file\n if geom_dir:\n metadata = isce_utils.extract_geometry_metadata(geom_dir, metadata)\n\n # 3. common metadata\n metadata['PROCESSOR'] = 'isce'\n metadata['ANTENNA_SIDE'] = '-1'\n\n # convert all value to string format\n for key, value in metadata.items():\n metadata[key] = str(value)\n\n # write to .rsc file\n metadata = readfile.standardize_metadata(metadata)\n if rsc_file:\n print('writing ', rsc_file)\n writefile.write_roipac_rsc(metadata, rsc_file)\n return metadata\n\n\ndef add_slc_metadata(metadata_in, dates=[], baseline_dict={}):\n \"\"\"Add metadata unique for each interferogram\n Parameters: metadata_in : dict, input common metadata for the entire dataset\n dates : list of str in YYYYMMDD or YYMMDD format\n baseline_dict : dict, output of baseline_timeseries()\n Returns: metadata : dict, updated metadata\n \"\"\"\n # make a copy of input metadata\n metadata = {}\n for k in metadata_in.keys():\n metadata[k] = metadata_in[k]\n metadata['DATE'] = '{}'.format(dates[1])\n if baseline_dict:\n bperp_top = baseline_dict[dates[1]][0] - baseline_dict[dates[0]][0]\n bperp_bottom = baseline_dict[dates[1]][1] - baseline_dict[dates[0]][1]\n metadata['P_BASELINE_TOP_HDR'] = str(bperp_top)\n metadata['P_BASELINE_BOTTOM_HDR'] = str(bperp_bottom)\n return metadata\n\n\n#########################################################################\ndef read_tops_baseline(baseline_file):\n bperps = []\n with open(baseline_file, 'r') as f:\n for line in f:\n l = line.split(\":\")\n if l[0] == \"Bperp (average)\":\n bperps.append(float(l[1]))\n bperp_top = np.mean(bperps)\n bperp_bottom = np.mean(bperps)\n return [bperp_top, bperp_bottom]\n\n\ndef read_stripmap_baseline(baseline_file):\n fDict = readfile.read_template(baseline_file, delimiter=' ')\n bperp_top = float(fDict['PERP_BASELINE_TOP'])\n bperp_bottom = float(fDict['PERP_BASELINE_BOTTOM'])\n return [bperp_top, bperp_bottom]\n\n\ndef read_baseline_timeseries(baseline_dir, beam_mode='IW'):\n \"\"\"Read bperp time-series from files in baselines directory\n Parameters: baseline_dir : str, path to the baselines directory\n beam_mode : str, IW for Sentinel-1/TOPS\n SM for StripMap data\n Returns: bDict : dict, in the following format:\n {'20141213': [0.0, 0.0],\n '20141225': [104.6, 110.1],\n ...\n }\n \"\"\"\n print('read perp baseline time-series from {}'.format(baseline_dir))\n # grab all existed baseline files\n bFiles = sorted(glob.glob(os.path.join(baseline_dir, '*/*.txt'))) #for TOPS\n bFiles += sorted(glob.glob(os.path.join(baseline_dir, '*.txt'))) #for stripmap\n\n # read files into dict\n bDict = {}\n for bFile in bFiles:\n dates = os.path.basename(bFile).split('.txt')[0].split('_')\n if 
beam_mode == 'IW':\n bDict[dates[1]] = read_tops_baseline(bFile)\n elif beam_mode == 'SM':\n bDict[dates[1]] = read_stripmap_baseline(bFile)\n else:\n raise ValueError('Unrecognized beam_mode/processor: {}'.format(beam_mode))\n bDict[dates[0]] = [0, 0]\n return bDict\n\n\n#########################################################################\ndef prepare_geometry(geom_dir, metadata=dict(), update_mode=True):\n \"\"\"Prepare and extract metadata from geometry files\"\"\"\n print('prepare .rsc file for geometry files')\n # grab all existed files\n\n isce_files = ['hgt', 'lat', 'lon', 'los', 'shadowMask', 'incLocal']\n isce_files = [os.path.join(os.path.abspath(geom_dir), x + '.rdr.full.xml') for x in isce_files]\n # isce_files = [os.path.join(os.path.abspath(geom_dir), '{}.rdr.full.xml'.format(i))\n # for i in ['hgt', 'lat', 'lon', 'los', 'shadowMask', 'incLocal']]\n if not os.path.exists(isce_files[0]):\n isce_files = ['hgt', 'lat', 'lon', 'los', 'shadowMask', 'incLocal']\n isce_files = [os.path.join(os.path.abspath(geom_dir), x + '.rdr.xml') for x in isce_files]\n\n isce_files = [i for i in isce_files if os.path.isfile(i)]\n # write rsc file for each file\n for isce_file in isce_files:\n # prepare metadata for current file\n geom_metadata = read_attribute(isce_file.split('.xml')[0], metafile_ext='.xml')\n geom_metadata.update(metadata)\n # write .rsc file\n rsc_file = isce_file.split('.xml')[0]+'.rsc'\n writefile.write_roipac_rsc(geom_metadata, rsc_file,\n update_mode=update_mode,\n print_msg=True)\n return metadata\n\n\ndef prepare_stack(inputDir, filePattern, metadata=dict(), baseline_dict=dict(), update_mode=True):\n print('prepare .rsc file for ', filePattern)\n if not os.path.exists(glob.glob(os.path.join(os.path.abspath(inputDir), '*', filePattern + '.xml'))[0]):\n filePattern = filePattern.split('.full')[0]\n isce_files = sorted(glob.glob(os.path.join(os.path.abspath(inputDir), '*', filePattern + '.xml')))\n if len(isce_files) == 0:\n raise FileNotFoundError('no file found in pattern: {}'.format(filePattern))\n slc_dates = np.sort(os.listdir(inputDir))\n # write .rsc file for each interferogram file\n num_file = len(isce_files)\n prog_bar = ptime.progressBar(maxValue=num_file)\n for i in range(num_file):\n isce_file = isce_files[i].split('.xml')[0]\n # prepare metadata for current file\n slc_metadata = read_attribute(isce_file, metafile_ext='.xml')\n slc_metadata.update(metadata)\n dates = [slc_dates[0], os.path.basename(os.path.dirname(isce_file))]\n slc_metadata = add_slc_metadata(slc_metadata, dates, baseline_dict)\n\n # write .rsc file\n rsc_file = isce_file+'.rsc'\n writefile.write_roipac_rsc(slc_metadata, rsc_file,\n update_mode=update_mode,\n print_msg=False)\n prog_bar.update(i+1, suffix='{}_{}'.format(dates[0], dates[1]))\n prog_bar.close()\n return\n\n\n#########################################################################\ndef main(iargs=None):\n inps = cmd_line_parse(iargs)\n\n # read common metadata\n metadata = {}\n if inps.metaFile:\n metadata = extract_isce_metadata(inps.metaFile,\n geom_dir=inps.geometryDir,\n update_mode=inps.update_mode)\n\n # prepare metadata for geometry file\n if inps.geometryDir:\n metadata = prepare_geometry(inps.geometryDir,\n metadata=metadata,\n update_mode=inps.update_mode)\n\n # read baseline info\n baseline_dict = {}\n if inps.baselineDir:\n baseline_dict = read_baseline_timeseries(inps.baselineDir,\n beam_mode=metadata['beam_mode'])\n\n # prepare metadata for ifgram file\n if inps.slcDir and inps.slcFiles:\n for namePattern in 
inps.slcFiles:\n prepare_stack(inps.slcDir, namePattern,\n metadata=metadata,\n baseline_dict=baseline_dict,\n update_mode=inps.update_mode)\n print('Done.')\n return\n\n\n#########################################################################\nif __name__ == '__main__':\n \"\"\"Main driver.\"\"\"\n main()\n","sub_path":"minopy/prep_slc_isce.py","file_name":"prep_slc_isce.py","file_ext":"py","file_size_in_byte":17850,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"652636531","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Dec 1 10:26:24 2018\r\n\r\n@author: Aolme8\r\n\"\"\"\r\nimport cv2 as cv2\r\nimport time as time\r\nimport imutils\r\nimport numpy as np\r\nfrom matplotlib import pyplot as plt\r\nfrom imutils import face_utils\r\nimport imutils\r\nimport dlib\r\n#from keras import layers\r\n#from keras import models\r\n#import scipy.ndimage\r\n#import os\r\n\r\n\r\nvideo = 'test.mp4'\r\n\r\n\r\n# Function from: http://programmingcomputervision.com/\r\ndef draw_flow(im,flow,step=8):\r\n \"\"\" Plot optical flow at sample points\r\n spaced step pixels apart. \"\"\"\r\n \r\n h,w = im.shape[:2]\r\n y,x = np.mgrid[step/2:h:step,step/2:w:step].reshape(2,-1)\r\n fx,fy = flow[y.astype(int),x.astype(int)].T\r\n \r\n # create line endpoints\r\n lines = np.vstack([x,y,x+fx,y+fy]).T.reshape(-1,2,2)\r\n lines = np.int32(lines)\r\n \r\n # create image and draw\r\n vis = cv2.cvtColor(im,cv2.COLOR_GRAY2BGR)\r\n for (x1,y1),(x2,y2) in lines:\r\n cv2.line(vis,(x1,y1),(x2,y2),(0,255,0),1)\r\n cv2.circle(vis,(x1,y1),1,(0,255,0), -1)\r\n \r\n return vis\r\n\r\n# Modified Function from: https://www.pyimagesearch.com/2017/04/10/detect-eyes-nose-lips-jaw-dlib-opencv-python/\r\ndef detect_face(image_file, shape_predictor):\r\n roi = []\r\n # initialize dlib's face detector (HOG-based) and then create\r\n # the facial landmark predictor\r\n detector = dlib.get_frontal_face_detector()\r\n predictor = dlib.shape_predictor(shape_predictor)\r\n\r\n image = image_file\r\n gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\r\n \r\n # detect faces in the grayscale image\r\n rects = detector(gray, 1)\r\n \r\n # loop over the face detections\r\n for (i, rect) in enumerate(rects):\r\n # determine the facial landmarks for the face region, then\r\n # convert the landmark (x, y)-coordinates to a NumPy array\r\n shape = predictor(gray, rect)\r\n shape = face_utils.shape_to_np(shape)\r\n\r\n # loop over the face parts individually\r\n for (name, (i, j)) in face_utils.FACIAL_LANDMARKS_IDXS.items():\r\n if (name == 'mouth'):\r\n # loop over the subset of facial landmarks, drawing the\r\n # specific face part\r\n for (x, y) in shape[i:j]:\r\n cv2.circle(image, (x, y), 1, (0, 0, 255), -1)\r\n\r\n # extract the ROI of the face region as a separate image\r\n (x, y, w, h) = cv2.boundingRect(np.array([shape[i:j]]))\r\n #r = cv2.selectROI(clone)\r\n roi.append((x, y, w, h))\r\n\r\n return roi\r\n\r\n \r\ndef talking(video):\r\n false_positives = 0\r\n false_negatives = 0\r\n true_positives = 0\r\n true_negatives = 0\r\n \r\n count = 0\r\n\r\n print (\"Start\")\r\n camera = cv2.VideoCapture(video)\r\n time.sleep(0.25)\r\n \r\n ret, frame1 = camera.read()\r\n \r\n prvs = cv2.cvtColor(frame1,cv2.COLOR_BGR2GRAY)\r\n prvs = imutils.resize(prvs, width=700)\r\n frame1 = imutils.resize(frame1, width=700)\r\n hsv = np.zeros_like(frame1)\r\n hsv[...,1] = 255\r\n \r\n print(\"Start getting frames\")\r\n camera.set(cv2.CAP_PROP_FPS, 10)\r\n while True:\r\n count = count + 1\r\n 
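#reset the per-frame flag; a pure-green pixel at (3,3) apparently marks ground-truth talking frames\r\n        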
green = False\r\n #slow down the fps\r\n grabbed, frame = camera.read()\r\n if grabbed:\r\n frame = imutils.resize(frame, width=700)\r\n pass\r\n else:\r\n break\r\n \r\n color = frame[3,3]\r\n print(color)\r\n if (color[1] == 255):\r\n green = True\r\n \r\n mouth_roi = detect_face(frame, 'shape_predictor_68_face_landmarks.dat')\r\n\r\n # blur the frame and convert it to the HSV color space\r\n blurred = cv2.GaussianBlur(frame, (11,11), 0)\r\n kernel2 = np.array([[0, -1, 0],\r\n [-1, 5, -1],\r\n [0, -1, 0]])\r\n blurred = cv2.filter2D(blurred, -1, kernel2)\r\n blurred = cv2.medianBlur(blurred, 15)\r\n hsv = cv2.cvtColor(blurred, cv2.COLOR_BGR2HSV)\r\n\r\n \r\n grayframe = cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY) \r\n grayframe = cv2.medianBlur(grayframe, 5) \r\n current_frame = grayframe\r\n \r\n flow = cv2.calcOpticalFlowFarneback(prvs, grayframe, None, 0.5, 3, 18, 3, 5, 1.2, 0)\r\n mag, ang = cv2.cartToPolar(flow[...,0], flow[...,1])\r\n mag[mag == -np.inf] = 0.001\r\n hsv[...,0] = ang*180/np.pi/2\r\n ang = ang*180/np.pi/2\r\n \r\n \r\n prvs = current_frame.astype(np.uint8)\r\n\r\n motion_vectors = draw_flow(current_frame, flow)\r\n \r\n count_roi = 0\r\n count_r = 0\r\n talking = False\r\n for roi in mouth_roi:\r\n for x in range(roi[0], roi[0]+roi[2]):\r\n for y in range(roi[1], roi[1]+roi[3]):\r\n if (mag[y, x] >= 1 and mag[y, x] <=2.5):\r\n talking = True\r\n if (green == True):\r\n count_roi = count_roi + 1\r\n elif (green == False):\r\n count_r = count_r + 1\r\n if (talking == True):\r\n cv2.putText(motion_vectors, \"talking\", (roi[0],roi[1]+15), cv2.FONT_HERSHEY_DUPLEX, 0.3, (255,0,0), 1)\r\n cv2.rectangle(motion_vectors, (roi[0],roi[1]), (roi[0]+roi[2], roi[1]+roi[3]), (255,0,0), 1)\r\n \r\n if (talking == False and green == False):\r\n true_negatives = true_negatives + 1\r\n elif (talking == False and green == True):\r\n false_negatives = false_negatives + 1\r\n \r\n \r\n if (count_roi != 0):\r\n true_positives = true_positives + 1\r\n elif(count_r != 0):\r\n false_positives = false_positives + 1\r\n \r\n \r\n cv2.imshow('Motion vector plot', motion_vectors)\r\n \r\n print (\"True Positives: \")\r\n print (true_positives)\r\n print (\"True Negatives: \")\r\n print (true_negatives)\r\n print (\"False Positives: \")\r\n print (false_positives)\r\n print (\"False Negatives: \")\r\n print (false_negatives)\r\n \r\n if cv2.waitKey(1) & 0xFF == ord('q'):\r\n break\r\n \r\n\r\n\r\n camera.release()\r\n cv2.destroyAllWindows()\r\n print (\"FINAL RESULTS\")\r\n print (\"True Positives: \")\r\n print (true_positives)\r\n print (\"True Negatives: \")\r\n print (true_negatives)\r\n print (\"False Positives: \")\r\n print (false_positives)\r\n print (\"False Negatives: \")\r\n print (false_negatives)\r\n return (true_positives + true_negatives)/count\r\n\r\n\r\n\r\n### Implementation Starts Here###\r\n\r\naccuracy = talking(video)\r\nprint (accuracy)\r\nprint (\"finish\")\r\n\r\n#model = models.Sequential()\r\n#model.add(layers.Conv2D(32, (3, 3), activation='relu',\r\n# input_shape=(150, 150, 3)))\r\n#model.add(layers.MaxPooling2D((2, 2)))\r\n#model.add(layers.Conv2D(64, (3, 3), activation='relu'))\r\n#model.add(layers.MaxPooling2D((2, 2)))\r\n#model.add(layers.Conv2D(128, (3, 3), activation='relu'))\r\n#model.add(layers.MaxPooling2D((2, 2)))\r\n#model.add(layers.Conv2D(128, (3, 3), activation='relu'))\r\n#model.add(layers.MaxPooling2D((2, 2)))\r\n#model.add(layers.Flatten())\r\n#model.add(layers.Dropout(0.5)) # Dropout layer at 50%\r\n#model.add(layers.Dense(512, 
activation='relu'))\r\n#model.add(layers.Dense(1, activation='sigmoid'))\r\n#\r\n#\r\n#model.summary()\r\n#\r\n#\r\n#from keras import optimizers\r\n#\r\n#model.compile(loss='binary_crossentropy',\r\n#              optimizer=optimizers.RMSprop(lr=1e-4),\r\n#              metrics=['acc'])\r\n#\r\n#\r\n#\r\n#\r\n#''' Data Processing '''\r\n#\r\n#\r\n#from keras.preprocessing.image import ImageDataGenerator\r\n#\r\n#train_datagen = ImageDataGenerator(rescale=1./255)\r\n#test_datagen = ImageDataGenerator(rescale=1./255)\r\n#\r\n#\r\n#base_dir = '../Keras/cats_and_dogs_small' # Need to change the dictionary if it is necessary\r\n#train_dir = os.path.join(base_dir, 'train')\r\n#validation_dir = os.path.join(base_dir, 'validation')\r\n#\r\n#train_generator = train_datagen.flow_from_directory(\r\n#        train_dir, # Images are here.\r\n#        target_size=(150, 150), # All images will be resized to 150x150\r\n#        batch_size=20, # Process twenty images at a time.\r\n#        class_mode='binary') # 0/1 entries.\r\n#\r\n#\r\n## validation_generator processes data from given directory.\r\n#validation_generator = test_datagen.flow_from_directory(\r\n#        validation_dir,\r\n#        target_size=(150, 150),\r\n#        batch_size=20,\r\n#        class_mode='binary')\r\n#\r\n#for data_batch, labels_batch in train_generator:\r\n#    print('data batch shape:', data_batch.shape)\r\n#    print('labels batch shape:', labels_batch.shape)\r\n#    break\r\n#\r\n#history = model.fit_generator(\r\n#      train_generator,\r\n#      steps_per_epoch=100, # MUST be carefully set. See above.\r\n#      epochs=30, # MUST be carefully set. See above.\r\n#      validation_data=validation_generator,\r\n#      validation_steps=50) # MUST be carefully set. See above.\r\n#\r\n#model.save('talking_1.h5')\r\n#\r\n## Plots\r\n#''' Plot the loss and accuracy of the model '''\r\n#\r\n#import matplotlib.pyplot as plt\r\n#\r\n#acc = history.history['acc']\r\n#val_acc = history.history['val_acc']\r\n#loss = history.history['loss']\r\n#val_loss = history.history['val_loss']\r\n#\r\n#epochs = range(len(acc))\r\n#\r\n#\r\n#plt.plot(epochs, acc, 'bo', label='Training acc')\r\n#plt.plot(epochs, val_acc, 'b', label='Validation acc')\r\n#plt.title('Training and validation accuracy')\r\n#plt.legend()\r\n#\r\n#plt.figure()\r\n#\r\n#plt.plot(epochs, loss, 'bo', label='Training loss')\r\n#plt.plot(epochs, val_loss, 'b', label='Validation loss')\r\n#plt.title('Training and validation loss')\r\n#plt.legend()\r\n#\r\n#plt.show()","sub_path":"talking.py","file_name":"talking.py","file_ext":"py","file_size_in_byte":9665,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
{"seq_id":"206477123","text":"from GNode import GNode\nfrom ReduceGNode import ReducedGnode\nfrom Settings import settings\nimport mysql.connector\nimport mysql.connector.pooling\nimport pickle\nimport json\nfrom Choices import *\nimport copy\nimport time\n\n\n# Repository class for storing nodes (singleton)\nclass NodeRepository:\n\n    def __init__(self):\n        pass\n\n    def getnode(self, id):\n        pass\n\n    def remove(self, id):\n        pass\n\n    def addnode(self, node):\n        pass\n\n    def printl(self):\n        pass\n\n    def getnum(self):\n        pass\n\n    def loadNodes(self, idList):\n        pass\n\n    def cleanTable(self):\n        pass\n\n    def saveLeafIdList(self, idList, id):\n        pass\n\n\nclass MemoryNodeRepository(NodeRepository):\n    \"\"\"docstring for MemoryNodeRepository\"\"\"\n\n    def __init__(self):\n        self._repository = []  # all nodes needed during processing\n        self._nodeId = []  # ids of the stored nodes, for quick lookup\n        self._choices = []\n        self._choicesId = {}\n        self._leafIdList = []\n        self._Gnodenum = 0\n\n    def initRepository(self,nodes):\n        for node in nodes:\n
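            # register each prebuilt node with this in-memory repository\n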
            self.addnode(node)\n\n    def getChoice(self,id):\n        if str(id) in self._choicesId:\n            index = self._choicesId[str(id)]\n            return self._choices[index]\n        else:\n            return -1\n\n    def addChoice(self,choice):\n        if str(choice.getId()) not in self._choicesId:\n            self._choicesId[str(choice.getId())] = len(self._choices)\n            self._choices.append(choice)\n\n    def loadChoices(self, idList):\n        choices = []\n        List = {}  # avoid fetching the same choice twice\n        for id in idList:\n            if str(id) not in List:\n                List[str(id)] = \"\"\n                choice = self.getChoice(id)\n                choices.append(choice)\n        return choices\n\n    def getnode(self, id):\n        if id in self._nodeId:\n            index = self._nodeId.index(id)\n            return self._repository[index]\n        else:\n            return -1\n\n    def remove(self, id):\n        if id in self._nodeId:\n            index = self._nodeId.index(id)\n            del self._repository[index]\n            del self._nodeId[index]\n        else:\n            print(\"No such node\")\n\n    def addnode(self, node):\n        if node.getId() not in self._nodeId:\n            self._repository.append(node)\n            self._nodeId.append(node.getId())\n\n    def printl(self):\n        return self._nodeId\n\n    def getnum(self):\n        return len(self._repository)\n\n    def loadNodes(self, idList):\n        nodes = []\n        List = []  # avoid fetching the same node twice\n        for id in idList:\n            if id not in List:\n                List.append(id)\n                n = self.getnode(id)\n                nodes.append(n)\n        return nodes\n\n    def loadAllNodes(self):\n        pass\n\n    def cleanTable(self):\n        pass\n\n    def saveLeafIdList(self, idList, id):\n        self._leafIdList = copy.deepcopy(idList)\n        self._Gnodenum = id\n\n    def getLeafIdList(self):\n        return self._leafIdList, self._Gnodenum\n\n    def saveUpperNodeIds(self, upperIdList, newIdList):\n        pass\n\n    def getUpperNodeIds(self):\n        pass\n\n    # merged: this class used to define updateNode twice, so the one-argument\n    # variant was silently shadowed; a default paramsList keeps both call styles working\n    def updateNode(self, node, paramsList=None):\n        pass\n\n    def addNodeToBuffer(self,node):\n        pass\n\n    def delNodeFromBuffer(self, node):\n        pass\n\n    def bufferUpdateToDB(self):\n        pass\n\n    def printType(self):\n        print(\"memory mode:\")\n\n\nclass DataBaseNodeRepository(NodeRepository):\n\n    # create a MySQL connection pool shared by all repository operations\n    def __init__(self):\n        self.cnxpool = mysql.connector.pooling.MySQLConnectionPool(pool_name=\"nodepool\",\n                                                                   pool_size=30,\n                                                                   **settings.dbConfig['local'])\n        self.buffer = {}\n        self._choices = []\n        self._choicesId = {}\n\n    def getChoice(self,id):\n        if str(id) in self._choicesId:\n            index = self._choicesId[str(id)]\n            return self._choices[index]\n        else:\n            return -1\n\n    def addChoice(self,choice):\n        if str(choice.getId()) not in self._choicesId:\n            self._choicesId[str(choice.getId())] = len(self._choices)\n            self._choices.append(choice)\n\n    def loadChoices(self, idList):\n        choices = []\n        List = {}  # avoid fetching the same choice twice\n        for id in idList:\n            if str(id) not in List:\n                List[str(id)] = \"\"\n                choice = self.getChoice(id)\n                choices.append(choice)\n        return choices\n\n    # fetch the node with the given id, trying the buffer before the database\n    def getnode(self, id):\n        if str(id) in self.buffer:\n            return self.buffer[str(id)]\n        cnx = self.cnxpool.get_connection()\n        cursor = cnx.cursor()\n        try:\n            selectSql = \"select * from `GNode` where `id` = '\" + str(id) + \"'\"\n            cursor.execute(selectSql)\n            result = cursor.fetchall()\n            if (len(result) == 0):\n                print(\"getnode\")\n                print(id)\n                return -1\n            if (result[0][6] == \"GNode\"):\n                node = GNode()\n                GNode.Id = GNode.Id - 1\n            else:\n                node = ReducedGnode()\n                GNode.Id = GNode.Id - 1\n            node.setStateSet(json.loads(result[0][5]))\n            node.setId(result[0][0])\n            node.setOutEdges(pickle.loads(result[0][1]))\n            node.setChildrenId(json.loads(result[0][2]))\n            node.setParentsId(json.loads(result[0][3]))\n            node.setCmtsFromDB(pickle.loads(result[0][4]))\n            node.setType(result[0][6])\n            return node\n        except Exception:\n            print(\"Exception occurred\")\n            print(id)\n            raise\n
        finally:\n            cnx.close()\n\n    def remove(self, id):\n        if str(id) in self.buffer:\n            del self.buffer[str(id)]\n        cnx = self.cnxpool.get_connection()\n        cursor = cnx.cursor()\n        try:\n            delSql = \"delete from `GNode` where `id` = '\" + str(id) + \"'\"\n            cursor.execute(delSql)\n            cnx.commit()\n        except Exception:\n            print(\"No such node\")\n            cnx.rollback()\n        finally:\n            cnx.close()\n\n    def addnode(self, node):\n        # print(\"addnode\",node.getId())\n        cnx = self.cnxpool.get_connection()\n        cursor = cnx.cursor()\n        try:\n            insertSql = \"insert into `GNode`(`id`,`outEdges`,`childrenId`,`parentsId`,`CMTs`,`stateSet`,`type`) values (%s,%s,%s,%s,%s,%s,%s)\"\n            cursor.execute(insertSql, (node.getId(), pickle.dumps(node.getOutEdges()), json.dumps(node.getChildrenId()),\n                                       json.dumps(node.getParentsId()), pickle.dumps(node.getCmts()),\n                                       json.dumps(node.getStateSet()), node.getType()))\n            cnx.commit()\n        except Exception:\n            print(\"Exception occurred\")\n            cnx.rollback()\n            raise\n        finally:\n            cnx.close()\n\n    def addNodeToBuffer(self,node):\n        self.buffer[str(node.getId())] = node\n\n    def delNodeFromBuffer(self,node):\n        if str(node.getId()) in self.buffer:\n            del self.buffer[str(node.getId())]\n\n    def printl(self):\n        cnx = self.cnxpool.get_connection()\n        cursor = cnx.cursor()\n        try:\n            selectSql = \"select id from `GNode`\"\n            cursor.execute(selectSql)\n            result = cursor.fetchall()\n            idList = []\n            for id in result:\n                idList.append(id[0])\n            return idList\n        except Exception:\n            print(\"Exception occurred\")\n            raise\n        finally:\n            cnx.close()\n\n    def getnum(self):\n        cnx = self.cnxpool.get_connection()\n        cursor = cnx.cursor()\n        try:\n            selectSql = \"select count(*) from `GNode`\"\n            cursor.execute(selectSql)\n            result = cursor.fetchall()\n            return result[0][0]\n        except Exception:\n            print(\"Exception occurred in getnum\")\n            raise\n        finally:\n            cnx.close()\n\n    def loadNodes(self, idList):\n        if (len(idList) == 0):\n            return []\n        cnx = self.cnxpool.get_connection()\n        cursor = cnx.cursor()\n        nodeList = []\n        try:\n            idListTmp = [str(id) for id in idList]\n            idListstr = \"(\" + \",\".join(idListTmp) + \")\"\n            selectSql = \"select * from `GNode` where `id` in \" + idListstr\n            cursor.execute(selectSql)\n            resultSet = cursor.fetchall()\n            for result in resultSet:\n                if (result[6] == \"GNode\"):\n                    node = GNode()\n                    GNode.Id = GNode.Id - 1\n                else:\n                    node = ReducedGnode()\n                    GNode.Id = GNode.Id - 1\n                node.setStateSet(json.loads(result[5]))\n                node.setId(result[0])\n                node.setOutEdges(pickle.loads(result[1]))\n                node.setChildrenId(json.loads(result[2]))\n                node.setParentsId(json.loads(result[3]))\n                node.setCmtsFromDB(pickle.loads(result[4]))\n                node.setType(result[6])\n                nodeList.append(node)\n            return nodeList\n        except Exception:\n            print(\"Exception occurred\")\n            raise\n        finally:\n            cnx.close()\n\n    def cleanTable(self):\n        cnx = self.cnxpool.get_connection()\n        cursor = cnx.cursor()\n        try:\n            dropSql = \"drop table `GNode`\"\n            cursor.execute(dropSql)\n            dropSql = \"drop table `leafidlist`\"\n            cursor.execute(dropSql)\n            dropSql = \"drop table `mergeLeafOver`\"\n            cursor.execute(dropSql)\n            createSql = (\"CREATE TABLE IF NOT EXISTS `GNode`(\" +\n                         \"`id` INT UNSIGNED,\" +\n                         \"`outEdges` LONGBLOB,\" +\n                         \"`childrenId` LONGText,\" +\n                         \"`parentsId` LONGText,\" +\n                         \"`CMTs` LONGBlob,\" +\n                         \"`stateSet` LONGText,\" +\n                         \"`type` VARCHAR(20),\" +\n                         \"PRIMARY KEY (`id`),\" +\n                         \"INDEX GnodeID (`id`)\" +\n                         \")ENGINE=InnoDB DEFAULT CHARSET=utf8\")\n            cursor.execute(createSql)\n            createSql = (\"CREATE TABLE IF NOT EXISTS `leafidlist`(\" +\n                         \"`id` INT UNSIGNED AUTO_INCREMENT,\" +\n                         \"`leaflist`
 MediumText,\" +\n                         \"`GnodeId` INT,\" +\n                         \"PRIMARY KEY (`id`)\" +\n                         \")ENGINE=InnoDB DEFAULT CHARSET=utf8\")\n            cursor.execute(createSql)\n            createSql = (\"CREATE TABLE IF NOT EXISTS `mergeLeafOver`(\" +\n                         \"`id` INT UNSIGNED AUTO_INCREMENT,\" +\n                         \"`upperNodeIds` MediumText,\" +\n                         \"`newIdList` MediumText,\" +\n                         \"PRIMARY KEY (`id`)\" +\n                         \")ENGINE=InnoDB DEFAULT CHARSET=utf8\")\n            cursor.execute(createSql)\n            cnx.commit()\n        except Exception:\n            cnx.rollback()\n            raise\n        finally:\n            cnx.close()\n\n    def getParams(self,node,paramsList):\n        allParams=['outEdges','childrenId','parentsId','CMTs','stateSet','type','id']\n        params = []\n        for param in paramsList:\n            if(param == allParams[0]):\n                params.append(pickle.dumps(node.getOutEdges()))\n            elif(param == allParams[1]):\n                params.append(json.dumps(node.getChildrenId()))\n            elif(param == allParams[2]):\n                params.append(json.dumps(node.getParentsId()))\n            elif(param == allParams[3]):\n                params.append(pickle.dumps(node.getCmts()))\n            elif(param == allParams[4]):\n                params.append(json.dumps(node.getStateSet()))\n            elif(param == allParams[5]):\n                params.append(node.getType())\n            elif(param == allParams[6]):\n                params.append(node.getId())\n        params.append(node.getId())\n        params = tuple(params)\n        return params\n\n    # paramsList may contain any of:\n    # ['outEdges','childrenId','parentsId','CMTs','stateSet','type','id']\n    # merged: this class used to define updateNode twice and the one-argument\n    # variant was shadowed; paramsList=None now updates every column\n    def updateNode(self,node,paramsList=None):\n        if paramsList is None:\n            paramsList = ['outEdges','childrenId','parentsId','CMTs','stateSet','type']\n        if str(node.getId()) in self.buffer:\n            return\n        cnx = self.cnxpool.get_connection()\n        cursor = cnx.cursor()\n        try:\n            updateSql = \"update `GNode` set\"\n            for param in paramsList:\n                updateSql = updateSql + \" `\" + param + '` = %s,'\n            updateSql = updateSql[:-1]\n            updateSql = updateSql+' where `id` = %s'\n            params = self.getParams(node,paramsList)\n            cursor.execute(updateSql, params)\n            cnx.commit()\n        except Exception:\n            print(\"Exception occurred\")\n            cnx.rollback()\n            raise\n        finally:\n            cnx.close()\n\n    def batchUpdate(self,cursor,params):\n        paramData=[]\n        updateSql = \"UPDATE `GNode` SET \"\n        dataList=[]\n        for id in self.buffer:\n            dataList.append(self.getParams(self.buffer[id],params))\n        # params is a subset of ['outEdges','childrenId','parentsId','CMTs','stateSet','type']\n        for i in range(0,len(params)):\n            updateSql = updateSql+\"`\"+params[i]+\"` = CASE `id` \"\n            for data in dataList:\n                updateSql = updateSql+\"WHEN %s THEN %s \"\n                paramData.append(data[-1])\n                paramData.append(data[i])\n            updateSql=updateSql+\"END,\"\n        updateSql = updateSql[:-1]\n        updateSql = updateSql+\" WHERE `id` IN (\"\n        for data in dataList:\n            updateSql = updateSql+\"%s,\"\n            paramData.append(data[-1])\n        updateSql = updateSql[:-1]\n        updateSql = updateSql+\")\"\n        return updateSql,paramData\n\n    def bufferUpdateToDB(self):\n        if len(self.buffer)==0:\n            return\n        cnx = self.cnxpool.get_connection()\n        cursor = cnx.cursor()\n        try:\n            updateSql, data =
 self.batchUpdate(cursor,[\"outEdges\",\"childrenId\",\"parentsId\"])\n            cursor.execute(updateSql,data)\n            cnx.commit()\n        except Exception:\n            print(\"Exception occurred\")\n            cnx.rollback()\n            raise\n        finally:\n            cnx.close()\n\n    def saveLeafIdList(self, idList, id):\n        cnx = self.cnxpool.get_connection()\n        cursor = cnx.cursor()\n        try:\n            insertSql = \"insert into `leafidlist`(`leaflist`,`GnodeId`) values('\" + json.dumps(idList) + \"','\" + str(\n                id) + \"')\"\n            cursor.execute(insertSql)\n            cnx.commit()\n        except Exception:\n            print(\"Exception occurred\")\n            cnx.rollback()\n            raise\n        finally:\n            cnx.close()\n\n    def getLeafIdList(self):\n        cnx = self.cnxpool.get_connection()\n        cursor = cnx.cursor()\n        try:\n            searchSql = \"select * from `leafidlist`\"\n            cursor.execute(searchSql)\n            result = cursor.fetchall()\n            print(result)\n            idList = json.loads(result[0][1])\n            #print(idList)\n            return idList, result[0][2]\n        except Exception:\n            print(\"Exception occurred\")\n            raise\n        finally:\n            cnx.close()\n\n    def saveUpperNodeIds(self, upperIdList, newIdList):\n        cnx = self.cnxpool.get_connection()\n        cursor = cnx.cursor()\n        try:\n            insertSql = \"insert into `mergeLeafOver`(`upperNodeIds`,`newIdList`) values('\" + json.dumps(\n                upperIdList) + \"','\" + json.dumps(newIdList) + \"')\"\n            cursor.execute(insertSql)\n            cnx.commit()\n        except Exception:\n            print(\"Exception occurred\")\n            cnx.rollback()\n            raise\n        finally:\n            cnx.close()\n\n    def getUpperNodeIds(self):\n        cnx = self.cnxpool.get_connection()\n        cursor = cnx.cursor()\n        try:\n            searchSql = \"select * from `mergeLeafOver`\"\n            cursor.execute(searchSql)\n            result = cursor.fetchall()\n            return json.loads(result[0][1]), json.loads(result[0][2])\n        except Exception:\n            print(\"Exception occurred\")\n            raise\n        finally:\n            cnx.close()\n\n    def loadAllNodes(self):\n        cnx = self.cnxpool.get_connection()\n        cursor = cnx.cursor()\n        nodeList = []\n        try:\n            selectSql = \"select * from `GNode`\"\n            cursor.execute(selectSql)\n            resultSet = cursor.fetchall()\n            for result in resultSet:\n                if (result[6] == \"GNode\"):\n                    node = GNode()\n                    GNode.Id = GNode.Id - 1\n                else:\n                    node = ReducedGnode()\n                    GNode.Id = GNode.Id - 1\n                node.setStateSet(json.loads(result[5]))\n                node.setId(result[0])\n                node.setOutEdges(pickle.loads(result[1]))\n                node.setChildrenId(json.loads(result[2]))\n                node.setParentsId(json.loads(result[3]))\n                node.setCmtsFromDB(pickle.loads(result[4]))\n                node.setType(result[6])\n                nodeList.append(node)\n            return nodeList\n        except Exception:\n            print(\"Exception occurred\")\n            raise\n        finally:\n            cnx.close()\n\n\nif (settings.mode == \"database\" or settings.mode == 'db'):\n    nodeRepository = DataBaseNodeRepository()\nelse:\n    nodeRepository = MemoryNodeRepository()\n# nodeRepository.cleanTable()\nif __name__ == '__main__':\n    if (settings.mode == \"database\" or settings.mode == 'db'):\n        GnodeList = DataBaseNodeRepository()\n    else:\n        GnodeList = MemoryNodeRepository()\n    GnodeList.cleanTable()","sub_path":"SmartContract1-master/NodeRepository.py","file_name":"NodeRepository.py","file_ext":"py","file_size_in_byte":19007,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
{"seq_id":"365844356","text":"import numpy as np\n\ndef readMats(n):\n    return [np.loadtxt(\"/tmp/matrix%i\"%i,skiprows=1) for i in range(n)]\n\ndef writeMats(*mats):\n    for (i,mat) in enumerate(mats):\n        mat = np.atleast_2d(mat)\n        assert mat.ndim == 2\n        with open(\"/tmp/matrix%i\"%i,\"w\") as fd:\n            fd.write(\"%i %i\\n\"%mat.shape)\n
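            # write the matrix body beneath the 'rows cols' header line\n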
            np.savetxt(fd,mat,fmt=\"%.4f\")\n","sub_path":"john/doo/comm.py","file_name":"comm.py","file_ext":"py","file_size_in_byte":364,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
{"seq_id":"419685557","text":"# -*- coding: utf-8 -*-\nfrom src.Package.package import ReqElecPath,FrameHeader,FrameTail,Time\nfrom src.CommonUse.staticVar import staticVar\nfrom src.Thread.thread_route_map import InsertRouteThread\n\nimport time\nimport wx\n\n\nclass ReqElecPathDialog(wx.Dialog):\n    def __init__(self,parent):\n        wx.Dialog.__init__(self,None,-1,u\"Electromagnetic path distribution data request\",size=(400,650))\n\n        self.SetFont(wx.Font(10, wx.ROMAN, wx.NORMAL, wx.LIGHT, underline=False, faceName=u\"Microsoft YaHei\",\n                             encoding=wx.FONTENCODING_DEFAULT))\n\n        panel=wx.Panel(self,-1)\n\n        ###############################\n        self.parent=parent\n        self.tail=FrameTail(0,0,0xAA)\n\n        self.id=staticVar.getid()\n        self.lowid=self.id&0x00FF\n        self.highid=self.id>>8\n\n        self.List=staticVar.getCentreFreq()\n        self.ListFreq=staticVar.getFreq()\n        ###############################################\n        self.radioBox1=wx.RadioBox(panel,-1,choices=[u\"Fetch locally\",u\"Fetch from central station\"],style=wx.RA_VERTICAL)\n        self.radioBox2=wx.RadioBox(panel,-1,choices=[u\"Show historical distribution\",u\"Show real-time distribution\"],style=wx.RA_VERTICAL)\n        self.radioBox3=wx.RadioBox(panel,-1,choices=[u\"Select frequency\",u\"Manual frequency\"])\n        self.radioBox4=wx.RadioBox(panel,-1,choices=[u\"YES\",u\"NO\"])\n\n        self.radioBox1.SetSelection(1)\n        self.radioBox2.SetSelection(1)\n        self.radioBox3.SetSelection(1)\n        self.radioBox4.SetSelection(1)\n        self.FreqSection=wx.ComboBox(panel,-1,u\"FM broadcast band\",choices=self.List)\n        self.FreqSection.SetSelection(0)\n        self.CentreFreq=wx.TextCtrl(panel,-1,size=(80,25))\n        self.BandWidth=wx.TextCtrl(panel,-1,size=(80,25))\n\n        #############################################\n        curTime=time.strftime('%Y%m%d%H%M%S',time.localtime(time.time()))\n        Year=int(curTime[0:4])\n        Month=int(curTime[4:6])\n        Day=int(curTime[6:8])\n        Hour=int(curTime[8:10])\n        Min=int(curTime[10:12])+2\n\n\n        ###############################################\n\n        self.StartTimeYear=wx.ComboBox(panel,-1,str(Year),choices=[\"2015\",\"2016\",\"2017\",\"2018\"])\n        self.StartTimeMonth=wx.ComboBox(panel,-1,str(Month),choices=[\"1\",\"2\",\"3\",\"4\",\"5\",\"6\",\"7\",\"8\",\"9\",\"10\",\"11\",\"12\"])\n        self.StartTimeDay=wx.TextCtrl(panel,-1,str(Day),size=(60,25))\n        self.StartTimeHour=wx.TextCtrl(panel,-1,str(Hour),size=(60,25))\n        self.StartTimeMinute=wx.TextCtrl(panel,-1,str(Min),size=(60,25))\n\n        self.EndTimeYear=wx.ComboBox(panel,-1,str(Year),choices=[\"2015\",\"2016\",\"2017\",\"2018\"])\n        self.EndTimeMonth=wx.ComboBox(panel,-1,str(Month),choices=[\"1\",\"2\",\"3\",\"4\",\"5\",\"6\",\"7\",\"8\",\"9\",\"10\",\"11\",\"12\"])\n        self.EndTimeDay=wx.TextCtrl(panel,-1,str(Day),size=(60,25))\n        self.EndTimeHour=wx.TextCtrl(panel,-1,str(Hour),size=(60,25))\n        self.EndTimeMinute=wx.TextCtrl(panel,-1,str(Min),size=(60,25))\n\n\n        sizer=wx.BoxSizer(wx.VERTICAL)\n        sizer.Add((15,15))\n        sizer.Add(self.radioBox3,0,wx.LEFT,20)\n        sizer.Add(self.FreqSection,0,wx.LEFT|wx.TOP,20)\n\n        sizer.Add((10,10))\n        hBox1=wx.BoxSizer(wx.HORIZONTAL)\n        hBox1.Add(wx.StaticText(panel,-1,u\"Centre frequency (MHz)\",size=(100,25)),0,wx.LEFT,20)\n        hBox1.Add(self.CentreFreq,0,wx.LEFT,20)\n        sizer.Add(hBox1)\n\n        sizer.Add((10,10))\n        hBox1=wx.BoxSizer(wx.HORIZONTAL)\n        hBox1.Add(wx.StaticText(panel,-1,u\"Bandwidth (MHz)\",size=(100,25)),0,wx.LEFT,20)\n        hBox1.Add(self.BandWidth,0,wx.LEFT,20)\n        sizer.Add(hBox1)\n\n        sizer.Add((10,10))\n        hBox1=wx.BoxSizer(wx.HORIZONTAL)\n
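        # label + radio row choosing where the distribution data comes from\n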
        hBox1.Add(wx.StaticText(panel,-1,u\"Distribution data source:\",size=(100,25)),0,wx.LEFT|wx.ALIGN_TOP,20)\n        hBox1.Add(self.radioBox1,0,wx.LEFT|wx.ALIGN_TOP,20)\n        sizer.Add(hBox1)\n\n        sizer.Add((10,10))\n        hBox1=wx.BoxSizer(wx.HORIZONTAL)\n        hBox1.Add(wx.StaticText(panel,-1,u\"T-POA positioning:\",size=(100,25)),0,wx.LEFT|wx.ALIGN_TOP,20)\n        hBox1.Add(self.radioBox4,0,wx.LEFT|wx.ALIGN_TOP,20)\n        sizer.Add(hBox1)\n\n        sizer.Add((10,10))\n        hBox1=wx.BoxSizer(wx.HORIZONTAL)\n        hBox1.Add(wx.StaticText(panel,-1,u\"Display data source:\",size=(100,25)),0,wx.LEFT|wx.ALIGN_TOP,20)\n        hBox1.Add(self.radioBox2,0,wx.LEFT|wx.ALIGN_TOP,20)\n        sizer.Add(hBox1)\n\n        sizer.Add((10,10))\n        sizer.Add(wx.StaticText(panel,-1,u\"Start time (Y-M-D-H-Min):\",size=(160,25)),0,wx.LEFT,20)\n        sizer.Add((10,10))\n        hBox1=wx.BoxSizer(wx.HORIZONTAL)\n        hBox1.Add(self.StartTimeYear,0,wx.LEFT,20)\n        hBox1.Add(wx.StaticText(panel,-1,\"-\"),0,wx.LEFT|wx.RIGHT|wx.ALIGN_BOTTOM,5)\n        hBox1.Add(self.StartTimeMonth,0)\n        hBox1.Add(wx.StaticText(panel,-1,\"-\"),0,wx.LEFT|wx.RIGHT|wx.ALIGN_BOTTOM,5)\n        hBox1.Add(self.StartTimeDay,0)\n        hBox1.Add(wx.StaticText(panel,-1,\"-\"),0,wx.LEFT|wx.RIGHT|wx.ALIGN_BOTTOM,5)\n        hBox1.Add(self.StartTimeHour,0)\n        hBox1.Add(wx.StaticText(panel,-1,\"-\"),0,wx.LEFT|wx.RIGHT|wx.ALIGN_BOTTOM,5)\n        hBox1.Add(self.StartTimeMinute,0)\n        sizer.Add(hBox1)\n\n        sizer.Add(wx.StaticText(panel,-1,u\"End time (Y-M-D-H-Min):\",size=(160,25)),0,wx.LEFT,20)\n        sizer.Add((10,10))\n        hBox1=wx.BoxSizer(wx.HORIZONTAL)\n        hBox1.Add(self.EndTimeYear,0,wx.LEFT,20)\n        hBox1.Add(wx.StaticText(panel,-1,\"-\"),0,wx.LEFT|wx.RIGHT|wx.ALIGN_BOTTOM,5)\n        hBox1.Add(self.EndTimeMonth,0)\n        hBox1.Add(wx.StaticText(panel,-1,\"-\"),0,wx.LEFT|wx.RIGHT|wx.ALIGN_BOTTOM,5)\n        hBox1.Add(self.EndTimeDay,0)\n        hBox1.Add(wx.StaticText(panel,-1,\"-\"),0,wx.LEFT|wx.RIGHT|wx.ALIGN_BOTTOM,5)\n        hBox1.Add(self.EndTimeHour,0)\n        hBox1.Add(wx.StaticText(panel,-1,\"-\"),0,wx.LEFT|wx.RIGHT|wx.ALIGN_BOTTOM,5)\n        hBox1.Add(self.EndTimeMinute,0)\n        sizer.Add(hBox1)\n        sizer.Add((30,30))\n        hBox1=wx.BoxSizer(wx.HORIZONTAL)\n        self.btn_ok=wx.Button(panel,-1,\"OK\",size=(60,25))\n\n        hBox1.Add(self.btn_ok,0,wx.LEFT,20)\n        hBox1.Add(wx.Button(panel,wx.ID_CANCEL,\"CANCEL\",size=(60,25)),0,wx.LEFT,20)\n        sizer.Add(hBox1)\n        panel.SetSizer(sizer)\n\n        self.Layout()\n\n        self.Centre( wx.BOTH )\n\n        # Events\n        self.Bind(wx.EVT_RADIOBOX, self.OnRadio,self.radioBox3)\n        self.btn_ok.Bind(wx.EVT_BUTTON, self.OnbtnOk)\n        self.FreqSection.Enable(False)\n        self.CentreFreq.Enable(True)\n        self.BandWidth.Enable(True)\n\n    def ByteToTime(self,timeTuple):\n        Obj=Time()\n        Obj.HighYear=timeTuple[0]>>4\n        Obj.LowYear=timeTuple[0]&0x00F\n        Obj.Month=timeTuple[1]\n        Obj.Day=timeTuple[2]\n        Obj.HighHour=timeTuple[3]>>2\n        Obj.LowHour=timeTuple[3]&0x03\n        Obj.Minute=timeTuple[4]\n\n        return Obj\n\n    def OnRadio(self,event):\n        if(self.radioBox3.GetSelection()==0):\n            self.FreqSection.Enable(True)\n            self.BandWidth.Enable(False)\n            self.CentreFreq.Enable(False)\n\n        elif(self.radioBox3.GetSelection()==1):\n            self.FreqSection.Enable(False)\n            self.CentreFreq.Enable(True)\n            self.BandWidth.Enable(True)\n\n    def OnbtnOk(self,event):\n\n        if(self.radioBox1.GetSelection()):\n            reqElec=ReqElecPath()\n            reqElec.CommonHeader=FrameHeader(0x55,0xA3,self.lowid,self.highid)\n            reqElec.CommonTail=self.tail\n\n            reqElec.DataSource=15\n\n            if(self.radioBox3.GetSelection()==0):\n                centreFreq=self.ListFreq[self.FreqSection.GetSelection()][0]\n                bandWidth=self.ListFreq[self.FreqSection.GetSelection()][1]\n            else:\n                centreFreq=int(self.CentreFreq.GetValue())\n                bandWidth=int(self.BandWidth.GetValue())\n\n            if(self.radioBox4.GetSelection()==0):\n
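                # selection 0 is YES; 0xFF presumably enables T-POA localisation in the request frame\n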
                reqElec.TPOA=0xFF\n\n            reqElec.HighCentreFreq=centreFreq>>8\n            reqElec.LowCentreFreq=centreFreq&0x00FF\n            reqElec.BandWidth=bandWidth\n\n            startTime=(int(self.StartTimeYear.GetValue()),int(self.StartTimeMonth.GetValue()), \\\n                       int(self.StartTimeDay.GetValue()),int(self.StartTimeHour.GetValue()), \\\n                       int(self.StartTimeMinute.GetValue())\n                       )\n            endTime=(int(self.EndTimeYear.GetValue()),int(self.EndTimeMonth.GetValue()), \\\n                     int(self.EndTimeDay.GetValue()),int(self.EndTimeHour.GetValue()), \\\n                     int(self.EndTimeMinute.GetValue())\n                     )\n\n            reqElec.StartTime=self.ByteToTime(startTime)\n            reqElec.EndTime=self.ByteToTime(endTime)\n            #\n            # if(staticVar.getSock()):\n            #     staticVar.getSock().sendall(bytearray(reqElec))\n\n            self.parent.queueRequest.put(reqElec)\n\n        else:\n            if(self.parent.thread_route_map==0):\n                self.parent.thread_route_map=InsertRouteThread(self.parent)\n                self.parent.thread_route_map.start()\n            else:\n                self.parent.thread_route_map.event.set()\n\n        self.Close()\n","sub_path":"src/MapDialog/req_route.py","file_name":"req_route.py","file_ext":"py","file_size_in_byte":9419,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
{"seq_id":"304358588","text":"#!/usr/local/bin/python3\nimport os,sys\n\ncurdir = os.path.abspath(\".\")\n\nfor f in [f for f in os.listdir(curdir) if f.endswith(\".avi\")]:\n    fout = f[:-4] + \".out\"\n    cmd = \"sbatch --output=%s/%s avi_to_h5.sh %s/%s\" % (curdir, fout, curdir, f)\n    for arg in sys.argv[1:]:\n        cmd += \" \" + arg\n    print(cmd)\n    os.system(cmd)\n","sub_path":"spts/scripts/submit_avi_to_h5.py","file_name":"submit_avi_to_h5.py","file_ext":"py","file_size_in_byte":331,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
{"seq_id":"336341509","text":"#!/usr/bin/env python\n# encoding: utf-8\n\"\"\"\nupdate/disease.py\n\nUpdate the disease terms in the database\n\nCreated by Måns Magnusson on 2017-04-03.\nCopyright (c) 2017 __MoonsoInc__. All rights reserved.\n\"\"\"\n\nimport logging\n\nimport click\n\nfrom scout.load.hpo import load_disease_terms\nfrom scout.utils.handle import get_file_handle\nfrom scout.utils.requests import fetch_mim_files\n\nLOG = logging.getLogger(__name__)\n\n\n@click.command('diseases', short_help='Update disease terms')\n@click.option('--api-key', help='Specify the API key')\n@click.pass_context\ndef diseases(context, api_key):\n    \"\"\"\n    Update disease terms in the mongo database.\n    \"\"\"\n    adapter = context.obj['adapter']\n\n    # Fetch the OMIM information\n    api_key = api_key or context.obj.get('omim_api_key')\n    if not api_key:\n        LOG.warning(\"Please provide an OMIM API key to load the OMIM gene panel\")\n        context.abort()\n\n    try:\n        mim_files = fetch_mim_files(api_key, genemap2=True)\n    except Exception as err:\n        LOG.warning(err)\n        context.abort()\n\n    LOG.info(\"Dropping DiseaseTerms\")\n    adapter.disease_term_collection.drop()\n    LOG.debug(\"DiseaseTerms dropped\")\n\n    load_disease_terms(\n        adapter=adapter,\n        genemap_lines=mim_files['genemap2'],\n    )\n\n    LOG.info(\"Successfully loaded all disease terms\")\n","sub_path":"scout/commands/update/disease.py","file_name":"disease.py","file_ext":"py","file_size_in_byte":1326,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}